blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3b42ac5534171999418bb0acec06d6b47705d2eb
|
Python
|
vegetablejuiceftw/sauron
|
/playground/shared_memory/generator.py
|
UTF-8
| 1,079
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
from time import time, sleep
import cv2 as cv
import SharedArray as sa
import numpy as np

cap = cv.VideoCapture(0)
# cap.set(cv.CAP_PROP_FPS, 29)
# cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
# cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)

# Grab one frame up front so we know the capture geometry and dtype.
ret, frame = cap.read()
frame: np.ndarray
shape = frame.shape
dtype = frame.dtype
print(shape)


def get_publisher(channel: str, shape: tuple, dtype) -> np.ndarray:
    """Attach to (or create) the shared-memory array backing *channel*.

    An existing segment is reused only when its dtype and dims match;
    otherwise the stale segment is deleted and a fresh one is created.
    """
    short_name = channel.split("://")[-1]
    mapping = {e.name.decode(): e for e in sa.list()}
    if short_name in mapping:
        array = mapping[short_name]
        if array.dtype == dtype and array.dims == shape:
            return sa.attach(channel)
        sa.delete(short_name)
    return sa.create(channel, shape, dtype)


frame = get_publisher("shm://test", shape, dtype)
start = time()
counter = 1
# BUG FIX: `cap.isOpened` without parentheses is a bound method and is
# always truthy; call it so the loop actually observes the capture state.
while cap.isOpened():
    counter += 1
    # Decode directly into the shared-memory frame buffer.
    cap.read(frame)
    if counter % 30 == 0:
        # Rough FPS over the last 30 frames... wait, over the interval
        # since `start`; reset the window each report.
        print(1 / (time() - start))
        start = time()
    # cv.imshow('Capture - producer', frame)
    # if cv.waitKey(1) == 27:
    #     break
| true
|
c59f38407747bd9c45b86c783b40f2813f84bf25
|
Python
|
bilherron/advent-of-code
|
/2019/Day 7/7.1.py
|
UTF-8
| 3,226
| 2.828125
| 3
|
[] |
no_license
|
import sys, io
from itertools import permutations
def computer(cmd, label, inp_values=[]):
    """Run an Intcode program and return its last output.

    Args:
        cmd: the program as a mutable list of ints; modified in place.
        label: amplifier label, used only in the interactive input prompt.
        inp_values: queued inputs consumed by opcode 3 (read-only, so the
            mutable default is safe here); once exhausted, the user is
            prompted interactively.

    Returns:
        The last value emitted by opcode 4 ("" if nothing was emitted),
        or None if the machine crashed.
    """
    input_values_index = 0
    output = ""
    pointer = 0
    instruction = str(cmd[pointer]).zfill(5)
    opcode = int(instruction[3:])
    try:
        while opcode != 99:
            # Decode parameters and their modes (0 = position, 1 = immediate).
            parameter_1 = cmd[pointer + 1]
            parameter_1_mode = instruction[2]
            parameter_2 = cmd[pointer + 2]
            parameter_2_mode = instruction[1]
            parameter_3 = cmd[pointer + 3]
            parameter_3_mode = instruction[0]  # should always be 0 (write target)
            value_1 = cmd[parameter_1] if parameter_1_mode == "0" else parameter_1
            if opcode not in [3, 4]:
                value_2 = cmd[parameter_2] if parameter_2_mode == "0" else parameter_2
            if opcode == 1:  # add
                cmd[parameter_3] = value_1 + value_2
                pointer += 4
            elif opcode == 2:  # multiply
                cmd[parameter_3] = value_1 * value_2
                pointer += 4
            elif opcode == 3:  # read input
                try:
                    cmd[parameter_1] = int(inp_values[input_values_index])
                    input_values_index += 1
                # BUG FIX: exhausting a *list* raises IndexError, not
                # KeyError; the interactive fallback was unreachable before
                # (exhaustion fell through to the crash handler below).
                except IndexError:
                    cmd[parameter_1] = int(input(f"Enter phase setting for amplifier {label}: "))
                pointer += 2
            elif opcode == 4:  # emit output
                value_1 = cmd[parameter_1] if parameter_1_mode == "0" else parameter_1
                output = value_1
                pointer += 2
            elif opcode == 5:  # jump-if-true
                pointer = pointer + 3 if value_1 == 0 else value_2
            elif opcode == 6:  # jump-if-false
                pointer = value_2 if value_1 == 0 else pointer + 3
            elif opcode == 7:  # less-than
                cmd[parameter_3] = 1 if value_1 < value_2 else 0
                pointer += 4
            elif opcode == 8:  # equals
                cmd[parameter_3] = 1 if value_1 == value_2 else 0
                pointer += 4
            else:
                raise ValueError
            instruction = str(cmd[pointer]).zfill(5)
            opcode = int(instruction[3:])
        return output
    except KeyboardInterrupt:
        sys.exit()
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
    except Exception:
        print("Computer on fire.", pointer, opcode, sys.exc_info()[0])
        print(cmd)
# Load the amplifier program (comma-separated Intcode) from disk.
with open("input.txt") as fp:
    amplifier_controller_software = [int(n) for n in fp.read().split(",")]

# Sample programs from the puzzle statement (unused below).
acs_test_1 = [int(n) for n in "3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0".split(",")]
acs_test_2 = [int(n) for n in "3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0".split(",")]

max_thrust = 0
amplifiers = ["A","B","C","D","E"]
# Try every ordering of the phase settings 0..4.
thruster_signal_permutations = list(permutations(range(0, 5)))
for thruster_signals in thruster_signal_permutations:
    out = 0
    # Chain the amplifiers: each one gets its phase plus the previous output.
    for i, amp in enumerate(amplifiers):
        inp = [thruster_signals[i], out]
        # Run on a copy so every amplifier starts from pristine software.
        out = computer(amplifier_controller_software[:], amp, inp)
    if out > max_thrust:
        max_thrust = out
        best_signal = thruster_signals
# NOTE(review): best_signal is unbound if no permutation beats 0 — in
# practice every run produces a positive thrust, but confirm.
print(best_signal, max_thrust)
| true
|
7653e9f07bc705c9083511474975d2c65a1900ab
|
Python
|
xmillero/python2
|
/D03/ex00/geohashing.py
|
UTF-8
| 670
| 3.1875
| 3
|
[] |
no_license
|
import sys
import antigravity
def Traitement(lat, lon, reste):
    # Delegate to the stdlib `antigravity` easter-egg module, which
    # implements the xkcd geohashing algorithm for the given coordinates
    # and "date-DowJones" string.
    antigravity.geohash(lat, lon, reste.encode())


if __name__ == '__main__':
    try:
        lat = float(sys.argv[1])  # validate latitude
    except ValueError:
        print("Parametre 1 : mauvais format")
        exit(1)
    try:
        lon = float(sys.argv[2])  # validate longitude
    except ValueError:
        print("Parametre 2 : mauvais format")
        exit(2)
    try:
        dj = float(sys.argv[4])  # Dow Jones opening tied to the date
        inStr = sys.argv[3] + '-' + str(dj)  # concatenate the date and the index
    except ValueError:
        print("Parametre 4 : mauvais format")
        exit(3)
    # NOTE(review): a missing argv entry raises IndexError, which none of
    # these handlers catch — confirm whether that is intended.
    Traitement(lat, lon, inStr)  # resolve the geohash point
| true
|
ab17604d524229c063740dd7e5b4365c95bd8265
|
Python
|
broadinstitute/dig-loam
|
/src/scripts/make_samples_restore_table.py
|
UTF-8
| 1,843
| 2.59375
| 3
|
[] |
no_license
|
import pandas as pd
import argparse
def main(args=None):
    """Build a table mapping sample IDs to the QC step that restored them.

    Each optional ``*_keep`` argument names a file containing one sample ID
    per line; IDs from each file are tagged with that step's label.  The
    combined table is written to ``args.out`` as a tab-separated file with
    columns IID and RestoreFrom.
    """
    # (input file attribute, RestoreFrom label) pairs, in output order.
    sources = [
        (args.ancestry_outliers_keep, 'ancestryOutliersKeep'),
        (args.duplicates_keep, 'duplicatesKeep'),
        (args.famsize_keep, 'famsizeKeep'),
        (args.sampleqc_keep, 'sampleqcKeep'),
        (args.sexcheck_keep, 'sexcheckKeep'),
    ]
    # Seed with an empty frame so the output keeps its header even when no
    # input files are given (mirrors the original behavior).
    frames = [pd.DataFrame({'IID': [], 'RestoreFrom': []})]
    for path, label in sources:
        if path:
            with open(path) as f:
                lines = f.read().splitlines()
            frames.append(pd.DataFrame({'IID': lines, 'RestoreFrom': label}))
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; collect the per-source frames and concatenate once instead.
    df = pd.concat(frames, ignore_index=True)
    df.to_csv(args.out, header=True, index=False, sep="\t")
if __name__ == "__main__":
    # Each optional flag names a file with one sample ID per line; only
    # --out is mandatory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--ancestry-outliers-keep', help='a list of sample IDs')
    parser.add_argument('--duplicates-keep', help='a list of sample IDs')
    parser.add_argument('--famsize-keep', help='a list of sample IDs')
    parser.add_argument('--sampleqc-keep', help='a list of sample IDs')
    parser.add_argument('--sexcheck-keep', help='a list of sample IDs')
    requiredArgs = parser.add_argument_group('required arguments')
    requiredArgs.add_argument('--out', help='an output filename', required=True)
    args = parser.parse_args()
    main(args)
| true
|
17c140df3ffe08f993629a49bdc02633e6e53092
|
Python
|
raindolf/African-Grading-Program
|
/African-Grading-Program/main.py
|
UTF-8
| 2,832
| 3.671875
| 4
|
[] |
no_license
|
## Importing Standard Libraries ##
import sys
class African_Grading_Program():
    # NOTE(review): everything below sits directly in the class body, so it
    # all runs once when the class statement is evaluated (Python 2 syntax:
    # print statements and raw_input) — there are no methods at all.

    ## Begin License ##
    print ""
    print ""
    print "African Grading Program (Python Version) Copyright (C) 2012 Cody Dostal"
    print "This program comes with ABSOLUTELY NO WARRANTY; for details, go to http://www.gnu.org/licenses/gpl-3.0.html."
    print "This is free software, and you are welcome to redistribute it"
    print "under certain conditions; go to http://www.gnu.org/licenses/gpl-3.0.html for details."
    ## End License ##

    ## Two empty lines ##
    print ""
    print ""

    # Greet the user by name and explain how the program works.
    print "Please enter your full name:"
    name = raw_input()
    print "Welcome " + name + ", this is a basic Python program based off of Raindolf Owusu's C++ version."
    print "This program asks for a numerical percentile grade, and returns the letter grade, and an"
    print "interpretation of the grade."
    print "At any time, type any number greater than 100, less than 0 to quit."
    print ""

    # Grade loop: map a 0-100 score onto letter bands; any out-of-range
    # entry falls through to the else branch, which prints the credits and
    # exits.  (Python 2 input() evaluates the entry as an expression.)
    while True:
        print "Please enter your numerical examination score: "
        num_marks = int(input())
        if 70 <= num_marks <= 100:
            print "Your grade is an A, and it is interpreted as an excellent score."
        elif 65 <= num_marks <= 69:
            print "Your grade is an A-, and it is interpreted as a very good score."
        elif 60 <= num_marks <= 64:
            print "Your grade is a B+, and it is interpreted as a good score."
        elif 55 <= num_marks <= 59:
            print "Your grade is a B, and it is interpreted as an above average score."
        elif 50 <= num_marks <= 54:
            print "Your grade is a B-, and it is interpreted as an average score."
        elif 45 <= num_marks <= 49:
            print "Your grade is a C+, and it is interpreted as a passing grade."
        elif 40 <= num_marks <= 44:
            print "Your grade is a C, and it is interpreted as a passing grade."
        elif 30 <= num_marks <= 39:
            print "Your grade is a D, and it is interpreted as a failing grade."
        elif 0 <= num_marks <= 29:
            print "Your grade is an F, and it is interpreted as a failing grade."
        else:
            # Out-of-range score: print the credits and quit.
            print ""
            print ""
            print "Thank you for using the Python version of Raindolf Owusu's African Grading Program."
            print "This python version was written by Cody Dostal. The original C++ version was written by Raindolf Owusu."
            print "If you have found any bugs, please email me at dostalcody@gmail.com"
            print "You may view the license for this program at http://www.gnu.org/licenses/gpl-3.0.html"
            sys.exit()
| true
|
e85de1281c62f5f4d102c11fc4677e58839aebac
|
Python
|
evejazmin/c-digo
|
/resta.py
|
UTF-8
| 231
| 3.28125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 14:39:43 2020
@author: EstAnthonyFabricioSa
"""
def subtra(a, b):
    """Print the result of subtracting b from a."""
    difference = a - b
    print(difference)
subtra(5, 2) # outputs:3
subtra(2, 5)  # positional: 2 - 5 -> -3
subtra(a=2, b=5)  # keyword arguments, same values -> -3
subtra(b=2, a=5)  # keyword order is irrelevant: 5 - 2 -> 3
subtra(2, b=5)  # mixed positional/keyword -> -3
| true
|
63ee00cbd9297c22ef490d7a235125330d38a91c
|
Python
|
georgi-lyubenov/HackBulgaria
|
/HackSearchProject/main.py
|
UTF-8
| 1,881
| 2.9375
| 3
|
[] |
no_license
|
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
import requests
from bs4 import BeautifulSoup
from create_db import *
from urllib.parse import urljoin
class Spider():
    """Breadth-ish web crawler that stores page titles/descriptions in SQLite."""

    def __init__(self, homepage):
        self.scanned_pages = []
        self.queue = []
        self.engine = create_engine("sqlite:///my_database.db")
        Base.metadata.create_all(self.engine)
        self.session = Session(bind=self.engine)
        self.homepage = homepage

    def prepare_url(self, url, href):
        # Resolve a possibly-relative href against the current page URL.
        return urljoin(url, href)

    def scan_page(self, url):
        """Fetch *url*, persist its metadata, and enqueue outgoing links."""
        self.scanned_pages.append(url)
        r = requests.get(url)
        soup = BeautifulSoup(r.content)
        soup.prettify()
        html_title = soup.title.string
        try:
            description = soup.find(attrs={"property": "og:description"}).get("content")
            desc = description
        # soup.find returns None when the og:description tag is absent,
        # making .get raise AttributeError (narrowed from broad Exception).
        except AttributeError:
            desc = "None"
        links = soup.findAll('a')
        for link in links:
            href = link.get("href")
            # Anchors without an href attribute cannot be followed.
            if href is None:
                continue
            url = self.prepare_url(url, href)
            obj = Website(URL=url, title=html_title, description=desc)
            self.session.add(obj)
            self.session.commit()
            if "http" in href:
                if href not in self.scanned_pages:
                    self.queue.append(self.prepare_url(url, href))

    def scan_website(self):
        """Crawl from the homepage until the queue drains; return a tally."""
        self.scan_page(self.homepage)
        while self.queue:
            # BUG FIX: the original popped the queue twice per iteration
            # (once to print, once to scan), silently skipping every other
            # queued URL.  Pop once and reuse the value.
            next_url = self.queue.pop()
            print(next_url, len(self.scanned_pages))
            try:
                self.scan_page(next_url)
            except Exception:
                # Best-effort crawl: ignore pages that fail to fetch/parse.
                pass
        return ("pages scanned: ", len(self.scanned_pages))
def main():
    # Seed URL for the crawl; swap in the commented value to index Google.
    base_url = 'http://fmi.py-bg.net/'
    #base_url = 'http://google.com'
    s = Spider(base_url)
    s.scan_website()


if __name__ == '__main__':
    main()
| true
|
97eb410fee349185c6375859b7cb87eee509300e
|
Python
|
ha8sh/IBSng
|
/core/server/handler.py
|
UTF-8
| 1,494
| 3.0625
| 3
|
[] |
no_license
|
class Handler: #ABSTRACT
    """Base class for all RPC handlers.

    Every concrete handler sets its name and registers its callable method
    names through registerHandlerMethod(); a HandlerManager then dispatches
    "name"."method"("args") calls to the handler instance on demand.

    IBS serves requests from multiple threads, so handlers must take care
    of their own thread-safety where necessary — prefer local variables to
    mutable object/global state.
    """

    def __init__(self, handler_name):
        self.__name = handler_name
        self.__methods = []

    def getHandlerName(self):
        """Return the handler name used in xmlrpc calls."""
        return self.__name

    def getHandlerMethods(self):
        """Return the list of method-name strings callable via rpc requests."""
        return self.__methods

    def registerHandlerMethod(self, method_name):
        """Register a new method on this handler; duplicates are rejected."""
        if method_name in self.__methods:
            raise HandlerException("Duplicate registration of method %s"%method_name)
        self.__methods.append(method_name)
| true
|
fef2596ad53cb01fd56c81351bbaaf8aead00dcd
|
Python
|
Uxooss/Lessons-Hellel
|
/Lesson_09/9.2 - Shift_bit_place.py
|
UTF-8
| 1,732
| 4.21875
| 4
|
[] |
no_license
|
'''
Task: write a function that cyclically shifts an integer by N places to the
right or left, depending on the function's third parameter.  The function
takes three parameters: the number to shift, the number of places to shift
by (default: 1), and a boolean giving the direction (default: left, False).
'''


def bit_shift(num, shft=1, drct=False):
    """Shift *num* by *shft* decimal places.

    drct falsy (the default) multiplies by 10**-shft; drct truthy multiplies
    by 10**shft.

    NOTE(review): despite the task statement above, this shifts decimal
    digits (powers of ten), is not cyclic, and does not operate on bits.
    """
    factor = 10 ** (shft if drct else -shft)
    return num * factor
# ----------------------------------------
# Without user input:
print()
res = bit_shift(225)
print(res)
res = bit_shift(205, 2, True)
print(res)
# -----------------------------------------
# With user input:
n = int(input('\nВведите число:\t'))
qtn = str(input('\u2755 По-умолчанию, происходит сдвиг на 1 разряд влево.\n'
                'Хотите изменить количество и направление сдвига? (y / n):\t'))
if qtn == 'y':
    s = int(input('Введите величену сдвига:\t'))
    d = str(input('Введите направление сдвига ("-" или "+"):\t'))
elif qtn == 'n':
    s = 1
    d = False
# NOTE(review): if qtn is anything other than 'y' or 'n', s and d are never
# assigned and the bit_shift call below raises NameError — confirm intended.
# Map the "-"/"+" answer onto the boolean direction flag.
if d == '-':
    d = False
elif d == '+':
    d = True
res = bit_shift(n, s, d)
print('\nРезультат сдвига:\t', res)
| true
|
67cf2a4394e8dc0d426061ed2d96b1a875bb147d
|
Python
|
Kane610/deconz
|
/pydeconz/models/sensor/time.py
|
UTF-8
| 509
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
"""Python library to connect deCONZ and Home Assistant to work together."""
from typing import TypedDict
from . import SensorBase
class TypedTimeState(TypedDict):
    """Time state type definition."""

    # Raw "lastset" timestamp string reported by the sensor.
    lastset: str
class TypedTime(TypedDict):
    """Time type definition."""

    # Nested state payload of the sensor's raw representation.
    state: TypedTimeState
class Time(SensorBase):
    """Time sensor."""

    raw: TypedTime

    @property
    def last_set(self) -> str:
        """Last time time was set."""
        state = self.raw["state"]
        return state["lastset"]
| true
|
bf9fcbc3cdd86f0c8a2d62bfc2d863bc9a070cd7
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2354/49823/300147.py
|
UTF-8
| 90
| 2.953125
| 3
|
[] |
no_license
|
# Print "1" or "20" with equal probability.
import random

draw = int(random.random() * 2)  # evaluates to 0 or 1
if draw == 0:
    print('1')
elif draw == 1:
    print('20')
| true
|
a770074c29078a4dd3a1cba0aabc8a086864f976
|
Python
|
jacobwojcik/codewars-algorithms
|
/python/uniqueInOrder.py
|
UTF-8
| 437
| 4.03125
| 4
|
[] |
no_license
|
# Implement the function unique_in_order which takes as argument a sequence
# and returns a list of items without any elements with the same value
# next to each other and preserving the original order of elements.
def unique_in_order(iterable):
    """Return the elements of *iterable* with consecutive duplicates removed.

    Generalized from the original: iterates instead of indexing, so it
    accepts any iterable (strings, lists, generators), and returns [] for
    an empty input.
    """
    result = []
    for item in iterable:
        # Keep the item only when it differs from the last element kept.
        if not result or item != result[-1]:
            result.append(item)
    return result
| true
|
44047097744141a31f352e60ecb22531f862e589
|
Python
|
kaismithereens/realpython
|
/pages 0-100/exercises_pg43.py
|
UTF-8
| 310
| 3.640625
| 4
|
[] |
no_license
|
my_string1 = "AAA"
# str.find is case-sensitive, so searching "AAA" for "a" returns -1.
print(my_string1.find("a"))

my_string2 = "version 2.0"
number = 2.0
# str(2.0) == "2.0", which occurs in "version 2.0"; find returns its index.
my_string3 = my_string2.find(str(number))
print(my_string3)

# Print the index of the first occurrence of each vowel (-1 if absent).
user_input = input("How are you feeling today? ")
print(user_input.find("a"))
print(user_input.find("e"))
print(user_input.find("i"))
print(user_input.find("o"))
| true
|
ae6d229afa4d7c429f76c827b0e7513c1099efed
|
Python
|
juanjoqmelian/python-silver-bars
|
/silver-bars/silver_bars_live_orders_board.py
|
UTF-8
| 2,453
| 3.171875
| 3
|
[] |
no_license
|
import uuid
from functools import reduce
from itertools import groupby
from order import Order
from order_type import OrderType
from summary_info import SummaryInfo
class SilverBarsLiveOrdersBoard:
    """In-memory live orders board: register, summarise and cancel orders."""

    def __init__(self) -> None:
        # All currently live orders, in registration order.
        self.orders = []

    def register(self, order: Order) -> str:
        """Registers a valid order in the live orders board, returning the id assigned to the new order"""
        if not order.is_valid():
            raise ValueError('Cannot register an invalid order!')
        # NOTE(review): uuid1() returns a uuid.UUID, not the annotated str;
        # cancel() compares ids with !=, so callers must pass back the same
        # UUID value — confirm the annotation.
        order.id = uuid.uuid1()
        self.orders.append(order)
        return order.id

    def summary(self) -> SummaryInfo:
        """Shows a summary list for all the existing orders in the board"""
        orders_by_type = self.__sort_by_type()
        final_orders = self.__group_by_type_and_price(orders_by_type)
        summaries = self.__generate_summary(final_orders)
        return SummaryInfo(*summaries)

    def cancel(self, order_id: str) -> None:
        """Cancels an existing order in the board. Raises exception if order does not exist."""
        # NOTE(review): contrary to the docstring, an unknown id raises
        # nothing — the filter simply leaves the list unchanged.
        self.orders = [order for order in self.orders if order.id != order_id]

    def __sort_by_type(self):
        # Bucket the live orders by order type, sorting each bucket by price
        # (ascending for SELL, descending otherwise).
        # NOTE(review): itertools.groupby only groups *consecutive* elements
        # and self.orders is not sorted by type first, so a later run of an
        # already-seen type overwrites its earlier bucket — confirm/fix.
        orders_by_type = dict()
        for order_type, items in groupby(self.orders, key=lambda o: o.type):
            orders_by_type[order_type] = list(items)
            orders_by_type[order_type].sort(key=lambda o: o.price) if order_type == OrderType.SELL else orders_by_type[order_type].sort(key=lambda o: o.price, reverse=True)
        return orders_by_type

    def __group_by_type_and_price(self, orders_by_type):
        # Merge same-priced orders of each type into a single Order whose
        # quantity is the sum of the merged quantities.
        final_orders = dict()
        for order_type in orders_by_type.keys():
            final_orders[order_type] = dict()
            for price, items_by_price in groupby(list(orders_by_type[order_type]), key=lambda o: o.price):
                final_orders[order_type][price] = reduce(lambda left, right:
                                                         Order(right.user_id, left.quantity + right.quantity,
                                                               right.price, right.type, right.id),
                                                         list(items_by_price)
                                                         )
        return final_orders

    def __generate_summary(self, final_orders):
        # Flatten the nested {type: {price: Order}} mapping into a flat list
        # of per-order summaries.
        return [order.summary() for orders_by_price in final_orders.values() for order in orders_by_price.values()]
| true
|
a41772741683a584406d00d5b82339d7a61309ed
|
Python
|
ghilbing/Ejemplos
|
/isUnique.py
|
UTF-8
| 99
| 3.296875
| 3
|
[] |
no_license
|
def isUnique(a):
    """Return True when every element of *a* occurs exactly once."""
    distinct = set(a)
    return len(distinct) == len(a)
a = ["A", "B", "C", "D", "A"]
print isUnique(a)
| true
|
23c23c1105a6076d714a44159e13db730b680733
|
Python
|
devjoag/PyAssignments
|
/Assignment_2/Assignment2_10.py
|
UTF-8
| 236
| 3.75
| 4
|
[] |
no_license
|
def main():
    """Read an integer from the user and print the sum of its digits."""
    no = int(input("Enter Number: "))
    digit_sum = sum(int(digit) for digit in str(no))
    print("Sum of Digits in {} are {}".format(no, digit_sum))
# Entry-point guard (the parentheses around __name__ are redundant but
# harmless).
if(__name__) == "__main__":
    main()
| true
|
2a883e80a476386db01ce6af3f9372ed99019245
|
Python
|
serofly/The-rudiments-of-Machine-Learning
|
/07kmeans.py
|
UTF-8
| 2,825
| 2.859375
| 3
|
[] |
no_license
|
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
import numpy as np

# Make Chinese labels and the minus sign render correctly in matplotlib.
plt.rcParams['font.sans-serif'] = 'Microsoft YaHei'
plt.rcParams['axes.unicode_minus'] = False

# Load the four source tables.
prior = pd.read_csv("./data/PCA/order_products__prior.csv")  # product_id, order_id
products = pd.read_csv("./data/PCA/products.csv")  # product_id,aisle_id
orders = pd.read_csv("./data/PCA/orders.csv")  # order_id, user_id
aisle = pd.read_csv("./data/PCA/aisles.csv")  # aisle_id, aisle

# Join the four tables into a user / product-category view.
_mg = pd.merge(prior, products, left_on="product_id", right_on="product_id")
_mg = pd.merge(_mg, orders, left_on="order_id", right_on="order_id")
mt = pd.merge(_mg, aisle, left_on="aisle_id", right_on="aisle_id")

# Cross-tabulate users against aisles.
cross = pd.crosstab(mt['user_id'], mt['aisle'])

# Principal component analysis, keeping 90% of the variance.
pca = PCA(n_components=0.9)
data = pca.fit_transform(cross)
print(data.shape)

# Assume four user clusters; PCA could be skipped for low-dimensional data.
data = data[:500]
km = KMeans(n_clusters=4)
km.fit(data)
predict = km.predict(data)  # each sample becomes a label in {0, 1, 2, 3}
# km.labels_ gives the cluster labels; km.cluster_centers_ the centroids.
print("聚类分析的轮廓系数", silhouette_score(data, predict))  # silhouette score: -1..0 poor, 0..1 good, closer to 1 is better

# Scatter plot of the clustering result.
plt.figure(figsize=(10, 10))
color = ["orange", "green", "blue", "red"]
col = [color[i] for i in predict]
plt.scatter(data[:, 1], data[:, 20], color=col)
plt.xlabel("1")
plt.ylabel("20")
plt.show()

# Radar chart with per-cluster head counts in the legend.
plt.figure(figsize=(10, 8))
plt.subplot(111, polar=True)
plt.style.use('ggplot')
# Count how many samples fall into each cluster.
r1 = pd.Series(km.labels_).value_counts()
# Convert the n cluster centers into a DataFrame (first 6 features only).
r2 = pd.DataFrame(km.cluster_centers_).iloc[:, :6]
center = pd.concat([r2, r1], axis=1)
feature = [1, 2, 3, 4, 5, 6]
angles = np.linspace(0, 2 * np.pi, len(feature), endpoint=False)
angles = np.concatenate((angles, [angles[0]]))
for i, j in enumerate(center.values):  # i is the cluster index; j is its row (features + count)
    # Repeat the first value so that the radar outline closes into a loop.
    values = np.concatenate((j[:-1], [j[0]]))
    # Draw the outline for this cluster.
    plt.plot(angles, values, 'o-', linewidth=2, label='第%d类人群,%d人' % (i + 1, j[-1]))
    # Fill the enclosed area.
    plt.fill(angles, values, col[i], alpha=0.25)
# Label each spoke with its feature.
plt.thetagrids(angles * 180 / np.pi, feature)
# Chart title.
plt.title('顾客分类状况')
# Legend placement.
plt.legend(loc='upper right', bbox_to_anchor=(1.2, 1.0), ncol=1, fancybox=True, shadow=True)
# Render the figure.
plt.show()
| true
|
1b72e7090866cb9997be8ac7b5a4556b9e8dad34
|
Python
|
jamtot/PyProjects
|
/The Coding Dead/game/hunter.py
|
UTF-8
| 564
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
from entity import Entity
class Hunter(Entity):
    """A zombie-hunting entity — both actions are currently stubs."""

    # is initialised with position from base class Entity
    # and a map reference

    def destroy_zombie(self):
        # use position to check nearby tiles for zombies
        # check in every direction
        # kill up to 2 zombies
        pass

    def move(self, potential_moves):
        # NOTE(review): the base-class move result is computed but never
        # used or returned — presumably unfinished; confirm.
        x_moves, y_moves = super(Hunter,self).move(potential_moves)

    def update(self):
        # can move 1 square in any direction to find zombie to kill
        # can slay up to two zombies after a move
        pass
| true
|
a89a4bf447f2825e1afa5765055e6d7d12209de7
|
Python
|
must-11/ap_experiment
|
/src/chaos/euler_method.py
|
UTF-8
| 1,674
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
import numpy as np
# Physical parameters of the double pendulum.
g = 9.8    # gravitational acceleration [m/s^2]
l_1 = 0.5  # length of the upper rod [m]
l_2 = 0.5  # length of the lower rod [m]
m_1 = 0.5  # mass of the upper bob [kg]
m_2 = 0.3  # mass of the lower bob [kg]
w = np.sqrt(g / l_1)    # natural frequency of the upper pendulum
l = l_2 / l_1           # length ratio
M = m_2 / (m_1 + m_2)   # mass ratio
h = 0.01  # Euler step size [s]
t = 100   # total simulated time [s]
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement.
n = int(t / h)
def f(Y):
    """Right-hand side of the double-pendulum equations of motion.

    Y = [theta1, theta2, omega1, omega2]; returns dY/dt.
    Relies on the module-level constants w, l and M.
    """
    delta = Y[0] - Y[1]
    om1_sq = np.square(Y[2])
    om2_sq = np.square(Y[3])
    w_sq = np.square(w)
    cos_d = np.cos(delta)
    sin_d = np.sin(delta)
    denom = l - M * l * np.square(cos_d)
    acc1 = (w_sq * l * (-np.sin(Y[0]) + M * cos_d * np.sin(Y[1]))
            - M * l * (om1_sq * cos_d + l * om2_sq) * sin_d) / denom
    acc2 = (w_sq * cos_d * np.sin(Y[0]) - w_sq * np.sin(Y[1])
            + (om1_sq + M * l * om2_sq * cos_d) * sin_d) / denom
    return np.array([Y[2], Y[3], acc1, acc2])
def exp_euler(Y, h):
    """Advance state Y by one explicit-Euler step of size h."""
    derivative = f(Y)
    return Y + h * derivative
def H(Y):
    """Total energy (kinetic + potential) of the double pendulum at state Y.

    Relies on the module-level constants m_1, m_2, l_1, l_2 and g.
    """
    delta = Y[0] - Y[1]
    om1_sq = np.square(Y[2])
    om2_sq = np.square(Y[3])
    kinetic = m_1 * np.square(l_1) * om1_sq / 2 + (np.square(l_1) * om1_sq + np.square(l_2) * om2_sq + 2 * l_1 * l_2 * Y[2] * Y[3] * np.cos(delta)) * m_2 / 2
    potential = -m_1 * l_1 * g * np.cos(Y[0]) - m_2 * g * (l_1 * np.cos(Y[0]) + l_2 * np.cos(Y[1]))
    return kinetic + potential
def main():
    # Initial state: [theta1, theta2, omega1, omega2].
    Y = np.array([0.1, 0, 0, 0])
    theta = [[0.1], [0]]
    ham = [H(Y)]
    # Integrate n explicit-Euler steps, recording both angles and the energy.
    for i in range(n):
        Y = exp_euler(Y, h)
        theta[0].append(Y[0])
        theta[1].append(Y[1])
        ham.append(H(Y))
    # Time axis (the 0.01 factor matches the module-level step size h).
    x = [i*0.01 for i in range(len(theta[0]))]
    # Plot both angles over time and save to img01.png.
    fig = plt.figure(figsize=(12, 8))
    plt.plot(x, theta[0], label="theta1")
    plt.plot(x, theta[1], label="theta2")
    plt.xlabel('time', fontsize=14)
    plt.legend(loc='upper right')
    fig.savefig("img01.png")
    # Plot the Hamiltonian over time (shows the energy drift of the
    # explicit-Euler scheme) and save to img02.png.
    fig = plt.figure(figsize=(12, 8))
    plt.plot(x, ham, label="Hamiltonian")
    plt.xlabel('time', fontsize=14)
    plt.legend(loc='upper right')
    fig.savefig("img02.png")


if __name__ == "__main__":
    main()
| true
|
556e8bda5150838a5b56a7b5dee2bea105490d8d
|
Python
|
anpark/transform
|
/tensorflow_transform/tf_metadata/dataset_schema.py
|
UTF-8
| 15,548
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In-memory representation of the schema of a dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import tensorflow as tf
class Schema(collections.namedtuple('Schema', ['column_schemas'])):
    """The schema of a dataset.

    This is an in-memory representation that may be serialized and
    deserialized to and from a variety of disk representations.

    Args:
      column_schemas: A dict from logical column names to `ColumnSchema`s.
    """

    def __new__(cls, column_schemas=None):
        # A falsy argument (None, {}) yields a fresh empty dict.
        return super(Schema, cls).__new__(cls, column_schemas or {})

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._asdict() == other._asdict()

    def __ne__(self, other):
        return not self == other

    def merge(self, other):
        # possible argument: resolution strategy (error or pick first and warn?)
        for name, column_schema in other.column_schemas.items():
            if name in self.column_schemas:
                self.column_schemas[name].merge(column_schema)
            else:
                self.column_schemas[name] = column_schema

    def as_feature_spec(self):
        """Returns a representation of this Schema as a feature spec.

        A feature spec (for a whole dataset) is a dictionary from logical
        feature names to one of `FixedLenFeature`, `SparseFeature` or
        `VarLenFeature`.

        Returns:
          A representation of this Schema as a feature spec.
        """
        spec = {}
        for name, column_schema in self.column_schemas.items():
            spec[name] = column_schema.as_feature_spec()
        return spec

    def as_batched_placeholders(self):
        """Returns a representation of this Schema as placeholder Tensors.

        Returns:
          A representation of this Schema as placeholder Tensors.
        """
        placeholders = {}
        for name, column_schema in self.column_schemas.items():
            placeholders[name] = column_schema.as_batched_placeholder()
        return placeholders
class ColumnSchema(collections.namedtuple(
        'ColumnSchema', ['logical_column', 'representation'])):
    """The schema for a single column in a dataset.

    The schema contains two parts: the logical description of the column,
    which describes the nature of the actual data in the column (particularly
    this determines how this will ultimately be represented as a tensor) and
    the physical representation of the column, i.e. how the column's data is
    represented in memory or on disk.

    Fields:
      logical_column: A `LogicalColumnSchema` that describes the data of the
          column.
      representation: A `ColumnRepresentation` that describes how the data is
          represented.
    """

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._asdict() == other._asdict()

    def __ne__(self, other):
        return not self == other

    def as_feature_spec(self):
        """Return this column as a feature spec (FixedLen/Sparse/VarLen)."""
        return self.representation.as_feature_spec(self.logical_column)

    def as_batched_placeholder(self):
        """Return this column as a batched placeholder Tensor."""
        return self.representation.as_batched_placeholder(self.logical_column)

    def merge(self, other):
        raise NotImplementedError('Merge not implemented yet.')
class LogicalColumnSchema(collections.namedtuple(
        'LogicalColumnSchema', ['domain', 'shape'])):
    """A description of the kind of data contained within a single column.

    Args:
      domain: a Domain object, providing the dtype and possibly other
          constraints.
      shape: a LogicalShape object describing the intrinsic shape of the
          data, irrespective of its representation as dense or sparse.
    """

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._asdict() == other._asdict()

    def __ne__(self, other):
        return not self == other
class Domain(object):
    """A description of the valid values that a column can take."""

    __metaclass__ = abc.ABCMeta  # Py2-style metaclass declaration.

    def __init__(self, dtype):
        self._dtype = dtype

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other

    @property
    def dtype(self):
        return self._dtype

    # Serialize the tf.dtype as a string so that it can be unpickled on DataFlow.
    def __getstate__(self):
        return self._dtype.name

    def __setstate__(self, state):
        self._dtype = tf.as_dtype(state)
class FloatDomain(Domain):
    # Domain for floating-point dtypes (see dtype_to_domain).
    pass
class IntDomain(Domain):
    # Domain for integer dtypes (see dtype_to_domain).
    pass
class StringDomain(Domain):
    # Domain for tf.string dtypes (see dtype_to_domain).
    pass
class BoolDomain(Domain):
    # Domain for tf.bool dtypes (see dtype_to_domain).
    pass
def dtype_to_domain(dtype):
    """Return the Domain subclass instance matching *dtype*.

    Integer and floating checks come first (via the dtype's own flags);
    string and bool are matched by equality.  Any other dtype is rejected.
    """
    # Lazy predicates preserve the original short-circuit evaluation order.
    checks = (
        (lambda d: d.is_integer, IntDomain),
        (lambda d: d.is_floating, FloatDomain),
        (lambda d: d == tf.string, StringDomain),
        (lambda d: d == tf.bool, BoolDomain),
    )
    for matches, domain_cls in checks:
        if matches(dtype):
            return domain_cls(dtype)
    raise ValueError('Schema cannot accommodate dtype: {}'.format(dtype))
class LogicalShape(collections.namedtuple('LogicalShape', ['axes'])):
    """The logical shape of a column, including axes, sequence nature, etc."""

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._asdict() == other._asdict()

    def __ne__(self, other):
        return not self == other

    def tf_shape(self):
        """Represent this shape as a `TensorShape`."""
        if self.axes is None:
            return tf.TensorShape(None)
        return tf.TensorShape([axis.size for axis in self.axes])

    def is_fixed_size(self):
        """Return True iff every axis has a known (non-None) size."""
        if self.axes is None:
            return False
        return all(axis.size is not None for axis in self.axes)
class Axis(collections.namedtuple('Axis', ['size'])):
"""An axis representing one dimension of the shape of a column.
Elements are:
size: integer. The length of the axis. None = unknown.
"""
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._asdict() == other._asdict()
return NotImplemented
def __ne__(self, other):
return not self == other
class ColumnRepresentation(object):
    """A description of the representation of a column in memory or on disk."""

    __metaclass__ = abc.ABCMeta  # Py2-style metaclass declaration.

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other

    @abc.abstractmethod
    def as_feature_spec(self, logical_column):
        """Returns the representation of this column as a feature spec.

        Args:
          logical_column: The logical column to be represented.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def as_batched_placeholder(self, logical_column):
        """Returns the representation of this column as a placeholder Tensor.

        Args:
          logical_column: The logical column to be represented.
        """
        raise NotImplementedError()
# note we don't provide tf.FixedLenSequenceFeature yet, because that is
# only used to parse tf.SequenceExample.
class FixedColumnRepresentation(ColumnRepresentation):
    """Represent the column using a fixed size."""

    def __init__(self, default_value=None):
        super(FixedColumnRepresentation, self).__init__()
        self._default_value = default_value

    @property
    def default_value(self):
        """Default value may be None, but then missing data produces an error."""
        return self._default_value

    def as_feature_spec(self, logical_column):
        shape = logical_column.shape
        if shape is None or not shape.is_fixed_size():
            raise ValueError('A column of unknown size cannot be represented as '
                             'fixed-size.')
        return tf.FixedLenFeature(shape.tf_shape().as_list(),
                                  logical_column.domain.dtype,
                                  self.default_value)

    def as_batched_placeholder(self, logical_column):
        shape = logical_column.shape
        if shape is None or not shape.is_fixed_size():
            raise ValueError('A column of unknown size cannot be represented as '
                             'fixed-size.')
        # Prepend a None batch dimension to the column's fixed shape.
        return tf.placeholder(logical_column.domain.dtype,
                              [None] + shape.tf_shape().as_list())
class ListColumnRepresentation(ColumnRepresentation):
  """Represent the column using a variable size."""

  def __init__(self):
    super(ListColumnRepresentation, self).__init__()

  def as_feature_spec(self, logical_column):
    """Return a tf.VarLenFeature for `logical_column`."""
    return tf.VarLenFeature(logical_column.domain.dtype)

  def as_batched_placeholder(self, logical_column):
    """Return a sparse placeholder with a leading (unknown) batch dimension."""
    batched_shape = [None] + logical_column.shape.tf_shape().as_list()
    return tf.sparse_placeholder(logical_column.domain.dtype, batched_shape)
class SparseColumnRepresentation(ColumnRepresentation):
  """Sparse physical representation of a logically fixed-size column."""

  def __init__(self, value_field_name, index_fields):
    super(SparseColumnRepresentation, self).__init__()
    self._value_field_name = value_field_name
    self._index_fields = index_fields

  @property
  def value_field_name(self):
    # Name of the field that stores the sparse values.
    return self._value_field_name

  @property
  def index_fields(self):
    # List of SparseIndexField describing the index fields.
    return self._index_fields

  def as_feature_spec(self, logical_column):
    """Return a tf.SparseFeature; only 1-d sparse features are supported."""
    index_fields = self.index_fields
    axes = logical_column.shape.axes
    if len(index_fields) != 1 or len(axes) != 1:
      raise ValueError('tf.Example parser supports only 1-d sparse features.')
    index = index_fields[0]
    return tf.SparseFeature(index.name,
                            self._value_field_name,
                            logical_column.domain.dtype,
                            axes[0].size,
                            index.is_sorted)

  def as_batched_placeholder(self, logical_column):
    """Return a sparse placeholder with a leading (unknown) batch dimension."""
    batched_shape = [None] + logical_column.shape.tf_shape().as_list()
    return tf.sparse_placeholder(logical_column.domain.dtype, batched_shape)
class SparseIndexField(collections.namedtuple('SparseIndexField',
                                              ['name', 'is_sorted'])):
  # One index field of a sparse column: the feature name that holds the
  # indices, and whether those indices are already sorted.
  pass
def from_feature_spec(feature_spec):
  """Convert a feature_spec to a Schema.

  Args:
    feature_spec: a features specification in the format expected by
        tf.parse_example(), i.e.
        `{name: FixedLenFeature(...), name: VarLenFeature(...), ...'

  Returns:
    A Schema representing the provided set of columns.
  """
  column_schemas = {}
  for name, parse_feature in feature_spec.items():
    column_schemas[name] = _from_parse_feature(parse_feature)
  return Schema(column_schemas)
def _from_parse_feature(parse_feature):
  """Convert a single feature spec to a ColumnSchema."""
  if isinstance(parse_feature, tf.FixedLenFeature):
    logical = LogicalColumnSchema(
        domain=_dtype_to_domain(parse_feature.dtype),
        shape=_tf_shape_to_logical_shape(parse_feature.shape))
    return ColumnSchema(
        logical, FixedColumnRepresentation(parse_feature.default_value))

  # Parsing of tf.SequenceExample features is not implemented.
  if isinstance(parse_feature, tf.FixedLenSequenceFeature):
    raise ValueError('DatasetSchema does not support '
                     'FixedLenSequenceFeature yet.')

  if isinstance(parse_feature, tf.VarLenFeature):
    logical = LogicalColumnSchema(
        domain=_dtype_to_domain(parse_feature.dtype),
        shape=LogicalShape(axes=[Axis(None)]))
    return ColumnSchema(logical, ListColumnRepresentation())

  if isinstance(parse_feature, tf.SparseFeature):
    logical = LogicalColumnSchema(
        domain=_dtype_to_domain(parse_feature.dtype),
        shape=LogicalShape(axes=[Axis(parse_feature.size)]))
    index_field = SparseIndexField(name=parse_feature.index_key,
                                   is_sorted=parse_feature.already_sorted)
    representation = SparseColumnRepresentation(
        value_field_name=parse_feature.value_key,
        index_fields=[index_field])
    return ColumnSchema(logical, representation)

  raise ValueError('Cannot interpret feature spec: {}'.format(parse_feature))
def infer_column_schema_from_tensor(tensor):
  """Infer a ColumnSchema from a tensor."""
  if isinstance(tensor, tf.SparseTensor):
    # A SparseTensor alone cannot distinguish ListColumnRepresentation from
    # SparseColumnRepresentation; we guess the former (VarLenFeature is the
    # common case in tf.Learn code) and callers handle the latter themselves,
    # e.g. by supplying an explicit schema.
    logical = LogicalColumnSchema(
        domain=_dtype_to_domain(tensor.dtype),
        shape=LogicalShape(axes=[Axis(None)]))
    return ColumnSchema(logical, ListColumnRepresentation())

  # Dense tensor: drop the leading batch dimension and use a fixed-size
  # representation.
  logical = LogicalColumnSchema(
      domain=_dtype_to_domain(tensor.dtype),
      shape=_tf_shape_to_logical_shape(tensor.get_shape(),
                                       remove_batch_dimension=True))
  return ColumnSchema(logical, FixedColumnRepresentation())
# Groupings of TensorFlow dtypes, used by _dtype_to_domain below to pick the
# matching Domain subclass.
_BOOL_TYPES = [tf.bool]
_INT_TYPES = [tf.int8, tf.uint8, tf.uint16, tf.int16, tf.int32, tf.int64]
_FLOAT_TYPES = [tf.float16, tf.float32, tf.float64]
_STRING_TYPES = [tf.string]
def _dtype_to_domain(dtype):
  """Create an appropriate Domain for the given dtype."""
  # Check groups in the same order as the original chain: bool, int,
  # string, float.
  for type_group, domain_cls in ((_BOOL_TYPES, BoolDomain),
                                 (_INT_TYPES, IntDomain),
                                 (_STRING_TYPES, StringDomain),
                                 (_FLOAT_TYPES, FloatDomain)):
    if dtype in type_group:
      return domain_cls(dtype=dtype)
  raise ValueError('DatasetSchema does not yet support dtype: {}'.format(dtype))
def _tf_shape_to_logical_shape(tf_shape, remove_batch_dimension=False):
  """Create a `LogicalShape` for the given shape.

  Args:
    tf_shape: A `TensorShape` or anything that can be converted to a
        `TensorShape`.
    remove_batch_dimension: A boolean indicating whether to remove the 0th
        dimension.

  Returns:
    A `LogicalShape` representing the given shape.

  Raises:
    ValueError: If `remove_batch_dimension` is True and the given shape does not
        have rank >= 1.
  """
  if not isinstance(tf_shape, tf.TensorShape):
    tf_shape = tf.TensorShape(tf_shape)
  if tf_shape.dims is None:
    # Unknown rank is represented as axes=None.
    axes = None
  else:
    axes = [Axis(axis_size) for axis_size in tf_shape.as_list()]
  if remove_batch_dimension:
    # NOTE(review): if the rank is unknown (axes is None) this len() raises
    # TypeError rather than the documented ValueError — confirm callers
    # never pass an unknown-rank shape with remove_batch_dimension=True.
    if len(axes) < 1:
      raise ValueError('Expected tf_shape to have rank >= 1')
    axes = axes[1:]
  return LogicalShape(axes)
| true
|
cca07c7ea8eb9aedf89dd8e24aa7f3ae92d488cf
|
Python
|
mamadyonline/data_engineering
|
/data_engineering/database/players.py
|
UTF-8
| 1,496
| 3.5
| 4
|
[] |
no_license
|
"""Create players class and define its characteristics"""
from data_engineering.database.base import Base
from sqlalchemy import String, Integer, Column, Boolean
class Player(Base):
    """ORM row for the `players` table: a football player's identity,
    club, goal tally, trophies and active/retired status."""

    __tablename__ = 'players'

    id = Column(Integer, primary_key=True)
    name = Column('name', String)
    nationality = Column('nationality', String)
    club = Column('club', String)  # current if still active, last club if retired
    total_scored_goals = Column('total_scored_goals', Integer)
    # NOTE(review): the DB column is named 'total_personal_goals' while the
    # attribute is 'total_personal_trophies' — this looks like a copy-paste
    # typo, but renaming the column would require a schema migration, so it
    # is only flagged here.
    total_personal_trophies = Column('total_personal_goals', Integer)
    active = Column('active', Boolean)

    def __init__(self, name, nationality, club,
                 total_scored_goals, total_personal_trophies, active):
        """Initialize without the id component as it is taken care of automatically."""
        self.name = name
        self.nationality = nationality
        self.club = club
        self.total_scored_goals = total_scored_goals
        self.total_personal_trophies = total_personal_trophies
        self.active = active

    def __repr__(self):
        """Prettify when we print a player."""
        if self.active:
            return (f"<Player({self.name} from {self.nationality}, "
                    f"currently playing at {self.club} with {self.total_scored_goals} goals)>")
        # Bug fix: the closing parenthesis used to appear before the goal
        # count ("... {club}) with N goals>"); it now closes the record.
        return (f"<Player({self.name} from {self.nationality}, "
                f"retired while at {self.club} with {self.total_scored_goals} goals)>")
| true
|
6248db0ba7d34e0059177f3f99279065529d107a
|
Python
|
gowthamanniit/python
|
/for-loop.py
|
UTF-8
| 50
| 3.03125
| 3
|
[] |
no_license
|
# Print the multiplication table of `num` for factors 1 through 9,
# one product per line.
num = 2
for factor in range(1, 10):
    product = num * factor
    print(product)
| true
|
629d477314231a4509413a9fb3131668db7bb5f5
|
Python
|
SLKyrim/vscode-leetcode
|
/0213.打家劫舍-ii.py
|
UTF-8
| 2,237
| 3.390625
| 3
|
[] |
no_license
|
#
# @lc app=leetcode.cn id=213 lang=python3
#
# [213] 打家劫舍 II
#
# https://leetcode-cn.com/problems/house-robber-ii/description/
#
# algorithms
# Medium (35.58%)
# Likes: 234
# Dislikes: 0
# Total Accepted: 28.9K
# Total Submissions: 77.5K
# Testcase Example: '[2,3,2]'
#
#
# 你是一个专业的小偷,计划偷窃沿街的房屋,每间房内都藏有一定的现金。这个地方所有的房屋都围成一圈,这意味着第一个房屋和最后一个房屋是紧挨着的。同时,相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上被小偷闯入,系统会自动报警。
#
# 给定一个代表每个房屋存放金额的非负整数数组,计算你在不触动警报装置的情况下,能够偷窃到的最高金额。
#
# 示例 1:
#
# 输入: [2,3,2]
# 输出: 3
# 解释: 你不能先偷窃 1 号房屋(金额 = 2),然后偷窃 3 号房屋(金额 = 2), 因为他们是相邻的。
#
#
# 示例 2:
#
# 输入: [1,2,3,1]
# 输出: 4
# 解释: 你可以先偷窃 1 号房屋(金额 = 1),然后偷窃 3 号房屋(金额 = 3)。
# 偷窃到的最高金额 = 1 + 3 = 4 。
#
#
# @lc code=start
class Solution:
    def rob(self, nums: List[int]) -> int:
        """Max loot on a circular street (LeetCode 213).

        The street is a ring, so house 0 and house n-1 are adjacent and
        cannot both be robbed.  Run the classic linear house-robber DP
        twice — once excluding the first house, once excluding the last —
        and take the better result.  The case where neither end is robbed
        is already covered by both runs.
        """
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        if len(nums) == 2:
            return max(nums[0], nums[1])

        def rob_line(values):
            # Linear DP, identical recurrence to the original tables:
            # best[i] = max(best[i-2] + values[i], best[i-1]),
            # seeded with best[0] = values[0], best[1] = max(values[0], values[1]).
            best_prev, best_cur = values[0], max(values[0], values[1])
            for value in values[2:]:
                best_prev, best_cur = best_cur, max(best_prev + value, best_cur)
            return best_cur

        # len(nums) >= 3 here, so each slice has at least two elements.
        return max(rob_line(nums[1:]), rob_line(nums[:-1]))
# @lc code=end
| true
|
08ac316233c0b6b34dcff7ae505829a4da2dd86e
|
Python
|
eyalMDM/MDMthesis
|
/eaTweetStreamer.py
|
UTF-8
| 5,377
| 3
| 3
|
[] |
no_license
|
'''
eaStreamTwitter
by Eyal Assaf
v0.1
Stream tweets based on hashtags and compiles them into a CSV file, to be sent to UE4
'''
# -*- coding: cp1252 -*-
# _*_ coding:utf-8 _*_
import sys
import io
# Import the necessary methods from tweepy library
import tweepy
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import time
import json
import csv
# Variables that contains the user credentials to access Twitter API
# Variables that contains the user credentials to access Twitter API
# SECURITY(review): real-looking OAuth credentials are hard-coded and
# committed to source control — they should be revoked and loaded from the
# environment or a config file instead.
access_token = "3741374896-nHOo1GxSDXzKOwrGf3zTXuIQ5azENs5RfKQxz2y"
access_token_secret = "RiuAq559cr4DDHbt2NxdZVr1AbI7J37wdMa9IQGus8BRJ"
consumer_key = "HNDGwz1zOHXKjWhUovAkfHzpd"
consumer_secret = "7D2XrKIyWt6uNMB8f2XZgRDMWb2IZlE0l37475nsNvMdQqy8ki"

# Authenticate and build the module-level API client used by the class below.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# prevents unicode errors - I hope
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# sys.stderr = codecs.getwriter('utf8')(sys.stderr.buffer, 'strict')

# confirm login into Twitter api
print("Successfully logged in as " + api.me().name + ".")

# tell computer where to put CSV
outfile_path='D:/GITHUB/MDMthesis/test.csv'
# open it up, the w means we will write to it
# NOTE(review): the file handle is never closed and the file is opened
# without newline='' — rows may gain blank lines on Windows.
writer = csv.writer(open(outfile_path, 'w'))
#create a list with headings for our columns
headers = ['user', 'tweet_text']
#write the row of headings to our CSV file
writer.writerow(headers)
# This is a basic listener that just prints received tweets to stdout.
# Interactive console menu around the Twitter search API.  Despite the name
# and base class, the streaming-related methods are all commented out below;
# only the menu/search path is live.
class eaTweetStreamer(StreamListener):
    '''
    def __init__(self):
        self.startStream()
    '''
    def twitterStart(self):
        # Entry point: show the menu and dispatch on the user's choice.
        # variable to get user input. Send to get_input() method
        self.get_input(["s", "p", "q"])

    def get_input(self,userChoice):
        # Prompt until the user types one of the letters in userChoice.
        # NOTE(review): the menu "loops" by mutual recursion
        # (get_input -> searchHt -> twitterStart -> get_input), so a long
        # session will eventually hit Python's recursion limit.
        choice=""
        while choice not in userChoice:
            print("Twitter client menu - press 's' to search, 'p' to post and 'q' to quit.")
            choice=input("-->")
        if choice=="s":
            self.searchHt() #go to search tweet method
        elif choice=="p":
            self.tweetMsg() # go to tweet message method
        elif choice=="q":
            print ("goodbye!")
        return choice

    def tweetMsg(self):
        # Placeholder: posting is not implemented, just return to the menu.
        print("tweeting message")
        self.twitterStart()

    def searchHt(self):
        # Ask for a tweet count and hashtag, then print each matching tweet
        # and append (screen name, timestamp, text) to the module-level CSV.
        numTweets= input("Number of tweets to iterate (minimum 1):")
        if len(numTweets)>=1 and numTweets.isdigit():
            search = input("Search for a hashtag:") # get user input
            getHt=api.search(q="#{0}".format(search), count=numTweets)
            #self.startStream(search)
            '''
            for index,tweet in enumerate(getHt):
                print(index,tweet.text)
            '''
            # Only iterate through the first 200 statuses
            for index,tweet in enumerate(tweepy.Cursor(api.search,q=search).items(int(numTweets),)):
                print(index,tweet.text)
                print("location: {0}".format(tweet.user.location))
                print()
                print("created: ",tweet.user.created_at)
                print("Time zone: ", tweet.user.time_zone)
                print("Place: ", tweet.place)
                print("============================")
                print("RAW DATA:")
                tUser=tweet.user
                print(tUser)
                row = []
                row.append(tweet.user.screen_name)
                row.append(tweet.created_at)
                row.append(tweet.text.encode('utf-8'))
                #once you have all the cells in there, write the row to your csv
                writer.writerow(row)
                # NOTE(review): on_status is only defined inside the
                # commented-out block below; presumably this resolves to the
                # tweepy StreamListener base-class method — confirm.
                self.on_status(tweet)
                #self.on_data(tweet)
                print("============================")
            self.twitterStart()
        else:
            print("Enter at least 1 for tweets to search!")
            self.searchHt()
    '''
    def on_status(self, status):
        if status.coordinates:
            print('coords:', status.coordinates)
        if status.place:
            print('place:', status.place.full_name)
        return True
    on_event = on_status
    def on_error(self, status):
        print("ERROR:" ,status)
    def on_data(self, data):
        print(data)
        parsed_json=json.loads(data)
        return True
    def startStream(self,search):
        #This handles Twitter authetification and the connection to Twitter Streaming API
        l = eaTweetStreamer()
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        stream = Stream(auth, l)
        #This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'
        stream.filter(track=[search])
        return True
    IS THIS EVEN NEEDED?
    def on_data(self, data):
        print(data)
        return True
    def on_error(self, status):
        print(status)
    def startStream(self):
        #This handles Twitter authetification and the connection to Twitter Streaming API
        l = eaTweetStreamer()
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        stream = Stream(auth, l)
        #This line filter Twitter Streams to capture data by the keywords: 'python', 'javascript', 'ruby'
        stream.filter(track=['zika'])
    '''
if __name__ == '__main__':
    # Entry point: start the interactive menu loop.
    eaTweetStreamer().twitterStart()
| true
|
2cb391d72c8980fbc316634b5dc2bd0cbe4d5d87
|
Python
|
Max-E/CS361-Reading-App
|
/UI/UI.py
|
UTF-8
| 12,711
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import wx, wx.lib.scrolledpanel, wx.lib.newevent
import threading
#
# Library Screen
#
# Library-grid geometry: thumbnail cell size (4:3 aspect) and padding, in
# pixels.  Note ROW_HEIGHT is a float (112.5).
COLUMN_WIDTH = 150
ROW_HEIGHT = COLUMN_WIDTH*0.75
CELL_PAD = 5
class BookThumbnail (wx.BitmapButton):
    # A clickable book-cover button shown in the library grid.

    def __init__ (self, parent, enqueue_callback, bookcover):
        # enqueue_callback: zero-arg callable queued for the app thread when
        # the cover is clicked; bookcover: path to the cover image file.
        self.enqueue_callback = enqueue_callback
        img = wx.Image (bookcover)
        img.Rescale (COLUMN_WIDTH, ROW_HEIGHT)
        super(BookThumbnail, self).__init__(parent, wx.ID_ANY, img.ConvertToBitmap(), size = (COLUMN_WIDTH, ROW_HEIGHT))
        self.Bind (wx.EVT_BUTTON, self.onClick, self)

    def onClick (self, evt):
        # Forward the click to the application via the stored callback.
        self.enqueue_callback ()
# Conversions between window size and grid rows/columns.
# NOTE(review): this file uses wx Classic-era APIs (GetSizeTuple) suggesting
# Python 2; on Python 3 these `/` divisions return floats, which GridSizer
# and SetSize may reject — confirm the target Python version.
def width_to_numcolumns (width):
    # Columns that fit in `width`, leaving room for the vertical scrollbar.
    ret = (width-wx.SystemSettings.GetMetric (wx.SYS_VSCROLL_X))/(COLUMN_WIDTH+2*CELL_PAD)
    return max (ret, 0)

def numcolumns_to_width (numcolumns):
    return numcolumns*(COLUMN_WIDTH+2*CELL_PAD)+wx.SystemSettings.GetMetric (wx.SYS_VSCROLL_X)

def height_to_numrows (height):
    return height/(ROW_HEIGHT+2*CELL_PAD)

def numrows_to_height (numrows):
    return numrows*(ROW_HEIGHT+2*CELL_PAD)
class LibraryScreen (wx.lib.scrolledpanel.ScrolledPanel):
    # Scrollable grid of BookThumbnail covers; the column count follows the
    # current window width.

    def __init__ (self, parent):
        super(LibraryScreen, self).__init__ (parent)
        self.parent = parent
        self.sizer = wx.GridSizer (vgap = CELL_PAD, hgap = CELL_PAD)
        self.SetSizer (self.sizer)
        self.Bind (wx.EVT_SIZE, self.onResize)

    def setSize (self):
        # Recompute the column count from the current width, then refresh
        # scrolling and layout.
        (w, h) = self.GetSizeTuple ()
        self.sizer.SetCols (width_to_numcolumns(w))
        self.sizer.CalcRowsCols ()
        self.SetupScrolling(scroll_x = False, scrollToTop = False)
        self.sizer.Layout ()

    def onResize (self, evt):
        self.setSize()
        evt.Skip ()

    def Show (self):
        # Size the parent frame for a 4x4 grid (minimum 2x2) before showing.
        self.parent.SetSize ((numcolumns_to_width (4), numrows_to_height (4)))
        self.parent.SetMinSize ((numcolumns_to_width (2), numrows_to_height (2)))
        self.setSize()
        self.sizer.Layout ()
        super(LibraryScreen, self).Show()

    def add_book (self, enqueue_callback, bookcover):
        # Append a cover thumbnail; enqueue_callback fires when it is clicked.
        thumbnail = BookThumbnail (self, enqueue_callback, bookcover)
        self.sizer.Add (thumbnail, 1, wx.FIXED_MINSIZE)
        self.setSize()
#
# Book Page Screen
#
# Size of the navigation/speak/quit button icons, in pixels.
ICON_WIDTH = 100
ICON_HEIGHT = 75

class PageButton (wx.BitmapButton):
    # A bitmap button built from an icon file, rescaled to the fixed size.

    def __init__ (self, parent, iconpath):
        img = wx.Image (iconpath)
        img.Rescale (ICON_WIDTH, ICON_HEIGHT)
        super(PageButton, self).__init__(parent, wx.ID_ANY, img.ConvertToBitmap())
class BookPageScreen (wx.Panel):
    # Reading view: a top bar (title + quit button), the page illustration
    # flanked by prev/next buttons, a centered row of word widgets, and a
    # speak button at the bottom.

    def __init__ (self, parent, quitcallback):
        # quitcallback: zero-arg callable invoked when the quit button is
        # pressed (wrapped in a lambda to discard the wx event).
        super(BookPageScreen, self).__init__ (parent)
        self.parent = parent
        self.min_size = (700, 600)
        self.illustration_path = "test_bookpage.png"
        self.sizer = wx.BoxSizer (wx.VERTICAL)
        self.SetSizer (self.sizer)
        self.onQuit = lambda evt: quitcallback ()
        # Top bar: centered title and a quit button on the right.
        self.topbar_panel = wx.Panel (self)
        self.topbar_sizer = wx.BoxSizer (wx.HORIZONTAL)
        self.topbar_panel.SetSizer (self.topbar_sizer)
        self.page_title = wx.StaticText (self.topbar_panel, wx.ID_ANY, "insert title here, if any")
        self.topbar_sizer.AddStretchSpacer ()
        self.topbar_sizer.Add (self.page_title, proportion = 1, flag = wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND)
        self.quit_button = PageButton (self.topbar_panel, "test_bookquit.png")
        self.quit_button.Bind (wx.EVT_BUTTON, self.onQuit)
        self.topbar_sizer.Add (self.quit_button, proportion = 0, flag = wx.ALIGN_RIGHT)
        self.sizer.Add (self.topbar_panel, proportion = 0, flag = wx.ALIGN_TOP | wx.EXPAND)
        # Main area: back button, illustration, forward button.  The spacer
        # inserted at index 1 is what setBitmap() resizes to center the image.
        self.main_panel = wx.Panel (self)
        self.main_sizer = wx.BoxSizer (wx.HORIZONTAL)
        self.main_panel.SetSizer (self.main_sizer)
        self.back_button = PageButton (self.main_panel, "test_bookprev.png")
        self.main_sizer.Add (self.back_button, proportion = 0, flag = wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)
        self.illustration = wx.StaticBitmap (self.main_panel, wx.ID_ANY)
        self.main_sizer.Add (self.illustration, proportion = 1, flag = wx.EXPAND)
        self.forward_button = PageButton (self.main_panel, "test_booknext.png")
        self.main_sizer.Add (self.forward_button, proportion = 0, flag = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
        self.main_sizer.InsertSpacer (1, (1,1))
        self.sizer.Add (self.main_panel, proportion = 1, flag = wx.EXPAND)
        # Text row: words are appended by add_word(); a leading spacer
        # (managed by centerText) keeps the row horizontally centered.
        self.text_panel = wx.Panel (self)
        self.text_sizer = wx.BoxSizer (wx.HORIZONTAL)
        self.text_panel.SetSizer (self.text_sizer)
        self.sizer.Add (self.text_panel, proportion = 0, flag = wx.ALIGN_BOTTOM | wx.EXPAND)
        # Bottom bar: text-to-speech button.
        self.bottombar_panel = wx.Panel (self)
        self.bottombar_sizer = wx.BoxSizer (wx.HORIZONTAL)
        self.bottombar_panel.SetSizer (self.bottombar_sizer)
        self.speak_button = PageButton (self.bottombar_panel, "test_speakicon.png")
        self.bottombar_sizer.Add (self.speak_button, proportion = 0, flag = wx.ALIGN_LEFT)
        self.sizer.Add (self.bottombar_panel, proportion = 0, flag = wx.ALIGN_BOTTOM | wx.EXPAND)
        self.Bind (wx.EVT_SIZE, self.onResize)
        self.clear ()

    def setBitmap (self):
        # Rescale the illustration to fill the available width (minus any
        # visible nav buttons) while preserving aspect ratio, then resize the
        # spacer at index 1 so the image is horizontally centered.
        (w, h) = self.main_sizer.GetSizeTuple ()
        w = max (w, self.min_size[0])
        img = wx.Image (self.illustration_path)
        old_w = img.GetWidth ()
        old_h = img.GetHeight ()
        if self.forward_button.IsShown ():
            w -= ICON_WIDTH
        if self.back_button.IsShown ():
            w -= ICON_WIDTH
        new_w = w
        new_h = (new_w*old_h)/old_w
        if new_h > h and h > 0:
            # Too tall: fit to height instead.
            new_h = h
            new_w = (new_h*old_w)/old_h
        img.Rescale (new_w, new_h)
        self.main_sizer.Remove (1)
        self.main_sizer.InsertSpacer (1, ((w-new_w)/2, 1))
        self.illustration.SetBitmap (img.ConvertToBitmap())

    def centerText (self):
        # Replace the leading spacer so the word row stays centered.
        (avail_w, avail_h) = self.text_sizer.GetSizeTuple()
        self.text_sizer.Remove (0)
        self.text_sizer.PrependSpacer (((avail_w-self.text_total_width)/2, 1))

    def setSize (self):
        self.setBitmap ()
        self.centerText ()
        self.Layout ()

    def onResize (self, evt):
        self.setSize ()
        evt.Skip ()

    def Show (self):
        self.parent.SetMinSize (self.min_size)
        self.setSize()
        super(BookPageScreen, self).Show()

    def clear (self):
        # Remove all word widgets and hide both nav buttons.
        self.text_sizer.Clear (True)
        self.text_total_width = 0
        self.text_sizer.Add ((1,1))
        self.back_button.Hide ()
        self.forward_button.Hide ()
        self.Layout ()

    def add_word (self, word_text, word_color):
        # Append one colored word to the text row and track the total width
        # used for centering.
        text = wx.StaticText (self.text_panel, wx.ID_ANY, word_text)
        text.SetForegroundColour (word_color)
        text.SetFont (wx.Font (20, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
        (w, h) = text.GetSize ()
        self.text_total_width += w
        self.text_sizer.Add (text, 0, wx.ALIGN_LEFT)

    def set_back (self, enqueue_callback):
        # Wire and show the "previous page" button.
        self.back_button.Bind (wx.EVT_BUTTON, lambda evt: enqueue_callback())
        self.back_button.Show ()
        self.Layout ()

    def set_forward (self, enqueue_callback):
        # Wire and show the "next page" button.
        self.forward_button.Bind (wx.EVT_BUTTON, lambda evt: enqueue_callback())
        self.forward_button.Show ()
        self.Layout ()

    def set_illustration_path (self, illustration_path):
        self.illustration_path = illustration_path
        self.setSize ()
#
# Main Window
#
class MainWindow (wx.Frame):
    # Top-level frame that stacks all screens and shows one at a time.

    def __init__(self, title):
        super(MainWindow, self).__init__(None, title = title)
        self.sizer = wx.BoxSizer (wx.VERTICAL)
        self.SetSizer (self.sizer)
        self.allscreens = []

    def AddScreen (self, screen):
        # Register a screen; it stays hidden until SwitchToScreen selects it.
        self.sizer.Add (screen, 1, wx.EXPAND)
        screen.Hide ()
        self.allscreens += [screen]

    def SwitchToScreen (self, screen):
        # Hide every registered screen, then show only the requested one.
        for otherscreen in self.allscreens:
            otherscreen.Hide ()
        screen.Show ()
        self.Layout ()
        self.Show ()
#
# UI Interface
#
class UI:
    # Thread-safe facade over the wx GUI.  The GUI runs in a daemon thread;
    # the public methods marshal work onto it via a custom wx event, and GUI
    # callbacks are queued back for the app thread to drain with
    # flush_callback_queue().

    def __init__ (self):
        self.callback_queue_lock = threading.Lock ()
        self.callback_queue = []
        # init_lock is acquired twice: the second acquire blocks this thread
        # until the GUI thread releases the lock after building the windows.
        self.init_lock = threading.Lock ()
        self.init_lock.acquire ()
        self.gui_thread = threading.Thread (target = self.thread_toplevel)
        self.gui_thread.daemon = True
        self.gui_thread.start ()
        self.init_lock.acquire ()

    # Called from the same thread as the rest of the program. Used to run
    # an arbitrary method in the GUI thread.
    def run_method_in_thread (self, callback):
        evt = self.RunMethodEvent (callback = callback)
        #PostEvent is explicitly supposed to be thread-safe.
        wx.PostEvent (self.window, evt)

    # runs in the GUI thread
    def onRunMethod (self, evt):
        evt.callback ()

    # GUI thread main loop
    def thread_toplevel (self):
        self.app = wx.App()
        self.RunMethodEvent, self.EVT_RUN_METHOD = wx.lib.newevent.NewEvent ()
        self.window = MainWindow(title = 'Reading App')
        self.libraryscreen = LibraryScreen (self.window)
        self.window.AddScreen (self.libraryscreen)
        # Quitting a book returns to the library screen.
        self.bookpagescreen = BookPageScreen (self.window, self.make_add_callback_callback (self.display_library))
        self.window.AddScreen (self.bookpagescreen)
        self.window.Bind (self.EVT_RUN_METHOD, self.onRunMethod)
        self.window.Bind (wx.EVT_CLOSE, self.onClose)
        # Unblock __init__ now that all widgets exist.
        self.init_lock.release ()
        self.app.MainLoop ()

    # runs in either thread, makes callbacks which will run in GUI thread
    def make_add_callback_callback (self, callback):
        def enqueue_callback ():
            with self.callback_queue_lock:
                self.callback_queue += [callback]
        return enqueue_callback

    # runs in GUI thread
    def onClose (self, evt):
        # Closing the window queues a process exit for the app thread.
        with self.callback_queue_lock:
            self.callback_queue += [lambda: exit (0)]

    # Begin methods intended to be public:
    def display_library (self):
        self.run_method_in_thread (lambda: self.window.SwitchToScreen (self.libraryscreen))

    def add_book_to_library (self, bookcallback, bookcover):
        enqueue_callback = self.make_add_callback_callback (bookcallback)
        self.run_method_in_thread (lambda: self.libraryscreen.add_book (enqueue_callback, bookcover))

    def clear_bookpage (self):
        self.run_method_in_thread (lambda: self.bookpagescreen.clear ())

    def add_bookpage_word (self, word_text, word_color):
        self.run_method_in_thread (lambda: self.bookpagescreen.add_word (word_text, word_color))

    def set_bookpage_next (self, nextpage_callback):
        enqueue_callback = self.make_add_callback_callback (nextpage_callback)
        self.run_method_in_thread (lambda: self.bookpagescreen.set_forward (enqueue_callback))

    def set_bookpage_prev (self, prevpage_callback):
        enqueue_callback = self.make_add_callback_callback (prevpage_callback)
        self.run_method_in_thread (lambda: self.bookpagescreen.set_back (enqueue_callback))

    def set_bookpage_illustration_path (self, illustration_path):
        self.run_method_in_thread (lambda: self.bookpagescreen.set_illustration_path (illustration_path))

    def display_bookpage (self):
        self.run_method_in_thread (lambda: self.window.SwitchToScreen (self.bookpagescreen))

    def flush_callback_queue (self):
        # Drain queued GUI callbacks on the caller's (app) thread.  The queue
        # is swapped under the lock; the callbacks run outside it.
        old_callback_queue = None
        with self.callback_queue_lock:
            old_callback_queue = self.callback_queue
            self.callback_queue = []
        for old_callback in old_callback_queue:
            old_callback ()
| true
|
34e1f4728f7e32d4d1be8491d05a272f14343d3f
|
Python
|
proffillipesilva/aulasdelogicaprogramacao
|
/aula0705/matriz_fundamentos.py
|
UTF-8
| 961
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
from random import randint
def cria_matriz(num_linhas, num_colunas):
    """Build a num_linhas x num_colunas matrix of random ints in [0, 10].

    Values are drawn row by row, left to right (same randint call order as
    the explicit-loop version, so seeded runs produce identical matrices).
    """
    return [[randint(0, 10) for _ in range(num_colunas)]
            for _ in range(num_linhas)]
def imprime_matriz(matriz):
    """Print the matrix row by row, values separated by single spaces."""
    for linha in matriz:
        for valor in linha:
            print(valor, end=' ')  # space after each column value
        print()  # newline at the end of each row
def imprime_uns(num_linhas, num_colunas):
    """Print a num_linhas x num_colunas block of '1's (trailing space kept)."""
    linha = '1 ' * num_colunas
    for _ in range(num_linhas):
        print(linha)
matriz = cria_matriz(3, 4) # create a 3x4 matrix (3 rows x 4 columns)
print(matriz) # print in raw form (list of lists)
imprime_matriz(matriz) # print formatted as a matrix
#imprime_uns(3, 4)
| true
|
d9e826a062bc52a92e69920bd2f146cbdd763d63
|
Python
|
barthap/BPMN
|
/tests/test_network.py
|
UTF-8
| 7,646
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
import unittest
import network_factory
from network import Network, NodeType
"""
Network:
A --. .-- E
|-- C -- D -- |
B --^ ^-- F
"""
# Adjacency map used as a shared fixture: each key maps to the set of its
# direct successors.  D is the only split node, C the only merge node.
test_network = {
    'A': { 'C' },
    'B': { 'C' },
    'C': { 'D' },
    'D': { 'E', 'F' },
}
class NodeTests(unittest.TestCase):
    # Unit tests for Node navigation/mutation on a fresh copy of the
    # test_network fixture (rebuilt per test by setUp).

    def setUp(self) -> None:
        self.net = network_factory.from_simple_direct_succession(test_network)

    def test_is_split(self):
        # these are not splits
        self.assertFalse(self.net.nodes['A'].is_split())
        self.assertFalse(self.net.nodes['B'].is_split())
        self.assertFalse(self.net.nodes['C'].is_split())
        self.assertFalse(self.net.nodes['E'].is_split())
        self.assertFalse(self.net.nodes['F'].is_split())
        # D fans out to E and F.
        self.assertTrue(self.net.nodes['D'].is_split())

    def test_is_merge(self):
        # these are not splits
        self.assertFalse(self.net.nodes['A'].is_merge())
        self.assertFalse(self.net.nodes['B'].is_merge())
        self.assertFalse(self.net.nodes['D'].is_merge())
        self.assertFalse(self.net.nodes['E'].is_merge())
        self.assertFalse(self.net.nodes['F'].is_merge())
        # C joins A and B.
        self.assertTrue(self.net.nodes['C'].is_merge())

    def test_next(self):
        a = self.net.nodes['A']
        c = self.net.nodes['C']
        d = self.net.nodes['D']
        self.assertEqual(a.next().name, c.name)
        self.assertEqual(c.next().name, d.name)

    def test_prev(self):
        f = self.net.nodes['F']
        c = self.net.nodes['C']
        d = self.net.nodes['D']
        self.assertEqual(d.prev().name, c.name)
        self.assertEqual(f.prev().name, d.name)

    def test_next_throws_on_multiple_successors(self):
        # next() is only defined for nodes with exactly one successor.
        d = self.net.nodes['D']
        with self.assertRaisesRegex(AssertionError, "Node .* must have only one successor .*"):
            d.next()

    def test_prev_throws_on_multiple_successors(self):
        # prev() is only defined for nodes with exactly one predecessor.
        c = self.net.nodes['C']
        with self.assertRaisesRegex(AssertionError, "Node .* must have only one predecessor .*"):
            c.prev()

    def test_prev_throws_if_no_predecessors(self):
        a = self.net.nodes['A']
        with self.assertRaises(StopIteration):
            a.prev()

    def test_remove_successor(self):
        # Removal must unlink both directions of the C->D edge.
        c = self.net.nodes['C']
        d = self.net.nodes['D']
        c.remove_successor(d)
        self.assertSetEqual(c.successors, set())
        self.assertSetEqual(d.predecessors, set())

    def test_remove_predecessor(self):
        c = self.net.nodes['C']
        d = self.net.nodes['D']
        d.remove_predecessor(c)
        self.assertSetEqual(c.successors, set())
        self.assertSetEqual(d.predecessors, set())

    def test_remove_all_successors(self):
        d = self.net.nodes['D']
        e = self.net.nodes['E']
        f = self.net.nodes['F']
        d.remove_all_successors()
        self.assertSetEqual(d.successors, set())
        self.assertSetEqual(e.predecessors, set())
        self.assertSetEqual(f.predecessors, set())

    def test_remove_all_predecessors(self):
        a = self.net.nodes['A']
        b = self.net.nodes['B']
        c = self.net.nodes['C']
        c.remove_all_predecessors()
        self.assertSetEqual(a.successors, set())
        self.assertSetEqual(b.successors, set())
        self.assertSetEqual(c.predecessors, set())
class NetworkTests(unittest.TestCase):
    # Unit tests for Network-level construction and mutation operations.

    def test_add_node(self):
        # Extra keyword arguments are stored as node attributes.
        net = Network()
        net.add_node('abc', cnt=5)
        self.assertEqual(net.nodes['abc'].name, 'abc')
        self.assertEqual(net.nodes['abc'].cnt, 5)

    def test_add_edge(self):
        # Adding an edge must link the nodes in both directions and record
        # the edge (with its count) in the edge map.
        net = Network()
        net.add_node('a')
        net.add_node('b')
        net.add_edge('a', 'b', 3)
        a = net.nodes['a']
        b = net.nodes['b']
        self.assertSetEqual(a.successors, {b})
        self.assertSetEqual(b.predecessors, {a})
        self.assertEqual(net.edges['a']['b'].src, a)
        self.assertEqual(net.edges['a']['b'].target, b)
        self.assertEqual(net.edges['a']['b'].cnt, 3)

    def test_delete_node(self):
        # Deleting C must remove it and all incident edges/links.
        net = network_factory.from_simple_direct_succession(test_network)
        a = net.nodes['A']
        b = net.nodes['B']
        c = net.nodes['C']
        d = net.nodes['D']
        net.delete_node(c)
        self.assertDictEqual(net.edges['A'], {})
        self.assertDictEqual(net.edges['B'], {})
        self.assertNotIn('C', net.edges.keys())
        self.assertSetEqual(a.successors, set())
        self.assertSetEqual(b.successors, set())
        self.assertSetEqual(d.predecessors, set())

    def test_delete_edge(self):
        net = network_factory.from_simple_direct_succession(test_network)
        b = net.nodes['B']
        c = net.nodes['C']
        net.delete_edge(net.edges['B']['C'])
        self.assertNotIn('C', net.edges['B'].keys())
        self.assertNotIn(c, b.successors)
        self.assertNotIn(b, c.predecessors)

    def test_delete_filtered_out_items(self):
        # Marking a node and an edge as filtered out, then purging, must
        # behave like deleting them directly.
        net = network_factory.from_simple_direct_succession(test_network)
        a = net.nodes['A']
        b = net.nodes['B']
        c = net.nodes['C']
        d = net.nodes['D']
        e = net.nodes['E']
        c.is_filtered_out = True
        net.edges['D']['E'].is_filtered_out = True
        net.delete_filtered_out_items()
        self.assertDictEqual(net.edges['A'], {})
        self.assertDictEqual(net.edges['B'], {})
        self.assertNotIn('C', net.edges.keys())
        self.assertSetEqual(a.successors, set())
        self.assertSetEqual(b.successors, set())
        self.assertSetEqual(d.predecessors, set())
        self.assertNotIn(e, d.successors)
        self.assertNotIn(d, e.predecessors)
        self.assertNotIn('E', net.edges['D'].keys())

    def test_delete_node_merge_edges(self):
        # Create network a->b->c
        net = Network()
        net.add_node('a')
        net.add_node('b')
        net.add_node('c')
        net.add_edge('a', 'b')
        net.add_edge('b', 'c')
        a = net.nodes['a']
        b = net.nodes['b']
        c = net.nodes['c']
        # Removing b must splice its neighbours together: a->c.
        net.delete_node_merge_edges(b)
        edge = net.edges['a']['c']
        self.assertSetEqual(a.successors, {c})
        self.assertSetEqual(c.predecessors, {a})
        self.assertIn('c', net.edges['a'].keys())
        self.assertEqual(edge.src, a)
        self.assertEqual(edge.target, c)

    def test_insert_dummy_before(self):
        # The dummy takes over all of C's predecessors and becomes C's only
        # predecessor.
        net = network_factory.from_simple_direct_succession(test_network)
        c = net.nodes['C']
        original_predecessors = set(c.predecessors)
        net.insert_dummy_before(c, 'dummy1')
        self.assertIn('dummy1', net.nodes.keys())
        dummy = net.nodes['dummy1']
        self.assertEqual(dummy.type, NodeType.DUMMY)
        self.assertSetEqual(dummy.predecessors, original_predecessors)
        self.assertSetEqual(dummy.successors, {c})
        self.assertSetEqual(c.predecessors, {dummy})
        # validate edges - im too lazy to write assertions
        net._validate_structure()

    def test_insert_dummy_after(self):
        # Mirror image of test_insert_dummy_before, on D's successors.
        net = network_factory.from_simple_direct_succession(test_network)
        d = net.nodes['D']
        original_successors = set(d.successors)
        net.insert_dummy_after(d, 'dummy1')
        self.assertIn('dummy1', net.nodes.keys())
        dummy = net.nodes['dummy1']
        self.assertEqual(dummy.type, NodeType.DUMMY)
        self.assertSetEqual(dummy.successors, original_successors)
        self.assertSetEqual(dummy.predecessors, {d})
        self.assertSetEqual(d.successors, {dummy})
        # validate edges - im too lazy to write assertions
        net._validate_structure()
| true
|
93130729cc1e3635b31fd3025310bf6380feef8e
|
Python
|
FurknKOC/Opencv_ve_Python_kullanarak_RaspberryPI3_ile_Yuz_Tanima
|
/Face REC/Face REC/Ana_Menu.py
|
UTF-8
| 413
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
def secim() :
    # Interactive main menu (Python 2 — uses print statements and raw_input).
    # Prints a greeting plus the numbered operation list, then stores the
    # user's numeric choice on the function object itself (secim.secim)
    # instead of returning it; callers read secim.secim after the call.
    prompt = '> '
    print "\n"
    print "Hoşgeldiniz %s\n" %os.getlogin()
    print("Islem secimi yapiniz : \n")
    print("[ 1 ] Grup Oluştur \n[ 2 ] Kişi Oluştur \n[ 3 ] Kişi Yüzü Ekle \n[ 4 ] Kişi Yüzü Ara \n[ 5 ] Yüz Tespiti \n[ 6 ] Yüz Tanı \n\n[ 7 ] Grup Sil\n ")
    secim.secim = int(raw_input(prompt))
| true
|
01e4a7138f889120d8e3b04c6ecc2c78e898b7a5
|
Python
|
joduncan/RaceScoring
|
/parse-mychiptime.py
|
UTF-8
| 319
| 2.5625
| 3
|
[] |
no_license
|
import sys
# Python 2 script: read a tab-separated mychiptime export in which each
# runner occupies 3 physical lines, and emit "number,name,age,sex" CSV rows
# to stdout.  Usage: parse-mychiptime.py <input-file> <sex>.
inl = open( sys.argv[1] ).readlines()
inl = [ i.split("\t") for i in inl ]
for i in range( len( inl ) / 3 ):
    # i1/i2/i3 are the three lines of one runner record; only i1 is used.
    i1,i2,i3=inl[i*3:i*3+3]
    number = i1[2]
    name = i1[3]+" "+i1[4]
    age='' #i3[6]
    #print i3[9]
    # Sex is not in the export; it is taken from the command line.
    sex = sys.argv[2]
    # NOTE(review): `time` is extracted but never written to the output.
    time = i1[1]
    print ",".join( [number,name,age,sex] )
| true
|
712d6a0c64485ace27aa0aebda504d4733ed0640
|
Python
|
thomasmburke/PersonalizedGreeter
|
/environment_setup/populate_redis.py
|
UTF-8
| 5,843
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
import redis
import os
import pickle
greetings = {"defaultGreetings" : {"<speak>Greetings and Salutations {}!</speak>",
"<speak>Howdy, Howdy, Howdy {}!</speak>",
"<speak>Howdy doody {}!</speak>",
"<speak>{}, Ahoy, Matey</speak>",
"<speak>What's crackin {}?</speak>",
"<speak>{}, This may be recorded for training purposes</speak>",
"<speak>Why, hello there {}</speak>",
"<speak>Konnichiwa {}</speak>",
"<speak>Aloha {}</speak>",
"<speak>Breaker, Breaker {} has arrived <break time='1s'/> Copy that</speak>",
"<speak>{}, I hope you are having a wonderful day</speak>",
"<speak>Ciao {}</speak>",
"<speak>What's cookin {}?</speak>",
"<speak>Beautiful day outside isn't it {}?</speak>",
"<speak>Yo, {} its good to see you again</speak>",
"<speak>Hey there {}, welcome to the Burke household</speak>",
"<speak>Hi, Welcome to McDonalds my name is {}. How can I help you?</speak>",
"<speak>Want to hear Victoria's Secret? <break time='1s'/><amazon:effect name='whispered'>She has a crush on {}</amazon:effect></speak>",
"<speak>Yo ho, yo ho its a pirates life for {}</speak>",
"<speak>What's happenin {}?</speak>",
"<speak>Knock, Knock <break time='1s'/> Well, {} why don't you try and find out!</speak>",
"<speak>{}, you are not allowed please turn back now!</speak>",
"<speak>{}, I told you never to come back here, for what reason have you shown your face</speak>",
"<speak>Hello {}, please do not forget to tip the door man</speak>",
"<speak>Do you {} take this personalized machine learning greeter as you lawfully wedded wife?</speak>",
"<speak>Welcome {}, please feel free to make yourself at home</speak>"},
"christmasGreetings" : {"<speak>Hey, {}. Why does Santa Claus have such a big sack? <break time='1s'/> He only comes once a year!</speak>",
"<speak>Hey, {}. Why does Santa always come through the chimney?<break time='1s'/>Because he knows better than to try the back door.</speak>",
"<speak>Hey, {}. What’s the difference between snowmen and snowwomen?<break time='1s'/>Snowballs</speak>",
"<speak>Hey, {}. What’s the difference between Tiger Woods and Santa?<break time='1s'/>Santa was smart enough to stop at three ho's.</speak>",
"<speak>Hey, {}. Why did the Grinch rob the liquor store?<break time='1s'/>He was desperate for some holiday spirit.</speak>",
"<speak>Hey, {}. Why does Santa go to strip clubs?<break time='1s'/>To visit all his ho ho ho’s.</speak>",
"<speak>Hey, {}. Is your name Jingle Bells?<break time='1s'/>Cause you look ready to go all the way.</speak>"},
"aprilFoolsGreetings" : {"<speak>Hey, {}. Your shoe is untied.<break time='1s'/>April Fools!</speak>",
"<speak>Hey, {}. Who is that guy behind you?<break time='1s'/>Just kidding, April Fools!</speak>",
"<speak>Hey, {}. Looks like you dropped something<break time='1s'/>April Fools!</speak>"},
"thanksgivingGreetings" : {"<speak>Hey, {}. What does Miley Cyrus eat for Thanksgiving?<break time='1s'/>Twerky</speak>",
"<speak>Hey, {}. What kind of music did the Pilgrims like?<break time='1s'/>Plymouth Rock</speak>",
"<speak>Hey, {}. Why was the Thanksgiving soup so expensive?<break time='1s'/>It had 24 carrots</speak>",
"<speak>Hey, {}. What did baby corn say to mama corn?<break time='1s'/>Where's pop corn</speak>",
"<speak>Hey, {}. What did the turkey say before it was roasted?<break time='1s'/>Boy! I'm stuffed.</speak>",
"<speak>Hey, {}. What do you call a running turkey?<break time='1s'/>Fast food!</speak>"},
"easterGreetings" : {"<speak>Hey, {}. What did the Easter egg say to the boiling water?<break time='1s'/>It might take me a while to get hard cause I just got laid by some chick.</speak>",
"<speak>Hey, {}. Why wouldn't you want to be an Easter egg?<break time='1s'/>You only get laid once.</speak>",
"<speak>Hey, {}. Why is Easter an Alzheimer patient's favorite holiday?<break time='1s'/>They get to hide their own eggs.</speak>",
"<speak>Hey, {}. How do you make Easter easier?<break time='1s'/>Replace the t with an i.</speak>",
"<speak>Hey, {}. Where does the Easter Bunny get his eggs?<break time='1s'/>From egg plants</speak>",
"<speak>Hey, {}. How should you send a letter to the Easter Bunny?<break time='1s'/>By hare mail</speak>"},
"saintpatricksdayGreetings" : {"<speak>Hey, {}. break time='1s'/></speak>",
"<speak>Hey, {}. Why can't you borrow money from a leprechaun?<break time='1s'/>Because they're always a little short</speak>",
"<speak>Hey, {}. Are you from Ireland?<break time='1s'/>Because when I look at you my penis is Dublin</speak>",
"<speak>Hey, {}. What do you call a potato that's not Irish?<break time='1s'/>A french fry</speak>"},
"newYearsGreetings" : {"<speak>Hey, {}. A new years resolution is something that goes in one year and out the other.</speak>",
"<speak>Hey, {}. hope you popped of this year because tonight will be even crazier.</speak>"},
"laborDayGreetings" : {"<speak>Did you hear about the Labor Day joke? It doesn't work for me.</speak>"}}
"""
memorial day
presidents day
MLK day
Columbus day
independence day / 4th of july
Ghandis bday october 2
Diwali
"""
# Persist the greeting catalogue locally, then mirror it into Redis sets.
with open('./greetings/greetings.pkl', 'wb') as defaultPickleFile:
    pickle.dump(greetings, defaultPickleFile)
redisClient = redis.Redis(host=os.getenv('REDIS_INSTANCE_IP'), port=6379, db=0)
# WARNING: flushall() wipes every key in every database on this Redis host,
# not just the greeting sets written below.
redisClient.flushall()
for key, greeting in greetings.items():
    redisClient.sadd(key, *greeting)  # one Redis set per greeting category
    mems = redisClient.smembers(name=key)
    rand = redisClient.srandmember(key)
    print(mems)  # sanity check: echo stored members and one random pick
    print(rand)
| true
|
f92f3e98a1fbbc92a2ac9b135b35219284a2d700
|
Python
|
AshrithPradeep/PythonT
|
/Python/TASK 1 AND 2.py
|
UTF-8
| 8,609
| 5.03125
| 5
|
[] |
no_license
|
# TASK ONE
# 1. Create three variables in a single line and assign values to them in such a manner that each one of
# them belongs to a different data type.
a,b,c = 1, 2.0, 'ashrith'
# 2. Create a variable of type complex and swap it with another variable of type integer.
a = 2 +1j
b = 1
a,b = b,a
#3. Swap two numbers using a third variable and do the same task without using any third variable.
#by using third variable
a,b = 1,2
temp = a
a = b
b = temp
#without using third variable
a,b = b,a
#4.Write a program that takes input from the user and prints it using both python 2x. and python 3.x version
a = eval(raw_input("enter a number"))
print(a)
b = eval(raw_input("Enter a number"))
print(b)
#5. Write a program to complete the task given below: Ask users to enter any 2 numbers in between 1-10 , add the two numbers and keep the sum in
#another variable called z. Add 30 to z and store the output in variable result and print result as the final output.
a = int(input("enter number 1 :"))
b = int(input("enter number 2 :"))
print(a,b)
z = a+b
result = z+30
print(z)
#6. Write a program to check the data type of the entered values.
a = eval(input("enter the value"))
print(type(a))
#7. Create Variables using formats such as Upper CamelCase, Lower CamelCase, SnakeCase and UPPERCASE.
UpperCamel = 1
lowerCamel = 2
snake_case = 3
UPPERCASE = 4
#8 If one data type value is assigned to ‘a’ variable and then a different data type value is assigned to ‘a’
#again. Will it change the value? If Yes then Why?
# ANSWER : Yes, the value changes as we assign a value of different data type, because the first one is saved in one memory location and the second one in another.
# So the variable changes its datatype
# TASK TWO
#1. Write a python program to perform the following operations:
#If a number is divisible by 3 it should print “ConsAd” as a string
#If a number is divisible by 5 it should print “Python” as a string
#If a number is divisible by both 3 and 5 it should print “ConsAd - Python” as a string
# Task 2.1: FizzBuzz-style divisibility check on the number just entered.
# BUG FIX: the original tested `a` (a leftover variable from Task 1)
# instead of the freshly read `x`, so the user's input was ignored.
x=int(input("Enter a number here :"))
if (x%3)==0 and (x%5)==0:
    print("ConsAd - Python")
elif (x%5)==0:
    print("Python")
elif (x%3)==0:
    print("ConsAd")
#2. Write a python program to perform the following operator based task:
# Ask user to choose the following option first:
#if user enter 1 : Addition
#if user enter's 2 : Subtraction
#if user enter 3: division
#if user enter 4: Multiplication
#if user enter 5: Average
#Ask user to enter two numbers and keep those numbers in variables num1 and num2 respectively for the first 4 options mentioned above.
# Ask the user to enter two more numbers as first and second for calculating the average as soon as the user chooses an option 5.
# At the end if the answer of any operation is Negative print a statement saying “NEGATIVE”.
print("Choose one of the following operations by their number : 1 for Addition, 2 for Subtraction, 3 for Division, 4 for Multiplication, 5 for Average")
a=int(input())
print("Enter the first number :")
num1=int(input())
print("Enter the second number :")
num2=int(input())
if a == 1:
res = float(num1 + num2)
print('The result is :', res)
elif a == 2:
res = float(num1 - num2)
print('The result is :', res)
elif a == 3:
res = float(num1 / num2)
print("The result is :", res)
elif a == 4:
res = float(num1 * num2)
print("The result is:", res)
elif a == 5:
sum = float(num1 + num2)
res = float(sum / 2)
print("The result is:", res)
else:
print("choose an option between 1-5")
if res < 0:
print("Negative Number")
#3. Write a program in python to implement the flow chart:
a=10
b=20
c=30
average=(a+b+c)/3
print("avg: ",average)
while average in range(10,31):
if average >((a) and (b) and (c)):
print("Average is greater than a,b and c")
break
elif average > ((a)and (b)):
print("Average is greater than a,b and c")
break
elif average >((a)and (c)):
print("The average is greater than a,c")
break
elif average >((b) and (c)):
print("The average is greater than b,c")
break
elif average > (a):
print("The average is just greater than a")
break
elif average > (b):
print ("The average is just greater than b")
break
elif average > (c):
print ("The average is just greater than c")
break
else:
break
#4. Write a program in python to break and continue if the following cases occur:
# if user enters a negative number just break the loop and print " It's over"
# if user enters a positive number just continue the loop and print "good going"
a=int(input("Enter a number"))
while a in range(-10000,10000):
if a <0:
print("It's over")
break
if a>=0:
print("Good going")
a+=1
continue
#5. Write a program in Python which will find all such numbers which are divisible by 7 but are not a multiple of 5, between 2000 and 3200.
for i in range(2000,3200):
if ((i%7==0) and (i%5!=0)):
print(i)
#6. What is the output of the following code snippets
x=123
for i in x:
print(i)
## Type Error - int not iterable.
i=0
while i<5:
print(i)
i+=1
if i==3:
break
else:
print("error")
# Break can be used inside the loop and here it is outside it, so it throws an error for the same
count=0
while True:
print(count)
count +=1
if count>=5:
break
# prints 01234 and then breaks due to the condition
#7. Write a program that prints all the numbers from 0 to 6 except 3 and 6.
i=0
while i in range(0,7):
if i == 3 or i ==6:
i+=1
continue
print(i)
i+=1
#8. Write a program that accepts a string as an input from the user and calculate the number of digits and letters.
x = input("enter the string")
number, alphabet = 0,0
for y in x:
if y.isdigit():
number+=1
elif y.isalpha():
alphabet+=1
else:
pass
print("alphabets:", alphabet )
print("numbers:", number)
#9.Read the two parts of the question below:
#Write a program such that it asks users to “guess the lucky number”. If the correct number is guessed the program stops, otherwise it continues forever.
#Modify the program so that it asks users whether they want to guess again each time. Use two variables, ‘number’ for the number and ‘answer’ for the answer to the question whether they want
#to continue guessing. The program stops if the user guesses the correct number or answers “no”. ( The program continues as long as a user has not answered “no” and has not guessed the correct number)
number = int(input("Guess the lucky number here: "))
while number != 9:
print("You have not guessed the lucky number")
number = int(input("guess the lucky number again: "))
# Modified version
again = "yes"
number = int(input("Guess the lucky number"))
while number != 9:
print("You have not guessed the lucky number")
again = (input("guess again?"))
if again == "no":
break
else:
number = int(input("Guess the lucky number"))
if number == 9:
print("you win!!!!!!!")
#10. Write a program that asks five times to guess the lucky number. Use a while loop and a counter, such as While counter <= 5: print(“Type in the”, counter, “number” counter=counter+1
# The program asks for five guesses (no matter whether the correct number was guessed or not). If the correct number is guessed, the program outputs “Good guess!”, otherwise it outputs “Try again!”.
#After the fifth guess it stops and prints “Game over!”.
counter = 1
while counter <= 5:
if counter == 5:
print("sorry your attempts were not successful")
break
num = int(input("Guess the number"))
if num == 5:
print ("Good guesss.")
break
else:
print ("try again!")
counter +=1
else:
print ("game over")
#11.In the previous question, insert break after the “Good guess!” print statement. break will terminate
#the while loop so that users do not have to continue guessing after they found the number. If the user
#does not guess the number at all, print “Sorry but that was not very successful”.
counter = 1
while counter <= 5:
num = int(input("Guess the number"))
if num == 5:
print("Good guesss.")
break
else:
if counter == 5:
print("sorry your attempts were not successful")
break
else:
print("try again!")
counter += 1
print("game over")
| true
|
97283e71227dfc436e35ad27d41122e1758d2dc3
|
Python
|
Windrist/UET-CAM-Demo
|
/captureImage.py
|
UTF-8
| 502
| 2.734375
| 3
|
[] |
no_license
|
import time
import cv2

if __name__ == "__main__":
    # Live webcam preview: press 's' to save the current frame, 'q' to quit.
    # Camera configuration
    cap = cv2.VideoCapture(0)
    count = 0  # sequence number for saved snapshots
    # Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    cap.set(3, 1280)
    cap.set(4, 720)
    while True:
        ret, image = cap.read()
        cv2.imshow("Capture", image)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('s'):
            # assumes data/demo/ already exists — TODO confirm
            cv2.imwrite('data/demo/{}.jpg'.format(count), image)
            count+=1
    cap.release()
    cv2.destroyAllWindows()
| true
|
20a9d93c0a66c511f385ba91cbec00643a4cb8ab
|
Python
|
misterbeebee/Games
|
/Connect/connect.py
|
UTF-8
| 7,628
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/env python
import random
import sys
import time
def printf(*args):
    """Write the given string(s) to stdout and flush immediately."""
    out = sys.stdout
    out.write(*args)
    out.flush()
class Column:
    """One gravity-fed column of the board, filled bottom (index 0) upward."""

    SPACE = ' '

    def __init__(self, height):
        self.height = height
        # Bottom-to-top storage (insertion order, per gravity); empty cells
        # hold SPACE.
        self.content = [self.SPACE for _ in range(height)]

    def full(self):
        """True when the topmost cell is occupied."""
        top = self.content[self.height - 1]
        return top != self.SPACE

    def insert(self, token):
        """Drop token into the lowest empty cell; False when the column is full."""
        for row, cell in enumerate(self.content):
            supported = row == 0 or self.content[row - 1] != self.SPACE
            if cell == self.SPACE and supported:
                self.content[row] = token
                return True
        return False

    def remove(self):
        """Pop the topmost token, if any; False when the column is empty."""
        last = len(self.content) - 1
        for row in range(self.height):
            occupied = self.content[row] != self.SPACE
            nothing_above = row == last or self.content[row + 1] == self.SPACE
            if occupied and nothing_above:
                self.content[row] = self.SPACE
                return True
        return False

    def __len__(self):
        return len(self.content)

    def __iter__(self):
        return iter(self.content)

    def __getitem__(self, i):
        return self.content[i]

    def __setitem__(self, i, value):
        self.content[i] = value
class Board:
    """A WIDTH x HEIGHT connect-four board made of gravity-fed Columns."""

    WIN_SIZE = 4
    WIDTH = 7
    HEIGHT = 6
    SPACE = ' '

    def __init__(s, tokens):
        s.cols = [Column(s.HEIGHT) for i in range(s.WIDTH)]
        s.tokens = tokens

    def full(s):
        """True when every column is full (a draw if nobody has won)."""
        return all([col.full() for col in s.cols])

    def insert(s, colname, token):
        """Drop token into the named column; False if that column is full."""
        return s.cols[colname].insert(token)

    def remove(s, colname):
        """Undo the last drop in the named column (used by the AI search)."""
        return s.cols[colname].remove()

    def winner(s):
        """Return the winning token, or False when there is no winner yet.

        BUG FIXES vs the original:
        - the rising diagonal `d1` was computed but never used
          (`winner = h or v or d2`), so up-right diagonal wins went
          undetected;
        - the falling-diagonal bound was `rowi - offset > 0`, which
          excluded the bottom row (index 0) from d2 runs; it is now >= 0.
        """
        offsets = range(s.WIN_SIZE)
        for coli in range(s.WIDTH):
            for rowi in range(s.HEIGHT):
                for token in s.tokens:
                    # horizontal run to the right
                    h = all([(coli + offset < s.WIDTH) and (token == s.cols[coli + offset][rowi]) for offset in offsets])
                    # vertical run upward
                    v = all([(rowi + offset < s.HEIGHT) and (token == s.cols[coli][rowi + offset]) for offset in offsets])
                    # rising diagonal (up-right)
                    d1 = all([(coli + offset < s.WIDTH) and (rowi + offset < s.HEIGHT)
                              and (token == s.cols[coli + offset][rowi + offset]) for offset in offsets])
                    # falling diagonal (down-right)
                    d2 = all([(coli + offset < s.WIDTH) and (rowi - offset >= 0)
                              and (token == s.cols[coli + offset][rowi - offset]) for offset in offsets])
                    if h or v or d1 or d2:
                        return token
        return False

    def legal_move(s, coli):
        """A move is legal while the target column has room."""
        return not s.cols[coli].full()

    def legal_moves(s):
        """Indices of all columns that can still accept a token."""
        return [coli for coli in range(len(s.cols)) if s.legal_move(coli)]

    def print(s):
        """Render the board to stdout, top row first, with column labels."""
        printf('\n')
        for rowi in range(s.HEIGHT-1, -1, -1):
            printf('|')
            labels = [s.cols[coli][rowi] for coli in range(s.WIDTH)]
            printf(' '.join(labels))
            printf('|\n')
        printf('+%s+\n' % '-'.join(('-' * s.WIDTH)))
        printf(' %s \n' % ' '.join([str(i) for i in range(s.WIDTH)]))
class Player():
    """Base class holding a player's display name and board token."""

    def __init__(self, name, token):
        self.name = name
        self.token = token

    # Caller must call setup() next!
    def setup(self, game, board):
        """Late-bind the game and board this player participates in."""
        self.game = game
        self.board = board
s.board = board
def indexof(array, value):
    """Return the first index of value in array, or None when absent."""
    for position, element in enumerate(array):
        if element == value:
            return position
    return None
class AI(Player):
    """One-ply lookahead computer player: plays an immediate win if one
    exists, otherwise blocks an immediate opponent win, otherwise avoids
    moves that hand the opponent a win, falling back to a random legal move.
    Every trial insert below is undone with remove() before the next
    candidate, so the board is unchanged when move() returns."""

    def __init__(s, token):
        Player.__init__(s, name="Marvin", token=token)

    def move(s):
        """Return the chosen column index for this AI's next move."""
        # Win or avoid immediate loss if possible
        tokeni = indexof(s.game.tokens, s.token)
        # my moves:
        winning_moves = set()   # columns where I win immediately
        losing_moves = set()    # columns after which the opponent can win
        this_token = s.game.tokens[tokeni]
        for colname in s.board.legal_moves():
            s.board.insert(colname, this_token)
            if s.board.winner():
                winning_moves.add(colname)
                # don't break, need to undo insert
            else:
                # Simulate every opponent reply to this candidate move.
                next_token = s.game.tokens[(tokeni + 1) % len(s.game.tokens)]
                opponent_winning_move = None
                for next_colname in s.board.legal_moves():
                    s.board.insert(next_colname, next_token)
                    if s.board.winner():
                        losing_moves.add(colname)
                        # don't break, need to undo insert
                    s.board.remove(next_colname)
            s.board.remove(colname)
        # opponent moves:
        blocking_moves = set()
        sacrificial_moves = set()
        tokeni = (tokeni + 1) % len(s.game.tokens)
        this_token = s.game.tokens[tokeni]
        for colname in s.board.legal_moves():
            s.board.insert(colname, this_token)
            if s.board.winner():
                # Prevent opponent win.
                blocking_moves.add(colname)
                # don't break, need to undo insert
            else:
                next_token = s.game.tokens[(tokeni + 1) % len(s.game.tokens)]
                next_winning_move = None
                for next_colname in s.board.legal_moves():
                    s.board.insert(next_colname, next_token)
                    if s.board.winner():
                        # NOTE(review): this records next_colname (my reply
                        # column) rather than the simulated opponent column
                        # `colname` — confirm which was intended.
                        sacrificial_moves.add(next_colname)
                        # don't break, need to undo insert
                    s.board.remove(next_colname)
            s.board.remove(colname)
        if winning_moves:
            #printf("Winning move.\n")
            return list(winning_moves)[0]
        if blocking_moves:
            #printf("Blocking move.\n")
            return list(blocking_moves)[0]
        legal_moves = s.board.legal_moves()
        #print("sacrificial:", sacrificial_moves)
        #print("losing: ", losing_moves)
        #print("legal: ", legal_moves)
        # not-bad moves, or else a move that might let opponent block, or else any move (surrender)
        possible_moves = ([move for move in legal_moves
                           if (move not in losing_moves)
                           and (move not in sacrificial_moves)]
                          or list(sacrificial_moves) or legal_moves)
        # random move
        #printf("Random move.\n")
        # NOTE(review): this loop uses the *values* of possible_moves as
        # offsets from a random start index — it still returns a member of
        # possible_moves, but the scan order looks accidental; verify.
        start_movei = int(random.random() * len(possible_moves))
        for move_i in possible_moves:
            move = possible_moves[(start_movei + move_i) % len(possible_moves)]
            if s.board.legal_move(move):
                return move
        raise Exception("Can't move!")
class User(Player):
    """A human player whose moves are read from standard input."""

    def __init__(self, name, token):
        Player.__init__(self, name, token)

    def move(self):
        """Read one line; the first character is taken as the column digit."""
        # Any parse failure (empty line, non-digit, EOF) yields None, which
        # the game loop treats as an illegal move and re-prompts.
        try:
            line = sys.stdin.readline()
            return int(line[0])
        except:
            return None
return None
class Game:
    """Drives alternating turns between players until a win or a full board."""

    X = 'X'
    O = 'O'

    def __init__(s, tokens, players, board):
        s.tokens = tokens
        s.players = players
        s.board = board
        # Randomly choose which player opens the game.
        s.turni = int(len(s.players)*random.random())
        for player in s.players:
            player.setup(game=s, board=board)

    def over(s):
        # Returns (board_full, winning_token_or_falsy).
        return (s.board.full(), s.board.winner())

    def play(s):
        """Main loop: alternate turns until a winner or a draw, then report."""
        s.board.print()
        while True:
            s.playturn()
            (full, winner_token) = s.over()
            if winner_token or full:
                printf('\n GAME OVER. ')
                if winner_token:
                    printf('%s wins!\n\n' % winner_token)
                else:
                    printf('Draw.')
                return
            # time.sleep(1)

    def playturn(s):
        """Prompt the current player until a legal move is made, then show the board."""
        player = s.players[s.turni]
        printf("\n%s %s's turn. Move (column #): \n" % (player.token, player.name))
        s.turni = (s.turni+1) % len(s.players)
        while True:
            colname = player.move()
            printf("Move: Column %s\n" % colname)
            try:
                # insert() returns False for a full column; out-of-range or
                # None column indices raise and are swallowed below.
                if s.board.insert(colname, player.token):
                    break
            except: pass
            printf('ILLEGAL MOVE. Try again: ')
        s.board.print()
def main():
    """Set up the X and O players (human or AI), then play games until quit."""
    tokens = [Game.X, Game.O]
    players = []
    for playeri in range(len(tokens)):
        printf("%s Player: Enter name, or press [ENTER] for AI: " % tokens[playeri])
        name = sys.stdin.readline()[:-1]  # strip the trailing newline
        if name:
            player = User(token=tokens[playeri], name=name)
        else:
            player = AI(token=tokens[playeri])
        players.append(player)
    while True:
        board = Board(tokens)
        game = Game(tokens, players, board).play()  # play() returns None
        printf("Play again? [Y/n]: ")
        again = sys.stdin.readline()[:-1]
        # NOTE: an empty answer also quits, because "" is a substring of
        # every string, so `"" in "NnQq"` is True.
        if again in "NnQq":
            break
main()
| true
|
c090af4139919b65cead84071f21df07c3976026
|
Python
|
AnjoliPodder/PracticePython
|
/8.py
|
UTF-8
| 1,867
| 4.5
| 4
|
[] |
no_license
|
'''
Solution by: Anjoli Podder
December 2016
http://www.practicepython.org/exercise/2014/03/26/08-rock-paper-scissors.html
Rock–paper–scissors for two players: each player enters a move, the winner is
announced with an explanation, and the players are asked whether to play again.
'''
def rps():
    """Play one round of two-player rock-paper-scissors on the console,
    then offer a rematch (each rematch recurses into rps())."""
    print("Welcome to the game! Your move must be rock, paper or scissors.")
    valid_moves = ["rock", "paper", "scissors"]
    valid1 = False
    valid2 = False
    pl1 = ""
    pl2 = ""
    # Re-prompt each player until a recognised move is entered.
    while not(valid1):
        pl1 = input("Please enter your move, Player 1: ").lower()
        if not(pl1 in valid_moves):
            print("That was not a valid move. Try again.")
        else:
            valid1 = True
    while not(valid2):
        pl2 = input("Please enter your move, Player 2: ").lower()
        if not(pl2 in valid_moves):
            print("That was not a valid move. Try again.")
        else:
            valid2 = True
    # Enumerate every (pl1, pl2) outcome explicitly.
    if pl1 == pl2:
        print("You both chose", pl1,". It's a tie!")
    elif (pl1, pl2) == ("rock", "paper"):
        print("Player 2 Wins! Rock is wrapped by Paper")
    elif (pl1, pl2) == ("rock", "scissors"):
        print("Player 1 Wins! Rock breaks Scissors")
    elif (pl1, pl2) == ("paper", "scissors"):
        print("Player 2 Wins! Paper is cut by Scissors")
    elif (pl1, pl2) == ("paper", "rock"):
        print("Player 1 Wins! Paper wraps Rock")
    elif (pl1, pl2) == ("scissors", "paper"):
        print("Player 1 Wins! Scissors cuts Paper")
    elif (pl1, pl2) == ("scissors", "rock"):
        print("Player 2 Wins! Scissors is broken by Rock")
    # Each replay adds a stack frame; fine for casual interactive use.
    again = input("Do you want to play again? (Y/N): ").lower()
    if again == "y":
        rps()
    else:
        print("Thanks for playing!")
    pass
rps()
| true
|
7232b30a5d8032fa83bfd182064edecb8ec19412
|
Python
|
evd0kim/poc-threshold-ecdsa-secp256k1
|
/paillier.py
|
UTF-8
| 2,878
| 3.3125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/usr/bin/env python
import struct
import utils
KEY_LENGHT = 4096
def inverse(a, n):
    """Find the inverse of a modulo n if it exists.

    Returns the modular inverse in [0, n), or the string
    "a is not invertible" when gcd(a, n) != 1 (same contract as before).
    """
    # Iterative extended Euclidean algorithm.
    old_t, t = 0, 1
    old_r, r = n, a
    while r != 0:
        quotient = int((old_r - (old_r % r)) / r)
        old_t, t = t, old_t - quotient * t
        old_r, r = r, old_r - quotient * r
    if old_r > 1:
        return "a is not invertible"
    if old_t < 0:
        old_t = old_t + n
    return old_t
def lcm(a, b):
    """Computing the least common multiple between a and b"""
    # |a * b| / gcd(a, b); the sign flip of the original is folded into abs().
    product = abs(a * b)
    return int(product / utils.nonrec_gcd(a, b))
def L(x, n):
    """Paillier L-function: (x - 1) / n, truncated to an integer."""
    quotient = (x - 1) / n
    return int(quotient)
def key_gen(p, q):
    """Build a Paillier key pair from primes p and q.

    Returns (public, private) with public = (n, g) and
    private = (n, p, q, g, lmdba, mu).
    """
    # Distinct primes satisfy gcd(p, q) == 1; anything else aborts the
    # whole process.
    if utils.nonrec_gcd(p, q) != 1:
        # non-distinct
        exit(1)
    n = p * q
    g = n + 1  # the simplified g = n + 1 variant of Paillier
    lmdba = (p-1) * (q-1)  # phi(n), used as lambda in this variant
    mu = utils.invert(lmdba, n)
    return (n, g), (n, p, q, g, lmdba, mu)
def gen_key():
    """Generate a fresh Paillier key pair from two random primes."""
    half_bits = KEY_LENGHT >> 1  # each prime carries half of the key length
    p = utils.getprimeover(half_bits)
    q = utils.getprimeover(half_bits)
    return key_gen(p, q)
def R_old(n):
    # Legacy randomness source: reads 8 bytes from /dev/urandom and reduces
    # mod n — only 64 bits of entropy and a biased modulo reduction.
    # Superseded by R(); kept for reference.
    with open("/dev/urandom", 'rb') as f:
        r = struct.unpack(">Q", f.read(8))[0] % n
    return r
def R(n):
    """Return a random r in [0, n) with gcd(r, n) == 1 (a valid Paillier nonce).

    BUG FIX: the original returned utils.randomnumber(n) on the first line,
    leaving the coprimality loop below unreachable dead code; the intended
    check (required for r^n to be invertible mod n^2) now actually runs.
    """
    while True:
        r = utils.randomnumber(n)
        if utils.nonrec_gcd(r, n) == 1:
            return r
def encrypt(m, pub):
    """Encrypt plaintext m under public key pub = (n, g).

    Returns (ciphertext, r) where r is the random nonce used:
    c = g^m * r^n mod n^2.
    """
    n, g = pub
    n2 = n * n
    r = R(n)
    return (utils.powmod(g, m, n2) * utils.powmod(r, n, n2)) % n2, r
def decrypt(c, priv):
    """Decrypt ciphertext c: m = L(c^lmdba mod n^2) * mu mod n."""
    n, p, q, g, lmdba, mu = priv
    n2 = n * n
    return L(utils.powmod(c, lmdba, n2), n) * mu % n
def mult(cipher, scalar, n2):
    # Homomorphic scalar multiplication: cipher^scalar mod n^2 encrypts
    # scalar * plaintext.
    return utils.powmod(cipher, scalar, n2)
def add(c1, c2, n2):
    """Homomorphic addition: the product of ciphertexts mod n^2."""
    product = c1 * c2
    return product % n2
if __name__ == "__main__":
# print(KEY_LENGHT>>1)
p = utils.getprimeover(KEY_LENGHT>>1)
q = utils.getprimeover(KEY_LENGHT>>1)
# print(p)
# print(q)
# http://www.primos.mat.br/primeiros_10000_primos.txt
pub, priv = key_gen(p, q)
n, p, q, g, lmdba, mu = priv
n2 = n * n
# print(pub, priv)
s1 = 180
s2 = 10
print(s1)
print(s2)
c1, r1 = encrypt(s1, pub)
c2, r2 = encrypt(s2, pub)
# print(c1)
# print(c2)
# Homomorphic properties
cadd = c1 * c2 % n2
# print(cadd)
cmult = utils.powmod(c1, 20, n2)
# print(cmult)
# (180 + 10) * 10 + 180 = 2'080
test = add(mult(add(c1, c2, n2), 10, n2), c1, n2)
# 180 * 100 + 180 * 100 = 36'000
test2 = add(mult(c1, 100, n2), mult(c1, 100, n2), n2)
madd = decrypt(cadd, priv)
mmult = decrypt(cmult, priv)
mtest = decrypt(test, priv)
mtest2 = decrypt(test2, priv)
m1 = decrypt(c1, priv)
m2 = decrypt(c2, priv)
print("add c1 + c2:", madd)
print("mult c1 * 20:", mmult)
print("test composition:", mtest)
print("test composition 2:", mtest2)
print(decrypt(encrypt(10, pub)[0], priv))
| true
|
382f48d18d6506a8fdeb77004d642f9947a3be0b
|
Python
|
CSSS/csss-site
|
/csss-site/src/csss/convert_markdown.py
|
UTF-8
| 500
| 2.5625
| 3
|
[] |
no_license
|
import markdown
def markdown_message(message):
    """
    Marks down the given message using the markdown module

    Keyword Argument
    message -- the message to mark down

    Return
    message - the marked down message
    """
    extensions = ['sane_lists', 'markdown_link_attr_modifier']
    # Make every generated link open in a new tab.
    link_config = {'new_tab': 'on'}
    return markdown.markdown(
        message,
        extensions=extensions,
        extension_configs={'markdown_link_attr_modifier': link_config},
    )
| true
|
0cafa77b3197d634b00e72ffb6250fa46a67f3e4
|
Python
|
varshanth/Coding_Algorithms
|
/Linked_Lists/remove_all_occurrences_of_element_from_linked_list.py
|
UTF-8
| 1,480
| 4.09375
| 4
|
[] |
no_license
|
'''
LeetCode: Remove Element from LinkedList
Remove all elements from a linked list of integers that have value val.
Example
Given: 1 --> 2 --> 6 --> 3 --> 4 --> 5 --> 6, val = 6
Return: 1 --> 2 --> 3 --> 4 --> 5
Solution: O(n)
Algorithm:
1) Maintain 2 pointers, fast and slow. Slow will start on dummy_head and
fast will start on head
2) Link slow to fast i.e slow.next = fast and update slow as slow.next only
if fast.val is not equal to target val.
3) Make sure to take care of the case where the target to be deleted is the
last node. If the node at the end was not the target, it would have had
its next pointer as None. Hence we just assign slow.next as None because
the slow would have the last known node whose value is not the target val.
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def removeElements(self, head, val):
        """
        :type head: ListNode
        :type val: int
        :rtype: ListNode

        Single pass with a sentinel node: kept nodes are re-linked behind
        `tail`, matching nodes are simply skipped.
        """
        if head is None:
            return head
        sentinel = ListNode(0)
        sentinel.next = head
        tail = sentinel          # last node known to be kept
        probe = sentinel.next    # node currently under inspection
        while probe is not None:
            if probe.val != val:
                tail.next = probe
                tail = probe
            probe = probe.next
        # Detach a possible trailing run of removed nodes.
        tail.next = None
        return sentinel.next
| true
|
31d766c28f9df0982fd539e6582d7a77bfce64ac
|
Python
|
mndesai/Python
|
/ex16.py
|
UTF-8
| 1,244
| 3.671875
| 4
|
[] |
no_license
|
from sys import argv #open python file with script name and file name
script, filename = argv
print "We're going to erase %r." % filename
print "If you don't want that, hit CTRL-C (^C)." #closes script
print "If you do want that, hit RETURN."
raw_input("?")
print "Opening the file..." #I guess this is what happens if you hit return
target = open(filename, 'w') #can write to file, file contents assigned to target variable
# print "Truncating the file. Goodbye!"
# target.truncate() #tell target to truncate, but truncate doesn't matter if you open with "w"
print "Now I'm going to ask you for three lines." #gets 3 lines from user
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print "I'm going to write these to the file."
# target.write(line1) #writes new lines to file, each line is its own line
# target.write("\n")
# target.write(line2)
# target.write("\n")
# target.write(line3)
# target.write("\n")
target.write("%s\n%s\n%s\n" % (line1, line2, line3))
target.close()
target = open(filename)
print target.read()
print "And finally, we close it." #closes file
target.close()
# from sys import argv
# script, filename= argv
# print open(filename).read()
# open(filename).close()
| true
|
7a7c00f304306e438067fd20581b4579fe9e08cc
|
Python
|
JasperGan-smile/fileserver
|
/fs.py
|
UTF-8
| 1,231
| 2.671875
| 3
|
[] |
no_license
|
import os,magic
import re
from genericpath import isdir, isfile
from flask import Flask,send_file,render_template,request
def show_dir(pwd):
    # Render an HTML directory listing for the given absolute path.
    files = os.listdir(pwd)
    return render_template("index.html",files = files,pwd = pwd)
def send_to_client(pwd):
    # `pwd` arrives with a trailing '/' (added by fallback); strip it
    # before handing the file path to Flask for download.
    path = pwd[:-1]
    return send_file(path,as_attachment=True)
def file_or_dir(pwd):
    # Dispatch: directories get an HTML listing, anything else is served
    # as a file download.
    if(os.path.isdir(pwd)):
        return show_dir(pwd)
    else:
        return send_to_client(pwd)
app = Flask(__name__)
@app.route('/edit/<path:dummy>')
def editor(dummy):
    # Open the requested file in a browser-based editor.
    # SECURITY: the path is taken verbatim from the URL and rooted at '/',
    # so any readable file on the host can be opened — confirm intended.
    file_path = '/'+str(dummy)
    with open(file_path,'r',encoding='utf-8') as f:
        content = f.read()
    return render_template('editor.html',path = file_path,content = content)
@app.route('/save',methods=['POST'])
def save():
    # Persist editor content back to disk.
    # SECURITY: `path` comes straight from the client and is opened for
    # writing without validation — this allows overwriting arbitrary files
    # on the host (path traversal). Restrict to a whitelisted root.
    content = request.form['content']
    path = request.form['path']
    with open(path,'w') as f:
        f.write(content)
    return "saved!"
@app.route('/<path:dummy>')
def fallback(dummy):
    # Catch-all route: paths starting with 'edit' go to the editor,
    # everything else is treated as a filesystem path rooted at '/'.
    if str(dummy).startswith('edit'):
        return editor(str(dummy))
    else:
        return file_or_dir('/'+str(dummy)+'/')
@app.route('/')
def index():
    # Root URL: show a listing of the filesystem root.
    html = file_or_dir("/")
    return html
if __name__ == '__main__':
app.run(debug=True)
| true
|
1a837d72da20cb51c81b437029624d536cb25a23
|
Python
|
uenewsar/nlp100fungos
|
/chap9/83.py
|
UTF-8
| 1,905
| 2.828125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
'''
83. 単語/文脈の頻度の計測
82の出力を利用し,以下の出現分布,および定数を求めよ.
f(t,c): 単語tと文脈語cの共起回数
f(t,∗): 単語tの出現回数
f(∗,c): 文脈語cの出現回数
N: 単語と文脈語のペアの総出現回数
'''
import pickle
import sys
def write_res(word2idx, idx2word, ftc, ft, fc, N, fn):
    """Pickle all counting structures as a single tuple into file *fn*."""
    payload = (word2idx, idx2word, ftc, ft, fc, N)
    with open(fn, 'wb') as fw:
        pickle.dump(payload, fw)
## main
# read outcome of 82
fr = open('context.txt', 'r', encoding='utf-8')
# initialize variables
# f(t,c)
ftc = {}
# f(t,*)
ft = {}
# f(*,c)
fc = {}
# N
N = 0
# word id - word
# word - word id
idx2word = {}
word2idx = {}
for line in fr:
# there are some cases that line doesn't have two colums, so use strip (not rstrip)
# to remove all unnecessary spaces
line = line.strip()
# split to words
tab = line.split('\t')
if len(tab) != 2:
continue
t = tab[0]
c = tab[1]
# register word id
if t not in word2idx:
word2idx[t] = len(word2idx)
idx2word[ word2idx[t] ] = t
if c not in word2idx:
word2idx[c] = len(word2idx)
idx2word[ word2idx[c] ] = c
# convert to word id
t = word2idx[t]
c = word2idx[c]
# count f(t,c)
if t not in ftc:
ftc[t] = {}
if c not in ftc[t]:
ftc[t][c] = 0
ftc[t][c] += 1
# count f(t,*)
if t not in ft:
ft[t] = 0
ft[t] += 1
# count f(*,c)
if c not in fc:
fc[c] = 0
fc[c] += 1
N += 1
if N % 10000000 == 0:
sys.stderr.write(' N={}\n'.format(N))
#if N % 100000000 == 0:
# break
fr.close()
# write final result
sys.stderr.write(' writing final result ...\n')
write_res(word2idx, idx2word, ftc, ft, fc, N, 'context_count.pickle')
sys.stderr.write(' end\n')
| true
|
4dbf77fc15640c98b89990cc35c6bbaca2347bca
|
Python
|
Setugekka/Finance-Investment-Analysis-System
|
/webapp/Library/pyalgotrade_custom/plotter.py
|
UTF-8
| 2,153
| 2.75
| 3
|
[] |
no_license
|
#encoding:utf-8
from pyalgotrade import broker
from backtestOrder import backtestOrder as Order
class StrategyPlotter(object):
    """Class responsible for plotting a strategy execution.

    :param strat: The strategy to plot.
    :type strat: :class:`pyalgotrade.strategy.BaseStrategy`.
    """
    def __init__(self, strat):
        self.__dateTimes = []     # one timestamp per processed bar
        self.__tradehistory=[]    # filled orders, in fill order
        self.__portfolio=[]       # broker equity after each bar
        self.__startdate=strat.getStartdate()
        # Subscribe to strategy events so history accumulates as bars run.
        strat.getBarsProcessedEvent().subscribe(self.__onBarsProcessed)
        strat.getBroker().getOrderUpdatedEvent().subscribe(self.__onOrderEvent)

    def getTradehistory(self):
        # NOTE(review): reverse() mutates the stored list in place, so the
        # returned order flips on every call — confirm callers call this
        # only once.
        self.__tradehistory.reverse()
        return self.__tradehistory

    def getPortfolio(self):
        """Return {'date': [...], 'data': [...]} from the first bar on/after startdate."""
        point=None
        for i in self.__dateTimes:
            if i>=self.__startdate:
                point=i
                break
        # NOTE(review): if no recorded date reaches startdate, point stays
        # None and index(None) below raises ValueError — confirm acceptable.
        num=self.__dateTimes.index(point)
        return {"date":self.__dateTimes[num:],"data":self.__portfolio[num:]}

    def __onBarsProcessed(self, strat, bars):
        # Record the bar timestamp and the broker's total equity at that bar.
        dateTime = bars.getDateTime()
        self.__dateTimes.append(dateTime)
        self.__portfolio.append(strat.getBroker().getEquity())
    # could also report market-value changes here
    def __onOrderEvent(self, broker_, orderEvent):
        # Translate (partial) fill events into lightweight Order records.
        order = orderEvent.getOrder()
        if orderEvent.getEventType() in (broker.OrderEvent.Type.PARTIALLY_FILLED,broker.OrderEvent.Type.FILLED):
            code=order.getInstrument()
            action = order.getAction()
            execInfo = orderEvent.getEventInfo()
            if action in [broker.Order.Action.BUY, broker.Order.Action.BUY_TO_COVER]:
                historder = Order(code,'buy',execInfo.getPrice(),execInfo.getQuantity(),execInfo.getCommission(),execInfo.getDateTime())
                self.__tradehistory.append(historder)
            elif action in [broker.Order.Action.SELL, broker.Order.Action.SELL_SHORT]:
                historder = Order(code, 'sell', execInfo.getPrice(), execInfo.getQuantity(), execInfo.getCommission(),
                                  execInfo.getDateTime())
                self.__tradehistory.append(historder)
| true
|
32e47048d19132ce0a69f18498972258512f9f24
|
Python
|
zhigang0529/PythonLearn
|
/src/main/operation/RSSSum.py
|
UTF-8
| 294
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/python
# !coding=utf-8
# Sum the RSS (resident set size) column of `ps aux` and print the total.
import os

rss_values = []   # renamed from `list`: don't shadow the builtin
total = 0         # renamed from `sum`: don't shadow the builtin
pslines = os.popen('ps aux', 'r').readlines()
for line in pslines:
    fields = line.split()
    rss_values.append(fields[5])
# rss_values[0] is the header label 'RSS'. The slice also drops the final
# line — NOTE(review): presumably to exclude the `ps` process itself, but
# the last line is not guaranteed to be it; confirm the intent.
for value in rss_values[1:-1]:
    total = total + int(value)
# Prints e.g. "RSS:123456"; parenthesized form works under Python 2 and 3.
print('%s:%s' % (rss_values[0], total))
| true
|
de90b59e1acff18c0704a821bdd58c02a3a0b382
|
Python
|
NiyazUrazaev/SummerPractice
|
/practice/models.py
|
UTF-8
| 3,244
| 2.671875
| 3
|
[] |
no_license
|
import datetime
from django.db import models
class BaseDiaryDay(models.Model):
    """Base class for a single diary day, shared by all apps."""
    # Day of the entry. Pass the callable (no parentheses) so the default is
    # evaluated per record; the original used datetime.datetime.today(),
    # which is evaluated once at import time and freezes the default date.
    date = models.DateField(
        default=datetime.date.today,
    )
    # Free-text description of the work done that day.
    work_info = models.CharField(
        max_length=5000,
        default='',
    )
    # Whether the day's entry has been completed.
    is_complete = models.BooleanField(
        default=False,
    )

    class Meta:
        abstract = True
class BaseAppsDiaryDay(BaseDiaryDay):
    """
    Base class for a single diary day in the gamification and
    mindfulness apps: extends BaseDiaryDay with liked/disliked notes
    and an overall day evaluation.
    """
    # Day-evaluation choices (labels are user-facing Russian strings).
    LIKE = 1
    DISLIKE = 2
    NEUTRAL = 3
    EVALUATION = (
        (LIKE, 'Понравилось'),
        (DISLIKE, 'Не понравилось'),
        (NEUTRAL, 'Нейтрально'),
    )
    # Free-text: what the student liked about the day.
    liked_things = models.TextField(
        default='',
    )
    # Free-text: what the student disliked about the day.
    disliked_things = models.TextField(
        default='',
    )
    # Optional overall evaluation of the day (one of EVALUATION).
    day_evaluation = models.IntegerField(
        choices=EVALUATION,
        verbose_name='Day evaluation',
        null=True,
        blank=True,
    )

    class Meta:
        abstract = True
class BasePractice(models.Model):
    """Base class for practices (internships) used by all apps."""
    # Practice-type choices (values/labels are user-facing Russian strings).
    STUDY = 'Учебная'
    WORK = 'Производственная'
    TYPE = (
        (STUDY, 'Учебная'),
        (WORK, 'Производственная'),
    )
    # Supervising teacher; practice survives without one (null=True).
    teacher = models.ForeignKey(
        'user.Teacher',
        on_delete=models.CASCADE,
        null=True,
    )
    practice_type = models.CharField(
        max_length=30,
        choices=TYPE,
        verbose_name='Тип практики'
    )
    # Address where the practice takes place.
    practice_addres = models.CharField(
        max_length=500,
        default='',
    )
    # Pass callables (no parentheses) so defaults are evaluated per record;
    # the original used datetime.datetime.today(), which is evaluated once
    # at import time and freezes the default date for every new row.
    date_start = models.DateField(
        default=datetime.date.today,
    )
    date_end = models.DateField(
        default=datetime.date.today,
    )

    class Meta:
        abstract = True
class ClassicPractice(BasePractice):
    """Practice without gamification and without mindfulness."""
    class Meta:
        verbose_name = "Практика без gamification и без mindfullness"
        verbose_name_plural = "Практики без gamification и без mindfullness"
class ClassicDiaryDay(BaseDiaryDay):
    """Diary day of a practice without gamification and without mindfulness."""
    class Meta:
        verbose_name = (
            "День практики в дневнике без gamification и без mindfullness"
        )
        verbose_name_plural = (
            "Дни практики в дневнике без gamification и без mindfullness"
        )
class ClassicDiary(models.Model):
    """Diary without gamification and without mindfulness."""
    # The days recorded in this diary.
    diary_days = models.ManyToManyField(
        ClassicDiaryDay
    )
    # The practice this diary documents (optional).
    practice = models.ForeignKey(
        ClassicPractice,
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )

    class Meta:
        verbose_name = "Дневник без gamification и без mindfullness"
        verbose_name_plural = "Дневники без gamification и без mindfullness"
| true
|
0b8391aa4fbbb308e9b1c41cead7b427302e157f
|
Python
|
FZKeehn/Sunlight-Data-Exploration-Project
|
/Association_Rules.py
|
UTF-8
| 2,094
| 3.265625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Frank Zebulon Keehn
Sunlight Data Exploration Project
Written in Python (x,y) with Python 2.7.10
"""
"""Orange is a Python data mining and visualization library that I installed
for its association rules functions"""
import Orange
# Load the basket file into a data table the association-rules functions use.
data = Orange.data.Table("Phrases_2015.basket")
# Induce the association rules for the given constraints (support = 0.4).
rules = Orange.associate.AssociationRulesSparseInducer(data,
                                                       support = 0.4)
# Write the rules to a text file for exploration and interpretation.
# `with` guarantees the file is closed even if a write fails (the original
# opened and closed the files manually).
with open('rules_2015.txt', 'w+') as rules_file:
    for item in rules:
        rules_file.write("%5.3f %5.3f %s\n" % (item.support, item.confidence,
                                               item))
# To explore the frequent itemsets themselves instead of just the rules:
inducer = Orange.associate.AssociationRulesSparseInducer(support = 0.4,
                                                         store_examples = True)
itemsets = inducer.get_itemsets(data)
# Generate a text file with automatic basic interpretations of the rules.
interpretation = []
for item in rules:
    interpretation.append([item.support, item.confidence,
                           str(item).split(" -> ")])
with open('interpretation.txt', 'w+') as interpretation_file:
    for item in interpretation:
        part1 = "The itemset '%s' occurs in %1.0f%% of cases in the dataset.\n" % \
                (item[2][0], item[0]*100)
        part2 = "The rule '%s -> %s' holds in %1.0f%% these cases.\n\n" % \
                (item[2][0], item[2][1], item[1]*100)
        interpretation_file.write(part1)
        interpretation_file.write(part2)
| true
|
a93eed488a88d5d46c1844e082867a6253020248
|
Python
|
thomasgibson/tabula-rasa
|
/verification/LDGH/LDGH.py
|
UTF-8
| 14,676
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
"""
This module runs a convergence history for a hybridized-DG
discretization of a model elliptic problem (detailed in the main
function). The method used is the LDG-H method.
"""
from firedrake import *
from firedrake.petsc import PETSc
from firedrake import COMM_WORLD
import numpy as np
import pandas as pd
def run_LDG_H_problem(r, degree, tau_order, write=False):
    """
    Solves the Dirichlet problem for the elliptic equation:
    -div(grad(u)) = f in [0, 1]^2, u = g on the domain boundary.
    The source function f and g are chosen such that the analytic
    solution is:
    u(x, y) = sin(x*pi)*sin(y*pi).
    This problem was crafted so that we can test the theoretical
    convergence rates for the hybridized DG method: LDG-H. This
    is accomplished by introducing the numerical fluxes:
    u_hat = lambda,
    q_hat = q + tau*(u - u_hat).
    The Slate DLS in Firedrake is used to perform the static condensation
    of the full LDG-H formulation of the Poisson problem to a single
    system for the trace u_hat (lambda) on the mesh skeleton:
    S * Lambda = E.
    The resulting linear system is solved via a direct method (LU) to
    ensure an accurate approximation to the trace variable. Once
    the trace is solved, the Slate DSL is used again to solve the
    elemental systems for the scalar solution u and its flux q.
    Post-processing of the scalar variable, as well as its flux, is
    performed using Slate to form and solve the elemental-systems for
    new approximations u*, q*. Depending on the choice of tau, these
    new solutions have superconvergent properties.
    The post-processed scalar u* superconverges at a rate of k+2 when
    two conditions are satisfied:
    (1) q converges at a rate of k+1, and
    (2) the cell average of u, ubar, superconverges at a rate of k+2.
    The choice of tau heavily influences these two conditions. For all
    tau > 0, the post-processed flux q* has enhanced convervation
    properties! The new solution q* has the following three properties:
    (1) q* converges at the same rate as q. However,
    (2) q* is in H(Div), meaning that the interior jump of q* is zero!
    And lastly,
    (3) div(q - q*) converges at a rate of k+1.
    The expected (theoretical) rates for the LDG-H method are
    summarized below for various orders of tau:
    -----------------------------------------------------------------
                          u     q    ubar    u*    q*     div(p*)
    -----------------------------------------------------------------
    tau = O(1) (k>0)     k+1   k+1    k+2   k+2   k+1       k+1
    tau = O(h) (k>0)      k    k+1    k+2   k+2   k+1       k+1
    tau = O(1/h) (k>0)   k+1    k     k+1   k+1    k        k+1
    -----------------------------------------------------------------
    Note that the post-processing used for the flux q only holds for
    simplices (triangles and tetrahedra). If someone knows of a local
    post-processing method valid for quadrilaterals, please contact me!
    For these numerical results, we chose the following values of tau:
    tau = O(1) -> tau = 1,
    tau = O(h) -> tau = h,
    tau = O(1/h) -> tau = 1/h,
    where h here denotes the facet area.
    This demo was written by: Thomas H. Gibson (t.gibson15@imperial.ac.uk)

    :arg r: mesh refinement level; the unit square is meshed 2^r x 2^r.
    :arg degree: polynomial degree k of the approximating spaces (k >= 1).
    :arg tau_order: one of '1', '1/h', 'h' — asymptotic order of tau.
    :arg write: if True, write the fields to a ParaView (.pvd) file.

    Returns a (error_dictionary, mesh) pair.
    """
    if tau_order is None or tau_order not in ("1", "1/h", "h"):
        raise ValueError(
            "Must specify tau to be of order '1', '1/h', or 'h'"
        )
    assert degree > 0, "Provide a degree >= 1"
    # Set up problem domain
    mesh = UnitSquareMesh(2**r, 2**r, quadrilateral=False)
    x = SpatialCoordinate(mesh)
    n = FacetNormal(mesh)
    # Set up function spaces
    U = VectorFunctionSpace(mesh, "DG", degree)
    V = FunctionSpace(mesh, "DG", degree)
    T = FunctionSpace(mesh, "HDiv Trace", degree)
    # Mixed space and test/trial functions
    W = U * V * T
    s = Function(W, name="solutions").assign(0.0)
    q, u, uhat = split(s)
    v, w, mu = TestFunctions(W)
    # Analytical solutions for u and q
    V_a = FunctionSpace(mesh, "DG", degree + 3)
    U_a = VectorFunctionSpace(mesh, "DG", degree + 3)
    u_a = Function(V_a, name="Analytic Scalar")
    a_scalar = sin(pi*x[0])*sin(pi*x[1])
    u_a.interpolate(a_scalar)
    q_a = Function(U_a, name="Analytic Flux")
    a_flux = -grad(a_scalar)
    q_a.project(a_flux)
    # Forcing term interpolated into a higher-degree space for accuracy
    Vh = FunctionSpace(mesh, "DG", degree + 3)
    f = Function(Vh).interpolate(-div(grad(a_scalar)))
    # Determine stability parameter tau
    if tau_order == "1":
        tau = Constant(1)
    elif tau_order == "1/h":
        tau = 1/FacetArea(mesh)
    elif tau_order == "h":
        tau = FacetArea(mesh)
    else:
        # unreachable: tau_order already validated above
        raise ValueError("Invalid choice of tau")
    # Numerical flux
    qhat = q + tau*(u - uhat)*n
    # Formulate the LDG-H method in UFL
    a = ((dot(v, q) - div(v)*u)*dx
         + uhat('+')*jump(v, n=n)*dS
         + uhat*dot(v, n)*ds
         - dot(grad(w), q)*dx
         + jump(qhat, n=n)*w('+')*dS
         + dot(qhat, n)*w*ds
         # Transmission condition
         + mu('+')*jump(qhat, n=n)*dS)
    L = w*f*dx
    F = a - L
    PETSc.Sys.Print("Solving using static condensation.\n")
    params = {'snes_type': 'ksponly',
              'mat_type': 'matfree',
              'pmat_type': 'matfree',
              'ksp_type': 'preonly',
              'pc_type': 'python',
              # Use the static condensation PC for hybridized problems
              # and use a direct solve on the reduced system for u_hat
              'pc_python_type': 'firedrake.SCPC',
              'pc_sc_eliminate_fields': '0, 1',
              'condensed_field': {'ksp_type': 'preonly',
                                  'pc_type': 'lu',
                                  'pc_factor_mat_solver_type': 'mumps'}}
    bcs = DirichletBC(W.sub(2), Constant(0.0), "on_boundary")
    problem = NonlinearVariationalProblem(F, s, bcs=bcs)
    solver = NonlinearVariationalSolver(problem, solver_parameters=params)
    solver.solve()
    PETSc.Sys.Print("Solver finished.\n")
    # Computed flux, scalar, and trace
    q_h, u_h, uhat_h = s.split()
    # Now we compute the various metrics. First we
    # simply compute the L2 error between the analytic
    # solutions and the computed ones.
    scalar_error = errornorm(a_scalar, u_h, norm_type="L2")
    flux_error = errornorm(a_flux, q_h, norm_type="L2")
    # We keep track of all metrics using a Python dictionary
    error_dictionary = {"scalar_error": scalar_error,
                        "flux_error": flux_error}
    # Now we use Slate to perform element-wise post-processing
    # Scalar post-processing:
    # This gives an approximation in DG(k+1) via solving for
    # the solution of the local Neumman data problem:
    #
    # (grad(u), grad(w))*dx = -(q_h, grad(w))*dx
    # m(u) = m(u_h) for all elements K, where
    #
    # m(v) := measure(K)^-1 * int_K v dx.
    # NOTE: It is currently not possible to correctly formulate this
    # in UFL. However, we can introduce a Lagrange multiplier and
    # transform the local problem above into a local mixed system:
    #
    # find (u, psi) in DG(k+1) * DG(0) such that:
    #
    # (grad(u), grad(w))*dx + (psi, grad(w))*dx = -(q_h, grad(w))*dx
    # (u, phi)*dx = (u_h, phi)*dx,
    #
    # for all w, phi in DG(k+1) * DG(0).
    DGk1 = FunctionSpace(mesh, "DG", degree + 1)
    DG0 = FunctionSpace(mesh, "DG", 0)
    Wpp = DGk1 * DG0
    up, psi = TrialFunctions(Wpp)
    wp, phi = TestFunctions(Wpp)
    # Create mixed tensors:
    K = Tensor((inner(grad(up), grad(wp)) +
                inner(psi, wp) +
                inner(up, phi))*dx)
    F = Tensor((-inner(q_h, grad(wp)) +
                inner(u_h, phi))*dx)
    E = K.inv * F
    PETSc.Sys.Print("Local post-processing of the scalar variable.\n")
    u_pp = Function(DGk1, name="Post-processed scalar")
    assemble(E.blocks[0], tensor=u_pp)
    # Now we compute the error in the post-processed solution
    # and update our error dictionary
    scalar_pp_error = errornorm(a_scalar, u_pp, norm_type="L2")
    error_dictionary.update({"scalar_pp_error": scalar_pp_error})
    # Post processing of the flux:
    # This is a modification of the local Raviart-Thomas projector.
    # We solve the local problem: find 'q_pp' in RT(k+1)(K) such that
    #
    # (q_pp, v)*dx = (q_h, v)*dx,
    # (q_pp.n, gamma)*dS = (qhat.n, gamma)*dS
    #
    # for all v, gamma in DG(k-1) * DG(k)|_{trace}. The post-processed
    # solution q_pp converges at the same rate as q_h, but is HDiv
    # conforming. For all LDG-H methods,
    # div(q_pp) converges at the rate k+1. This is a way to obtain a
    # flux with better conservation properties. For tau of order 1/h,
    # div(q_pp) converges faster than q_h.
    qhat_h = q_h + tau*(u_h - uhat_h)*n
    local_RT = FiniteElement("RT", triangle, degree + 1)
    RTd = FunctionSpace(mesh, BrokenElement(local_RT))
    DGkn1 = VectorFunctionSpace(mesh, "DG", degree - 1)
    # Use the trace space already defined
    Npp = DGkn1 * T
    n_p = TrialFunction(RTd)
    vp, mu = TestFunctions(Npp)
    # Assemble the local system and invert using Slate
    A = Tensor(inner(n_p, vp)*dx +
               jump(n_p, n=n)*mu*dS + dot(n_p, n)*mu*ds)
    B = Tensor(inner(q_h, vp)*dx +
               jump(qhat_h, n=n)*mu*dS + dot(qhat_h, n)*mu*ds)
    PETSc.Sys.Print("Local post-processing of the flux.\n")
    q_pp = assemble(A.inv * B)
    # And check the error in our new flux
    flux_pp_error = errornorm(a_flux, q_pp, norm_type="L2")
    # To verify our new flux is HDiv conforming, we also
    # evaluate its jump over mesh interiors. This should be
    # approximately zero if everything worked correctly.
    flux_pp_jump = assemble(jump(q_pp, n=n)*dS)
    error_dictionary.update({"flux_pp_error": flux_pp_error})
    error_dictionary.update({"flux_pp_jump": np.abs(flux_pp_jump)})
    PETSc.Sys.Print("Post-processing finished.\n")
    PETSc.Sys.Print("Finished test case for h=1/2^%d.\n" % r)
    # If write specified, then write output
    if write:
        if tau_order == "1/h":
            o = "hneg1"
        else:
            o = tau_order
        File("results/LDGH_tauO%s_deg%d.pvd" %
             (o, degree)).write(q_a, u_a, q_h, u_h, u_pp)
    # Return all error metrics
    return error_dictionary, mesh
def compute_conv_rates(u):
    """Compute pairwise convergence rates for this particular test case.

    :arg u: a list of errors.

    Returns a list whose first entry is the placeholder '---' (the first
    error has no predecessor to compare against), followed by the log2
    ratios of consecutive errors.
    """
    errs = np.array(u)
    return ['---'] + list(np.log2(errs[:-1] / errs[1:]))
def run_single_test(r, degree, tau_order, write=False):
    """Run one LDG-H solve at mesh parameter h=1/2^r and print error metrics."""
    PETSc.Sys.Print("Running LDG-H method (triangles) of degree %d with tau=O('%s') "
                    "and mesh parameter h=1/2^%d." %
                    (degree, tau_order, r))

    errors, _ = run_LDG_H_problem(r=r,
                                  degree=degree,
                                  tau_order=tau_order,
                                  write=write)

    # Report every metric with the same 8-decimal formatting.
    for template, value in (
            ("Error in scalar: %0.8f", errors["scalar_error"]),
            ("Error in post-processed scalar: %0.8f", errors["scalar_pp_error"]),
            ("Error in flux: %0.8f", errors["flux_error"]),
            ("Error in post-processed flux: %0.8f", errors["flux_pp_error"]),
            ("Interior jump of post-processed flux: %0.8f",
             np.abs(errors["flux_pp_jump"]))):
        PETSc.Sys.Print(template % value)
def run_LDG_H_convergence(degree, tau_order, start, end):
    """Run a mesh-refinement study for the LDG-H method.

    Solves for every mesh parameter r in [start, end], computes the
    convergence rate of each error metric, and (on MPI rank 0 only)
    writes the collected results to a CSV file under results/.
    """
    PETSc.Sys.Print("Running convergence test for LDG-H method (triangles) "
                    "of degree %d with tau order '%s'"
                    % (degree, tau_order))
    # Create arrays to write to CSV file
    r_array = []
    scalar_errors = []
    scalar_pp_errors = []
    flux_errors = []
    flux_pp_errors = []
    flux_pp_jumps = []
    num_cells = []
    # Run over mesh parameters and collect error metrics
    for r in range(start, end + 1):
        r_array.append(r)
        error_dict, mesh = run_LDG_H_problem(r=r,
                                             degree=degree,
                                             tau_order=tau_order,
                                             write=False)
        # Extract errors and metrics
        scalar_errors.append(error_dict["scalar_error"])
        scalar_pp_errors.append(error_dict["scalar_pp_error"])
        flux_errors.append(error_dict["flux_error"])
        flux_pp_errors.append(error_dict["flux_pp_error"])
        flux_pp_jumps.append(error_dict["flux_pp_jump"])
        num_cells.append(mesh.num_cells())
    # Now that all error metrics are collected, we can compute the rates:
    scalar_rates = compute_conv_rates(scalar_errors)
    scalar_pp_rates = compute_conv_rates(scalar_pp_errors)
    flux_rates = compute_conv_rates(flux_errors)
    flux_pp_rates = compute_conv_rates(flux_pp_errors)
    # Report the finest-mesh errors
    PETSc.Sys.Print("Error in scalar: %0.13f" %
                    scalar_errors[-1])
    PETSc.Sys.Print("Error in post-processed scalar: %0.13f" %
                    scalar_pp_errors[-1])
    PETSc.Sys.Print("Error in flux: %0.13f" %
                    flux_errors[-1])
    PETSc.Sys.Print("Error in post-processed flux: %0.13f" %
                    flux_pp_errors[-1])
    PETSc.Sys.Print("Interior jump of post-processed flux: %0.13f" %
                    np.abs(flux_pp_jumps[-1]))
    # Only rank 0 writes the CSV to avoid concurrent writes under MPI
    if COMM_WORLD.rank == 0:
        degrees = [degree] * len(r_array)
        data = {"Mesh": r_array,
                "Degree": degrees,
                "NumCells": num_cells,
                "ScalarErrors": scalar_errors,
                "ScalarConvRates": scalar_rates,
                "PostProcessedScalarErrors": scalar_pp_errors,
                "PostProcessedScalarRates": scalar_pp_rates,
                "FluxErrors": flux_errors,
                "FluxConvRates": flux_rates,
                "PostProcessedFluxErrors": flux_pp_errors,
                "PostProcessedFluxRates": flux_pp_rates}
        # '1/h' is not filesystem-safe, so encode it as 'hneg1'
        if tau_order == "1/h":
            o = "hneg1"
        else:
            o = tau_order
        df = pd.DataFrame(data)
        result = "results/LDG-H-d%d-tau_order-%s.csv" % (degree, o)
        df.to_csv(result, index=False, mode="w")
| true
|
fe095267b52ebecf582f19f64c9886966d6af7ed
|
Python
|
uzdpe/german_credit_risk_xai
|
/backend/data_preprocessing/data_loading.py
|
UTF-8
| 9,415
| 2.890625
| 3
|
[] |
no_license
|
"""Sources:
dataset: https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from backend.util.static import PATHS, dataset, params
from backend.util.helper_functions import get_ct_feature_names
from backend.util.util import clean_folder
import plotly.express as px
import plotly.offline as py
german_credit_dataset = dataset[params["dataset"]]
def initialize():
    """First-time initialization: wipe derived folders, then load, prepare,
    label-encode (LIME format) and split the dataset."""
    # Wipe every derived-artifact folder before regenerating.
    for key in ("02_data_preprocessed", "03_data_outputs", "04_trained_models"):
        clean_folder(folder=PATHS[key])

    credit_df = load_data()
    credit_df = data_preparation(credit_df)
    # Encoding maps are not needed here, only the encoded frame.
    credit_df, _cat_encoding, _label_encoding = encoding(credit_df)
    create_training_split(credit_df)
def load_data(type="raw"):
    """Loads the data, type can be raw, training, testing."""
    if type == "raw":
        return pd.read_excel(PATHS["01_data_raw"] + "credit_risk.xls", index_col=0)
    if type == "training":
        pre = PATHS["02_data_preprocessed"]
        return (pd.read_csv(pre + "X_train.csv", index_col=0),
                pd.read_csv(pre + "y_train.csv", index_col=0))
    if type == "testing":
        pre = PATHS["02_data_preprocessed"]
        return (pd.read_csv(pre + "X_test.csv", index_col=0),
                pd.read_csv(pre + "y_test.csv", index_col=0))
    raise AssertionError("Error in if-else statement")
def explore_data(df_credit):
    """Print a quick overview of the credit dataframe (schema + first rows).

    Note: DataFrame.info() prints to stdout and returns None, so the first
    print also emits "None" — behavior preserved from the original.
    """
    print("Shape of the data")
    print(df_credit.info())
    print("Header")
    print(df_credit.head())
def create_training_split(df_credit):
    """Creates the train, val, and test split and persists all four parts as CSV."""
    target = df_credit["Risk"]
    features = df_credit.drop("Risk", axis=1)
    # Hold out 10% of the rows for testing; fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.1, random_state=42)

    out = PATHS["02_data_preprocessed"]
    pd.DataFrame(X_train).to_csv(out + "X_train.csv")
    pd.DataFrame(X_test).to_csv(out + "X_test.csv")
    pd.DataFrame(y_train).to_csv(out + "y_train.csv")
    pd.DataFrame(y_test).to_csv(out + "y_test.csv")
    return X_train, X_test, y_train, y_test
def data_preparation(df_credit):
    """Small dataset fixes; currently a pass-through returning the frame unchanged."""
    return df_credit
def analyze_dataset():
    """Analyze the dataset and report which columns contain which type of features.

    Returns (categorical column names, configured categorical indices,
    all feature names).
    """
    df = load_data()
    categorical_names = [c for c in df.columns if df[c].dtype == 'object']
    return (categorical_names,
            german_credit_dataset["categorical_features_indices"],
            df.columns.tolist())
def encoding(df_credit):
    """Preprocessing: Encodes the categorical labels into the LIME format. This format should not be used for
    training since it is not one-hot-encoded (high multicollinearity)"""
    # column index -> array of original class labels; the position of a label
    # in the array is the integer code the LabelEncoder assigned to it.
    categorical_encoding = {}
    for col in german_credit_dataset["categorical_features_indices"]:
        label_encoder = preprocessing.LabelEncoder()
        df_credit.iloc[:, col] = label_encoder.fit_transform(df_credit.iloc[:, col])
        categorical_encoding[col] = label_encoder.classes_
    # NOTE(review): initialized as a dict but overwritten below with the
    # classes_ array of the LAST label column — fine for a single label
    # column; confirm if multiple label_indices are ever configured.
    categorical_encoding_label = {}
    for col in german_credit_dataset["label_indices"]:
        label_encoder = preprocessing.LabelEncoder()
        df_credit.iloc[:, col] = label_encoder.fit_transform(df_credit.iloc[:, col])
        categorical_encoding_label = label_encoder.classes_
    return df_credit, categorical_encoding, categorical_encoding_label
def load_encoded_data():
    """Load lime-encoded training and testing data and return one-hot-encoded data."""
    X_train, y_train = load_data(type="training")
    X_test, y_test = load_data(type="testing")
    # Fit the one-hot encoder on the training split only, then reuse it on test.
    X_train_encoded, encoder, columns, encoder = build_and_fit_one_hot_encoder(X_train)
    X_test_encoded = pd.DataFrame(encoder.transform(X_test), columns=columns)
    return X_train_encoded, y_train, X_test_encoded, y_test, encoder
def build_and_fit_one_hot_encoder(X_train):
    """Returns a one hot encoder and an encoded dataset.

    Numeric features are median-imputed and standardized; categorical
    features are constant-imputed and one-hot encoded (first level dropped
    to avoid multicollinearity).

    NOTE(review): the return tuple contains `encoder` twice (positions 2
    and 4); callers unpack four values, so this is caller-visible and left
    unchanged — confirm whether one slot was meant to be something else.
    """
    numeric_features = german_credit_dataset["num_features"]
    numeric_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())])
    categorical_features = german_credit_dataset["cat_features"]
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant')),
        ('onehot', OneHotEncoder(handle_unknown='error', drop="first"))])
    # Apply each transformer to its column group; untouched columns pass through.
    encoder = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features),
            ('cat', categorical_transformer, categorical_features)],
        remainder="passthrough")
    encoder.fit(X_train)
    X_train_encoded = encoder.transform(X_train)
    # Recover human-readable column names for the transformed matrix.
    columns = get_ct_feature_names(encoder)
    X_train_encoded = pd.DataFrame(X_train_encoded, columns=columns)
    return X_train_encoded, encoder, columns, encoder
def one_hot_encoder_old(df_credit, nan_as_category=False):
    """OLD: one-hot encode all object-dtype columns via pandas.get_dummies.

    Returns the encoded frame and the list of newly created column names.
    The first dummy level of each column is dropped to avoid multicollinearity.
    """
    before = list(df_credit.columns)
    object_cols = [c for c in df_credit.columns if df_credit[c].dtype == 'object']
    encoded = pd.get_dummies(df_credit, columns=object_cols,
                             dummy_na=nan_as_category, drop_first=True)
    added = [c for c in encoded.columns if c not in before]
    return encoded, added
def data_exploration(df_credit):
    """Show the class balance of the Risk label with seaborn (blocks on plt.show)."""
    sns.countplot(x="Risk", hue="Risk", data=df_credit)
    plt.show()
| true
|
267aade8227f528d948078718884cceb55adfb7a
|
Python
|
damaresende/USP
|
/SCC5900/pj2/algorithms/Kruskal.py
|
UTF-8
| 3,445
| 3.8125
| 4
|
[] |
no_license
|
'''
Uses union find data structure with ranking and path reduction to build a MST
based on Kruskal's algorithm. The algorithm runs V - 1 - K - 1 times, where V is
the number of vertices and K the number of clusters to be found.
@author: Damares Resende
@contact: damaresresende@usp.br
@since: May 26, 2019
@organization: University of Sao Paulo (USP)
Institute of Mathematics and Computer Science (ICMC)
Project of Algorithms Class (SCC5000)
'''
class Kruskal:
    def __init__(self, nclusters):
        '''
        Initializes the number of clusters

        @param nclusters: number of clusters
        @return None
        '''
        self.nclusters = nclusters

    def build_mst(self, graph, npoints):
        '''
        Builds the MST for Kruskal's algorithm, stopping early so the
        resulting forest has exactly `nclusters` connected components.

        @param graph: List of edges [s, v, w] where s is the source,
        v the destination and w the weight
        @param npoints: number of points in the graph
        @return list of edges [s, v, w] of the Minimum Spanning Forest
        '''
        rank = [0] * npoints           # rank of each disjoint set
        parent = list(range(npoints))  # each node starts as its own parent
        result = []                    # edges accepted into the MST
        # Sort edges by ascending weight so the smallest are picked first;
        # this also guarantees the K largest edges are never considered.
        edges = sorted(graph, key=lambda item: item[2])
        i = 0
        e = 0
        # A spanning tree needs npoints - 1 edges; leaving out
        # nclusters - 1 of them keeps nclusters separate components.
        while e < (npoints - 1) - (self.nclusters - 1):
            u, v, w = edges[i]  # source, destination and weight of the edge
            i += 1
            x = self.find(parent, u)
            y = self.find(parent, v)
            # Accept the edge only if it does not close a cycle
            if x != y:
                e += 1
                result.append([u, v, w])
                self.union(parent, rank, x, y)
        return result

    def find(self, parent, i):
        '''
        Finds the root of the set containing element i, with path compression

        @param parent: parent array of the disjoint-set forest
        @param i: node id
        @return the root of the set containing i
        '''
        if parent[i] != i:
            # Path compression: re-point i straight at the root so later
            # finds are near O(1). The original recursed without updating
            # parent[i], so despite its docstring no compression happened.
            parent[i] = self.find(parent, parent[i])
        return parent[i]

    def union(self, parent, rank, x, y):
        '''
        Performs the union of two sets by using the rank of them

        @param parent: parent array of the disjoint-set forest
        @param rank: rank of each set
        @param x: element of the first set
        @param y: element of the second set
        @return None
        '''
        xroot = self.find(parent, x)
        yroot = self.find(parent, y)
        # Attach smaller rank tree under root of high rank tree
        if rank[xroot] < rank[yroot]:
            parent[xroot] = yroot
        elif rank[xroot] > rank[yroot]:
            parent[yroot] = xroot
        else:
            # If ranks are equal, make one the root and bump its rank
            parent[yroot] = xroot
            rank[xroot] += 1
| true
|
adaea801ae00faf6413a2fdc6a05d678f7b3b809
|
Python
|
bespoke-silicon-group/hb_starlite
|
/torch-sparse-example.py
|
UTF-8
| 1,656
| 3.09375
| 3
|
[] |
no_license
|
"""Some useful utilities for working with sparse tensors using the
`torch.sparse` library.
"""
import torch.sparse
import torch_sparse
import scipy.sparse
import numpy
def sparse_scipy2torch(matrix):
    """Convert a matrix from *any* `scipy.sparse` representation to a
    sparse `torch.tensor` value.
    """
    coo = matrix.tocoo()
    indices = torch.LongTensor(numpy.vstack((coo.row, coo.col)))
    values = torch.FloatTensor(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
def sparse_torch2scipy(tensor):
    """Convert a sparse `torch.tensor` matrix (which must be
    two-dimensional, i.e., a matrix) to a `scipy.sparse` matrix. The
    result uses the COO representation, but you can convert it to any
    other sparse representation you need.
    """
    coalesced = tensor.coalesce()
    values = coalesced.values()
    rows, cols = coalesced.indices()
    # Pass the tensor's shape explicitly: letting coo_matrix infer it from
    # the max indices silently truncates trailing all-zero rows/columns.
    return scipy.sparse.coo_matrix(
        (values.numpy(), (rows.numpy(), cols.numpy())),
        shape=tuple(tensor.shape),
    )
def sparse_sparse_mm(a, b):
    """Sparse/sparse matrix multiply for `torch.sparse` tensors.
    Requires the supplemental `torch_sparse` library.
    """
    assert a.shape[1] == b.shape[0], "dimension mismatch for multiply"
    lhs = a.coalesce()
    rhs = b.coalesce()
    # torch_sparse.spspmm takes (indices, values) pairs plus the three
    # multiplication dimensions (m, k, n).
    out_indices, out_values = torch_sparse.spspmm(
        lhs.indices(), lhs.values(),
        rhs.indices(), rhs.values(),
        a.shape[0], a.shape[1], b.shape[1],
    )
    out_shape = torch.Size([a.shape[0], b.shape[1]])
    return torch.sparse.FloatTensor(out_indices, out_values, out_shape)
| true
|
41618c8341326df432c3f6b8ebebb4f43845fca1
|
Python
|
andersonms1/python-brasil
|
/strings/9_validade_cpr.py
|
UTF-8
| 1,116
| 3.609375
| 4
|
[] |
no_license
|
# Validate a Brazilian CPF typed in the XXX.XXX.XXX-XX format.
valid = True
sum = 0  # weighted digit sum for the first check digit (shadows builtin sum)
cpf = input('Digite o CPF desejado: ')
counter = 10
second_sum = 0  # weighted digit sum for the second check digit
second_counter = 11
# check the length (11 digits + 3 separator characters)
if len(cpf) != (11 + 3):
    print('Invalid size!')
    valid = False
# check that the digit groups are numeric
# NOTE(review): the first group cpf[:3] is never checked and cpf[-2:] is
# tested twice — looks like a copy/paste slip; confirm the intended groups.
elif not cpf[-2:].isdigit() or not cpf[4:7].isdigit() or not cpf[8:11].isdigit() or not cpf[-2:].isdigit():
    print('Digite numeros!')
    valid = False
# check the separator positions (dots after groups 1 and 2, dash before the end)
elif cpf[3] != '.' or cpf[7] != '.' or cpf[11] != '-':
    print('Digite no formato correto')
    valid = False
# weighted sum of the first 9 digits (weights 10..2) for check digit 1;
# the counter only decrements on digits, so separators are skipped
for w in cpf[:11]:
    if w.isdigit():
        sum = sum + int(w) * counter
        counter = counter - 1
# weighted sum of the first 10 digits (weights 11..2) for check digit 2
for w in cpf[:13]:
    if w.isdigit():
        second_sum = second_sum + int(w) * second_counter
        second_counter = second_counter - 1
def valid_digits (any_sum, digit):
v = False
if any_sum % 11 == 0 or any_sum % 11 == 1:
v = (digit == 0)
else:
v = (11 - (any_sum % 11) == digit)
return v
if valid_digits(sum, cpf[len(cpf)-2]) or valid_digits(second_sum, cpf[len(cpf)-1]):
valid = False
print(valid)
# https://www.somatematica.com.br/faq/cpf.php
| true
|
818ba842af171b6e52aa02435fcf8982e1c2c6b7
|
Python
|
ReubenBond/serverless-orleans
|
/loadtest/locustfile.py
|
UTF-8
| 597
| 2.65625
| 3
|
[] |
no_license
|
import string
import random
from locust import HttpUser, task, between
class MessageActorUser(HttpUser):
    """Locust load-test user exercising the message-actor HTTP API.

    Each simulated user alternates between posting random messages and
    fetching message lists for randomly chosen actor ids.
    """

    # Pause 0.5-2 seconds between consecutive tasks of one user.
    wait_time = between(0.5, 2)

    @task(2)  # weighted 2:1 relative to get_messages
    def add_message(self):
        # Spread writes across 100 actor ids.
        actor_id = random.randint(1, 100)
        self.client.post(f"/messages/{actor_id}", json=self.__get_random_text())

    @task
    def get_messages(self):
        actor_id = random.randint(1, 100)
        self.client.get(f"/messages/{actor_id}")

    def __get_random_text(self):
        # Random 10-50 character uppercase-alphanumeric payload.
        length = random.randint(10, 50)
        return ''.join(random.choices(string.ascii_uppercase + string.digits, k = length))
| true
|
aec2c3424a24e051c12c131d6099d04567dff97c
|
Python
|
boyoonc/stackathon
|
/workspace/csvs/datamanip_guide.py
|
UTF-8
| 864
| 2.828125
| 3
|
[] |
no_license
|
arr = [comp1:{name: 1, size:5}, comp2:{name:2, size:10}, ... ]
level2dict = {
1A: {name: bucket1a, children:[]},
1B: {name: bucket1b, children:[]},
2A: {name: bucket2a, children:[]}
}
level1dict = {
1: {name: bucket1, children:[]},
2: {name: bucket2, children:[]},
}
root = {
name: root,
children:[]
}
map = {comp1:1A, comp2:2A}
for key in arr:
company = key
targetValue = map[key]
targetDict = level2dict[targetValue]
targetDict[children].append(arr[company]) //figure out
nextTargetValue = targetValue[:-1]
nextTargetDict = level1dict[nextTargetValue]
nextTargetDict[children].append(targetDict)
for each in level1dict:
root[children].append(level1dict[each])
//got this
level2dict = {
1A: {name: bucket1a, children:[comp1:{name: 1, size:5}]},
1B: {name: bucket1b, children:[]},
2A: {name: bucket2a, children:[comp2:{name:2, size:10}]}
}
| true
|
446ec42fccc72d32f67b60e250237b9549882c10
|
Python
|
mlvfx/vfxAssetBox
|
/assetbox/base/host_manager.py
|
UTF-8
| 2,928
| 2.546875
| 3
|
[
"CC0-1.0"
] |
permissive
|
"""
HostManager reads the plugins directory, loads all plugins, and registers
their actions.
"""
import os
import imp
from assetbox.base.plugins.host import BaseHost
from assetbox.base.plugins import actions
PROJECT_FOLDER = os.path.dirname(os.path.dirname(__file__))
def find_plugins():
    """Query the plugin folder for plugins to load. Yields found locations."""
    plugins_folder = os.path.join(PROJECT_FOLDER, 'plugins').replace('\\', '/')
    if not os.path.isdir(plugins_folder):
        return
    for entry in os.listdir(plugins_folder):
        candidate = os.path.join(plugins_folder, entry).replace('\\', '/')
        # Only directories containing the required plugin files count.
        if os.path.isdir(candidate) and is_plugin(candidate):
            yield candidate
def is_plugin(plugin):
    """Return True when the folder holds the files every plugin must
    provide (actions.py and host.py).

    Args:
        plugin (str): path to a plugin folder.
    """
    required = {'actions.py', 'host.py'}
    return required.issubset(os.listdir(plugin))
class HostManager(object):
    """Primary hostmanager class, loads and stores all plugins.

    NOTE: this module is Python 2 (print statements, ``except E, e``
    clauses) and uses the ``imp`` loader.
    """

    def __init__(self):
        """Initialise the class, load the plugins."""
        plugins = find_plugins()
        self.host_app = None  # HostApp of the detected host application, if any
        self.host_actions = actions.register_actions()  # base actions, always present
        self.host_filetypes = []  # filetypes supported by the detected host
        for p in plugins:
            try:
                # Each plugin folder provides host.py exposing a HostApp class.
                host_path = os.path.join(p, 'host.py').replace('\\', '/')
                name, ext = os.path.splitext(host_path)
                host = imp.load_source(name, host_path)
                host_app = host.HostApp()
                # `inhost` marks the plugin matching the application we are
                # currently running inside; only then adopt it.
                if host_app.inhost:
                    self.host_app = host_app
                    self.host_filetypes = self.host_app.filetypes
                    # NOTE(review): action loading is guarded by `inhost`
                    # here; confirm against upstream indentation.
                    try:
                        action_path = os.path.join(p, 'actions.py').replace('\\', '/')
                        name, ext = os.path.splitext(action_path)
                        action = imp.load_source(name, action_path)
                        self.host_actions += action.register_actions()
                    except IOError:
                        print 'HostManager:IOError -- No actions found'
            except IOError, ioe:
                print 'HostManager:IOError -- ', ioe, p
            except ImportError, ime:
                print 'HostManager:ImportError -- ', ime, p

    def get_hostapp(self):
        """Return the host application (falls back to a BaseHost when no
        plugin matched the running environment)."""
        if not self.host_app:
            self.host_app = BaseHost()
        return self.host_app

    def get_actions(self):
        """Return the actions associated with this host."""
        return self.host_actions

    def get_filetypes(self):
        """Return what filetypes this host supports."""
        return self.host_filetypes
| true
|
5e130fb37a03149bdab242df468d0a3e8b036279
|
Python
|
joehigh/COMSW4771
|
/HW 2/q5/adadelta.py
|
UTF-8
| 940
| 2.671875
| 3
|
[] |
no_license
|
import random
from jax import grad
import jax.numpy as np
def adadelta(f, start, step, max_iter, prec):
    """Minimise with the ADADELTA scheme: per-coordinate step sizes from
    running RMS averages of gradients (eg) and of past updates (ed).

    Args:
        f: callable such that f(i) returns the loss function (of x) for
           sample index i.
        start: initial parameter vector (1-D, jax-compatible).
        step: unused here (kept for call compatibility).
        max_iter: iteration budget; the only active stopping test.
        prec: unused — the norm-based stopping test is commented out.

    Returns:
        (x_hist, y_hist): parameters and loss values recorded per step.
    """
    x_hist = []
    y_hist = []
    x = start
    iters = 0
    delta_x = 0
    eps = 1e-8   # numerical floor inside the square roots
    rho = .95    # decay rate for both running averages
    print(x.shape)  # NOTE(review): debug output left in
    # Running E[g^2] and E[delta_x^2], one entry per coordinate.
    eg = np.zeros(x.shape[0])
    ed = np.zeros(x.shape[0])
    while True:
        # Fixed pass over 500 sample indices; shuffling is disabled.
        # (The comprehension variable does not clobber the outer x in py3.)
        l = [x for x in range(500)]
        #random.shuffle(l)
        for i in l:
            print(x)  # NOTE(review): debug output left in
            last_x = x
            g = grad(f(i))(x)
            # Accumulate squared gradients, then scale the raw gradient by
            # the ratio RMS(previous updates) / RMS(gradients).
            eg = rho * eg + (1 - rho)*g**2
            cur_delta = np.sqrt(ed + eps) / np.sqrt(eg + eps) * g
            ed = rho * ed + (1 - rho) * cur_delta**2
            x -= cur_delta
            delta_x = x - last_x
            #if np.linalg.norm(delta_x) < prec or iters > max_iter:
            if iters > max_iter:
                return x_hist, y_hist
            x_hist.append(x)
            y_hist.append(f(i)(x))
            iters += 1
    # Unreachable: the loop above only exits via the return inside it.
    return x_hist, y_hist
| true
|
eeded8e15175153813131f9c88234392b3a42737
|
Python
|
dheerajgm/daily-coding-problem
|
/solutions/problem_340.py
|
UTF-8
| 850
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
import sys
class Point:
    """A 2-D point with x/y coordinates."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Same rendering as before: "[x=<x>,y=<y>]".
        return "[x=%s,y=%s]" % (self.x, self.y)
def get_distance(p1, p2):
    """Euclidean distance between two point-like objects with x/y."""
    dx = p1.x - p2.x
    dy = p1.y - p2.y
    return (dx * dx + dy * dy) ** 0.5
def get_closest(point_tuples):
    """Return the pair of (x, y) tuples with the smallest pairwise
    Euclidean distance, or None when fewer than two points are given."""
    points = [Point(x, y) for (x, y) in point_tuples]
    best_pair = None
    best_dist = sys.maxsize
    # Brute-force all unordered pairs; strict '<' keeps the first-seen
    # pair on ties, matching the previous behaviour.
    for idx in range(len(points) - 1):
        for other in points[idx + 1:]:
            dist = get_distance(points[idx], other)
            if dist < best_dist:
                best_dist = dist
                best_pair = ((points[idx].x, points[idx].y),
                             (other.x, other.y))
    return best_pair
# Tests
assert get_closest([(1, 1), (-1, -1), (3, 4), (6, 1), (-1, -6), (-4, -3)]) == \
((1, 1), (-1, -1))
| true
|
5546c56aa0bc5afdc62e4290046d856cb92c1f3b
|
Python
|
JohnDoneth/cis457-project2
|
/src/ftp/ftp_server.py
|
UTF-8
| 3,877
| 2.53125
| 3
|
[] |
no_license
|
import common
from asyncio import StreamReader, StreamWriter
import asyncio
import os
import base64
from typing import Dict
# filter out files with *.py extensions
def filter_files(path):
_, extension = os.path.splitext(path[0])
if extension == ".py":
return False
else:
return True
class FTPServer:
    """Asyncio JSON-over-TCP file server.

    Understands LIST / RETRIEVE / STORE / DELETE / QUIT requests; the
    wire format (one JSON object per message) is handled by the
    `common` helpers.
    """

    async def handle_file_request(
        self, request: Dict, reader: StreamReader, writer: StreamWriter
    ):
        """Respond with the base64-encoded contents of the requested file.

        Bug fix: `common.send_json` is a coroutine (it is awaited at
        every other call site in this class); here it was called without
        `await`, so the coroutine was created, discarded, and no
        response was ever transmitted. Both calls now await it.
        """
        filename = request["filename"]
        if not os.path.exists(filename):
            await common.send_json(writer, {"error": "file does not exist"})
            return
        with open(filename, "rb") as infile:
            contents = infile.read()
        # base64 encode the binary file so it survives JSON transport
        contents = base64.b64encode(contents).decode("utf-8")
        await common.send_json(writer, {"filename": filename, "content": contents})

    async def run_forever(self, local_port):
        """Listen on 127.0.0.1:`local_port` and serve clients forever."""
        server = await asyncio.start_server(
            self.handle_request, "127.0.0.1", local_port
        )
        addr = server.sockets[0].getsockname()
        print("Server started")
        print(f"Waiting for file requests on {addr}")
        async with server:
            await server.serve_forever()

    async def handle_request(self, reader: StreamReader, writer: StreamWriter):
        """Per-connection loop: process JSON requests until the peer
        disconnects (recv_json returns None)."""
        while True:
            request = await common.recv_json(reader)
            print(request)
            if request is None:
                break
            if not request["method"]:
                print("Invalid Request: missing method field.")
            method = request["method"].upper()
            if method.startswith("LIST"):
                # Pair every regular file in the cwd with a human-readable
                # size, hiding *.py sources via filter_files.
                files = [f for f in os.listdir(".") if os.path.isfile(f)]
                file_sizes = [
                    common.sizeof_fmt(os.path.getsize(f))
                    for f in os.listdir(".")
                    if os.path.isfile(f)
                ]
                files = list(filter(filter_files, zip(files, file_sizes)))
                await common.send_json(writer, {"files": files,})
            elif method.startswith("RETRIEVE"):
                filename = request["filename"]
                if not os.path.exists(filename):
                    await common.send_json(writer, {"error": "file does not exist"})
                    return
                with open(filename, "rb") as infile:
                    contents = infile.read()
                # base64 encode the binary file
                contents = base64.b64encode(contents).decode("utf-8")
                await common.send_json(
                    writer, {"filename": filename, "content": contents}
                )
            elif method.startswith("STORE"):
                # SECURITY NOTE(review): `filename` comes from the client
                # unvalidated, so a malicious client can write outside the
                # serving directory (path traversal).
                filename = request["filename"]
                with open(filename, "wb") as outfile:
                    # base64 decode from the request body
                    contents = base64.b64decode(request["content"])
                    outfile.write(contents)
            elif method.startswith("QUIT"):
                # Client announced disconnect; the loop ends once
                # recv_json returns None.
                pass
            elif method.startswith("DELETE"):
                filename = request["filename"]
                if not os.path.exists(filename):
                    await common.send_json(writer, {"error": "file does not exist"})
                else:
                    os.remove(filename)
                    await common.send_json(writer, {"success": "file removed"})
            else:
                await common.send_json(writer, {"error": "Unsupported command"})
        writer.close()
        await writer.wait_closed()
| true
|
30c7f1dfdcd19f43939f915fe25f518ffdf6c2b7
|
Python
|
jwodder/doapi
|
/doapi/tag.py
|
UTF-8
| 9,742
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
from six import iteritems, string_types
from six.moves import map # pylint: disable=redefined-builtin
from .base import Resource
from .droplet import Droplet
from .floating_ip import FloatingIP
from .image import Image
resource_types = {
"droplets": Droplet,
# Not supported by DO yet, but it's good to be prepared:
"images": Image,
"floating_ips": FloatingIP,
}
class Tag(Resource):
    r"""
    .. versionadded:: 0.2.0

    A tag resource, representing a label that can be applied to other
    resources.

    New tags are created via the :meth:`doapi.create_tag` method and can be
    retrieved with the :meth:`doapi.fetch_tag` and :meth:`doapi.fetch_all_tags`
    methods.

    The DigitalOcean API specifies the following fields for tag objects:

    :var name: the name of the tag
    :vartype name: string

    :var resources: a `dict` mapping resource types (e.g., ``"droplets"``) to
        sub-`dict`\ s containing fields ``"count"`` (the number of resources of
        the given type with the given tag) and ``"last_tagged"`` (the resource
        of the given type to which the tag was most recently applied)
    """

    def __init__(self, state=None, **extra):
        # Accept a bare tag-name string as shorthand for {"name": ...}.
        if isinstance(state, string_types):
            state = {"name": state}
        super(Tag, self).__init__(state, **extra)
        self.setdefault('resources', dict())
        # Promote each raw "last_tagged" payload to its typed Resource
        # class (Droplet / Image / FloatingIP, see `resource_types`).
        for name, cls in iteritems(resource_types):
            if isinstance(self.resources.get(name), dict):
                last_tagged = self.resources[name].get("last_tagged")
                if last_tagged is not None and not isinstance(last_tagged, cls):
                    self.resources[name]["last_tagged"] = \
                        cls(last_tagged, doapi_manager=self.doapi_manager)

    @property
    def url(self):
        """ The endpoint for general operations on the individual tag """
        return self._url('/v2/tags/' + self.name)

    def fetch(self):
        """
        Fetch & return a new `Tag` object representing the tag's current state

        :rtype: Tag
        :raises DOAPIError: if the API endpoint replies with an error (e.g., if
            the tag no longer exists)
        """
        api = self.doapi_manager
        return api._tag(api.request(self.url)["tag"])

    def __str__(self):
        """ Convert the tag object to its name """
        return self.name

    def update_tag(self, name):
        # The `_tag` is to avoid conflicts with MutableMapping.update.
        """
        Update (i.e., rename) the tag

        :param str name: the new name for the tag
        :return: an updated `Tag` object
        :rtype: Tag
        :raises DOAPIError: if the API endpoint replies with an error
        """
        api = self.doapi_manager
        return api._tag(api.request(self.url, method='PUT',
                                    data={"name": name})["tag"])

    def delete(self):
        """
        Delete the tag

        :return: `None`
        :raises DOAPIError: if the API endpoint replies with an error
        """
        self.doapi_manager.request(self.url, method='DELETE')

    def add(self, *resources):
        """
        Apply the tag to one or more resources

        :param resources: one or more `Resource` objects to which tags can be
            applied
        :return: `None`
        :raises DOAPIError: if the API endpoint replies with an error
        """
        self.doapi_manager.request(self.url + '/resources', method='POST',
                                   data={"resources": _to_taggable(resources)})

    def remove(self, *resources):
        """
        Remove the tag from one or more resources

        :param resources: one or more `Resource` objects to which tags can be
            applied
        :return: `None`
        :raises DOAPIError: if the API endpoint replies with an error
        """
        self.doapi_manager.request(self.url + '/resources', method='DELETE',
                                   data={"resources": _to_taggable(resources)})

    def fetch_all_droplets(self):
        r"""
        Returns a generator that yields all of the droplets to which the tag is
        currently applied

        :rtype: generator of `Droplet`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.doapi_manager.fetch_all_droplets(tag_name=self.name)

    def delete_all_droplets(self):
        """
        Delete all of the droplets to which the tag is applied

        :return: `None`
        :raises DOAPIError: if the API endpoint replies with an error
        """
        self.doapi_manager.request('/v2/droplets', method='DELETE',
                                   params={"tag_name": self.name})

    def act_on_droplets(self, **data):
        r"""
        Perform an arbitrary action on all of the droplets to which the tag is
        applied.  ``data`` will be serialized as JSON and POSTed to the proper
        API endpoint.  All currently-documented actions require the POST body
        to be a JSON object containing, at a minimum, a ``"type"`` field.

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        api = self.doapi_manager
        # Wrap each raw action payload in the manager's Action type.
        return map(api._action,
                   api.request('/v2/droplets/actions', method='POST',
                               params={"tag_name": self.name},
                               data=data)["actions"])

    def power_cycle(self):
        r"""
        Power cycle all of the droplets to which the tag is applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='power_cycle')

    def power_on(self):
        r"""
        Power on all of the droplets to which the tag is applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='power_on')

    def power_off(self):
        r"""
        Power off all of the droplets to which the tag is applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='power_off')

    def shutdown(self):
        r"""
        Shut down all of the droplets to which the tag is applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='shutdown')

    def enable_private_networking(self):
        r"""
        Enable private networking on all of the droplets to which the tag is
        applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='enable_private_networking')

    def enable_ipv6(self):
        r"""
        Enable IPv6 networking on all of the droplets to which the tag is
        applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='enable_ipv6')

    def enable_backups(self):
        r"""
        Enable backups on all of the droplets to which the tag is applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='enable_backups')

    def disable_backups(self):
        r"""
        Disable backups on all of the droplets to which the tag is applied

        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='disable_backups')

    def snapshot(self, name):
        r"""
        Create snapshot images of all of the droplets to which the tag is
        applied

        :param str name: the name for the new snapshots
        :return: a generator of `Action`\ s representing the in-progress
            operations on the droplets
        :rtype: generator of `Action`\ s
        :raises DOAPIError: if the API endpoint replies with an error
        """
        return self.act_on_droplets(type='snapshot', name=name)
def _to_taggable(resources):
    """Convert a mixed sequence of resources to the plain values the
    tagging endpoints accept, via each resource's `_taggable()` hook."""
    res = []
    for r in resources:
        try:
            res.append(r._taggable())
        except (AttributeError, TypeError):
            # A Resource without a usable _taggable() hook cannot be tagged.
            if isinstance(r, Resource):
                raise TypeError('Tagging {0!r} objects is not supported'
                                .format(r._class()))
            else:
                # Assume `r` is a "primitive" type
                res.append(r)
    return res
| true
|
d9ce5be78cb22c14c503cca4d6fa615d01366506
|
Python
|
SIGMAOON/Greedy
|
/20300.py
|
UTF-8
| 363
| 2.96875
| 3
|
[] |
no_license
|
# BOJ 20300 ("서강근육맨"): split n kettlebell weights into sessions of
# at most two, minimising the largest per-session total.
import sys
input = sys.stdin.readline

n = int(input())
muscle = list(map(int,input().split()))
muscle.sort()
if n%2 == 1:
    # Odd count: the heaviest weight is lifted alone; greedily pair the
    # remaining weights smallest-with-largest and take the worst session.
    hap = 0
    for i in range((n-1)//2):
        hap = max(hap,muscle[i]+muscle[-i-2])
    print(max(hap,muscle[-1]))
else:
    # Even count: pair smallest with largest across the sorted list.
    hap = 0
    for i in range(n//2):
        hap = max(hap,muscle[i]+muscle[-i-1])
    print(hap)
| true
|
86c867bcc57c186136f640753c039f06fdea30d9
|
Python
|
PriyanshuChatterjee/30-Days-of-Python
|
/day_14/Question_16.py
|
UTF-8
| 161
| 3.21875
| 3
|
[] |
no_license
|
from functools import reduce

numbers = list(range(1, 11))


def add(num1, num2):
    """Binary reducer: return the sum of the two numbers."""
    return num1 + num2


# Fold the list down to a single total with the binary `add`.
total = reduce(add, numbers)
print(total)
| true
|
929217201936de4bdaa29e6147fcfd4a78560f41
|
Python
|
aokirae/mobaPquestionnaire
|
/src/checkNotation.py
|
UTF-8
| 4,456
| 2.96875
| 3
|
[] |
no_license
|
# アイドルの表記ゆれをみる
import csv
import sys
import numpy as np
import pandas as pd
import mojimoji
def openIdolData():
    """Load the official idol roster CSV and return the name column as a list."""
    df = pd.read_csv("../data/アイドルデータ.csv", header = 0, index_col=None)
    df = df.dropna(how='all')  # drop fully-empty rows
    names = df['名前'].tolist()
    return names
def openQuestionnaireIdolname():
    """Load questionnaire answers and flatten every column into one
    sorted list of single-element lists of strings (NaN becomes 'nan')."""
    df = pd.read_csv("../data/アンケートアイドル名一覧.csv", header = None, index_col=None)
    names = []
    for i in range(len(df.columns.tolist())):
        names.extend(df.iloc[:,i])
    # Stringify, wrap each answer in its own group, then sort.
    names = [str(i) for i in names]
    names = [[i] for i in names]
    names.sort()
    return names
# Return only the groups that did not match an official name exactly.
def notPerfectMatching(questionnaire, idolname):
    """Keep the answer groups with at least one name that is not an
    exact member of `idolname`."""
    unmatched = []
    for group in questionnaire:
        if not all(name in idolname for name in group):
            unmatched.append(group)
    return unmatched
# Throw away the 'nan' placeholder groups.
def eraseNan(names):
    """Drop answer groups whose first entry is the string 'nan'."""
    return [group for group in names if group[0] != 'nan']
# "さん","ちゃん","。"を消しとばす
def eraseHonorific(names):
for i in range(len(names)):
for j in range(len(names[i])):
if names[i][j].find('さん') != -1:
names[i][j] = names[i][j][0:names[i][j].find('さん')]
if names[i][j].find('ちゃん') != -1:
if names[i][j].find('ちゃんみお') == -1:
names[i][j] = names[i][j][0:names[i][j].find('ちゃん')]
if names[i][j].find('。') != -1:
names[i][j] = names[i][j][0:names[i][j].find('。')]
return names
# Expand family-only / given-only answers to the full idol name.
def plusLastFirstName(names, idolname):
    """When exactly one official name contains the partial answer as a
    substring, replace the answer with that full name. Mutates in place."""
    for group in names:
        for j, partial in enumerate(group):
            matches = [full for full in idolname if partial in full]
            if len(matches) == 1:
                group[j] = matches[0]
    return names
# "性 名"となってるアイドルをどうにかする
# 1. 空白を区切り文字として文字列分解
# 2. リスト長が2のとき、性と名で部分一致検索。それぞれでアイドル名が一致したらそのアイドル
# 3. それ以外は元に戻す
def compareFirstLastName(names, idolname):
for i in range(len(names)):
for j in range(len(names[i])):
splits = names[i][j].split(' ')
splits = [x for x in splits if x != '']
if len(splits) == 1:
names[i][j] = splits[0]
if len(splits) != 2:
continue
candidate_firstname = []
candidate_lastname = []
for ii in idolname:
if ii.find(splits[0]) != -1:
candidate_firstname.append(ii)
if ii.find(splits[1]) != -1:
candidate_lastname.append(ii)
for icf in candidate_firstname:
for icl in candidate_lastname:
if icf == icl:
names[i][j] = icf
return names
# Break up answers that name several idols in one string.
def splitIdol(names):
    """Split each group's first entry on the first separator that
    actually splits it (half/full-width space, '、', '・', ',', 'とか');
    later separators are not tried. Empty fragments are dropped.
    Mutates `names` in place and returns it."""
    for i, group in enumerate(names):
        parts = [group[0]]
        for sep in (' ', ' ', '、', '・', ',', 'とか'):
            parts = group[0].split(sep)
            if len(parts) > 1:
                break
        names[i] = [p for p in parts if p != '']
    return names
if __name__ == '__main__':
    names = openQuestionnaireIdolname()
    idolname = openIdolData()
    # Convert half-width characters to full-width for uniform matching.
    idolname = [mojimoji.han_to_zen(i) for i in idolname]
    # Normalisation pipeline: drop exact matches, strip honorifics,
    # expand partial names, resolve "family given" forms, split
    # multi-idol answers — then keep whatever still fails to match.
    names = notPerfectMatching(names, idolname)
    names = eraseNan(names)
    names = eraseHonorific(names)
    names = notPerfectMatching(names, idolname)
    names = plusLastFirstName(names, idolname)
    names = notPerfectMatching(names, idolname)
    names = compareFirstLastName(names, idolname)
    names = notPerfectMatching(names, idolname)
    names = splitIdol(names)
    names = eraseHonorific(names)
    names = plusLastFirstName(names, idolname)
    names = compareFirstLastName(names, idolname)
    names = notPerfectMatching(names, idolname)
    # Write the remaining spelling variants, one group per line, with
    # entries joined by '_'.
    with open('../data/表記ゆれ.csv', mode='w') as f:
        for i in range(len(names)):
            f.write(names[i][0])
            for j in range(1,len(names[i])):
                f.write('_')
                f.write(names[i][j])
            f.write('\n')
| true
|
2a265122875d3349595394bb04895151f66ce5f2
|
Python
|
Horta-IoT/horta-iot
|
/client-sub/sub.py
|
UTF-8
| 1,645
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# Source: https://pypi.python.org/pypi/paho-mqtt/1.3.0
import paho.mqtt.client as mqtt
import sys
HOSTNAME = "mosquitto"
# HOSTNAME = "10.73.21.215"
PORT = 1883
TOPIC = 'temp/random'
def log(topic, payload):
    """Write one reading to stderr as '[sub][<topic>] < <value:.1f>'."""
    message = "[sub][{}] < {:.1f}\n".format(topic, payload)
    sys.stderr.write(message)
    sys.stderr.flush()
def info(message):
    """Write an informational line to stderr and flush immediately."""
    sys.stderr.write("[sub][info] %s\n" % (message,))
    sys.stderr.flush()
def error(message):
    """Write an error line to stderr and flush immediately."""
    sys.stderr.write("[sub][err] %s\n" % (message,))
    sys.stderr.flush()
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    # rc == 0 means success; anything else is reported. NOTE(review):
    # the subscribe still runs after a failed CONNACK — confirm intent.
    if rc != 0:
        error("Connected with result code " + str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    # client.subscribe("$SYS/#")
    client.subscribe(TOPIC)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    # Payload is parsed as a float, so it must be a numeric reading.
    log(msg.topic, float(msg.payload))
def main():
    """Connect to the MQTT broker and process traffic until Ctrl-C."""
    client = mqtt.Client()
    # Route paho's internal diagnostics through the logging module.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    client.enable_logger()
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect(HOSTNAME, PORT, 60)
    # Blocking call that processes network traffic, dispatches callbacks and
    # handles reconnecting.
    # Other loop*() functions are available that give a threaded interface and a
    # manual interface.
    try:
        client.loop_forever()
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
main()
| true
|
f239fdff7c8a9c2509c20b14c44b164d3f7ce474
|
Python
|
Mauricio1xtra/Revisar_Python_JCAVI
|
/Proj_class7/modulo_prog.py
|
UTF-8
| 344
| 3.6875
| 4
|
[] |
no_license
|
##Funções no Modulo
def soma(x, y):
    """Return the sum of the two values."""
    result = x + y
    return result
def calculaMaximo(x, y, z):
    """Print and return the largest of the three values.

    Previously the maximum was only printed and the function returned
    None; returning it as well makes the result usable programmatically
    while keeping the printed output unchanged (callers that ignored the
    old None return are unaffected).
    """
    maior = max(x, y, z)
    print(maior)
    return maior
##Everything below runs only when this module is executed directly
#(not when it is imported by another module)
if __name__ == "__main__":
    print(soma(2,3))
    print("Executei Sozinho")
    nome = "Mauricio"
|
8c61c022e973ec5715367411b0de962e3db2cb65
|
Python
|
wanghaibo1996/PytestAuto_Security_WZ_Test
|
/util/clipboard.py
|
UTF-8
| 591
| 2.71875
| 3
|
[] |
no_license
|
import win32con
import win32clipboard as WC
class ClipBoard(object):
    '''Read and write the Windows clipboard via the win32 API.'''

    @staticmethod
    def getText():
        '''Return the current clipboard contents as CF_TEXT.'''
        WC.OpenClipboard()
        value = WC.GetClipboardData(win32con.CF_TEXT)
        WC.CloseClipboard()
        return value

    @staticmethod
    def setText(value):
        '''Replace the clipboard contents with `value` as Unicode text.'''
        WC.OpenClipboard()
        WC.EmptyClipboard()  # clear previous contents before writing
        WC.SetClipboardData(win32con.CF_UNICODETEXT, value)
        WC.CloseClipboard()
if __name__ == '__main__':
pass
| true
|
ca7bf68c607564bb40286df2d82079d14df511cb
|
Python
|
jzengler/IN1000
|
/Innlevering 4/egen_oppgave.py
|
UTF-8
| 2,423
| 4.53125
| 5
|
[] |
no_license
|
'''
Lag et program som lar brukeren administrere biler og registreringsnummer.
Brukeren skal kunne legge til, slette eller skrive ut en oversikt.
Bruk løkker og lister for å løse oppgaven
'''
# Liste til å holde registreringsnummer, bilmerke, modell og farge
# kan skrives/fra fil hvis det skal lagres mellom ekskeveringer av programmet
regNummer = []
# Collect one car from the user and return it as a record.
def leggTilBil():
    # Local list holding the user's answers.
    bil = []
    # Registration number, make, model and colour — casing normalised.
    bil.append( input("Skriv inn registreringsnummer: ").upper() )
    bil.append( input("Skriv inn bilmerke: ").title() )
    bil.append( input("Skriv inn bilmodell: ").upper() )
    bil.append( input("Skriv inn farge: ").lower() )
    # Return the whole record.
    return bil
# Remove a car from the registry list.
def slettBil(regNummer):
    # Ask which registration number to delete.
    slettReg = input("Skriv inn registreringsnummer på bilen du vil slette: ").upper()
    # Scan the nested car records for the registration number.
    # NOTE(review): removing while iterating skips the following element;
    # harmless if registration numbers are unique — confirm.
    for bil in regNummer:
        # When found, drop that car's record.
        if slettReg in bil:
            regNummer.remove(bil)
            print(bil[1], "med registreringsnummer", slettReg, "ble slettet!")
    # Return the updated list.
    return regNummer
# Print a one-line summary for every registered car.
def printOversikt(regNummer):
    # Each record is [reg-number, make, model, colour].
    for bil in regNummer:
        print("RegNr:", bil[0], "er en", bil[3], bil[1], bil[2])
# Show the menu and dispatch to the chosen operation.
def meny(regNummer):
    # Print the menu options.
    print("\n1: Legg til bil")
    print("2: Slett bil")
    print("3: Skriv ut oversikt")
    print("9: Avslutt program\n")
    # Read the user's menu choice.
    valg = int(input("Skriv inn menyvalg: \n"))
    # Dispatch according to the choice; results are stored back into
    # the same list that was passed in.
    if valg == 1:
        regNummer.append( leggTilBil() )
    elif valg == 2:
        regNummer = slettBil(regNummer)
    elif valg == 3:
        printOversikt(regNummer)
    elif valg == 9:
        exit()
    else:
        print("Ugyldig menyvalg!")
# Main loop: keep showing the menu until the user exits via option 9.
while True:
    meny(regNummer)
| true
|
3dbe7b36da796347f3e5f65a231c2f6918f212b0
|
Python
|
dmmfix/amath-352
|
/h6/tabular.py
|
UTF-8
| 678
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/local/bin/python3
import math
# Sample input sizes for the growth-rate table.
vals = [2, 4, 8, 16, 64, 512]
# (growth function, LaTeX label) pairs, in increasing asymptotic order.
fns = [
    [lambda x: x, 'n'],
    [lambda x: x * math.log2(x), 'n\log_2(n)'],
    [lambda x: x ** 2, 'n^2'],
    [lambda x: x ** 3, 'n^3'],
    [lambda x: 2 ** x, '2^n'],
    [lambda x: math.factorial(x), 'x!']
]

# Emit one LaTeX table row per function: the exact (floored) value when
# small, scientific notation otherwise.
for f in fns:
    print('\\hline')
    print('${}$'.format(f[1]), end='')
    for v in vals:
        n = f[0](v)
        if n < 100000:
            print(' & {}'.format(math.floor(n)), end='')
        else:
            # Large n here is an int (big powers / factorials), so str(n)
            # is all digits: first digit + next three form the mantissa.
            log10n = math.floor(math.log10(n))
            strn = str(n)
            print(' & ${}.{} \\times 10^{{{}}}$'.format(strn[0], strn[1:4], log10n), end='')
    print(' \\\\')
| true
|
e674af34d0abdba51912907fdef58bf30e224447
|
Python
|
oucsealee/scipysample
|
/test/testPandasReadExcel.py
|
UTF-8
| 273
| 2.5625
| 3
|
[] |
no_license
|
#-*- coding:utf-8 -*-
import pandas as pd
import pandasReadExcel
def main():
data_frame = pandasReadExcel.read_from_excel()
print(data_frame)
last_data = pandasReadExcel.get_last_red_data(data_frame)
print (last_data)
if __name__ == '__main__':
main()
| true
|
b1c41114752aa0aa23e004031fcefafd4f91d83a
|
Python
|
plee0117/LC
|
/238.py
|
UTF-8
| 368
| 2.75
| 3
|
[] |
no_license
|
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list where entry i is the product of every element
        of nums except nums[i], without using division."""
        n = len(nums)
        result = [1] * n
        # Forward pass: result[i] holds the product of nums[:i].
        acc = 1
        for i in range(n):
            result[i] = acc
            acc *= nums[i]
        # Backward pass: multiply in the product of nums[i+1:].
        acc = 1
        for i in range(n - 1, -1, -1):
            result[i] *= acc
            acc *= nums[i]
        return result
| true
|
2a8028e176e1013a49931aebef967331e6b31afd
|
Python
|
wangpanqiao/Circos_python_config
|
/Pylomaker3.py
|
UTF-8
| 3,527
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from optparse import OptionParser
import sys
import subprocess
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio import SeqIO
import os
from Bio.Align.Applications import ClustalwCommandline
def dowhat():
    """Describe the script's purpose on stdout (Python 2 print statement)."""
    print "Gives you an iterative mutations made from germline starting with most conserved.\n"
def call_clustal(string):
    """Run clustalw2 on the FASTA file at `string` and return its stdout."""
    cline = ClustalwCommandline("clustalw2", infile=string)
    # shell=True everywhere except Windows, following Biopython's examples.
    process = subprocess.Popen(str(cline), shell=(sys.platform!="win32"), stdout=subprocess.PIPE)
    return process.communicate()[0]
def get_scores(logfile):
    """Return the first 'Score:' value found in a ClustalW log, or None."""
    for row in logfile.split("\n"):
        if "Score:" in row:
            return int(row.split(":")[-1])
    return None
def main():
    """Align every input sequence against the germline with ClustalW and
    append each alignment plus its score to the output file.

    Review fixes: (1) the header was written via `open(...).write(...)`,
    binding None and leaking the handle — now a `with` block; (2)
    `clustalhandle.close` was referenced without parentheses, so no
    handle was ever closed — file writes now use context managers; (3)
    each alignment was written and run twice per sequence for no effect
    — the redundant second pass is removed.
    """
    ####Parser options####
    parser = OptionParser()
    parser.add_option("-i", "--input", action="store", dest="input", help="input file to make phylotree")
    parser.add_option("-g", "--germline", action="store", dest="germline", help="germline fasta")
    parser.add_option("-o", "--output" , action="store", dest="output", help="the file where you want all your data")
    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        dowhat()
        parser.print_help()
        exit()
    ####Begin Declarations###
    # Start the output file with a header.
    with open(options.output, 'w') as fileout:
        fileout.write("Your Sequence Results:\n\n")
    # Parse the germline sequence and the target sequences.
    germobject = SeqIO.read(options.germline, "fasta")
    SO = list(SeqIO.parse(options.input, "fasta"))
    for sequence in SO:
        # Write germline + candidate to the ClustalW scratch file.
        with open("clustalentry.fasta", 'w') as clustalhandle:
            SeqIO.write(germobject, clustalhandle, "fasta")
            SeqIO.write(sequence, clustalhandle, "fasta")
        # Run ClustalW; its log contains the alignment score.
        current_log = call_clustal("clustalentry.fasta")
        current_score = get_scores(current_log)
        # Append the alignment and its score to the output file.
        with open(options.output, 'a') as outhandle:
            with open("clustalentry.aln") as alnfile:
                for line in alnfile:
                    outhandle.write(line)
            outhandle.write("With Score of : " + str(current_score) + "\n")
        # Leftover scaffolding from the iterative-germline idea:
        # SO.remove(sequence_in_question)
        # germobject = sequence_in_question
def cleanup():
    """Delete the ClustalW scratch files left in the working directory."""
    for scratch in ("clustalentry.aln", "clustalentry.fasta", "clustalentry.dnd"):
        os.remove(scratch)
if __name__ == "__main__":
main()
cleanup()
| true
|
c24ca456ac22dd9cedb67c1336e1e1ca0930be54
|
Python
|
DaiJitao/algorithm
|
/solution/binary.py
|
UTF-8
| 906
| 3.84375
| 4
|
[] |
no_license
|
def binaryVal(arr, e):
    """Binary search: return the index of `e` in the sorted list `arr`,
    or -1 when it is absent.

    Review fixes: the old third branch was guarded by `midd < len(arr)`,
    which is always true (mid lies in [minIndex, maxIndex]); had it ever
    been false, no branch would have run and the loop would have spun
    forever. The equality case is now a plain `else`.
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        # Overflow-safe midpoint form kept from the original.
        mid = lo + ((hi - lo) >> 1)
        if e < arr[mid]:
            hi = mid - 1
        elif e > arr[mid]:
            lo = mid + 1
        else:
            return mid
    return -1
def binFind(arr, p):
    """Return the index of p in the sorted list arr (binary search), or -1."""
    if not arr:
        return -1
    if len(arr) == 1:
        return 0 if arr[0] == p else -1
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = arr[mid]
        if value == p:
            return mid
        if value < p:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
if __name__ == '__main__':
    arr = [1, 4, 5, 6, 7, 9]
    # NOTE(review): this first result is immediately overwritten by the
    # binFind call below; only the binFind result is printed.
    res = binaryVal(arr, 6)
    res = binFind(arr, p=6)
    print(res)
| true
|
465fbdc4d672c7a9268e221698cf591d12c4f6c6
|
Python
|
lyleaf/sound-fun
|
/tempo_sync_animation_demo.py
|
UTF-8
| 2,400
| 2.671875
| 3
|
[] |
no_license
|
import cv2
import librosa
import librosa.display
import wave
import pyaudio
import numpy as np  # bug fix: np.multiply / np.random below were used without importing numpy
from cv2 import VideoWriter, VideoWriter_fourcc

VIDEO_PATH = 'Demo_Full_1.mp4'
video = cv2.VideoCapture(VIDEO_PATH)
# Read a video to get FPS
fps = video.get(cv2.CAP_PROP_FPS)
print('Image frames per second is %d' % fps)
video.release()
# Create audio mp3 from mp4
# ffmpeg -i holdon.mp4 holdon.mp3 not the same length
# ffmpeg -i holdon.mp4 -async 1 holdon.wav
AUDIO_PATH = 'Demo_Full_1.wav' # difference between wav and mp3
seconds = 5
y, sr = librosa.load(AUDIO_PATH)
print(int(len(y)))
print('Sample rate %d' % sr)
time = int(len(y)/sr)
print('Song time %d' % time)
# Estimate the beat period (60/tempo seconds) on a sliding 5-second window
# starting at every whole second of the track.
interval = []
for i in range(time):
    sample_frames = sr * seconds
    sample = y[i*sr:i*sr+sample_frames]
    tempo, beat_frames = librosa.beat.beat_track(y=sample, sr=sr) #y should be audio time series np.ndarray [shape=(n,)] or None
    #print('Estimated tempo: {:.2f} beats per minute'.format(tempo))
    interval.append(60/tempo)
print(interval)
# Downbeat timestamps (seconds): start at t=5 and step by the local beat period.
downtempo = [] #in seconds
downtempo.append(5)
while(downtempo[-1] <= time):
    downtempo.append(interval[int(downtempo[-1])-5]+downtempo[-1])
print(downtempo)
# Upbeats sit halfway between consecutive downbeats.
uptempo = []
for i in range(len(downtempo)-1):
    uptempo.append((downtempo[i]+downtempo[i+1])/2)
# Interleave down/up beats and convert seconds -> frame indices.
tempo = []
for i in range(len(uptempo)):
    tempo.append(downtempo[i])
    tempo.append(uptempo[i])
tempo_frames = np.multiply(tempo,fps).astype(int)
print(tempo_frames)
# Create output video with animation
width = 1280
height = 720
FPS = fps
radius = 150
paint_h = int(height/2)
paint_x = int(width/2)
fourcc = VideoWriter_fourcc(*'MP42')
video = VideoWriter('./Demo_Full_1_noises.avi', fourcc, float(FPS), (width, height))
index = 0
black = False
for timestamp in range(int(FPS)*time):
    # Random-noise background frame.
    frame = np.random.randint(0, 256,
                              (height, width, 3),
                              dtype=np.uint8)
    if ((index+2 < len(tempo_frames)-1) and (timestamp >= tempo_frames[index]) and (timestamp < tempo_frames[index+2])):
        print(timestamp)
        if (timestamp < tempo_frames[index+1]):
            # Flash a black circle from downbeat to the following upbeat.
            cv2.circle(frame, (paint_x, paint_h), radius, (0, 0, 0), -1)
            print('add frame')
    elif ((index+2 < len(tempo_frames)-1) and (timestamp >= tempo_frames[index+2])):
        index = index+2
    video.write(frame)
video.release()
# ffmpeg -i Demo_Full_1_noises.avi NOISES.mp4
| true
|
b6a13fc6df55777f31afe900bb6ec6d38c517d59
|
Python
|
bernease/Mmani
|
/benchmarks/bench_laplacian_dense_temp.py
|
UTF-8
| 5,374
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
"""
Benchmarks of geometry functions (distance_matrix, affinity_matrix,
graph_laplacian) in sparse vs dense representation.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
TODO: generate the data -- what should it be
with precomputing of the index?
(todo next: benchmarks for rmetric, for spectral embedding, for other methods like isomap)
"""
import gc #the garbage collector
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_swiss_roll
def compute_bench(n_samples, n_features, rad0, dim, quiet = False):
    """Time distance_matrix / affinity_matrix / graph_laplacian on swiss-roll data.

    For each sample count in n_samples and each feature count in n_features,
    builds a swiss roll padded with noise dimensions and records the
    *cumulative* wall-clock time after each of the three geometry steps.
    Returns three parallel lists of timings.

    NOTE(review): distance_matrix / affinity_matrix / graph_laplacian are
    expected to be in the global namespace (imported in __main__ via
    `from Mmani.embedding.geometry import *`) - confirm before reuse.
    """
    dense_d_results = []
    dense_a_results = []
    dense_l_results = []
    it = 0
    for ns in n_samples:
        # make a dataset
        X, t = make_swiss_roll( ns, noise = 0.0 )
        X = np.asarray( X, order="C" )
        for nf in n_features:
            it += 1
            rad = rad0/ns**(1./(dim+6)) #check the scaling
            if not quiet:
                print('==================')
                print('Iteration %s of %s' % (it, max(len(n_samples),
                                                      len(n_features))))
                print('==================')
                print( 'rad=', rad, 'ns=', ns )
            if nf < 3:
                raise ValueError('n_features must be at least 3 for swiss roll')
            else:
                # add noise dimensions up to n_features
                n_noisef = nf - 3
                noise_rad_frac = 0.1
                noiserad = rad/np.sqrt(n_noisef) * noise_rad_frac
                Xnoise = np.random.random((ns, n_noisef)) * noiserad
                # NOTE(review): X is re-bound here, so when len(n_features) > 1
                # the noise columns accumulate across inner-loop iterations and
                # X ends up wider than nf - confirm whether intended.
                X = np.hstack((X, Xnoise))
                rad = rad*(1+noise_rad_frac) # add a fraction for noisy dimensions
            gc.collect()
            if not quiet:
                print("- benchmarking dense")
            tstart = time()
            dists = distance_matrix(X, flindex = None, mode='radius_neighbors',
                                    neighbors_radius=rad*1.5 )
            # Timings below are cumulative from tstart, not per-step deltas.
            dense_d_results.append(time() - tstart)
            A = affinity_matrix( dists, rad )
            dense_a_results.append(time() - tstart)
            lap = graph_laplacian(A, normed='geometric', symmetrize=False, scaling_epps=rad, return_lapsym=False)
            dense_l_results.append(time() - tstart)
            gc.collect()
    return dense_d_results, dense_a_results, dense_l_results
if __name__ == '__main__':
    import sys
    import os
    path = os.path.abspath('..')
    sys.path.append(path)
    # path = os.path.abspath('../..')
    # sys.path.append(path)
    from Mmani.embedding.geometry import *
    import pylab as pl
    import scipy.io
    is_save = True
    if sys.argv.__len__() > 1:
        # NOTE(review): bool() of any non-empty string is True, so even
        # "0" or "False" on the command line enables saving - confirm intent.
        is_save = bool(sys.argv[1])
    rad0 = 2.5
    dim = 2
    n_features = 100
    # list_n_samples = np.linspace(500, 1000, 2).astype(np.int)
    # NOTE(review): np.int was removed in NumPy 1.24; needs int/np.int64 on modern NumPy.
    list_n_samples = np.logspace(4, 6, 7).astype(np.int)
    # Benchmark 1: fixed feature count, growing sample count.
    dense_d_results, dense_a_results, dense_l_results = compute_bench(list_n_samples,
                                [n_features], rad0, dim, quiet=False)
    save_dict = { 'ns_dense_d_results':dense_d_results, 'ns_dense_a_results':dense_a_results, 'ns_dense_l_results':dense_l_results }
    if is_save:
        scipy.io.savemat( 'results_bench_laplacian_dense.mat', save_dict )
    pl.figure('Mmani.embedding benchmark results')
    pl.subplot(211)
    pl.plot(list_n_samples, dense_d_results, 'b-',
            label=' distance matrix')
    pl.plot(list_n_samples, dense_a_results, 'b:',
            label=' affinity matrix')
    pl.plot(list_n_samples, dense_l_results, 'b-.',
            label=' laplacian')
    pl.title(' %d features' % (n_features))
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.yscale( 'log' )
    pl.xscale( 'log' )
    # Benchmark 2: fixed sample count, growing feature count.
    n_samples = 2000
    # list_n_features = np.linspace(50, 1000, 2).astype(np.int)
    list_n_features = np.logspace(1,4,10).astype(np.int)
    dense_d_results, dense_a_results, dense_l_results = compute_bench([n_samples], list_n_features, rad0, dim, quiet=False)
    nf_dict = { 'nf_dense_d_results':dense_d_results, 'nf_dense_a_results':dense_a_results, 'nf_dense_l_results':dense_l_results }
    save_dict.update( nf_dict )
    if is_save:
        scipy.io.savemat( 'results_bench_laplacian_dense.mat', save_dict )
    pl.subplot(212)
    pl.plot(list_n_features, dense_d_results, 'b-',
            label='distance matrix')
    pl.plot(list_n_features, dense_a_results, 'b:',
            label='affinity matrix')
    pl.plot(list_n_features, dense_l_results, 'b-.',
            label='laplacian')
    pl.title('data index built every step, %d samples' % (n_samples))
    pl.legend(loc='upper left')
    pl.xlabel('number of featureses')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.yscale( 'log' )
    pl.xscale( 'log' )
    if is_save:
        pl.savefig('results_bench_laplacian_dense'+'.png', format='png')
    else:
        pl.show()
| true
|
341b95915298fe3c1249fd55c8288e35ca7e2b51
|
Python
|
tabatafeeh/URI
|
/python/2867.py
|
UTF-8
| 191
| 3.171875
| 3
|
[] |
no_license
|
# URI 2867: for each test case, print the number of decimal digits of n ** m.
case_count = int(input())
for _ in range(case_count):
    # Keep the exact single-space split of the original input format.
    n_text, m_text = input().split(" ")
    digits = str(int(n_text) ** int(m_text))
    print(len(digits))
| true
|
f38f34b2a53658dad12051d3e347c090cfe9dc25
|
Python
|
TakahiroSono/atcoder
|
/practice/python/dpcontest/D.py
|
UTF-8
| 918
| 2.703125
| 3
|
[] |
no_license
|
# N, W = map(int, input().split())
# W_V = [list(map(int, input().split())) for _ in range(N)]
# dp = [[0] * (W+1) for _ in range(N+1)]
# for n in range(1, N+1):
# for w in range(W+1):
# dp[n][w] = dp[n-1][w]
# if(w - W_V[n-1][0] >= 0):
# dp[n][w] = max(dp[n][w], dp[n-1][w - W_V[n-1][0]] + W_V[n-1][1])
# print(dp[-1][-1])
# import numpy as np
# N, W = map(int, input().split())
# items = [tuple(map(int, input().split())) for _ in range(N)]
# dp = np.zeros(shape=W + 1, dtype=np.int64)
# for weight, value in items:
# dp[weight:] = np.maximum(dp[weight:], dp[:-weight] + value)
# print(dp[-1])
# print(dp)
# 0/1 knapsack via a rolling 1-D table: best[c] holds the maximum value
# achievable with capacity c using the items read so far.  Iterating the
# capacities downwards ensures each item is used at most once; the final
# answer equals the classic 2-D dp[N][W].
N, W = map(int, input().split())
best = [0] * (W + 1)
for _ in range(N):
    w, v = map(int, input().split())
    for capacity in range(W, w - 1, -1):
        best[capacity] = max(best[capacity], best[capacity - w] + v)
print(best[-1])
| true
|
567caf26aa33e46aaa95d831ff4b5ea1f4b126eb
|
Python
|
rado9818/TV-programs
|
/TimeUtil.py
|
UTF-8
| 428
| 3.40625
| 3
|
[] |
no_license
|
from datetime import datetime
from Constants import SECONDS_IN_MINUTE
import time
def getDifference(start, end):
    """Return (start - end) in minutes; both arguments are 'HH:MM' strings."""
    parsed_start = datetime.strptime(start, "%H:%M")
    parsed_end = datetime.strptime(end, "%H:%M")
    delta = parsed_start - parsed_end
    return delta.total_seconds() / SECONDS_IN_MINUTE
def getCurrentTime():
    """Return the current local time formatted as 'HH:MM'."""
    # strftime can emit the colon directly; no need to split and re-join.
    return time.strftime("%H:%M")
| true
|
48052e8ced5575fa301fe2e428440549fe744427
|
Python
|
Bulgakoff/hws_PyQt
|
/lesson_6/codes_6/03_pycrypto_encrypt.py
|
UTF-8
| 3,599
| 3.234375
| 3
|
[] |
no_license
|
# ========================= Аспекты безопасности ==============================
# ------------- Модуль PyCrypto для криптографических функций в Питоне --------
# ------------------------- Шифрование сообщений ------------------------------
# Библиотека PyCrypto реализует криптографические примитивы и функции на Питоне.
# Однако данная библиотека не обновляется с 2014 года.
# PyCryptodome (PyCryptoDomeEx) - это fork библиотеки PyCrypto, развивается.
# Код проекта: https://github.com/Legrandin/pycryptodome
# Установка: pip install pycryptodome
# PyCryptodome совместима по API с PyCrypto,
# PyCryptoDomeEx - дополняет/изменяет исходный API.
from binascii import hexlify
from Cryptodome.Cipher import AES
from icecream import ic
# Для шифрования данных в PyCryptodome есть поддержка нескольких алгоритмов:
# - блочные шифры: AES, DES, 3DES, Blowfish
# - поточные шифры: Salsa20, ChaCha20
# Sample message for the demo below (bytes; later padded to a 16-byte multiple).
plaintext = b"The rain in Spain"
def padding_text(text):
    """Pad `text` (bytes) with trailing spaces up to a multiple of 16 bytes.

    Returns `text` unchanged when its length is already a multiple of 16.
    """
    remainder = len(text) % 16
    if remainder == 0:
        return text
    return text + b" " * (16 - remainder)
def _encrypt(plaintext, key):
    """Encrypt `plaintext` with AES-CBC under `key`.

    The cipher's iv attribute is the initialisation vector.  When not set
    explicitly at cipher creation it is generated randomly, so it is
    prepended to the final ciphertext to make decryption possible.
    """
    cipher = AES.new(key, AES.MODE_CBC)
    ciphertext = cipher.iv + cipher.encrypt(plaintext)
    return ciphertext
def _decrypt(ciphertext, key):
    """Decrypt `ciphertext` (iv-prefixed) with AES-CBC under `key`.

    The initialisation vector is taken from the start of the ciphertext;
    for most cipher modes its length is always 16 bytes.  The remainder of
    the ciphertext is what actually gets decrypted.
    """
    cipher = AES.new(key, AES.MODE_CBC, iv=ciphertext[:16])
    msg = cipher.decrypt(ciphertext[16:])
    return msg
# Encrypt the message with the AES algorithm.
# key (byte string) - the secret key for symmetric encryption.
# The key must be 16 (AES-128), 24 (AES-192) or 32 (AES-256) bytes long.
key = b"Super Secret Key"
# The message length must be a multiple of 16, so pad it first.
ic(plaintext)
plaintext = padding_text(plaintext)
ic(plaintext)
# Perform encryption.
encryped_data = _encrypt(plaintext, key)
ic(encryped_data)
# Perform decryption.
msg = _decrypt(encryped_data, key)
ic(msg)
| true
|
b4382c17d4b50a12515d327dbc3c93bfc6c99e4a
|
Python
|
MacJim/CMPT-762-Assignment-3-Code
|
/helper/segmentation_path.py
|
UTF-8
| 1,105
| 3.140625
| 3
|
[] |
no_license
|
"""
Segmentation path helpers.
"""
import typing
import copy
def fit_segmentation_path_in_crop_box(path: typing.List[int], crop_box_x0, crop_box_y0, crop_box_w, crop_box_h, set_outside_values_to_boundary_value=True):
    """Translate a flat [x0, y0, x1, y1, ...] path into crop-box coordinates.

    Each coordinate is shifted so the crop box's top-left corner becomes the
    origin.  When set_outside_values_to_boundary_value is True, coordinates
    outside the box are clamped to [0, crop_box_w] / [0, crop_box_h].
    Returns a new list; the input is not modified.
    """
    return_value = copy.copy(path)
    crop_box_x1 = crop_box_x0 + crop_box_w
    crop_box_y1 = crop_box_y0 + crop_box_h

    def get_cropped_x_value(x: int) -> int:
        if (set_outside_values_to_boundary_value):
            if (x < crop_box_x0):
                return 0
            elif (x > crop_box_x1):
                # Bug fix: previously returned the absolute coordinate
                # crop_box_x1; all other paths return crop-relative values,
                # so the upper clamp must be the box width.
                return crop_box_w
        return (x - crop_box_x0)

    def get_cropped_y_value(y: int) -> int:
        if (set_outside_values_to_boundary_value):
            if (y < crop_box_y0):
                return 0
            elif (y > crop_box_y1):
                # Bug fix: same as above, relative upper clamp (box height).
                return crop_box_h
        return (y - crop_box_y0)

    # Even indices are x coordinates, odd indices are y coordinates.
    for i in range(0, len(return_value), 2):
        return_value[i] = get_cropped_x_value(return_value[i])
    for j in range(1, len(return_value), 2):
        return_value[j] = get_cropped_y_value(return_value[j])
    return return_value
| true
|
194d7ef18596e430a96576c5748de088c081a6f6
|
Python
|
tyler-fishbone/data-structures-and-algorithms
|
/challenges/print-level-order/print_level_order.py
|
UTF-8
| 870
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
from queue import Queue
from k_tree import Node, KTree
def print_level_order(input_tree):
    """
    Takes in a k-ary tree and returns a string that contains a listing of all
    values in the tree in breadth-first order, with a newline between levels.
    """
    output_lst = []
    queue = Queue()
    if input_tree._size > 0:
        queue.enqueue(input_tree.root)
        # Sentinel node marking the end of a level.
        # Bug fix: the marker was the two-character literal '/n'; the
        # docstring promises real newlines, i.e. '\n'.
        queue.enqueue(Node('\n'))
        while len(queue) > 0:
            current = queue.dequeue()
            if current.val == '\n':
                # Finished a level: queue a fresh sentinel behind its children.
                queue.enqueue(Node('\n'))
            else:
                for child in current.children:
                    queue.enqueue(child)
            output_lst.append(current.val)
            # Only the sentinel remains: every real node has been emitted.
            if queue._size <= 1:
                break
    print(output_lst)
    output_string = ''.join(str(x) for x in output_lst)
    return output_string
| true
|
e2a15e87b18c46d8f2168560a59a5fda68aaf2ac
|
Python
|
cstapler/pythice
|
/big_sorting.py
|
UTF-8
| 550
| 4.09375
| 4
|
[] |
no_license
|
def main():
    """Implement Bucketsort on an array with really big numbers.

    Values are bucketed by digit count; within a bucket, lexicographic
    order equals numeric order because all strings have the same length.
    """
    line_count = int(input())
    buckets = {}
    for _ in range(line_count):
        value = input().strip()
        buckets.setdefault(len(value), []).append(value)
    for length in sorted(buckets):
        for value in sorted(buckets[length]):
            print(value)


if __name__ == "__main__":
    main()
| true
|
4fd78761d52660c43bcd785bc048f3f6f946f563
|
Python
|
frc3512/Robot-2020
|
/tools/flywheel_ols.py
|
UTF-8
| 2,833
| 3.046875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env python3
"""Runs OLS on flywheel velocity recordings generated from step voltage inputs.
"""
import math
import matplotlib.pyplot as plt
import numpy as np

filename = "Flywheel characterization.csv"

# Get labels from first row of file
with open(filename) as f:
    labels = [x.strip('"') for x in f.readline().rstrip().split(",")]

# Retrieve data from remaining rows of file
print(f"Plotting (unknown)")
data = np.genfromtxt(filename, delimiter=",", skip_header=1, skip_footer=1)
# Columns: 0 = time (s), 1 = input voltage (V), 2 = angular velocity (rad/s).
ts = data[:, 0]
us = data[:, 1]
xs = np.zeros((len(ts), 1))
for i in range(len(ts)):
    xs[i, 0] = data[i, 2]
# Regression target: next-sample velocity.
y = np.zeros((len(ts) - 1, 1))
for i in range(y.shape[0]):
    y[i, 0] = xs[i + 1, 0]
# Regressors: current velocity, current input, sign of velocity (static friction).
X = np.zeros((len(ts) - 1, 3))
for i in range(X.shape[0]):
    X[i, 0] = xs[i, 0]
    X[i, 1] = us[i]
    X[i, 2] = np.sign(xs[i, 0])
# Calculate b = β that minimizes u'u.
b = np.linalg.solve(X.T @ X, X.T) @ y
alpha = b[0, 0]
beta = b[1, 0]
gamma = b[2, 0]
n = X.shape[0]
# NOTE(review): SSE here is the *unsquared* residual 2-norm; sum of squared
# errors would be np.linalg.norm(...) ** 2 - confirm.
sse = np.linalg.norm(y - X @ b, ord=2)
# NOTE(review): SSTO should be y'y - (sum(y))**2 / n; the second term here is
# y'y / n, which is not the mean correction - confirm.
ssto = ((y.T @ y) - (1 / n) * (y.T @ y))[0, 0]
r2 = (ssto - sse) / ssto
adjR2 = 1 - (1 - r2) * ((n - 1.0) / (n - 3))
print(f"R^2={adjR2}")
# Average sample period, ignoring gaps/backwards jumps between recordings.
dts = []
for i in range(1, len(ts)):
    dt = ts[i] - ts[i - 1]
    if dt > 0.0 and dt < 1.0:
        dts.append(dt)
T = sum(dts) / len(dts)
print(f"T = {T}")
# Back out the continuous feedforward constants from the discrete fit.
Ks = -gamma / beta
Kv = (1.0 - alpha) / beta
Ka = (alpha - 1.0) * T / (beta * math.log(alpha))
print(f"Ks={Ks}")
print(f"Kv={Kv}")
print(f"Ka={Ka}")
# First label is x axis label (time). The remainder are dataset names.
plt.figure()
plt.title("Flywheel inputs")
plt.xlabel("Time (s)")
plt.ylabel("Inputs")
plt.plot(ts, us, label="Input voltage (V)")
plt.legend()
plt.figure()
plt.title("Flywheel outputs")
plt.xlabel("Time (s)")
plt.ylabel("Measurements")
plt.plot(ts, xs, label="Angular velocity measurements (rad/s)")
plt.legend()
plt.figure()
plt.title("Flywheel simulation")
plt.xlabel("Time (s)")
plt.plot(ts, xs, label="Angular velocity measurements (rad/s)")
# Continuous model dx/dt = Ax + Bu + c reconstructed from the fit.
A = -Kv / Ka
B = 1.0 / Ka
c = -Ks / Ka
pxs = []
pys = []
# Get data list and initialize model state
pxs.append(ts[0])
pys.append(xs[0, 0])
x = xs[0, 0]
t = ts[0]
for j in range(1, xs.shape[0]):
    dt = ts[j] - ts[j - 1]
    t += dt
    # If there's a large gap or the time went backwards, it's a new
    # section of data, so reset the model state
    if dt < 0.0 or dt > 1.0:
        plt.plot(pxs, pys, label="Angular velocity (rad/s)")
        pxs = []
        pys = []
        x = xs[j]
        continue
    # Given dx/dt = Ax + Bu + c,
    # x_k+1 = e^(AT) x_k + A^-1 (e^(AT) - 1) (Bu + c)
    Ad = math.exp(A * dt)
    Bd = 1.0 / A * (Ad - 1) * B
    x = Ad * x + Bd * us[j - 1] + 1.0 / A * (Ad - 1) * c * np.sign(x)
    pxs.append(t)
    pys.append(x)
plt.plot(pxs, pys, label="Angular velocity estimate (rad/s)")
plt.legend()
plt.show()
| true
|
648a1a412b0277b107aa54da3a8aaeafcefde22a
|
Python
|
Cafolkes/keedmd
|
/core/controllers/fb_lin_controller.py
|
UTF-8
| 1,120
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
from numpy.linalg import solve
from .controller import Controller
class FBLinController(Controller):
    """Class for linearizing feedback policies."""

    def __init__(self, fb_lin_dynamics, linear_controller):
        """Create an FBLinController object.

        Policy is u = (act)^-1 * (-drift + aux), where drift and act are
        components of drift vector and actuation matrix corresponding to
        highest-order derivatives of each output coordinate and aux is an
        auxilliary linear controller.

        Inputs:
        Feedback linearizable dynamics, fb_lin_dynamics: FBLinDynamics
        Auxilliary linear controller, linear_controller: LinearController
        """
        Controller.__init__(self, fb_lin_dynamics)
        self.linear_controller = linear_controller
        # select/permute extract and reorder the highest-order output rows
        # from the full drift vector / actuation matrix.
        self.select = fb_lin_dynamics.select
        self.permute = fb_lin_dynamics.permute

    def eval(self, x, t):
        # Solve act * u = -drift + aux for the control input u at state x, time t.
        drift = self.select(self.permute(self.dynamics.drift(x, t)))
        act = self.select(self.permute(self.dynamics.act(x, t)))
        return solve(act, -drift + self.linear_controller.eval(x, t))
| true
|
9003a3a3d76fdc44670c146dd707efc61a96621e
|
Python
|
Markus28/multiresolution_algorithms_code
|
/fixed_point_algorithm/visualize.py
|
UTF-8
| 4,443
| 2.859375
| 3
|
[] |
no_license
|
import numpy as np
from scipy.ndimage.filters import laplace
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
import os
def read_grid(file_name):
    """Parse a sparse grid dump: first line 'N,M', then one 'i,j,value' row per entry."""
    with open(file_name) as handle:
        rows, cols = handle.readline().split(",")
        grid = np.zeros((int(rows), int(cols)))
        for record in handle:
            i, j, value = record.split(",")
            grid[int(i), int(j)] = float(value)
    return grid
def transform_for_imshow(A):
    """Transpose A and flip it vertically so imshow draws it in math orientation."""
    return np.flipud(A.T)
def visualize_forward():
    """Animate the forward-problem snapshots in data/forward and save stills.

    Writes visualization.mp4 and forward_problems.pdf.
    """
    location = "data/forward"
    grid_files = []
    for file_name in os.listdir(location):
        if os.path.splitext(file_name)[1] == ".txt":
            grid_files.append(os.path.join(location, file_name))
    # Sort snapshots numerically by the index embedded in the file name.
    # NOTE(review): splits the *full path* on '_'; breaks if a parent
    # directory name contains an underscore - confirm.
    grid_files = sorted(grid_files, key = lambda f: int(f.split('_')[1].split('.')[0]))
    print(f"Found {len(grid_files)} files...")
    print("Plotting...")
    grids = [transform_for_imshow(read_grid(file_name)) for file_name in grid_files]
    # Plot differences against the initial frame.
    grids = [grid-grids[0] for grid in grids]
    mini = min([np.min(grid) for grid in grids])
    maxi = max([np.max(grid) for grid in grids])
    fig,ax = plt.subplots(1)
    # Red rectangle: central quarter region; red dot: source location.
    rect = patches.Rectangle(((3*grids[0].shape[0])/8, (3*grids[0].shape[1])/8), grids[0].shape[0]/4, grids[0].shape[1]/4, linewidth=1,edgecolor='r',facecolor='none')
    dot = patches.Circle(((1+np.cos(2.3))*grids[0].shape[0]/2, (1-np.sin(2.3))*grids[0].shape[0]/2), radius = grids[0].shape[0]/100, facecolor='r')
    ims = []
    # Wavefront radius grows linearly with the frame index.
    h_r = 2.0/(len(grids) - 1)
    for j, grid in enumerate(grids):
        circle = patches.Circle(((1+np.cos(2.3))*grids[0].shape[0]/2, (1-np.sin(2.3))*grids[0].shape[0]/2), radius = j*h_r*(grids[0].shape[0])/2, linewidth=1, edgecolor='b',facecolor='none')
        ims.append([ax.imshow(grid, vmin = mini, vmax = maxi, animated = True, cmap="coolwarm"), ax.add_patch(rect), ax.add_patch(circle), ax.add_patch(dot)])
    ax.axis("off")
    ani = animation.ArtistAnimation(fig, ims, interval=5000/len(grids), blit=True,
                                    repeat_delay=0)
    ani.save('visualization.mp4')
    plt.title(r'$\phi - \phi_{*}$')
    plt.show()
    #We want to generate a static plot:
    begin_frame = 67
    frame_skip = 22
    num_frames = 4
    fig, ax = plt.subplots(ncols = num_frames)
    frames = [grids[i*frame_skip + begin_frame] for i in range(num_frames)]
    mini_stat = min([np.min(grid) for grid in frames])
    maxi_stat = max([np.max(grid) for grid in frames])
    for i, frame in enumerate(frames):
        rect = patches.Rectangle(((3*grids[0].shape[0])/8, (3*grids[0].shape[1])/8), grids[0].shape[0]/4, grids[0].shape[1]/4, linewidth=1,edgecolor='r',facecolor='none')
        dot = patches.Circle(((1+np.cos(2.3))*grids[0].shape[0]/2, (1-np.sin(2.3))*grids[0].shape[0]/2), radius = grids[0].shape[0]/100, facecolor='r')
        circle = patches.Circle(((1+np.cos(2.3))*grids[0].shape[0]/2, (1-np.sin(2.3))*grids[0].shape[0]/2),
                                radius = (i*frame_skip + begin_frame)*h_r*(grids[0].shape[0])/2, linewidth=1, edgecolor='b',facecolor='none')
        ax[i].add_patch(circle)
        ax[i].add_patch(rect)
        ax[i].add_patch(dot)
        ax[i].axis("off")
        ax[i].imshow(frame, vmin = mini_stat, vmax = maxi_stat, cmap="coolwarm")
    fig.tight_layout()
    plt.savefig("forward_problems.pdf", dpi=1200, bbox_inches='tight')
    plt.show()
def visualize_psi():
    """Plot psi and its (centrally cropped) Laplacian side by side; saves psi.pdf."""
    location = "data/psi.txt"
    psi = transform_for_imshow(read_grid(location))
    # Only evaluate the Laplacian on the interior window [100:300, 100:300].
    laplace_psi = np.zeros_like(psi)
    laplace_psi[100:300, 100:300] = laplace(psi)[100:300, 100:300]
    fig, ax = plt.subplots(ncols = 2)
    ax[1].imshow(laplace_psi, cmap="coolwarm")
    ax[0].imshow(psi, cmap="coolwarm")
    ax[0].axis("off")
    ax[1].axis("off")
    ax[0].set_title("$\\psi$", verticalalignment='top', y=-0.15)
    # NOTE(review): the trailing "" after the f-string is an implicit empty-string
    # concatenation - likely an editing leftover; harmless.
    ax[1].set_title(f"$\\Delta \\psi$""", verticalalignment='top', y=-0.15)
    fig.tight_layout()
    plt.savefig("psi.pdf", dpi = 1200, bbox_inches='tight')
    plt.show()
def visualize_reconstruction():
    """Plot the reconstructed absorption grid; saves reconstruction.pdf."""
    location = "data/absorption.txt"
    reconstruction = transform_for_imshow(read_grid(location))
    plt.imshow(reconstruction, cmap="coolwarm")
    plt.axis("off")
    plt.savefig("reconstruction.pdf", dpi = 1200, bbox_inches='tight')
    plt.show()
if __name__=="__main__":
    # Regenerate every figure/animation from the data/ directory.
    visualize_reconstruction()
    visualize_psi()
    visualize_forward()
| true
|
76d70b8cdd44bfceb98f62219d496c1db7d0afce
|
Python
|
priya192/guvi--priya.github.io
|
/ans9.py
|
UTF-8
| 152
| 3.515625
| 4
|
[] |
no_license
|
# Count how many characters of the input are decimal digits.
data = input("Please enter the input")
count = 0
for ch in data:
    # Bug fix: the original membership test used "0,1,2,3,4,5,6,7,8,9",
    # whose commas also matched, so ',' characters were counted as digits.
    if ch in "0123456789":
        count = count + 1
print("Count is:", count)
| true
|
9f4d75ab9d873c22e0b5575c3a44af6d61235fbe
|
Python
|
WinnieJiangHW/Carry-lookahead_RNN
|
/Benchmarks/char_cnn/char_cnn_test.py
|
UTF-8
| 7,243
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
import argparse
import torch.nn as nn
import torch.optim as optim
from utils import *
from model import CL_RNN
import time
import math
import warnings
warnings.filterwarnings("ignore") # Suppress the RunTimeWarning on unicode
# Command-line configuration for the character-level language-model benchmark.
parser = argparse.ArgumentParser(description='Sequence Modeling - Character Level Language Model')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
                    help='batch size (default: 32)')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA (default: True)')
parser.add_argument('--dropout', type=float, default=0.1,
                    help='dropout applied to layers (default: 0.1)')
parser.add_argument('--emb_dropout', type=float, default=0.1,
                    help='dropout applied to the embedded layer (0 = no dropout) (default: 0.1)')
parser.add_argument('--clip', type=float, default=0.15,
                    help='gradient clip, -1 means no clip (default: 0.15)')
parser.add_argument('--epochs', type=int, default=100,
                    help='upper epoch limit (default: 100)')
parser.add_argument('--ksize', type=int, default=3,
                    help='kernel size (default: 3)')
parser.add_argument('--levels', type=int, default=3,
                    help='# of levels (default: 3)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='report interval (default: 100')
parser.add_argument('--lr', type=float, default=4,
                    help='initial learning rate (default: 4)')
parser.add_argument('--emsize', type=int, default=100,
                    help='dimension of character embeddings (default: 100)')
parser.add_argument('--optim', type=str, default='SGD',
                    help='optimizer to use (default: SGD)')
parser.add_argument('--nhid', type=int, default=450,
                    help='number of hidden units per layer (default: 450)')
parser.add_argument('--validseqlen', type=int, default=320,
                    help='valid sequence length (default: 320)')
parser.add_argument('--seq_len', type=int, default=400,
                    help='total sequence length, including effective history (default: 400)')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed (default: 1111)')
parser.add_argument('--dataset', type=str, default='ptb',
                    help='dataset to use (default: ptb)')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
print(args)
# Load the corpus and build batched (batch, length) character-id tensors.
file, file_len, valfile, valfile_len, testfile, testfile_len, corpus = data_generator(args)
n_characters = len(corpus.dict)
train_data = batchify(char_tensor(corpus, file), args.batch_size, args)
val_data = batchify(char_tensor(corpus, valfile), 1, args)
test_data = batchify(char_tensor(corpus, testfile), 1, args)
print("Corpus size: ", n_characters)
# Channel sizes per level; the last level matches the embedding size.
num_chans = [args.nhid] * (args.levels - 1) + [args.emsize]
k_size = args.ksize
dropout = args.dropout
emb_dropout = args.emb_dropout
model = CL_RNN(args.emsize, n_characters, num_chans, kernel_size=k_size, dropout=dropout, emb_dropout=emb_dropout)
if args.cuda:
    model.cuda()
criterion = nn.CrossEntropyLoss()
lr = args.lr
optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)
def evaluate(source):
    """Return the average per-character cross-entropy of the global `model` on `source`.

    The sequence is scanned in strides of args.validseqlen; the first
    args.seq_len - args.validseqlen characters of every window are history
    only and excluded from the loss.  Relies on the module-level globals
    model, criterion, args and n_characters.
    """
    model.eval()
    total_loss = 0
    count = 0
    source_len = source.size(1)
    with torch.no_grad():
        for batch, i in enumerate(range(0, source_len - 1, args.validseqlen)):
            # Skip a trailing window that would run past the end of the data.
            if i + args.seq_len - args.validseqlen >= source_len:
                continue
            inp, target = get_batch(source, i, args)
            output = model(inp)
            eff_history = args.seq_len - args.validseqlen
            # Score only the positions after the warm-up history.
            final_output = output[:, eff_history:].contiguous().view(-1, n_characters)
            final_target = target[:, eff_history:].contiguous().view(-1)
            loss = criterion(final_output, final_target)
            # Weight each window by the number of characters it scores.
            total_loss += loss.data * final_output.size(0)
            count += final_output.size(0)
    val_loss = total_loss.item() / count * 1.0
    return val_loss
def train(epoch):
    """Run one training epoch over the global train_data; return the mean of logged losses.

    NOTE(review): raises ZeroDivisionError if fewer than args.log_interval
    batches are processed (losses stays empty) - confirm for tiny datasets.
    """
    model.train()
    total_loss = 0
    start_time = time.time()
    losses = []
    source = train_data
    source_len = source.size(1)
    for batch_idx, i in enumerate(range(0, source_len - 1, args.validseqlen)):
        # Skip a trailing window that would run past the end of the data.
        if i + args.seq_len - args.validseqlen >= source_len:
            continue
        inp, target = get_batch(source, i, args)
        optimizer.zero_grad()
        output = model(inp)
        # Exclude the warm-up history from the loss, as in evaluate().
        eff_history = args.seq_len - args.validseqlen
        final_output = output[:, eff_history:].contiguous().view(-1, n_characters)
        final_target = target[:, eff_history:].contiguous().view(-1)
        loss = criterion(final_output, final_target)
        loss.backward()
        if args.clip > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        total_loss += loss.item()
        if batch_idx % args.log_interval == 0 and batch_idx > 0:
            cur_loss = total_loss / args.log_interval
            losses.append(cur_loss)
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | ms/batch {:5.2f} | '
                  'loss {:5.3f} | bpc {:5.3f}'.format(
                epoch, batch_idx, int((source_len-0.5) / args.validseqlen), lr,
                elapsed * 1000 / args.log_interval, cur_loss, cur_loss / math.log(2)))
            total_loss = 0
            start_time = time.time()
    return sum(losses) * 1.0 / len(losses)
def main():
    """Train for args.epochs epochs with validation, LR decay and best-model saving.

    Divides the learning rate by 10 when validation loss stops improving over
    the last three epochs; saves the model whenever validation loss improves.
    A KeyboardInterrupt saves the current model before the final test run.
    """
    global lr
    try:
        print("Training for %d epochs..." % args.epochs)
        all_losses = []
        best_vloss = 1e7
        for epoch in range(1, args.epochs + 1):
            loss = train(epoch)
            vloss = evaluate(val_data)
            print('-' * 89)
            print('| End of epoch {:3d} | valid loss {:5.3f} | valid bpc {:8.3f}'.format(
                epoch, vloss, vloss / math.log(2)))
            test_loss = evaluate(test_data)
            print('=' * 89)
            print('| End of epoch {:3d} | test loss {:5.3f} | test bpc {:8.3f}'.format(
                epoch, test_loss, test_loss / math.log(2)))
            print('=' * 89)
            # Decay the learning rate when validation loss regresses.
            if epoch > 5 and vloss > max(all_losses[-3:]):
                lr = lr / 10.
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
            all_losses.append(vloss)
            if vloss < best_vloss:
                print("Saving...")
                save(model)
                best_vloss = vloss
    except KeyboardInterrupt:
        print('-' * 89)
        print("Saving before quit...")
        save(model)
    # Run on test data.
    test_loss = evaluate(test_data)
    print('=' * 89)
    print('| End of training | test loss {:5.3f} | test bpc {:8.3f}'.format(
        test_loss, test_loss / math.log(2)))
    print('=' * 89)
# train_by_random_chunk()
if __name__ == "__main__":
    # Entry point: train, validate and test per the parsed CLI arguments.
    main()
| true
|
fd684597bc545a23afc18b49d8760149a5ab8eb6
|
Python
|
giselabelen/redes2020
|
/Taller1-Wiretapping/fuenteInfoEntropia.py
|
UTF-8
| 902
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/python
from math import *
from scapy.all import *
# Total number of frames observed so far.
frames = 0
# Source model: maps (direction, ether-type) symbols to observation counts.
S1 = {}
def mostrar_fuente(S):
    # Print each symbol's probability and self-information, then the source
    # entropy and the running frame count.  (Python 2 code.)
    # NOTE(review): relies on the module name `math` being bound via
    # `from scapy.all import *`; `from math import *` alone does not bind
    # the module itself - confirm.
    global frames
    frames += 1
    N = sum(S.values())
    simbolos = sorted(S.iteritems(), key=lambda x: -x[1])
    entropia = 0
    for d,k in simbolos:
        prob = k/N # k/N is the probability of this symbol
        info = - math.log(prob,2) # self-information of this symbol
        entropia += prob*info # accumulate towards the entropy
        print "\n %s : %.5f --- %s %.5f" % (d,prob,"Info: ",info)
    print "\n %s : %.5f" % ("Entropia: ",entropia)
    print "\n%d frames" % (frames)
    print "\n -----------"
def callback(pkt):
    # Count one source symbol per Ethernet frame: (unicast/broadcast, protocol).
    if pkt.haslayer(Ether):
        dire = "BROADCAST" if pkt[Ether].dst=="ff:ff:ff:ff:ff:ff" else "UNICAST"
        proto = pkt[Ether].type # the frame's type field carries the protocol
        s_i = (dire, proto) # this defines the source symbol
        if s_i not in S1: S1[s_i] = 0.0
        S1[s_i] += 1.0
        mostrar_fuente(S1)

# Sniff indefinitely, updating and printing the model for every captured frame.
sniff(prn=callback)
| true
|
a8d8482f1e811a8125bea971b1bb72eba82f983b
|
Python
|
magnuspedro/gateCrawler
|
/AmazonCrawler/spiders/scraper_americanas.py
|
UTF-8
| 1,380
| 2.65625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
import hashlib
from ..items import AmericanasItem
class BotAmericanasSraper(scrapy.Spider):
    """Spider that scrapes Xiaomi product listings from americanas.com.br."""

    name = "botamericanas"
    # NOTE(review): looks copy-pasted from an Amazon spider and is also not
    # scrapy's `allowed_domains` attribute, so it has no effect - confirm.
    allowed_domain = 'www.amazon.com.br'
    start_urls = [
        'https://www.americanas.com.br/busca/xiaomi',
    ]

    def parse(self, response):
        """Yield one AmericanasItem per product card, then follow pagination."""
        products = response.css('.product-grid-item')
        for product in products:
            # Bug fix: create a fresh item per product.  The original reused a
            # single mutable item across yields, which corrupts previously
            # yielded results in scrapy's asynchronous pipelines.
            items = AmericanasItem()
            product_name = product.css('.gYIWNc::text').extract()
            price = product.css('.dHyYVS::text').extract()
            # Bug fix: select the link from the current `product`, not from the
            # whole `products` selector (which always yielded the page's first link).
            link = product.css('.Link-bwhjk3-2').xpath('@href').extract()
            unique_code = hashlib.md5(product_name[0].encode())
            items['code'] = unique_code.hexdigest()
            items['product'] = self.clear_text(product_name[0]) if product else ''
            items['price'] = price[1]
            items['link'] = self.clear_text(link[0]) if link else ''
            yield items
        next_page = response.css('#content-middle li:nth-child(10) a').xpath('@href').extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)

    def clear_text(self, text):
        """Strip spaces, newlines and the 'R$' currency prefix from text."""
        return text.replace(' ', '').replace('\n', '').replace('R$','')
| true
|
0e60a917a1e70ce05902674b59dd2d8b479bcc21
|
Python
|
Renato0402/Uri-Online-Judge
|
/python/uri1001.py
|
UTF-8
| 92
| 3.34375
| 3
|
[] |
no_license
|
if __name__ == '__main__':
    # URI 1001: read two integers and print their sum in the required format.
    first = int(input())
    second = int(input())
    print('X = {}'.format(first + second))
| true
|
450e8f9cb380b1539efdee94b89c93ce60d7c85b
|
Python
|
kutaybuyukkorukcu/Codewars
|
/6 kyu/NthFibonacci.py
|
UTF-8
| 212
| 3.171875
| 3
|
[] |
no_license
|
# https://www.codewars.com/kata/522551eee9abb932420004a0
def nth_fib(n):
    """Return the n-th Fibonacci number, 1-indexed so nth_fib(1) == 0.

    Iterative O(n) replacement for the original exponential double recursion
    (which also recursed forever for n < 1; this version returns 0 there).
    """
    current, following = 0, 1
    for _ in range(n - 1):
        current, following = following, current + following
    return current
# 🧙♂️👍
| true
|
0769366c4c520892c0f8ca5d54c14264eec70a2e
|
Python
|
Pradeepnataraj/pradeep
|
/alphabet.py
|
UTF-8
| 117
| 3.09375
| 3
|
[] |
no_license
|
# Reads a token and reports whether it falls inside the ASCII letter ranges.
# NOTE: the comparisons are lexicographic over the WHOLE input string, exactly
# as in the original (e.g. "ab" also falls between 'a' and 'z').
text = input()
in_lower_range = 'a' <= text <= 'z'
in_upper_range = 'A' <= text <= 'Z'
if in_lower_range or in_upper_range:
    print("Alphabet")
else:
    print("no")
| true
|
5c719bee992a818b2f531a743698cae1558f4c4e
|
Python
|
Sabrout/cantal_cryptocurrency
|
/src/structure/crypto.py
|
UTF-8
| 2,695
| 3.09375
| 3
|
[] |
no_license
|
from ecdsa import SigningKey
from ecdsa import VerifyingKey
from ecdsa import BadSignatureError
import os
import binascii
class Crypto():
    """
    Thin wrapper around the ecdsa library: persists a signing (private)
    and verifying (public) key pair as PEM files and exposes sign/verify
    helpers that work on hex-encoded strings.
    """
    def __init__(self, path=None):
        """
        Load (or generate and save) the key pair under *path*.

        :param path: directory holding ``private.pem``/``public.pem``;
                     defaults to the current working directory.

        Bug fix: the original default ``path=os.getcwd()`` was evaluated
        once at class-definition time, freezing the directory at import;
        it is now resolved per call.
        """
        if path is None:
            path = os.getcwd()

        # Locations of the PEM-encoded keys on disk.
        private_path = os.path.normpath(os.path.join(path, "private.pem"))
        public_path = os.path.normpath(os.path.join(path, "public.pem"))

        # Load the private key if present, otherwise generate and persist
        # it. ``with`` guarantees the files close even on error (the
        # original leaked the handle on a read/parse failure).
        if os.path.exists(private_path):
            with open(private_path, "rb") as private_file:
                self.private = SigningKey.from_pem(private_file.read())
        else:
            self.private = SigningKey.generate()
            with open(private_path, "wb") as private_file:
                private_file.write(self.private.to_pem())

        # Load the public key if present, otherwise derive it from the
        # private key and persist it.
        if os.path.exists(public_path):
            with open(public_path, "rb") as public_file:
                self.public = VerifyingKey.from_pem(public_file.read())
        else:
            self.public = self.private.get_verifying_key()
            with open(public_path, "wb") as public_file:
                public_file.write(self.public.to_pem())

    def get_public(self):
        """Return the public key as a hex string."""
        return binascii.hexlify(self.public.to_string()).decode("utf-8")

    def sign(self, message):
        """Sign *message* (str or bytes) and return the signature as hex."""
        # Accept str transparently by encoding to bytes first.
        if isinstance(message, str):
            message = message.encode("utf-8")
        return binascii.hexlify(self.private.sign(message)).decode("utf-8")

    @staticmethod
    def verify(public, signature, message):
        """
        Return True iff *signature* matches *message* under *public*.

        :param public: hex-encoded public key string
        :param signature: hex string or raw bytes signature
        :param message: str or bytes message

        Declared ``@staticmethod``: the original definition had no
        ``self``, so calling it on an *instance* mis-bound the instance to
        ``public``. ``Crypto.verify(...)`` keeps working unchanged and
        instance calls are now safe too.
        """
        if isinstance(message, str):
            message = message.encode("utf-8")
        # Accept the signature either as hex text or raw bytes.
        if isinstance(signature, str):
            signature = binascii.unhexlify(signature)
        # Rebuild the key object from its hex representation.
        key = VerifyingKey.from_string(binascii.unhexlify(public))
        try:
            key.verify(signature, message)
            return True
        except BadSignatureError:
            return False
| true
|
bae59598b2f9982ac97067d65c55bf8bb5c71563
|
Python
|
kefirzhang/algorithms
|
/leetcode/python/easy/p1037_isBoomerang.py
|
UTF-8
| 528
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
class Solution:
    def isBoomerang(self, points) -> bool:
        """Return True iff the three 2-D points form a boomerang, i.e.
        they are pairwise distinct and not collinear.

        A single cross-product test covers both conditions: if any two
        points coincide, the cross product of AB and AC is zero, so the
        original's explicit duplicate-point checks were redundant.

        :param points: sequence of three [x, y] pairs
        """
        (ax, ay), (bx, by), (cx, cy) = points
        # Cross product of vectors AB and AC; zero iff A, B, C are
        # collinear (which includes any coincident pair).
        return (bx - ax) * (cy - ay) != (cx - ax) * (by - ay)


slu = Solution()
print(slu.isBoomerang([[1, 1], [2, 3], [3, 2]]))
| true
|
5524ecd8c318551c6e1ee7a4202c995aae3b0d11
|
Python
|
JeffryHermanto/Python-Programming
|
/05 Data Structures/16_swapping_variables.py
|
UTF-8
| 103
| 3.3125
| 3
|
[] |
no_license
|
# Swapping Variables
# Python exchanges two values in a single tuple assignment,
# so no temporary third variable is needed.
x = 10
y = 11

x, y = y, x

print("x", x)
print("y", y)
| true
|
c40629dcad9853bdedc406eb61bee257c91731b2
|
Python
|
DenkSchuldt/MasAllaDelEspacio
|
/ProyectoPython/src/Boton.py
|
UTF-8
| 641
| 3.25
| 3
|
[] |
no_license
|
import pygame
class Boton(pygame.sprite.Sprite):
def __init__(self,imagen_01,imagen_02,x=200,y=200):
self.imagen_normal = imagen_01
self.imagen_seleccion = imagen_02
self.imagen_actual = self.imagen_normal
self.rect = self.imagen_actual.get_rect()
self.rect.left,self.rect.top = (x,y)
def update(self,pantalla,cursor):
if cursor.colliderect(self.rect):
self.imagen_actual = self.imagen_seleccion
else:
self.imagen_actual = self.imagen_normal
pantalla.blit(self.imagen_actual,self.rect)
| true
|
8103c96ce81121092b81f81e0023bfb81f6c115b
|
Python
|
fairviewrobotics/Python-Knight-Armor
|
/env/lib/python3.6/site-packages/wpilib/command/subsystem.py
|
UTF-8
| 6,147
| 2.640625
| 3
|
[] |
no_license
|
# validated: 2018-01-06 TW 8346caed9cbf edu/wpi/first/wpilibj/command/Subsystem.java
#----------------------------------------------------------------------------
# Copyright (c) FIRST 2008-2012. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
#----------------------------------------------------------------------------
import logging
from .scheduler import Scheduler
from ..livewindow import LiveWindow
from ..sendablebase import SendableBase
__all__ = ["Subsystem"]
class Subsystem(SendableBase):
    """This class defines a major component of the robot.

    A good example of a subsystem is the driveline, or a claw if the robot has
    one.

    All motors should be a part of a subsystem. For instance, all the wheel
    motors should be a part of some kind of "Driveline" subsystem.

    Subsystems are used within the command system as requirements for Command.
    Only one command which requires a subsystem can run at a time. Also,
    subsystems can have default commands which are started if there is no
    command running which requires this subsystem.

    .. seealso:: :class:`.Command`
    """

    def __init__(self, name=None):
        """Creates a subsystem.

        :param name: the name of the subsystem; if None, it will be set to
                     the name of the class.
        """
        super().__init__()
        # Derive the name from the concrete class when none is given.
        if name is None:
            name = self.__class__.__name__
        self.setName(name)
        Scheduler.getInstance().registerSubsystem(self)
        self.logger = logging.getLogger(__name__)

        # True once getDefaultCommand() has lazily run initDefaultCommand()
        self.initializedDefaultCommand = False

        # The command currently claiming this subsystem (None when free)
        self.currentCommand = None
        self.currentCommandChanged = True

        # The command started when nothing else requires this subsystem
        self.defaultCommand = None

    def initDefaultCommand(self):
        """Initialize the default command for a subsystem.

        By default subsystems have no default command, but if they do, the
        default command is set with this method. It is called on all
        Subsystems by CommandBase in the users program after all the
        Subsystems are created.
        """
        pass

    def periodic(self):
        """When the run method of the scheduler is called this method will be
        called.
        """
        # Emit the "override me" hint only once per class: the flag is
        # stamped on the function object itself, which all instances share.
        func = self.periodic.__func__
        if not hasattr(func, "firstRun"):
            self.logger.info("Default Subsystem.periodic() method... Overload me!")
            func.firstRun = False

    def setDefaultCommand(self, command):
        """Sets the default command. If this is not called or is called with
        None, then there will be no default command for the subsystem.

        :param command: the default command (or None if there should be none)

        .. warning:: This should NOT be called in a constructor if the
                     subsystem is a singleton.
        """
        if command is None:
            self.defaultCommand = None
        else:
            # A command may only be the default of a subsystem it requires.
            if self not in command.getRequirements():
                raise ValueError("A default command must require the subsystem")
            self.defaultCommand = command

    def getDefaultCommand(self):
        """Returns the default command (or None if there is none).

        :returns: the default command
        """
        # Lazily give subclasses a chance to install their default command.
        if not self.initializedDefaultCommand:
            self.initializedDefaultCommand = True
            self.initDefaultCommand()
        return self.defaultCommand

    def getDefaultCommandName(self):
        """Returns the default command name, or empty string if there is none.

        :returns: the default command name
        """
        defaultCommand = self.getDefaultCommand()
        if defaultCommand is not None:
            return defaultCommand.getName()
        return ""

    def setCurrentCommand(self, command):
        """Sets the current command.

        :param command: the new current command
        """
        self.currentCommand = command
        self.currentCommandChanged = True

    def confirmCommand(self):
        """Call this to alert Subsystem that the current command is actually
        the command. Sometimes, the Subsystem is told that it has no command
        while the Scheduler is going through the loop, only to be soon after
        given a new one. This will avoid that situation.
        """
        if self.currentCommandChanged:
            self.currentCommandChanged = False

    def getCurrentCommand(self):
        """Returns the command which currently claims this subsystem.

        :returns: the command which currently claims this subsystem
        """
        return self.currentCommand

    def getCurrentCommandName(self):
        """Returns the current command name, or empty string if no current
        command.

        :returns: the current command name
        """
        currentCommand = self.getCurrentCommand()
        if currentCommand is not None:
            return currentCommand.getName()
        return ""

    def addChild(self, child, name=None):
        """Associate a :class:`.Sendable` with this Subsystem.
        Update the child's name if provided.

        :param child: sendable
        :param name: name to give child
        """
        if name is not None:
            child.setName(self.getSubsystem(), name)
        else:
            child.setSubsystem(self.getSubsystem())
        LiveWindow.add(child)

    def __str__(self):
        return self.getSubsystem()

    def initSendable(self, builder):
        """Publish this subsystem's command state to the dashboard."""
        builder.setSmartDashboardType("Subsystem")

        builder.addBooleanProperty(".hasDefault", lambda: self.defaultCommand is not None, None)
        builder.addStringProperty(".default", self.getDefaultCommandName, None)
        # Bug fix: ".hasCommand" previously re-tested defaultCommand (a
        # copy-paste of the ".hasDefault" line above), so the dashboard
        # never reflected whether a command was actually running.
        builder.addBooleanProperty(".hasCommand", lambda: self.currentCommand is not None, None)
        builder.addStringProperty(".command", self.getCurrentCommandName, None)
| true
|
6c444f40bc74180c6ee6f9550fd1b2b55137401c
|
Python
|
honyacho/atcoder_excercise
|
/abc070/d.py
|
UTF-8
| 521
| 2.53125
| 3
|
[] |
no_license
|
# ABC070 D: shortest distance between l and r through vertex K in a
# weighted tree. dist(l, r | via K) = dist(K, l) + dist(K, r).
N = int(input())

# Adjacency list: NODES[v] = list of (neighbour, edge_cost).
NODES = {}
for _ in range(N - 1):
    a, b, c = map(int, input().split())
    NODES.setdefault(a, []).append((b, c))
    NODES.setdefault(b, []).append((a, c))

Q, K = map(int, input().split())

# MP[v] = distance from K to v (-1 = not yet visited). Vertices are 1-based.
MP = [-1] * (N + 1)
MP[K] = 0

# Iterative DFS. The original recursed with sys.setrecursionlimit(10**7),
# which can still overflow the C stack on a path-shaped tree; an explicit
# stack also handles the N == 1 case where K has no adjacency entry.
stack = [K]
while stack:
    k = stack.pop()
    for i, c in NODES.get(k, []):
        if MP[i] == -1:
            MP[i] = MP[k] + c
            stack.append(i)

# Answer each query as the sum of the two distances from K.
for l, r in [tuple(map(int, input().split())) for _ in range(Q)]:
    print(MP[l] + MP[r])
| true
|
312f51e5fb5aca37df5ae91e89657034099ab023
|
Python
|
gianv9/CursoDeIntroduccionAPythonYterminalLinux
|
/Python/pygame/cuadrado/cuadradoConSaltoFunciones.py
|
UTF-8
| 3,874
| 3.53125
| 4
|
[] |
no_license
|
# importamos la libreria
import pygame
def dibujar(ventana, R1):
    """Redraw one frame: clear the background, paint the player's
    rectangle, and flip the display buffer."""
    # Repaint the background first so the rectangle leaves no trail.
    ventana["juego"].fill((0, 0, 0))
    # Red (RGB 255,0,0) rectangle at the player's current position/size.
    rectangulo = (R1["x"], R1["y"], R1["ancho"], R1["alto"])
    pygame.draw.rect(ventana["juego"], (255, 0, 0), rectangulo)
    # Push the freshly drawn frame to the screen.
    pygame.display.update()
# Moves the player to its new position from the pressed-key snapshot and,
# when a jump is in progress, advances the jump arc.
# Returns the updated jump indicators [saltando, contadorSaltos].
# NOTE(review): despite its parameter list, this function also reads and
# mutates the module-level ``R1`` (player rect), ``ejes`` (direction
# flags) and ``ventana`` (window size) — confirm before refactoring.
def moverJugador(teclas, saltando, contadorSaltos):
    if not(saltando): # not jumping yet, so a jump may start...
        # Commented out so the player cannot simply fly up/down —
        # jumping is the only vertical movement:
        # if teclas[pygame.K_UP]:
        #     ejes[0], ejes[1], ejes[2], ejes[3] = True, False, False, False
        # if teclas[pygame.K_DOWN]:
        #     ejes[0], ejes[1], ejes[2], ejes[3] = False, True, False, False

        # Space starts a jump.
        if teclas[pygame.K_SPACE]:
            saltando = True
    else: # a jump is in progress...
        # The counter runs 10 .. -10: positive half rises, negative half
        # falls, giving a parabolic arc (displacement ~ counter**2).
        if contadorSaltos >= -10:
            direccion = 1
            if contadorSaltos < 0:
                direccion = -1
            R1["y"] -= (contadorSaltos ** 2) * 0.3 * direccion
            contadorSaltos -= 1
        else: # jump finished: reset the jump indicators...
            saltando = False
            contadorSaltos = 10

    # Horizontal direction flags from the arrow keys.
    if teclas[pygame.K_LEFT]:
        ejes[0], ejes[1], ejes[2], ejes[3] = False, False, True, False
    if teclas[pygame.K_RIGHT]:
        ejes[0], ejes[1], ejes[2], ejes[3] = False, False, False, True

    # Apply movement, clamping so the rectangle stays inside the window.
    # Vertical movement stays commented out (jump-only vertical motion):
    # if ejes[0] and R1["y"] > R1["velocidad"]:
    #     R1["y"] -= R1["velocidad"]
    # if ejes[1] and R1["y"] < ( ventana["alto"] - R1["alto"] - R1["velocidad"] ) :
    #     R1["y"] += R1["velocidad"]
    if ejes[2] and R1["x"] > R1["velocidad"]:
        R1["x"] -= R1["velocidad"]
    if ejes[3] and R1["x"] < ( ventana["ancho"] - R1["ancho"] - R1["velocidad"] ):
        R1["x"] += R1["velocidad"]

    # Hand the (possibly updated) jump state back to the caller.
    return [saltando, contadorSaltos]
# Jump indicators (threaded through moverJugador via its return value).
saltando = False
contadorSaltos = 10

# Window attributes.
ventana = {"juego": None, "alto": 500, "ancho": 500, "titulo": "Mi primer juego en python!"}
# Player rectangle attributes.
R1 = {"x": 50, "y": 300, "ancho": 50, "alto": 60, "velocidad": 5}

ventana["juego"] = pygame.display.set_mode( (ventana["alto"], ventana["ancho"]) )
pygame.display.set_caption( ventana["titulo"] )

# Direction flags [up, down, left, right]; mutated inside moverJugador.
ejes = [False, False, False, False]

# Initialise pygame.
pygame.init()

# Every game needs a main loop.
jugando = True
while jugando:
    # Short pause (20 ms) to cap the frame rate.
    pygame.time.delay(20)

    # Clear the flags so the player stops when no key is pressed.
    ejes[0], ejes[1], ejes[2], ejes[3] = False, False, False, False

    # Drain the event queue.
    for evento in pygame.event.get(): # list of pending events and their state
        if evento.type == pygame.QUIT: # window close requested
            jugando = False # leave the main loop

    # See the docs --> https://www.pygame.org/docs/ref/key.html
    # Snapshot of the state of ALL keys.
    teclas = pygame.key.get_pressed()

    # Hand the key state to the function that moves the player.
    indicadoresSalto = moverJugador(teclas, saltando, contadorSaltos)
    saltando = indicadoresSalto[0]
    contadorSaltos = indicadoresSalto[1]

    # Hand the window and the player to the drawing routine.
    dibujar(ventana,R1)

pygame.quit() # shut the game down
| true
|
841cfc9743d5f62c13c999a9aa68f4ca7a98b6e4
|
Python
|
Ash515/PyDataStructures
|
/Non Linear Structures/Binary Heap/PriorityQueue.py
|
UTF-8
| 242
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
'''
Heap used as a priority queue: entries are (priority, payload) tuples,
ordered by their first element.
'''
import heapq

list_ = [(1, "Python"), (2, "JAVA"), (3, "C++")]
heapq.heapify(list_)
print(list_)

# Pop entries in ascending priority order until the heap is empty.
while list_:
    print(heapq.heappop(list_))

'''
Output
(1, 'Python')
(2, 'JAVA')
(3, 'C++')
'''
| true
|
17a2fb8c4b88d9aaa71ba5c2ff508d975a80b07b
|
Python
|
ordikhan/Data-cleansing
|
/feature selection.py
|
UTF-8
| 1,141
| 2.640625
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import random
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFE

# Fixed seed so repeated runs behave the same.
random.seed(2002)

# Iris dataset: 150 samples x 4 features, 3 classes.
iris = datasets.load_iris()
X = iris.data
Y = iris.target

# (VarianceThreshold(0.8 * (1 - 0.8)) was tried here originally but is
# left disabled.)

# Univariate selection: keep the 2 best features, first ranked by the
# chi-squared statistic, then by the ANOVA F-score.
for score_func in (chi2, f_classif):
    reduced = SelectKBest(score_func, k=2).fit_transform(X, Y)
    print(X.shape, reduced.shape)

# Percentile variant: keep the top 75% of features by chi-squared score.
reduced = SelectPercentile(chi2, percentile=75).fit_transform(X, Y)
print(X.shape, reduced.shape)

# Recursive feature elimination down to 3 features, ranked by a decision tree.
treemodel = DecisionTreeClassifier()
selector = RFE(treemodel, 3)
reduced = selector.fit_transform(X, Y)
print(X.shape, reduced.shape)
|
e80685991c7ef7b4cf95f1d4dd78528b43b0239a
|
Python
|
mcuv3/recipie-api-django
|
/app/core/test/test_models.py
|
UTF-8
| 1,313
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
from django.contrib.auth import get_user_model
from django.test import TestCase
class ModelTest(TestCase):
    """Unit tests for the custom user model manager."""

    def test_create_user_with_email_successful(self):
        """A user can be created from an email and a password."""
        email = "test@mcuve.com"
        password = "123456"
        created = get_user_model().objects.create_user(
            email=email, password=password)

        self.assertEqual(created.email, email)
        self.assertTrue(created.check_password(password))

    def test_new_user_email_normalized(self):
        """The email address of a new user is normalised."""
        email = 'test@SDFSD.COM'
        created = get_user_model().objects.create_user(email, '2233433')

        self.assertEqual(created.email, email.lower())

    def test_new_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            # create_user must reject a missing email by raising,
            # otherwise this test fails.
            get_user_model().objects.create_user(None, '21233')

    def test_create_new_superuser(self):
        """create_superuser sets both the superuser and staff flags."""
        created = get_user_model().objects.create_superuser(
            "email@email.com", '232311')

        self.assertTrue(created.is_superuser)
        self.assertTrue(created.is_staff)
|
d968710afe83a84f2d8736d575c8b0e54ade95bb
|
Python
|
infotechaji/GeneralCodes
|
/Other-Modules/TimeDelta.py
|
UTF-8
| 863
| 3.125
| 3
|
[] |
no_license
|
# Demo of datetime.timedelta as a polling deadline: the loop wakes every
# 30 s and, once the 1-minute deadline has passed, re-arms it.
# NOTE: Python 2 syntax (print statements); the loop runs forever
# (stop with Ctrl-C).
import datetime
import time
#from time import sleep

current_time=datetime.datetime.now()
print 'current time :',current_time

# The timeout window: one minute from "now".
# (The label below says "minutes=5" but the delta is actually 1 minute.)
timeout=datetime.timedelta(minutes=1)# seconds=1
print 'datetime.timedelta(minutes=5) :',timeout
timeout_expiration = datetime.datetime.now() + timeout
print 'next time out expiration :',timeout_expiration

while True:
    # NOTE(review): current_time is sampled *before* the sleep, so the
    # deadline check below lags by up to 30 seconds.
    current_time=datetime.datetime.now()
    print 'current time :',current_time
    print ('sleeeping 30 seconds ...............')
    time.sleep(30)
    # Deadline reached? Re-arm it one minute from now.
    if current_time>timeout_expiration:
        print 'time crossed :!!'
        timeout=datetime.timedelta(minutes=1)
        timeout_expiration = datetime.datetime.now() + timeout
        print 'next time out expiration :',timeout_expiration

# Leftover examples of comparing an elapsed delta against thresholds:
# if elapsed > datetime.timedelta(minutes=1):
#     print "Slept for > 1 minute"
# if elapsed > datetime.timedelta(seconds=1):
#     print "Slept for > 1 second"
| true
|
9acdf09e5472dc68550696e775a7969603f9d467
|
Python
|
thecodevillageorg/intro-to-python
|
/Python Day 8/for_loops.py
|
UTF-8
| 2,031
| 4.96875
| 5
|
[] |
no_license
|
# Loops — teaching script: each section demonstrates one for-loop feature.
"""
Loops in Python - "for loop" and "while loop"
Loops allow us to rerun the same lines of code several times.
Loops will always run until a condition is met.
Each time a loop runs is known as iteration.
"""
# For Loop - used to loop a set number of times
"""
syntax:
for num in range(5):
range(start,stop,step) (0,100+1,2)
range(0,5,1)
range(6)
range(2,10)
for - keyword that begins loop
num - temp variable, also known as counter or index
in - keyword
range function - allows us to count from one number to another while being able to define
the start and end
"""
# ex: prints Value: 0 .. Value: 5 (range stops BEFORE 5+1 = 6)
for num in range(5+1): # range counts up to but not including
    print(f"Value: {num}")

# The loop variable need not be used in the body: ten greetings.
for greeting in range(10):
    print("hello world")

# Even numbers 0..100 (step 2).
for i in range(0,101,2):
    print(i)

# Multiples of 5 from 0..100 (step 5).
for j in range(0,101,5):
    print(j)

# Looping By Element strings, lists, dictionaries, sets
"""
When working with data types that are iterable, meaning they have a collection of
elements that can be looped over, we can write the for loop differently:
"""
# Iterates character by character, including the space.
name = "John Smith"
for letter in name:
    print(f"Value: {letter}")

# Continue Statement
"""
Once a continue statement is hit, the current iteration stops and goes back to the top of
the loop.
# """
# Prints 0 1 2 4 5 6 — the 3 is skipped.
for num in range(7):
    if num == 3:
        continue
    print(num)

# Break Statement
"""
It allows us to break out of a loop at any point in time
# """
# Prints 0 1 2 — the loop ends entirely when num reaches 3.
for num in range(5):
    if num == 3:
        break
    print(num)

# Pass Statement
"""
The pass statement is simply just a placeholder so that the program does not break
"""
for i in range(5):
    # TODO: add code to print number
    pass

"""
Note: Using "TODO" is a general practice for setting a reminder
"""

# Nested loop (x,y)
"""
A loop within a loop. When using nested loops, the inner loop must always finish running,
before all outer loop can continue.
"""
# 2 * 3 * 3 = 18 lines printed in total.
for x in range(2): # outer loop 0 1
    for y in range(3): # inner loop 0 1 2
        for z in range(3): # innermost loop 0 1 2
            print(f"X: {x}, Y: {y} Z: {z}")
| true
|
bc82463970195798588581474431badef4d8057a
|
Python
|
abndnc/CS5590-Python-DeepLearning
|
/ICP5/ICP5-2.py
|
UTF-8
| 1,346
| 3.296875
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.metrics import mean_squared_error

# Load the red-wine quality dataset (CSV with a numeric "quality" target).
train2 = pd.read_csv('winequality-red.csv')

# Visualise the correlation matrix to spot the features most related to
# quality, then print the top 5 correlations with the target.
corr = train2.corr()
plt.imshow(corr, cmap="YlGnBu")
plt.colorbar()
plt.xticks(range(len(corr)),corr.columns, rotation=20)
plt.yticks(range(len(corr)),corr.index)
plt.show()
print(corr['quality'].sort_values(ascending=False)[:5], '\n')

# Report missing values per column.
nulls = pd.DataFrame(train2.isnull().sum().sort_values(ascending=False))
nulls.columns = ['Null Count']
nulls.index.name = 'Features'
print(nulls)

# Interpolate away nulls in the numeric columns, then confirm none remain.
data = train2.select_dtypes(include=[np.number]).interpolate().dropna()
print()
print('Now, total nulls in data is: ', sum(data.isnull().sum() != 0))

# Model log(quality) as a linear function of the remaining features.
y = np.log(train2.quality)
x = data.drop(['quality'], axis=1)

# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42, test_size=.2)

# Fit an ordinary least-squares linear regression.
lrl = linear_model.LinearRegression()
model = lrl.fit(x_train, y_train)

# R^2 on the held-out split.
print('R^2 value is:',model.score(x_test, y_test))

# RMSE on the held-out split.
# Bug fix: mean_squared_error returns the *MSE*; take its square root so
# the printed number actually matches the "RMSE" label.
pred = model.predict(x_test)
print('RMSE value is: ', np.sqrt(mean_squared_error(y_test, pred)))
| true
|