blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d2cecff2220496aae14bd317c88fcdab4bd3d7f2 | Python | piratejon/kernelhopping | /parseko.py | UTF-8 | 344 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python
from bs4 import BeautifulSoup
import sqlite3
import sys
def get_kernel_path():
    """Parse an HTML page read from stdin and return the relative URL of the
    "latest" kernel link (the anchor inside the <td id="latest_link"> cell).

    Fixes: dropped the unused ``version_string`` local and the redundant
    parentheses around the return value.
    """
    soup = BeautifulSoup(sys.stdin.read())
    latest_link = soup.find("td", id="latest_link")
    return latest_link.a['href']
if __name__ == '__main__':
    # Script entry point: read HTML from stdin, print the latest kernel URL.
    print(get_kernel_path())
| true |
f495c81a73d985d72e985b89b89ff4718d060a3d | Python | shenme123/NaturalLanguageProcessing | /PCFG parsing model/question6_2.py | UTF-8 | 5,719 | 2.640625 | 3 | [] | no_license | import json
from collections import defaultdict
def read_counts(file):
    """Parse the counts file produced by count_cfg_freq.py.

    Returns three dicts: nonterminal -> count, (X, word) -> count for
    unary rules, and (X, Y1, Y2) -> count for binary rules.
    The input file object is closed before returning.
    """
    nonterm = defaultdict(int)
    unary = defaultdict(int)
    binary = defaultdict(int)
    for raw in file:
        stripped = raw.strip()
        if not stripped:
            continue
        parts = stripped.split()
        count, kind = int(parts[0]), parts[1]
        if kind == "NONTERMINAL":
            nonterm[parts[2]] = count
        elif kind == "UNARYRULE":
            unary[(parts[2], parts[3])] = count
        elif kind == "BINARYRULE":
            binary[(parts[2], parts[3], parts[4])] = count
    file.close()
    return nonterm, unary, binary
def calc_q(nonterm, unary, binary):
    """Maximum-likelihood rule probabilities from raw counts.

    q(X -> w) = count(X -> w) / count(X), and likewise for binary rules.
    """
    q_unary = {rule: float(count) / nonterm[rule[0]]
               for rule, count in unary.items()}
    q_binary = {rule: float(count) / nonterm[rule[0]]
                for rule, count in binary.items()}
    return q_unary, q_binary
def count_freqs(test_file, words_nonrare):
    """Read the test corpus, replacing rare/unseen words with "_RARE_".

    Returns (word counts of the revised corpus, original sentences,
    revised sentences). The file object is closed before returning.
    """
    counts_test = defaultdict(int)
    sentences_orig = []
    sentences_rev = []
    for raw in test_file:
        if not raw.strip():
            continue
        original = raw.strip().split()
        sentences_orig.append(original)
        # Any word outside the non-rare vocabulary becomes the _RARE_ token.
        revised = [w if w in words_nonrare else "_RARE_" for w in original]
        for w in revised:
            counts_test[w] += 1
        sentences_rev.append(revised)
    test_file.close()
    return counts_test, sentences_orig, sentences_rev
def readin_words_nonrare(file):
    """Read the non-rare word list (one word per line, blanks skipped)
    into a set; the file object is closed before returning."""
    words_nonrare = {line.strip() for line in file if line.strip()}
    file.close()
    return words_nonrare
def pcfg(nonterm, sen_rev, q_unary, q_binary, bi_rules, sen_orig):
    """CKY parsing under a PCFG; prints each best parse tree as JSON.

    pis[(i, j, X)] holds the max probability of any parse of words i..j
    (1-based, inclusive) rooted at nonterminal X; bps holds the matching
    back pointers (Y, Z, split). Scoring uses the rare-word-substituted
    sentences (sen_rev); emitted trees carry the original words (sen_orig).

    Fixes: the fallback-root search now updates max_prob (it previously
    never did, so the *last* parseable root won instead of the best one),
    and the Python-2 print statement is written in a form valid in both
    Python 2 and 3.
    """
    for n in range(len(sen_rev)):
        sen = sen_rev[n]
        # initialization: unary (lexical) rules fill the table diagonal
        pis = {}
        bps = {}
        length = len(sen)
        for i in range(1, length + 1):
            for X in nonterm:
                if (X, sen[i - 1]) in q_unary:
                    pis[(i, i, X)] = q_unary[(X, sen[i - 1])]
        # main CKY recursion over increasing span lengths
        for l in range(1, length):
            for i in range(1, length - l + 1):
                j = i + l
                for X in nonterm:
                    if X in bi_rules:
                        max_pi = 0
                        max_bp = ()
                        for YZ in bi_rules[X]:
                            for s in range(i, j):
                                if (i, s, YZ[0]) in pis and (s + 1, j, YZ[1]) in pis:
                                    pi = q_binary[(X,) + YZ] * pis[(i, s, YZ[0])] * pis[(s + 1, j, YZ[1])]
                                    if pi > max_pi:
                                        max_pi = pi
                                        max_bp = YZ
                                        max_s = s
                        if max_pi != 0:
                            pis[(i, j, X)] = max_pi
                            bps[(i, j, X)] = max_bp + (max_s,)
        # back track to recover the best parse tree
        if (1, length, 'S') in pis:
            tree = gen_tree(1, length, bps, sen_orig[n], 'S')
        else:
            # No parse rooted at 'S': fall back to the most probable root.
            max_prob = 0
            max_X = ''
            for X in nonterm:
                if (1, length, X) in pis and pis[(1, length, X)] > max_prob:
                    max_prob = pis[(1, length, X)]  # bug fix: track the running max
                    max_X = X
            tree = gen_tree(1, length, bps, sen_orig[n], max_X)
        print(json.dumps(tree))
def gen_tree(start, end, bps, sen_orig, X):
    """Rebuild the parse tree for words start..end rooted at X from the
    back-pointer table; leaves carry words from the original sentence."""
    if start == end:
        return [X, sen_orig[start - 1]]
    left_label, right_label, split = bps[(start, end, X)]
    return [X,
            gen_tree(start, split, bps, sen_orig, left_label),
            gen_tree(split + 1, end, bps, sen_orig, right_label)]
def get_bi_rules(q_binary):
    """Group the binary rules by parent nonterminal.

    Returns {X: [(Y1, Y2), ...]} for every rule X -> Y1 Y2 in q_binary.
    """
    bi_rules = {}
    for parent, left, right in q_binary:
        bi_rules.setdefault(parent, []).append((left, right))
    return bi_rules
# read in files: counts, test data, non-rare word list
# NOTE: these run at import time; the helper functions close the files.
count_file = open("cfg_vert_rare.counts")
test_file = open("parse_dev.dat")
nonrare_file = open("words_nonrare_vert", 'r')
words_nonrare = readin_words_nonrare(nonrare_file)
# nonterm: {X:count} unary: {(X,word):count} binary: {(X,Y1,Y2):count}
nonterm, unary, binary = read_counts(count_file)
# q_unary: {(X,word):prob} q_binary: {(X,Y1,Y2):prob}
q_unary, q_binary = calc_q(nonterm, unary, binary)
bi_rules = get_bi_rules(q_binary)
# count freqs in test data, and replace rare words in sentences
# counts_test: {word:counts} sentences_org/sentences_rev: [[words],]
counts_test, sentences_orig, sentences_rev = count_freqs(test_file, words_nonrare)
# Parse every sentence and print the best trees as JSON (one per line).
pcfg(nonterm, sentences_rev, q_unary, q_binary, bi_rules, sentences_orig)
| true |
fd4fdc7c11d99a2c4c74fc03ed9d4b5e681cfb5d | Python | qzson/Study | /keras/keras63_fashion_imshow.py | UTF-8 | 511 | 2.578125 | 3 | [] | no_license | # 과제 1
import numpy as np
from keras.datasets import fashion_mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, LSTM, Conv2D
from keras.layers import Flatten, MaxPooling2D, Dropout
import matplotlib.pyplot as plt
# Load the Fashion-MNIST dataset and display the first training image.
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train[0])  # raw pixel values of the first training sample
print('y_train[0] : ', y_train[0])  # its class label
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
plt.imshow(x_train[0])  # render the first sample as an image
plt.show()
ae8ff06bd652e46ac81aacbe927e49296c7c5136 | Python | Sami1309/OneLinePython | /953.py | UTF-8 | 221 | 3.234375 | 3 | [] | no_license | # https://leetcode.com/problems/verifying-an-alien-dictionary/
class Solution:
    def isAlienSorted(self, words, order: str) -> bool:
        """Return True iff *words* is sorted under the alien alphabet *order*."""
        # Translate each word into its sequence of alphabet positions, then
        # verify every adjacent pair is in non-decreasing order.
        keys = [[order.index(ch) for ch in word] for word in words]
        return all(a <= b for a, b in zip(keys, keys[1:]))
| true |
f7e9c93b8657b4a2732d28a55040dc902ea62ef6 | Python | lisa0826/pyDataAnalysis | /02/ex-week2.py | UTF-8 | 5,862 | 4 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# flag = False
# name = 'python'
# if name == 'python':
# flag = True
# print('welcome boss')
# else:
# print(name)
# num = 2
# if num == 3: # 判断num的值
# print('boss')
# elif num == 2:
# print('user')
# elif num == 1:
# print('worker')
# elif num < 0: # 值小于零时输出
# print('error')
# else:
# print('roadman') # 条件均不成立时输出
# num = 9
# if num >= 0 and num <= 10: # 判断值是否在0~10之间
# print('hello')
# num = 10
# if num < 0 or num > 10: # 判断值是否在小于0或大于10
# print('hello')
# else:
# print('undefine')
# num = 8
# #判断值是否在0~5或者10~15之间
# if (num >= 0 and num <= 5) or (num >= 10 and num <= 15):
# print('hello')
# else:
# print('undefine')
# var = 100
# if (var == 100):print('变量var的值为100')
# print('Good bye!')
#while语句
'''
while 判断条件:
执行语句……
'''
# count = 0
# while (count < 9):
# print('The count is:', count)
# count = count + 1
# print("Good bye!")
# continue 和 break 用法
# i = 1
# while i < 10:
# i += 1
# if i%2 > 0: # 非双数时跳过输出
# continue
# print(i) # 输出双数2、4、6、8、10
# i = 1
# while 1: # 循环条件为1必定成立
# print(i) # 输出1~10
# i += 1
# if i > 10: # 当i大于10时跳出循环
# break
#while … else
# count = 0
# while count < 5:
# print(count, " is less than 5")
# count = count + 1
# else:
# print(count, " is not less than 5")
#简单语句组
# flag = 1
# while (flag): print('Given flag is really true!'); #无限循环
# flag=0;
# print("Good bye!")
# for letter in 'Python':
# print('当前字母 :', letter)
# fruits = ['banana', 'apple', 'mango']
# for fruit in fruits:
# print('当前水果 :', fruit)
# print("Good bye!")
#序列索引迭代
# fruits = ['banana', 'apple', 'mango']
# for index in range(len(fruits)):
# print('当前水果 :', fruits[index], index)
# print("Good bye!")
#for...else
# for num in range(10,20): # 迭代 10 到 20 之间的数字
# for i in range(2,num): # 根据因子迭代
# if num%i == 0: # 确定第一个因子
# j=num/i # 计算第二个因子
# print('%d 等于 %d * %d' % (num,i,j))
# break # 跳出当前循环
# else: # 循环的 else 部分
# print(num, '是一个质数')
#嵌套
# i = 2
# while(i < 100): #循环,判断100以内的素数
# j = 2
# while(j <= (i/j)):
# if not(i%j): break
# j = j + 1
# if (j > i/j) : print(i, " 是素数")
# i = i + 1
# print("Good bye!")
#break语句
# for letter in 'Python': # for循环的break语句
# if letter == 'h':
# break
# print('Current Letter :', letter)
# var = 10
# while var > 0: # while循环的break语句
# print('Current variable value :', var)
# var = var -1
# if var == 5:
# break
# print("Good bye!")
#continue语句
# for letter in 'Python': # 对应break的第一个例子
# if letter == 'h':
# continue
# print('当前字母 :', letter)
# var = 10 # 对应break的第二个例子
# while var > 0:
# var = var -1
# if var == 5:
# continue
# print('当前变量值 :', var)
# print("Good bye!")
#pass语句
# 输出 Python 的每个字母
# for letter in 'Python':
# if letter == 'h':
# pass
# print('这是 pass 块')
# print('当前字母 :', letter)
# print("Good bye!")
#时间与日期
# import time; # This is required to include time module.
# ticks = time.time()
# print("Number of ticks since 12:00am, January 1, 1970:", ticks)
# localtime = time.localtime(time.time())
# print("Local current time :", localtime)
# localtime = time.asctime( time.localtime(time.time()) )
# print("Local current time :", localtime)
# import calendar
# cal = calendar.month(2008, 1)
# print("Here is the calendar:")
# print(cal);
#函数调用
# def printme( str ):
# "打印传入的字符串到标准显示设备上"
# print(str)
# return
# #函数调用
# printme("我要调用用户自定义函数!");
# printme("再次调用同一函数");
# 可写函数说明
# def changeme(mylist):
# "修改传入的列表"
# mylist.append([1,2,3,4]);
# print("函数内取值:",mylist)
# return
# mylist = [10,20,30];
# changeme(mylist);
# print("函数外取值:",mylist)
#定义函数
# def printinfo( name, age ):
# "打印任何传入的字符串"
# print("Name: ", name)
# print("Age ", age)
# return;
# #调用函数
# printinfo( age=50, name="miki" ); #python内函数参数的位置可以不用对应
# def printinfo( arg1, *vartuple ):
# "打印任何传入的参数"
# print("输出: ")
# print(arg1)
# for var in vartuple:
# print(var)
# return;
# # 调用printinfo 函数
# printinfo( 10 );
# printinfo( 70, 60, 50 );
#匿名函数
'''
lambda [arg1 [,arg2,.....argn]]:expression
'''
# sum = lambda arg1, arg2: arg1 + arg2;
# # 调用sum函数
# print("相加后的值为 : ", sum( 10, 20 ))
# print("相加后的值为 : ", sum( 20, 20 ))
# return语句
# def sum( arg1, arg2 ):
# total = arg1 + arg2
# print("函数内 : ", total)
# return total;
# 调用sum函数
# total = sum( 10, 20 );
# print("函数外 : ", total)
#变量的作用范围
# total = 0; # 这是一个全局变量
# # 可写函数说明
# def sum( arg1, arg2 ):
# #返回2个参数的和."
# total = arg1 + arg2; # total在这里是局部变量.
# print("函数内是局部变量 : ", total)
# return total;
# #调用sum函数
# sum( 10, 20 );
# print("函数外是全局变量 : ", total)
| true |
402f4604090eabef6ebf79387fc75a06ce0c8b9c | Python | wonkodv/s5 | /s5/shared/util.py | UTF-8 | 2,732 | 3.03125 | 3 | [] | no_license | """
Helpers that do not belong anywhere else
"""
import sqlite3
def groupwiseIterator(iterable, n):
    """Yield sub-iterators that each produce at most n values of *iterable*.

    list(map(list, groupwiseIterator(range(5), 2))) -> [[0, 1], [2, 3], [4]]

    Fix: on Python 3.7+ (PEP 479) a StopIteration raised by ``next()``
    inside a generator is converted to RuntimeError, so the final partial
    group used to crash instead of ending early. The StopIteration is now
    caught explicitly.
    """
    it = iter(iterable)
    r = range(1, n)

    def subIt(first):
        yield first
        for _ in r:
            try:
                yield next(it)
            except StopIteration:
                return  # source exhausted mid-group: end this group cleanly

    # take the first of every n elements out here, give it to subIt to yield it
    for first in it:
        yield subIt(first)
def addAttribute(attr, val):
    """Decorator factory that sets attribute *attr* to *val* on the
    decorated function, e.g.::

        @addAttribute("paramType", int)
        def a(x): ...
        # a.paramType == int
    """
    def decorator(func):
        setattr(func, attr, val)
        return func
    return decorator
class CommonDatabase:
    """ Basic Wrapper for SQLite Database """

    def __init__(self, databasefile):
        # PARSE_DECLTYPES | PARSE_COLNAMES lets sqlite3 convert declared
        # column types back into Python objects on fetch.
        self.db = sqlite3.connect(
            str(databasefile),
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        self.db.row_factory = sqlite3.Row
        # Re-export the connection's cursor/commit as instance attributes.
        self.cursor = self.db.cursor
        self.commit = self.db.commit

    def __enter__(self, *args):
        # Delegate context management to the underlying connection
        # (a connection used as a context manager wraps a transaction).
        return self.db.__enter__(*args)

    def __exit__(self, *args):
        return self.db.__exit__(*args)

    def close(self):
        # Close the underlying connection.
        self.db.close()

    def setSetting(self, key, value):
        """ Sets a setting"""
        cur = self.db.cursor()
        try:
            # "with self.db" commits on success / rolls back on error.
            with self.db:
                cur.execute("""
                INSERT OR REPLACE INTO
                setting (key,value)
                VALUES
                (?,?)""",
                (key, value))
        finally:
            cur.close()

    def getSettingDef(self, key, default):
        # Like getSetting, but returns *default* when the key is missing.
        try:
            return self.getSetting(key)
        except KeyError:
            return default

    def getSetting(self, key):
        # Look up a single setting; raises KeyError when the key is absent.
        cur = self.db.cursor()
        try:
            cur.execute(
                "SELECT value FROM setting WHERE key = :key", {"key": key})
            r = cur.fetchall()
            if len(r) == 1:
                return r[0]['value']
            # key is the primary key, so 0 or 1 rows are the only valid outcomes
            assert len(r) == 0
        finally:
            cur.close()
        raise KeyError("No Setting: " + key)

    def createDatabase(self):
        # Create the schema (a single key/value settings table).
        with self.db:
            self.db.executescript("""
            CREATE TABLE setting (
            key STRING PRIMARY KEY,
            value STRING
            );
            """)
def fileSizeFormat(s):
    """Format a byte count as a short human-readable string.

    Each unit is used while the value stays below 2000 of that unit
    (e.g. 1999B, then 1KB upward).

    Fix: sizes of 2000 TB or more previously fell off the end of the loop
    and implicitly returned None; they are now rendered in petabytes.
    """
    x = 1
    for m in 'B', 'KB', 'MB', 'GB', 'TB':
        if s < 2000 * x:
            return "%d%s" % (s // x, m)
        x = x * 1024
    return "%dPB" % (s // x)
| true |
3e7e687dd676346ca49b7e0d8d5bfbbfb1985000 | Python | gferreira/zdogpy | /Demos/not-working/burger.py | UTF-8 | 1,906 | 2.59375 | 3 | [
"MIT"
] | permissive | ### BUGGY !!!
from importlib import reload
import zDogPy.boilerplate
reload(zDogPy.boilerplate)
import zDogPy.illustration
reload(zDogPy.illustration)
import zDogPy.anchor
reload(zDogPy.anchor)
import zDogPy.hemisphere
reload(zDogPy.hemisphere)
from zDogPy.boilerplate import TAU
from zDogPy.illustration import Illustration
from zDogPy.anchor import Anchor
from zDogPy.rect import Rect
from zDogPy.ellipse import Ellipse
from zDogPy.hemisphere import Hemisphere
from zDogPy.cylinder import Cylinder
# Color palette for the burger model.
yellow = '#ED0'
gold = '#EA0'
orange = '#E62'
garnet = '#C25'
I = Illustration()
I.setSize(400, 400)
# Root anchor for the whole burger, tipped a quarter turn on x so the
# stacked shapes (built along z) face the camera.
burger = Anchor(addTo=I,
    translate={ 'y': 0 },
    rotate={ 'x' : TAU / 4 }
    )
topBun = Hemisphere(
    addTo=burger,
    diameter=96,
    translate={ 'z' : 0 },
    stroke=14,
    fill=True,
    color=orange)
cheese = Rect(
    addTo=burger,
    width=92,
    height=92,
    translate={ 'z' : 25 },
    stroke=16,
    color=yellow,
    fill=True)
patty = Ellipse(
    addTo=burger,
    diameter=96,
    stroke=32,
    translate={ 'z' : 53 },
    color=garnet,
    fill=True)
bottomBun = Cylinder(
    addTo=burger,
    diameter=topBun.diameter,
    length=16,
    translate={ 'z': 90 },
    stroke=topBun.stroke,
    color=topBun.color)
# NOTE(review): the sesame-seed code below is still the original JavaScript
# from the upstream zdog demo and has not been ported to zDogPy yet.
# // var seedAnchor = new Zdog.Anchor({
# //   addTo: burger,
# //   translate: topBun.translate,
# // });
# var seedAnchor = new Zdog.Anchor({
#   addTo: topBun,
# });
# var seedZ = ( topBun.diameter + topBun.stroke ) / 2 + 1;
# // seed
# new Zdog.Shape({
#   addTo: seedAnchor,
#   path: [ { y: -3 }, { y: 3 } ],
#   translate: { z: seedZ },
#   stroke: 8,
#   color: gold,
# });
# seedAnchor.copyGraph({
#   rotate: { x: 0.6 },
# });
# seedAnchor.copyGraph({
#   rotate: { x: -0.6 },
# });
# seedAnchor.copyGraph({
#   rotate: { y: -0.5 },
# });
# seedAnchor.copyGraph({
#   rotate: { y: 0.5 },
# });
I.showInterface()
I.updateRenderGraph()
| true |
3660f79a15e86df46f3135e71f581591c0b019a1 | Python | qbig/bio-course | /course0/gibsSampling.py | UTF-8 | 3,420 | 2.890625 | 3 | [] | no_license | import sys # you must import "sys" to read from STDIN
import random as rd
lines = sys.stdin.read().splitlines() # read in the input from STDIN
# First line: k (motif length), t (number of sequences), N (Gibbs iterations);
# every remaining line is one DNA string.
k, t, N = [int(i) for i in lines[0].split()]
DNAs = lines[1:]
def hamming(x, y):
    """Hamming distance: number of positions where x and y differ
    (x's length drives the scan)."""
    return sum(1 for i in range(len(x)) if x[i] != y[i])
def getConsensus(motifs):
    """Return the consensus string of equal-length motifs.

    At each position the most frequent base wins; ties are broken in the
    fixed order A, T, G, C (matching the original if/elif cascade).
    """
    width = len(motifs[0])
    profile = {base: [0.0] * width for base in "ACGT"}
    for motif in motifs:
        for pos, base in enumerate(motif):
            profile[base][pos] += 1
    order = ('A', 'T', 'G', 'C')
    # max() keeps the earliest of equally-counted bases in `order`.
    return "".join(max(order, key=lambda b: profile[b][pos])
                   for pos in range(width))
def profileScore(kmer, profile):
    """Probability of *kmer* under a column-wise profile: the product of
    the per-position base probabilities."""
    score = 1
    for prob in (profile[c][i] for i, c in enumerate(kmer)):
        score *= prob
    return score
def findMotif(dna, k, profile):
    """Return the highest-scoring k-mer window of *dna* under *profile*.

    Ties keep the leftmost window; when no window exists, "" is returned.
    """
    windows = (dna[i:i + k] for i in range(len(dna) - k + 1))
    return max(windows, key=lambda kmer: profileScore(kmer, profile), default="")
def diffAll(motifs, k):
    """Total hamming distance from the consensus of *motifs* to each motif.

    The *k* argument is unused but kept for call-site compatibility.
    """
    consensus = getConsensus(motifs)
    return sum(hamming(consensus, motif) for motif in motifs)
def randomDistribute(arr):
    """Sample an index of *arr* with probability proportional to its weight."""
    draw = rd.uniform(0, sum(arr))
    cumulative = 0
    for idx, weight in enumerate(arr):
        if cumulative <= draw < cumulative + weight:
            return idx
        cumulative += weight
def profileRandMotif(dna, profile, k):
    """Score every k-mer window of *dna* under *profile*, left to right."""
    scores = []
    for start in range(len(dna) - k + 1):
        scores.append(profileScore(dna[start:start + k], profile))
    return scores
def createProfileMatrix(motifs):
    """Column-wise base frequencies of *motifs* (no smoothing).

    Returns {base: [freq per position]} for bases A, C, G, T.
    """
    width = len(motifs[0])
    total = len(motifs)
    profile = {base: [0.0] * width for base in "ACGT"}
    for motif in motifs:
        for pos, base in enumerate(motif):
            profile[base][pos] += 1
    for base in "ACGT":
        for pos in range(width):
            profile[base][pos] /= total
    return profile
def createProfileMatrixWithLap(motifs):
    """Column-wise base frequencies with Laplace (add-one) smoothing.

    Each count starts at 1 and the denominator is len(motifs) + 4, so no
    probability is ever zero.
    """
    width = len(motifs[0])
    total = len(motifs) + 4
    profile = {base: [1.0] * width for base in "ACGT"}
    for motif in motifs:
        for pos, base in enumerate(motif):
            profile[base][pos] += 1
    for base in "ACGT":
        for pos in range(width):
            profile[base][pos] /= total
    return profile
def randomKmer(dna, k):
    """Return a uniformly random length-k substring of *dna*."""
    start = rd.randint(0, len(dna) - k)
    return dna[start:start + k]
def randomMotifs(DNAs, k):
    """Pick one random k-mer from each DNA string."""
    picks = []
    for dna in DNAs:
        picks.append(randomKmer(dna, k))
    return picks
def gibRandomMotifSearch(DNAs, k, t, N):
    """Gibbs-sampling motif search: N resampling iterations from a random start.

    Each iteration resamples one sequence's motif from the Laplace-smoothed
    profile of the remaining motifs, and the lowest-scoring motif set seen
    is remembered and returned.

    Fix: ``bestMotifs`` used to alias ``motifs``, so every in-place update
    overwrote the recorded best and the tracking was a no-op; snapshots
    (shallow copies) are taken instead.
    """
    motifs = randomMotifs(DNAs, k)
    l = len(DNAs)
    bestMotifs = motifs[:]  # bug fix: snapshot, not an alias
    for i in range(N):
        index = rd.randint(0, l - 1)
        # Profile built from every motif except the one being resampled.
        profile = createProfileMatrixWithLap([mot for j, mot in enumerate(motifs) if j != index])
        new_mot = randomDistribute(profileRandMotif(DNAs[index], profile, k))
        motifs[index] = DNAs[index][new_mot:new_mot + k]
        if diffAll(motifs, k) < diffAll(bestMotifs, k):
            bestMotifs = motifs[:]  # bug fix: snapshot the improvement
    return bestMotifs
# Run the sampler once, then restart 20 more times and keep the best motif
# set seen overall (Gibbs sampling is sensitive to its random start).
# Fix: the Python-2-only print statements are written in the single-argument
# parenthesized form, which behaves identically in Python 2 and 3.
bestMotifs = gibRandomMotifSearch(DNAs, k, t, N)
for motifs in [gibRandomMotifSearch(DNAs, k, t, N) for i in range(20)]:
    if diffAll(motifs, k) < diffAll(bestMotifs, k):
        bestMotifs = motifs
        print(diffAll(motifs, k))
for motif in bestMotifs:
    print(motif)
| true |
cdc373eec3179cc69a11fb51792b26e3a29383c2 | Python | Jasonluo666/LeetCode-Repository | /Python/Daily Temperatures.py | UTF-8 | 455 | 3.015625 | 3 | [] | no_license | class Solution(object):
def dailyTemperatures(self, T):
"""
:type T: List[int]
:rtype: List[int]
"""
ans = [0 for x in T]
stack = []
for index, element in enumerate(T):
while len(stack) > 0 and stack[-1][1] < element:
prev = stack.pop()
ans[prev[0]] = index - prev[0]
stack.append((index, element))
return ans | true |
e45f2f0a21cfa2854e0cb497334761ae19522625 | Python | PabloTabilo/platzi-ia-ml | /05_CursoPOO_y_algo/knaspackProblem.py | UTF-8 | 1,066 | 2.734375 | 3 | [] | no_license |
def recursiveSolve(s, w, v, n):
    """0/1 knapsack by plain recursion.

    Best total value using items w[:n] / v[:n] within capacity s.
    """
    if n == 0 or s == 0:
        return 0
    skip = recursiveSolve(s, w, v, n - 1)
    if w[n - 1] > s:
        return skip  # item n-1 cannot fit at all
    take = v[n - 1] + recursiveSolve(s - w[n - 1], w, v, n - 1)
    return take if take > skip else skip
def solve(s, w, v, n):
    """0/1 knapsack: maximum value of items w[:n]/v[:n] within capacity s.

    Fix: the previous greedy "grow each seed set" heuristic gave wrong
    answers and could raise UnboundLocalError (``t_ac`` was only assigned
    inside a conditional); it also printed a debug table. Replaced with
    the standard one-dimensional dynamic program; same signature and the
    same meaning of the return value (the best achievable total value).
    """
    dp = [0] * (s + 1)  # dp[cap] = best value using items seen so far within cap
    for i in range(n):
        weight, value = w[i], v[i]
        # Iterate capacities downward so each item is used at most once.
        for cap in range(s, weight - 1, -1):
            candidate = dp[cap - weight] + value
            if candidate > dp[cap]:
                dp[cap] = candidate
    return dp[s]
if __name__ == "__main__":
v = [60, 105, 145, 3]
w = [15, 20, 25, 2]
s = 38
#v = [60, 100, 120]
#w = [10, 20, 30]
#s = 50
n = len(v)
print(solve(s, w, v, n)) | true |
8f954526e27656aa1eeaaef6a856afbe2a807274 | Python | satyans24/CodeSprints | /CodeSprint-2012-10-27-(3)/ConcatenatedPalindrome/cp.py | UTF-8 | 1,053 | 2.828125 | 3 | [
"BSD-2-Clause"
] | permissive |
import string
import sys
def addk(t, s):
    """Insert string *s* into the trie *t* (nested dicts keyed by character).

    Fix: ``dict.has_key()`` was removed in Python 3; the ``in`` operator
    is equivalent and works in both Python 2 and 3.
    """
    if s == '':
        return
    c = s[0]
    if c not in t:
        t[c] = {}
    addk(t[c], s[1:])
def palin(s):
    """Return (L, s[:L]) where L is the length of the longest palindromic
    prefix of s.

    Prefix lengths k, k-1, ..., 1 are tried in order and the first prefix
    that reads the same forwards and backwards wins. For an empty string
    the loop never runs and None is returned implicitly.
    """
    k = len(s)
    for i in range(len(s)):
        palin = True
        # Compare mirrored character pairs of the candidate prefix s[0:k-i].
        for j in range(int((k - i) / 2) + 1):
            if s[j] != s[k - i - j - 1]:
                palin = False
                break
        if palin:
            return (k - i), s[0:(k - i)]
def chk(t, s):
    """Walk the trie *t* along *s*, matching characters as long as possible.

    Each matched character contributes 2 to the score (it pairs with its
    mirror from the other string set); once the trie runs out, the rest of
    *s* contributes its longest palindromic prefix via palin(). Returns
    (score, corresponding string).

    Fix: ``dict.has_key()`` was removed in Python 3; ``in`` is used instead.
    """
    if s == '':
        return 0, ''
    c = s[0]
    if c not in t:
        return palin(s)
    cb, cp = chk(t[c], s[1:])
    return 2 + cb, c + cp
def check(ts, ms):
    """Build a trie from *ts*, probe every string of *ms* with chk(), and
    return the best (length, palindrome-string) pair found."""
    trie = {}
    for s in ts:
        addk(trie, s)
    best_len, best_pal = 0, ''
    for s in ms:
        cur_len, cur_pal = chk(trie, s)
        if cur_len > best_len:
            best_len, best_pal = cur_len, cur_pal
    return best_len, best_pal
# NOTE: this script targets Python 2 (print statement, string.strip()).
# Read n strings, evaluate both orientations (originals vs. reversals),
# and print the larger of the two scores.
n, m = map(int, sys.stdin.readline().split(' '))
strs = []
rstrs = []
for i in range(n):
    s = string.strip(sys.stdin.readline())
    strs.append(s)
    rstrs.append(s[::-1])  # store the reversal for the mirrored pass
lb, lp = check(strs, rstrs)
rb, rp = check(rstrs, strs)
print max(lb, rb)
| true |
4af5eb68bb7d781199e41b158e8e59594bafa5f7 | Python | jswilson28/OptimizationApplication | /ScheduleCompilation.py | UTF-8 | 22,309 | 2.65625 | 3 | [] | no_license | # This file contains postalizers (which compile individual plates) and other schedule manipulators and compilers.
from AddressCompilation import ExistingAddressBook, NewAddressBook
from openpyxl import load_workbook
from GeneralMethods import today
import os
def day_name_from_day_num(day_num):
    """Map a weekday number 0-6 to its name, Monday through Sunday."""
    return ("Monday", "Tuesday", "Wednesday", "Thursday",
            "Friday", "Saturday", "Sunday")[day_num]
def get_optidata_day_nums(day_num):
    """Return the optidata minute window (start, end) for weekday day_num.

    Each day shifts the base window [1200, 3360] by 1440 minutes; the
    string "ERROR" is returned for day numbers beyond 6 (kept for
    compatibility with existing callers).
    """
    if day_num > 6:
        return "ERROR"
    offset = day_num * 1440
    return 1200 + offset, 3360 + offset
def get_a_day_string(trips):
    """Decide which weekday columns need printing for a set of trips.

    Builds, for each weekday, the column of per-trip bin_string flags.
    Monday always prints; each later day prints only when its column
    differs from the previous day's. Also reports whether any trip runs
    on Sunday. Returns ([7 print flags], sunday_schedules_exist).
    """
    day_columns = [[trip.bin_string[d] for trip in trips] for d in range(7)]
    print_flags = [True] + [day_columns[d] != day_columns[d - 1]
                            for d in range(1, 7)]
    sun_schedules = "1" in day_columns[6]
    return print_flags, sun_schedules
class Postalizer:
    """Compiles the schedules for one PVS site from one or more readers
    (HCR plates, PVS html files, JDA files), tracks postal compliance, and
    emits optimizer-input and schedule workbooks via openpyxl."""

    def __init__(self, input_passer, pvs_name):
        self.ip = input_passer
        self.pvs_name = pvs_name
        self.short_name = None
        self.pdc_name = None
        self.readers = []
        self.plate_nums = []
        self.html_files = []
        self.jda_files = []
        self.sources = []
        self.schedules = []
        self.output_list = []
        self.round_trips = []
        self.num_round_trips = None
        # get address book
        folder = "Sites"
        sites = [x for x in os.listdir(folder) if x[-4:] == "xlsx"]
        site_names = [x[:-5] for x in sites]
        if self.pvs_name in site_names:
            self.address_book = ExistingAddressBook(self.pvs_name)
        else:
            self.address_book = NewAddressBook(self.pvs_name)
        # NOTE(review): this unconditionally overwrites the branch above,
        # so NewAddressBook is never the final value — confirm intent.
        self.address_book = ExistingAddressBook(self.pvs_name)
        self.set_output_list()

    def set_output_list(self):
        """Rebuild the summary row (output_list) from the current state."""
        source_string = ""
        if len(self.sources) > 0:
            for source in self.sources:
                source_string += source + ", "
            source_string = source_string[:-2]  # trim trailing ", "
        plate_num_string = ""
        if len(self.plate_nums) > 0:
            for plate_num in self.plate_nums:
                plate_num_string += plate_num[:6] + ", "
            plate_num_string = plate_num_string[:-2]  # trim trailing ", "
        num_schedules = len(self.schedules)
        if num_schedules > 0:
            compliant = self.postal_compliant()
            compliant_schedules, non_compliant_schedules = self.count_schedules()
        else:
            compliant = False
            compliant_schedules = 0
            non_compliant_schedules = 0
        self.output_list = [self.short_name, source_string, plate_num_string, num_schedules, compliant,
                            compliant_schedules, non_compliant_schedules, self.num_round_trips]

    def add_reader(self, reader):
        """Merge a reader's schedules into this site, skipping readers for
        other sites and files/plates that were already added."""
        if not reader or reader.pvs_name != self.pvs_name:
            return
        source = reader.source
        if source == "HCR":
            plate_num = reader.plate_number
            if plate_num in self.plate_nums:
                return
            self.pdc_name = reader.pvs_pdc
        elif source == "PVS":
            plate_num = "html"
            file_name = reader.source_file
            if file_name in self.html_files:
                return
            else:
                self.html_files.append(file_name)
            self.pdc_name = reader.pvs_pdc
        elif source == "JDA":
            plate_num = "JDA"
            file_name = reader.source_file
            if file_name in self.jda_files:
                return
            else:
                self.jda_files.append(file_name)
            self.pdc_name = reader.pdc_name
        else:
            # NOTE(review): an unknown source falls through with plate_num
            # unassigned, which would raise NameError below — confirm.
            print("UHOH")
        self.short_name = reader.short_name
        self.sources.append(source)
        self.plate_nums.append(plate_num)
        self.schedules += reader.schedules
        self.compile_round_trips()
        self.set_output_list()

    def postal_compliant(self):
        """Re-check every schedule's compliance; True only if all pass."""
        for schedule in self.schedules:
            schedule.is_postalized = schedule.postal_compliance_check(self.ip)
        for schedule in self.schedules:
            if not schedule.is_postalized:
                return False
        return True

    def count_schedules(self):
        """Return (compliant, non-compliant) schedule counts."""
        compliant = 0
        non_compliant = 0
        for schedule in self.schedules:
            if schedule.is_postalized:
                compliant += 1
            else:
                non_compliant += 1
        return compliant, non_compliant

    def can_postalize(self):
        """True if every schedule could be made compliant."""
        for schedule in self.schedules:
            schedule.cant_postalize = []
            schedule.can_postalize = schedule.postal_compliance_possible(self.ip, self.address_book)
        for schedule in self.schedules:
            if not schedule.can_postalize:
                # print(schedule.schedule_name)
                # print(schedule.cant_postalize)
                return False
        return True

    def postalize_schedules(self):
        """Postalize all schedules (best effort when full compliance is
        impossible), then refresh the summary row."""
        if self.postal_compliant():
            return
        if not self.can_postalize():
            print("We'll do our best!")
        for schedule in self.schedules:
            schedule.postalize(self.ip, self.address_book)
        self.set_output_list()

    def compile_round_trips(self):
        """Flatten every schedule's round trips into self.round_trips."""
        self.round_trips = []
        for schedule in self.schedules:
            for round_trip in schedule.round_trips:
                self.round_trips.append(round_trip)
        self.num_round_trips = len(self.round_trips)

    def print_cplex_scheduler_input(self):
        """Write the CPLEX optimizer input workbooks, one per vehicle type
        (11-Ton and Single), with per-day Optidata/Solution sheets plus
        spotter and holiday trip sheets when those trips exist."""
        eleven_ton_schedule_count, single_schedule_count = self.get_schedule_day_counts()
        # Regular (type 1) selected, non-holiday round trips.
        round_trips_to_print = [x for x in self.round_trips if not x.holiday and x.trip_type == 1 and x.is_selected]
        print("Printing", len(round_trips_to_print), "round trips.")
        eleven_ton = [x for x in round_trips_to_print if x.vehicle_type in ("11-Ton", "11-TON")]
        single = [x for x in round_trips_to_print if x.vehicle_type == "Single"]
        # Spotter (type 3) trips, split the same way.
        spotter_round_trips_to_print = [x for x in self.round_trips if not x.holiday and x.trip_type == 3
                                        and x.is_selected]
        eleven_ton_spotters = [x for x in spotter_round_trips_to_print if x.vehicle_type in ("11-Ton", "11-TON")]
        single_spotters = [x for x in spotter_round_trips_to_print if x.vehicle_type in ("Single", "SINGLE")]
        holiday_round_trips_to_print = [x for x in self.round_trips if x.holiday and x.is_selected]
        eleven_ton_holiday = [x for x in holiday_round_trips_to_print if x.vehicle_type in ("11-Ton", "11-TON")]
        single_holiday = [x for x in holiday_round_trips_to_print if x.vehicle_type in ("Single", "SINGLE")]
        # Number trips 1..n within each group for the optimizer.
        for x, trip in enumerate(eleven_ton):
            trip.optimizer_trip_num = (x + 1)
        for x, trip in enumerate(eleven_ton_spotters):
            trip.optimizer_trip_num = (x + 1)
        for x, trip in enumerate(eleven_ton_holiday):
            trip.optimizer_trip_num = (x + 1)
        for x, trip in enumerate(single):
            trip.optimizer_trip_num = (x + 1)
        for x, trip in enumerate(single_spotters):
            trip.optimizer_trip_num = (x + 1)
        for x, trip in enumerate(single_holiday):
            trip.optimizer_trip_num = (x + 1)
        eleven_ton_day_list, eleven_ton_sun = get_a_day_string(eleven_ton)
        single_day_list, single_sun = get_a_day_string(single)
        eleven_ton_file_name = "OptidataSchedules_" + self.pvs_name + "_11-ton_" + today() + ".xlsx"
        single_file_name = "OptidataSchedules_" + self.pvs_name + "_Single_" + today() + ".xlsx"
        if eleven_ton:
            wb = load_workbook("Optimization Formats/optidata.xlsx")
            source_sheet = wb['Optidata Format']
            source_sheet_two = wb['Solution Format']
            wb.save(eleven_ton_file_name)
            # One Optidata + Solution sheet pair for each day that differs
            # from the previous day's trip pattern.
            for day_num, print_day in enumerate(eleven_ton_day_list):
                if print_day:
                    day_name = day_name_from_day_num(day_num)
                    ws = wb.copy_worksheet(source_sheet)
                    ws.sheet_view.showGridLines = False
                    ws.title = "Optidata " + day_name
                    ws["B2"].value = day_name + " CPLEX Input Table"
                    self.print_one_optidata_day(ws, eleven_ton, day_num, wb)
                    ws = wb.copy_worksheet(source_sheet_two)
                    ws["B2"].value = day_name + " Solution"
                    ws.title = "Solution " + day_name
            # Remove the template sheets now that copies exist.
            wb.remove_sheet(wb['Optidata Format'])
            wb.remove_sheet(wb['Solution Format'])
            ws = wb["Data Format"]
            row = 2
            for trip in eleven_ton:
                trip.print_optidata_data(ws, row)
                row += len(trip.stops)
            ws.title = "Trips"
            ws = wb["Summary"]
            ws["B1"].value = self.pvs_name
            ws["B2"].value = self.pdc_name
            ws["B3"].value = str(eleven_ton_sun)
            for x, day in enumerate(eleven_ton_schedule_count):
                ws["B" + str(x+7)].value = day
            if eleven_ton_spotters:
                ws = wb["Spotter Format"]
                ws.title = "Spotter Trips"
                row = 2
                for trip in eleven_ton_spotters:
                    trip.print_optidata_data(ws, row)
                    row += len(trip.stops)
            else:
                wb.remove_sheet(wb["Spotter Format"])
            if eleven_ton_holiday:
                ws = wb["Holiday Format"]
                ws.title = "Holiday Trips"
                row = 2
                for trip in eleven_ton_holiday:
                    trip.print_optidata_data(ws, row)
                    row += len(trip.stops)
            else:
                wb.remove_sheet(wb["Holiday Format"])
            wb.save(eleven_ton_file_name)
            wb.close()
        if single:
            # Mirror of the 11-Ton branch for Single vehicles.
            wb = load_workbook("Optimization Formats/optidata.xlsx")
            source_sheet = wb['Optidata Format']
            source_sheet_two = wb['Solution Format']
            wb.save(single_file_name)
            for day_num, print_day in enumerate(single_day_list):
                if print_day:
                    day_name = day_name_from_day_num(day_num)
                    ws = wb.copy_worksheet(source_sheet)
                    ws.sheet_view.showGridLines = False
                    ws.title = "Optidata " + day_name
                    ws["B2"].value = day_name + " CPLEX Input Table"
                    self.print_one_optidata_day(ws, single, day_num, wb)
                    ws = wb.copy_worksheet(source_sheet_two)
                    ws["B2"].value = day_name + " Solution"
                    ws.title = "Solution " + day_name
            wb.remove_sheet(wb['Optidata Format'])
            wb.remove_sheet(wb['Solution Format'])
            ws = wb["Data Format"]
            row = 2
            for trip in single:
                trip.print_optidata_data(ws, row)
                row += len(trip.stops)
            ws.title = "Trips"
            ws = wb["Summary"]
            ws["B1"].value = self.pvs_name
            ws["B2"].value = self.pdc_name
            ws["B3"].value = str(single_sun)
            for x, day in enumerate(single_schedule_count):
                ws["B" + str(x+7)].value = day
            if single_spotters:
                ws = wb["Spotter Format"]
                ws.title = "Spotter Trips"
                row = 2
                for trip in single_spotters:
                    trip.print_optidata_data(ws, row)
                    row += len(trip.stops)
            else:
                wb.remove_sheet(wb["Spotter Format"])
            if single_holiday:
                ws = wb["Holiday Format"]
                ws.title = "Holiday Trips"
                row = 2
                for trip in single_holiday:
                    trip.print_optidata_data(ws, row)
                    row += len(trip.stops)
            else:
                wb.remove_sheet(wb["Holiday Format"])
            wb.save(single_file_name)
            wb.close()

    def print_schedules(self):
        """Write the schedule summary / as-read / postalized workbook for
        this site from the ShortScheduleFormat template."""
        file_name = "Output Formats/ShortScheduleFormat.xlsx"
        wb = load_workbook(file_name)
        new_file_name = "Schedules " + str(self.short_name) + ".xlsx"
        wb.save(filename=new_file_name)
        summary_ws = wb["Schedule Summaries"]
        as_read_ws = wb["As Read Schedules"]
        postalized_ws = wb["Postalized Schedules"]
        orig_row = 2
        post_row = 2
        for x, schedule in enumerate(self.schedules):
            schedule.short_print_summary(summary_ws, x + 2)
            schedule.short_print_original(as_read_ws, orig_row)
            orig_row += len(schedule.original_stops)
            schedule.short_print_postalized(postalized_ws, post_row)
            post_row += len(schedule.postalized_stops)
        wb.save(new_file_name)

    def get_schedule_day_counts(self):
        """Count type-1 schedules per weekday, split by vehicle category.

        Returns ([7 counts for 11-Ton], [7 counts for Single]).
        """
        eleven_ton_schedule_count = [0, 0, 0, 0, 0, 0, 0]
        single_schedule_count = [0, 0, 0, 0, 0, 0, 0]
        eleven_ton = [x for x in self.schedules if x.vehicle_category in ("11-TON", "11-Ton", "11-ton")]
        single = [x for x in self.schedules if x.vehicle_category in ("SINGLE", "Single", "single")]
        for schedule in eleven_ton:
            for x in range(0, 7):
                if schedule.schedule_type == 1 and schedule.bin_string[x] in (1, "1"):
                    eleven_ton_schedule_count[x] += 1
        for schedule in single:
            for x in range(0, 7):
                if schedule.schedule_type == 1 and schedule.bin_string[x] in (1, "1"):
                    single_schedule_count[x] += 1
        return eleven_ton_schedule_count, single_schedule_count

    @staticmethod
    def print_one_optidata_day(ws, round_trips, day_num, wb):
        """Fill one Optidata sheet: the day's minute-window header rows,
        then a row per trip running that day, and register a named range
        covering the table for the CPLEX model."""
        low, high = get_optidata_day_nums(day_num)
        rows = []
        low_list = [0, 0, 0, low, low, 0]
        high_list = [0, 999, 0, high, high, 0]
        rows.append(low_list)
        rows.append(high_list)
        trips_to_print = [x for x in round_trips if x.bin_string[day_num] in (1, "1")]
        for trip in trips_to_print:
            rows.append(trip.optidata_row(day_num))
        # Table starts at B7 on the template sheet.
        for x, row in enumerate(rows):
            for y, item in enumerate(row):
                ws.cell(row=x+7, column=y+2).value = item
        max_row = 6 + len(rows)
        ws.sheet_view.showGridLines = False
        wb.create_named_range("TRIPS" + str(day_num+1), ws, "$B$7:$G$" + str(max_row))
class InSourceCompiler:
    """Per-contract insourcing evaluation.

    Pulls together the HCR contract, vehicle contract, parsed plate data
    (hcr_reader) and the cost model, then computes eligibility,
    postalizability, trip counts and costs for a single plate.
    """

    def __init__(self, hcr_contract, vehicle_contract, hcr_reader, input_passer, cost_model):
        """Run the full evaluation pipeline for one contract.

        Args:
            hcr_contract: contract record (may be None); supplies hcr_id,
                area and the annual rate used by get_current_cost().
            vehicle_contract: matching vehicle contract (may be None).
            hcr_reader: parsed plate data exposing .schedules (may be None).
            input_passer: user/config inputs forwarded to schedule checks.
            cost_model: model used by get_costs() to price the contract.
        """
        # this thing needs to compile whether plates are eligible for import, postalizable,
        # and then the cost.
        self.hcr_contract = hcr_contract
        self.cost_model = cost_model
        # Running totals accumulated later by postalize_schedules().
        self.total_postalized_mileage = 0
        self.total_postalized_duration = 0
        self.total_postalized_night_diff = 0
        self.total_calculated_mileage = 0
        self.total_calculated_duration = 0
        self.total_calculated_night_diff = 0
        if hcr_contract:
            self.hcr_id = hcr_contract.hcr_id
            area = hcr_contract.area
        else:
            # No contract record: fall back to the plate number from the reader.
            self.hcr_id = hcr_reader.plate_number
            area = "u/k"  # NOTE(review): `area` is never read again in this chunk -- confirm it is dead.
        self.vehicle_contract = vehicle_contract
        self.hcr_reader = hcr_reader
        self.input_passer = input_passer
        if not self.hcr_reader:
            self.pvs_name = "None"
            self.short_name = "None"
        else:
            self.pvs_name = hcr_reader.pvs_name
            self.short_name = hcr_reader.short_name
        folder = "Sites"
        # Site workbooks on disk; x[-4:] == "xlsx" matches the extension
        # without the dot, and x[:-5] strips ".xlsx" to get the site name.
        sites = [x for x in os.listdir(folder) if x[-4:] == "xlsx"]
        site_names = [x[:-5] for x in sites]
        if self.pvs_name in site_names:
            self.address_book = ExistingAddressBook(self.pvs_name)
        else:
            # presumably NewAddressBook creates the site workbook so the
            # ExistingAddressBook constructed right after can load it -- TODO confirm.
            self.address_book = NewAddressBook(self.pvs_name)
            self.address_book = ExistingAddressBook(self.pvs_name)
        self.costs_enough = True
        # Trip counters filled in by the methods invoked below.
        self.total_trips = 0
        self.insourceble_trips = 0
        self.postalizable_trips = 0
        self.network_trips = 0
        self.one_state_trips = 0
        # Order matters here: get_current_cost() may clear costs_enough,
        # which find_eligibility() then reads.
        self.current_cost = self.get_current_cost()
        self.is_eligible = self.find_eligibility()
        self.is_postalizable = self.can_postalize()
        self.postalize_schedules()
        self.compile_network_trips()
        if self.hcr_contract:
            # Push the accumulated totals back onto the contract record.
            self.hcr_contract.total_calculated_duration = self.total_calculated_duration
            self.hcr_contract.total_postalized_duration = self.total_postalized_duration
            self.hcr_contract.total_calculated_mileage = self.total_calculated_mileage
            self.hcr_contract.total_postalized_mileage = self.total_postalized_mileage
            self.hcr_contract.add_night_diff(self.total_postalized_night_diff, self.total_calculated_night_diff)
        # get_costs() recomputes current_cost alongside the priced scenarios.
        self.current_cost, self.dep_cost, self.full_cost, self.lease_cost, self.dep_cost_postalized, \
            self.full_cost_postalized, self.lease_cost_postalized = self.get_costs()
        # Flat rows consumed by report writers elsewhere.
        self.output_list = [self.short_name, self.hcr_id, self.is_eligible, self.is_postalizable,
                            self.total_trips, self.insourceble_trips, self.postalizable_trips, self.network_trips,
                            self.one_state_trips, self.current_cost, self.full_cost, self.dep_cost, self.lease_cost]
        self.output_list_postalized = [self.short_name, self.hcr_id, self.is_eligible, self.is_postalizable,
                                       self.total_trips, self.insourceble_trips, self.postalizable_trips,
                                       self.network_trips, self.one_state_trips, self.current_cost,
                                       self.full_cost_postalized, self.dep_cost_postalized, self.lease_cost_postalized]
def find_eligibility(self):
    """Mark each schedule's insourcing eligibility and count eligible trips.

    Returns True only when every schedule is eligible (vacuously True for
    an empty schedule list); False when there is no reader at all or when
    the contract failed the minimum-cost gate.
    """
    if not self.hcr_reader:
        return False
    schedules = self.hcr_reader.schedules
    # First pass: per-schedule eligibility from the rule checks.
    for sched in schedules:
        sched.cant_eligible = []
        sched.is_eligible = sched.insource_eligible_check(self.input_passer)
    # Contract-level gate: a too-cheap contract disqualifies every schedule.
    if not self.costs_enough:
        for sched in schedules:
            sched.cant_eligible.append("Minimum cost")
            sched.is_eligible = False
    flags = [sched.is_eligible for sched in schedules]
    self.insourceble_trips += sum(1 for flag in flags if flag)
    return all(flags)
def can_postalize(self):
    """Mark each schedule's postalizability and count the postalizable ones.

    Returns True only when every schedule can be made postal-compliant
    (vacuously True for an empty schedule list); False when there is no
    reader at all.
    """
    if not self.hcr_reader:
        return False
    schedules = self.hcr_reader.schedules
    for sched in schedules:
        sched.cant_postalize = []
        sched.can_postalize = sched.postal_compliance_possible(self.input_passer, self.address_book)
    flags = [sched.can_postalize for sched in schedules]
    self.postalizable_trips += sum(1 for flag in flags if flag)
    return all(flags)
def compile_network_trips(self):
    """Tally total, network, and single-state trips from the reader's schedules.

    Returns False (early) when there is no reader; otherwise only mutates
    the counters and returns None.
    """
    if not self.hcr_reader:
        return False
    schedules = self.hcr_reader.schedules
    self.total_trips += len(schedules)
    self.network_trips += sum(1 for sched in schedules if sched.network_schedule)
    self.one_state_trips += sum(1 for sched in schedules if not sched.cross_state_lines)
def get_current_cost(self):
    """Return the contract's current annual rate, or -1 if records are missing.

    Side effect: clears costs_enough when the rate is at or below 100000.
    """
    if not (self.hcr_contract and self.vehicle_contract):
        return -1
    annual_rate = self.hcr_contract.total_annual_rate
    if annual_rate <= 100000:
        self.costs_enough = False
    return annual_rate
def get_costs(self):
    """Price the contract through the cost model.

    Returns a 7-tuple (current, dep, full, lease, dep_postalized,
    full_postalized, lease_postalized); all -1 sentinels when either
    contract record is missing. Clears costs_enough when the current
    cost is at or below 100000.
    """
    if not self.hcr_contract or not self.vehicle_contract:
        return -1, -1, -1, -1, -1, -1, -1
    self.cost_model.process_contract(self.hcr_id, self.hcr_contract, self.vehicle_contract)
    (current, dep, full, lease,
     dep_post, full_post, lease_post) = self.cost_model.get_table_outputs()
    if current <= 100000:
        self.costs_enough = False
    return current, dep, full, lease, dep_post, full_post, lease_post
def postalize_schedules(self):
    """Postalize every schedule and accumulate duration/mileage/night totals."""
    if not self.hcr_reader:
        return
    # (accumulator attribute on self, source attribute on the schedule)
    total_pairs = (
        ("total_calculated_duration", "annual_calculated_duration"),
        ("total_postalized_duration", "annual_postalized_duration"),
        ("total_calculated_night_diff", "calculated_night_hours"),
        ("total_postalized_night_diff", "postalized_night_hours"),
        ("total_calculated_mileage", "annual_calculated_mileage"),
        ("total_postalized_mileage", "annual_postalized_mileage"),
    )
    for schedule in self.hcr_reader.schedules:
        schedule.postalize(self.input_passer, self.address_book)
        for own_attr, sched_attr in total_pairs:
            setattr(self, own_attr, getattr(self, own_attr) + getattr(schedule, sched_attr))
| true |
9c38667e8dddc8bb4072b458f64d6719fb98ae6f | Python | ljia2/leetcode.py | /solutions/binary.search/878.Nth.Magical.Number.py | UTF-8 | 1,998 | 4.28125 | 4 | [] | no_license | class Solution:
def nthMagicalNumber(self, N, A, B):
"""
A positive integer is magical if it is divisible by either A or B.
Return the N-th magical number. Since the answer may be very large, return it modulo 10^9 + 7.
Example 1:
Input: N = 1, A = 2, B = 3
Output: 2
Example 2:
Input: N = 4, A = 2, B = 3
Output: 6
Example 3:
Input: N = 5, A = 2, B = 4
Output: 10
Example 4:
Input: N = 3, A = 6, B = 4
Output: 8
:type N: int
:type A: int
:type B: int
:rtype: int
4 points to figure out:
1. Get gcd (greatest common divisor) and lcm (least common multiple) of (A, B).
(a, b) = (A, B) while b > 0: (a, b) = (b, a % b)
then, gcd = a and lcm = A * B / a
2. How many magic numbers <= x ?
By inclusion exclusion principle, we have: x / A + x / B - x / lcm
3. Set our binary search range
Lower bound is min(A, B), I just set left = 2.
Upper bound is N * min(A, B), I just set right = 10 ^ 14.
4. binary search, find the smallest x that x / A + x / B - x / lcm = N
"""
l = 2
r = 10 ** 14
mod = 10 ** 9 + 7
lcm = A * B // self.gcd(A, B)
# l and r are searching integer space
# find the smallest number (lower bound) where mid // A + mid // B - mid // lcm = N, similar to bisect_left
while l < r:
mid = (l + r) // 2
tmp = mid // A + mid // B - mid // lcm
if tmp >= N:
r = mid
else:
l = mid + 1
return l % mod
def gcd(self, A, B):
if B == 0:
return A
if A < B:
return self.gcd(B, A)
return self.gcd(B, A % B)
s = Solution()
print(s.nthMagicalNumber(10, 6, 4))
print(s.nthMagicalNumber(1, 2, 3))
print(s.nthMagicalNumber(5, 2, 4)) | true |
e0ba442b37df99162ae13699d257b0a114cb56d6 | Python | baoanh1310/project_euler | /pe001/fast.py | UTF-8 | 612 | 4.34375 | 4 | [] | no_license | """Mathematical approach to solve problem 1."""
def sum_divisible_by(num, upper):
"""Return sum of all divisible number by 'num' below 'upper'"""
return num * (upper // num) * (upper // num + 1) // 2
def multiples_3_or_5(upper):
"""Return the sum of all the multiples of 3 or 5 below upper"""
return sum_divisible_by(3, upper) + sum_divisible_by(5, upper) - sum_divisible_by(15, upper)
if __name__ == "__main__":
import time
start = time.time()
print("Result: {}".format(multiples_3_or_5(1000)))
end = time.time()
print("Mathematical solution time: {}".format(end - start)) | true |
ca229821a61e0fcc2d23d9d0dbaaf71f4fa3743b | Python | ekutukcu/SmartLock | /sensor/calibration.py | UTF-8 | 1,822 | 2.90625 | 3 | [] | no_license | from magnetometer import Magnetometer
from utime import sleep_ms
import ujson
import math
class DataLogger:
"""Logs data from the magenetomer for implementing the calibration levels"""
def __init__(self, magnetometer=None):
if magnetometer==None:
self.magnetometer = Magnetometer()
else:
self.magnetometer = magnetometer
self.x=[]
self.y=[]
self.z=[]
def measure(self, num_readings, time_ms_per_reading=0):
"""time per reading must be more than 7 ms"""
if(time_ms_per_reading>7):
time_sleep = time_ms_per_reading-7
for i in range(num_readings+2):
self.magnetometer.take_measurement()
if i>1:
self.x.append(self.magnetometer.x)
self.y.append(self.magnetometer.y)
self.z.append(self.magnetometer.z)
sleep_ms(time_sleep)
else:
for i in range(num_readings):
self.magnetometer.take_measurement()
if i>1:
self.x.append(self.magnetometer.x)
self.y.append(self.magnetometer.y)
self.z.append(self.magnetometer.z)
def get_json(self):
data["x"]=self.x
data["y"]=self.y
data["z"]=self.z
return ujson.dumps(data)
def std_dev(self):
mean = sum(self.z)/len(self.z)
z = math.sqrt(sum(map(lambda x: (x - mean) * (x - mean), self.z)) / len(self.z))
mean = sum(self.y) / len(self.y)
y = math.sqrt(sum(map(lambda x: (x - mean) * (x - mean), self.y)) / len(self.y))
mean = sum(self.x) / len(self.x)
x = math.sqrt(sum(map(lambda x: (x - mean) * (x - mean), self.x)) / len(self.x))
return (x,y,z)
| true |
0d5350a3f33765b5c84a67457202645e33a18b4a | Python | TozzoL/codecademy-python-project | /run.py | UTF-8 | 388 | 2.53125 | 3 | [] | no_license | from fetch_data import *
#Create a song in the style of Paramore
lyrics('urls.csv', 'lyrics.txt')
new = new_song('lyrics.txt', 3)
print(new)
read_out_loud(new)
#possibly useful commands for debugging
#from pprint import pprint
#pprint(lyrics)
#print(soup.prettify())
#print(soup.find_all(id="lyrics"))
#print(lyrics)
#print(requests.get(url).text) #shows the content of the page
| true |
252c838d06d2ecf07107b6b898072bcb2b15b817 | Python | s-light/OLA_test_pattern_generator | /pattern/gradient_integer.py | UTF-8 | 12,621 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python2
# coding=utf-8
"""
gradient pattern.
generates a test pattern:
gradient
this version only used integer values.
position values 0..1000000 (*A)
color values 0..655535 (16bit)
*A: this is enough for about 5h fade duration at 20ms/50Hz updaterate
for some more information see 'resolution_helper.ods'
history:
see git commits
todo:
~ all fine :-)
"""
import pattern
import array
import colorsys
##########################################
# globals
##########################################
# functions
##########################################
# classes
class Gradient_Integer(pattern.Pattern):
"""Gradient Pattern Class."""
def __init__(self, config, config_global):
"""Init pattern."""
self.config_defaults = {
"cycle_duration": 10000,
"position_current": 0,
"type": "channel",
"stops": [
{
"position": 0,
"red": 65535,
"green": 65535,
"blue": 65535,
},
{
"position": 300000,
"red": 65535,
"green": 0,
"blue": 0,
},
{
"position": 700000,
"red": 0,
"green": 65535,
"blue": 0,
},
{
"position": 1000000,
"red": 0,
"green": 0,
"blue": 65535,
},
],
}
# python3 syntax
# super().__init__()
# python2 syntax
# super(Pattern, self).__init__()
# explicit call
pattern.Pattern.__init__(self, config, config_global)
def _interpolate_channels(self, pixel_position, stop_start, stop_end):
"""Interpolate with channels."""
# print("interpolate_channels")
result = {}
# check for exact match
if pixel_position == stop_start["position"]:
result = stop_start.copy()
else:
# interpolate all colors
for color_name in self.color_channels:
result[color_name] = pattern.map(
pixel_position,
stop_start["position"],
stop_end["position"],
stop_start[color_name],
stop_end[color_name],
)
result["position"] = pixel_position
return result
def _interpolate_hsv(self, pixel_position, stop_start, stop_end):
"""Interpolate with hsv."""
print("interpolate_hsv -- TODO....")
result = {}
# check for exact match
result["red"] = 8000
result["green"] = 0
result["blue"] = 20000
result["position"] = pixel_position
return result
def _calculate_current_channel_values(self, pixel_position):
"""Calculate current pixel values."""
# calculate value:
# input:
# current position
# list of way points
stops_list = self.config["stops"]
result = {}
# print("_calculate_current_channel_values:")
# print("pixel_position:", pixel_position)
# check bounds
if pixel_position <= stops_list[0]["position"]:
# print("min.")
result = stops_list[0].copy()
elif pixel_position >= stops_list[len(stops_list)-1]["position"]:
# print("max.")
result = stops_list[len(stops_list)-1].copy()
else:
# print("search:")
# we search for the correct stops
list_index = 1
while pixel_position > stops_list[list_index]["position"]:
list_index += 1
# now list_index contains the first stop
# where position is < pixel_position
# interpolate between stops:
stop_start = stops_list[list_index-1]
stop_end = stops_list[list_index]
interpolation_type = self.config['type']
if interpolation_type.startswith("hsv"):
result = self._interpolate_hsv(
pixel_position,
stop_start,
stop_end
)
elif interpolation_type.startswith("channels"):
result = self._interpolate_channels(
pixel_position,
stop_start,
stop_end
)
else:
result = self._interpolate_channels(
pixel_position,
stop_start,
stop_end
)
return result
def _calculate_repeat_pixel_index(
self,
pixel_index,
repeate_index,
color_channels_count
):
pixel_offset = (
self.pixel_count *
color_channels_count *
repeate_index
)
local_pixel_index = pixel_offset + (
pixel_index * color_channels_count
)
if self.repeat_snake:
# every odd index
if ((repeate_index % 2) > 0):
# total_pixel_channel_count = (
# self.pixel_count * color_channels_count
# )
# local_pixel_index = local_pixel_index
local_pixel_index = pixel_offset + (
((self.pixel_count - 1) - pixel_index) *
color_channels_count
)
# print("local_pixel_index", local_pixel_index)
return local_pixel_index
def _set_data_output_w_repeat(
self,
data_output,
pixel_index,
channel_values_16bit,
color_channels_count
):
for repeate_index in range(0, self.repeat_count):
local_pixel_index = self._calculate_repeat_pixel_index(
pixel_index,
repeate_index,
color_channels_count
)
# set colors for pixel:
for color_name in self.color_channels:
# get high and low byte
hb = channel_values_16bit[color_name]['hb']
lb = channel_values_16bit[color_name]['lb']
# debug output
# if color_name.startswith("blue"):
# debug_string += (
# "{:>6}: "
# "h {:>3} "
# "l {:>3}".format(
# color_name,
# hb,
# lb
# )
# )
# get channel index with color offset
color_offset = self.color_channels.index(color_name)
if self.mode_16bit:
color_offset = color_offset * 2
# print("color_offset", color_offset)
channel_index = local_pixel_index + color_offset
# write data
if self.mode_16bit:
data_output[channel_index + 0] = hb
data_output[channel_index + 1] = lb
else:
data_output[channel_index + 0] = hb
def _calculate_pixels_for_position(
self,
data_output,
position_current,
color_channels_count
):
for pixel_index in range(0, self.pixel_count):
# map gradient to pixel position
pixel_position_step = 1000000 * pixel_index / self.pixel_count
pixel_position = position_current + pixel_position_step
# check for wrap around
if pixel_position > 1000000:
pixel_position -= 1000000
# print("handle wrap around")
# print("pixel_position", pixel_position)
# calculate current values
channel_values = self._calculate_current_channel_values(
pixel_position
)
# print(channel_values)
# print(
# "pixel_position {:<19}"
# " -> "
# # "channel_values", channel_values
# "pos {:<19}"
# "red {:<19}"
# "green {:<19}"
# "blue {:<19}".format(
# pixel_position,
# channel_values["position"],
# channel_values["red"],
# channel_values["green"],
# channel_values["blue"]
# )
# )
# debug_string = (
# "pixel_position {:<19}"
# " -> "
# # "channel_values", channel_values
# # "pos {:<19}"
# # "red {:<19}"
# # "green {:<19}"
# "blue {:<19}".format(
# pixel_position,
# # channel_values["position"],
# # channel_values["red"],
# # channel_values["green"],
# channel_values["blue"]
# )
# )
channel_values_16bit = {}
# pre calculate 16bit values
for color_name in self.color_channels:
# calculate high and low byte
hb, lb = self._calculate_16bit_values(
channel_values[color_name]
)
values = {}
values['hb'] = hb
values['lb'] = lb
channel_values_16bit[color_name] = values
# print(debug_string)
# print("0:", data_output)
self._set_data_output_w_repeat(
data_output,
pixel_index,
channel_values_16bit,
color_channels_count
)
# print("1:", data_output)
def _calculate_step(self, universe):
"""Calculate single step."""
# prepare temp array
data_output = array.array('B')
# available attributes:
# global things (readonly)
# self.channel_count
# self.pixel_count
# self.repeat_count
# self.repeat_snake
# self.color_channels
# self.update_interval
# self.mode_16bit
# self.values['off']
# self.values['low']
# self.values['high']
# self.config_global[]
# fill array with meaningfull data according to the pattern :-)
# .....
# print("")
position_current = self.config["position_current"]
color_channels_count = len(self.color_channels)
if self.mode_16bit:
color_channels_count = color_channels_count * 2
# in milliseconds
cycle_duration = self.config["cycle_duration"]
# calculate stepsize
# step_count = cycle_duration / update_interval
# cycle_duration = 1000000
# update_interval = position_stepsize
position_stepsize = 1000000 * self.update_interval / cycle_duration
# initilaize our data array to the maximal possible size:
total_channel_count = (
self.pixel_count *
color_channels_count *
self.repeat_count
)
for index in range(0, total_channel_count):
data_output.append(0)
# calculate new position
position_current = position_current + position_stepsize
# check for upper bound
if position_current >= 1000000:
position_current = 0
# write position_current back:
self.config["position_current"] = position_current
# print("position_current", position_current)
# channel_stepsize = color_channels_count
# if self.mode_16bit:
# channel_stepsize = color_channels_count*2
# print("****")
# generate values for every pixel
# this function manipulates data_output.
self._calculate_pixels_for_position(
data_output,
position_current,
color_channels_count
)
return data_output
##########################################
if __name__ == '__main__':
print(42 * '*')
print('Python Version: ' + sys.version)
print(42 * '*')
print(__doc__)
print(42 * '*')
print("This Module has no stand alone functionality.")
print(42 * '*')
##########################################
| true |
30d5c069eda92bdbfb53148734d5b55f3513821a | Python | pdx-1491/stheener-bootcamphw | /Python Homework Repository/PyPoll/main.py | UTF-8 | 3,126 | 3.5 | 4 | [] | no_license | import csv
import os
csvpath = os.path.join('Resources', 'election_data.csv')
# the total amount of rows aka "votes" in the csv
votes = []
# the unique candidates in csv
candidates = []
# the aggregated votes for each candidate
candidate_votes = {}
# percentage of the vote total that each candidate received
candidate_percentages = {}
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvreader)
# bullet 1 - total number of votes cast
for row in csvreader:
votes.append(row[2])
# bullet 2 - complete list of candidates who received votes
# bullet 3 - percentage of votes each candidate won
if row[2] not in candidates:
candidates.append(row[2])
candidate_votes[row[2]] = 1
else:
candidate_votes[row[2]] += 1
# bullet 4 - total number of votes each candidate won
for name in candidate_votes:
calculate_percentage = round(candidate_votes[name]/len(votes)*100, 3)
candidate_percentages[name] = calculate_percentage
# bullet 5 - winner of the election based on popular vote
most_votes = max(candidate_votes, key=candidate_votes.get)
print('Election Results')
print('-----------------------')
print('Total votes: ' + str(len(votes)))
print('-----------------------')
print(candidates[0] + ':' + ' ' + str(candidate_percentages['Khan'])
+ ' ' + '(' + str(candidate_votes['Khan']) + ')')
print(candidates[1] + ':' + ' ' + str(candidate_percentages['Correy'])
+ ' ' + '(' + str(candidate_votes['Correy']) + ')')
print(candidates[2] + ':' + ' ' + str(candidate_percentages['Li'])
+ ' ' + '(' + str(candidate_votes['Li']) + ')')
print(candidates[3] + ':' + ' ' + str(candidate_percentages["O'Tooley"])
+ ' ' + '(' + str(candidate_votes["O'Tooley"]) + ')')
print('-----------------------')
print('Winner: ' + str(most_votes))
print('-----------------------')
# printing a csv
output_path = os.path.join('Analysis', 'results.csv')
with open(output_path, 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['Election Results'])
csvwriter.writerow(['-----------------------'])
csvwriter.writerow(['Total votes: ' + str(len(votes))])
csvwriter.writerow(['-----------------------'])
csvwriter.writerow([candidates[0] + ':' + ' ' + str(candidate_percentages['Khan'])
+ ' ' + '(' + str(candidate_votes['Khan']) + ')'])
csvwriter.writerow([candidates[1] + ':' + ' ' + str(candidate_percentages['Correy'])
+ ' ' + '(' + str(candidate_votes['Correy']) + ')'])
csvwriter.writerow([candidates[2] + ':' + ' ' + str(candidate_percentages['Li'])
+ ' ' + '(' + str(candidate_votes['Li']) + ')'])
csvwriter.writerow([candidates[3] + ':' + ' ' + str(candidate_percentages["O'Tooley"])
+ ' ' + '(' + str(candidate_votes["O'Tooley"]) + ')'])
csvwriter.writerow(['-----------------------'])
csvwriter.writerow(['Winner: ' + str(most_votes)])
csvwriter.writerow(['-----------------------'])
| true |
66f7c15e057053524cf78417337e95a6f2f45f42 | Python | PolinaToivonen/ITMO_ICT_WebDevelopment_2020-2021 | /students/K33422/Toivonen_Polina/task3_server.py | UTF-8 | 584 | 2.8125 | 3 | [
"MIT"
] | permissive | import socket
import codecs
server = socket.socket()
host = '127.0.0.1'
port = 14900
server.bind((host, port))
server.listen(5)
print('Entering infinite loop; hit Ctrl+C to exit')
while True:
client, (client_host, client_port) = server.accept()
print('Got connection from', client_host, client_port)
response_type = 'HTTP/1.0 200 Ok\n'
headers = 'Content-Type: text/html\n\n'
page = codecs.open("index.html")
body = ''.join(page)
response = response_type + headers + body
client.sendall(response.encode('utf-8'))
client.close() | true |
135230b04f9854ec240b8bd5a048caa2d75deda6 | Python | johnwelt/titanic | /montecarlo.py | UTF-8 | 3,911 | 3.53125 | 4 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import timeit
"""
Monte Carlo Simulation from Empirical Distributions
Function 1 ecdf: compute empirical cumulative distribution function (ecdf) using existing data
Function 2 generate_rv: use random unif(0,1) and inverse transform method to select random variate from ecdf
Function 3 simulate_rv: simulate a user specified number of RVs
Function 4 replicate: generate user defined runs of simulation
"""
# fake data to work with
x = np.floor(np.exp(np.random.normal(2.5,0.25,10000)) + 0.5)
sns.distplot(x)
plt.show()
# function 1: get the empirical cumulative distribution
def ecdf(data):
"""
takes a 1-D array and calculates the cumulative distribution value from {0,1}
arguments
-----------
data: a 1-d array or list
output
-----------
a tuple containing X (the input data) and Y (the cumulative percentile) values
"""
n = len(data)
x = np.sort(data)
y = np.arange(1, n+1) / n
return(x,y)
# plot ecdf
x_ecdf = ecdf(x)
# sns.lineplot(x_ecdf[0], x_ecdf[1])
# plt.show()
# function 2: generate RV from ECDF
def generate_rv(ecdf):
"""
takes a UNIF(0,1) RV and generates an RV from the ECDF
arguments
-----------
ecdf: the output of the ecdf function
output
-----------
a single random variate
"""
u = np.random.uniform(0,1,1)[0]
rv = ecdf[0][np.where(ecdf[1] >= u)[0]][0]
return(rv)
# function 3: Simulate using RVs
def simulate_rv(ecdf, n = 100):
"""
generates a sequence of RVs
arguments
-----------
ecdf: the output of the ecdf function
n: the length of simulation
"""
rvs = [generate_rv(ecdf) for _ in range(n)]
return(rvs)
# # plot results: histogram
# rv_results = simulate_rv(x_ecdf, n = 100)
# sns.distplot(rv_results)
# plt.show()
# # plot results: ecdf of RVs
# rv_ecdf = ecdf(rv_results)
# sns.lineplot(rv_ecdf[0], rv_ecdf[1])
# plt.show()
# plot results: compare ecdfs
# df = pd.DataFrame({'X': x_ecdf[0], 'Y': x_ecdf[1], 'Source': 'Orig'})
# sim_df = pd.DataFrame({'X': rv_ecdf[0], 'Y': rv_ecdf[1], 'Source': 'Sim'})
# df = df.append(sim_df)
# sns.lineplot(x = 'X', y = 'Y', data = df, hue='Source', style='Source')
# plt.show()
# function 4: replicate
def replicate(ecdf, n = 1000, replications=100, quantiles = []):
"""
designed to capture point statistics from user-defined number of sim runs
arguments
----------------
ecdf: the empircal cumulative distribution function of the variable of interest
n: the length of each simulation replication
replications: the number of times to replicate the simulation
quantiles: a list containing any quantiles of interest in the output
default quantiles = [50, 75, 90, 95, 99]
additional quantiles can be added
point statistics
----------------
mean
standard deviation
variance
quantiles
output
----------------
dictionary containing replication number and point statistics
"""
# create dataframe to hold results
quantile_vals = list(set([50, 75, 90, 95, 99] + quantiles))
quantile_vals.sort()
quantile_names = ['quantile' + str(q) for q in quantile_vals]
colnames = ['replication', 'length', 'mean', 'var', 'stddev'] + quantile_names
results = {key : [] for key in colnames}
# run replications and capture statistics
for i in range(replications):
results['replication'].append(i)
X = simulate_rv(ecdf, n)
results['mean'].append(np.mean(X))
results['stddev'].append(np.std(X))
results['var'].append(np.var(X))
# quantiles
for j,k in zip(quantile_names, quantile_vals):
results[j].append(np.percentile(X, k))
results['length'].append(list(np.repeat(n,replications)))
return results
| true |
f1b574408f07a8da0ba07962f3021b9e906b8cc4 | Python | vgtomahawk/TrollDetectionShapleyValue | /GRQCDataset/processCAGRQC.py | UTF-8 | 4,527 | 2.59375 | 3 | [] | no_license | import igraph
import random
def readGraphFromFile(fileName="CA-GrQc.txt"):
g=igraph.Graph()
vertexId=0
vertexMap={}
vertexReverseMap={}
for line in open(fileName):
words=line.split()
src=int(words[0])
dest=int(words[1])
if src not in vertexMap:
vertexMap[src]=vertexId
vertexReverseMap[vertexId]=src
vertexId+=1
if dest not in vertexMap:
vertexMap[dest]=vertexId
vertexReverseMap[vertexId]=dest
vertexId+=1
g.add_vertices(vertexId)
for line in open(fileName):
words=line.split()
src=int(words[0])
dest=int(words[1])
g.add_edge(vertexMap[src],vertexMap[dest],weight=1.0)
return g,vertexMap,vertexReverseMap
def getPageRank(g):
return g.pagerank(weights="weight",damping=0.9)
def generatePermutation(N):
Nperm=[i for i in range(N)]
random.shuffle(Nperm)
return Nperm
def generateStringHash(perm):
perm.sort()
permStrings=[str(p) for p in perm]
return ",".join(permStrings)
def setPageRank(g,listOfVertices):
minVertex=min(listOfVertices.keys())
#Weight from supervertex to each vertex outside
totalWeights={}
#for v in listOfVertices:
# for e in g.incident(v):
# print g.es[e].source
# print g.es[e].dest
for u in listOfVertices.keys():
for v in g.neighbors(u):
#Ignore internal edges
if v in listOfVertices:
continue
if v not in totalWeights:
totalWeights[v]=0.0
totalWeights[v]=totalWeights[v]+1.0
severedEdges={}
#Sever all edges from any vertex in listOfVertices
for u in listOfVertices.keys():
for v in g.neighbors(u):
g.delete_edges(g.get_eid(u,v))
severedEdges[str(u)+","+str(v)]=1
#Add a new weighted edge to each outside vertex from the new "super-vertex"
for v in totalWeights.keys():
g.add_edge(minVertex,v,weight=totalWeights[v])
resetVector=[]
for i in range(g.vcount()):
resetVector.append(1.0)
for i in listOfVertices.keys():
resetVector[i]=0.0
resetVector[minVertex]=len(listOfVertices.keys())
resetVectorSum=sum(resetVector)
for i in range(g.vcount()):
resetVector[i]=resetVector[i]/resetVectorSum
#Find the required "set" pageRank
returnValue=g.personalized_pagerank(weights="weight",reset=resetVector,damping=0.9)[minVertex]
#Remove the new weighted edges you added
for v in g.neighbors(minVertex):
g.delete_edges(g.get_eid(minVertex,v))
#Restore the severed edges
for edgeKey in severedEdges.keys():
words=edgeKey.split(",")
src=int(words[0])
dest=int(words[1])
g.add_edge(src,dest,weight=1.0)
return returnValue
g,vertexMap,vertexReverseMap=readGraphFromFile("karate/karate.out")
pageRanks=getPageRank(g)
#listRanks=getPageRank(g)
#print generatePermutation(70)
#print generateStringHash(generatePermutation(70))
#print setPageRank(g,{0:True,3:True,4:True})
#print setPageRank(g,{0:True,3:True})
#print setPageRank(g,{0:True,3:True,4:True})
#print getPageRank(g)[0]
#print getPageRank(g)[3]
#print getPageRank(g)[4]
setValueTable={}
marginalGainTable={}
occurrenceTable={}
sampleComplexity=10**4
normalize=True
for v in range(g.vcount()):
occurrenceTable[v]=0.0
marginalGainTable[v]=0.0
for sample in range(sampleComplexity):
print sample
permutation=generatePermutation(g.vcount())
runningSet=[]
runningValue=0.0
for element in permutation:
runningSet.append(element)
setHash=generateStringHash(runningSet)
newRunningValue=0.0
if setHash in setValueTable:
newRunningValue=setValueTable[setHash]
else:
setHashMap={}
for elem in runningSet:
setHashMap[elem]=True
newRunningValue=setPageRank(g,setHashMap)
setValueTable[setHash]=newRunningValue
marginalGain=0.0
if not normalize:
marginalGain=newRunningValue-runningValue
runningValue=newRunningValue
else:
marginalGain=newRunningValue/len(runningSet)-runningValue
runningValue=newRunningValue/len(runningSet)
marginalGainTable[element]=marginalGainTable[element]+marginalGain
occurrenceTable[element]=occurrenceTable[element]+1.0
for v in range(g.vcount()):
marginalGainTable[v]=marginalGainTable[v]/occurrenceTable[v]
marginalGainFile=open("ShapleyRankNormalized.txt","w")
pageRankFile=open("PageRank.txt","w")
pageRankTuples=[(vertexReverseMap[v],pageRanks[v]) for v in range(g.vcount())]
marginalGainTuples=[(vertexReverseMap[v],marginalGainTable[v]) for v in range(g.vcount())]
pageRankTuples.sort(key=lambda x:x[1])
marginalGainTuples.sort(key=lambda x:x[1])
for tup in pageRankTuples:
pageRankFile.write(str(tup[0])+" "+str(tup[1])+"\n")
for tup in marginalGainTuples:
marginalGainFile.write(str(tup[0])+" "+str(tup[1])+"\n")
| true |
2657a2ea2d43be325d8cdf1eb643ea477e1d0038 | Python | SanketRevankar/TournamentManagementPy | /helpers/CloudServerHelper.py | UTF-8 | 1,724 | 2.640625 | 3 | [] | no_license | from django.http import HttpResponseServerError
from helpers.Util.CloudServerUtil import CloudServerUtil
from constants import StringConstants as sC
from firestore_data.ServerData import ServerList
class CloudServerHelper:
    """Thin wrapper around :class:`CloudServerUtil` that starts and stops
    the Compute Engine instances listed in :data:`ServerList`."""

    def __init__(self, config):
        """
        Initiate Cloud Server Helper
        This Class contains Cloud Server Functions
        :param config: Config object
        """
        self.util = CloudServerUtil(config)
        print('{} - Initialized'.format(__name__))

    def stop_server(self, server_id):
        """
        Used to stop a Compute Engine Instance. If already stopped no action will be taken.
        :param server_id: Id of the server
        """
        node = self.util.get_node(ServerList[server_id][sC.INSTANCE_NAME])
        # Only a RUNNING node is stopped; a failed stop() call is an error.
        # NOTE(review): HttpResponseServerError is a Django response class,
        # not an exception subclass -- confirm this 'raise' is intended.
        if self.util.status(node) == sC.RUNNING and not self.util.stop(node):
            raise HttpResponseServerError

    def start_server(self, server_id):
        """
        Used to start a Compute Engine Instance. If already running no action will be taken.
        :param server_id: ID of the server
        :return: Instance variable which was started
        """
        node_name = ServerList[server_id][sC.INSTANCE_NAME]
        server_name = ServerList[server_id][sC.SERVER_NAME]
        node = self.util.get_node(node_name)
        if self.util.status(node) == sC.STOPPED and not self.util.start(node):
            raise HttpResponseServerError
        # Re-fetch the node so the returned IP reflects the started instance.
        node = self.util.get_node(node_name)
        return self.util.ip(node) + sC.COLON + ServerList[server_id][sC.PORT], server_name
| true |
def reduce_memory_usage(df, deep=True, verbose=True, categories=True):
    """Downcast the columns of *df* in place to cheaper dtypes.

    Integer/float columns are shrunk with ``pd.to_numeric(downcast=...)``
    and, when *categories* is true, ``object`` columns become ``category``.

    :param df: pandas DataFrame, modified in place.
    :param deep: forwarded to ``DataFrame.memory_usage`` for the report
        (the original accepted but ignored this flag).
    :param verbose: print per-column conversions and a before/after summary.
    :param categories: convert ``object`` columns to ``category`` dtype
        (the original accepted but ignored this flag).
    """
    import pandas as pd  # local import: this module has no top-level imports

    # All types that we want to change for "lighter" ones.
    # int8 and float16 are not included because we cannot reduce
    # those data types.  float32 is not included because float16
    # has too low precision.
    numeric2reduce = ("int16", "int32", "int64", "float64")
    start_mem = 0
    if verbose:
        start_mem = df.memory_usage(deep=deep).sum() / 1024**2

    # .iteritems() was removed in pandas 2.0; .items() is the replacement.
    for col, col_type in df.dtypes.items():
        best_type = None
        if categories and col_type == "object":
            df[col] = df[col].astype("category")
            best_type = "category"
        elif col_type in numeric2reduce:
            downcast = "integer" if "int" in str(col_type) else "float"
            df[col] = pd.to_numeric(df[col], downcast=downcast)
            best_type = df[col].dtype.name
        # Log the conversion performed.
        if verbose and best_type is not None and best_type != str(col_type):
            print(f"Column '{col}' converted from {col_type} to {best_type}")

    if verbose:
        end_mem = df.memory_usage(deep=deep).sum() / 1024**2
        diff_mem = start_mem - end_mem
        percent_mem = 100 * diff_mem / start_mem
        print(f"Memory usage decreased from"
              f" {start_mem:.2f}MB to {end_mem:.2f}MB"
              f" ({diff_mem:.2f}MB, {percent_mem:.2f}% reduction)")
| true |
fa0e321b51ecd4fb2f3dd1b1aa7f9838f3ca0827 | Python | Maksym-Gorbunov/python1 | /test.py | UTF-8 | 1,747 | 3.828125 | 4 | [] | no_license | '''
# open and write text to the file
fo = open('test.txt', 'w+')
text = 'Maksym write this code again'
fo.write(text)
fo.close()
# open and append text in new line
fo = open('test.txt', 'a')
text = '\nand this one ...'
fo.write(text)
fo.close()
# read from the file
fo = open('test.txt', 'r+')
text = fo.read()
#check if file is empty
import os
a = os.stat('test.txt').st_size
print('File size is: ', a)
if a == 0:
print('File is empty!')
else:
print(text)
fo.close()
# read by line
fo = open('test.txt')
lines = fo.readlines()
'''
'''
import maxFunctions
maxFunctions.hello('Maksym')
'''
'''
from maxFunctions import hello
hello('Olga')
'''
users = ['Dan', 'John', 'Billy', 'Peter', 'Tim']
'''
for i in range(0, len(users)):
print(users[i])
'''
'''
i = 0
while i != len(users):
print(users[i])
i += 1
'''
'''
for item in users:
print(item+'...')
'''
class Employee():
    """A person with a first and last name.

    ``email`` and ``fullname`` are derived properties: assigning
    ``"First Last"`` to ``fullname`` updates both name parts, and deleting
    it resets them to ``None`` (with a console notice).
    """

    def __init__(self, fname, lname):
        self.fname = fname
        self.lname = lname

    @property
    def email(self):
        # Recomputed on every access so it always tracks the current name.
        return '{}.{}@hotmail.com'.format(self.fname, self.lname)

    @property
    def fullname(self):
        return '{} {}'.format(self.fname, self.lname)

    @fullname.setter
    def fullname(self, name):
        # Expects exactly one space separator; raises ValueError otherwise.
        self.fname, self.lname = name.split(' ')

    @fullname.deleter
    def fullname(self):
        self.fname = None
        self.lname = None
        print('Fullname was deleted!')
# Build one employee and rename it through the fullname setter.
emp_1 = Employee('John', 'Smith')
emp_1.fullname = 'Bil Json'
# Property access / deleter demo, disabled:
'''
print(emp_1.fname)
print(emp_1.lname)
print(emp_1.fullname)
print(emp_1.email)
print(emp_1.fullname.split())
del emp_1.fullname
print(emp_1.fullname)
'''
# f-string formatting demo.
a = 'I'
b = 'You'
msg = f'{a} and {b}'
print(msg)
#print(dir(msg))
#print(help(int))
print(help(str.split)) | true |
9d26c69a2edb43fb1c610eeec3d50ddac95b9cd5 | Python | aliciawyy/CompInvest | /tests/test_load.py | UTF-8 | 1,821 | 2.96875 | 3 | [
"MIT"
] | permissive | """
This file contains unittests for the loading functions in
.load
"""
import datetime as dt
from load.load_ticker import load_cac40_names, load_valid_cac40_names
from load.load_local_data import load_local_data_from_yahoo
from load.load_data import load_stock_close_price
from nose.plugins.attrib import attr
def test_load_cac40_names():
    """The CAC40 table is a Series of names indexed by ticker."""
    names = load_cac40_names()
    assert names.name == 'Name'
    assert names.index.name == 'Ticker'
    assert names.size > 1
def test_load_local_data_from_yahoo():
    """2011 has 252 trading days; expect one close column per symbol."""
    symbols = ['AAPL', 'GLD', 'GOOG', 'XOM']
    data = load_local_data_from_yahoo(dt.datetime(2011, 1, 1),
                                      dt.datetime(2011, 12, 31),
                                      symbols)
    assert data['close'].shape == (252, len(symbols))
def test_load_stock_close_price():
    """Both the 'yahoo' and 'local' sources yield a full year of closes."""
    start, end = dt.datetime(2011, 1, 1), dt.datetime(2011, 12, 31)
    symbols = ['AAPL', 'GLD', 'XOM']
    from_yahoo = load_stock_close_price(start, end, symbols, 'yahoo')
    assert from_yahoo.shape == (252, 3)
    assert len(from_yahoo['AAPL']) == 252
    from_local = load_stock_close_price(start, end, symbols, 'local')
    assert from_local.shape == (252, 3)
@attr('slow')
def test_load_valid_cac40_names():
    """Every validated CAC40 ticker gets a price column for the last year."""
    today = dt.datetime.today()
    valid_names = load_valid_cac40_names()
    prices = load_stock_close_price(today - dt.timedelta(days=365), today,
                                    valid_names.index, 'yahoo')
    assert len(valid_names) > 20
    assert prices.shape[1] == len(valid_names)
    assert prices.shape[0] >= 251
if __name__ == '__main__':
    # Run the fast tests directly; the 'slow' network test is skipped.
    test_load_cac40_names()
    test_load_local_data_from_yahoo()
    test_load_stock_close_price()
| true |
2047d4823c77ec439097280a2f66e305a9d438aa | Python | trriplejay/django-hiddenblade | /rosters/tests/test_models.py | UTF-8 | 2,714 | 2.515625 | 3 | [] | no_license | from django.test import TestCase
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from ..models import Player
from ..models import Roster
class modelTestMixin():
    """Shared fixtures: two players, one roster, and their memberships."""

    def setUp(self):
        # BUG FIX: the module-level imports only pull in Player and Roster,
        # so Membership below raised NameError.  Imported locally to keep
        # the fix self-contained.
        from ..models import Membership

        self.player1 = Player.objects.create(
            username='player1',
            password='mypass',
            email='player1@email.com')
        self.player2 = Player.objects.create(
            username='player2',
            password='mypass',
            email='player2@email.com')
        self.roster = Roster.objects.create(
            name='testName',
        )
        # NOTE(review): 'roster' is given an id while 'player' is given an
        # instance -- confirm the Membership model expects this mix.
        self.member1 = Membership.objects.create(
            roster=self.roster.id,
            player=self.player1
        )
        self.member2 = Membership.objects.create(
            roster=self.roster.id,
            player=self.player2
        )
class RosterTests(TestCase, modelTestMixin):
    """Unit tests for the Roster model."""
    # NOTE(review): with bases (TestCase, modelTestMixin), super().setUp()
    # resolves to TestCase.setUp, which does not chain to the mixin --
    # confirm modelTestMixin.setUp actually runs (mixin-first bases are
    # the usual pattern).

    def setUp(self):
        super(RosterTests, self).setUp()

    def create_roster(
        self,
        name,
        description='',
        status='',
        city='',
        state='',
        zipcode='',
        is_active=True,
        is_public=False,
    ):
        """Create a Roster with sensible defaults for the optional fields."""
        return Roster.objects.create(
            name=name,  # BUG FIX: 'name' was accepted but silently dropped
            description=description,
            status=status,
            state=state,
            city=city,
            zipcode=zipcode,
            is_active=is_active,
            is_public=is_public,
        )

    def test_model_creation(self):
        """A freshly created roster exposes name, slug, URL and defaults."""
        self.assertEqual(self.roster.name, self.roster.__unicode__())
        self.assertIsInstance(self.roster, Roster)
        self.assertEqual(self.roster.slug, slugify(self.roster.name))
        self.assertEqual(
            self.roster.get_absolute_url(),
            reverse(
                "rosters:detail",
                kwargs={
                    'slug': self.roster.slug,
                    'pk': self.roster.id
                }
            )
        )
        self.assertTrue(self.roster.is_active)
        self.assertFalse(self.roster.is_public)
        self.assertEqual(self.roster.description, '')
        self.assertEqual(self.roster.status, '')
        self.assertEqual(self.roster.state, '')
        self.assertEqual(self.roster.zipcode, '')
        self.assertEqual(self.roster.city, '')

    def test_model_manager(self):
        """Inactive rosters are excluded from get_roster() and live()."""
        inactive_roster = self.create_roster(name="inactive", is_active=False)
        self.assertNotIn(self.roster, Roster.objects.get_roster(inactive_roster.id))
        self.assertIn(self.roster, Roster.objects.get_roster(self.roster.id))
        self.assertNotIn(inactive_roster, repr(Roster.objects.live()))
class MembershipTests(TestCase, modelTestMixin):
    # Placeholder: membership-specific assertions are still to be written.
    # Note this setUp also masks modelTestMixin.setUp, so no fixtures are
    # created for this test case.
    def setUp(self):
        pass
| true |
8d50cd5c1ce2cdb8fed5a045ea1b27226b2cff60 | Python | anniekli/Portfolio | /superhero-text-adventure.py | UTF-8 | 4,216 | 3.453125 | 3 | [] | no_license | start = '''
You just found out that your sister has been kidnapped and is
being held in a castle guarded by dragons!
You must now become a superhero to save her!
'''
print(start)
super_power = input("Do you want super strength or super speed? Type 'strength' or 'speed'.")
time = 45
def print_time(time):
print("You have " + str(time) + " minutes left.")
return time
if super_power == "speed":
print('''You trip into a particle accelerator and it explodes.
When you emerge from the ashes, you now have super-speed!''')
print("Suddenly, a timer appears, giving you only 45 minutes to save your sister.")
superhero_friend = input('''Do you want to spend 15 minutes recruiting a
superhero friend to help you, or try to go off on your own? Type 'friend' or 'own'.''')
if superhero_friend == "friend":
print("You lost 15 minutes finding a friend to help, but you now have a partner!")
print_time(30)
print('''You arrive at the castle and easily defeat the dragon with your friend.
Unfortunately, as the two of you enter the castle, a troll appears and stats attacking your friend.''')
print_time(20)
friendship = input("Do you spend 15 minutes to save your friend, or leave him to die and continue on to your sister? Type 'friend' or 'sister'.")
if friendship == "friend":
print_time(5)
print('''You saved your friend, but now you have to find your sister.
With only 5 minutes left, you and your friend use your super speed to search the castle.
Finally, you find her in the dungeons. You and your friend rescue her, and you all ride off into the sunset!''')
elif friendship == "sister":
print_time(20)
print('''You leave your friend, and his screams echo down the hallway as you try to search for your sister.
Unfortunately, the castle is too big, even with your super speed, and there's no way you can check all of the rooms.
The troll comes running after you, and you get devoured. Your sister is left trapped in the castle for eternity.
You should have saved your friend.''')
elif superhero_friend == "own":
print("You saved 15 minutes, but you will now have to save your sister on your own.")
print_time(45)
print('''You arrive at the castle, but now you have to outsmart the dragon.
Unfortunately, it's too powerful for you to take on by yourself.
Your super speed is no help to you when the dragon burns you to a crisp.
Your sister is now trapped in the castle forever.''')
elif super_power == "strength":
print('''You trip into a particle accelerator and it explodes.
When you emerge from the ashes, you now have super-strength!''')
print("Suddenly, a giant ogre appears!")
battle = input("Do you want to run away or try to fight the ogre? Type 'run' or 'fight'.")
if battle == "run":
print("You try to run, but you aren't fast enough, and the ogre steps on you and kills you.")
print("You failed your sister and the dragons have eaten her.")
elif battle == "fight":
print("You decide to fight the ogre with your super strength and defeat him!")
print('''You continue the rescue for your sister.
You arrive at the castle, but the dragon is guarding the entrance''')
dragon = input("Do you want to fight the dragon or try to befriend it? Type 'fight' or 'befriend'.")
if dragon == "fight":
print('''You use your super strength and kill the dragon.
As you step inside the castle, a troll appears.
He sees the broken body of his only friend and flies into a wild rage, devouring you.
In his blind anger, he also kills your sister.
The last anyone sees of him, he is standing on the top of the castle, laughing manically.''')
elif dragon == "befriend":
print('''You try to befriend the dragon, and to your surprise, he accepts your friendship!
He and his friend the troll decide to give up your sister, and the four of you ride off happily into the sunset.)
| true |
07686f85aa5ddf741d83f939a4740080f77d2604 | Python | cornelinux/python-yubico | /examples/rolling_challenge_response | UTF-8 | 8,590 | 2.71875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
#
# Copyright (c) 2011, Yubico AB
# All rights reserved.
#
"""
Demonstrate rolling challenges.
This is a scheme for generating "one time" HMAC-SHA1 challenges, which
works by being able to access the HMAC-SHA1 key on the host computer every
time the correct response is provided.
GPGME would've been used to encrypt the HMAC-SHA1 with the next expected
response, but none of the two Python bindings to gpgme I have available
at the moment supports symmetric encryption, so for demo purposes AES CBC
is used instead.
"""
import os
import sys
import json
import hmac
import argparse
import hashlib
import yubico
from Crypto.Cipher import AES
def parse_args():
    """Build the demo's command-line interface and parse sys.argv."""
    ap = argparse.ArgumentParser(
        description="Demonstrate rolling challenges", add_help=True)
    # Diagnostics flags.
    ap.add_argument('-v', '--verbose', dest='verbose',
                    action='store_true', default=False,
                    help='Enable verbose operation.')
    ap.add_argument('--debug', dest='debug',
                    action='store_true', default=False,
                    help='Enable debug operation.')
    # Mode selection: --init creates a fresh state file.
    ap.add_argument('--init', dest='init',
                    action='store_true', default=False,
                    help='Initialize demo.')
    ap.add_argument('-F', '--filename', dest='filename', required=True,
                    help='State filename.')
    # Tuning of the challenge itself.
    ap.add_argument('--challenge-length', dest='challenge_length',
                    type=int, default=32,
                    help='Length of challenges generated, in bytes.')
    ap.add_argument('--slot', dest='slot', type=int, default=2,
                    help='YubiKey slot to send challenge to.')
    return ap.parse_args()
def init_demo(args):
    """ Initializes the demo by asking a few questions and creating a new stat file. """
    # Python 2 era code: raw_input() and str.decode('hex').
    hmac_key = raw_input("Enter HMAC-SHA1 key as 40 chars of hex (or press enter for random key) : ")
    if hmac_key:
        try:
            hmac_key = hmac_key.decode('hex')
        # NOTE(review): bare 'except' also hides unrelated errors; a
        # narrower TypeError catch is probably what is meant.
        except:
            sys.stderr.write("Could not decode HMAC-SHA1 key. Please enter 40 hex-chars.\n")
            sys.exit(1)
    else:
        # No key supplied: draw 20 random bytes (the HMAC-SHA1 key size).
        hmac_key = os.urandom(20)
    # Whether typed or generated, the key must be exactly 20 bytes.
    if len(hmac_key) != 20:
        sys.stderr.write("Decoded HMAC-SHA1 key is %i bytes, expected 20.\n" %( len(hmac_key)))
        sys.exit(1)
    # Show the ykpersonalize invocation that programs this key into the slot.
    print "To program a YubiKey >= 2.2 for challenge-response with this key, use :"
    print ""
    print "  $ ykpersonalize -%i -ochal-resp -ochal-hmac -ohmac-lt64 -a %s" % (args.slot, hmac_key.encode('hex'))
    print ""
    passphrase = raw_input("Enter the secret passphrase to protect with the rolling challenges : ")
    # Initial secret state: an access counter plus the protected passphrase.
    secret_dict = {"count": 0,
                   "passphrase": passphrase,
                   }
    roll_next_challenge(args, hmac_key, secret_dict)
def do_challenge(args):
    """ Send a challenge to the YubiKey and use the result to decrypt the state file. """
    outer_j = load_state_file(args)
    challenge = outer_j["challenge"]
    print "Challenge : %s" % (challenge)
    # The stored challenge is hex encoded; the YubiKey wants raw bytes.
    response = get_yubikey_response(args, outer_j["challenge"].decode('hex'))
    if args.debug or args.verbose:
        print "\nGot %i bytes response %s\n" % (len(response), response.encode('hex'))
    else:
        print "Response : %s" % (response.encode('hex'))
    # The correct response doubles as the AES key protecting the inner JSON.
    inner_j = decrypt_with_response(args, outer_j["inner"], response)
    if args.verbose or args.debug:
        print "\nDecrypted 'inner' :\n%s\n" % (inner_j)
    secret_dict = {}
    try:
        secret_dict = json.loads(inner_j)
    # A wrong response decrypts to garbage, which fails to parse as JSON.
    except ValueError:
        sys.stderr.write("\nCould not parse decoded data as JSON, you probably did not produce the right response.\n")
        sys.exit(1)
    secret_dict["count"] += 1
    print "\nThe passphrase protected using rolling challenges is :\n"
    print "\t%s\n\nAccessed %i times.\n" % (secret_dict["passphrase"], secret_dict["count"])
    # Success: immediately roll a fresh challenge and re-encrypt the state.
    roll_next_challenge(args, secret_dict["hmac_key"].decode('hex'), secret_dict)
def get_yubikey_response(args, challenge):
    """
    Do challenge-response with the YubiKey if one is found. Otherwise prompt user to fake a response. """
    try:
        YK = yubico.find_yubikey(debug = args.debug)
        # Challenges are zero-padded to the 64-byte HMAC-SHA1 block size.
        response = YK.challenge_response(challenge.ljust(64, chr(0x0)), slot = args.slot)
        return response
    except yubico.yubico_exception.YubicoError as e:
        print "YubiKey challenge-response failed (%s)" % e.reason
        print ""
        # Manual fallback, e.g. the output of: ykchalresp -2 -x <hex>
        # NOTE(review): this path returns a hex string while the hardware
        # path returns raw bytes -- confirm callers tolerate both forms.
        response = raw_input("Assuming you do not have a YubiKey. Enter repsonse manually (hex encoded) : ")
        return response
def roll_next_challenge(args, hmac_key, inner_dict):
    """
    When we have the HMAC-SHA1 key in clear, generate a random challenge and compute the
    expected response for that challenge.
    """
    # Accept the key either raw (20 bytes) or hex encoded (40 chars).
    if len(hmac_key) != 20:
        hmac_key = hmac_key.decode('hex')
    challenge = os.urandom(args.challenge_length)
    response = get_response(hmac_key, challenge)
    print "Generated challenge : %s" % (challenge.encode('hex'))
    print "Expected response : %s (sssh, don't tell anyone)" % (response)
    print ""
    if args.debug or args.verbose or args.init:
        print "To manually verify that your YubiKey procudes this response, use :"
        print ""
        print "  $ ykchalresp -%i -x %s" % (args.slot, challenge.encode('hex'))
        print ""
    # The HMAC key rides along inside the encrypted state, so the next
    # correct response lets us compute the response after that.
    inner_dict["hmac_key"] = hmac_key.encode('hex')
    inner_j = json.dumps(inner_dict, indent = 4)
    if args.verbose or args.debug:
        print "Inner JSON :\n%s\n" % (inner_j)
    # Encrypt the inner state under the *expected next response*.
    inner_ciphertext = encrypt_with_response(args, inner_j, response)
    # The outer document is plaintext: the challenge plus the ciphertext.
    outer_dict = {"challenge": challenge.encode('hex'),
                  "inner": inner_ciphertext,
                  }
    outer_j = json.dumps(outer_dict, indent = 4)
    if args.verbose or args.debug:
        print "\nOuter JSON :\n%s\n" % (outer_j)
    print "Saving 'outer' JSON to file '%s'" % (args.filename)
    write_state_file(args, outer_j)
def get_response(hmac_key, challenge):
    """Return the hex-encoded HMAC-SHA1 of *challenge* under *hmac_key*."""
    return hmac.new(hmac_key, challenge, hashlib.sha1).hexdigest()
def encrypt_with_response(args, data, key):
    """
    Encrypt our secret inner data with the response we expect the next time.

    NOTE: The use of AES CBC has not been validated as cryptographically sound
    in this application.

    I would have done this with GPGme if it weren't for the fact that neither
    of the two versions for Python available in Ubuntu 10.10 have support for
    symmetric encrypt/decrypt (LP: #295918).
    """
    # pad data to multiple of 16 bytes for AES CBC.  Trailing spaces are
    # harmless here because JSON tolerates trailing whitespace on parse.
    # (When len(data) is already a multiple of 16 this appends 16 spaces.)
    pad = len(data) % 16
    data += ' ' * (16 - pad)
    # need to pad key as well: the 20-byte response is NUL-padded to the
    # 32-byte AES-256 key size.
    aes_key = key.decode('hex')
    aes_key += chr(0x0) * (32 - len(aes_key))
    if args.debug:
        print ("AES-CBC encrypting 'inner' with key (%i bytes) : %s" % (len(aes_key), aes_key.encode('hex')))
    # NOTE(review): no explicit IV is passed, so PyCrypto's default is used
    # for every encryption -- confirm this is acceptable for the demo.
    obj = AES.new(aes_key, AES.MODE_CBC)
    ciphertext = obj.encrypt(data)
    return ciphertext.encode('hex')
def decrypt_with_response(args, data, key):
    """
    Try to decrypt the secret inner data with the response we got to this challenge.
    """
    # The key may arrive hex encoded (manual entry) or raw (hardware path).
    aes_key = key
    try:
        aes_key = key.decode('hex')
    except TypeError:
        # was not hex encoded
        pass
    # need to pad key: NUL-pad the response up to the 32-byte AES key size,
    # mirroring encrypt_with_response().
    aes_key += chr(0x0) * (32 - len(aes_key))
    if args.debug:
        print ("AES-CBC decrypting 'inner' using key (%i bytes) : %s" % (len(aes_key), aes_key.encode('hex')))
    obj = AES.new(aes_key, AES.MODE_CBC)
    plaintext = obj.decrypt(data.decode('hex'))
    return plaintext
def write_state_file(args, data):
    """ Save state to file. """
    # 'with' guarantees the handle is flushed and closed even if write()
    # raises (the original leaked the file object on error).
    with open(args.filename, 'w') as f:
        f.write(data)
def load_state_file(args):
    """ Load (and parse) the state file. """
    # Read via a context manager so the handle is closed promptly
    # (the original left the open file to the garbage collector).
    with open(args.filename) as f:
        return json.loads(f.read())
def main():
    """Entry point: --init creates a fresh state file, otherwise run one challenge round."""
    args = parse_args()
    if args.init:
        init_demo(args)
    else:
        do_challenge(args)
    print "\nDone\n"
if __name__ == '__main__':
    main()
| true |
fdc051bfe908ae995073028d897670e29a8a35a7 | Python | PoonamPrusty/Python_tasks | /listprogram.py | UTF-8 | 999 | 4.0625 | 4 | [] | no_license | scores = []
# Menu-driven high-score keeper (Python 2: print statements, raw_input).
# Note: raw_input() returns a string, so 'choice != 0' is always true and
# the loop only ends via exit() on choice "0".
choice = None
while choice != 0:
    print """High Score Keeper
    0 - Exit
    1 - Show Scores
    2 - Add a score
    3 - Delete a score
    4 - Sort scores """
    choice = (raw_input("Choice: "))
    if choice == "0":
        print "Bye!"
        exit()
    elif choice == "1":
        # Show all scores, one per line.
        for score in scores:
            print score
    elif choice == "2":
        # Add a score (ValueError on non-numeric input is unhandled).
        score = int(raw_input("Enter score to add: "))
        scores.append(score)
        print "The modified score list is: ", scores
    elif choice == "3":
        # Remove the first occurrence of the given score, if present.
        score = int(raw_input("Enter score to remove: "))
        if score in scores:
            scores.remove(score)
            print "The modified score list is: ", scores
        else:
            print "That score is not in the list"
    elif choice == "4":
        # Sort descending (ascending sort followed by a reverse).
        scores.sort()
        scores.reverse()
        print scores
    else:
        print choice, "is not a valid entry."
| true |
a60b619ec7ba692267b21a20e00dc15faff1d171 | Python | Macchiato0/Py_sort_search_data_structure | /queue(FIFO).py | UTF-8 | 1,427 | 4.875 | 5 | [] | no_license | # Queue is a linear data structure that stores items in First In First Out (FIFO) manner.
# With a queue the least recently added item is removed first.
# Operations associated with queue are:
# Enqueue: Adds an item to the queue. If the queue is full, then it is said to be an Overflow condition
# Dequeue: Removes an item from the queue.
# Front: Get the front item from queue
# Rear: Get the last item from queue
# Implementation using collections.deque
# Queue in Python can be implemented using deque class from the collections module. Deque is preferred over list in the cases where we need quicker append and pop operations from both the ends of container, as deque provides an O(1) time complexity for append and pop operations as compared to list which provides O(n) time complexity. Instead of enqueue and dequeue, append() and popleft() functions are used.
# Python program to
# demonstrate queue implementation
# using collections.deque
from collections import deque
# Initializing an empty queue.
q = deque()
# Adding elements to a queue (enqueue at the right end).
q.append('a')
q.append('b')
q.append('c')
print("Initial queue")
print(q)
# Removing elements from a queue (dequeue from the left end, FIFO order).
print("\nElements dequeued from the queue")
print(q.popleft())
print(q.popleft())
print(q.popleft())
print("\nQueue after removing elements")
print(q)
# Uncommenting q.popleft()
# will raise an IndexError
# as queue is now empty
| true |
264724712338429d41983634c5b674427477b1d1 | Python | antrad1978/SentimentAnalysis | /SentimentAnalysis/analysis.py | UTF-8 | 521 | 2.859375 | 3 | [] | no_license | import json
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def analyze_sentiment(sentence):
    """Print *sentence*, then its VADER polarity scores on a single line."""
    print(sentence + "\n")
    scores = SentimentIntensityAnalyzer().polarity_scores(sentence)
    # Keys sorted so the output order is deterministic.
    for key in sorted(scores):
        print('{0}: {1}, '.format(key, scores[key]), end='')
    print()
#sentence = json.loads('{"ID":"2e1b474f-1f40-453f-bf29-35313daf5b02","Name":"Tonio","Email":"antrad@libero.it","FeedbackDate":"2018-02-18T18:02:28.249322+02:00","Text":"This site sux!"}')
#analyze_sentiment(sentence["Text"])
| true |
3d0e9a31a15426b572caec4b1c90f9d45588cb02 | Python | shcqupc/hankPylib | /leetcode/LC0016_R1001_mergelist.py | UTF-8 | 1,207 | 4.0625 | 4 | [] | no_license | '''
面试题 10.01. 合并排序的数组 难度 简单
给定两个排序后的数组 A 和 B,其中 A 的末端有足够的缓冲空间容纳 B。 编写一个方法,将 B 合并入 A 并排序。
初始化 A 和 B 的元素数量分别为 m 和 n。
示例:
输入:
A = [1,2,3,0,0,0], m = 3
B = [2,5,6], n = 3
输出: [1,2,2,3,5,6]
'''
class Solution(object):
    """Two in-place strategies for merging sorted B into A's zero padding:
    slice-overwrite-then-sort, and a backwards two-pointer merge."""

    def merge1(self, A, m, B, n):
        """
        :type A: List[int]
        :type m: int
        :type B: List[int]
        :type n: int
        :rtype: None Do not return anything, modify A in-place instead.
        """
        # Overwrite the zero padding with B, then sort the whole list.
        A[m:] = B
        return A.sort()

    def merge2(self, A, m, B, n):
        """Merge from the back so no unread element of A is overwritten."""
        pa, pb = m - 1, n - 1
        for tail in range(m + n - 1, -1, -1):
            if pa == -1:
                A[tail] = B[pb]
                pb -= 1
            elif pb == -1 or A[pa] > B[pb]:
                A[tail] = A[pa]
                pa -= 1
            else:
                A[tail] = B[pb]
                pb -= 1
# Quick manual check: merge [1,2,3] into [4,5,6,0,0,0] in place.
s = Solution()
A = [4, 5, 6, 0, 0, 0]
B = [1, 2, 3]
s.merge2(A, 3, B, 3)
print(A)  # expected: [1, 2, 3, 4, 5, 6]
| true |
67bb72c9ec5e8e8e03ca4ac763c1b032616439e0 | Python | itmanni/annotated-py-projects | /flask/flask-0.5/flask/globals.py | UTF-8 | 1,261 | 2.875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
flask.globals 说明:
- 全局对象定义
- 上下文对象
- 给当前激活的上下文, 定义全部的全局对象.
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
"""
# 关键依赖:
# - 需要看下 werkzeug 如何实现的
from werkzeug import LocalStack, LocalProxy
###################################################################
# 请求上下文
# 说明:
# - 关键模块
# - 注意对 `请求上下文` 和 `请求上下文 - 全局对象` 概念的理解.
# - 是 请求相关的
# - 是 上下文相关的
# - 是 全局对象
# - 本模块的对象, 非常关键, 涉及 Flask() 核心功能的实现.
#
###################################################################
# 请求上下文栈
# context locals
_request_ctx_stack = LocalStack()
#
# 请求上下文栈顶对象:
#
# 上下文当前 app:
current_app = LocalProxy(lambda: _request_ctx_stack.top.app)
# 上下文请求:
request = LocalProxy(lambda: _request_ctx_stack.top.request)
# 上下文 session:
session = LocalProxy(lambda: _request_ctx_stack.top.session)
# 上下文 g 对象:
g = LocalProxy(lambda: _request_ctx_stack.top.g)
| true |
315fb063fb0060313fc7c6d0ce5c5dfa801d4f59 | Python | zhengnengjin/python_Learning | /Day25/类的特殊成员.py | UTF-8 | 232 | 2.875 | 3 | [] | no_license | #__author: ZhengNengjin
#__date: 2018/10/9
class F:
    """Demo of special members: __init__ runs at construction time and
    __call__ runs when the instance itself is invoked like a function."""

    def __init__(self):
        print('init')

    def __call__(self, *args, **kwargs):
        # Appending a second pair of parentheses to the object runs this.
        print('call')
obj = F()  # prints 'init'
obj()  # prints 'call' -- the instance is callable
print(F.__dict__)  # inspect the class attribute table, including the dunders
| true |
30343f9cd71218a78ebdf51e2eaa54e3aafa3d9a | Python | Dora-H/HousePricePredict_CCA_Testing | /CCA_Testing.py | UTF-8 | 1,043 | 3.421875 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as mp
import seaborn as sns
data = pd.read_csv("./train.csv")
print(data.isnull().sum())
print(r'總紀錄筆數:{}'.format(data.shape[0]))
print(r'總紀錄Columns:{}'.format(data.shape[1])) # 23
var_na = [col for col in data.columns if data[col].isnull().mean() > 0]
print('自變數(或是特徵)是有缺值的', var_na)
print('\n')
cca_var = [var for var in data.columns if data[var].isnull().mean() < 0.05]
print('篩選出小於5%的變數:', cca_var)
print('\n')
cca_df = data[cca_var].dropna()
print('移除小於5%的變數後', cca_df)
print(cca_df.shape[0], data.shape[0])
print('總共移除%d資料 ' % (data.shape[0]-cca_df.shape[0]))
sns.set(context='notebook', style='white')
mp.style.use("dark_background")
mp.title('Thresholds(SalePrice) < 0.05 (CCA) Original/After')
mp.hist(data['SalePrice'], color='blue', bins=50, label="Original")
mp.hist(data['SalePrice'], color='white', width=9000, bins=50, label="After")
mp.legend()
mp.show()
| true |
9c6249d4d5f9820e38fff01c13bf1e19d07933f6 | Python | MZandtheRaspberryPi/pi_watch | /scripts/get_transit.py | UTF-8 | 7,434 | 2.578125 | 3 | [] | no_license | import requests
import json
import pprint
from datetime import datetime
import pytz
import copy
# get_transit.py shows real time arrival predictions for stops
# for bart direction, 203806 is south
# issue for why gotta set encoding
# https://github.com/kennethreitz/requests/issues/2296
# notes on format of response
# http://assets.511.org/pdf/nextgen/developers/Open_511_Data_Exchange_Specification_v1.26_Transit.pdf
# can use this to lookup stop ids
# https://511.org/transit/agencies/stop-id
# Muni Metro stop codes (look them up at https://511.org/transit/agencies/stop-id).
metro_mission_seventh_outbound_stop_id = 15539
metro_mission_seventh_inbound_stop_id = 17129
metro_mission_eleventh_inbound_stop_id = 15544
metro_mission_eleventh_outbound_stop_id = 15545
metro_mission_ninth_inbound_stop_id = 15542
metro_mission_ninth_outbound_stop_id = 15543
metro_outbound_civic_center_stop_id = 16997
metro_inbound_civic_center_stop_id = 15727
# BART station code and the 511 agency identifiers.
bart_civic_center = "CIVC"
muni = "SF"
bart = "BA"
# Lookup tables in the shapes the fetchers below expect:
#   Muni: {agency: {stop_label: {direction: stop_code}}}
#   BART: {agency: {stop_label: {station_code: [direction_letters]}}}
inbound_muni = {muni: {"Missn&11": {"IB": metro_mission_eleventh_inbound_stop_id},
                       "CivCtr": {"IB": metro_inbound_civic_center_stop_id}}}
outbound_muni = {muni: {"Missn&11": {"OB": metro_mission_eleventh_outbound_stop_id},
                        "CivCtr": {"OB": metro_outbound_civic_center_stop_id}}}
southbound_bart = {bart: {"CivCtr": {bart_civic_center: ["S"]}}} # used to use these ids "203806", "203820", "203822", "204442",
northbound_bart = {bart: {"CivCtr": {bart_civic_center: ["N"]}}} # "203807", "203819", "203821", "204444",
def get_rt_arrivals_muni(agency_and_stops, my_511_token=""):
    """Fetch real-time Muni arrival predictions from the 511.org API.

    :param agency_and_stops: {agency: {stop_label: {direction: stop_code}}}
    :param my_511_token: 511.org API key.
    :return: {stop_label: {line_ref: [minutes_until_arrival, ...]}}
    """
    arrivals = {}
    # Time zone objects are loop invariant, so build them once up front.
    utc_timezone = pytz.timezone("UTC")
    pst_timezone = pytz.timezone("US/Pacific")
    for agency in agency_and_stops.keys():
        for stop in agency_and_stops[agency].keys():
            for direction in agency_and_stops[agency][stop]:
                arrivals.setdefault(stop, {})
                url = "http://api.511.org/transit/StopMonitoring?api_key=" \
                      + my_511_token + "&agency=" + agency \
                      + "&stopCode=" \
                      + str(agency_and_stops[agency][stop][direction])
                response = requests.get(url)
                response.raise_for_status()
                # 'utf-8-sig' trims the BOM the feed prepends; see
                # https://github.com/kennethreitz/requests/issues/2296
                response.encoding = 'utf-8-sig'
                real_time_data = json.loads(response.text)
                # NOTE(review): datetime.now() is host-local time localized
                # as US/Pacific -- assumes the host clock is Pacific.
                now = pst_timezone.localize(datetime.now())
                for arrival in real_time_data['ServiceDelivery']['StopMonitoringDelivery']['MonitoredStopVisit']:
                    direction_response = arrival['MonitoredVehicleJourney']['DirectionRef']
                    # Skip predictions for directions we were not asked about.
                    if direction_response not in agency_and_stops[agency][stop]:
                        continue
                    line_ref = arrival['MonitoredVehicleJourney']['LineRef']
                    arrival_time = arrival['MonitoredVehicleJourney']['MonitoredCall']['ExpectedArrivalTime']
                    arrival_time = utc_timezone.localize(
                        datetime.strptime(arrival_time, "%Y-%m-%dT%H:%M:%SZ"))
                    time_till_arrival = arrival_time.astimezone(pst_timezone) - now
                    # FIX (naming): this value is minutes, not seconds.
                    # NOTE(review): timedelta.seconds wraps for arrivals in
                    # the past; .total_seconds() may be intended -- confirm.
                    minutes_till_arrival = round(time_till_arrival.seconds / 60)
                    arrivals[stop].setdefault(line_ref, []).append(minutes_till_arrival)
    return arrivals
def get_rt_arrivals_bart(agency_and_stops, my_511_token="", str_real_time=True):
    """Fetch real-time BART arrival predictions from the 511.org API.

    :param agency_and_stops: {agency: {stop_label: {station_code: [directions]}}}
    :param my_511_token: 511.org API key.
    :param str_real_time: kept for interface compatibility; the display
        string it gated ("<minutes>*") was dead code and has been removed.
    :return: {stop_label: {line_ref: [minutes_until_arrival, ...]}}
    """
    arrivals = {}
    # Loop-invariant time zone objects, built once.
    utc_timezone = pytz.timezone("UTC")
    pst_timezone = pytz.timezone("US/Pacific")
    for agency in agency_and_stops.keys():
        for stop in agency_and_stops[agency].keys():
            for bart_stop_code in agency_and_stops[agency][stop].keys():
                arrivals.setdefault(stop, {})
                url = "http://api.511.org/transit/StopMonitoring?api_key=" \
                      + my_511_token + "&agency=" + agency \
                      + "&stopCode=" \
                      + bart_stop_code
                response = requests.get(url)
                response.raise_for_status()
                response.encoding = 'utf-8-sig'  # trims the BOM off the feed
                real_time_data = json.loads(response.text)
                now = pst_timezone.localize(datetime.now())
                for arrival in real_time_data['ServiceDelivery']['StopMonitoringDelivery']['MonitoredStopVisit']:
                    direction_response = arrival['MonitoredVehicleJourney']['DirectionRef']
                    # Keep only the requested direction letters (N/S).
                    if direction_response not in agency_and_stops[agency][stop][bart_stop_code]:
                        continue
                    line_ref = arrival['MonitoredVehicleJourney']['LineRef']
                    arrival_time = arrival['MonitoredVehicleJourney']['MonitoredCall']['ExpectedArrivalTime']
                    arrival_time = utc_timezone.localize(
                        datetime.strptime(arrival_time, "%Y-%m-%dT%H:%M:%SZ"))
                    time_till_arrival = arrival_time.astimezone(pst_timezone) - now
                    # Dead code removed: the original also built a
                    # "<minutes>*" string gated on 'if real_time_data:'
                    # (always true, likely meant str_real_time) and never
                    # used it.
                    arrivals[stop].setdefault(line_ref, []).append(round(time_till_arrival.seconds / 60))
    return arrivals
def get_rt_arrivals(agency_and_stops_muni, agency_and_stops_bart, my_511_token=""):
muni_arrivals = get_rt_arrivals_muni(agency_and_stops_muni, my_511_token=my_511_token)
bart_arrivals = get_rt_arrivals_bart(agency_and_stops_bart, my_511_token=my_511_token)
arrivals = copy.deepcopy(muni_arrivals)
for bart_arrival in bart_arrivals["CivCtr"].keys():
arrivals["CivCtr"][bart_arrival] = bart_arrivals["CivCtr"][bart_arrival]
return arrivals
def checkBart(my_511_token=""):
    """Fetch the raw agency-wide BART StopMonitoring feed (debug helper)."""
    url = "http://api.511.org/transit/StopMonitoring?api_key=" \
          + my_511_token + "&agency=" + "BA"
    response = requests.get(url)
    response.raise_for_status()
    # 'utf-8-sig' strips the BOM so json can parse the body.
    response.encoding = 'utf-8-sig'
    return json.loads(response.text)
def get_northbound_arrivals(my_511_token=""):
    """Inbound Muni plus northbound BART predictions, merged."""
    return get_rt_arrivals(inbound_muni, northbound_bart, my_511_token=my_511_token)
def get_southbound_arrivals(my_511_token=""):
    """Outbound Muni plus southbound BART predictions, merged."""
    return get_rt_arrivals(outbound_muni, southbound_bart, my_511_token=my_511_token)
if __name__ == "__main__":
my_511_token = ""
north_arrivals = get_rt_arrivals(inbound_muni, northbound_bart, my_511_token=my_511_token)
south_arrivals = get_rt_arrivals(outbound_muni, southbound_bart, my_511_token=my_511_token)
pprint.pprint(north_arrivals)
pprint.pprint(south_arrivals)
bart_data = checkBart(my_511_token=my_511_token)
print("hi")
# bart_text_file = open("bart_data.txt",'w')
# bart_text_file.write(pprint.pformat(bart_data))
# bart_text_file.close()
| true |
d7abf0f7841b0c6cf4d343263fd6811e938c7413 | Python | selvin-joseph18/training2019 | /Desktop/file_handling/csv_writer.py | UTF-8 | 271 | 3.015625 | 3 | [] | no_license | import csv
# Copy data.csv to 'copy', rewriting each row with '-' as the delimiter.
# Per the csv module docs, csv files should be opened with newline='' so
# the writer does not emit extra blank lines on \r\n platforms.
with open('data.csv', 'r', newline='') as source_file:
    csv_reader = csv.reader(source_file)
    #print(csv_reader)
    with open('copy', 'w', newline='') as target_file:
        csv_writer = csv.writer(target_file, delimiter='-')
        for line in csv_reader:
            csv_writer.writerow(line)
| true |
7ae1c00529a704dda4265945f2f6b7ac3359381b | Python | karpov78/rosalind-algo | /python/mend.py | UTF-8 | 681 | 2.921875 | 3 | [] | no_license | import python.ctbl
def depthFirst(node):
    """Post-order DFS: return (P(AA), P(Aa), P(aa)) for the subtree at `node`."""
    if not node.edges:
        # Leaf: the genotype is known, so the distribution is a one-hot triple.
        genotype = node.value
        return (1 if genotype == 'AA' else 0,
                1 if genotype == 'Aa' else 0,
                1 if genotype == 'aa' else 0)
    left = depthFirst(node.edges[0])
    right = depthFirst(node.edges[1])
    # Mendelian cross of the two child distributions: compute the homozygous
    # probabilities directly; the heterozygous mass is whatever remains.
    p_dominant = left[0] * right[0] + (left[0] * right[1] + left[1] * right[0]) / 2 + left[1] * right[1] / 4
    p_recessive = left[2] * right[2] + (left[2] * right[1] + left[1] * right[2]) / 2 + left[1] * right[1] / 4
    return p_dominant, 1 - (p_dominant + p_recessive), p_recessive
if __name__ == '__main__':
    # Read a tree string, parse it with the ctbl helper, and print the root's
    # genotype distribution rounded to 3 decimals (Rosalind MEND output format).
    s = input()
    tree = python.ctbl.parseTree(s)
    a = depthFirst(tree.root)
    print(' '.join([str(round(x, 3)) for x in a]))
28d53bc929bab889a87fed626fac112d86d72fbd | Python | rovaughn/hilbert-lab-encoding | /py/color.py | UTF-8 | 2,384 | 3.1875 | 3 | [] | no_license |
class RGB:
    """An sRGB color with 8-bit (0-255) channel values."""
    __slots__ = ('r', 'g', 'b')

    def __init__(self, r, g, b):
        self.r = r
        self.g = g
        self.b = b

    def toLab(self):
        """Convert to CIE Lab by way of XYZ."""
        return self.toXYZ().toLab()

    def toXYZ(self):
        """Convert sRGB to CIE XYZ (components scaled so white Y == 100)."""
        def linearize(channel):
            # Undo the sRGB gamma curve, then scale to the 0-100 XYZ range.
            c = channel / 255.0
            if c > 0.04045:
                c = ((c + 0.055) / 1.055) ** 2.4
            else:
                c /= 12.92
            return 100.0 * c

        r = linearize(self.r)
        g = linearize(self.g)
        b = linearize(self.b)
        return XYZ(
            x = r * 0.4124 + g * 0.3576 + b * 0.1805,
            y = r * 0.2126 + g * 0.7152 + b * 0.0722,
            z = r * 0.0193 + g * 0.1192 + b * 0.9505
        )

    def hex(self):
        """Return the color as a '#rrggbb' hex string."""
        return '#%02x%02x%02x' % (self.r, self.g, self.b)

    def __str__(self):
        return 'RGB(%s, %s, %s)' % (self.r, self.g, self.b)

    def __repr__(self):
        return str(self)
class XYZ:
    """A color in CIE XYZ space (components scaled so white Y == 100)."""
    __slots__ = ('x', 'y', 'z')

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def toRGB(self):
        """Convert to 8-bit-scaled sRGB (channels clamp at 0 but may exceed 255 for out-of-gamut colors)."""
        def delinearize(c):
            if c < 0.0:
                c = 0.0
            if c > 0.0031308:
                c = 1.055 * c**(1.0/2.4) - 0.055
            else:
                c *= 12.92
            return c * 0xff

        x = self.x / 100.0
        y = self.y / 100.0
        z = self.z / 100.0
        return RGB(
            r = delinearize(x * 3.2406 + y * -1.5372 + z * -0.4986),
            g = delinearize(x * -0.9689 + y * 1.8758 + z * 0.0415),
            b = delinearize(x * 0.0557 + y * -0.2040 + z * 1.0570)
        )

    def toLab(self):
        """Convert to CIE Lab relative to the module-level `white` reference point."""
        def pivot(c, ref):
            c = c / ref
            if c > 0.008856:
                return c**(1.0/3.0)
            return 7.787*c + (16.0/116.0)

        fx = pivot(self.x, white.x)
        fy = pivot(self.y, white.y)
        fz = pivot(self.z, white.z)
        return Lab(
            l = 116.0*fy - 16.0,
            a = 500.0*(fx - fy),
            b = 200.0*(fy - fz)
        )

    def __str__(self):
        return 'XYZ(%s, %s, %s)' % (self.x, self.y, self.z)

    def __repr__(self):
        return str(self)
class Lab:
    """A color in CIE Lab (l: lightness, a/b: opponent color axes)."""
    __slots__ = ('l', 'a', 'b')

    def __init__(self, l, a, b):
        self.l = l
        self.a = a
        self.b = b

    def toRGB(self):
        """Convert to sRGB by way of XYZ."""
        return self.toXYZ().toRGB()

    def toXYZ(self):
        """Invert the Lab pivot relative to the module-level `white` point."""
        def unpivot(c, ref):
            cubed = c**3.0
            if cubed > 0.008856:
                return cubed * ref
            return (c - 16.0/116.0)/7.787 * ref

        fy = (self.l + 16.0) / 116.0
        fx = self.a / 500.0 + fy
        fz = fy - self.b/200.0
        return XYZ(unpivot(fx, white.x), unpivot(fy, white.y), unpivot(fz, white.z))

    def __str__(self):
        return 'Lab(%s, %s, %s)' % (self.l, self.a, self.b)

    def __repr__(self):
        return str(self)
white = XYZ(95.047, 100.000, 108.883)  # reference white point (D65 illuminant, 2° observer)
| true |
3746e2d5f06e089a6325608ce7e1936e3743a12d | Python | siddharthbharthulwar/Synthetic-Vision-System | /Pipeline/basegrid.py | UTF-8 | 6,920 | 2.578125 | 3 | [
"MIT"
] | permissive | #file containing functions useful in initial aggregation of DSM data
import numpy as np
import rasterio as rio
import matplotlib.pyplot as plt
import numpy.ma as ma
import math
import cv2 as cv
import rasterio.warp
import rasterio.features
from mayavi import mlab
from scipy.ndimage.filters import gaussian_filter
import time as time
#spatial extent for normal AHN grid tile is 8500 left, 437500 bottom, 90000 right, 443750 top
def load(path, fillBoolean):
    """Read band 1 of the raster at `path` as a masked array.

    The band's maximum value is treated as nodata/water and masked out.
    fillBoolean == 1: return a plain array with masked cells filled with 0.436;
    fillBoolean == 0: return the masked array as-is; anything else prints a
    usage message and returns None.
    """
    with rio.open(path) as src:
        raster = src.read(1, masked=True)
    if fillBoolean == 1:
        masked = ma.masked_values(raster, np.amax(raster))
        return masked.filled(0.436)
    if fillBoolean == 0:
        return ma.masked_values(raster, np.amax(raster))
    print("No valid parameter for filling in water values. 1 for YES, 0 for NO.")
def rioTransform(path, fillboolean):
    """Return the affine transform of the raster at `path`.

    Bug fix: the original computed the transform into a local and then fell
    off the end, so every call returned None; the transform is now returned.
    The unused band read was dropped (its result was discarded), and
    `fillboolean` is kept only for signature compatibility.
    """
    with rio.open(path) as src:
        metadata = src.meta
    return metadata['transform']
def getBounds(path):
    """Return the spatial bounding box of the raster at `path`."""
    with rio.open(path) as src:
        return src.bounds
def getMetaData(path):
    """Return the rasterio metadata dict for the file at `path`."""
    with rio.open(path) as src:
        return src.meta
def getAffine(path):
    """Return just the affine-transform entry of the raster's metadata."""
    with rio.open(path) as src:
        meta = src.meta
    return meta['transform']
def stack(inputPaths, dimensions, fillBool):
    """Load one or more DSM tiles and mosaic them into a single 2-D array.

    inputPaths: a single path (str) or a flat list of tile paths; dimensions is
    the (xdim, ydim) shape of the tile grid; fillBool is forwarded to load()
    (1 = fill water cells, 0 = keep them masked). Returns the stitched array,
    or prints an error (and returns None) when the path count does not match
    xdim * ydim.
    NOTE(review): the hstack/vstack ordering assumes inputPaths is laid out in
    column-major tile order (first xdim entries are the first row's columns) —
    confirm against the callers' tile naming.
    """
    if isinstance(inputPaths, str) == True:
        a = load(inputPaths, fillBool)
        return a
    else:
        if len(inputPaths) == dimensions[0] * dimensions[1]:
            # Load every tile up front.
            counter = 0
            paths = []
            while counter < len(inputPaths):
                paths.append(load(inputPaths[counter], fillBool))
                counter = counter + 1
            xdim = dimensions[0]
            ydim = dimensions[1]
            vert = 0
            hor = 1
            fin = []
            # Seed `fin` with each row's first tile (every xdim-th entry).
            while vert < len(paths):
                fin.append(paths[vert])
                vert = vert + xdim
            #creates a list with all values in the first column of path array
            vert = 0
            posMark = 0
            # Stitch each row horizontally: append tiles 1..xdim-1 of the row
            # onto the row's seed tile. posMark is the row's base index.
            while vert < ydim:
                while hor < xdim:
                    fin[vert] = np.hstack((fin[vert], paths[hor + posMark]))
                    hor = hor + 1
                posMark = posMark + xdim
                hor = 1
                vert = vert + 1
            # Finally stack the completed rows vertically into the mosaic.
            vertical = 1
            finalArray = fin[0]
            while vertical < ydim:
                finalArray = np.vstack((finalArray, fin[vertical]))
                vertical = vertical + 1
            return(finalArray)
        else:
            print("Dimensions and length of arrayList do not match")
def listBounds(parameterVal):
    """Return the bounds of one raster path (str) or of each path in a list."""
    paths = [parameterVal] if isinstance(parameterVal, str) else parameterVal
    bounds = []
    for p in paths:
        with rio.open(p) as src:
            bounds.append(src.bounds)
    return bounds
#converts the instance's AHN bounds into lat and lon coordinates
def transformBounds(normalBounds):
    """Reproject each (left, bottom, right, top) bounds tuple from the Dutch
    RD grid (EPSG:28992) to WGS84 lat/lon."""
    reprojected = []
    for b in normalBounds:
        reprojected.append(rasterio.warp.transform_bounds(
            'EPSG:28992', {'init': 'epsg:4326'}, b[0], b[1], b[2], b[3]))
    return reprojected
def tileDimensions(param):
    """Return (delta_lat, delta_lon) extents for each (left, bottom, right, top) tuple."""
    return [(bounds[3] - bounds[1], bounds[2] - bounds[0]) for bounds in param]
class BaseGrid:
    #class for a grid of rastered array tiff files
    def __init__(self, path, dimensions, fill):
        """Build a mosaic grid from one path or a list of tile paths.

        dimensions is the (xdim, ydim) tile layout; fill is forwarded to
        load() (1 = fill water cells, 0 = keep masked).
        """
        # Stitched elevation mosaic (see stack()).
        self.arrayValues = stack(path, dimensions, fill)
        self.dimensions = dimensions
        self.fill = fill
        # Native (EPSG:28992) bounds per tile, and the same in WGS84 lat/lon.
        self.bounds = listBounds(path)
        self.transformBounds = transformBounds(listBounds(path))
        # (delta_lat, delta_lon) extent of each tile in degrees.
        self.raw_dimensions = tileDimensions(transformBounds(listBounds(path)))
        self.shape = self.arrayValues.shape
        # Second, untouched copy — arrayValues may later be modified in place
        # (e.g. by process()).
        self.dupValues = stack(path, dimensions, fill)
    def dynamicShow(self):
        """Plot the grid with the colormap scaled to the data's own min/max."""
        plt.imshow(self.arrayValues, cmap='viridis', vmin = np.amin(self.arrayValues), vmax = np.amax(self.arrayValues))
        plt.show()
    def show(self, min, max):
        """Plot the grid with explicit colormap limits."""
        plt.imshow(self.arrayValues, cmap='viridis', vmin = min, vmax = max)
        plt.show()
    #simple matplotlib plotting of the terraingrid
    #accessor method to values of the terraingrid
    def arrayThreshold(self, value, outval, type):
        """Threshold the grid with OpenCV and return the result with zeros masked."""
        a = cv.threshold(self.arrayValues, value, outval, type)
        return ma.masked_values(a[1], 0)
    def horslice(self, row, xinit, xfinal):
        """Return a 1-D horizontal slice of `row` between columns xinit..xfinal."""
        return(np.squeeze(self.arrayValues[row:row+1, xinit:xfinal]))
    def verslice(self, column, yinit, yfinal):
        """Return a 1-D vertical slice of `column` between rows yinit..yfinal."""
        return(np.squeeze(self.arrayValues[yinit:yfinal, column:column + 1]))
    def gridslice_2d(self, xinit, xfinal, yinit, yfinal):
        """Return a rectangular 2-D sub-grid."""
        return(self.arrayValues[yinit:yfinal, xinit:xfinal])
    def viewer_3d(self, color, min, max):
        """Render the grid as a 3-D surface with Mayavi (blocks until closed)."""
        mlab.figure(size=(1920, 1080), bgcolor=(0.16, 0.28, 0.46))
        mlab.surf(self.arrayValues, colormap= color, warp_scale=0.2,
                  vmin= min, vmax=max)
        mlab.view(-5.9, 900, 570, [5.3, 20, 238])
        mlab.show()
    def interpolate(self, sigma):
        """Return a Gaussian-smoothed copy of the grid (does not modify in place)."""
        return gaussian_filter(self.arrayValues, sigma = sigma)
    def process(self):
        """Detect large connected water regions, save a blend map image, then
        smooth and zero-shift arrayValues IN PLACE.

        NOTE(review): assumes water cells carry the value 1 in arrayValues
        (the masked_not_equal(…, 1) below) — confirm against load()'s fill
        value. Displays several matplotlib windows as a side effect.
        """
        water = ma.masked_not_equal(self.arrayValues, 1).astype('uint8') + np.ones(self.arrayValues.shape)
        ret, thresh = cv.threshold(water, 0, 1, cv.THRESH_BINARY_INV)
        plt.imshow(thresh)
        plt.show()
        n_labels, labels, stats, centroids = cv.connectedComponentsWithStats(thresh.astype('uint8'), connectivity = 4, )
        waterarray = np.zeros(self.arrayValues.shape)
        waterlist = []
        unique = np.delete(np.unique(labels), 0)
        for i in unique:
            # Keep only components larger than 170 pixels (noise filter).
            if (stats[i, 4]) > 170:
                print(i, " / ", n_labels)
                org = ma.masked_not_equal(labels, i) / i
                waterarray = np.add(waterarray, org.filled(0))
        dim = np.zeros(waterarray.shape)
        # Red channel = water mask; green/blue empty.
        R = np.stack((waterarray, dim, dim), axis = 2)
        plt.imshow(R)
        plt.imsave('blendmap.png', cmap = 'gist_gray')
        plt.show()
        self.arrayValues = gaussian_filter(self.arrayValues, sigma = 0.5)
        # Shift so the minimum elevation becomes 0.
        min = np.amin(self.arrayValues)
        self.arrayValues = self.arrayValues - min
        plt.imshow(self.arrayValues)
        plt.show()
        print(np.amin(self.arrayValues))
class Cliente(object):
    """A client with a name and DNI (national identity number)."""

    def __init__(self, nombre = "nombre generico", dni = "12345678"):
        self._nombre = nombre
        self._dni = dni

    def __str__(self):
        return f"Nombre: {self._nombre} \nDNI: {self._dni}"

    def enviar_mensaje(self, mensaje):
        """Send a message to the client (not yet implemented).

        Bug fix: the original declared ``enviar_mensaje(mensaje)`` without
        ``self``, so ``cliente.enviar_mensaje("hi")`` raised a TypeError.
        """
        pass
| true |
5facb3af3c5e75f267bedf2f9ed1ed9ba4f2aa12 | Python | Overdron/face_recognition_pavlov_andrey_2021 | /webcam_emotion_recognition.py | UTF-8 | 4,021 | 2.5625 | 3 | [] | no_license | import cv2
import numpy as np
import tensorflow as tf
import os
from zipfile import ZipFile
from google_drive_downloader import GoogleDriveDownloader as gdd
def load_model():
    """Return the Keras emotion model, fetching and/or unpacking weights first if missing."""
    def _unzip():
        with ZipFile('model.zip', 'r') as archive:
            archive.extractall()

    entries = os.listdir('./')
    if '3_trt' in entries:
        # Extracted model directory already present.
        return tf.keras.models.load_model('3_trt/')
    if 'model.zip' in entries:
        # Archive present but not extracted yet.
        _unzip()
        return tf.keras.models.load_model('3_trt/')
    # Nothing cached: download the archive from Google Drive, then extract.
    gdd.download_file_from_google_drive('1jkwvE0XvX919wYkD4ixEk6VYjdQFiLIx', './model.zip')
    _unzip()
    emotion_model = tf.keras.models.load_model('3_trt/')
    print('model downloaded')
    return emotion_model
def load_emotion_dict():
    """Return the class-index -> emotion-name mapping used by the classifier."""
    names = ['anger', 'contempt', 'disgust', 'fear', 'happy',
             'neutral', 'sad', 'surprise', 'uncertain']
    return {index: name for index, name in enumerate(names)}
def decode_prediction(pred: np.ndarray, emo_dict):
    """Map an array of predicted class indices to an array of emotion names."""
    names = [emo_dict[index] for index in pred]
    return np.array(names)
# def log_error(func):
#
# def inner(*args, **kwargs):
# try:
# return func(*args, **kwargs)
# except Exception as e:
# print(f'Ошибка: {e}')
#
# return inner
#
#
# @log_error
def main():
    """Webcam loop: detect faces, classify emotions, and draw labeled boxes.

    Press 'q' in the video window to quit. Requires a webcam and the model
    from load_model().
    """
    print(tf.config.list_physical_devices('GPU'))
    cam = cv2.VideoCapture(0)
    if not cam.isOpened():
        print("Не удалось открыть камеру")
    else:
        print("Камера запущена")
    emotion_dict = load_emotion_dict()
    emotion_model = load_model()
    face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    # Read frames from the camera in an endless loop and display them with
    # cv2.imshow(); the loop exits cleanly when 'q' is pressed.
    while (True):
        try:
            ret, frame = cam.read()
            grayscale_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_detector.detectMultiScale(grayscale_frame, 1.3, 5)  # or 1.1, 19
            if len(faces) > 0:
                faces_collection = []
                bb_collection = []
                for (x, y, w, h) in faces:
                    # Crop each detected face and convert BGR -> RGB for the model.
                    face_boundingbox_bgr = frame[y:y + h, x:x + w]
                    face_boundingbox_rgb = cv2.cvtColor(face_boundingbox_bgr, cv2.COLOR_BGR2RGB)
                    faces_collection.append(face_boundingbox_rgb)
                    bb_collection.append((x, y, w, h))
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), )
                faces_collection = np.array(faces_collection)
                faces_collection = tf.image.resize(faces_collection, (150, 150))
                preds = emotion_model(faces_collection)
                # preds -> emotion_idx, confidence_percentage
                emotion_idx = np.argmax(preds, axis=1)
                emotions = decode_prediction(emotion_idx, emotion_dict)
                confidence_percentage = np.max(preds, axis=1)
                for bb, emotion, confidence in zip(bb_collection, emotions, confidence_percentage):
                    # NOTE(review): this uses (x, y) left over from the detection
                    # loop instead of bb, so with several faces all labels are
                    # drawn at the last face's position — likely a bug.
                    cv2.putText(frame, f'{emotion:9s} {confidence:.0%}', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (30, 255, 30), 1)
            cv2.imshow("facial emotion recognition", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        except Exception as e:
            print(f'Ошибка: {e}')
    cam.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Start the webcam recognition loop only when run as a script.
    main()
| true |
f795a2a8b54f84fa9f3c8b8017eeb6bf9ae55d8d | Python | Mezgrman/K8055 | /lcd.py | UTF-8 | 7,114 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# K8055 4Bit LCD display
# © 2013 Mezgrman
import argparse
import hashlib
import os
import psutil
import pyk8055
import random
import re
import sys
import termios
import time
import tty
from k8055_classes import K80554BitLCDController, K8055LCDUI
from subprocess import check_output as shell
# K8055
"""PINMAP = {
'RS': 8,
'RW': 7,
'E': 6,
'D4': 5,
'D5': 4,
'D6': 3,
'D7': 2,
'LED': 1,
}"""
# Raspberry Pi
PINMAP = {
'RS': 2,
'RW': 3,
'E': 4,
'D4': 22,
'D5': 10,
'D6': 9,
'D7': 11,
'LED': 18,
}
"""CHARMAP = {
'dir': "/home/mezgrman/temp",
}"""
CHARMAP = None
class KeyReader:
    """Reads single keypresses (and 3-byte escape sequences) from stdin.

    When stdin is a TTY, the terminal is switched to raw mode for each read;
    when stdin is a pipe, all input is buffered once and replayed character
    by character.
    """
    def __init__(self):
        self.buffer = []      # pending characters when stdin is not a TTY
        self.in_seq = False   # True while collecting an ESC sequence
        self.seq = []         # characters of the escape sequence so far

    def read_key(self):
        """Return the next key.

        TTY path: Ctrl-C (code 3) raises KeyboardInterrupt; an ESC (code 27)
        starts a 3-byte sequence which is returned joined once complete, with
        None returned for the intermediate bytes.
        Pipe path: returns one buffered character, raising SystemExit when
        the input is exhausted.
        NOTE(review): in the TTY path a plain printable key appears to fall
        through without `return char`, yielding None — confirm against the
        upstream project before relying on interactive input.
        """
        if sys.stdin.isatty():
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(sys.stdin.fileno())
                char = sys.stdin.read(1)
                code = ord(char)
                if code == 3:
                    raise KeyboardInterrupt
                if code == 27:
                    self.in_seq = True
                if self.in_seq:
                    self.seq.append(char)
                    if len(self.seq) == 3:
                        self.in_seq = False
                        seq = self.seq[:]
                        self.seq = []
                        return "".join(seq)
                    return
            finally:
                # Always restore the saved terminal settings.
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        else:
            if not self.buffer:
                self.buffer = list(sys.stdin.read())
            try:
                char = self.buffer.pop(0)
            except IndexError:
                raise SystemExit
            return char
def run():
    """Entry point: parse CLI options and drive the LCD in the chosen mode.

    Modes: interactive (menu UI), music (mocp now-playing), textpad (echo
    keys), text (static/scrolling text), stats (load average). The display
    is always shut down in the finally block.
    NOTE(review): 'textpad' mode references key_reader, which is only created
    in 'interactive' mode — running with -m textpad raises NameError.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', choices = ['stats', 'text', 'textpad', 'interactive', 'music'], default = 'interactive')
    parser.add_argument('-t', '--text', default = "Hello world!")
    parser.add_argument('-c', '--cursor', action = 'store_true')
    parser.add_argument('-cb', '--cursor-blink', action = 'store_true')
    parser.add_argument('-w', '--wrap', action = 'store_true')
    parser.add_argument('-s', '--scroll', action = 'store_true')
    parser.add_argument('-sd', '--scroll-delay', type = float, default = 0.25)
    parser.add_argument('-si', '--skip-init', action = 'store_true')
    parser.add_argument('-a', '--align', choices = ['left', 'center', 'right'], default = 'left')
    args = parser.parse_args()
    # k = pyk8055.k8055(0)
    k = None  # board handle disabled; controller runs in GPIO/pinmap mode
    display = K80554BitLCDController(board = k, pinmap = PINMAP, charmap = CHARMAP, lines = 2, columns = 16, skip_init = args.skip_init, debug = False)
    display.set_display_enable(cursor = args.cursor, cursor_blink = args.cursor_blink)
    display.clear()
    display.home()
    try:
        if args.mode == 'interactive':
            key_reader = KeyReader()
            ui = K8055LCDUI(display, key_reader)
            """ui.dim(0, animate = True, duration = 1.0)
            ui.dim(1023, animate = True, duration = 1.0)
            ui.dim(0, animate = True, duration = 1.0)
            ui.dim(1023, animate = True, duration = 1.0)
            ui.dim(800, animate = False)
            time.sleep(0.25)
            ui.dim(600, animate = False)
            time.sleep(0.25)
            ui.dim(400, animate = False)
            time.sleep(0.25)
            ui.dim(200, animate = False)
            time.sleep(0.25)
            ui.dim(0, animate = False)
            time.sleep(0.25)
            ui.dim(1023, animate = True, duration = 1.0)"""
            # Main menu loop; each branch handles one menu entry until Quit.
            while True:
                res = ui.list_dialog("Welcome!", ("Textpad mode", "Clock", "System info", "Demos", "Settings", "Quit"), align = 'center')
                if res[1] == "Textpad mode":
                    ui.clear()
                    try:
                        # Echo keystrokes to the display until Ctrl-C.
                        while True:
                            char = key_reader.read_key()
                            if char:
                                display.write(char)
                    except KeyboardInterrupt:
                        pass
                    ui.clear()
                elif res[1] == "Clock":
                    try:
                        while True:
                            data = time.strftime("%a, %d.%m.%Y\n%H:%M:%S")
                            ui.message(data, align = 'center')
                            time.sleep(1)
                    except KeyboardInterrupt:
                        pass
                elif res[1] == "System info":
                    # Sub-menu: each entry refreshes every 5 s until Ctrl-C.
                    while True:
                        ires = ui.list_dialog("System info", ("Load average", "Disk space", "Memory", "Back"), align = 'center')
                        if ires[1] == "Load average":
                            try:
                                while True:
                                    with open("/proc/loadavg", 'r') as f:
                                        loadavg = f.read()
                                    data = "* LOAD AVERAGE *\n" + " ".join(loadavg.split()[:3])
                                    ui.message(data, align = 'center')
                                    time.sleep(5)
                            except KeyboardInterrupt:
                                pass
                        elif ires[1] == "Disk space":
                            try:
                                while True:
                                    space = os.statvfs("/")
                                    free = (space.f_bavail * space.f_frsize) / 1024.0 / 1024.0
                                    total = (space.f_blocks * space.f_frsize) / 1024.0 / 1024.0
                                    data = "Total\t%.2fMB\nFree\t%.2fMB" % (total, free)
                                    ui.message(data)
                                    time.sleep(5)
                            except KeyboardInterrupt:
                                pass
                        elif ires[1] == "Memory":
                            try:
                                while True:
                                    # NOTE(review): psutil.phymem_usage() was removed in
                                    # modern psutil (virtual_memory() replaces it).
                                    mem = psutil.phymem_usage()
                                    free = mem[2] / 1024.0 / 1024.0
                                    total = mem[0] / 1024.0 / 1024.0
                                    data = "Total\t%.2fMB\nFree\t%.2fMB" % (total, free)
                                    ui.message(data)
                                    time.sleep(5)
                            except KeyboardInterrupt:
                                pass
                        elif ires[1] == "Back":
                            break
                elif res[1] == "Demos":
                    while True:
                        dres = ui.list_dialog("Demos", ("Progress bar", "Input dialog", "Back"), align = 'center')
                        if dres[1] == "Progress bar":
                            # Fill the bar in 16 steps of 1/16.
                            x = 0.0
                            bar = ui.progress_bar("Testing...", fraction = x, char = "*")
                            while x < 1.0:
                                x += 1.0 / 16.0
                                bar.update(fraction = x)
                                time.sleep(1.5)
                            ui.message("Done :)", align = 'center')
                            time.sleep(3)
                        elif dres[1] == "Input dialog":
                            name = ui.input_dialog("Your name?")
                            ui.message("Hello %s!" % name, align = 'center')
                            time.sleep(3)
                        elif dres[1] == "Back":
                            break
                elif res[1] == "Settings":
                    while True:
                        sres = ui.list_dialog("Settings", ("Brightness", "Back"), align = 'center')
                        if sres[1] == "Brightness":
                            count = ui.slider_dialog("Brightness", 0, 1023, step = 5, big_step = 100, value = ui.display.brightness)
                            ui.dim(count)
                        elif sres[1] == "Back":
                            break
                elif res[1] == "Quit":
                    ui.clear()
                    ui.dim(0)
                    break
        elif args.mode == 'music':
            # Poll the mocp player every 5 s and show artist/title.
            while True:
                data = shell(['mocp', '--info'])
                if "FATAL_ERROR" in data:
                    string = "Not running"
                else:
                    metadata = [line.split(": ") for line in data.splitlines()]
                    metadata = dict([(line[0], ": ".join(line[1:])) for line in metadata])
                    string = "%(Artist)s\n%(Title)s" % metadata
                display.write(string, align = 'center')
                time.sleep(5)
        elif args.mode == 'textpad':
            # NOTE(review): key_reader is undefined in this mode (see docstring).
            while True:
                char = key_reader.read_key()
                if char:
                    display.write(char)
        elif args.mode == 'text':
            # \n typed on the command line arrives as the two characters '\' 'n'.
            text = args.text.replace("\\n", "\n")
            display.write(text, wrap = args.wrap, align = args.align)
            while args.scroll and len(args.text) > display.column_count:
                display.scroll()
                time.sleep(args.scroll_delay)
        elif args.mode == 'stats':
            while True:
                with open("/proc/loadavg", 'r') as f:
                    loadavg = f.read()
                data = "* LOAD AVERAGE *\n" + " ".join(loadavg.split()[:3])
                display.write(data, wrap = args.wrap, update = True, align = args.align)
                time.sleep(5)
    except KeyboardInterrupt:
        pass
    except:
        raise
    finally:
        display.shutdown()
run()
| true |
44c1902b3dcedc47400502b19e889e18fea700da | Python | lishan1047/PythonTeachingSample | /DataAnalysis/PandasNorthwind3.py | UTF-8 | 613 | 2.90625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd

# Northwind sales analysis on monthly product quantities.
data = pd.read_table('Northwind.txt', sep=',')
# (2) Find the two products whose sales correlate most strongly. (Solution 2)
# where() keeps the frame's shape: 2008 rows become NaN instead of being dropped.
data = data.where(data.OrderYear != 2008)
products = pd.DataFrame(data.ProductName.unique()).dropna()
# Flatten the single-column frame into a 1-D array of product names.
products = products.to_numpy().reshape(1, len(products))[0]
# One per-product series: quantity sold per (year, month).
pg = [data[data.ProductName == p].groupby( \
    by = [data.OrderYear, data.OrderMonth])['Quantity'].sum() \
    for p in products]
# Align all series column-wise, fill missing months with 0, correlate pairwise.
join = pd.concat(pg, axis = 1, keys = products).fillna(0).corr()
# Mask the diagonal (self-correlation == 1.0) before taking the maximum.
max_arr = join[join != 1.0].max()
maxv = max_arr.max()
print(max_arr[max_arr == maxv])
| true |
78956647532388d4dff9f5872cec8e2a7c1a3525 | Python | yzhxrain/polyu_AI_concept_2020 | /src/common/example.py | UTF-8 | 8,965 | 3.4375 | 3 | [] | no_license | '''
Using census.csv for evaluation
'''
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import visuals as vs
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import fbeta_score,accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import pickle
# Pretty display for notebooks
#%matplotlib inline
def display_data():
    """Show the first rows of the module-level `data` frame (IPython display)."""
    # Success - Display the first record
    display(data.head(n=3))
# display_data()
def show_income():
    """Print record counts and the percentage earning more than $50,000.

    Reads the module-level `data` frame; assumes an 'income' column whose
    value_counts() returns (<=50K count, >50K count) in that order.
    """
    # TODO: Total number of records
    n_records = data.shape[0]
    # TODO: Number of records where individual's income is more than $50,000
    #n_greater_50k = sum(data['income'] == '>50K')
    # TODO: Number of records where individual's income is at most $50,000
    #n_at_most_50k = sum(data['income'] == '<=50K')
    n_at_most_50k, n_greater_50k = data.income.value_counts()
    # TODO: Percentage of individuals whose income is more than $50,000
    greater_percent = np.true_divide(n_greater_50k , n_records) * 100
    # Print the results
    print("Total number of records: {}".format(n_records))
    print("Individuals making more than $50,000: {}".format(n_greater_50k))
    print("Individuals making at most $50,000: {}".format(n_at_most_50k))
    print("Percentage of individuals making more than $50,000: {:.2f}%".format(greater_percent))
#show_income()
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
    '''
    Train `learner` on the first `sample_size` rows, then time and score it.

    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: income training set
       - X_test: features testing set
       - y_test: income testing set
    returns:
       dict with keys train_time, pred_time, acc_train, acc_test,
       f_train, f_test (training metrics use the first 300 samples only).
    '''
    results = {}
    # Fit the learner on a slice of the training data and time it.
    start = time() # Get start time
    learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
    end = time() # Get end time
    # TODO: Calculate the training time
    results['train_time'] = end - start
    # Predict on the test set and on the first 300 training samples, timing both.
    start = time() # Get start time
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train[:300])
    end = time() # Get end time
    # TODO: Calculate the total prediction time
    results['pred_time'] = end-start
    # TODO: Compute accuracy on the first 300 training samples which is y_train[:300]
    results['acc_train'] = accuracy_score(predictions_train, y_train[:300])
    # TODO: Compute accuracy on test set using accuracy_score()
    results['acc_test'] = accuracy_score(predictions_test, y_test)
    # TODO: Compute F-score on the the first 300 training samples using fbeta_score()
    results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta= 0.5)
    # TODO: Compute F-score on the test set which is y_test
    results['f_test'] = fbeta_score(y_test, predictions_test, beta= 0.5)
    # Success
    print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
    # Return the results
    return results
def storeTree(inputTree, filename):
    """Pickle `inputTree` to `filename` (binary mode).

    Fix: use a context manager so the file handle is closed even when
    pickle.dump raises (the original close() was skipped on error).
    """
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)
def grabTree(filename):
    """Unpickle and return the object stored in `filename`.

    Fix: the original opened the file and never closed it (resource leak);
    `with` guarantees the handle is released.
    """
    with open(filename, 'rb') as fr:
        return pickle.load(fr)
def evaluate(dataset, key_field):
    """Full census-income pipeline: preprocess, build a naive baseline, then
    train and compare four classifiers at 1%/10%/100% of the training data.

    dataset: a pandas frame containing `key_field` as the label column.
    Side effects: prints progress, pickles the decision tree to
    "decision_tree", and renders the metrics plot via vs.evaluate().
    """
    income_raw = dataset[key_field]
    # 1. start of pre-processing data
    features_raw = dataset.drop(key_field, axis = 1)
    # Visualize skewed continuous features of original data
    # vs.distribution(data)
    # Log-transform the skewed features
    skewed = ['capital-gain', 'capital-loss']
    features_log_transformed = pd.DataFrame(data = features_raw)
    features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
    # Visualize the new log distributions
    # vs.distribution(features_log_transformed, transformed = True)
    # Initialize a scaler, then apply it to the features
    scaler = MinMaxScaler() # default=(0, 1)
    numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
    features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
    features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
    # Show an example of a record with scaling applied
    display(features_log_minmax_transform.head(n = 5))
    # vs.distribution(features_log_minmax_transform)
    # One-hot encode the categorical features.
    features_final = pd.get_dummies(features_log_minmax_transform)
    # Encode the 'income_raw' labels to numerical values.
    encoder = LabelEncoder()
    income = encoder.fit_transform(income_raw)
    # Print the number of features after one-hot encoding
    encoded = list(features_final.columns)
    print("{} total features after one-hot encoding.".format(len(encoded)))
    # Uncomment the following line to see the encoded feature names
    # print encoded
    X_train, X_test, y_train, y_test = train_test_split(features_final,
                                                        income,
                                                        test_size = 0.2,
                                                        random_state = 0)
    # Show the results of the split
    print("Training set has {} samples.".format(X_train.shape[0]))
    print("Testing set has {} samples.".format(X_test.shape[0]))
    # 1. end of pre-processing data
    # 2. start of building native predictor
    '''
    TP = np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data
    encoded to numerical values done in the data preprocessing step.
    FP = income.count() - TP # Specific to the naive case
    TN = 0 # No predicted negatives in the naive case
    FN = 0 # No predicted negatives in the naive case
    '''
    # Naive baseline: predict ">50K" for everyone, so recall is 1 and
    # precision equals the positive-class rate.
    encoder = LabelEncoder()
    income = encoder.fit_transform(income_raw)
    TP = np.sum(income)
    FP = len(income) - TP
    accuracy = np.true_divide(TP,TP + FP)
    recall = 1
    precision = accuracy
    # F-beta with beta = 0.5:
    # (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
    fscore = (1 + 0.5**2) * (precision * recall) / ((0.5**2 * precision) + recall)
    # Print the results
    print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore))
    # 2. end of building native predictor
    # 3. start of evaluation
    # Initialize the candidate models.
    clf_random_forest = RandomForestClassifier()
    clf_decision_tree = DecisionTreeClassifier(random_state=0)
    clf_C = SVC(kernel = 'rbf')
    clf_M = MLPClassifier(solver='sgd',activation = 'identity',max_iter = 70,alpha = 1e-5,hidden_layer_sizes = (100,50),random_state = 1,verbose = False)
    # Sample sizes: 1%, 10% and 100% of the training data.
    samples_100 = len(y_train)
    samples_10 = int(len(y_train)*0.1)
    samples_1 = int(len(y_train)*0.01)
    # Collect results on the learners
    results = {}
    for clf in [clf_random_forest, clf_decision_tree, clf_C, clf_M]:
        clf_name = clf.__class__.__name__
        results[clf_name] = {}
        for i, samples in enumerate([samples_1, samples_10, samples_100]):
            results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_test, y_test)
            # Persist the decision tree after each training run.
            if clf == clf_decision_tree:
                storeTree(clf, "decision_tree")
    # Run metrics visualization for the three supervised learning models chosen
    vs.evaluate(results, accuracy, fscore)
    # 3. end of evaluation
# 3. end of evaluation
# Available datasets: each entry names the CSV file and its label column.
dataset_map = {
    '1': {'filename': 'census.csv', 'key_field': 'income'},
    '2': {'filename': 'train.csv', 'key_field': 'exceeds50K'},
}
if __name__ == '__main__':
    # Run the full pipeline on the census dataset (entry '1').
    dataset_info = dataset_map.get('1')
    data = pd.read_csv(dataset_info.get('filename'))
    evaluate(data, dataset_info.get('key_field'))
| true |
import ast

# Read a dict literal such as {'Science': [88, 89], 'Language': [77, 78]}
# from stdin. Fixes: ast.literal_eval replaces eval() (which would execute
# arbitrary code typed by the user), and the leftover debug lines
# `print(type(obj))` / `exit()` — which made the rest of the script dead
# code — are removed.
obj = ast.literal_eval(input())
# obj = {'Science': [88, 89, 62, 95], 'Language': [77, 78, 84, 80]}

# Transpose {subject: [scores...]} into a list of per-index dicts:
# result[i] maps every subject to its i-th score.
result = []
for key, values in obj.items():
    for i, value in enumerate(values):
        if len(result) < i + 1:
            result.append({})
        result[i][key] = value
print(result)
c6843e2c83dde09894b0d4d956ef7187b79faba7 | Python | MohamedAboBakr/Machine_Learning_WU_Specialization_Regression | /W2_Predicting House Prices/Assignment.py | UTF-8 | 1,628 | 2.796875 | 3 | [] | no_license | import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn import metrics
from sklearn.model_selection import train_test_split
import seaborn as sns
import statsmodels.formula.api as smf
import statsmodels.api as sm
def add_features(df):
    """Add the four engineered columns used by models 2 and 3, in place.

    New columns: bedrooms_squared, bed_bath_rooms (interaction term),
    log_sqft_living, lat_plus_long.
    """
    bedrooms = df['bedrooms']
    bathrooms = df['bathrooms']
    df['bedrooms_squared'] = bedrooms * bedrooms
    df['bed_bath_rooms'] = bedrooms * bathrooms
    df['log_sqft_living'] = np.log(df['sqft_living'])
    df['lat_plus_long'] = df['lat'] + df['long']
# Load the King County housing splits and add the engineered columns.
train_data = pd.read_csv('kc_house_train_data.csv')
test_data = pd.read_csv('kc_house_test_data.csv')
add_features(train_data)
add_features(test_data)
y_train = train_data['price']
y_test = test_data['price']
# Model 1: base feature set.
features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
X1_train = train_data[features]
X1_test = test_data[features]
ml1 = LinearRegression()
ml1.fit(X1_train, y_train)
coef1 = ml1.coef_
# score() returns R^2 on the held-out test set.
accuracy1 = ml1.score(X1_test, y_test)
print(coef1)
print(accuracy1)
# Model 2: base features + bed/bath interaction.
features.append('bed_bath_rooms')
X2_train = train_data[features]
X2_test = test_data[features]
ml2 = LinearRegression()
ml2.fit(X2_train, y_train)
coef2 = ml2.coef_
accuracy2 = ml2.score(X2_test, y_test)
print(coef2)
print(accuracy2)
# Model 3: all engineered features.
features.append('bedrooms_squared')
features.append('log_sqft_living')
features.append('lat_plus_long')
X3_train = train_data[features]
X3_test = test_data[features]
ml3 = LinearRegression()
ml3.fit(X3_train, y_train)
coef3 = ml3.coef_
accuracy3 = ml3.score(X3_test, y_test)
print(coef3)
print(accuracy3)
| true |
f95a03d1aeae27b11a9436aad84234cec317fbef | Python | Ginkooo/overthewire | /natas/natas11.py | UTF-8 | 868 | 2.671875 | 3 | [] | no_license | import base64
from itertools import cycle
xored_and_64 = b'ClVLIh4ASCsCBE8lAxMacFMZV2hdVVotEhhUJQNVAmhSRwh6QUcIaAw=' # cookie
decoded = base64.decodebytes(xored_and_64) # decoded cookie
text = b'{"showpassword":"no","bgcolor":"#ffffff"}' # xored decoded cookie
# Recover the XOR key: known plaintext XOR ciphertext yields the repeating
# key bytes (inspecting `ret` reveals the 4-byte cycle 71 77 38 4a).
ret = []
for i, l in zip(text, decoded):
    ret.append(hex(i ^ l))
def xor_encrypt(data):
    """XOR `data` with the repeating 4-byte key and return one hex string per byte."""
    key = [int(x, 16) for x in ['0x71', '0x77', '0x38', '0x4a']]
    return [hex(byte ^ key_byte) for byte, key_byte in zip(data, cycle(key))]
hexarr = xor_encrypt(xored_and_64) # array decoded as hex array
x = ''.join(['0'+bin(int(x, 16))[2:] for x in hexarr]) # binary for that array
# Forge the cookie: flip "showpassword" to "yes", re-encrypt with the
# recovered key, and base64-encode the result for the Set-Cookie value.
text = b'{"showpassword":"yes","bgcolor":"#ffffff"}'
xored = xor_encrypt(text)
xored_bytes = bytes([int(a, 16) for a in xored])
print(base64.encodebytes(xored_bytes))
| true |
960d84d912d493c189eb8288820e2040b3dd0e3e | Python | puk18/phishingwebsitesprediction | /logisticRegression/untitled0.py | UTF-8 | 1,132 | 2.828125 | 3 | [] | no_license | # Import the dependencies
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Train/evaluate logistic regression on the phishing-websites dataset;
# 'Result' is the label column.
filename = 'csv_result-Training Dataset.csv'
emailData = pd.read_csv(filename)
X = emailData.drop('Result', axis=1)
y = emailData['Result']
# NOTE(review): train_test_split/LogisticRegression are re-imported here
# although already imported at the top of the file — harmless but redundant.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30)
from sklearn.linear_model import LogisticRegression
# instantiate the model (using the default parameters)
logreg = LogisticRegression()
# fit the model with data
logreg.fit(X_train,y_train)
# predict labels for the held-out split
y_pred=logreg.predict(X_test)
from sklearn import metrics
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix  # NOTE(review): bare expression is a no-op outside a notebook
print(cnf_matrix)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred))
print("Recall:",metrics.recall_score(y_test, y_pred))
b34da551e1ff9b6df5fb476550d37dfa30cb57c7 | Python | RishikaMachina/Trees-2 | /Problem_2.py | UTF-8 | 826 | 3.734375 | 4 | [] | no_license | # Runs on Leetcode
# Runtime- O(n)
# Memory- O(1)
'''
1) Recursively travel through tree till we reach left leaf node of the tree. and then right nodes.
2) Maintaining the sum till last level in temp variable and adding it to result when we reach any leaf node of the tree
'''
class Solution:
def sumNumbers(self, root):
if not root:
return 0
self.result = 0
self.helper(root,str(root.val),0)
return self.result
def helper(self,root,temp,result):
if root.left is None and root.right is None:
self.result += int(temp)
if root.left is not None:
self.helper(root.left,temp+str(root.left.val),self.result)
if root.right is not None:
self.helper(root.right,temp+str(root.right.val),self.result)
| true |
8d1cf5e8d56fd5b278483fabe06a14cf92f4972c | Python | kopanswer/gao | /basic_course/demo2.py | UTF-8 | 1,390 | 3.90625 | 4 | [] | no_license | # -*- coding:utf8 -*-
"""
@discription: 函数
@author: xxx
@date: 2019-03-09
"""
def add1(a,b):
return a+b
# 默认参数
def add2(a=1,b=2):
return a+b
# 可变参数
def sum(*numbers, tip): # * 代表参数有若干个,传入的参数是对的,然后把 args 封装为一个 tuple
print(type(numbers))
sum = 0
for number in numbers:
sum += number
print(tip)
return sum
# 关键词参数
def print_map(**kw): # ** 传入的参数是独立的,然后把 kw 封装成一个字典
print(type(kw))
for k, v in kw.items():
print(k,v)
# 通用函数(无论参数怎样,都可以运行)
def common_func(*args, **kw):
print(type(args))
print(args)
print(kw)
# 递归函数
def fi(n):
# 1 函数一定要有入口和出口,入口是参数,出口是最后一个return
# 先考虑极端情况
if n <= 0:
return 0
if n <= 2:
return 1
return fi(n-1) + fi(n-2)
if __name__ == "__main__":
# print(add1(1,2))
# print(add2())
# print(add2(3))
# print(add2(b=5))
# print(sum(1))
# print(sum(1,3))
# print(sum(1,3,5,7,2,4,6,8))
# result = sum(1,2,3,tip="finished")
# print(result)
# print_map(name="tom", age="18", name2="jack", key="value")
# common_func(1,2,"b",3,a=1,b="kw")
print(fi(3))
| true |
c0de01ce090e77300623f57199747f1dc335bd67 | Python | piotut/Pong | /pong.py | UTF-8 | 3,340 | 2.984375 | 3 | [] | no_license | #!/usr/bin/python
import pygame
from pygame.locals import * # importujemy nazwy (klawiszy)
from sys import exit
from math import *
from random import randint
#internal classes
from paddle import Paddle
from ball import Ball
from arena import Arena
from sound import Sound
from referee import Referee
from tracking import Tracking
from threading import Thread
screenWidth = 1200
screenHeight = 600
screenRect = (screenWidth,screenHeight)
class Pong(object):
def __init__(self, file1=None, file2=None):
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.init()
self.fps = pygame.time.Clock()
flag = DOUBLEBUF
self.board = pygame.display.set_mode(screenRect, flag)
pygame.display.set_caption('[ --- Pong --- ]')
self.state = 1 # 1 - run, 0 - exit
self.track = Tracking(file1, file2)
self.sound = Sound()
self.p1 = Paddle(self.board, (200,100,100),screenRect)
self.p1.setInitialPostition(0,screenHeight/2)
self.p2 = Paddle(self.board, (100,200,100),screenRect)
self.p2.setInitialPostition(screenWidth-self.p2.get()['width'],screenHeight/2)
self.ball = Ball(self.board, (50,50,250), screenRect, self.sound)
self.ball.setInitialPostition(screenWidth/2,screenHeight/2)
self.arena = Arena(self.board, screenRect)
self.referee = Referee(self.ball, self.p1, self.p2, screenRect, self.sound)
self.t = Thread(target=self.track.run)
#self.track.run()
self.t.start()
self.p1_pos = 0
self.p2_pos = 0
self.loop()
def movep1(self, diry):
'''Player1 moves support'''
self.p1.move(diry)
def movep2(self, diry):
'''Player2 moves support'''
self.p2.move(diry)
def game_exit(self):
exit()
def loop(self):
flaga = 1
while self.state==1:
for event in pygame.event.get():
if event.type==QUIT or (event.type==KEYDOWN and event.key==K_ESCAPE):
self.state=0
keys = pygame.key.get_pressed()
dirp1 = copysign(1, self.track.p1_position - self.p1_pos)
dirp2 = copysign(1, self.track.p2_position - self.p2_pos)
self.p1_pos += dirp1
self.p2_pos += dirp2
self.p1.set(self.track.p1_position+45)
self.p2.set(self.track.p2_position+45)
if keys[K_f]:
pygame.display.toggle_fullscreen()
self.arena.render(self.track.frame)
font = pygame.font.Font("gfx/ATARCC__.TTF",40)
text1 = font.render('P1={}'.format(self.p1.getScore()), True,(200,200,200))
text2 = font.render('P2={}'.format(self.p2.getScore()), True,(200,200,200))
quartWidth = screenWidth/4
self.board.blit(text1,(quartWidth * 1 - quartWidth/2,10))
self.board.blit(text2,(quartWidth * 3 - quartWidth/2,10))
self.p1.render()
self.p2.render()
self.ball.render()
self.referee.judge()
pygame.display.flip() # wyswietlamy obrazki
self.fps.tick(80)
self.track.running = False
self.game_exit()
if __name__ == '__main__':
Pong('czerwony.txt', 'zielony.txt')
| true |
d67c38e19dea7962f076badf26c00bb9b613d966 | Python | pmhalvor/Hello_World_II | /Python/Diet/recipe.py | UTF-8 | 609 | 2.734375 | 3 | [] | no_license | import requests
import json
apiKey = 'a0ab8407037c4a36a9f1e77fa128f2be'
url = 'https://api.spoonacular.com'
def get_recipe(ingredients=None):
'''
ingredients: csv string of ingredients
'''
params = {
'ingredients': ingredients,
'apiKey': apiKey
}
data = requests.get(url+'/recipes/findByIngredients', params=params)
# data = requests.get('https://api.spoonacular.com/recipes/findByIngredients?ingredients=apples,+flour,+sugar&number=2')
for item in data.json():
print(item['title'])
if __name__=='__main__':
get_receipe('spinach,chicken,feta') | true |
4855f5bfc35ea5fe02e5d9dd0fa61b1f240b923c | Python | stephentu/forwarder | /messages.py | UTF-8 | 661 | 2.734375 | 3 | [] | no_license | import struct
# Commands
CMD_NEW_CONN = 0
CMD_DATA = 1
CMD_CLOSE_CONN = 2
# read modes
MODE_CMD = 0
MODE_PAYLOAD_LEN = 1
MODE_PAYLOAD = 2
# Message formats:
#
# New connection:
# [ CMD_NEW_CONN (1-byte) ]
#
# Data:
# [ CMD_DATA (1-byte) | payload_length (4-bytes) | payload (payload_length bytes) ]
#
# Close connection:
# [ CMD_CLOSE_CONN ]
def create_new_conn_message():
return struct.pack('!B', CMD_NEW_CONN)
def create_data_message(buf):
'''create a data message, containing buf as the payload'''
return struct.pack('!BI', CMD_DATA, len(buf)) + buf
def create_close_conn_message():
return struct.pack('!B', CMD_CLOSE_CONN)
| true |
e01c9e546bc9c1ae1dafbeb8d855cf6be2035e5f | Python | aleda145/bike | /blog/models.py | UTF-8 | 1,363 | 2.703125 | 3 | [] | no_license | import datetime
from django.db import models
from django.utils import timezone
class BlogPost(models.Model):
title = models.CharField(max_length=200)
slug = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
body = models.TextField()
def __str__(self):
return self.title
# demonstration
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class EquipmentCategory (models.Model):
title= models.CharField(max_length=200)
description=models.CharField(max_length=500)
# calculates the total price of all the EquipmentItems in a category
def _get_total(self):
total_price=0
for item in self.equipmentitem_set.all():
total_price+=item.price
return total_price
total = property(_get_total)
def __str__(self):
return self.title
class EquipmentItem (models.Model):
title=models.ForeignKey(EquipmentCategory, on_delete=models.CASCADE)
product_name=models.TextField()
description=models.TextField()
price=models.IntegerField()
def __str__(self):
return (self.product_name)
| true |
c5d59f9001dc37e4e579921513ea738f9f7656a8 | Python | Alpha5714/pybasic | /class.py | UTF-8 | 141 | 2.765625 | 3 | [] | no_license | class Roll:
def g(self):
print('Nice stuff bro')
def toph(self):
print('Nice')
r = Roll()
r.toph()
r.g()
| true |
4e069670ff35d785a6f450b4653bd41f12ec25af | Python | bioinf/proteomics2014 | /lioznova/ht1/hw1.py | UTF-8 | 9,518 | 2.53125 | 3 | [] | no_license | import sys
import os
import copy
import argparse
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
def get_seqs(input_file_name):
text = {}
with open(input_file_name) as fp:
for name, seq in read_fasta(fp):
text[name] = [seq]
fp.close()
return text
def read_matrix(fp):
scoring_matrix = {}
proteins = None
with open(matrix_file_name) as fp:
for line in fp:
if (len(line) < 2) or (line[0] == "#"):
continue
if not proteins:
proteins = (line.strip()).split()
continue
cur_line_char = (line.strip()).split()[0]
for i in xrange(1, len((line.strip()).split())):
w = int((line.strip()).split()[i])
if not scoring_matrix.has_key(cur_line_char):
scoring_matrix[cur_line_char] = {}
scoring_matrix[cur_line_char][proteins[i-1]] = w
return (scoring_matrix, proteins)
def score(arr1, arr2):
result = 0
for a1 in xrange(len(arr1)):
for a2 in xrange(len(arr2)):
result += scoring_matrix[proteins[a1]][proteins[a2]] * arr1[a1] * arr2[a2]
return result
def freq_mat(s):
fr = []
for i in xrange(len(proteins)):
fr_ = [0] * len(s[0])
fr.append(fr_)
for i in xrange(len(s)):
for j in xrange(len(s[i])):
fr[proteins.index(s[i][j])][j] += 1
return fr
def align(p1_, p2_, distance_only):
p1 = freq_mat(p1_)
string_num1 = 0
for i in xrange(len(proteins)):
string_num1 += p1[i][0]
p1[i].insert(0, 0)
p1[len(proteins) - 1][0] = string_num1
p2 = freq_mat(p2_)
string_num2 = 0
for i in xrange(len(proteins)):
string_num2 += p2[i][0]
p2[i].insert(0, 0)
p2[len(proteins) - 1][0] = string_num2
def space(num):
arr = [0] * len(proteins)
arr[-1] = num
return arr
def column(mat, pos):
arr = [0] * len(proteins)
for i in xrange(len(proteins)):
arr[i] = mat[i][pos]
return arr
p1_len = len(p1[0])
p2_len = len(p2[0])
d = []
for i in xrange(p1_len):
d_ = [None] * p2_len
d.append(d_)
d[0][0] = 0
for i in xrange(1, p1_len):
d[i][0] = d[i-1][0] + score(column(p1, i), space(string_num2))
for j in xrange (1, p2_len):
d[0][j] = d[0][j-1] + score(space(string_num1), column(p2, j))
for j in xrange(1, p2_len):
for i in xrange(1, p1_len):
d[i][j] = max(d[i-1][j] + score(column(p1, i), space(string_num2)), d[i][j-1] + score(space(string_num1), column(p2, j)), d[i-1][j-1] + score(column(p1, i), column(p2, j)))
edit_dist = d[-1][-1]
if distance_only:
return edit_dist
s_merged = [''] * (string_num1 + string_num2)
(i, j) = (p1_len-1, p2_len-1)
while (i > 0) and (j > 0):
if (d[i][j] == d[i-1][j-1] + score(column(p1, i), column(p2, j))):
for k in xrange(string_num1):
s_merged[k] += (p1_[k][i-1])
for k in xrange(string_num2):
s_merged[string_num1 + k] += (p2_[k][j-1])
(i, j) = (i-1, j-1)
elif (d[i][j] == d[i-1][j] + score(column(p1, i), space(string_num2))):
for k in xrange(string_num1):
s_merged[k] += (p1_[k][i-1])
for k in xrange(string_num2):
s_merged[string_num1 + k] += ('*')
(i, j) = (i-1, j)
elif (d[i][j] == d[i][j-1] + score(space(string_num1), column(p2, j))):
for k in xrange(string_num1):
s_merged[k] += ('*')
for k in xrange(string_num2):
s_merged[string_num1 + k] += (p2_[k][j-1])
(i, j) = (i, j-1)
while (i > 0):
for k in xrange(string_num1):
s_merged[k] += (p1_[k][i-1])
for k in xrange(string_num2):
s_merged[string_num1 + k] += ('*')
(i, j) = (i-1, j)
while (j > 0):
for k in xrange(string_num1):
s_merged[k] += ('*')
for k in xrange(string_num2):
s_merged[string_num1 + k] += (p2_[k][j-1])
(i, j) = (i, j-1)
for i in xrange(len(s_merged)):
s_merged[i] = s_merged[i][::-1]
return s_merged
def initialize_dist(text):
current_leaves = set(text.keys())
profile_distances = {}
for name in current_leaves:
profile_distances[name] = {}
for i in xrange(len(current_leaves)):
for j in xrange(i+1, len(current_leaves)):
p1_name = text.keys()[i]
p2_name = text.keys()[j]
p1_p2_dist = align(text[p1_name], text[p2_name], True)
profile_distances[p1_name][p2_name] = p1_p2_dist
profile_distances[p2_name][p1_name] = p1_p2_dist
return (current_leaves, profile_distances)
def neighbor_joining(text_, profile_distances_, current_leaves_):
text = copy.deepcopy(text_)
profile_distances = copy.deepcopy(profile_distances_)
current_leaves = copy.deepcopy(current_leaves_)
while len(current_leaves) != 1:
current_names = list(current_leaves)
Q = {}
for name in current_names:
Q[name] = {}
for i in xrange(len(current_names)):
for j in xrange(i+1, len(current_names)):
name1 = current_names[i]
name2 = current_names[j]
res = (len(current_names) - 2) * profile_distances[name1][name2]
for k in xrange(len(current_names)):
if k != i:
res -= profile_distances[name1][current_names[k]]
if k != j:
res -= profile_distances[name2][current_names[k]]
Q[name1][name2] = res
Q[name2][name1] = res
closest_dist = -100500
closest_name1 = None
closest_name2 = None
for i in xrange(len(current_names)):
for j in xrange(i+1, len(current_names)):
if Q[current_names[i]][current_names[j]] > closest_dist:
closest_dist = Q[current_names[i]][current_names[j]]
closest_name1 = current_names[i]
closest_name2 = current_names[j]
new_profile_name = (closest_name1 + "\n" + closest_name2)
new_profile = align(text[closest_name1], text[closest_name2], False)
text[new_profile_name] = new_profile
profile_distances[new_profile_name] = {}
for cur_n in current_names:
if (cur_n != closest_name1) and (cur_n != closest_name2):
new_dist = 0.5 * (profile_distances[closest_name1][cur_n] + profile_distances[closest_name2][cur_n] - profile_distances[closest_name1][closest_name2])
profile_distances[new_profile_name][cur_n] = new_dist
profile_distances[cur_n][new_profile_name] = new_dist
current_leaves.remove(closest_name1)
current_leaves.remove(closest_name2)
current_leaves.add(new_profile_name)
final_name = current_leaves.pop()
return (final_name, text[final_name])
def pgma(weighted, text_, profile_distances_, current_leaves_):
text = copy.deepcopy(text_)
profile_distances = copy.deepcopy(profile_distances_)
current_leaves = copy.deepcopy(current_leaves_)
while len(current_leaves) != 1:
current_names = list(current_leaves)
closest_dist = -100500
closest_name1 = None
closest_name2 = None
for i in xrange(len(current_names)):
for j in xrange(i+1, len(current_names)):
if profile_distances[current_names[i]][current_names[j]] > closest_dist:
closest_dist = profile_distances[current_names[i]][current_names[j]]
closest_name1 = current_names[i]
closest_name2 = current_names[j]
new_profile_name = (closest_name1 + "\n" + closest_name2)
new_profile = align(text[closest_name1], text[closest_name2], False)
text[new_profile_name] = new_profile
profile_distances[new_profile_name] = {}
for cur_n in current_names:
if (cur_n != closest_name1) and (cur_n != closest_name2):
if weighted:
new_dist = 0.5 * (profile_distances[closest_name1][cur_n] + profile_distances[closest_name2][cur_n])
else:
mod_name1 = len(text[closest_name1])
mod_name2 = len(text[closest_name2])
new_dist = float(mod_name1 * profile_distances[closest_name1][cur_n] + mod_name2 * profile_distances[closest_name2][cur_n]) / (mod_name1 + mod_name2)
profile_distances[new_profile_name][cur_n] = new_dist
profile_distances[cur_n][new_profile_name] = new_dist
current_leaves.remove(closest_name1)
current_leaves.remove(closest_name2)
current_leaves.add(new_profile_name)
final_name = current_leaves.pop()
return (final_name, text[final_name])
def print_ans(names_, seqs, out_f):
output_file = open(out_f, 'w')
names = names_.split('\n')
for i in xrange(len(seqs)):
output_file.write(names[i] + "\n" + seqs[i] + "\n")
output_file.close()
if __name__ == "__main__":
if len(sys.argv) == 1:
print "Usage:", sys.argv[0], "-o <output file> -i <input file> -s <scoring matrix> -m <NJ | UPGMA | WPGMA>"
print "Please use the --help option to get more usage information."
exit()
parser = argparse.ArgumentParser(prog = sys.argv[0], description='msa')
parser.add_argument("-i", "--in_file", help="name of input file", required=True)
parser.add_argument("-o", "--out_file", help="name of output file", required=True)
parser.add_argument("-s", "--score", help="scoring matrix file", required=True)
parser.add_argument("-m", "--method", help="method to align", choices=["NJ", "UPGMA", "WPGMA"], required=True)
args = parser.parse_args()
input_file_name = args.in_file
matrix_file_name = args.score
output_file_name = args.out_file
if not os.path.isfile(input_file_name):
print >> sys.stderr, "Not a file\t" + input_file_name
exit(1)
if not os.path.isfile(matrix_file_name):
print >> sys.stderr, "Not a file\t" + matrix_file_name
exit(1)
text = get_seqs(input_file_name)
(scoring_matrix, proteins) = read_matrix(matrix_file_name)
(current_leaves, profile_distances) = initialize_dist(text)
if args.method == "NJ":
(n, s) = neighbor_joining(text, profile_distances, current_leaves)
elif args.method == "WPGMA":
(n, s) = pgma(True, text, profile_distances, current_leaves) # wpgma
elif args.method == "UPGMA":
(n, s) = pgma(False, text, profile_distances, current_leaves) # upgma
print_ans(n, s, output_file_name)
| true |
cb75b0c1a8baa77b1bb6a04b3658358bbcc2a6cb | Python | liyaguo6/data-analynsis | /RandomForest/test.py | UTF-8 | 717 | 2.703125 | 3 | [] | no_license | import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.preprocessing import StandardScaler
def type_converters(s):
it = {'医疗': 0, '其它': 1}
return it[s]
df = pd.read_csv(r'C:\Users\10755\Desktop\test215.csv',header=0 \
,encoding='gbk',converters={3:type_converters},index_col='index',usecols=['answer','classes','index','value'],error_bad_lines=True).dropna()
# print(df.head())
# x_train, x_test, y_train, y_test = train_test_split(df.iloc[:,:1], df.iloc[:,1:2], test_size=0.3, random_state=1)
# print(x_train)
# print(y_train)
ss = StandardScaler()
x = ss.fit_transform(df['value'].values.reshape(-1,1).astype('float64'))
print(x) | true |
87e15621372a19b385c35f3d1209439f461de4ff | Python | thanhtam4692/sidneysraspberry | /python/desk_light_0_on.py | UTF-8 | 1,174 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
import datetime
from pymongo import MongoClient
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
mongoclient = MongoClient("mongodb://localhost:27017")
db = mongoclient.sidneyspi
homeonDB = db.homeon
# init list with pin numbers
pinList = [24]
# loop through pins and set mode and state to 'low'
for i in pinList:
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.HIGH)
# time to sleep between operations in the main loop
#SleepTimeL = 20
# main loop
try:
GPIO.output(24, GPIO.LOW)
print "Light is on"
now = datetime.datetime.now()
datetime_string = str(now.year) + "" + str(now.strftime('%m')) + "" + str(now.strftime('%d')) + "-" + str(now.strftime('%H')) + "" + str(now.strftime('%M')) + "" + str(now.strftime('%S'))
homeonDB.update_one(
{"name": "desk_light_0"},
{"$set": {
"status.power": "on",
"mode": "manual",
"last_update": datetime_string
}
}
)
# End program cleanly with keyboard
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
# GPIO.cleanup()
| true |
613be3d4ac5aa488e09d682ffe9d48b59b61354b | Python | akleeman/xray | /xray/test/test_formatting.py | UTF-8 | 2,448 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import pandas as pd
from xray.core import formatting
from xray.core.pycompat import PY3
from . import TestCase
class TestFormatting(TestCase):
def test_get_indexer_at_least_n_items(self):
cases = [
((20,), (slice(10),)),
((3, 20,), (0, slice(10))),
((2, 10,), (0, slice(10))),
((2, 5,), (slice(2), slice(None))),
((1, 2, 5,), (0, slice(2), slice(None))),
((2, 3, 5,), (0, slice(2), slice(None))),
((1, 10, 1,), (0, slice(10), slice(None))),
((2, 5, 1,), (slice(2), slice(None), slice(None))),
((2, 5, 3,), (0, slice(4), slice(None))),
((2, 3, 3,), (slice(2), slice(None), slice(None))),
]
for shape, expected in cases:
actual = formatting._get_indexer_at_least_n_items(shape, 10)
self.assertEqual(expected, actual)
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
self.assertItemsEqual(expected, actual)
with self.assertRaisesRegexp(ValueError, 'at least one item'):
formatting.first_n_items(array, 0)
def test_format_item(self):
cases = [
(pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),
(pd.Timestamp('2000-01-01'), '2000-01-01'),
(pd.Timestamp('NaT'), 'NaT'),
('foo', "'foo'"),
(u'foo', "'foo'" if PY3 else "u'foo'"),
(b'foo', "b'foo'" if PY3 else "'foo'"),
(1, '1'),
(1.0, '1.0'),
]
for item, expected in cases:
actual = formatting.format_item(item)
self.assertEqual(expected, actual)
def format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 10),
expected = '0 1 2 3 4 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(100.0), 10),
expected = '0.0 1.0 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(100.0), 1),
expected = '0.0 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(3), 5),
expected = '0 1 2'
self.assertEqual(expected, actual)
| true |
6428319a24166793f9156723766cb27780814abe | Python | manna422/ProjectEuler | /002.py | UTF-8 | 429 | 3.625 | 4 | [] | no_license | '''
It's unavoidable to solve this without computing most of the fibonacci sequence.
To reduce the amount of brute-forcing used we can take advantage of the fact that
the fibonacci series follows a pattern of odd then odd then even and so on.
'''
tests = input()
for i in xrange(tests):
n = input()
result = 0
x = 2
y = 3
while x < n:
result += x
x, y = x+(2*y), (2*x)+(3*y)
print result
| true |
db2b0c5281616d0f0738531a7232ef887e1e4fb4 | Python | petroniocandido/mlstuff | /conexionist/activations.py | UTF-8 | 2,860 | 2.828125 | 3 | [] | no_license | import numpy as np
import mlstuff.conexionist.function as func
class Identity(func.UnivariateFunction):
def __init__(self, **kwargs):
super(BinaryStep, self).__init__(name='Binary Step', **kwargs)
def function(self, data):
return data
def derivative(self, data):
return 1
class BinaryStep(func.UnivariateFunction):
def __init__(self, **kwargs):
super(BinaryStep, self).__init__(name='Binary Step', **kwargs)
def function(self, data):
return 1 if data >= 0 else 0
def derivative(self, data):
return 0 if data >= 0 else 0
class Linear(func.UnivariateFunction):
def __init__(self, **kwargs):
super(BinaryStep, self).__init__(name='Linear', **kwargs)
self.a = kwargs.get('coefficient', 1)
self.b = kwargs.get('threshold', 0)
def function(self, data):
return self.a*data + self.b
def derivative(self, data):
return self.a
class Sigmoid(func.UnivariateFunction):
def __init__(self, **kwargs):
super(Sigmoid, self).__init__(name='Sigmoid', **kwargs)
def function(self, data):
return 1/(1 + np.exp(-data))
def derivative(self, data):
k = self.function(data)
return k * (1 - k)
class TanH(func.UnivariateFunction):
def __init__(self, **kwargs):
super(TanH, self).__init__(name='Hyperbolic Tangent', **kwargs)
def function(self, data):
return 2/(1 + np.exp(-2 * data)) - 1
def derivative(self, data):
k = self.function(data)
return 1 - k**2
class ReLu(func.UnivariateFunction):
def __init__(self, **kwargs):
super(ReLu, self).__init__(name='Rectified linear unit', **kwargs)
def function(self, data):
return np.max(0, data)
def derivative(self, data):
return 1 if data >= 0 else 0
class LeakyReLu(func.UnivariateFunction):
def __init__(self, **kwargs):
super(LeakyReLu, self).__init__(name='Leaky Rectified linear unit', **kwargs)
self.a = kwargs.get('a', .01)
def function(self, data):
return self.a*data if data < 0 else data
def derivative(self, data):
return self.a if data < 0 else 1
class Softmax(func.UnivariateFunction):
def __init__(self, **kwargs):
super(Softmax, self).__init__(name='Softmax', **kwargs)
def function(self, data):
tmp = [np.exp(k) for k in data]
Z = np.sum(tmp)
return [k/Z for k in tmp]
def derivative(self, data):
ret = []
for i,pi in enumerate(data):
tmp = []
for j, pj in enumerate(data):
if i == j:
tmp.append(self.function(pi)*(1-self.function(pj)))
else:
tmp.append(- self.function(pi) * self.function(pj))
ret.append(tmp)
return ret | true |
d4f7e0c042a2dfde616624156d15718e29ea2fa2 | Python | yklu0330/FinTech_2019 | /HW1/ohlcExtract.py | UTF-8 | 648 | 2.921875 | 3 | [] | no_license | import csv
import sys
with open(sys.argv[1], newline='', encoding='Big5') as csvfile:
dataList = csv.reader(csvfile)
list = []
for row in dataList:
if row[3] >= '084500' and row[3] <= '134500' and row[1] == 'TX ':
list.append(row)
list2 = []
for i in range(len(list)):
if list[i][2] == list[0][2]:
list2.append(list[i])
high = list2[0][4]
low = list2[0][4]
for i in range(len(list2)):
if list2[i][4] < low:
low = list2[i][4]
if list2[i][4] > high:
high = list2[i][4]
print(list2[0][4], high, low, list2[len(list2)-1][4])
| true |
03d80e5525ac50fb2e0a79841ea6a11dc8d258a1 | Python | zm6148/2020_insight_de_traffic | /kafka_producer/src/functions.py | UTF-8 | 5,683 | 2.984375 | 3 | [] | no_license | import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from PIL import Image
################################################################################
# function to encode faces
# take in the path of faces you want to detect
def get_encoded_faces(known_faces_path):
"""
looks through the faces folder and encodes all
the faces
:return: dict of (name, image encoded)
"""
encoded = {}
for dirpath, dnames, fnames in os.walk(known_faces_path):
for f in fnames:
if f.endswith(".jpg") or f.endswith(".png"):
face = fr.load_image_file(known_faces_path + f)
encoding = fr.face_encodings(face)[0]
encoded[f.split(".")[0]] = encoding
return encoded
################################################################################
# function to classify faces based on the faces encoded using
# photos of persons of interest
# im is path for now
def classify_face(img, known_faces):
"""
will find all of the faces in a given image and label
them if it knows what they are
:param im: str of file path
:return: return found faces, altered img, and wether there is a match
"""
faces_encoded = list(known_faces.values())
known_face_names = list(known_faces.keys())
face_locations = face_recognition.face_locations(img)
unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
face_names = []
for face_encoding in unknown_face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(faces_encoded, face_encoding)
name = "Unknown"
# use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Draw a box around the face
cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
# Draw a label with a name below the face
cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)
# check wether target face was found
common_name = list(set(face_names).intersection(known_face_names))
if not common_name:
match = 0
else:
match = 1
# return found faces, altered img, and wether there is a match
return face_names, img, match
################################################################################
# functiont to check wether this frame of img contains any weapon
# now: knife, short gun, long gun
# im is path for now
def find_weapon(img_cv, model):
m,n = 50,50
weapon = ['knife', 'gun', 'gun']
x=[]
# convert to pillow image
img = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
im_pil = Image.fromarray(img)
imrs = im_pil.resize((m,n))
imrs=img_to_array(imrs)/255;
imrs=imrs.transpose(2,0,1);
imrs=imrs.reshape(3,m,n);
x.append(imrs)
x=np.array(x);
predictions = model.predict(x)
greatest_p_index = np.argmax(predictions)
if np.amax(predictions) > 0.9:
return weapon[greatest_p_index], 1
else:
return 'no weapon', 0
################################################################################
# functions for object identification
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
def draw_prediction(img, confidence, x, y, x_plus_w, y_plus_h, label, color):
cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), (color), 2)
cv2.putText(img, label, (x-10,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
def object_identification(image, classes, net, output_layers, COLORS):
Width = image.shape[1]
Height = image.shape[0]
scale = 0.00392
blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
class_ids = []
confidences = []
boxes = []
conf_threshold = 0.5
nms_threshold = 0.4
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
center_x = int(detection[0] * Width)
center_y = int(detection[1] * Height)
w = int(detection[2] * Width)
h = int(detection[3] * Height)
x = center_x - w / 2
y = center_y - h / 2
class_ids.append(class_id)
confidences.append(float(confidence))
boxes.append([x, y, w, h])
indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
for i in indices:
i = i[0]
box = boxes[i]
x = box[0]
y = box[1]
w = box[2]
h = box[3]
label = str(classes[class_ids[i]])
color = COLORS[class_ids[i]]
draw_prediction(image, confidences[i], int(x), int(y), int(x+w), int(y+h), label, color)
return image, class_ids
| true |
b6e309d7bf58f3880deacbebe10d9b9c489f2fd2 | Python | LucBerge/RobAIR | /catkin_ws/src/robairdock/scripts/dockmain.py | UTF-8 | 5,378 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
###########
# Imports #
###########
import rospy
import cv2
import cv2.aruco as aruco
import numpy as np
from math import *
from std_msgs.msg import Byte
from std_msgs.msg import String
from geometry_msgs.msg import Pose
###################
# Robot Constants #
###################
MarkerWidth = 0.08 #Side length of the robot's ArUco marker, in meters (sets the scale of the pose estimation)
robot_ID = 0 #ArUco marker id that identifies the RobAIR robot in the camera image
##################
# Dock Constants #
##################
# Docking state-machine values (see DockState below)
DK_NOTDOCKED = 0 #Not docked state
DK_WANTTODOCK = 1 #Want to dock state
DK_NOTSEEN = 2 #Not seen state (docking in progress, marker not visible)
DK_SEEN = 3 #Seen state (marker visible, pose is published)
DK_DOCKED = 4 #Docked state

#For a RaspberryPi camera v2.1
# Intrinsic calibration of this specific camera: mtx is the 3x3 camera
# matrix (focal lengths and principal point), disp the lens distortion
# coefficients; both come from a prior OpenCV calibration.
mtx = np.array([[505.62638698, 0, 326.44665333], [0, 506.57448647, 228.39570037],[0, 0, 1]]) #Camera matrix
disp = np.array([1.55319525e-01, 4.30522297e-02, -2.08579382e-04, -3.47100297e-03, -1.37788831e+00]) #Distortion matrix

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250) #Declare the markers dictionary (6x6 bits, 250 ids)
parameters = aruco.DetectorParameters_create() #Declare the aruco detection parameters
#############
# Variables #
#############
marker_pos = Pose() #RobAIR position in the camera coordinate system (position.x/.z in meters, orientation.y = yaw in radians)
DockState = 0 #Actual RobAIR state for docking (one of the DK_* constants above)
#############
# Functions #
#############
def GetPose(cap):
    """Grab one frame from `cap` and look for the robot's ArUco marker.

    When the marker with id `robot_ID` is found, the global `marker_pos`
    is updated in the camera coordinate system: x/z position in meters and
    the orientation around the camera y axis in radians.

    Returns True when the robot marker was detected, False otherwise.
    """
    global marker_pos #Use the global marker_pos
    ret, image = cap.read() #Save a picture from the video capture
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Marker detection works on a grey scale image
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray_image, aruco_dict, parameters=parameters) #ids/corners are parallel arrays
    if not isinstance(ids, np.ndarray): #No marker at all in the frame
        return False
    if robot_ID not in ids: #Markers seen, but not the robot's one
        #Bug fix: the original fell through here and returned None implicitly;
        #now the function always returns a real boolean.
        return False
    rvec, tvec = aruco.estimatePoseSingleMarkers(corners, MarkerWidth, mtx, disp)[0:2] #Rotation/translation vectors for every detected marker
    index = np.where(ids == robot_ID)[0][0] #Row of the robot marker in the detection arrays
    tvec = tvec[index][0] #Get the translation vector in meters
    rvec = rvec[index][0] #Get the rotation vector (Rodrigues form)
    rmat = cv2.Rodrigues(rvec)[0] #Expand to a full rotation matrix
    marker_pos.orientation.y = -atan2(-rmat[2][0],sqrt(rmat[2][1]**2 + rmat[2][2]**2)) #Marker orientation around y (radians)
    marker_pos.position.x = tvec[0] #Get the x marker position (in meters)
    marker_pos.position.z = tvec[2] #Get the z marker position (in meters)
    return True #The robot marker has been detected
def start_docking(): #Start docking function
    """Main docking loop: track the robot marker and publish its pose.

    Runs until DockState becomes DK_NOTDOCKED or DK_DOCKED (changed by the
    "dockstate" topic callback).  Publishes the marker pose on every cycle
    in which the marker is visible, and publishes a DK_SEEN/DK_NOTSEEN
    state only on visibility *transitions*.
    """
    cap = cv2.VideoCapture(0) #Open the camera
    State = DockState #State initialisation
    LastState = DockState #Last state initialisation
    while(DockState != DK_NOTDOCKED and DockState != DK_DOCKED): #While the docking operation isn't finished
        rate.sleep() #Wait for the next sampling
        if(GetPose(cap) == True): #If marker detected
            State = DK_SEEN #The future state will be DK_SEEN
            send_marker_pos(marker_pos) #Publish the position
        else: #If no marker detected
            State = DK_NOTSEEN #The future state will be DK_NOTSEEN
        if(LastState != State): #Publish only when the state changed
            send_dockstate(State) #Send the new state
            LastState = State #Last state and actual state are now the same
    cap.release() #Release the camera
def sat(value, minimum, maximum): #Saturation function
    """Clamp `value` into the closed interval [minimum, maximum]."""
    # Idiomatic clamp: min() caps the upper bound, max() enforces the lower one.
    return max(minimum, min(value, maximum))
#################
# Send Funtions #
#################
def send_dockstate(data): #Send dock state
    """Store `data` in the global DockState, then publish it on "dockstate".

    Note: this node also subscribes to "dockstate", so receive_dockstate
    will observe its own publications.
    """
    global DockState #Use the global DockState
    DockState = data #Save the dock state
    pub_dock.publish(data) #Publish the state
def send_marker_pos(data): #Send RobAIR position
    """Publish the marker pose (geometry_msgs/Pose) on the "position" topic."""
    pub_pos.publish(data) #Publish the position
####################
# Receive Funtions #
####################
def receive_dockstate(data): #Receive dock state
    """Callback for the "dockstate" topic.

    A DK_WANTTODOCK request starts the docking loop, but only from the
    DK_NOTDOCKED state; every other value is simply stored in DockState,
    which is what terminates start_docking's loop.
    """
    global DockState #Use the global DockState
    if(data.data == DK_WANTTODOCK): #If dock request
        if(DockState == DK_NOTDOCKED): #Only start if not already docked/docking
            DockState = data.data #Save the dock state
            start_docking() #Start docking
    else: #If no dock request
        DockState = data.data #Save the dock state
############################
# Subscribers & Publishers #
############################
rospy.Subscriber("dockstate",Byte,receive_dockstate) #Subscribe to "dockstate" topic
pub_dock = rospy.Publisher('dockstate',Byte, queue_size=10) #"dockstate" topic object
pub_pos = rospy.Publisher('position',Pose, queue_size=10) #"position" topic object
pub_log = rospy.Publisher('log',String, queue_size=10) #"log" topic object for debug
########
# MAIN #
########
if __name__ == '__main__': #Entry point when run as a script
    try:
        rospy.init_node('dockmain', anonymous=True) #Initialize the node
        rate = rospy.Rate(10) #Rate set to 10Hz (used by start_docking's loop)
        #NOTE(review): this publish happens right after node init; subscribers
        #may not be connected yet, so the message can be dropped -- confirm.
        pub_log.publish('Node "dockmain" initialized') #Log info for ROS network
        rospy.loginfo('Node "dockmain" initialized') #Log info for computer only
        rospy.spin() #Process callbacks until shutdown
    except rospy.ROSInterruptException:
        rospy.logerr('error') #Log info error
| true |
dd2c628ac30105a98e42fc45180e6919604a2544 | Python | abhireddy96/Leetcode | /003_Longest_Substring_Without_Repeating_Characters.py | UTF-8 | 1,058 | 4 | 4 | [] | no_license | """
https://leetcode.com/problems/longest-substring-without-repeating-characters/
Given a string, find the length of the longest substring without repeating characters. For example, the longest
substring without repeating letters for "abcabcbb" is "abc", which the length is 3. For "bbbbb" the longest substring
is "b", with the length of 1.
"""
__author__ = 'abhireddy96'
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of `s` with all-distinct characters.

        Sliding-window formulation: `start` is the left edge of the current
        duplicate-free window and `last_seen` maps each character to the
        index of its most recent occurrence.
        """
        last_seen = {}
        start = 0
        best = 0
        for i, ch in enumerate(s):
            # If ch already occurs inside the current window, move the left
            # edge just past that previous occurrence.
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best
if __name__ == "__main__":
print(Solution().lengthOfLongestSubstring("pwwkew"))
| true |
858b0ac8e5819ecf5280cbf0a2bce1a03dd82cdc | Python | eggtart93/Phantom | /testcode/servo.py | UTF-8 | 487 | 2.8125 | 3 | [] | no_license | import wiringpi2 as wiringpi
SERVO_PIN = 12
class ServoManager(object):
    """Drive a hobby servo attached to GPIO pin 12 through the Pi PWM."""
    def __init__(self):
        """Configure the PWM hardware and move the servo to 90 degrees."""
        wiringpi.wiringPiSetupGpio() #Use BCM GPIO pin numbering
        wiringpi.pinMode(SERVO_PIN,2) #Pin mode 2 -- presumably PWM output; confirm against wiringPi docs
        wiringpi.pwmSetMode(0) #PWM mode 0 -- presumably mark:space mode; confirm
        wiringpi.pwmSetClock(375) #PWM clock divisor -- assumed to yield a ~50Hz servo signal; TODO confirm on hardware
        angle = 90 #Initial position: mid travel
        #Map 0-180 degrees onto the duty-cycle range.  The 0.14*1024 span and
        #the +6 offset are servo-specific calibration values -- verify on hardware.
        dutyCycle = int(angle/180.0*(0.14*1024)) + 6
        wiringpi.pwmWrite(SERVO_PIN,dutyCycle)
    def servoturn(self,angle):
        """Move the servo to `angle` degrees (expected range 0-180)."""
        #Same angle -> duty-cycle mapping as in __init__.
        dutyCycle = int(angle/180.0*(0.14*1024)) + 6
        wiringpi.pwmWrite(SERVO_PIN,dutyCycle)
| true |
3e27cae18f2cd6b712b0265cf2224ebacad7ed8b | Python | AkarshanSrivastava/Data-Science-With-Python | /HYPOTHESIS TESTING/Hypothesis+testing+.py | UTF-8 | 5,525 | 3.03125 | 3 | [] | no_license | #importing the pacakages which are required
import pandas as pd
import numpy as np
import scipy
from scipy import stats
import statsmodels.api as sm
#install plotly package
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.tools import FigureFactory as FF
#Mann-Whitney test: compare samples with and without the additive
data=pd.read_csv("C:/Users/suri/Desktop/practice programs/Hypothesis testing/with and without additive.csv")
#Shapiro-Wilk normality tests (shapiro returns (statistic, p-value))
#Without-additive sample
withoutAdditive_data=stats.shapiro(data.Without_additive)
withoutAdditive_pValue=withoutAdditive_data[1]
print("p-value is: "+str(withoutAdditive_pValue))
#With-additive sample
Additive=stats.shapiro(data.With_Additive)
Additive_pValue=Additive[1]
print("p-value is: "+str(Additive_pValue))
#Mann-Whitney U test on the two samples
#NOTE(review): the result below is neither stored nor printed, so it is
#discarded when run as a script (it only displays in an interactive session).
from scipy.stats import mannwhitneyu
mannwhitneyu(data.Without_additive, data.With_Additive)
#############################End of Mann-whiteny test#####################################
#2- Sample T-Test
#Creditcard Promotion data set
promotion=pd.read_csv("C:/Users/suri/Desktop/practice programs/Hypothesis testing/Promotion.csv")
#Ho: Avg of purchases made by FIW < = Avg purchases made by SC =>default/ current/ no action
#Ha: Avg of purchases made by FIW > Avg purchases made by SC =>take action
#Doing Normality test
#We consider Ho: Data are normal
#We consider Ha: Data are not normal
Promotion=stats.shapiro(promotion.InterestRateWaiver)
Promotion_pValue=Promotion[1]
print("p-value is: "+str(Promotion_pValue))
#Normality test for the StandardPromotion sample (shapiro returns (statistic, p-value))
SDPromotion=stats.shapiro(promotion.StandardPromotion)
#Bug fix: the p-value must come from this test's own result; the original
#read Promotion[1] (the InterestRateWaiver test's p-value) by mistake.
SDPromotion_pValue=SDPromotion[1]
print("p-value is: "+str(SDPromotion_pValue))
#we can proceed with the model
#Varience test
scipy.stats.levene(promotion.InterestRateWaiver, promotion.StandardPromotion)
#2 Sample T test
scipy.stats.ttest_ind(promotion.InterestRateWaiver,promotion.StandardPromotion)
scipy.stats.ttest_ind(promotion.InterestRateWaiver,promotion.StandardPromotion,equal_var = True)
###########################End of 2-Sample T-Test############################################
#One way Anova
#Importing the data set of contractrenewal
from statsmodels.formula.api import ols
cof=pd.read_csv("C:/Users/suri/Desktop/practice programs/Hypothesis testing/ContractRenewal_Data(unstacked).csv")
cof.columns="SupplierA","SupplierB","SupplierC"
#Normality test
SupA=stats.shapiro(cof.SupplierA) #Shapiro Test
SupA_pValue=SupA[1]
print("p-value is: "+str(SupA_pValue))
SupB=stats.shapiro(cof.SupplierB)
SupB_pValue=SupB[1]
print("p-value is: "+str(SupB_pValue))
SupC=stats.shapiro(cof.SupplierC)
SupC_pValue=SupC[1]
print("p-value is: "+str(SupC_pValue))
#Varience Test
scipy.stats.levene(cof.SupplierA, cof.SupplierB)
scipy.stats.levene(cof.SupplierB, cof.SupplierC)
scipy.stats.levene(cof.SupplierC, cof.SupplierA)
#One-Way Anova
mod=ols('SupplierA~SupplierB+SupplierC',data=cof).fit()
aov_table=sm.stats.anova_lm(mod,type=2)
print(aov_table)
###########################End of One-Way Anova###################################################
#Chi-Square test
#Importing the data set of bahaman
Bahaman=pd.read_csv("C:/Users/suri/Desktop/practice programs/Hypothesis testing/Bahaman.csv")
count=pd.crosstab(Bahaman["Defective"],Bahaman["Country"])
count
Chisquares_results=scipy.stats.chi2_contingency(count)
Chi_pValue=Chisquares_results[1]
print("p-value is: "+str(Chi_pValue))
##########################End of chi-square test################################################
#1 Sample Sign Test
import statsmodels.stats.descriptivestats as sd
#importing the data set of signtest.csv
data=pd.read_csv("C:/Users/suri/Desktop/practice programs/Hypothesis testing/Signtest.csv")
#normality test
data_socres=stats.shapiro(data.Scores)
data_pValue=data_socres[1]
print("p-value is: "+str(data_pValue))
#1 Sample Sign Test
sd.sign_test(data.Scores,mu0=0)
############################End of 1 Sample Sign test###########################################
#2-Proportion Test: are adults and children equally likely to purchase?
two_prop_test=pd.read_csv("C:/Users/suri/Desktop/practice programs/Hypothesis testing/JohnyTalkers.csv")
#importing the package used for the 2-proportion test
from statsmodels.stats.proportion import proportions_ztest
#Cross table: how many adults and children are purchasing
tab = two_prop_test.groupby(['Person', 'Icecream']).size()
count = np.array([58, 152]) #Number of adults and children purchasing -- presumably read off `tab`; verify
nobs = np.array([480, 740]) #Total number of adults and children
stat, pval = proportions_ztest(count, nobs,alternative='two-sided')
#`alternative` can be 'two-sided' or one of the one-sided tests:
#'smaller' means the alternative hypothesis is prop1 < prop2,
#'larger' means prop1 > prop2.
print('{0:0.3f}'.format(pval))
#two-sided -> tests for equal purchase proportions of adults and children
#p-value = 6.261e-05 < 0.05 -> reject the null hypothesis, i.e.
#the proportions are unequal
stat, pval = proportions_ztest(count, nobs,alternative='larger')
print('{0:0.3f}'.format(pval))
#Ha -> proportion of Adults > proportion of Children
#Ho -> proportion of Adults <= proportion of Children
#p-value = 0.999 > 0.05 -> fail to reject the null hypothesis,
#so the proportion of Children is the larger one
#Conclusion: do not launch the ice cream shop (adult market is too small)
###################################End of Two proportion test####################################
1dd0c0023454a822aff1900fea3f649a3cc7bcfc | Python | lkfo415579/NMT_programs | /zh-pt-worker-NMT/worker/generalization/RE_test.py | UTF-8 | 1,868 | 3.171875 | 3 | [] | no_license | #encoding=utf-8
# this program is used to check the RE.
import sys
import re
if __name__=='__main__':
# ******Type the test Regular expression here******
RE = r''
count = 0
# Main Program
if len(sys.argv) == 1 or len(sys.argv) > 5:
print 'Launch format:'
print "python RE_test.py input_file [output_file]"
else:
# open input file
try:
infile = open(sys.argv[1],'r')
except:
print "Input file \"" + sys.argv[1] + "\" not exist."
quit()
HaveOutPut = False
lineNo = 0
# open output file if there are
if len(sys.argv) >= 3 and sys.argv[2][0] != '-':
outfile = open(sys.argv[2],'w')
HaveOutPut = True
# get the line one by one(While Loop)
while True:
line = infile.readline()
# break the loop if there are no more line
if not line:
infile.close()
if HaveOutPut:
outfile.close()
break
line = line.decode('utf8')
lineNo = lineNo + 1
conform = False
ext = []
# find the line that it match with the __RE__
recompi = re.compile(RE.decode('utf8'))
match = recompi.finditer(line)
for word in match:
conform = True
ext.append(word.group().encode('utf8'))
# print the match line with different ways
if conform:
if HaveOutPut:
outfile.write("--line_"+ str(lineNo)+ "--: ( ")
for i in ext:
outfile.write(i)
outfile.write(' ')
count = count + 1
outfile.write(")\n")
outfile.write(line.encode('utf8'))
else:
print '--line_' + str(lineNo) + '--:'
for i in ext:
print i + ' |||',
print '\n'
print line.encode('utf8')
count = count + len(ext)
if HaveOutPut:
if lineNo % 100 == 0:
if lineNo % 1000 == 0:
sys.stdout.write('%d' % lineNo)
else:
sys.stdout.write('.')
print "\nThere are", count, "items match the RE."
print "Done."
#End Program
| true |
1354c06b46d3fad21266cb61c3631a505010e193 | Python | daniel-reich/ubiquitous-fiesta | /2iETeoJq2dyEmH87R_13.py | UTF-8 | 92 | 2.921875 | 3 | [] | no_license |
def count_digits(n, d):
    """Count occurrences of digit `d` across the squares 0**2, 1**2, ..., n**2."""
    target = str(d)
    occurrences = 0
    for i in range(n + 1):
        occurrences += str(i ** 2).count(target)
    return occurrences
| true |
5e5b01f25cd1afc2d47e553fccbb69d33e904fda | Python | yurjeuna/teachmaskills_hw | /rekursii.py | UTF-8 | 1,757 | 3.453125 | 3 | [] | no_license | import string
def palindrom(string1):
    """Return True if `string1` reads the same forwards and backwards,
    ignoring case, spaces and ASCII punctuation."""
    cleaned = string1.lower()
    # Strip every ASCII punctuation character in a single pass.
    cleaned = cleaned.translate(str.maketrans('', '', string.punctuation))
    cleaned = cleaned.replace(' ', '')
    return cleaned == cleaned[::-1]
def numb_of_negatives(my_list):
    """Recursively count the negative numbers in `my_list`.

    Fixed to return 0 for an empty list: the original indexed my_list[0]
    unconditionally and raised IndexError on [].
    """
    if not my_list:  # base case: nothing left to count
        return 0
    count = 1 if my_list[0] < 0 else 0
    return count + numb_of_negatives(my_list[1:])
def sum_of_numbers(my_list, sum1=None):
    """Recursively sum the numbers in `my_list`.

    `sum1` is the running total carried through the recursion (None on the
    initial call).  Fixed to return 0 for an empty list: the original
    indexed my_list[0] unconditionally and raised IndexError on [].
    """
    if sum1 is None:
        sum1 = 0
    if not my_list:  # base case: everything has been accumulated
        return sum1
    return sum_of_numbers(my_list[1:], sum1 + my_list[0])
def is_prime(numb, divis=None):
    """Recursive primality test: try divisors from `divis` down to 2.

    The original wrapped the recursion in a `while` loop that could only
    ever execute one iteration (both branches return); the loop is removed
    here without changing behaviour.  Numbers below 2 are not prime.
    """
    if numb < 2:
        return False
    if divis is None:
        divis = numb - 1  # start from the largest candidate divisor
    if divis < 2:
        return True  # no divisor in [2, numb-1] divides numb
    if numb % divis == 0:
        return False
    return is_prime(numb, divis - 1)
def power(x, y):
    """Recursive exponentiation x**y for integer y >= 0.

    Uses exponentiation by squaring, so the recursion depth is O(log y)
    instead of O(y): the original recursed once per unit of y and hit
    Python's recursion limit for large exponents.
    """
    if y == 0:
        return 1
    half = power(x, y // 2)  # x ** (y // 2)
    if y % 2:
        return half * half * x  # odd exponent
    return half * half  # even exponent
def to_binary(n, numb=None):
    """Recursively convert a non-negative integer to its binary string.

    `numb` accumulates the remainders (least-significant bit first) across
    the recursion.  Generalised to handle n == 0: the original recursed
    forever (RecursionError) for 0.
    """
    if numb is None:
        numb = []
    if n <= 1:  # base case: last (most significant) bit, or n == 0 itself
        numb.append(n)
        numb.reverse()  # remainders were collected LSB-first
        return ''.join(map(str, numb))
    numb.append(n % 2)  # record the current bit
    return to_binary(n // 2, numb)
def from_binary(n, new_n=None):
    """Recursively convert the binary string `n` to an integer.

    `new_n` carries the value accumulated by earlier recursion levels
    (None on the initial call).
    """
    acc = 0 if new_n is None else new_n
    if n[0] == '1':
        acc += 2 ** (len(n) - 1)  # weight of the leading bit
    if len(n) == 1:
        return acc
    return from_binary(n[1:], acc)
| true |
1f24eb1548eda9138e22583097db35b1340637e3 | Python | Alexsandr0x/rSoccer | /rsoccer_gym/Entities/Frame.py | UTF-8 | 4,934 | 2.90625 | 3 | [] | no_license | import numpy as np
from typing import Dict
from rsoccer_gym.Entities.Ball import Ball
from rsoccer_gym.Entities.Robot import Robot
class Frame:
    """Units: seconds, m, m/s, degrees, degrees/s. Reference is field center."""
    def __init__(self):
        """Init Frame object with an empty ball state and no robots."""
        self.ball: Ball = Ball()  # ball state
        self.robots_blue: Dict[int, Robot] = {}  # robot id -> Robot, blue team
        self.robots_yellow: Dict[int, Robot] = {}  # robot id -> Robot, yellow team
class FrameVSS(Frame):
    def parse(self, state, n_blues=3, n_yellows=3):
        """Parse a flat grSim state vector into this frame.

        Layout of `state`: indices 0-4 hold the ball (x, y, z, v_x, v_y),
        followed by 6 values per blue robot, then 6 per yellow robot
        (x, y, theta, v_x, v_y, v_theta each).
        """
        self.ball.x = state[0]
        self.ball.y = state[1]
        self.ball.z = state[2]
        self.ball.v_x = state[3]
        self.ball.v_y = state[4]
        rbt_obs = 6  # observations per robot
        for i in range(n_blues):
            robot = Robot()
            robot.id = i
            robot.x = state[5 + (rbt_obs*i) + 0]
            robot.y = state[5 + (rbt_obs*i) + 1]
            robot.theta = state[5 + (rbt_obs*i) + 2]
            robot.v_x = state[5 + (rbt_obs*i) + 3]
            robot.v_y = state[5 + (rbt_obs*i) + 4]
            robot.v_theta = state[5 + (rbt_obs*i) + 5]
            self.robots_blue[robot.id] = robot
        for i in range(n_yellows):
            robot = Robot()
            robot.id = i
            # Yellow robots start after all blue robot observations.
            robot.x = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 0]
            robot.y = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 1]
            robot.theta = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 2]
            robot.v_x = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 3]
            robot.v_y = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 4]
            robot.v_theta = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 5]
            self.robots_yellow[robot.id] = robot
class FrameSSL(Frame):
    def parse(self, state, n_blues=3, n_yellows=3):
        """Parse a flat grSim state vector into this frame (SSL variant).

        Layout of `state`: indices 0-4 hold the ball (x, y, z, v_x, v_y),
        followed by 11 values per blue robot, then 11 per yellow robot:
        x, y, theta, v_x, v_y, v_theta, infrared flag and four wheel speeds.
        """
        self.ball.x = state[0]
        self.ball.y = state[1]
        self.ball.z = state[2]
        self.ball.v_x = state[3]
        self.ball.v_y = state[4]
        rbt_obs = 11  # observations per robot
        for i in range(n_blues):
            robot = Robot()
            robot.id = i
            robot.x = state[5 + (rbt_obs*i) + 0]
            robot.y = state[5 + (rbt_obs*i) + 1]
            robot.theta = state[5 + (rbt_obs*i) + 2]
            robot.v_x = state[5 + (rbt_obs*i) + 3]
            robot.v_y = state[5 + (rbt_obs*i) + 4]
            robot.v_theta = state[5 + (rbt_obs*i) + 5]
            robot.infrared = bool(state[5 + (rbt_obs*i) + 6])  # ball-in-dribbler sensor flag
            robot.v_wheel0 = state[5 + (rbt_obs*i) + 7]
            robot.v_wheel1 = state[5 + (rbt_obs*i) + 8]
            robot.v_wheel2 = state[5 + (rbt_obs*i) + 9]
            robot.v_wheel3 = state[5 + (rbt_obs*i) + 10]
            self.robots_blue[robot.id] = robot
        for i in range(n_yellows):
            robot = Robot()
            robot.id = i
            # Yellow robots start after all blue robot observations.
            robot.x = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 0]
            robot.y = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 1]
            robot.theta = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 2]
            robot.v_x = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 3]
            robot.v_y = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 4]
            robot.v_theta = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 5]
            robot.infrared = bool(state[5 + n_blues*rbt_obs + (rbt_obs*i) + 6])
            robot.v_wheel0 = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 7]
            robot.v_wheel1 = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 8]
            robot.v_wheel2 = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 9]
            robot.v_wheel3 = state[5 + n_blues*rbt_obs + (rbt_obs*i) + 10]
            self.robots_yellow[robot.id] = robot
class FramePB(Frame):
    def parse(self, packet):
        """Parse a grSim protobuf packet into this frame.

        Robot orientations arrive in radians and are converted to degrees
        here (matching the units documented on Frame).
        """
        self.ball.x = packet.frame.ball.x
        self.ball.y = packet.frame.ball.y
        self.ball.v_x = packet.frame.ball.vx
        self.ball.v_y = packet.frame.ball.vy
        for _robot in packet.frame.robots_blue:
            robot = Robot()
            robot.id = _robot.robot_id
            robot.x = _robot.x
            robot.y = _robot.y
            robot.theta = np.rad2deg(_robot.orientation)  # radians -> degrees
            robot.v_x = _robot.vx
            robot.v_y = _robot.vy
            robot.v_theta = np.rad2deg(_robot.vorientation)  # rad/s -> deg/s
            self.robots_blue[robot.id] = robot
        for _robot in packet.frame.robots_yellow:
            robot = Robot()
            robot.id = _robot.robot_id
            robot.x = _robot.x
            robot.y = _robot.y
            robot.theta = np.rad2deg(_robot.orientation)  # radians -> degrees
            robot.v_x = _robot.vx
            robot.v_y = _robot.vy
            robot.v_theta = np.rad2deg(_robot.vorientation)  # rad/s -> deg/s
            self.robots_yellow[robot.id] = robot
| true |
746e31de1d07ef8c011b8a52d8a53a0ba744e348 | Python | lorgiorepo/python-basico | /project07/db_version.py | UTF-8 | 572 | 3.140625 | 3 | [] | no_license | #!/usr/bin/python2.7
import MySQLdb
# Open the connection to the database
db = MySQLdb.connect("localhost", "bcochile", "bcochile", "bancochile", 3306)
# Prepare the cursor used to run operations against the database
cursor = db.cursor()
# Execute a SQL query through the cursor's execute() method
cursor.execute("SELECT VERSION()")
# Fetch a single row with fetchone()
data = cursor.fetchone()
print "Version Base de datos : %s " % data
# Disconnect from the database
db.close()
| true |
ab31b75a8f40cf33ac783565df56d62ad6ded4a8 | Python | jasonyzhang/phd | /src/util/video.py | UTF-8 | 5,343 | 2.890625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | import os
import shutil
import subprocess
import tempfile
import matplotlib.pyplot as plt
from tqdm import tqdm
def images_to_video(output_path, images, fps):
    """Encode `images` (iterable of HxWx3 arrays) into a video at `fps`.

    Convenience wrapper around VideoWriter: writes the frames to a temp
    directory, runs ffmpeg, then removes the temp directory.
    """
    writer = VideoWriter(output_path, fps)
    writer.add_images(images)
    writer.make_video()
    writer.close()
def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string with binary prefixes.

    e.g. 1536 -> '1.5KiB'.  Falls back to the 'Yi' prefix once the value
    exceeds the 'Zi' range.
    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-
    readable-version-of-file-size
    """
    magnitude = float(num)
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(magnitude) < 1024.0:
            return '{0:3.1f}{1}{2}'.format(magnitude, prefix, suffix)
        magnitude /= 1024.0
    return '{0:.1f}{1}{2}'.format(magnitude, 'Yi', suffix)
def get_dir_size(dirname):
    """
    Returns the total size in bytes of the regular files directly inside
    `dirname`.  Subdirectories and their contents are not included.
    """
    full_paths = (os.path.join(dirname, entry) for entry in os.listdir(dirname))
    return sum(os.path.getsize(p) for p in full_paths if os.path.isfile(p))
class VideoWriter(object):
    """Accumulate frames as JPEGs in a temp directory, then encode them
    into a single H.264 video with ffmpeg.

    The temp directory is created lazily on first use and removed by
    close().
    """
    def __init__(self, output_path, fps, temp_dir=None):
        self.output_path = output_path  # path of the final video file
        self.fps = fps  # output frame rate passed to ffmpeg
        self.temp_dir = temp_dir  # frame staging dir; created lazily if None
        self.current_index = 0  # index of the next frame to write
        self.img_shape = None  # shape of the first frame; all frames must match
        self.frame_string = 'frame{:08}.jpg'  # zero-padded frame filename pattern
    def add_images(self, images_list, show_pbar=False):
        """
        Adds a list of images to temporary directory.

        Args:
            images_list (iterable): List of images (HxWx3).
            show_pbar (bool): If True, displays a progress bar.

        Returns:
            list: filenames of saved images.
        """
        filenames = []
        if show_pbar:
            images_list = tqdm(images_list)
        for image in images_list:
            filenames.append(self.add_image(image))
        return filenames
    def add_image(self, image):
        """
        Saves image to file.

        Args:
            image (HxWx3).

        Returns:
            str: filename.
        """
        if self.temp_dir is None:
            self.temp_dir = tempfile.mkdtemp()
        if self.img_shape is None:
            self.img_shape = image.shape
        # All frames must share the shape of the first one.
        assert self.img_shape == image.shape
        filename = self.get_filename(self.current_index)
        plt.imsave(fname=filename, arr=image)
        self.current_index += 1
        return filename
    def get_frame(self, index):
        """
        Read image from file.

        Args:
            index (int).

        Returns:
            Array (HxWx3).
        """
        filename = self.get_filename(index)
        return plt.imread(fname=filename)
    def get_filename(self, index):
        # Full path of frame `index`; creates the temp dir on first use.
        if self.temp_dir is None:
            self.temp_dir = tempfile.mkdtemp()
        return os.path.join(self.temp_dir, self.frame_string.format(index))
    def make_video(self):
        # Encode the staged JPEG frames into an H.264 mp4; the scale filter
        # rounds both dimensions down to even numbers (required by yuv420p).
        cmd = ('ffmpeg -y -threads 16 -r {fps} '
               '-i {temp_dir}/frame%08d.jpg -profile:v baseline -level 3.0 '
               '-c:v libx264 -pix_fmt yuv420p -an -vf '
               '"scale=trunc(iw/2)*2:trunc(ih/2)*2" {output_path}'.format(
                   fps=self.fps, temp_dir=self.temp_dir, output_path=self.output_path
               ))
        print(cmd)
        try:
            subprocess.call(cmd, shell=True)
        except OSError as e:
            # NOTE(review): ipdb drop-in is a debugging leftover -- consider
            # replacing with proper error handling/logging.
            import ipdb; ipdb.set_trace()
            print('OSError')
    def close(self):
        """
        Clears the temp_dir.
        """
        print('Removing {} which contains {}.'.format(
            self.temp_dir,
            self.get_temp_dir_size())
        )
        shutil.rmtree(self.temp_dir)
        self.temp_dir = None
    def get_temp_dir_size(self):
        """
        Returns the size of the temp dir as a human-readable string.
        """
        return sizeof_fmt(get_dir_size(self.temp_dir))
class VideoReader(object):
    """Split a video into JPEG frames with ffmpeg and read them back.

    Call read() once to extract all frames into a temp directory, then
    access them via get_image()/get_images().  close() removes the temp
    directory.
    """
    def __init__(self, video_path, temp_dir=None):
        self.video_path = video_path  # path of the input video
        self.temp_dir = temp_dir  # frame extraction dir; created lazily if None
        self.frame_string = 'frame{:08}.jpg'  # zero-padded frame filename pattern
    def read(self):
        # Extract every frame of the video into the temp dir, numbered from 0.
        if self.temp_dir is None:
            self.temp_dir = tempfile.mkdtemp()
        cmd = ('ffmpeg -i {video_path} -start_number 0 '
               '{temp_dir}/frame%08d.jpg'.format(
                   temp_dir=self.temp_dir,
                   video_path=self.video_path
               ))
        print(cmd)
        subprocess.call(cmd, shell=True)
        self.num_frames = len(os.listdir(self.temp_dir))  # number of extracted frames
    def get_filename(self, index):
        # Full path of frame `index`; creates the temp dir on first use.
        if self.temp_dir is None:
            self.temp_dir = tempfile.mkdtemp()
        return os.path.join(self.temp_dir, self.frame_string.format(index))
    def get_image(self, index):
        # Load frame `index` as an array.
        return plt.imread(self.get_filename(index))
    def get_images(self):
        # Lazily yield consecutive frames starting at 0 until one is missing.
        i = 0
        fname = self.get_filename(i)
        while os.path.exists(fname):
            yield plt.imread(self.get_filename(i))
            i += 1
            fname = self.get_filename(i)
    def close(self):
        """
        Clears the temp_dir.
        """
        print('Removing {} which contains {}.'.format(
            self.temp_dir,
            self.get_temp_dir_size())
        )
        shutil.rmtree(self.temp_dir)
        self.temp_dir = None
    def get_temp_dir_size(self):
        """
        Returns the size of the temp dir as a human-readable string.
        """
        return sizeof_fmt(get_dir_size(self.temp_dir))
| true |
553b89bdf8903005a1c20e51d9bdc5264aa84925 | Python | Hencya/WordPuzzleSearch | /WordPuzzleSearch_bruteforce.py | UTF-8 | 8,394 | 3.328125 | 3 | [] | no_license | import string
import time
#Bruteforce
class bruteforce:
def verticalDown(self,grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{row}")
hasilGrid["jenis"].append("Vertical Down")
col += 1
while col <= (len(grid[0])-1):
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{row}")
hasilGrid["jenis"].append("Vertical Down")
col += 1
return hasilGrid
def verticalUp(self,grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{(len(grid[0])-1)-col},{row}")
hasilGrid["jenis"].append("Vertical Up")
col -= 1
while col >= 0:
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{(len(grid[0])-1)-col},{row}")
hasilGrid["jenis"].append("Vertical Up")
col -= 1
return hasilGrid
def horizontalRight(self, grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{row}")
hasilGrid["jenis"].append("Horizontal Right")
row += 1
while row <= (len(grid)-1):
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{row}")
hasilGrid["jenis"].append("Horizontal Right")
row += 1
return hasilGrid
def horizontalLeft(self, grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{(len(grid)-1)-row}")
hasilGrid["jenis"].append("Horizontal Left")
row -= 1
while row >= 0:
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{(len(grid)-1)-row}")
hasilGrid["jenis"].append("Horizontal Left")
row -= 1
return hasilGrid
def diagonalRightDown(self, grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{row}")
hasilGrid["jenis"].append("Diagonal Right Down")
row += 1
col += 1
while row <= (len(grid)-1) and col <= (len(grid[0])-1):
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{row}")
hasilGrid["jenis"].append("Diagonal Right Down")
row += 1
col += 1
return hasilGrid
def diagonalRightUp(self, grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{(len(grid[0])-1)-col},{row}")
hasilGrid["jenis"].append("Diagonal Right Up")
row += 1
col -= 1
while row <= (len(grid)-1) and col >= 0:
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{(len(grid[0])-1)-col},{row}")
hasilGrid["jenis"].append("Diagonal Right Up")
row += 1
col -= 1
return hasilGrid
def diagonalLeftDown(self, grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{(len(grid)-1)-row}")
hasilGrid["jenis"].append("Diagonal Left Down")
row -= 1
col += 1
while row >= 0 and col <= (len(grid[0])-1):
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{col},{(len(grid)-1)-row}")
hasilGrid["jenis"].append("Diagonal Left Down")
row -= 1
col += 1
return hasilGrid
def diagonalLeftUp(self, grid, col, row, hasilGrid):
concateGrid = grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(f"{(len(grid[0])-1)-col},{(len(grid)-1)-row}")
hasilGrid["jenis"].append("Diagonal Left Up")
row -= 1
col -= 1
while row >= 0 and col >= 0:
concateGrid = concateGrid + grid[col][row]
hasilGrid["arrayHasil"].append(concateGrid)
hasilGrid["posisi"].append(
f"{(len(grid[0])-1)-col},{(len(grid)-1)-row}")
hasilGrid["jenis"].append("Diagonal Left Up")
row -= 1
col -= 1
return hasilGrid
def initArray(self, grid):
hasilGrid = {"posisi" : [], "arrayHasil" : [], "jenis" : []}
for col in range(len(grid[0])):
for row in range(len(grid)):
self.horizontalRight(grid, col, row, hasilGrid)
self.horizontalLeft(grid, col, row, hasilGrid)
self.verticalDown(grid, col, row, hasilGrid)
self.verticalUp(grid, col, row, hasilGrid)
self.diagonalRightDown(grid, col, row, hasilGrid)
self.diagonalLeftDown(grid, col, row, hasilGrid)
self.diagonalRightUp(grid, col, row, hasilGrid)
self.diagonalLeftUp(grid, col, row, hasilGrid)
return hasilGrid
def searchingWord(hasilGrid):
    """Prompt for a word and report every position and direction at which
    it occurs in `hasilGrid` (the output of bruteforce.initArray).

    Matching is case-insensitive (the query is uppercased before
    comparison).  All user-facing strings are Indonesian and deliberately
    left untouched.
    """
    hasilSearching = {"posisi": [], "arrayHasil": [], "jenis": []}
    found = False
    print()
    cari = input("Masukan kata yang ingin dicari :")
    for i in range(len(hasilGrid["arrayHasil"])):
        if cari.upper() == hasilGrid["arrayHasil"][i]:
            found = True
            hasilSearching["posisi"].append(hasilGrid["posisi"][i])
            hasilSearching["jenis"].append(hasilGrid["jenis"][i])
    if found:
        print("-----------------------")
        print("KATA YANG ANDA CARI ADA")
        print("-----------------------")
        print(f'Kata yang anda cari adalah : {cari.upper()}')
        print(f'Ketemu di posisi : {hasilSearching["posisi"]}')
        print(f'Dengan arah ketemunya adalah : {hasilSearching["jenis"]}')
    else:
        print("--------------")
        print("KATA TIDAK ADA")
        print("--------------")
if __name__ == '__main__':
#test 1
start_time = time.time()
grid1 = ["OKO",
"WEK",
"CAC"]
array1 = bruteforce().initArray(grid1)
searchingWord(array1)
print("Running Time grid1 : %s seconds" % (time.time() - start_time))
#test 2
start_time = time.time()
grid2 = ["OKOD",
"WEQK",
"CACD",
"SPSA"]
array1 = bruteforce().initArray(grid2)
searchingWord(array1)
print("Running Time grid2 : %s seconds" %
(time.time() - start_time))
#test3
start_time = time.time()
grid3 = ["OKODS",
"WEQSK",
"CACDD",
"ODSWE",
"SPSAJ"]
array1 = bruteforce().initArray(grid3)
searchingWord(array1)
print("Running Time grid3 : %s seconds" %
(time.time() - start_time))
#test4
start_time = time.time()
grid4 = ["OKODSWEQSK",
"WEQSKCACDD",
"CACDDOKODS",
"OKODSWEQSK",
"WEQSKCACDD",
"SPSAJODSWE",
"DSWEQQSKCA",
"DDOKOOKODS",
"ODSWEQSKCA",
"OKODSWEQSK"]
array1 = bruteforce().initArray(grid4)
searchingWord(array1)
print("Running Time grid4 : %s seconds" %
(time.time() - start_time))
#test5
start_time = time.time()
grid5 = ["ODOEQS",
"WEQSAC",
"CDDODS",
"OKEQSK",
"WEQCAC",
"ODSQSK",
]
array1 = bruteforce().initArray(grid5)
searchingWord(array1)
print("Running Time grid5 : %s seconds" %
(time.time() - start_time))
| true |
5f85615da4a99c6e05a36c233c8aa3cc208498e4 | Python | jhuinac1/Learning-Flask | /starting-demo/app.py | UTF-8 | 1,268 | 2.6875 | 3 | [] | no_license | from flask import Flask, redirect, url_for, request, render_template
from data.books import books as books_data
app = Flask(__name__) #name determines the name of the application, this is the main file of the application
# @app.route('/profile/<int:id>')
# def profile(id):
# return '<h1>This is an profile page for %d</h1>' %(id)
# function to welcome the admin
# @app.route('/admin')
# def welcome_admin():
# return "welcome admin"
# function to welcome the guest user
# @app.route('/guest/<guest>')
# def welcome_guest(guest):
# return "welcome %s" % guest
# @app.route('/user/<name>')
# def welcome_user(name):
# if name == 'admin':
# return redirect(url_for('welcome_admin'))
# else:
# return redirect(url_for('welcome_guest', guest=name))
@app.route('/')
# def index():
#     return "this is the request made by the client <br/> %s" %request.headers
def index():
    """Render the landing page template."""
    return render_template('index.html')
@app.route('/profile/<username>')
def profile(username):
    """Render the profile page for `username`.

    `isActive` is hard-coded to False -- presumably a placeholder until
    real user data exists; confirm before relying on it in the template.
    """
    data = {
        "username":username,
        "isActive": False
    }
    return render_template('profile.html', dt=data)
@app.route('/books')
def books():
    """Render the book list using the static dataset from data.books."""
    return render_template('books.html', books=books_data)
# NOTE(review): debug=True enables the interactive Werkzeug debugger --
# never use it in production.
app.run(debug=True)
8ff3b3ef4f52ed05858e1bad7fca5f11dd1f67f7 | Python | danforthcenter/persistent_homology | /old_scripts/bottleneck-distance-parallel.py | UTF-8 | 6,799 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import argparse
import os
import sys
from math import ceil
def options():
"""Parse command line options.
Args:
Returns:
argparse object.
Raises:
IOError: if dir does not exist.
IOError: if the program bottleneck-distance does not exist.
IOError: if the program run-bottleneck-distance.py does not exist.
IOError: if the program bottleneck-distance-condor-cleanup.py does not exist.
IOError: if the program create-matrix.py does not exist.
"""
parser = argparse.ArgumentParser(description="Create bottleneck-distance jobs for HTCondor",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", "--dir", help="Input directory containing diagram files.", required=True)
parser.add_argument("-j", "--jobname", help='HTCondor job name. This will be the prefix of multiple output files',
required=True)
parser.add_argument("-o", "--outdir", help="Output directory. Directory will be created if it does not exist.",
default=".")
parser.add_argument("-n", "--numjobs", help="The number of jobs per batch.", default=100, type=int)
args = parser.parse_args()
# If the input directory of diagram files does not exist, stop
if not os.path.exists(args.dir):
raise IOError("Directory does not exist: {0}".format(args.dir))
# If the program bottleneck-distance cannot be found in PATH, stop
args.exe = os.popen("which bottleneck-distance").read().strip()
if len(args.exe) == 0:
raise IOError("The executable bottleneck-distance could not be found.")
# Convert the output directory to an absolute path
args.outdir = os.path.abspath(args.outdir)
# If the output directory does not exist, make it
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
# Find the script run-bottleneck-distance.py or stop
repo_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
run_script = os.path.join(repo_dir, 'run-bottleneck-distance.py')
if not os.path.exists(run_script):
raise IOError("The program run-bottleneck-distance.py could not be found.")
else:
args.script = run_script
# Find the script bottleneck-distance-condor-cleanup.py or stop
clean_script = os.path.join(repo_dir, 'bottleneck-distance-condor-cleanup.py')
if not os.path.exists(clean_script):
raise IOError("The program bottleneck-distance-condor-cleanup.py could not be found.")
else:
args.clean = clean_script
# Find the script create-matrix.py or stop
matrix_script = os.path.join(repo_dir, 'create-matrix.py')
if not os.path.exists(matrix_script):
raise IOError("The program create-matrix.py could not be found.")
else:
args.matrix = matrix_script
# Get the value of the CONDOR_GROUP environmental variable, if defined
args.group = os.getenv('CONDOR_GROUP')
return args
def main():
# Parse flags
args = options()
# Create cleanup script condor job file
cleanfile = args.jobname + ".cleanup.condor"
clean = open(cleanfile, "w")
cleanargs = "--dir " + args.outdir + " --outfile " + args.jobname + ".bottleneck-distance.results.txt"
create_jobfile(clean, args.outdir, args.clean, cleanargs, args.group)
clean.write("queue\n")
clean.close()
# Create matrix script condor job file
matrixfile = args.jobname + ".matrix.condor"
matrix = open(matrixfile, "w")
matrixargs = "--file " + args.jobname + ".bottleneck-distance.results.txt " + \
"--matrix " + args.jobname + ".matrix.csv"
create_jobfile(matrix, args.outdir, args.matrix, matrixargs, args.group)
matrix.write("queue\n")
matrix.close()
# Create a bottleneck-distance condor job template file
condor_file = args.jobname + ".bottleneck-distance.condor"
template = open(condor_file, "w")
create_jobfile(template, args.outdir, args.script, "$(job_args)", args.group)
template.close()
# Collect diagram filenames
diagrams = []
# Walk through the input directory and find the diagram files (all *.txt files)
for (dirpath, dirnames, filenames) in os.walk(args.dir):
for filename in filenames:
# Is the file a *.txt file?
if filename[-3:] == 'txt':
diagrams.append(os.path.join(os.path.abspath(dirpath), filename))
# Job list
jobs = []
# Create a job for all diagram file pairs (combinations)
# For each diagram file
for i in range(0, len(diagrams)):
# Create a job with all the remaining diagram files
for j in range(i + 1, len(diagrams)):
jobs.append(" ".join(map(str, ["--exe", args.exe, "--diagram1", diagrams[i], "--diagram2", diagrams[j]])))
# Create DAGman file
dagman = open(args.jobname + '.dag', 'w')
dagman.write("JOB clean " + cleanfile + "\n")
dagman.write("JOB matrix " + matrixfile + "\n")
# Job counter
job_num = 0
# Number of batches
batches = int(ceil(len(jobs) / float(args.numjobs)))
for batch in range(0, batches):
# Create job batch (cluster) file
bname = args.jobname + ".batch." + str(batch) + ".condor"
batchfile = open(bname, "w")
# Initialize batch condor file
create_jobfile(batchfile, args.outdir, args.script, "$(job_args)", args.group)
for job in range(job_num, job_num + args.numjobs):
if job == len(jobs):
break
batchfile.write("job_args = " + jobs[job] + "\n")
batchfile.write("queue\n")
job_num += args.numjobs
# Add job batch file to the DAGman file
dagman.write("JOB batch" + str(batch) + " " + bname + "\n")
dagman.write("PARENT batch" + str(batch) + " CHILD clean\n")
dagman.write("PARENT clean CHILD matrix\n")
dagman.close()
def create_jobfile(jobfile, outdir, exe, arguments, group=None):
jobfile.write("universe = vanilla\n")
jobfile.write("getenv = true\n")
jobfile.write("request_cpus = 1\n")
jobfile.write("output_dir = " + outdir + "\n")
if group:
# If CONDOR_GROUP was defined, define the job accounting group
jobfile.write("accounting_group = " + group + '\n')
jobfile.write("executable = " + exe + '\n')
jobfile.write("arguments = " + arguments + '\n')
jobfile.write("log = $(output_dir)/$(Cluster).$(Process).bottleneck-distance.log\n")
jobfile.write("error = $(output_dir)/$(Cluster).$(Process).bottleneck-distance.error\n")
jobfile.write("output = $(output_dir)/$(Cluster).$(Process).bottleneck-distance.out\n")
# jobfile.write("queue\n")
if __name__ == '__main__':
main()
| true |
e65f20ed89b0c7ae36db59e92e2d6d272a9a6c48 | Python | kukushdi3981/sel-1_test-project | /task16_cloud_test1.py | UTF-8 | 1,664 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def test_example():
driver = webdriver.Chrome() # Optional argument, if not specified will search path.
# driver = webdriver.Remote("http://denkukushkin1:2sJgx7wjfVhe9pAtuo1Q@hub.browserstack.com:80/wd/hub", \
# desired_capabilities={'browser': 'chrome', 'version': "55", 'platform': "WIN8"})
driver.get('http://www.google.com/')
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("BrowserStack")
elem.submit()
print(driver.title)
driver.quit()
def test_example2():
# ------------------------------
# The actual test scenario: Test the codepad.org code execution service.
driver = webdriver.Remote("http://denkukushkin1:2sJgx7wjfVhe9pAtuo1Q@hub.browserstack.com:80/wd/hub", \
desired_capabilities={'browser': 'chrome', 'version': "55", 'platform': "WIN8"})
# Go to codepad.org
driver.get('http://codepad.org')
# Select the Python language option
python_link = driver.find_elements_by_xpath("//input[@name='lang' and @value='Python']")[0]
python_link.click()
# Enter some text!
text_area = driver.find_element_by_id('textarea')
text_area.send_keys("print 'Hello,' + ' World!'")
# Submit the form!
submit_button = driver.find_element_by_name('submit')
submit_button.click()
# Close the browser!
driver.quit()
test_example2()
| true |
982751157147227d688245a96ba177cd79a79167 | Python | chair-dsgt/mip-for-ann | /sparsify/cp_losses.py | UTF-8 | 669 | 2.734375 | 3 | [
"MIT"
] | permissive | import cvxpy as cp
from training.utils import one_hot
def softmax_loss(last_layer_logits, y):
"""marginal softmax based on https://ttic.uchicago.edu/~kgimpel/papers/gimpel+smith.naacl10.pdf
Arguments:
last_layer_logits {cvxpy variable} -- decision variable of the solver approximated output to the model's logits
y {np.array} -- labels of input batch to the solver
Returns:
cvxpy variable -- the loss computed used as the solver's objective
"""
k = last_layer_logits.shape[1]
Y = one_hot(y, k)
return cp.sum(cp.log_sum_exp(last_layer_logits, axis=1)) - cp.sum(
cp.multiply(Y, last_layer_logits)
)
| true |
16db450417677eac34be5ede3a7cdf459021131b | Python | SuyashPandya/Word-Sense-Disambiguation | /p1.py | UTF-8 | 2,277 | 2.890625 | 3 | [] | no_license | from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
from itertools import chain
from nltk import pos_tag
from sys import stdout
import MySQLdb as sql
import CGIHTTPServer
CGIHTTPServer.test()
#Connect with database
db = sql.connect("localhost","root","","my_python")
#prepare a cursor
cursor = db.cursor()
from nltk.tokenize import sent_tokenize, word_tokenize
test = raw_input("enter the test")
print(test)
ps = PorterStemmer()
def lesk(context_sentence, ambiguous_word, pos=None, stem=True, hyperhypo=True):
max_overlaps = 0; lesk_sense = None
context_sentence = context_sentence.split()
for ss in wn.synsets(ambiguous_word):
# If POS is specified.
if pos and ss.pos is not pos:
continue
lesk_dictionary = []
# Includes definition.
lesk_dictionary+= ss.definition.split()
# Includes lemma_names.
lesk_dictionary+= ss.lemma_names
# Optional: includes lemma_names of hypernyms and hyponyms.
if hyperhypo == True:
lesk_dictionary+= list(chain(*[i.lemma_names for i in ss.hypernyms()+ss.hyponyms()]))
if stem == True: # Matching exact words causes sparsity, so lets match stems.
lesk_dictionary = [ps.stem(i) for i in lesk_dictionary]
context_sentence = [ps.stem(i) for i in context_sentence]
overlaps = set(lesk_dictionary).intersection(context_sentence)
if len(overlaps) > max_overlaps:
lesk_sense = ss
max_overlaps = len(overlaps)
return lesk_sense
test1 = sent_tokenize(test)
print(test1)
n= len(test1)
for t in range(0,n):
s=0
for i in word_tokenize(test1[t]):
query="SELECT *FROM ambigous WHERE word ='%s'" % i
cursor.execute(query)
result=cursor.fetchall()
for r in result:
s = s+1
print("Context:",test1[t])
answer = lesk(test1[t],'%s' %r)
print("Sense:", answer)
print("Definition :" ,('%s->' %r) , answer.definition)
print
if s == 0:
print ("Context:", test1[t])
print ("No Ambiguous word found in this sentence.")
| true |
4508465153f961a47f0a1bd7047e3a8c233c4e5f | Python | elango-ux/CodeTrainingProject | /Python/extending_buit_in_type.py | UTF-8 | 106 | 2.96875 | 3 | [] | no_license | class Text(str):
def duplicate(self):
return self + self
text = Text("Python")
text.lower() | true |
50389c8e4caed610805e927356ac0c70a7e31d7a | Python | laurobmb/ScriptsPaloAltoNetworks | /info_vacinas.py | UTF-8 | 2,004 | 2.53125 | 3 | [] | no_license | from xml.etree import ElementTree
import urllib.request
import ssl,os,sys
import time as time
import datetime
from time import strptime
import pyautogui
os.system('clear')
def barra( nome ):
barra="======================================================================================"
titulo=nome
print(barra)
print(barra)
print(titulo.center(len(barra)))
print(barra)
print(barra)
return;
def paloalto_url(link):
context = ssl._create_unverified_context()
url = link
url = url.replace("paloalto", sys.argv[1])
try:
pagina=urllib.request.urlopen(url,context=context)
except:
print ("error 1")
exit(1)
dom=ElementTree.parse(pagina)
xml=dom.findall('result/content-updates/entry')
for i in xml:
filename=i.find('filename').text
version=i.find('version').text
releasedon=i.find('released-on').text
print ("Nome:", filename, " versão:", version, "Released:", releasedon)
return;
def data_atual():
now = datetime.datetime.today()
d = now.strftime("%d")
m = now.strftime("%m")
data=d+m
return data
def main():
if len(sys.argv) != 2:
print('use: {} <ip do firewall>'.format(sys.argv[0]))
sys.exit()
PrivateKey = [chave]
url='https://paloalto/api/?type=op&cmd=<request><anti-virus><upgrade><check></check></upgrade></anti-virus></request>&key='+PrivateKey
barra("Atualização de Vacinas do PaloAlto - Antivírus")
paloalto_url(url)
url = 'https://paloalto/api/?type=op&cmd=<request><content><upgrade><check></check></upgrade></content></request>&key='+PrivateKey
barra("Atualização de Vacinas do PaloAlto - Conteúdo de segurança")
paloalto_url(url)
url = 'https://paloalto/api/?type=op&cmd=<request><wildfire><upgrade><check></check></upgrade></wildfire></request>&key='+PrivateKey
barra("Atualização de Vacinas do PaloAlto - Wildfire")
paloalto_url(url)
time.sleep(2)
foto = pyautogui.screenshot()
foto.save('/home/lpgomes/Imagens/'+data_atual()+' - firewall.png')
if __name__ == '__main__':
main()
| true |
99c0bb527a0b69909c002accae605fd44a1387b0 | Python | fmaida/caro-diario | /caro-diario/diario/configurazione.py | UTF-8 | 1,557 | 3.15625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import os
import json
class Configurazione:
def __init__(self, p_file=None, p_percorso=None):
"""
Inizializza la classe
"""
if not p_percorso:
p_percorso = os.path.expanduser("~")
if not p_file:
p_file = ".config.json"
self.basedir = p_percorso
self.file = os.path.join(self.basedir, p_file)
# Controlla l'esistenza di un file di configurazione
if os.path.exists(self.file):
with open(self.file, "r") as f:
self.config = json.load(f)
else:
# Il file non esiste. Crea un file vuoto
self.config = {}
def set_default(self, p_tag, p_valore_default):
"""
Se non esiste un valore per la chiave specificata,
crea la chiave con il valore di default suggerito
"""
# Prova ad impostare alcuni valori di default
try:
if self.config[p_tag] == "":
self.config[p_tag] = p_valore_default
except KeyError:
self.config[p_tag] = p_valore_default
def tag(self, p_nome_tag, p_valore=None):
"""
Imposta o restituisce la chiave richiesta
"""
if p_valore:
self.config[p_nome_tag] = p_valore
return self.config[p_nome_tag]
def salva(self):
"""
Salva il file di configurazione su disco
"""
with open(self.file, "w") as f:
f.write(json.dumps(self.config, indent=4, sort_keys=True))
| true |
e8a32dcd0f5fa9da6c0ef4e96036730b69e337ef | Python | zhuzaiye/HighLevel-python3 | /chapter04/class_var.py | UTF-8 | 666 | 4.09375 | 4 | [] | no_license | """
1、类变量和实例变量的认识
2、类变量和实例变量的区别
"""
class A:
# 类变量, 永远不可能通过实例进行修改,只能通过类自己进行修改
# aa是所有实例共享的
aa = 1
def __init__(self, x, y):
self.x = x # 实例变量
self.y = y
if __name__ == '__main__':
a = A(2, 3) # 实例化
# 对A类进行类变量修改
A.aa = 11
# 这里其实不是修改A.aa这个类变量,而是重新创建一个和x,y一样的aa实例变量
a.aa = 100
print(a.x, a.y, a.aa) # 2 3 100
print(A.aa) # 11
# 重新实例化b
b = A(4, 5)
print(b.aa) # 11
| true |
7b6ec67f909c041bf4a543583f34a3b6ffca9655 | Python | CastleWhite/LeetCodeProblems | /242.py | UTF-8 | 364 | 3.265625 | 3 | [] | no_license | class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if len(s) != len(t):
return False
reco = [0]*26
for i in s:
reco[ord(i)-ord('a')] += 1
for i in t:
if reco[ord(i)-ord('a')] == 0:
return False
reco[ord(i)-ord('a')] -= 1
return reco == [0]*26
| true |
b6653d91f57b7d12487a3fc19b006b342b3009ab | Python | YuyaYoshioka/kenkyuu | /DMD/gaussian_fourie.py | UTF-8 | 4,779 | 2.96875 | 3 | [] | no_license | import math
import numpy as np
from numpy import dot, exp, pi, cos, sin
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy.linalg import eigh
from scipy.integrate import solve_ivp
from scipy import integrate
# 各種定数の設定
dt = 0.0008 # 時間刻み
xmin = 0.0
xmax = 2.0*math.pi
N =100
dx = (xmax - xmin)/N # 空間刻み
c = 1.0 # 移流速度
ν = 0 # 拡散係数
T=4
M = int(T/dt)
α=c*dt/dx
β=ν*dt/dx/dx
t = np.arange(0, T, dt) # 時間
mu = 1.0
sigma = 0.2
# 関数を定義
y = lambda x: exp(-(x-mu)**2 / sigma**2)
# yを0からpiまで数値積分
a0, er = integrate.quad(y, 0, 2*pi)
a0/=pi*2
r=20
an=[]
bn=[]
for n in range(1,r+1):
ya= lambda x: cos(n*x)*exp(-(x-mu)**2 / sigma**2)
yb= lambda x: sin(n*x)*exp(-(x-mu)**2 / sigma**2)
a, er = integrate.quad(ya, 0, 2 * pi)
b, er = integrate.quad(yb, 0, 2 * pi)
an.append(a/pi)
bn.append(b/pi)
x=np.arange(xmin,xmax,dx)
ff=np.full(len(x),a0)
n=1
for a in an:
ff+=a*cos(n*x)
n+=1
n=1
for b in bn:
ff+=b*sin(n*x)
n+=1
plt.plot(x,ff)
plt.show()
# 各種定数の設定
dt = 0.0008 # 時間刻み
xmin = 0.0
xmax = 2.0*math.pi
dx = (xmax - xmin)/N # 空間刻み
c = 1.0 # 移流速度
ν = 0 # 拡散係数
T=4
M = int(T/dt)
α=c*dt/dx
β=ν*dt/dx/dx
print(ν*dt/dx/dx)
x=np.arange(xmin,xmax,dx)
t = np.arange(0, T, dt) # 時間
U=[]
# 初期値
u0 = exp(-(x-mu)**2 / sigma**2)
U.append(u0)
a=-2*c*dt/3/dx
b=c*dt/12/dx
plt.plot(x,u0)
plt.show()
# 差分法
for _ in range(int(M)+1):
u = [0 for _ in range(N)]
u[0] = U[-1][0] + a * (U[-1][1] - U[-1][-1]) + b * (U[-1][2] - U[-1][-2])
u[1] = U[-1][1] + a * (U[-1][2] - U[-1][0]) + b * (U[-1][3] - U[-1][-1])
for j in range(2, N - 2):
u[j] = U[-1][j] - (U[-1][j + 1] - U[-1][j - 1]) * c * dt / 2 / dx + dt * ν * (
U[-1][j - 1] - 2 * U[-1][j] + U[-1][j + 1]) / dx / dx
u[N - 2] = U[-1][N - 2] + a * (U[-1][N - 1] - U[-1][N - 3]) + b * (U[-1][0] - U[-1][N - 4])
u[N - 1] = U[-1][N - 1] + a * (U[-1][0] - U[-1][N - 2]) + b * (U[-1][1] - U[-1][N - 3])
U.append(u)
plt.plot(x,U[-1])
plt.show()
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(1,1,1)
# アニメ更新用の関数
def update_func(i):
# 前のフレームで描画されたグラフを消去
ax.clear()
ax.plot(x, U[i], color='blue')
ax.scatter(x, U[i], color='blue')
# 軸の設定
ax.set_ylim(-1, 7)
# 軸ラベルの設定
ax.set_xlabel('x', fontsize=12)
ax.set_ylabel('u', fontsize=12)
# サブプロットタイトルの設定
ax.set_title('Time: ' + '{:.2f}'.format(dt*i))
ani = animation.FuncAnimation(fig, update_func, interval=1, repeat=True, save_count=int(M))
# アニメーションの保存
# ani.save('test7.gif', writer="imagemagick")
# 表示
plt.show()
V=np.array(U)
W=V.T
# データ行列
u_ave = np.average(W, axis=1) # 列方向の平均
print(u_ave.shape)
D = W - u_ave.reshape(len(u_ave), 1) # 時間平均を差し引く
# 固有値問題
R = (D.T).dot(D)
val, vec = eigh(R) # R is symmetric
# eighの戻り値は昇順なので逆順にして降順にする
val = val[::-1]
vec = vec[:, ::-1]
print(min(val))
print(val[:100])
# 累積寄与率
values=[]
whole=sum(val)
for n in range(40):
value=val[:n+1]
values.append(sum(value)/whole)
mode=np.arange(1,41,1)
plt.xlabel('mode')
plt.ylabel('eigenvalues')
plt.plot(mode,val[:40],linestyle='None',marker='.')
ax = plt.gca()
ax.set_yscale('log') # メイン: y軸をlogスケールで描く
plt.show()
plt.xlabel('mode')
plt.ylabel('cumulative contribution rate')
plt.plot(mode,values,linestyle='None',marker='.')
plt.show()
# 固有モード
vn = vec[:,:r]/np.sqrt(val[:r])
phi = D.dot(vn)
# ROMシミュレーション
# 初期値
a0 = (u0 - u_ave).dot(phi)
print(a0.shape)
# 平均値の勾配と分散
uax = np.gradient(u_ave,x)
uaxx = np.gradient(uax,x)
# 固有モードの勾配と分散
phix = np.gradient(phi,x, axis=0)
phixx = np.gradient(phix,x, axis=0)
print(phix.shape)
def lde_rom(t,a) :
rhs1 = dot(phi.T, uax)
rhs2 = dot((phi.T).dot(phix), a)
n_rhs1 = dot(phi.T, uaxx)
n_rhs2 = dot((phi.T).dot(phixx), a)
return -c*(rhs1 + rhs2)+ν*(n_rhs1 + n_rhs2)
sol_a = solve_ivp(lde_rom,[0,T], a0, method='Radau', t_eval=t)
a =sol_a.y
u_rom = u_ave.reshape(len(u_ave),1) + np.dot(phi, a)
u_rom=u_rom.T
error=0
for n in range(N):
error+=abs(u0[n]-ff[n])
print('フーリエと理論解')
print(error/N)
error=0
for n in range(N):
error+=abs(u0[n]-u_rom[0][n])
print('ROMと理論解')
print(error/N)
plt.xlabel('x')
plt.ylabel('u')
plt.plot(x,ff,label='fourie',linestyle='-.')
plt.plot(x,u_rom[0],label='u_rom',linestyle=':')
plt.plot(x,u0,label='theoretical',linestyle='-.')
plt.legend()
plt.show()
| true |
18e86dee3df44e850b572bbf8450992c4835af4a | Python | jiangshen95/UbuntuLeetCode | /InsertionSortList.py | UTF-8 | 938 | 3.328125 | 3 | [] | no_license | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def insertionSortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head==None:
return head
front = ListNode(0)
cur = head
while cur:
next = cur.next
pre = front
while pre.next and pre.next.val < cur.val:
pre = pre.next
cur.next = pre.next
pre.next = cur
cur = next
return front.next
if __name__=='__main__':
nums = list(input())
head = ListNode(0)
p = head
for num in nums:
p.next = ListNode(num)
p = p.next
head = head.next
solution = Solution()
p = solution.insertionSortList(head)
while p:
print(p.val)
p = p.next
| true |
2753c61d01a36fc4e1254500de436f1b081fc667 | Python | nikio8/Python | /sandwMaker.py | UTF-8 | 2,329 | 4.25 | 4 | [] | no_license | # Sandwich Maker
# Write a program that asks users for their sandwich preferences. The program should use PyInputPlus to ensure that they enter valid input, such as:
# Using inputMenu() for a bread type: wheat, white, or sourdough.
# Using inputMenu() for a protein type: chicken, turkey, ham, or tofu.
# Using inputYesNo() to ask if they want cheese.
# If so, using inputMenu() to ask for a cheese type: cheddar, Swiss, or mozzarella.
# Using inputYesNo() to ask if they want mayo, mustard, lettuce, or tomato.
# Using inputInt() to ask how many sandwiches they want. Make sure this number is 1 or more.
# Come up with prices for each of these options, and have your program display a total cost after the user enters their selection.
import pyinputplus as pyip
pricelist = {'wheat': 2.0, 'white': 2.0, 'sourdough': 2.5,
'chicken': 10.0, 'turkey': 9.0, 'ham': 8.0, 'tofu': 7.0,
'cheese':0.5, 'cheddar':0.5, 'Swiss':0.6, 'mozzarella':0.5,
'mayo':0.2,
'mustard':0.2,
'lettuce':0.5,
'tomato':0.5
}
sandwitch = []
price = 0.0
bread = pyip.inputMenu(['wheat','white','sourdough'], '%s> '%('bread type: wheat, white, or sourdough'))
sandwitch.append(bread)
protein = pyip.inputMenu(['chicken', 'turkey', 'ham', 'tofu'], '%s> '%('protein type: chicken, turkey, ham, or tofu'))
sandwitch.append(protein)
cheese = pyip.inputYesNo('cheese? y/n > ')
if cheese == 'yes':
cheese = pyip.inputMenu(['cheddar', 'Swiss', 'mozzarella'], '%s> '%('cheese type: cheddar, Swiss, or mozzarella'))
sandwitch.append(cheese)
mayo = pyip.inputYesNo('mayo? y/n > ')
if mayo == 'yes':
sandwitch.append('mayo')
mustard = pyip.inputYesNo('mustard? y/n > ')
if mustard == 'yes':
sandwitch.append('mustard')
lettuce = pyip.inputYesNo('lettuce? y/n > ')
if lettuce == 'yes':
sandwitch.append('lettuce')
tomato = pyip.inputYesNo('tomato? y/n > ')
if tomato == 'yes':
sandwitch.append('tomato')
noOfSandwiches = pyip.inputInt('no of sandwiches? > ', min=1)
print("Your sandwitch: " + ', '.join(sandwitch))
for k in sandwitch:
price += pricelist[k] * noOfSandwiches
print('# ' + k + ' ' + str(pricelist[k]) + ' * ' + str(noOfSandwiches))
print('amount due: ' + str(round(price,2))) | true |
bfa04a7c3959f82ebd7b05a3b342b58c6e3deed3 | Python | jsamoocha/stravalib | /stravalib/field_conversions.py | UTF-8 | 1,757 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | import logging
from datetime import timedelta
from functools import wraps
from typing import Any, Callable, List, Optional, Sequence, Union
import pytz
from pytz.exceptions import UnknownTimeZoneError
from stravalib.strava_model import ActivityType, SportType
LOGGER = logging.getLogger(__name__)
def optional_input(field_conversion_fn: Callable) -> Callable:
@wraps(field_conversion_fn)
def fn_wrapper(field_value: Any):
if field_value is not None:
return field_conversion_fn(field_value)
else:
return None
return fn_wrapper
@optional_input
def enum_value(v: Union[ActivityType, SportType]) -> str:
try:
return v.__root__
except AttributeError:
LOGGER.warning(
f"{v} is not an enum, returning itself instead of its value"
)
return v
@optional_input
def enum_values(enums: Sequence[Union[ActivityType, SportType]]) -> List:
# Pydantic (1.x) has config for using enum values, but unfortunately
# it doesn't work for lists of enums.
# See https://github.com/pydantic/pydantic/issues/5005
return [enum_value(e) for e in enums]
@optional_input
def time_interval(seconds: int) -> timedelta:
"""
Replaces legacy TimeIntervalAttribute
"""
return timedelta(seconds=seconds)
@optional_input
def timezone(tz: str) -> Optional[pytz.timezone]:
if " " in tz:
# (GMT-08:00) America/Los_Angeles
tzname = tz.split(" ", 1)[1]
else:
# America/Los_Angeles
tzname = tz
try:
tz = pytz.timezone(tzname)
except UnknownTimeZoneError as e:
LOGGER.warning(
f"Encountered unknown time zone {tzname}, returning None"
)
tz = None
return tz
| true |
14a67789c8f68f609b8ec8ee8fe9fa0f6e7808ec | Python | florije1988/flask_large | /api/utility/time_utility.py | UTF-8 | 1,937 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
__author__ = 'florije'
from datetime import datetime
import time
DATAFORMATE = '%Y-%m-%d %H:%M:%S'
def datetime_to_timestamp(date_time):
"""
datetime(datetime.datetime) -> timestamp(int)
2014-10-27 16:56:11 -> 1414400171
:param date_time:
:return:
"""
return int(time.mktime(date_time.timetuple()))
def datetime_str_to_timestamp(date_time_str):
"""
datetime str(str) -> timestamp(int)
2014-10-27 16:56:11 -> 1414400171
:param date_time_str:
:return:
"""
return datetime_to_timestamp(datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S'))
def timestamp_to_datetime(time_stamp):
"""
timestamp(int) -> datetime(datetime.datetime)
1414400171 -> 2014-10-27 16:56:11
:param time_stamp:
:return:
"""
return datetime.fromtimestamp(time_stamp)
# return time.localtime(time_stamp)
def timestamp_to_datetime_str(time_stamp):
"""
timestamp(int) -> datetime str(str)
1414400171 -> 2014-10-27 16:56:11
:param time_stamp:
:return:
"""
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))
def current_time_stamp():
return int(time.time())
if __name__ == '__main__':
now_time_stamp = int(time.time())
print now_time_stamp, type(now_time_stamp)
date_times = timestamp_to_datetime(now_time_stamp)
print date_times, type(date_times)
repeat_time_stamp = datetime_to_timestamp(date_times)
print repeat_time_stamp, type(repeat_time_stamp)
date_times_str = timestamp_to_datetime_str(repeat_time_stamp)
print date_times_str, type(date_times_str)
repeat_date_time = datetime_str_to_timestamp(date_times_str)
print repeat_date_time, type(repeat_date_time)
'''
1414400171 <type 'int'>
2014-10-27 16:56:11 <type 'datetime.datetime'>
1414400171 <type 'int'>
2014-10-27 16:56:11 <type 'str'>
1414400171 <type 'int'>
'''
| true |
2c56f2e38c6fbb3a8f2626fafb25bbd83708df3a | Python | AMDS123/leetcode_python | /ReverseInteger.py | UTF-8 | 596 | 3.765625 | 4 | [] | no_license | # Reverse digits of an integer.
#
# Example1: x = 123, return 321
# Example2: x = -123, return -321
#
# click to show spoilers.
#
# Note:
# The input is assumed to be a 32-bit signed integer.
# Your function should return 0 when the reversed integer overflows.
class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
res = 0
flag = x < 0 and -1 or 1
x *= flag
while x != 0:
res = res * 10 + x % 10
x /= 10
if res > 2147483647:
return 0
return res * flag | true |
0b19b5ba10e2b08b4c80689b92633eba38c14fbd | Python | Arnav-17/Random-Problems | /Library Management.py | UTF-8 | 4,164 | 3.40625 | 3 | [] | no_license | import time
def signup():
username = input('Enter username: \n')
password = input('Enter password: \n')
with open('Login.txt', 'a') as f:
f.write(username)
f.write(',')
f.write(password)
f.write('\n')
print('Signed up successfully')
def login():
username1 = input('Enter username: \n')
password1 = input('Enter password: \n')
with open('Login.txt') as f:
m = []
for i in f:
keys = i.split(',')
username = keys[0]
password = keys[1]
char = len(password) - 1
password2 = password[0:char]
m.append(username)
m.append(password2)
while 1 == 1:
if username1 in m and password1 in m:
print('Login Successful')
elif username1 in m and password1 not in m:
print('Invalid Password')
break
elif username not in m and password1 in m:
print('Invalid Username')
break
elif username not in m and password1 not in m:
print('Invalid Credentials')
break
print(f'Welcome {username1}')
with open('Login logs.txt', 'a') as xyz:
xyz.write(f"Name:-{username1}, logged in at {time.asctime(time.localtime(time.time()))}\n")
d = input('Enter 1 to see books, 2 to issue book, 3 to donate book, 4 to return book\n')
if d == '1':
p = input('Enter 1 to see list of all books and 2 to see list of books currently available.\n')
if p == '1':
with open('All Books.txt') as nbc:
print(f"The books available are:- \n")
print(nbc.read())
elif p == '2':
with open('Current Books.txt') as nbc:
print(f"The books available are:- \n")
print(nbc.read())
elif d == '2':
b = input(f'Enter book\'s name\n')
with open('Current Books.txt') as r:
x = r.read()
if b in x:
with open('Issue logs.txt', 'a') as lol:
lol.write(f'Name:-{username1}, Book:-{b} issued at {time.asctime(time.localtime(time.time()))}\n')
print('Book issued successfully')
with open('Current Books.txt', 'r') as xyz:
lols = xyz.readlines()
with open('Current Books.txt', 'w') as xyz:
for line in lols:
if line.strip('\n') != b:
xyz.write(line)
else:
print('The book you want is currently not available')
elif d == '3':
j = input("Enter the book you want to donate\n")
with open('Current Books.txt', 'a') as y:
y.write(f'{j}\n')
print('Books donated Successfully')
print('Thank You for your donation')
with open('All Books.txt', 'a') as z:
z.write(f'{j}\n')
elif d == '4':
m = input('Enter the book\'s name you want to return\n')
c = []
with open('All Books.txt', 'r+') as abc:
for item in abc:
chars = len(item) - 1
key = item[0:chars]
c.append(key)
with open('Current Books.txt', 'a') as cde:
if m not in c:
print('This book doesnt belong to the library')
else:
cde.write(m)
print('Book returned successfully')
break
a = input('Enter 1 for signup, 2 for login\n')
if a == '1':
signup()
elif a == '2':
login()
| true |
1d6a232960dd086bd2b7959252c3d6712a8f2890 | Python | hananbeer/pyon | /test.py | UTF-8 | 1,363 | 3.078125 | 3 | [
"MIT"
] | permissive | import json
from pyon import *
js = '{ "a": {"b": [0, 1.2, {"c": 3}, "pi"] }, "d": {"e": 5}, "z": 14 }'
data = json.loads(js)
p = PyonObject(**data)
def test(expr, expc=None):
res = eval(expr, globals())
passed = ''
if res == expc:
passed = 'PASSED!'
elif expc is not None:
raise Exception('failed: "%s" evaluated to "%s", expecting "%s"' % (expr, res, expc))
print('%s:\r\n=> %s %s' % (expr, str(res), passed))
z = p['z']
print(z)
p['z'] = 15
print(p.z)
p.z = 16
print(p.z)
p.z *= 2
print(p.z)
test('p.a')
test('p.a.b')
test('p.a.c', PyoNone)
test('p.a.c.b', PyoNone)
test('p.a.b.c')
test('p.a.b[2]')
test('p.a.b[2].c', 3)
test('p.a.b[3]', 'pi')
test('p.d')
# TODO: should we expect int? I think PyonObject is fine, but I guess overriding __class__ might work...
#test('type(p.d.e)', int)
test('type(p.d.e)', PyonObject)
test('p.d.e', PyonObject(5))
test('p.d.e == PyonObject(4)', False)
test('p.d.e == PyonObject(5)', True)
test('p.d.e != PyonObject(5)', False)
test('p.d.e != PyonObject(6)', True)
test('p._pyon_data')
test('p.d.e == 5', True)
test('p.d.e >= 4', True)
test('p.d.e <= 6', True)
test('p.d.e > 6', False)
test('p.d.e < 4', False)
test('-p.d.e', -5)
test('type(p.d.e.to_prim())', int)
test('p.d.e.to_prim()', 5)
test('p.d.f.g.h.i.j.k == PyoNone', True)
| true |
55a0717858c6a4bf4dd248ae5cced8ab38731199 | Python | beagleboard/cloud9-examples | /BeagleBone/Blue/EduMIP/python/balance.py | UTF-8 | 5,744 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# Makes the robot balance
# Based on: https://github.com/mcdeoliveira/pyctrl/raw/master/examples/rc_mip_balance.py
import math
import time
import warnings
import numpy as np
import sys, tty, termios
import threading
import pyctrl
def brief_warning(message, category, filename, lineno, line=None):
return "*{}\n".format(message)
warnings.formatwarning = brief_warning
# read key stuff
ARROW_UP = "\033[A"
ARROW_DOWN = "\033[B"
ARROW_RIGHT = "\033[C"
ARROW_LEFT = "\033[D"
DEL = "."
END = "/"
SPACE = " "
def read_key():
key = sys.stdin.read(1)
if ord(key) == 27:
key = key + sys.stdin.read(2)
elif ord(key) == 3:
raise KeyboardInterrupt
return key
def get_arrows(mip, fd):
    """Keyboard loop: translate keypresses into mip velocity/steering signals.

    Runs until the controller leaves the running state.  *fd* is the
    stdin file descriptor; it is switched to cbreak mode so single
    keystrokes are delivered without waiting for Enter.  steer_reference
    of 180/360 (= 0.5) is the neutral, straight-ahead position.
    """
    phi_dot_reference = 0
    steer_reference = 180/360
    tty.setcbreak(fd)
    while mip.get_state() != pyctrl.EXITING:
        # Redraw the status line in place ('\r', no newline).
        print('\rvelocity = {:+4.0f} '
              ' steering = {:+4.2f} deg'
              .format(100*phi_dot_reference,
                      360*(steer_reference-180/360)),
              end='')
        key = read_key()
        if key == ARROW_LEFT:
            # Steering is clamped to [0, 1]; 20/360 per keypress.
            steer_reference = max(steer_reference - 20/360, 0)
            mip.set_signal('steer_reference', steer_reference)
        elif key == ARROW_RIGHT:
            steer_reference = min(steer_reference + 20/360, 1)
            mip.set_signal('steer_reference', steer_reference)
        elif key == ARROW_UP:
            # NOTE: the signal is the negated reference — presumably the
            # controller's sign convention is inverted; confirm in pyctrl.
            phi_dot_reference = phi_dot_reference + 0.1
            mip.set_signal('phi_dot_reference', - phi_dot_reference)
        elif key == ARROW_DOWN:
            phi_dot_reference = phi_dot_reference - 0.1
            mip.set_signal('phi_dot_reference', - phi_dot_reference)
        elif key == SPACE:
            # Reset both velocity and steering to neutral.
            phi_dot_reference = 0
            mip.set_signal('phi_dot_reference', - phi_dot_reference)
            steer_reference = 180/360
            mip.set_signal('steer_reference', steer_reference)
        elif key == DEL:
            # Recenter steering only.
            steer_reference = 180/360
            mip.set_signal('steer_reference', steer_reference)
        elif key == END:
            # Zero forward velocity only.
            phi_dot_reference = 0
            mip.set_signal('phi_dot_reference', - phi_dot_reference)
def main():
    """Wire up and run the pyctrl balance controller for the EduMIP.

    Builds the signal-flow graph (encoders -> phi -> phi_dot ->
    state-space controller -> pwm, with a steering mix), arms a tilt
    kill switch, then hands keyboard control to get_arrows() in a
    daemon thread until the controller exits or Ctrl-C is pressed.
    """
    # import blocks and controller
    from pyctrl.rc.mip import Controller
    from pyctrl.block.system import System, Subtract, Differentiator, Sum, Gain
    from pyctrl.block.nl import ControlledCombination
    from pyctrl.block import Logger, ShortCircuit
    from pyctrl.block.logic import CompareAbs
    from pyctrl.system.ss import DTSS
    # create mip
    mip = Controller()
    # phi is the average of the encoders
    mip.add_signal('phi')
    mip.add_filter('phi',
                   Sum(gain=0.5),
                   ['encoder1','encoder2'],
                   ['phi'])
    # phi dot
    mip.add_signal('phi_dot')
    mip.add_filter('phi_dot',
                   Differentiator(),
                   ['clock','phi'],
                   ['phi_dot'])
    # phi dot reference
    mip.add_signal('phi_dot_reference')
    # state-space matrices
    # NOTE(review): discrete-time controller matrices; the 2*pi*(100/7.4)
    # factor presumably converts controller output into motor duty units
    # (gear ratio 100:7.4?) — confirm against the controller derivation.
    A = np.array([[0.913134, 0.0363383],[-0.0692862, 0.994003]])
    B = np.array([[0.00284353, -0.000539063], [0.00162443, -0.00128745]])
    C = np.array([[-383.009, 303.07]])
    D = np.array([[-1.22015, 0]])
    B = 2*np.pi*(100/7.4)*np.hstack((-B, B[:,1:]))
    D = 2*np.pi*(100/7.4)*np.hstack((-D, D[:,1:]))
    ssctrl = DTSS(A,B,C,D)
    # state-space controller
    mip.add_signals('pwm')
    mip.add_filter('controller',
                   System(model = ssctrl),
                   ['theta_dot','phi_dot','phi_dot_reference'],
                   ['pwm'])
    # steering biasing
    # Splits the single pwm command into left/right motor commands
    # weighted by steer_reference.
    mip.add_signal('steer_reference')
    mip.add_filter('steer',
                   ControlledCombination(),
                   ['steer_reference', 'pwm','pwm'],
                   ['pwm1','pwm2'])
    # set references
    mip.set_signal('phi_dot_reference',0)
    mip.set_signal('steer_reference',0.5)
    # add kill switch
    # Stops the controller when body angle theta exceeds ~0.2 rad (fell over).
    mip.add_filter('kill',
                   CompareAbs(threshold = 0.2),
                   ['theta'],
                   ['is_running'])
    # print controller
    print(mip.info('all'))
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        print("""
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 *                      M I P   B A L A N C E                         *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
""")
        input('Hold your MIP upright and hit <ENTER> to start balancing')
        print("""
Use your keyboard to control the mip:
* UP and DOWN arrows move forward and back
* LEFT and RIGHT arrows steer
* / stops forward motion
* . stops steering
* SPACE resets forward motion and steering
""")
        # reset everything
        mip.set_source('clock',reset=True)
        mip.set_source('encoder1',reset=True)
        mip.set_source('encoder2',reset=True)
        mip.set_filter('controller',reset=True)
        mip.set_source('inclinometer',reset=True)
        # start the controller
        mip.start()
        print("Press Ctrl-C to exit")
        # fire thread to update velocities
        thread = threading.Thread(target = get_arrows,
                                  args = (mip, fd))
        thread.daemon = True
        thread.start()
        # and wait until controller dies
        mip.join()
    except KeyboardInterrupt:
        print("> Balancing aborted")
        mip.set_state(pyctrl.EXITING)
    finally:
        # Always restore the terminal settings changed by tty.setcbreak().
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# Run the balancer when executed as a script.
if __name__ == "__main__":
    main()
| true |
b574ee7ac1e2b66559729ee1259e20bb5fafaff5 | Python | daikosh/youtube-streamlit | /main.py | UTF-8 | 2,766 | 3.5 | 4 | [] | no_license | import streamlit as st
import numpy as np
import pandas as pd
from PIL import Image
import time
## Display the page title
st.title("Streamlit 超入門")
## Display plain text
st.write("DataFrame")
## Display a DataFrame three ways (write / dataframe / table)
df = pd.DataFrame({
    "1列目": [1, 2, 3, 4],
    "2列目": [10, 20, 30, 40]
})
st.write(df.style.highlight_max(axis=0))
st.dataframe(df.style.highlight_max(axis=0), width=100, height=100)
st.table(df.style.highlight_max(axis=0))
## Display markdown (bare string is rendered by Streamlit's "magic")
"""
# 1章
## 1.1節
### 1.1.1項
```python
import streamlit as st
import numpy as np
import pandas as pd
```
"""
## Display charts
# NOTE(review): a set literal is passed as `columns`, so column order is
# arbitrary — a list such as ["a", "b", "c"] is probably intended; confirm.
df_chart = pd.DataFrame(
    np.random.rand(20, 3),
    columns = {"a", "b", "c"}
)
st.line_chart(df_chart)
st.area_chart(df_chart)
st.bar_chart(df_chart)
# NOTE(review): st.pyplot expects a matplotlib figure, not a DataFrame —
# left disabled on purpose.
# st.pyplot(df_chart)
## Display a map (random points around Tokyo: lat 35.69, lon 139.70)
df_map = pd.DataFrame(
    np.random.rand(100, 2)/[50, 50] + [35.69, 139.70],
    columns = {"lat", "lon"}
)
st.map(df_map)
## Display an image
img = Image.open("test.jpg")
st.image(img, caption="Test Image", use_column_width=True)
## Display a checkbox
if st.checkbox("Show Hello") == True:
    st.write("Hello")
## Display a select box
option = st.selectbox(
    "あなたが好きな数字を教えてください",
    list(range(1, 10))
)
"あなたの好きな数字は、", option, "です。"
## Display a text input
text = st.text_input("あなたの趣味を教えてください。")
"あなたの趣味:", text
## Display a slider
condition = st.slider("あなたの今の調子は?", 0, 100, 50)
"コンディション:", condition
## Add widgets to the sidebar
st.sidebar.write("サイドバーです")
text2 = st.sidebar.text_input("あなたの趣味を教えてください。2")
condition2 = st.sidebar.slider("あなたの今の調子は?2", 0, 100, 50)
"あなたの趣味:", text2
"コンディション:", condition2
## Two-column layout
left_column, right_column = st.beta_columns(2)
button = left_column.button("右カラムに文字を表示")
if button:
    right_column.write("ここは右カラム")
## Expanders
"エクスパンダーの表示"
expander1 = st.beta_expander("問い合わせ1")
expander1.write("問い合わせ1回答")
expander2 = st.beta_expander("問い合わせ2")
expander2.write("問い合わせ2回答")
expander3 = st.beta_expander("問い合わせ3")
expander3.write("問い合わせ3回答")
## Progress bar
st.write("プログレスバーの表示")
"Start!!"
latest_iteration = st.empty()
bar = st.progress(0) # 0: 0-100, 0.0: 0.0-1.0
for i in range(100):
    latest_iteration.text(f"Iteration{i+1}")
    bar.progress(i+1)
    time.sleep(0.1)
"Done"
| true |
f33a2bb142497a06015bff6cc6a20eed6f974927 | Python | pom2ter/immortal | /monster.py | UTF-8 | 14,501 | 2.625 | 3 | [] | no_license | import libtcodpy as libtcod
import math
import copy
import game
import util
import mapgen
import item
class Monster(object):
    """A monster archetype and its in-game behaviour (Python 2 module).

    Instances serve both as templates parsed from data/monsters.txt and
    as live combatants.  Position is stored on the owning map object, so
    per-turn methods take x/y explicitly.  Heavily coupled to the global
    `game` state (current_map, player, fov_map, message log, RNG).
    """
    def __init__(self, typ, name, unid_name, icon, color, dark_color, level, health, damage, article, ar, dr, weight, corpse, flags):
        self.type = typ
        self.name = name
        self.unidentified_name = unid_name
        self.icon = icon
        self.color = libtcod.Color(color[0], color[1], color[2])
        self.dark_color = libtcod.Color(dark_color[0], dark_color[1], dark_color[2])
        self.level = level
        # NOTE(review): health is a Dice object on templates but treated as
        # an int by take_damage()/is_dead() — presumably rolled at spawn time.
        self.health = health
        self.damage = damage
        self.article = article
        self.attack_rating = ar
        self.defense_rating = dr
        self.weight = weight
        # percent chance (0-100) of leaving a corpse, see loot()
        self.corpse = corpse
        self.flags = flags
    # monster attacks the enemy
    def attack(self):
        # Opposed d50 rolls: natural 1 always fumbles, natural 50 always
        # hits; a disabled player is hit automatically.
        attacker = util.roll_dice(1, 50)
        defender = util.roll_dice(1, 50)
        if (attacker != 1 and defender != 50 and ((attacker + self.attack_rating) >= (defender + game.player.defense_rating()) or attacker == 50 or defender == 1)) or game.player.is_disabled():
            damage = self.damage.roll_dice()
            game.message.new(self.article.capitalize() + self.get_name() + ' hits you for ' + str(damage) + ' pts of damage.', game.turns, libtcod.light_red)
            game.player.take_damage(damage, self.article + self.name)
        else:
            game.message.new(self.article.capitalize() + self.get_name() + ' attacks you but misses.', game.turns)
    # monster becomes hostile
    def becomes_hostile(self):
        self.flags.append('ai_hostile')
        self.flags[:] = (value for value in self.flags if value != 'ai_neutral' and value != 'ai_friendly')
    # returns true if monster can move
    def can_move(self, x, y, include_mob=False):
        # Terrain rules: land monsters stay out of deep water, aquatic
        # monsters stay in it; optionally also check for blocking objects.
        if 'stuck' in self.flags:
            return False
        if game.current_map.tile[x][y]['name'] in ['deep water', 'very deep water'] and 'land' in self.flags:
            return False
        if game.current_map.tile[x][y]['name'] not in ['deep water', 'very deep water'] and 'aquatic' in self.flags:
            return False
        if include_mob:
            objects = []
            objects[:] = (obj for obj in game.current_map.objects if obj.y == y and obj.x == x and obj.blocks)
            return not objects
        return True
    # checks monster condition each turn
    def check_condition(self, x, y):
        # Each temporary condition has a 1-in-10 chance per turn to wear off.
        if 'stuck' in self.flags:
            dice = util.roll_dice(1, 10)
            if dice == 10:
                self.flags.remove('stuck')
        if 'poison' in self.flags:
            dice = util.roll_dice(1, 10)
            if dice == 10:
                self.flags.remove('poison')
            else:
                self.take_damage(x, y, 1, 'poison')
        if 'sleep' in self.flags:
            dice = util.roll_dice(1, 10)
            if dice == 10:
                if libtcod.map_is_in_fov(game.fov_map, x, y):
                    game.message.new('The ' + self.get_name() + 'woke up.', game.turns)
                self.flags.remove('sleep')
    # determines monster distance to player
    def distance_to_player(self, player, x, y):
        dx = player.x - x
        dy = player.y - y
        return math.sqrt(dx ** 2 + dy ** 2)
    # return monster's name base of identity level
    def get_name(self, article=False):
        string = ''
        if article:
            string = self.article
        if "identified" in self.flags:
            string += self.name
        else:
            string += self.unidentified_name + '(?)'
        return string
    # find xp value base on attributes
    def give_xp(self):
        # XP is a weighted sum of combat stats scaled by level.
        xp = self.attack_rating * 0.33
        xp += self.defense_rating * 0.33
        xp += self.health * 0.33
        xp += (self.damage.nb_dices * self.damage.nb_faces + self.damage.bonus) * 0.33
        xp *= self.level
        return int(xp)
    # returns true if monster is not touching the ground
    def is_above_ground(self):
        if 'flying' in self.flags:
            return True
        return False
    # returns true if monster is dead
    def is_dead(self):
        if self.health < 1:
            return True
        return False
    # returns true if monster is disabled
    def is_disabled(self):
        if any(i in self.flags for i in ['sleep', 'unconscious']) or self.is_dead():
            return True
        return False
    # returns true if monster is hostile
    def is_hostile(self):
        if 'ai_hostile' in self.flags:
            return True
        return False
    # return true if monster is identified
    def is_identified(self):
        if 'identified' in self.flags:
            return True
        return False
    # see if monster drops an item or a corpse when dying
    def loot(self, x, y):
        # Corpse drop uses self.corpse as a percent chance; independent
        # 21% chance (roll >= 80 on d100) for an item drop.
        corpse = util.roll_dice(1, 100)
        if corpse <= self.corpse:
            d = game.baseitems.create_corpse(self.name, self.weight)
            drop = mapgen.Object(x, y, d.icon, d.name, d.color, True, item=d)
            game.current_map.objects.append(drop)
            drop.send_to_back()
        drop_chance = util.roll_dice(1, 100)
        if drop_chance >= 80:
            drop = game.baseitems.loot_generation(x, y, self.level)
            game.current_map.objects.append(drop)
            drop.send_to_back()
    # monster move towards player
    def move_towards_player(self, player, x, y):
        #vector from this object to the target, and distance
        dx = player.x - x
        dy = player.y - y
        distance = math.sqrt(dx ** 2 + dy ** 2)
        #normalize it to length 1 (preserving direction), then round it and convert to integer so the movement is restricted to the map grid
        dx = int(round(dx / distance))
        dy = int(round(dy / distance))
        while game.current_map.tile_is_blocked(x + dx, y + dy):
            # Destination blocked: fall back to a random step.
            if dx == 0 and dy == 0:
                break
            dx, dy = libtcod.random_get_int(game.rnd, -1, 1), libtcod.random_get_int(game.rnd, -1, 1)
        return dx, dy
    # monster takes damage
    def take_damage(self, x, y, damage, source, show=False):
        self.health -= damage
        if libtcod.map_is_in_fov(game.fov_map, x, y):
            # Queue a floating damage-number animation.
            game.hp_anim.append({'x': x, 'y': y, 'damage': str(-damage), 'color': libtcod.light_yellow, 'turns': 0, 'icon': self.icon, 'icon_color': libtcod.red})
        if source == 'player':
            if "sleep" in self.flags:
                if libtcod.map_is_in_fov(game.fov_map, x, y):
                    game.message.new('The ' + self.get_name() + 'woke up.', game.turns)
                self.flags.remove('sleep')
        if show:
            if libtcod.map_is_in_fov(game.fov_map, x, y):
                game.message.new(self.article.capitalize() + self.get_name() + ' is hit by ' + source + ' for ' + str(damage) + ' pts of damage!', game.turns)
            elif not self.is_dead():
                game.message.new('You hear a scream.', game.turns)
    # monster takes its turn
    def take_turn(self, x, y):
        # Hostile + visible: close in on the player or attack when adjacent.
        if libtcod.map_is_in_fov(game.fov_map, x, y) and self.is_hostile():
            #move towards player if far away
            dx, dy = 0, 0
            if self.distance_to_player(game.char, x, y) >= 2:
                dx, dy = self.move_towards_player(game.char, x, y)
                if not self.can_move(x + dx, y + dy, True):
                    dx, dy = 0, 0
            else:
                self.attack()
        else:
            # Otherwise wander randomly, clamped to the map bounds.
            dx, dy = libtcod.random_get_int(game.rnd, -1, 1), libtcod.random_get_int(game.rnd, -1, 1)
            if x + dx < 0 or x + dx >= game.current_map.map_width:
                dx = 0
            if y + dy < 0 or y + dy >= game.current_map.map_height:
                dy = 0
            if not self.can_move(x + dx, y + dy, True):
                dx, dy = 0, 0
            # Neutral monsters near the player have a 10% chance per turn to
            # turn hostile; hostile-but-neutral ones may calm down again.
            if all(i == 'ai_neutral' and i != 'ai_hostile' for i in self.flags):
                if self.distance_to_player(game.char, x, y) <= 2:
                    turn_hostile = util.roll_dice(1, 100)
                    if turn_hostile <= 10:
                        self.flags.append('ai_hostile')
            elif all(i in self.flags for i in ['ai_neutral', 'ai_hostile']):
                return_neutral = util.roll_dice(1, 100)
                if return_neutral <= 10:
                    self.flags[:] = (value for value in self.flags if value != 'ai_hostile')
        #retry if destination is blocked
        while game.current_map.tile_is_blocked(x + dx, y + dy):
            if dx == 0 and dy == 0:
                break
            dx, dy = libtcod.random_get_int(game.rnd, -1, 1), libtcod.random_get_int(game.rnd, -1, 1)
            if x + dx < 0 or x + dx >= game.current_map.map_width:
                dx = 0
            if y + dy < 0 or y + dy >= game.current_map.map_height:
                dy = 0
        return x + dx, y + dy
class MonsterList(object):
    """Registry of all monster templates, populated from data/monsters.txt.

    Provides level/terrain-filtered random selection for spawning.
    """
    def __init__(self):
        self.list = []
    # setup the items structure and run parser
    def init_parser(self):
        # Declares the libtcod config-file schema and parses
        # data/monsters.txt; MonsterListener receives the callbacks.
        parser = libtcod.parser_new()
        monster_type_struct = libtcod.parser_new_struct(parser, 'monster_type')
        libtcod.struct_add_property(monster_type_struct, 'type', libtcod.TYPE_STRING, True)
        libtcod.struct_add_property(monster_type_struct, 'unidentified_name', libtcod.TYPE_STRING, True)
        libtcod.struct_add_property(monster_type_struct, 'icon', libtcod.TYPE_STRING, True)
        libtcod.struct_add_property(monster_type_struct, 'icon_color', libtcod.TYPE_COLOR, True)
        libtcod.struct_add_property(monster_type_struct, 'icon_color2', libtcod.TYPE_COLOR, False)
        libtcod.struct_add_property(monster_type_struct, 'icon_color3', libtcod.TYPE_COLOR, False)
        libtcod.struct_add_property(monster_type_struct, 'icon_color4', libtcod.TYPE_COLOR, False)
        libtcod.struct_add_property(monster_type_struct, 'icon_color5', libtcod.TYPE_COLOR, False)
        libtcod.struct_add_property(monster_type_struct, 'dark_color', libtcod.TYPE_COLOR, True)
        libtcod.struct_add_property(monster_type_struct, 'level', libtcod.TYPE_INT, True)
        libtcod.struct_add_property(monster_type_struct, 'health', libtcod.TYPE_DICE, True)
        libtcod.struct_add_property(monster_type_struct, 'attack_rating', libtcod.TYPE_INT, True)
        libtcod.struct_add_property(monster_type_struct, 'defense_rating', libtcod.TYPE_INT, True)
        libtcod.struct_add_property(monster_type_struct, 'damage', libtcod.TYPE_DICE, True)
        libtcod.struct_add_property(monster_type_struct, 'article', libtcod.TYPE_STRING, True)
        libtcod.struct_add_property(monster_type_struct, 'weight', libtcod.TYPE_INT, False)
        libtcod.struct_add_property(monster_type_struct, 'corpse', libtcod.TYPE_INT, False)
        libtcod.struct_add_flag(monster_type_struct, 'ai_friendly')
        libtcod.struct_add_flag(monster_type_struct, 'ai_neutral')
        libtcod.struct_add_flag(monster_type_struct, 'ai_hostile')
        libtcod.struct_add_flag(monster_type_struct, 'identified')
        libtcod.struct_add_flag(monster_type_struct, 'all')
        libtcod.struct_add_flag(monster_type_struct, 'overworld')
        libtcod.struct_add_flag(monster_type_struct, 'underground')
        libtcod.struct_add_flag(monster_type_struct, 'dungeon')
        libtcod.struct_add_flag(monster_type_struct, 'cave')
        libtcod.struct_add_flag(monster_type_struct, 'maze')
        libtcod.struct_add_flag(monster_type_struct, 'land')
        libtcod.struct_add_flag(monster_type_struct, 'flying')
        libtcod.struct_add_flag(monster_type_struct, 'aquatic')
        libtcod.parser_run(parser, 'data/monsters.txt', MonsterListener())
    # add monster to the list
    def add_to_list(self, monster=None):
        if monster is not None:
            self.list.append(monster)
    # get a monster from the list
    def get_monster(self, name):
        # Exact (identified) name lookup; returns None when absent.
        for monster in self.list:
            if name == monster.name:
                return monster
        return None
    # choose a random monster based on its level
    def get_monster_by_level(self, level, tilename, type):
        # Filter candidates first by map type, then by terrain and by a
        # level window of [level - 6, level].
        if type == 'Dungeon':
            mob = [x for x in self.list if any(i in x.flags for i in ['dungeon', 'underground', 'all'])]
        elif type == 'Cave':
            mob = [x for x in self.list if any(i in x.flags for i in ['cave', 'underground', 'all'])]
        elif type == 'Maze':
            mob = [x for x in self.list if any(i in x.flags for i in ['maze', 'underground', 'all'])]
        else:
            mob = [x for x in self.list if any(i in x.flags for i in ['overworld', 'all'])]
        if tilename in ['deep water', 'very deep water']:
            mob = [x for x in mob if level - 6 <= x.level <= level and 'land' not in x.flags]
        else:
            mob = [x for x in mob if level - 6 <= x.level <= level and 'aquatic' not in x.flags]
        if mob:
            return mob[libtcod.random_get_int(game.rnd, 0, len(mob) - 1)]
        return None
    # returns the number of monsters on the map
    def number_of_monsters_on_map(self):
        return sum(obj.entity is not None for obj in game.current_map.objects)
    # spawn a monster
    def spawn(self):
        # Each call: if under the map's monster cap, 1-in-90 chance to
        # place a new batch of monsters.
        if self.number_of_monsters_on_map() < game.current_map.max_monsters:
            number = util.roll_dice(1, 90)
            if number == 1:
                game.current_map.place_monsters()
class MonsterListener(object):
    """libtcod parser listener that turns monsters.txt structs into Monster
    templates and registers them with the global game.monsters list.

    NOTE: this module is Python 2 (see the print statement in error()).
    """
    def new_struct(self, struct, name):
        # Start a fresh zeroed template; the struct name is the monster name.
        self.temp_monster = Monster('', '', '', '', [0, 0, 0], [0, 0, 0], 0, item.Dice(0, 0, 0, 0), item.Dice(0, 0, 0, 0), '', 0, 0, 0, 0, [])
        self.temp_monster.name = name
        return True
    def new_flag(self, name):
        self.temp_monster.flags.append(name)
        return True
    def new_property(self, name, typ, value):
        # Copy parsed values onto the template; colors and dice are copied
        # field by field because libtcod hands back its own value structs.
        if name == 'icon_color':
            self.temp_monster.color.r = value.r
            self.temp_monster.color.g = value.g
            self.temp_monster.color.b = value.b
        elif name == 'dark_color':
            self.temp_monster.dark_color.r = value.r
            self.temp_monster.dark_color.g = value.g
            self.temp_monster.dark_color.b = value.b
        elif name == 'damage':
            self.temp_monster.damage.nb_dices = value.nb_dices
            self.temp_monster.damage.nb_faces = value.nb_faces
            self.temp_monster.damage.multiplier = value.multiplier
            self.temp_monster.damage.bonus = value.addsub
        elif name == 'health':
            self.temp_monster.health.nb_dices = value.nb_dices
            self.temp_monster.health.nb_faces = value.nb_faces
            self.temp_monster.health.multiplier = value.multiplier
            self.temp_monster.health.bonus = value.addsub
        else:
            if name == 'type':
                self.temp_monster.type = value
            if name == 'icon':
                self.temp_monster.icon = value
            if name == 'level':
                self.temp_monster.level = value
            if name == 'attack_rating':
                self.temp_monster.attack_rating = value
            if name == 'defense_rating':
                self.temp_monster.defense_rating = value
            if name == 'weight':
                self.temp_monster.weight = value
            if name == 'corpse':
                self.temp_monster.corpse = value
            if name == 'article':
                self.temp_monster.article = value
            if name == 'unidentified_name':
                self.temp_monster.unidentified_name = value
        return True
    def end_struct(self, struct, name):
        # Finalize the template: derive dark color and health dice from the
        # level, auto-identify level-1 monsters, and for humanoids also
        # register four stronger skill-tier variants.
        skill_level = ['apprentice ', 'journeyman ', 'adept ', 'master ']
        self.temp_monster.dark_color = libtcod.color_lerp(libtcod.black, self.temp_monster.color, 0.3)
        self.temp_monster.health.nb_dices = self.temp_monster.level
        self.temp_monster.health.bonus = self.temp_monster.level
        if self.temp_monster.level == 1:
            self.temp_monster.flags.append('identified')
        game.monsters.add_to_list(self.temp_monster)
        if self.temp_monster.type == 'humanoid':
            for i in range(0, 4):
                self.new_monster = copy.deepcopy(self.temp_monster)
                self.new_monster.name = skill_level[i] + self.new_monster.name
                self.new_monster.level += (i * 2) + 1
                self.new_monster.attack_rating += ((i * 2) + 1) * 8
                self.new_monster.defense_rating += ((i * 2) + 1) * 8
                self.new_monster.health.nb_dices = self.new_monster.level
                self.new_monster.health.bonus = self.new_monster.level
                self.new_monster.damage.multiplier = i + 2
                game.monsters.add_to_list(self.new_monster)
        return True
    def error(self, msg):
        # Python 2 print statement: reports parser errors and keeps parsing.
        print 'error : ', msg
        return True
| true |
4613f1334f589546c2b4cd090f2891c4242090e5 | Python | CG2016/barkovsky_3 | /lab6/manipulation.py | UTF-8 | 13,332 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import argparse
import math
import tkinter as tk
import tkinter.messagebox as tk_messagebox
import PIL.ImageTk
import PIL.Image
import numpy as np
# Maximum on-screen thumbnail size (width, height) in pixels for both panes.
IMAGE_SIZE = (600, 400)
class ImageManipulationWindow:
    """Tk window showing an original grayscale image next to a modified copy.

    The left pane always shows the loaded image; the right pane shows the
    result of the most recent operation.  Point-wise operations (add,
    multiply, exponentiate, log, negate, contrast) chain on the current
    modified image; the thresholding operations (Bernsen, Niblack,
    adaptive) always start from the original image.
    """
    def __init__(self, image_path):
        self.root = tk.Tk()
        self.root.grid_columnconfigure(3, weight=1)
        self.root.grid_columnconfigure(7, weight=1)
        # 'L' = 8-bit grayscale; all operations below assume single-channel.
        self.image = PIL.Image.open(image_path).convert('L')
        self.label_original = tk.Label(self.root)
        self.label_original.grid(row=0, column=0, columnspan=4)
        self.label_modified = tk.Label(self.root)
        self.label_modified.grid(row=0, column=4, columnspan=4)
        self.set_original_image(self.image)
        self.set_modified_image(self.image)
        self.setup_pixelwise_operation_controls()
        self.setup_thresholding_operation_controls()
    def setup_pixelwise_operation_controls(self):
        """Build the left-hand column of point-wise operation widgets."""
        self.add_label = tk.Label(self.root, text='Add')
        self.add_label.grid(row=1, column=0, sticky='W')
        self.add_entry = tk.Entry(self.root)
        self.add_entry.grid(row=1, column=1, sticky='W')
        self.add_button = tk.Button(self.root, text='Perform',
                                    command=self.perform_addition)
        self.add_button.grid(row=1, column=2, sticky='W')
        self.multiply_label = tk.Label(self.root, text='Multiply')
        self.multiply_label.grid(row=2, column=0, sticky='W')
        self.multiply_entry = tk.Entry(self.root)
        self.multiply_entry.grid(row=2, column=1, sticky='W')
        self.multiply_button = tk.Button(self.root, text='Perform',
                                         command=self.perform_multiplication)
        self.multiply_button.grid(row=2, column=2, sticky='W')
        self.exponentiate_label = tk.Label(self.root, text='Exponentiate')
        self.exponentiate_label.grid(row=3, column=0, sticky='W')
        self.exponentiate_entry = tk.Entry(self.root)
        self.exponentiate_entry.grid(row=3, column=1, sticky='W')
        self.exponentiate_button = tk.Button(
            self.root, text='Perform', command=self.perform_exponentiation
        )
        self.exponentiate_button.grid(row=3, column=2, sticky='W')
        self.logarithm_label = tk.Label(self.root, text='Logarithm')
        self.logarithm_label.grid(row=4, column=0, sticky='W')
        self.logarithm_button = tk.Button(
            self.root, text='Perform', command=self.perform_logarithm
        )
        self.logarithm_button.grid(row=4, column=1, sticky='W')
        self.negation_label = tk.Label(self.root, text='Negate')
        self.negation_label.grid(row=5, column=0, sticky='W')
        self.negation_button = tk.Button(
            self.root, text='Perform', command=self.perform_negation
        )
        self.negation_button.grid(row=5, column=1, sticky='W')
        self.contrast_label = tk.Label(self.root, text='Contrast')
        self.contrast_label.grid(row=6, column=0, sticky='W')
        self.contrast_button = tk.Button(
            self.root, text='Perform', command=self.perform_contrasting
        )
        self.contrast_button.grid(row=6, column=1, sticky='W')
        self.reset_button = tk.Button(
            self.root, text='Reset', command=self.perform_reset
        )
        self.reset_button.grid(row=7, column=0, sticky='W')
    def setup_thresholding_operation_controls(self):
        """Build the right-hand column of thresholding operation widgets."""
        self.bernsen_label = tk.Label(
            self.root, text='Bernsen thresholding'
        )
        self.bernsen_label.grid(row=1, column=4, sticky='W')
        self.bernsen_button = tk.Button(
            self.root, text='Perform',
            command=self.perform_bernsen
        )
        self.bernsen_button.grid(row=1, column=5, sticky='W')
        self.niblack_label = tk.Label(
            self.root, text='Niblack thresholding'
        )
        self.niblack_label.grid(row=2, column=4, sticky='W')
        self.niblack_button = tk.Button(
            self.root, text='Perform',
            command=self.perform_niblack
        )
        self.niblack_button.grid(row=2, column=5, sticky='W')
        self.adaptive_label = tk.Label(
            self.root, text='Adaptive thresholding'
        )
        self.adaptive_label.grid(row=3, column=4, sticky='W')
        self.adaptive_button = tk.Button(
            self.root, text='Perform',
            command=self.perform_adaptive
        )
        self.adaptive_button.grid(row=3, column=5, sticky='W')
    def set_original_image(self, image):
        """Show *image* in the left pane (scaled to fit IMAGE_SIZE)."""
        # The PhotoImage must be kept referenced or Tk garbage-collects it.
        self._scaled_tk_image_original = PIL.ImageTk.PhotoImage(
            self.scale_image(image)
        )
        self.label_original.config(image=self._scaled_tk_image_original)
    def set_modified_image(self, image):
        """Show *image* in the right pane and remember it as the current result."""
        self.modified_image = image
        self._scaled_tk_image_modified = PIL.ImageTk.PhotoImage(
            self.scale_image(image)
        )
        self.label_modified.config(image=self._scaled_tk_image_modified)
    def perform_addition(self):
        """Add the entry value to every pixel, clamping to [0, 255]."""
        try:
            argument = float(self.add_entry.get())
        except ValueError:
            tk_messagebox.showerror('Error', 'Invalid argument')
            return
        original_pixels = list(self.modified_image.getdata())
        modified_pixels = [
            self.normalize_color_value(original_value + argument)
            for original_value in original_pixels
        ]
        self.show_modified_pixels(modified_pixels)
    def perform_multiplication(self):
        """Multiply every pixel by the entry value, clamping to [0, 255]."""
        try:
            argument = float(self.multiply_entry.get())
        except ValueError:
            tk_messagebox.showerror('Error', 'Invalid argument')
            return
        original_pixels = list(self.modified_image.getdata())
        modified_pixels = [
            self.normalize_color_value(original_value * argument)
            for original_value in original_pixels
        ]
        self.show_modified_pixels(modified_pixels)
    def perform_exponentiation(self):
        """Gamma-style transform: 255 * (v / max) ** argument per pixel."""
        try:
            argument = float(self.exponentiate_entry.get())
        except ValueError:
            tk_messagebox.showerror('Error', 'Invalid argument')
            return
        _, max_value = self.modified_image.getextrema()
        original_pixels = list(self.modified_image.getdata())
        modified_pixels = [
            self.normalize_color_value(
                255 * (original_value / max_value) ** argument
            )
            for original_value in original_pixels
        ]
        self.show_modified_pixels(modified_pixels)
    def perform_logarithm(self):
        """Log transform: 255 * log(v + 1) / log(max + 1) per pixel."""
        _, max_value = self.modified_image.getextrema()
        original_pixels = list(self.modified_image.getdata())
        modified_pixels = [
            self.normalize_color_value(
                255 * (math.log(original_value + 1) / math.log(max_value + 1))
            )
            for original_value in original_pixels
        ]
        self.show_modified_pixels(modified_pixels)
    def perform_negation(self):
        """Invert every pixel (255 - v)."""
        original_pixels = list(self.modified_image.getdata())
        modified_pixels = [
            self.normalize_color_value(255 - original_value)
            for original_value in original_pixels
        ]
        self.show_modified_pixels(modified_pixels)
    def perform_contrasting(self):
        """Linear contrast stretch mapping [min, max] onto [0, 255]."""
        min_value, max_value = self.modified_image.getextrema()
        coeff = 255 / (max_value - min_value)
        original_pixels = list(self.modified_image.getdata())
        modified_pixels = [
            self.normalize_color_value(
                coeff * (original_value - min_value)
            )
            for original_value in original_pixels
        ]
        self.show_modified_pixels(modified_pixels)
    def perform_bernsen(self):
        """Bernsen local thresholding on the ORIGINAL image.

        Splits the image into r x r tiles; low-contrast tiles are filled
        entirely black or white, otherwise each pixel is thresholded at
        the tile's (min + max) / 2.
        """
        r = 5
        min_contrast = 15
        pixel_array = np.array(self.image)
        vertical_split = np.split(pixel_array, range(r, self.image.height, r))
        segments = [
            np.split(rows, range(r, self.image.width, r), axis=1)
            for rows in vertical_split
        ]
        for segment_row in segments:
            for segment in segment_row:
                min_value = int(segment.min())
                max_value = int(segment.max())
                mid_value = (min_value + max_value) / 2
                if max_value - min_value <= min_contrast:
                    # Homogeneous tile: force it uniformly black or white.
                    if mid_value < 128:
                        fill_value = 0
                    else:
                        fill_value = 255
                    segment.fill(fill_value)
                else:
                    for i in range(segment.shape[0]):
                        for j in range(segment.shape[1]):
                            segment[i, j] = (
                                0 if segment[i, j] < mid_value else 255
                            )
        vertical_split = [
            np.concatenate(segment_row, axis=1)
            for segment_row in segments
        ]
        modified_pixels = np.concatenate(vertical_split)
        modified_image = PIL.Image.fromarray(modified_pixels)
        self.set_modified_image(modified_image)
    def perform_niblack(self):
        """Niblack thresholding: per-pixel threshold mean + k * stddev over
        an r x r window (clipped at the borders), on the ORIGINAL image."""
        r = 15
        k = -0.2
        def clipped_range(dimension_size, coord, window):
            radius = (window - 1) // 2
            min_coord = max(0, coord - radius)
            max_coord = min(dimension_size, coord + radius + 1)
            return slice(min_coord, max_coord)
        pixel_array = np.array(self.image)
        new_array = pixel_array.copy()
        for i in range(pixel_array.shape[0]):
            # Progress output on the console; this loop is slow in pure Python.
            print(i)
            for j in range(pixel_array.shape[1]):
                vertical_slice = clipped_range(pixel_array.shape[0], i, r)
                horizontal_slice = clipped_range(pixel_array.shape[1], j, r)
                segment = pixel_array[vertical_slice, horizontal_slice]
                mean = np.mean(segment)
                stddev = np.std(segment)
                threshold = mean + k * stddev
                new_array[i, j] = (
                    0 if pixel_array[i, j] < threshold else 255
                )
        modified_image = PIL.Image.fromarray(new_array)
        self.set_modified_image(modified_image)
    def perform_adaptive(self):
        """Adaptive thresholding with a growing window, on the ORIGINAL image.

        For each pixel the window radius grows from k until the local
        extremes disambiguate which side of the mean dominates; the
        threshold then biases towards the minimum per the alpha weights.
        """
        k = 3
        alpha = 2 / 3
        def clipped_range(dimension_size, coord, radius):
            min_coord = max(0, coord - radius)
            max_coord = min(dimension_size, coord + radius + 1)
            return slice(min_coord, max_coord)
        pixel_array = np.array(self.image)
        new_array = pixel_array.copy()
        for i in range(pixel_array.shape[0]):
            # Progress output on the console; this loop is slow in pure Python.
            print(i)
            for j in range(pixel_array.shape[1]):
                current_k = k
                threshold = None
                while True:
                    vertical_slice = clipped_range(
                        pixel_array.shape[0], i, current_k
                    )
                    horizontal_slice = clipped_range(
                        pixel_array.shape[1], j, current_k
                    )
                    segment = pixel_array[vertical_slice, horizontal_slice]
                    mean = np.mean(segment)
                    min_value = segment.min()
                    max_value = segment.max()
                    df_max = abs(max_value - mean)
                    df_min = abs(min_value - mean)
                    if df_max == df_min:
                        if min_value != max_value:
                            # Ambiguous window: widen it and retry.
                            current_k += 1
                            continue
                        else:
                            threshold = alpha * mean
                            break
                    else:
                        if df_max > df_min:
                            threshold = alpha * (
                                2 / 3 * min_value +
                                1 / 3 * mean
                            )
                        else:
                            threshold = alpha * (
                                1 / 3 * min_value +
                                2 / 3 * mean
                            )
                        break
                new_array[i, j] = (
                    0 if pixel_array[i, j] < threshold else 255
                )
        modified_image = PIL.Image.fromarray(new_array)
        self.set_modified_image(modified_image)
    def perform_reset(self):
        """Discard all modifications and show the original image again."""
        self.set_modified_image(self.image)
    def show_modified_pixels(self, modified_pixels):
        """Wrap a flat pixel list into a grayscale image and display it."""
        modified_image = PIL.Image.new(
            'L', (self.image.width, self.image.height)
        )
        modified_image.putdata(modified_pixels)
        self.set_modified_image(modified_image)
    @staticmethod
    def normalize_color_value(value):
        """Round *value* and clamp it into the valid 8-bit range [0, 255]."""
        value = round(value)
        if value > 255:
            value = 255
        elif value < 0:
            value = 0
        return value
    @staticmethod
    def scale_image(image):
        """Return a copy of *image* shrunk to fit within IMAGE_SIZE."""
        scaled_image = image.copy()
        scaled_image.thumbnail(IMAGE_SIZE)
        return scaled_image
def main():
    """Parse the required image-path argument and run the Tk window."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('image_path', type=str, help='Path to the image file')
    parsed = arg_parser.parse_args()
    app = ImageManipulationWindow(parsed.image_path)
    app.root.mainloop()
# Launch the GUI when executed as a script.
if __name__ == '__main__':
    main()
| true |
e21a078e0e52b270bd9a0d39f9dbb0927def7a39 | Python | robyparr/barmycodes | /barmycodes/barmycodes.py | UTF-8 | 3,565 | 2.5625 | 3 | [
"MIT"
] | permissive | from flask import Flask, render_template, request, Response, redirect
from reportlab.pdfgen import canvas
from reportlab.lib.utils import ImageReader
from io import BytesIO
from .models.barcode import Barcode
from .config import Config
# Setup Flask app and load settings from the project's Config object.
app = Flask(__name__)
app.config.from_object(Config)
def _get_barcodes(args, ignore_dimensions=True):
    """Build Barcode objects from request query arguments.

    Reads every 'b[]' value (empty strings are skipped), the symbology
    from 'type' (anything other than Code128/QR falls back to Code128),
    and — unless ignore_dimensions is True — the 'width'/'height'/
    'measurement' sizing parameters ('mm' is the default unit).
    Utility method for the routes.
    """
    values = args.getlist('b[]')
    code_type = args.get('type', 'Code128')
    if code_type not in ('Code128', 'QR'):
        code_type = 'Code128'
    unit = args.get('measurement')
    width = args.get('width')
    height = args.get('height')
    use_dimensions = not ignore_dimensions
    barcodes = []
    for value in values:
        if not value:
            # Blank form rows produce empty values; skip them.
            continue
        barcodes.append(Barcode(
            code_type,
            value,
            width=width if width and use_dimensions else None,
            height=height if height and use_dimensions else None,
            unit=unit if unit in ('inch', 'mm') and use_dimensions else 'mm',
        ))
    return barcodes
# Routes
@app.route("/", methods=['GET'])
def index():
    """Render the app page, with barcodes when 'b[]' query params are set."""
    codes = _get_barcodes(request.args)
    # Template context: the barcode objects plus the raw values for the form.
    context = {
        'barcodes': codes,
        'barcode_values': '\n'.join(code.text_value for code in codes),
    }
    return render_template('index.html', data=context)
@app.route("/png", methods=['GET'])
def png():
    """Serve a single barcode as a downloadable PNG.

    The value comes from the 'b[]' query parameter and the symbology
    from 'type' (defaulting to Code128); with no value the client is
    redirected back to the main page.
    """
    value = request.args.get('b[]')
    if not value:
        # Nothing to encode.
        return redirect('/')
    code_type = request.args.get('type', 'Code128')
    if code_type not in ('Code128', 'QR'):
        code_type = 'Code128'
    image_bytes = Barcode(code_type, value).as_string('png')
    response = Response(image_bytes, mimetype='image/png')
    response.headers['Content-Disposition'] = 'attachment; filename=barmycodes.png'
    return response
@app.route("/pdf", methods=['GET'])
def pdf():
    """Renders a PDF with barcodes generated from the
    querystring params 'b[].'

    Each barcode is placed on its own PDF page sized to the barcode's
    own width/height (dimensions are honoured here, unlike index()).
    """
    barcodes = _get_barcodes(request.args, ignore_dimensions=False)
    # Start PDF generation; the document is built in memory.
    buffer = BytesIO()
    pdf = canvas.Canvas(buffer)
    pdf.setTitle('barmycodes.pdf')
    # Generate barcodes and add to PDF, one page per barcode.
    for barcode in barcodes:
        # Add the barcode to the PDF at a 1-unit offset from the origin.
        barcode_buffer = BytesIO(barcode.as_string('png'))
        pdf.drawImage(ImageReader(barcode_buffer), 1, 1)
        barcode_buffer.close()
        # setPageSize before showPage so the size applies to the page
        # just drawn -- NOTE(review): relies on reportlab applying the
        # size at page emission; confirm against reportlab docs.
        pdf.setPageSize((barcode.width, barcode.height))
        pdf.showPage()
    pdf.save()
    response = Response(
        buffer.getvalue(),
        mimetype='application/pdf',
    )
    # 'inline' so the PDF opens in the browser rather than downloading.
    response.headers['Content-Disposition'] = 'inline; filename=barmycodes.pdf'
    buffer.close()
    return response
@app.route('/privacy', methods=['GET'])
def privacy():
    """Render the static privacy-policy page."""
    return render_template('privacy.html', data='')
# Run the app
if __name__ == "__main__":
    # Development entry point; use a proper WSGI server in production.
    app.run()
| true |
b6f745f543567e2954b169998f560e4ec06b6ccc | Python | TwitchPlaysPokemon/pokecat | /pokecat_test.py | UTF-8 | 30,377 | 2.65625 | 3 | [] | no_license |
import json
import os
import unittest
import warnings
from copy import deepcopy
import yaml
import pokecat
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def load_test_docs(name):
    """Load all YAML documents from ``testdocuments/<name>.yaml``.

    :param name: basename of the test document (without extension).
    :return: list of the parsed documents (one per ``---`` section).
    """
    path = os.path.join(ROOT_DIR, "testdocuments", "{}.yaml".format(name))
    with open(path, encoding="utf-8") as f:
        # safe_load_all restricts parsing to plain YAML types, avoiding
        # arbitrary object construction and the PyYAML deprecation
        # warning for load() without an explicit Loader.
        return list(yaml.safe_load_all(f))
def load_test_doc(name):
    """Load a single YAML document from ``testdocuments/<name>.yaml``.

    :param name: basename of the test document (without extension).
    :return: the parsed document (typically a dict).
    """
    path = os.path.join(ROOT_DIR, "testdocuments", "{}.yaml".format(name))
    with open(path, encoding="utf-8") as f:
        # safe_load avoids arbitrary object construction and the PyYAML
        # deprecation warning for load() without an explicit Loader.
        return yaml.safe_load(f)
def load_test_doc_json(name):
    """Load and parse ``testdocuments/<name>.json``."""
    json_path = os.path.join(ROOT_DIR, "testdocuments", name + ".json")
    with open(json_path, encoding="utf-8") as doc_file:
        return json.load(doc_file)
class PokecatTester(unittest.TestCase):
    """Unit tests for pokecat's pokeset population and instantiation.

    Most tests load the ``_template`` YAML document, tweak a single
    field, and assert that ``pokecat.populate_pokeset`` either accepts
    the set, emits a specific warning, or rejects it with a specific
    ValueError. A few tests also exercise ``instantiate_pokeset`` and
    the gen1/gen4 data lookups.
    """
    def test_load(self):
        # just make sure loading the testdocs even works
        doc = load_test_doc("dummy")
        self.assertEqual(doc, {"a": 1, "b": "a", "c": None})
    def test_warning_lowercase_keys(self):
        doc = load_test_doc("_template")
        doc["Species"] = doc["species"]
        del doc["species"]
        with self.assertWarnsRegex(UserWarning, r"Key should be all lowercase: Species"):
            pokecat.populate_pokeset(doc)
    def test_fields_missing(self):
        doc = load_test_doc("_template")
        del doc["evs"]
        with self.assertRaisesRegex(ValueError, r"pokeset is missing obligatory fields: evs"):
            pokecat.populate_pokeset(doc)
    def test_unknown_fields(self):
        doc = load_test_doc("_template")
        doc["foobar"] = 1
        with self.assertRaisesRegex(ValueError, r"pokeset has unrecognized fields: foobar"):
            pokecat.populate_pokeset(doc)
    def test_too_long_ingamename(self):
        doc = load_test_doc("_template")
        doc["ingamename"] = "BULBASAURRRR"
        with self.assertRaisesRegex(ValueError, r"ingamename must be between 1 and 10 characters long: BULBASAURRRR"):
            pokecat.populate_pokeset(doc)
    def test_default_ingamename(self):
        doc = load_test_doc("_template")
        doc["species"] = "Typhlosion"
        if "ingamename" in doc: del doc["ingamename"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["ingamename"], "TYPHLOSION")
    def test_default_shiny_ingamename(self):
        # shiny sets get a "-S" suffix, truncating the species name to fit
        doc = load_test_doc("_template")
        doc["species"] = "Typhlosion"
        doc["shiny"] = True
        if "ingamename" in doc: del doc["ingamename"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["ingamename"], "TYPHLOSI-S")
    def test_empty_setname(self):
        doc = load_test_doc("_template")
        doc["setname"] = ""
        with self.assertRaisesRegex(ValueError, r"setname must be a non-empty string"):
            pokecat.populate_pokeset(doc)
    def test_optional_fields_overwrite(self):
        # test if supplied optional fields don't get overwritten.
        # just test with rarity
        doc = load_test_doc("_template")
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["rarity"], 1.0)
        doc["rarity"] = 4.2
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["rarity"], 4.2)
    def test_species_number(self):
        doc = load_test_doc("_template")
        doc["species"] = 151
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["species"]["name"], "Mew")
    def test_species_name(self):
        doc = load_test_doc("_template")
        doc["species"] = "Mew"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["species"]["id"], 151)
    def test_invalid_species_number(self):
        doc = load_test_doc("_template")
        doc["species"] = 494  # Victini, not gen 4
        with self.assertRaisesRegex(ValueError, r"Invalid species number: 494"):
            pokecat.populate_pokeset(doc)
    def test_misspelled_species_name(self):
        doc = load_test_doc("_template")
        doc["species"] = "Groundon"  # common spelling mistake
        with self.assertWarnsRegex(UserWarning, r"Didn't recognize species Groundon, but assumed Groudon."):
            result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["species"]["name"], "Groudon")
    def test_invalid_species_name(self):
        doc = load_test_doc("_template")
        doc["species"] = "BEST"
        with self.assertRaisesRegex(ValueError, r"Unrecognized species: BEST"):
            pokecat.populate_pokeset(doc)
    def test_ability(self):
        doc = load_test_doc("_template")
        doc["ability"] = "Pressure"
        result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual([a["name"] for a in result["ability"]], ["Pressure"])
    def test_ability_list(self):
        doc = load_test_doc("_template")
        doc["ability"] = ["Pressure", "Static"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual([a["name"] for a in result["ability"]], ["Pressure", "Static"])
    def test_duplicate_ability_list(self):
        doc = load_test_doc("_template")
        doc["ability"] = ["Pressure", "Pressure"]
        with self.assertRaisesRegex(ValueError, r"All abilities supplied must be unique: Pressure, Pressure"):
            pokecat.populate_pokeset(doc)
    def test_misspelled_ability(self):
        doc = load_test_doc("_template")
        doc["ability"] = "Presure"  # some spelling mistake
        with self.assertWarnsRegex(UserWarning, r"Didn't recognize ability Presure, but assumed Pressure."):
            result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual([a["name"] for a in result["ability"]], ["Pressure"])
    def test_invalid_ability(self):
        doc = load_test_doc("_template")
        doc["ability"] = "Invincibility"  # doesn't exist
        with self.assertRaisesRegex(ValueError, "Unrecognized ability: Invincibility"):
            pokecat.populate_pokeset(doc)
    def test_no_ability(self):
        # null ability is represented as a single id-0 placeholder entry
        doc = load_test_doc("_template")
        doc["ability"] = None
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["ability"], [{"id": 0, "description": "", "name": None}])
    def test_item(self):
        doc = load_test_doc("_template")
        doc["item"] = "Sitrus Berry"
        result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual([i["name"] for i in result["item"]], ["Sitrus Berry"])
    def test_item_list(self):
        doc = load_test_doc("_template")
        doc["item"] = ["Sitrus Berry", "Elixir"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual([i["name"] for i in result["item"]], ["Sitrus Berry", "Elixir"])
    def test_duplicate_item_list(self):
        doc = load_test_doc("_template")
        doc["item"] = ["Sitrus Berry", "Sitrus Berry"]
        with self.assertRaisesRegex(ValueError, r"All items supplied must be unique: Sitrus Berry, Sitrus Berry"):
            pokecat.populate_pokeset(doc)
    def test_misspelled_item(self):
        doc = load_test_doc("_template")
        doc["item"] = "Citrus Berry"  # some spelling mistake
        with self.assertWarnsRegex(UserWarning, r"Didn't recognize item Citrus Berry, but assumed Sitrus Berry."):
            result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual([i["name"] for i in result["item"]], ["Sitrus Berry"])
    def test_invalid_item(self):
        doc = load_test_doc("_template")
        doc["item"] = "Ice Cream"  # doesn't exist, how sad
        with self.assertRaisesRegex(ValueError, "Unrecognized item: Ice Cream"):
            pokecat.populate_pokeset(doc)
    def test_no_item(self):
        doc = load_test_doc("_template")
        doc["item"] = None
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["item"], [{"id": 0, "description": "", "name": None}])
    def test_no_item_in_list(self):
        # a null entry may be mixed with real items (unlike genders)
        doc = load_test_doc("_template")
        doc["item"] = [None, "Sitrus Berry"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["item"], [{"id": 0, "description": "", "name": None}, {"id": 158, "description": "", "name": "Sitrus Berry"}])
    def test_ball(self):
        doc = load_test_doc("_template")
        doc["ball"] = "Master"
        result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual([b["name"] for b in result["ball"]], ["Master Ball"])
    def test_ball_list(self):
        doc = load_test_doc("_template")
        doc["ball"] = ["Master", "Ultra"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual([b["name"] for b in result["ball"]], ["Master Ball", "Ultra Ball"])
    def test_duplicate_ball_list(self):
        doc = load_test_doc("_template")
        doc["ball"] = ["Master", "Master"]
        with self.assertRaisesRegex(ValueError, r"All balls supplied must be unique: Master Ball, Master Ball"):
            pokecat.populate_pokeset(doc)
    def test_misspelled_ball(self):
        doc = load_test_doc("_template")
        doc["ball"] = "Mastor"  # misspelled
        with self.assertWarnsRegex(UserWarning, r"Didn't recognize ball Mastor, but assumed Master Ball."):
            result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual([b["name"] for b in result["ball"]], ["Master Ball"])
    def test_misspell_ignore_accents(self):
        doc = load_test_doc("_template")
        doc["ball"] = "Poke"  # missing accent, should be still okay
        with warnings.catch_warnings(record=True) as w:
            result = pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
        # gets populated as an array
        self.assertEqual([b["name"] for b in result["ball"]], ["Poké Ball"])
    def test_invalid_ball(self):
        doc = load_test_doc("_template")
        doc["ball"] = "Iron"  # Iron Ball isn't a Pokéball
        with self.assertRaisesRegex(ValueError, "Unrecognized ball: Iron"):
            pokecat.populate_pokeset(doc)
    def test_gender(self):
        doc = load_test_doc("_template")
        doc["gender"] = "m"
        result = pokecat.populate_pokeset(doc)
        # gets populated as an array
        self.assertEqual(result["gender"], ["m"])
    def test_gender_list(self):
        doc = load_test_doc("_template")
        doc["gender"] = ["m", "f"]
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["gender"], ["m", "f"])
    def test_duplicate_gender_list(self):
        doc = load_test_doc("_template")
        doc["gender"] = ["m", "m"]
        with self.assertRaisesRegex(ValueError, r"All genders supplied must be unique: m, m"):
            pokecat.populate_pokeset(doc)
    def test_invalid_gender(self):
        doc = load_test_doc("_template")
        doc["gender"] = "w"
        with self.assertRaisesRegex(ValueError, r"gender can only be 'm', 'f' or not set \(null\), but not w"):
            pokecat.populate_pokeset(doc)
    def test_mixed_genders(self):
        doc = load_test_doc("_template")
        doc["gender"] = ["m", None]
        with self.assertRaisesRegex(ValueError, r"non-gender cannot be mixed with m/f"):
            pokecat.populate_pokeset(doc)
    def test_level(self):
        doc = load_test_doc("_template")
        doc["level"] = 100
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["level"], 100)
    def test_invalid_level(self):
        # both bounds of the 1-100 range are checked
        doc = load_test_doc("_template")
        doc["level"] = 101
        with self.assertRaisesRegex(ValueError, r"level must be a number between 1 and 100"):
            pokecat.populate_pokeset(doc)
        doc["level"] = 0
        with self.assertRaisesRegex(ValueError, r"level must be a number between 1 and 100"):
            pokecat.populate_pokeset(doc)
    def test_nature(self):
        doc = load_test_doc("_template")
        doc["nature"] = "Timid"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["nature"]["name"], "Timid")
    def test_nature_by_effect(self):
        # natures may be given by their stat effect instead of by name
        doc = load_test_doc("_template")
        doc["nature"] = "+spA -spe"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["nature"]["increased"], "spA")
        self.assertEqual(result["nature"]["decreased"], "spe")
        self.assertEqual(result["nature"]["name"], "Quiet")
    def test_misspelled_nature(self):
        doc = load_test_doc("_template")
        doc["nature"] = "Quiot"  # spelling mistake
        with self.assertWarnsRegex(UserWarning, r"Didn't recognize nature Quiot, but assumed Quiet."):
            result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["nature"]["name"], "Quiet")
    def test_invalid_nature(self):
        doc = load_test_doc("_template")
        doc["nature"] = "Brutal"  # doesn't exist
        with self.assertRaisesRegex(ValueError, r"Unrecognized nature: Brutal"):
            pokecat.populate_pokeset(doc)
    def test_ivs_short(self):
        # a single number is expanded to all six stats
        doc = load_test_doc("_template")
        val = 16
        doc["ivs"] = val
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["ivs"], {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val})
    def test_evs_short(self):
        doc = load_test_doc("_template")
        val = 16
        doc["evs"] = val
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["evs"], {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val})
    def test_ivs(self):
        doc = load_test_doc("_template")
        val = 16
        doc["ivs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["ivs"], {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val})
    def test_evs(self):
        doc = load_test_doc("_template")
        val = 16
        doc["evs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["evs"], {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val})
    def test_missing_iv(self):
        doc = load_test_doc("_template")
        val = 16
        # no hp
        doc["ivs"] = {"atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        with self.assertRaisesRegex(ValueError, r"ivs must contain the following keys: hp, atk, def, spA, spD, spe"):
            pokecat.populate_pokeset(doc)
    def test_missing_ev(self):
        doc = load_test_doc("_template")
        val = 16
        # no atk
        doc["evs"] = {"hp": val, "def": val, "spA": val, "spD": val, "spe": val}
        with self.assertRaisesRegex(ValueError, r"evs must contain the following keys: hp, atk, def, spA, spD, spe"):
            pokecat.populate_pokeset(doc)
    def test_too_many_evs_single(self):
        # any single EV above 252 is rejected
        doc = load_test_doc("_template")
        val = 510
        doc["evs"] = {"atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["evs"]["hp"] = 253
        with self.assertRaisesRegex(ValueError, r"All EVs must be <= 252."):
            pokecat.populate_pokeset(doc)
    def test_too_many_evs_total(self):
        # a total above 510 is rejected even if each single EV is valid
        doc = load_test_doc("_template")
        val = 510//6
        doc["evs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["evs"]["hp"] += 1
        with self.assertRaisesRegex(ValueError, r"Sum of EV must not be larger than 510, but is 511"):
            pokecat.populate_pokeset(doc)
    def test_wasted_evs(self):
        doc = load_test_doc("_template")
        val = 16
        doc["evs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["evs"]["hp"] = 15
        # TODO warning is currently disabled globally, as it is annoying and doesn't help with anything
        # with self.assertWarnsRegex(UserWarning, r"EV for hp is 15, which is not a multiple of 4 \(wasted points\)"):
        #    pokecat.populate_pokeset(doc)
    def test_rarity(self):
        doc = load_test_doc("_template")
        doc["rarity"] = 0.5
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["rarity"], 0.5)
    def test_negative_rarity(self):
        doc = load_test_doc("_template")
        doc["rarity"] = -0.1
        with self.assertRaisesRegex(ValueError, r"rarity must be a number greater or equal to 0.0"):
            pokecat.populate_pokeset(doc)
    def test_judgment(self):
        # Judgment's type follows the held Plate item
        doc = load_test_doc("_template")
        doc["item"] = "Flame Plate"
        doc["moves"] = ["Judgment"]
        resultset = pokecat.populate_pokeset(doc)
        result = pokecat.instantiate_pokeset(resultset)
        self.assertEqual(result["moves"][0]["type"], "Fire")
    def test_natural_gift(self):
        # Natural Gift's type and power follow the held Berry
        doc = load_test_doc("_template")
        doc["item"] = "Colbur Berry"
        doc["moves"] = ["Natural Gift"]
        resultset = pokecat.populate_pokeset(doc)
        result = pokecat.instantiate_pokeset(resultset)
        self.assertEqual(result["moves"][0]["type"], "Dark")
        self.assertEqual(result["moves"][0]["power"], 60)
    def test_insignificant_spelling_mistake(self):
        doc = load_test_doc("_template")
        doc["item"] = "Blackbelt"  # actually "Black Belt"
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
    def test_insignificant_spelling_mistake_in_combination(self):
        doc = load_test_doc("_template")
        doc["moves"] = ["Pound"]
        doc["item"] = "Black Belt"
        doc["combinations"] = [["Pound", "Blackbelt"]]  # should get recognized
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
    def test_nidoran(self):
        doc = load_test_doc("_template")
        doc["species"] = "nidoran-m"
        with self.assertWarnsRegex(UserWarning, r""):
            result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["species"]["id"], 32)  # nidoran-m
    def test_wormadam(self):
        # forms can change both the typing and the displayname
        doc = load_test_doc("_template")
        doc["species"] = "Wormadam"
        doc["form"] = "Trash"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["species"]["types"], ["Bug", "Steel"])
        self.assertEqual(result["displayname"], "Wormadam Trash")
    def test_arceus(self):
        # Arceus' type follows the held Plate via Multitype
        doc = load_test_doc("_template")
        doc["species"] = "Arceus"
        doc["item"] = "Earth Plate"
        doc["ability"] = "Multitype"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["species"]["types"], ["Ground"])
        self.assertEqual(result["displayname"], "Arceus Ground")
    def test_shared_object_bug(self):
        # regression test: populate_pokeset must not share mutable state
        # between calls
        doc1 = load_test_doc("_template")
        doc1["species"] = "Arceus"
        doc1["item"] = "Earth Plate"
        doc2 = load_test_doc("_template")
        doc2["species"] = "Arceus"
        doc2["item"] = "Splash Plate"
        result = pokecat.populate_pokeset(doc1)
        backup = deepcopy(result)
        pokecat.populate_pokeset(doc2)  # should not affect the first result
        self.assertEqual(backup, result, "result of a populate call was changed after another one")
    def test_move_combinations(self):
        doc = load_test_doc("_template")
        doc["moves"] = [["Pound", "Aqua Jet"], ["Surf", "Rock Smash"]]
        doc["combinations"] = [["Pound", "Surf"]]
        pokeset = pokecat.populate_pokeset(doc)
        result = pokecat.instantiate_pokeset(pokeset)
        for _ in range(100):
            if result["moves"][0]["name"] == "Pound":
                self.assertEqual(result["moves"][1]["name"], "Surf")
    def test_move_in_multiple_slots_combinations(self):
        doc = load_test_doc("_template")
        doc["moves"] = [["Pound", "Aqua Jet"], ["Surf", "Aqua Jet"]]
        doc["combinations"] = [["Aqua Jet", "Aqua Jet"], ["Pound", "Surf"]]
        pokeset = pokecat.populate_pokeset(doc)
        for _ in range(100):
            result = pokecat.instantiate_pokeset(pokeset)
            if result["moves"][0]["name"] == "Aqua Jet":
                self.assertEqual(result["moves"][1]["name"], "Aqua Jet")
            elif result["moves"][0]["name"] == "Pound":
                self.assertEqual(result["moves"][1]["name"], "Surf")
            else:
                self.assertTrue(False)
    def test_move_separations(self):
        doc = load_test_doc("_template")
        doc["moves"] = [["Pound", "Aqua Jet"], ["Surf", "Rock Smash"]]
        doc["separations"] = [["Pound", "Surf"]]
        pokeset = pokecat.populate_pokeset(doc)
        result = pokecat.instantiate_pokeset(pokeset)
        for _ in range(100):
            if result["moves"][0]["name"] == "Pound":
                self.assertEqual(result["moves"][1]["name"], "Rock Smash")
    def test_move_in_different_slots_separations(self):
        doc = load_test_doc("_template")
        doc["moves"] = [["Pound", "Aqua Jet"], ["Surf", "Aqua Jet"]]
        doc["separations"] = [["Aqua Jet", "Aqua Jet"], ["Pound", "Surf"]]
        pokeset = pokecat.populate_pokeset(doc)
        for _ in range(100):
            result = pokecat.instantiate_pokeset(pokeset)
            if result["moves"][0]["name"] == "Aqua Jet":
                self.assertEqual(result["moves"][1]["name"], "Surf")
            elif result["moves"][1]["name"] == "Aqua Jet":
                self.assertEqual(result["moves"][0]["name"], "Pound")
            else:
                self.assertTrue(False)
    def test_skip_ev_check_single(self):
        # with skip_ev_check=True, invalid EVs only warn instead of raising
        doc = load_test_doc("_template")
        doc["evs"] = {"atk": 0, "def": 0, "spA": 0, "spD": 0, "spe": 0}
        doc["evs"]["hp"] = 253
        with self.assertWarnsRegex(UserWarning, r"All EVs must be <= 252."):
            pokecat.populate_pokeset(doc, skip_ev_check=True)
    def test_skip_ev_check_total(self):
        doc = load_test_doc("_template")
        val = 510//6
        doc["evs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["evs"]["hp"] += 1
        with self.assertWarnsRegex(UserWarning, r"Sum of EV must not be larger than 510, but is 511"):
            pokecat.populate_pokeset(doc, skip_ev_check=True)
    def test_displayname_magic1(self):
        doc = load_test_doc("_template")
        doc["species"] = "Arceus"
        doc["item"] = "Flame Plate"
        doc["shiny"] = True
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["displayname"], "Arceus Fire (Shiny)")
    def test_displayname_magic2(self):
        doc = load_test_doc("_template")
        doc["species"] = "Wormadam"
        doc["form"] = 2
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["displayname"], "Wormadam Trash")
    def test_custom_displayname(self):
        # an explicit displayname suppresses the generated one
        doc = load_test_doc("_template")
        doc["species"] = "Wormadam"
        doc["form"] = 2
        doc["displayname"] = "custom"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["displayname"], "custom")
    def test_formname(self):
        # form names are translated to their numeric index
        doc = load_test_doc("_template")
        doc["species"] = "Unown"
        doc["form"] = "C"
        result = pokecat.populate_pokeset(doc)
        self.assertEqual(result["form"], 2)
    def test_invalid_happiness(self):
        doc = load_test_doc("_template")
        doc["species"] = "Unown"
        doc["happiness"] = [1, 2]
        with self.assertRaisesRegex(ValueError, r"happiness must be a number."):
            pokecat.populate_pokeset(doc)
    def test_hidden_and_biddable(self):
        doc = load_test_doc("_template")
        doc["hidden"] = True
        doc["biddable"] = True
        with self.assertWarnsRegex(UserWarning, r"Set is biddable, but also hidden, which doesn't make sense."):
            pokecat.populate_pokeset(doc)
    def test_public_shiny(self):
        doc = load_test_doc("_template")
        doc["hidden"] = False
        doc["shiny"] = True
        with self.assertWarnsRegex(UserWarning, r"Set is shiny, but not hidden, which means it is publicly visible. Is this intended?"):
            pokecat.populate_pokeset(doc)
        doc["biddable"] = True
        doc["shiny"] = True
        with self.assertWarnsRegex(UserWarning, r"Set is shiny, but also biddable, which means it can be used in token matches. Is this intended?"):
            pokecat.populate_pokeset(doc)
    def test_default_hidden_shiny(self):
        # shiny sets default to hidden
        doc = load_test_doc("_template")
        doc["shiny"] = True
        result = pokecat.populate_pokeset(doc)
        self.assertTrue(result["hidden"])
    def test_default_not_hidden(self):
        doc = load_test_doc("_template")
        result = pokecat.populate_pokeset(doc)
        self.assertFalse(result["hidden"])
    def test_null_forms(self):
        doc = load_test_doc("_template")
        doc["tags"] = None
        with self.assertRaisesRegex(ValueError, r"tags must be a list of strings"):
            pokecat.populate_pokeset(doc)
    def test_forms_not_string(self):
        doc = load_test_doc("_template")
        doc["tags"] = [42]
        with self.assertRaisesRegex(ValueError, r"tags must be a list of strings"):
            pokecat.populate_pokeset(doc)
    # todo test forms, displaynames with forms, moves, special cases, combinations and separations.
    # and whatever isn't tested yet as well
    def test_gen1_vs_gen4(self):
        # same id resolves to different entries in gen1 vs gen4 data
        gen1_200 = pokecat.gen1data.get_item("HM05")
        gen4_200 = pokecat.gen4data.get_item("Chilan Berry")
        self.assertEqual(gen1_200["id"], 200)
        self.assertEqual(gen4_200["id"], 200)
    def test_gen1_elixer_item_spelling(self):
        # gen1 uses the in-game "ELIXER" spelling
        elixir = pokecat.gen1data.find_item("Elixir")
        match = next(iter(elixir.values()))
        self.assertEqual(match["name"], "ELIXER")
    def test_get_gen1_move(self):
        gen1_100 = pokecat.gen1data.get_move("TELEPORT")
        gen4_100 = pokecat.gen4data.get_move("Teleport")
        self.assertEqual(gen1_100["id"], 100)
        self.assertEqual(gen4_100["id"], 100)
    def test_empty_otions(self):
        # empty option lists are rejected for every multi-option field
        fields = ["ability", "item", "ball", "gender"]
        for field in fields:
            doc = load_test_doc("_template")
            doc[field] = []
            plural = "abilities" if field == "ability" else field + "s"
            with self.assertRaisesRegex(ValueError, r"List of possible {} cannot be empty".format(plural)):
                pokecat.populate_pokeset(doc)
        doc = load_test_doc("_template")
        doc["moves"] = [[]]
        with self.assertRaisesRegex(ValueError, r"List of possible moves in slot 1 cannot be empty"):
            pokecat.populate_pokeset(doc)
    def test_invalid_number_of_moves(self):
        doc = load_test_doc("_template")
        doc["moves"] = []
        with self.assertRaisesRegex(ValueError, r"Pokémon must have between 1 and 4 moves, but has 0"):
            pokecat.populate_pokeset(doc)
        doc["moves"] = ["1", "2", "3", "4", "5"]
        with self.assertRaisesRegex(ValueError, r"Pokémon must have between 1 and 4 moves, but has 5"):
            pokecat.populate_pokeset(doc)
    def test_duplicate_moves(self):
        doc = load_test_doc("_template")
        doc["moves"] = ["Tackle", "Tackle"]
        with self.assertWarnsRegex(UserWarning, r"Move Tackle is guaranteed to occupy multiple slots \(possible stallmate due to PP-bug\)"):
            pokecat.populate_pokeset(doc)
    def test_invalid_suppression(self):
        doc = load_test_doc("_template")
        doc["suppressions"] = ["invalid"]
        with self.assertRaisesRegex(ValueError, r"invalid is not a recognized suppression"):
            pokecat.populate_pokeset(doc)
    def test_suppressions_recognized(self):
        # every declared Suppressions value must be accepted
        from pokecat import Suppressions
        for suppression in Suppressions:
            doc = load_test_doc("_template")
            doc["suppressions"] = [suppression.value]
            pokecat.populate_pokeset(doc)
            # should just raise no errors
    def test_too_many_evs_single_suppressed(self):
        doc = load_test_doc("_template")
        val = 32
        doc["evs"] = {"atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["evs"]["hp"] = 256
        doc["suppressions"] = ["invalid-evs"]
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
    def test_too_many_evs_total_suppressed(self):
        doc = load_test_doc("_template")
        val = 128
        doc["evs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["suppressions"] = ["invalid-evs"]
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            for x in w:
                print(x)
            self.assertEqual(len(w), 0)
    def test_wasted_evs_suppressed(self):
        doc = load_test_doc("_template")
        val = 16
        doc["evs"] = {"hp": val, "atk": val, "def": val, "spA": val, "spD": val, "spe": val}
        doc["evs"]["hp"] = 15
        doc["suppressions"] = ["wasted-evs"]
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
    def test_duplicate_moves_suppressed(self):
        doc = load_test_doc("_template")
        doc["moves"] = ["Tackle", "Tackle"]
        doc["suppressions"] = ["duplicate-moves"]
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
    def test_public_shiny_suppressed(self):
        doc = load_test_doc("_template")
        doc["suppressions"] = ["public-shiny"]
        doc["shiny"] = True
        doc["hidden"] = False
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
        doc["biddable"] = True
        with warnings.catch_warnings(record=True) as w:
            pokecat.populate_pokeset(doc)
            self.assertEqual(len(w), 0)
| true |
8d32fee9f358bea0ec12a52d4b73c302cb7914c8 | Python | xiangcong/image-tool | /convertToGray.py | UTF-8 | 371 | 2.890625 | 3 | [] | no_license | import PIL
import sys
import os
from PIL import Image
def convert(path):
    """Convert the image at *path* to grayscale and save a copy.

    The result is written next to the original as ``<stem>Gray.jpg``.

    :param path: path to the source image file.
    :return: the path of the grayscale copy that was written.
    """
    # Context manager ensures the image file handle is closed (the
    # original leaked it).
    with Image.open(path) as im:
        gray = im.convert('L')  # 'L' = 8-bit single-channel (grayscale)
        # os.path.splitext handles extensionless paths correctly; the
        # previous rfind('.') returned -1 in that case and silently
        # produced a mangled output filename.
        stem, _ext = os.path.splitext(path)
        new_path = stem + 'Gray' + '.jpg'
        gray.save(new_path)
    return new_path
if __name__ == '__main__':
    # Expect exactly one argument: the path of the image to convert.
    if len(sys.argv) != 2:
        print('usage: python {} imagePath'.format(sys.argv[0]))
    else:
        convert(sys.argv[1])
| true |