| column | type | range / classes |
|---|---|---|
| blob_id | string | lengths 40–40 |
| language | string | 1 class |
| repo_name | string | lengths 5–133 |
| path | string | lengths 2–333 |
| src_encoding | string | 30 classes |
| length_bytes | int64 | 18–5.47M |
| score | float64 | 2.52–5.81 |
| int_score | int64 | 3–5 |
| detected_licenses | list | lengths 0–67 |
| license_type | string | 2 classes |
| text | string | lengths 12–5.47M |
| download_success | bool | 1 class |
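Each record below follows this schema, with the `text` column carrying the full source file and `download_success` closing the record. As a minimal sketch of how rows with these columns could be filtered, assuming they have been exported to a local Parquet file (the file name `rows.parquet`, and the use of pandas, are assumptions rather than part of this dataset):

```python
# Minimal sketch: filter records by the schema columns listed above.
# Assumes a hypothetical local export named "rows.parquet" and that
# pandas (with a Parquet engine such as pyarrow) is installed.
import pandas as pd

df = pd.read_parquet("rows.parquet")

# Keep Python files that downloaded successfully and scored at least 3.
subset = df[(df["language"] == "Python") & df["download_success"] & (df["int_score"] >= 3)]

# Inspect the smallest files first; length_bytes is the size of the "text" field.
cols = ["repo_name", "path", "length_bytes", "score", "license_type"]
print(subset.sort_values("length_bytes")[cols].head())
```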
blob_id: 0ec685d0c4d508ff04896dfaba15cb82f823c41d | language: Python | repo_name: SimonSlominski/Pybites_Exercises | path: /Pybites/375/test_combinations.py | src_encoding: UTF-8 | length_bytes: 2,256 | score: 3.328125 | int_score: 3 | detected_licenses: [] | license_type: no_license
from itertools import product
import pytest
from combinations import generate_letter_combinations
@pytest.mark.parametrize(
"digits, expected",
[
("2", ["a", "b", "c"]),
("23", ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]),
(
"79",
[
"pw",
"px",
"py",
"pz",
"qw",
"qx",
"qy",
"qz",
"rw",
"rx",
"ry",
"rz",
"sw",
"sx",
"sy",
"sz",
],
),
(
"234",
[
"adg",
"adh",
"adi",
"aeg",
"aeh",
"aei",
"afg",
"afh",
"afi",
"bdg",
"bdh",
"bdi",
"beg",
"beh",
"bei",
"bfg",
"bfh",
"bfi",
"cdg",
"cdh",
"cdi",
"ceg",
"ceh",
"cei",
"cfg",
"cfh",
"cfi",
],
),
],
)
def test_generate_letter_combinations(digits, expected):
assert generate_letter_combinations(digits) == expected
def test_generate_letter_combinations_repeated_digits():
assert generate_letter_combinations("222") == [
"aaa",
"aab",
"aac",
"aba",
"abb",
"abc",
"aca",
"acb",
"acc",
"baa",
"bab",
"bac",
"bba",
"bbb",
"bbc",
"bca",
"bcb",
"bcc",
"caa",
"cab",
"cac",
"cba",
"cbb",
"cbc",
"cca",
"ccb",
"ccc",
]
def test_generate_letter_combinations_long_input():
assert generate_letter_combinations("23") == [
"ad",
"ae",
"af",
"bd",
"be",
"bf",
"cd",
"ce",
"cf",
]
download_success: true

blob_id: a2e4b400e69f8296aad93f6f5e17879198980241 | language: Python | repo_name: coucoulesr/advent-of-code-2019 | path: /01-Rocket-Equation/01-2-Soln.py | src_encoding: UTF-8 | length_bytes: 472 | score: 3.40625 | int_score: 3 | detected_licenses: [] | license_type: no_license
import math
def moduleFuelReq(mass):
output = math.floor(mass/3) - 2
return output if output > 0 else 0
def main():
fuel_required = 0
next_fuel = 0
with open("01-input") as file:
for line in file:
next_fuel = moduleFuelReq(int(line))
while next_fuel > 0:
fuel_required += next_fuel
next_fuel = moduleFuelReq(next_fuel)
print(fuel_required)
if __name__ == "__main__":
main()
download_success: true

blob_id: fe7b4f771154bfc36367868e72707786673ab66f | language: Python | repo_name: covidwatchorg/CircuitPythonExperimenter | path: /ble_print.py | src_encoding: UTF-8 | length_bytes: 1,481 | score: 2.734375 | int_score: 3 | detected_licenses: [] | license_type: no_license
# printing advertising packets and support formatting
import _bleio
import ble_gaen_scanning
# ======================================================
# Functions to help with printing a generic advertising packet.
def hex_of_bytes(bb):
s = ""
count = 0
for b in bb:
s += (" {:02x}".format(b))
count += 1
if count % 8 == 0:
s += " "
return s.strip()
def print_scan_entry_type(scan_entry):
if scan_entry.scan_response:
print("Scan Response", end='')
else:
print("Advertisement", end='')
if scan_entry.connectable:
print(", connectable")
else:
print("")
def print_address(address):
print("address =", address, end=" ")
t = address.type
print("address type =", end=" ")
if t == address.PUBLIC:
print("PUBLIC")
elif t == address.RANDOM_STATIC:
print("RANDOM_STATIC")
elif t == address.RANDOM_PRIVATE_RESOLVABLE:
print("RANDOM_PRIVATE_RESOLVABLE")
elif t == address.RANDOM_PRIVATE_NON_RESOLVABLE:
print("RANDOM_PRIVATE_NON_RESOLVABLE")
else:
print("unknown")
def print_advertisement_bytes(scan_entry):
adv_bytes = bytearray(scan_entry.advertisement_bytes)
if len(adv_bytes) != 0:
print("advertisement bytes = ", hex_of_bytes(adv_bytes))
def print_scan_entry(scan_entry):
print_scan_entry_type(scan_entry)
print_address(scan_entry.address)
print_advertisement_bytes(scan_entry)
download_success: true

blob_id: 0c2c327bb3597ac06e474286d05d0ac908fb7de0 | language: Python | repo_name: whigg/HyperHeuristicKnapsack | path: /knapsack/genetic.py | src_encoding: UTF-8 | length_bytes: 1,587 | score: 3.09375 | int_score: 3 | detected_licenses: ["MIT"] | license_type: permissive
import random as rnd
import algorithms as algs
import knapsack.hyper.single.problem as ksp
def simple_state_generator_ksp(dimension):
state = []
for i in range(0, dimension):
random_boolean = False if rnd.randint(0, 1) == 0 else True
state.append(random_boolean)
return state
def initial_population_generator_ksp(amount, dimension, validator=ksp.validate,
state_generator=simple_state_generator_ksp,
**kwargs):
population = []
while len(population) < amount:
population_candidate = state_generator(dimension)
if validator(population_candidate, **kwargs):
population.append(population_candidate)
return population
def compare_state_ksp(state1, state2):
for i in range(0, len(state1)):
if state1[i] != state2[i]:
return True
return False
def crossover_func_ksp(first_parent, second_parent, **kwargs):
iteration = 0
while iteration < len(first_parent) ** 2:
# TODO: try other genetic operators (different types of crossover, for example)
# nor first allel nor last allel
crossover_index = rnd.randint(1, len(first_parent) - 1)
child_candidate = first_parent[:crossover_index + 1] + second_parent[crossover_index + 1:]
if ksp.validate(child_candidate, **kwargs):
return child_candidate
return None
def minimize(dimension, **kwargs):
return algs.genetic.minimize(dimension, initial_population_generator_ksp, crossover_func_ksp, ksp.solve, **kwargs)
download_success: true

blob_id: c008e73eeb247c527a1008a5a7563944d6ce77ee | language: Python | repo_name: void-memories/APS-Code-Base | path: /Assignment/20_camelCase.py | src_encoding: UTF-8 | length_bytes: 145 | score: 2.953125 | int_score: 3 | detected_licenses: [] | license_type: no_license
import re
def main():
patt = re.compile(r'[A-Z]')
string = input()
print(len(patt.findall(string))+1)
if __name__ == "__main__":
main()
download_success: true

blob_id: c5e588c192a1d754e192b46a9dd1717437f45d8f | language: Python | repo_name: fisch321/uband-python-s2 | path: /homeworks/B20769/homework1/B20769_feiyu_day5_homework.py | src_encoding: UTF-8 | length_bytes: 5,694 | score: 3.546875 | int_score: 4 | detected_licenses: [] | license_type: no_license
# -*- coding: utf-8 -*-
import codecs
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#1. Read the files
#Split words again on "-": ['aa', 'aaa-bbb-sds'] => ['aa', 'aaa', 'bbb', 'sds']
def word_split(words):
new_list = []
for word in words:
if '-' not in word:
new_list.append(word)
else:
lst = word.split('-')
new_list.extend(lst)
return new_list
#Read a single file and return a list of all the words it contains
def read_file(file_path):
f = codecs.open(file_path, 'r', "utf-8") #open the file
lines = f.readlines() #read the file line by line, giving n lines of data
word_list = []
for line in lines:
line = line.strip()#strip leading and trailing whitespace
words = line.split(" ") #split on spaces
words = word_split(words) #split on "-"
word_list.extend(words)
return word_list
#Collect the paths of all files in a folder and return them as a list
def get_file_from_folder(folder_path):
file_paths = []
for root, dirs, files in os.walk(folder_path):
for file in files:
file_path = os.path.join(root, file)
file_paths.append(file_path)
return file_paths
#Read the words from multiple files and return them as a single list
def read_files(file_paths):
final_words = []
for path in file_paths:
final_words.extend(read_file(path))
return final_words
#2. Get the formatted words
#Format a single word and return it
def format_word(word):
fmt = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-'
for char in word:
if char not in fmt:
word = word.replace(char, '')
return word.lower()
#Format a list of words and return them as a new list
def format_words(words):
word_list = []
for word in words:
wd = format_word(word)
if wd:#skip the word if it is empty
word_list.append(wd)
return word_list
#3. Count the words
# {'aa':4, 'bb':1}
#Count how many times each word occurs
def statictcs_words(words):
s_word_dict = {}
for word in words:
if s_word_dict.has_key(word):
s_word_dict[word] = s_word_dict[word] + 1
else:
s_word_dict[word] = 1
#sort by word frequency
sorted_dict = sorted(s_word_dict.iteritems(), key=lambda d: d[1], reverse=True)
return sorted_dict
#Convert the tuples to lists (turn the dict items into a list whose elements are small lists)
def tup2list(volcaulay_list_tup):
volcaulay_list_lst = []
for val in volcaulay_list_tup:
volcaulay_list_lst.append(list(val))
return volcaulay_list_lst
#4. Write out as CSV
def print_to_csv(volcaulay_list, to_file_path):
nfile = open(to_file_path, 'w+')
for val in volcaulay_list:
nfile.write("%s,%s,%0.2f,%s \n" % (val[0], str(val[1]), val[2], val[3]))
nfile.close()
#Compute the cumulative word-frequency percentage
def word_rate(volcaulay_list,total_count):
word_rates_dict = []
current_count = 0
for val in volcaulay_list:
current_count = current_count + val[1]
word_rate = (float(current_count) / total_count) * 100
val.append(word_rate)
word_rates_dict.append(val)
return word_rates_dict
#Select the words whose cumulative frequency falls within a given range
def select_word(word_percent_list, rate_range):
word_list_recite = []
start = rate_range[0] * 100
end = rate_range[1] * 100
for val in word_percent_list:
if val[2] >= start and val[2] <= end:
word_list_recite.append(val)###each element of this list is itself a list
return word_list_recite
#Read the word definitions
def read_meaning(file_path):
f = codecs.open(file_path, 'r', "utf-8") #open the file
lines = f.readlines() #read the file line by line, giving n lines of data
words_meaning_list = []
for line in lines:
line = line.strip()#strip leading and trailing whitespace
word, space, meaning = line.partition(" ")#partition and split do not give the same result
meaning = meaning.strip()
word_meaning = [word,meaning]
words_meaning_list.append(word_meaning)
return words_meaning_list
#Attach a definition to each word
def meanging_word(volcaulay_list,words_meaning):
words_meaning_dict = {}
meanings2words = []
for word in words_meaning:
words_meaning_dict[word[0]] = word[1]
for val in volcaulay_list:
if words_meaning_dict.has_key(val[0]):
val.append(words_meaning_dict[val[0]])
else:
val.append('没有该单词的解释')
meanings2words.append(val)
return meanings2words
def main():
#read the text
words = read_files(get_file_from_folder('data2'))
print '获取了未格式化的单词 %d 个' % (len(words))
#clean the text
f_words = format_words(words)
total_word_count = len(f_words)
print '获取了已经格式化的单词 %d 个' %(len(f_words))
# count the words and sort them
word_list = statictcs_words(f_words)
#turn the dict into a list whose elements are lists
word_list_lst = tup2list(word_list)
#compute the cumulative frequency of each word and append it to the frequency list
word_rates_dict = word_rate(word_list_lst,total_word_count)
#select the words
start_and_end = [0.3, 0.7] #select this slice of the words
s_word_list = select_word(word_rates_dict,start_and_end)
#read the word-definition file
words_meaning = read_meaning("8000-words.txt")
#pair the definitions with the words extracted from The Economist
meanings_words = meanging_word(s_word_list, words_meaning)
# write the output file
print_to_csv(meanings_words, 'output/words_meanings.csv')
if __name__ == "__main__":
main()
download_success: true

blob_id: f2453a83c9d38086702c4e3fec2a9b8486dd0657 | language: Python | repo_name: wangyendt/LeetCode | path: /Hard/297. Serialize and Deserialize Binary Tree/Serialize and Deserialize Binary Tree.py | src_encoding: UTF-8 | length_bytes: 1,577 | score: 3.453125 | int_score: 3 | detected_licenses: [] | license_type: no_license
#!/usr/bin/env python
# encoding: utf-8
"""
@author: Wayne
@contact: wangye.hope@gmail.com
@software: PyCharm
@file: Serialize and Deserialize Binary Tree
@time: 2019/8/29 16:53
"""
import sys
sys.path.append('..')
from Tools.BinaryTree import *
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if not root: return '[]'
ret = []
def helper(tree: TreeNode, ind=0):
if not tree: return
ret.append({ind: tree.val})
helper(tree.left, 2 * ind + 1)
helper(tree.right, 2 * ind + 2)
helper(root)
return str(ret)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
ele_dict = {}
data = data.replace('[', '').replace(']', '').replace('{', '').replace('}', '').split(',')
if '' in data: return None
for d in data:
k, v = d.split(':')
ele_dict[int(k)] = int(v)
def helper(ind=0):
if ind in ele_dict:
tree = TreeNode(ele_dict[ind])
tree.left = helper(2 * ind + 1)
tree.right = helper(2 * ind + 2)
return tree
return None
return helper()
encoder = Codec()
tree = parseTreeNode([1, 2, 3, 'null', 'null', 6, 7])
# tree = parseTreeNode([])
res = encoder.serialize(tree)
print(res)
decoded_tree = encoder.deserialize(res)
print(showTreeNode(decoded_tree))
download_success: true

blob_id: 22dc659f188c63a22baac9d0638212917ee58d0c | language: Python | repo_name: ankaan/dice | path: /dice_probability/die_test.py | src_encoding: UTF-8 | length_bytes: 9,759 | score: 2.703125 | int_score: 3 | detected_licenses: [] | license_type: no_license
from dice_probability.die import Die, LazyDie
from dice_probability.die import DieParseException, from_string, pool_from_string, fastsum
from django.test import TestCase
class TestDie(TestCase):
def test_init(self):
for d in (Die,LazyDie):
self.assertEquals(d(0).probability(),[1.0])
self.assertEquals(d(4).probability(),[0.0, 0.25, 0.25, 0.25, 0.25])
self.assertEquals(d([]).probability(),[1.0])
self.assertEquals(d([0,0.5,0.5,0]).probability(),[0,0.5,0.5])
self.assertEquals(d([0,0.5,0.5,0]),d([0,0.5,0.5]))
self.assertEquals(d([0,1,1,1,1]),d([0,0.25,0.25,0.25,0.25]))
self.assertEquals(d([0,1,1,1,1]).probability(),d(4).probability())
self.assertEquals(d([0,1,1,1,1,0,0,0]).probability(),d(4).probability())
seq = [0.0, 0.2, 0.3, 0.5]
self.assertEquals(d(seq).probability(),seq)
self.assertEquals(d(5),d([0.0, 0.2, 0.2, 0.2, 0.2, 0.2]))
self.assertEquals(d(d(3)),d(3))
self.assertRaises(TypeError,d,(1,2,3))
self.assertRaises(ValueError,d,-1)
self.assertEquals(d([d(4), d(6)]), d(4)+d(6))
self.assertEquals(LazyDie([Die(4), Die(6)]), LazyDie(4)+LazyDie(6))
def test_const(self):
for d in (Die,LazyDie):
self.assertEquals(d.const(0).probability(),[1.0])
self.assertEquals(d.const(1).probability(),[0.0,1.0])
self.assertEquals(d.const(2).probability(),[0.0,0.0,1.0])
def test_add(self):
for d in (Die,LazyDie):
die = d(2)+d(2)
self.assertEquals(die.probability(),[0.0, 0.0, 0.25, 0.5, 0.25])
die = d(2)+d(4)
self.assertEquals(
die.probability(),
[0.0, 0.0, 1./2/4, 1./4, 1./4, 1./4, 1./2/4])
left = (d(2) + d(10)) + d(20)
right = d(2) + (d(10) + d(20))
self.assertEquals(left,right)
inc = d(8) + d(12)
dec = d(12) + d(8)
self.assertEquals(inc, dec)
a = d(10) + d(10)
b = d(12) + d(8)
self.assertNotEqual(a, b)
a = d(20)
b = d(10) + d(10)
self.assertNotEqual(a, b)
def test_eq(self):
for d in (Die,LazyDie):
self.assertEqual(d(2),d(2))
self.assertEqual(d(2),d([0,0.5,0.5]))
self.assertNotEqual(d(2),d(4))
self.assertNotEqual(d(4),d(2))
self.assertEqual(d([0.0,1,1]),d([0,0.5,0.5]))
self.assertEqual(d([0.0,1,1])+d(10),d([0,0.5,0.5])+d(10))
def test_cmp(self):
for d in (Die,LazyDie):
self.assertEqual(d(2).__cmp__(None),cmp(1,None))
self.assertEqual(d(2).__cmp__(d(2)),0)
self.assertEqual(d(2).__cmp__(d([0,0.5,0.5])),0)
self.assertEqual(d(2).__cmp__(d(4)),1)
self.assertEqual(d(4).__cmp__(d(2)),-1)
self.assertEqual(d([0.0,1,1]).__cmp__(d([0,0.5,0.5])),0)
self.assertEqual((d([0.0,1,1])+d(10)).__cmp__(d([0,0.5,0.5])+d(10)),0)
self.assertEqual(d(2),d(2))
self.assertNotEqual(d(4),d(2))
def test_duplicate(self):
for d in (Die,LazyDie):
self.assertEqual(d(20).duplicate(0), d.const(0))
self.assertEqual(d(20).duplicate(1), d(20))
self.assertEqual(d(20).duplicate(2), d(20)+d(20))
self.assertEqual(d(8).duplicate(3), d(8)+d(8)+d(8))
self.assertEquals(d(5).duplicate(20), d(5)+d(5).duplicate(19))
a = (d(5) + d(10)).duplicate(3)
b = d(5).duplicate(3) + d(10).duplicate(3)
self.assertEquals(a,b)
def test_probability(self):
for d in (Die,LazyDie):
self.assertEquals(d(4).probability(),[0.0, 0.25, 0.25, 0.25, 0.25])
self.assertEquals(d(5),d([0.0, 0.2, 0.2, 0.2, 0.2, 0.2]))
self.assertEqual(d([0.0,1,1]),d([0,0.5,0.5]))
def test_probability_reach(self):
for d in (Die,LazyDie):
self.assertEquals(d(4).probability_reach(),[1.0,1.0,0.75,0.5,0.25])
def test_probability_vs(self):
for d in (Die,LazyDie):
p = 0.5*0.75 + 0.5*0.5
self.assertEquals(d(4).probability_vs(d(2)),p)
d0 = d(4)+d(6)+d(8)+d(8)
d1 = d(4)+d(6)+d(8)+d(10)+d(8)
p = d0.probability_vs(d1) + d1.probability_vs(d0) + d0.probability_eq(d1)
self.assertEquals(round(p,7),1.0)
def test_probability_eq(self):
for d in (Die,LazyDie):
self.assertEquals(d(4).probability_eq(d(4)),0.25)
p = 0.5*0.25
self.assertEquals(d(4).probability_eq(d(8)),p)
self.assertEquals(d(8).probability_eq(d(4)),p)
def test_roll(self):
for d in (Die,LazyDie):
self.assertTrue(d(10).roll() in range(1,11))
self.assertEquals(d(10).roll(0.09),1)
self.assertEquals(d(10).roll(0.59),6)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0),1)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0.000001),1)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0.199999),1)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0.200001),2)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0.899999),2)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0.900001),3)
self.assertEquals(d([0.0,0.2,0.7,0.1]).roll(0.999999),3)
self.assertRaises(ValueError,d(10).roll,-0.2)
self.assertRaises(ValueError,d(10).roll,1.0)
def test_from_string(self):
for d in (Die,LazyDie):
self.assertEquals(from_string(d,""),(None,[]))
self.assertEquals(from_string(d," "),(None,[]))
self.assertEquals(from_string(d,"d4"),(d(4),["d4"]))
self.assertEquals(from_string(d,"2d6"),(d(6).duplicate(2),["2d6"]))
self.assertEquals(from_string(d," D12"),(d(12),["D12"]))
self.assertEquals(from_string(d,"13 "),(d.const(13),["13"]))
self.assertEquals(from_string(d,"13 2"),(d.const(13)+d.const(2),["13","2"]))
self.assertEquals(from_string(d,"d20 d8 d4"),(d(20)+d(8)+d(4),["d20","d8","d4"]))
self.assertEquals(from_string(d,"d4-d4"),(d(4),["d4-d4"]))
self.assertEquals(
from_string(d,"d4-d20"),
(d(4) + d(6) + d(8) + d(10) + d(12) + d(20),["d4-d20"]))
self.assertRaises(DieParseException,from_string,d,"12e3")
self.assertRaises(DieParseException,from_string,d,"h")
self.assertRaises(DieParseException,from_string,d,"3d3d2")
self.assertEquals(
from_string(d,"5d10",max_dice=5,max_sides=10),
(d(10).duplicate(5),["5d10"]))
self.assertEquals(
from_string(d,"2d10 3d10",max_dice=5,max_sides=10),
(d(10).duplicate(2) + d(10).duplicate(3),["2d10","3d10"]))
self.assertRaises(DieParseException,
from_string, d, "6d10", max_dice=5, max_sides=10)
self.assertRaises(DieParseException,
from_string, d, "2d2 4d3", max_dice=5, max_sides=10)
self.assertRaises(DieParseException,
from_string, d, "2d11", max_dice=5, max_sides=10)
pool = d([7,4,1])
self.assertEquals(from_string(d,"3p p"),(pool.duplicate(3) + pool,["3p","p"]))
self.assertEquals(from_string(d,"3p d4"),(pool.duplicate(3) + d(4),["3p","d4"]))
def test_fastsum(self):
self.assertEquals(fastsum([Die(10)]), Die(10))
self.assertEquals(fastsum([Die(4),Die(6)]), Die(4)+Die(6))
self.assertEquals(fastsum([Die(6),Die(4)]), Die(4)+Die(6))
def test_pool_from_string(self):
for d in (Die,LazyDie):
pool = d([7,4,1])
self.assertEquals(pool_from_string(d,""),([],[]))
self.assertEquals(pool_from_string(d," "),([],[]))
self.assertEquals(pool_from_string(d,"0"),([d([1])],["0"]))
self.assertEquals(pool_from_string(d,"1"),([pool],["1"]))
self.assertEquals(pool_from_string(d,"5"),([pool.duplicate(5)],["5"]))
self.assertEquals(pool_from_string(d,"3 4"),([pool.duplicate(3), pool.duplicate(4)],["3","4"]))
self.assertEquals(pool_from_string(d,"4-6"),([pool.duplicate(4), pool.duplicate(5), pool.duplicate(6)],["4","5","6"]))
self.assertEquals(pool_from_string(d,"3-3"),([pool.duplicate(3)],["3"]))
self.assertEquals(pool_from_string(d,"1-30"),(
[pool.duplicate(i) for i in range(1,31)],
[str(i) for i in range (1,31)]))
self.assertEquals(pool_from_string(d,"5",max_dice=5),([pool.duplicate(5)],["5"]))
self.assertRaises(DieParseException, pool_from_string, d, "-1")
self.assertRaises(DieParseException, pool_from_string, d, "6", max_dice=5)
self.assertRaises(DieParseException, pool_from_string, d, "4p")
self.assertRaises(DieParseException, pool_from_string, d, "p")
self.assertRaises(DieParseException, pool_from_string, d, "3-2")
self.assertRaises(DieParseException, pool_from_string, d, "1-31")
def test_percentile_reach(self):
for d in (Die,LazyDie):
self.assertEquals(d(4).percentile_reach([0.5]),[3.0])
self.assertEquals(d(4).percentile_reach([0.75]),[2.0])
self.assertEquals(d(4).percentile_reach([0.25]),[4.0])
self.assertEquals(d(4).percentile_reach([1.0]),[1.0])
self.assertEquals(d(4).percentile_reach([0.0]),[4.0])
self.assertEquals(d(4).percentile_reach([0.001]),[4.0])
self.assertEquals(d(4).percentile_reach([0.999]),[1.0])
self.assertEquals(d([0,0,0,0,1,1,0,0,0,0]).percentile_reach([1.0]),[4.0])
self.assertEquals(d([0,1,1,0,0,0,0]).percentile_reach([0.5]),[2.0])
self.assertEquals(d([0,1,2,1]).percentile_reach([0.5]),[2.5])
self.assertEquals(d([0,1,1,0,1,1]).percentile_reach([0.5]),[3.0])
self.assertEquals(d([0,0,1,0,1]).percentile_reach([0.5]),[3.0])
self.assertEquals(d(4).percentile_reach([1]),d(4).percentile_reach([1.0]))
self.assertEquals(d(4).percentile_reach([0]),d(4).percentile_reach([0.0]))
self.assertRaises(ValueError, d(4).percentile_reach, [-0.1])
self.assertRaises(ValueError, d(4).percentile_reach, [1.1])
self.assertEquals(round(d(10).percentile_reach([0.5])[0],7),6.0)
self.assertEquals(round(d(10).percentile_reach([0.75])[0],7),3.5)
self.assertEquals(d(4).percentile_reach([0.75,0.5,0.25]),[2.0, 3.0, 4.0])
download_success: true

blob_id: 6674f364967428f01b295eb4386e8002fedf37d7 | language: Python | repo_name: nbnbhattarai/qpapers | path: /qpapers/scienceopen.py | src_encoding: UTF-8 | length_bytes: 3,027 | score: 2.625 | int_score: 3 | detected_licenses: [] | license_type: no_license
import requests
import json
from datetime import datetime
from .article import Article
class ScienceOpenSearch(object):
NAME = 'SCIENCEOPEN'
ROOT_URL = 'https://www.scienceopen.com'
def __init__(self, *args, **kwargs):
self.url = self.ROOT_URL + '/search-servlet'
self.keyword = kwargs.get('keyword', '')
self.page = 0
self.result = 5
def set_keyword(self, keyword):
self.keyword = keyword
def set_results(self, results):
self.result = results
@property
def params(self):
filters = [
{
'kind': 48,
'query': self.keyword,
},
{
'kind': 39,
'disciplines': [
{
'kind': 23,
'id': '79f00046-6f95-11e2-bcfd-0800200c9a66',
},
]
},
{
'kind': 46,
'record': False,
'abstract': True,
'authorSummary': True,
'article': True,
}
]
params = {
'kind': 61,
'itemsToGet': self.result,
'firstItemIndex': self.page * self.result,
'getFacets': False,
'getFilters': False,
'search': {
'v': 3,
'id': '',
'isExactMatch': True,
'context': None,
'kind': 77,
'order': 3,
'orderLowestFirst': False,
'query': '',
'filters': filters,
}
}
return {
'q': json.dumps(params)
}
def search(self):
return requests.get(self.url, params=self.params)
def get_articles(self):
response = self.search()
response_json = response.json()
# print('response: ', response_json)
articles = []
for result in response_json.get('result', {'results': []}).get('results', []):
title = ' '.join(result.get('_titleSafe', '').replace(
'\n', ' ').strip().split())
abstract = ' '.join(result.get('_abstractTextSafe',
'').replace('\n', ' ').strip().split())
authors = result.get('_authors', [])
authors = [author.get('_displayNameSafe') for author in authors]
_date = result.get('_date', 0)
_date = datetime.utcfromtimestamp(_date//1000)
submitted_date = _date.strftime('%d %B, %Y')
href = self.ROOT_URL + result.get('_url')
article = Article(
title=title, summary=abstract, authors=authors,
submitted_date=submitted_date, link=href, source=self.NAME)
articles.append(article)
return articles
if __name__ == '__main__':
sos = ScienceOpenSearch()
sos.set_keyword('Machine Learning')
sos.get_articles()
download_success: true

blob_id: a33212c98531a033578232a74aa84a9e79d3c69c | language: Python | repo_name: steven0301/Translate-and-Summarize-Text | path: /start.py | src_encoding: UTF-8 | length_bytes: 3,271 | score: 2.609375 | int_score: 3 | detected_licenses: [] | license_type: no_license
from gensim.summarization.summarizer import summarize
from newspaper import Article
from flask import Flask, render_template, request, jsonify
from googletrans import Translator
import json
import pdftotext
import urllib
from urllib.error import URLError, HTTPError
import io
from pathlib import Path
import tempfile
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
variable_start_string='[[',
variable_end_string=']]',
))
app = CustomFlask(__name__)
translator = Translator()
@app.route("/")
def main():
return render_template('index.html')
@app.route("/news", methods=['POST'])
def news():
# retrieve article from URL
url = request.args['url']
text = ""
# for PDF extension
if Path(url).suffix == '.pdf':
try :
# pretend not to crawl
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
con = urllib.request.urlopen(req)
remoteFile = con.read()
momoryFile = io.BytesIO(remoteFile)
pdf = pdftotext.PDF(momoryFile)
for page in pdf :
text += page
except HTTPError as e :
err = e.read()
code = e.code
# for normal URL
else :
news = Article(url)
news.download()
news.parse()
text = news.text
# remove multiple whitespaces in the article
text = ' '.join(text.split())
response = app.response_class(
response=json.dumps(text),
status=200,
mimetype='application/json'
)
return response
@app.route("/abridge", methods=['POST'])
def abridge():
origin = request.args['origin']
summarizeRate = request.args['summarizerate']
length = len(origin.split()) * (int(summarizeRate)/100)
summarized = summarize(origin, word_count=length)
response = app.response_class(
response=json.dumps(summarized.replace("\n", "\n\n")),
status=200,
mimetype='application/json'
)
return response
@app.route("/translate", methods=['POST'])
def translate():
text = ""
try :
summarized = request.args['summarized']
translated = translator.translate(summarized, dest='ko')
text = translated.text
except Exception as e:
text = "3900 characters limit exceeded !!!"
response = app.response_class(
response=json.dumps(text),
status=200,
mimetype='application/json'
)
return response
@app.route("/upload", methods=['POST'])
def upload():
file = request.files['file']
text = ""
tempDir = tempfile.TemporaryDirectory()
filepath = tempDir.name + '/temp.pdf'
file.save(filepath)
con = urllib.request.urlopen('file://'+filepath)
remoteFile = con.read()
momoryFile = io.BytesIO(remoteFile)
pdf = pdftotext.PDF(momoryFile)
for page in pdf :
text += page
tempDir.cleanup()
text = ' '.join(text.split())
response = app.response_class(
response=json.dumps(text),
status=200,
mimetype='application/json'
)
return response
if __name__ == '__main__':
app.run(host="127.0.0.1", port="8080")
download_success: true

blob_id: 7b5543d3cc0b3e732ee78ad86ee6ce8c8b8582ac | language: Python | repo_name: raykunal2021/SDET_Selenium | path: /alerts.py | src_encoding: UTF-8 | length_bytes: 1,529 | score: 3.015625 | int_score: 3 | detected_licenses: [] | license_type: no_license
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
import time
driver=webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://testautomationpractice.blogspot.com/")
driver.maximize_window()
#Click on the "Alert" button to generate the Simple Alert
driver.find_element_by_xpath("//button[normalize-space()='Click Me']").click()
#Switch the control to the Alert window
obj=driver.switch_to.alert
time.sleep(5)
#Retrieve the message on the Alert window
message=obj.text
print("Alert shows following message: "+ message)
time.sleep(5)
#use the accept() method to accept the alert
obj.accept()
#get the text returned when OK Button is clicked.
txt=driver.find_element_by_css_selector("#demo")
print(" Clicked on the OK Button in the Alert Window : ",txt.text)
# Section 2
# Re-generate the Confirmation Alert
button = driver.find_element_by_xpath("//button[normalize-space()='Click Me']")
button.click()
time.sleep(2)
#Switch the control to the Alert window
obj1 = driver.switch_to.alert
# Dismiss the Alert using
obj1.dismiss()
#get the text returned when Cancel Button is clicked.
txt1=driver.find_element_by_css_selector("#demo")
print(" Clicked on the Cancle Button in the Alert Window : ",txt1.text)
driver.switch_to.default_content()#use it to come back to default page
time.sleep(2)
driver.close()
download_success: true

blob_id: 916894d40ad8c18b4af1d39580e233120a8c04da | language: Python | repo_name: jawozele/Python_Files | path: /Lists.py | src_encoding: UTF-8 | length_bytes: 376 | score: 3.984375 | int_score: 4 | detected_licenses: [] | license_type: no_license
# A program that stores a list of cars. The output displays all cars saved in the list.
cars = ['Mercedes Benz',
'Toyota',
'BMW',
'Hyundai',
'Mitsubishi',
'Land Rover',
'Audi',
'Ford Focus']
#print(cars[0])
#print(cars[1:4])
#print(len(cars))
cars.sort()
cars.append('Chrysler')
cars.remove('Hyundai')
print (cars)
download_success: true

blob_id: fcd65a1995c8e7ffa79dd1aaf83ee314e54f297b | language: Python | repo_name: neurospin/pySnpImpute | path: /pysnpimpute/imputation.py | src_encoding: UTF-8 | length_bytes: 3,873 | score: 2.53125 | int_score: 3 | detected_licenses: ["LicenseRef-scancode-cecill-b-en"] | license_type: permissive
# -*- coding: utf-8 -*-
"""
Defines a set of functions to run the imputation using Impute2.
The module requires Impute2 to be installed.
"""
import os
import pysnpimpute
from pysnpimpute.utils import (check_installation_of_required_softwares,
check_chromosome_name,
check_existence_of_paths,
run_cmd_and_check)
def impute(chromosome, from_bp, to_bp, hap, sample, ref_hap, ref_legend,
recombination_map, outdir=None, to_impute=None, Ne=20000,
buffer_kb=250, allow_large_regions=False, basename=None,
suffix=None, impute2_exe="impute2", logger=None):
"""
Run imputation using Impute2
Parameters
----------
chromosome: str
Name of chromosome or X chromosome region.
Accepted names: "1", ..., "22", "X_PAR1", "X_nonPAR" and "X_PAR2".
from_bp, to_bp: int
The interval in basepair position to impute.
hap: str
Path to the phased data to impute in Impute2 format.
sample: str
Path to samples to impute in Impute2 format.
ref_hap, ref_legend: str
Path to reference panel file in Shapeit2/Impute2 format.
recombination_map: str
Path to the recombination map required by Shapeit2 and Impute2.
outdir: str
Path to directory where to output.
to_impute: str, default None
Path to the list of variants to impute. By default imputation is done
for all variants of the reference panel.
Ne: int, default 20000
Impute2 'Ne' parameter.
buffer_kb: int, default 250
Impute2 '-buffer' parameter. Length of buffer region in kb (NOT IN
BASEPAIR) to include on each side of the analysis interval.
allow_large_regions: bool, default False
By default Impute2 does not allow imputation on a region of size > 7Mb.
To force imputation on a bigger region, set this option to True.
basename, suffix: str, default None.
Output path is <outdir>/<basename><suffix>.
By default basename is <hap> filename without .hap.gz extension and
suffix is '.impute2.<from_bp>_<to_bp>'.
impute2_exe: str, default "impute2"
Path to the impute2 executable or alias if it's in $PATH.
logger: logging object, defaut None.
To activate logging, pass a logging object.
"""
# Check that Impute2 is installed
check_installation_of_required_softwares(dict(Impute2=impute2_exe))
check_chromosome_name(chromosome)
if outdir is None:
outdir = os.path.dirname(hap)
# Check existence of input files
paths_to_check = [hap, sample, ref_hap, ref_legend, recombination_map,
outdir]
if to_impute is not None:
paths_to_check += [to_impute]
check_existence_of_paths(paths_to_check)
if basename is None:
basename = os.path.basename(hap).split(".gz")[0].split(".hap")[0]
if suffix is None:
suffix = ".impute2.{}_{}".format(from_bp, to_bp)
imputed_hap = os.path.join(outdir, basename + suffix)
cmd = [impute2_exe,
"-use_prephased_g",
"-known_haps_g", hap,
"-sample_g", sample,
"-h", ref_hap,
"-l", ref_legend,
"-m", recombination_map,
"-Ne", str(Ne),
"-int", str(from_bp), str(to_bp),
"-buffer", str(buffer_kb),
"-o", imputed_hap]
if to_impute is not None:
cmd += ["-include_snps", to_impute]
if allow_large_regions:
cmd += ["-allow_large_regions"]
if chromosome in pysnpimpute.X_REGIONS:
cmd += ["-chrX"]
if chromosome.startswith("X_PAR"):
cmd += ["-Xpar"]
run_cmd_and_check(cmd, logger=logger)
return imputed_hap
download_success: true

blob_id: 453cd742d0933bee7d1ca3baa27e65e1013f4a8a | language: Python | repo_name: homebysix/grahamgilbert-recipes | path: /ShardOverTimeProcessor/ShardOverTimeProcessor.py | src_encoding: UTF-8 | length_bytes: 7,078 | score: 2.703125 | int_score: 3 | detected_licenses: ["Apache-2.0"] | license_type: permissive
#!/usr/local/autopkg/python
#
"""See docstring for ShardOverTimeProcessor class"""
from __future__ import absolute_import, division, print_function
import datetime
import sys
from autopkglib import Processor, ProcessorError
__all__ = ["ShardOverTimeProcessor"]
DEFAULT_CONDITION = "shard"
DEFAULT_DELAY_HOURS = 0
DEFAULT_SHARD_DAYS = 5
DEFAULT_WORKING_HOURS = True
class ShardOverTimeProcessor(Processor):
"""This processor will add an installable condition to Munki pkginfo files to roll updates out over a period of time based on a integer value of a configurable condition."""
description = __doc__
input_variables = {
"condition": {
"required": False,
"description": "The condition to use to divide devices. Defaults to \"{}\"".format(DEFAULT_CONDITION)
},
"delay_hours": {
"required": False,
"description": "Number of hours to delay the initial rollout by. Defaults to \"{}\"".format(DEFAULT_DELAY_HOURS)
},
"shard_days": {
"required": False,
"description": "The number of days the rollout will be rolled over. Defaults to \"{}\"".format(DEFAULT_SHARD_DAYS)
},
"working_hours": {
"required": False,
"description": "Restrict rollout times to 9am-6pm (local time). Defaults to \"{}\"".format(DEFAULT_WORKING_HOURS)
}
}
output_variables = {
"installable_condition": {
"description": "The installable condition"
}
}
def next_working_day(self, the_date):
try:
if the_date.weekday() == 5:
# It's a saturday
return the_date.replace(hour=9, minute=00) + datetime.timedelta(days=2)
elif the_date.weekday() == 6:
# It's a sunday
return the_date.replace(hour=9, minute=00) + datetime.timedelta(days=1)
elif the_date.hour not in range(9,18):
print(("{} is not between 9 and 18".format(the_date)))
print(("Sending {} back to next_working_day".format(the_date.replace(hour=9, minute=00) + datetime.timedelta(days=1))))
# The time is not in working hours, call ourself with tomorrow as the date
return self.next_working_day(the_date.replace(hour=9, minute=00) + datetime.timedelta(days=1))
else:
return the_date
except BaseException as err:
# handle unexpected errors here
exc_type, exc_obj, exc_tb = sys.exc_info()
error_string = "error: {}, line: {}".format(err, exc_tb.tb_lineno)
raise ProcessorError(error_string)
def main(self):
try:
condition = self.env.get("condition", DEFAULT_CONDITION)
delay_hours = int(self.env.get("delay_hours", DEFAULT_DELAY_HOURS))
shard_days = int(self.env.get("shard_days", DEFAULT_SHARD_DAYS))
working_hours = bool(self.env.get("working_hours", DEFAULT_WORKING_HOURS))
output_string = ""
now = datetime.datetime.now()
target_date = now + datetime.timedelta(days=shard_days)
start_date = now + datetime.timedelta(hours=delay_hours)
date_format = "%Y-%m-%dT%H:%M:%SZ"
# We also only deploy monday to friday
if working_hours:
# If working hours, we only have 9 hours a day.
# We are only going to start deploying (or more if delay_hours > 24) at 9am
if delay_hours > 24:
start_date = now + datetime.timedelta(hours=delay_hours)
if start_date.hour > 18 and start_date.hour < 9:
start_date = start_date.replace(hour=9, minute=00) + datetime.timedelta(days=1)
else:
start_date = start_date.replace(hour=9, minute=00)
# make sure it's a working day
start_date = self.next_working_day(start_date)
# how many working hours between now and end of shard_days
increment = datetime.timedelta(minutes= (9 * shard_days * 60) / 10)
current_deploy_date = start_date
output_string += "("
for group in range(0, 10):
group = (group + 1) * 10
deploy_time = self.next_working_day(current_deploy_date + increment)
print(("group: {} deploy_time: {}".format(group, deploy_time)))
output_string += "({} <= {} AND date > CAST(\"{}\", \"NSDate\")) OR ".format(condition, group, deploy_time.strftime(date_format))
current_deploy_date = deploy_time
output_string = output_string[:-4]
output_string += ")"
else:
# How many do we increment by for each group?
deploy_time = target_date - start_date
time_increment = deploy_time / 10
time_10 = start_date
# if working_hours is true, make sure the start time is between 9 and 6
time_20 = start_date + (time_increment * 2)
time_30 = start_date + (time_increment * 3)
time_40 = start_date + (time_increment * 4)
time_50 = start_date + (time_increment * 5)
time_60 = start_date + (time_increment * 6)
time_70 = start_date + (time_increment * 7)
time_80 = start_date + (time_increment * 8)
time_90 = start_date + (time_increment * 9)
time_100 = start_date + (time_increment * 10)
output_string = """({} <= 10 AND date > CAST("{}", "NSDate")) OR {} <= 20 AND date > CAST("{}", "NSDate")) OR {} <= 30 AND date > CAST("{}", "NSDate")) OR {} <= 40 AND date > CAST("{}", "NSDate")) OR {} <= 50 AND date > CAST("{}", "NSDate")) OR {} <= 60 AND date > CAST("{}", "NSDate")) OR {} <= 70 AND date > CAST("{}", "NSDate")) OR {} <= 80 AND date > CAST("{}", "NSDate")) OR {} <= 90 AND date > CAST("{}", "NSDate")) OR {} <= 100 AND date > CAST("{}", "NSDate"))
""".format(condition, time_10.strftime(date_format), condition, time_20.strftime(date_format), condition, time_30.strftime(date_format), condition, time_40.strftime(date_format), condition, time_50.strftime(date_format), condition, time_60.strftime(date_format), condition, time_70.strftime(date_format), condition, time_80.strftime(date_format), condition, time_90.strftime(date_format), condition, time_100.strftime(date_format))
# print(output_string)
self.env["installable_condition"] = output_string
except BaseException as err:
# handle unexpected errors here
exc_type, exc_obj, exc_tb = sys.exc_info()
error_string = "error: {}, line: {}".format(err, exc_tb.tb_lineno)
raise ProcessorError(error_string)
if __name__ == "__main__":
PROCESSOR = ShardOverTimeProcessor()
PROCESSOR.execute_shell()
download_success: true

blob_id: a4cadb46aaa9304168c671497ce34605c3854f5c | language: Python | repo_name: manoelsslima/datasciencedegree | path: /tarefas/tarefa01/e01q03.py | src_encoding: UTF-8 | length_bytes: 285 | score: 4.09375 | int_score: 4 | detected_licenses: [] | license_type: no_license
'''
Write a program that asks the user for a number (a string), converts it to float and then prints it on the screen. Can you do the same thing, but converting to int instead?
'''
numero_str = input('Informe um número: ')
numero_float = float(numero_str)
print(numero_float)
download_success: true

blob_id: 96a0fde6393b2d792fc17543720ba0414c621f10 | language: Python | repo_name: Yangqqiamg/Python-text | path: /基础学习/python_work/Chapter 9/text.py | src_encoding: UTF-8 | length_bytes: 3,527 | score: 4.03125 | int_score: 4 | detected_licenses: [] | license_type: no_license
#one
class Dog():
"""docstring for dog"""
def __init__(self, name, age):
self.name = name
self.age = age
def sit(self):
print(self.name.title() + " is now sitting. ")
pass
def roll_over(self):
print(self.name.title() + " rolled over! ")
pass
#two
my_dog = Dog('willie', 7)
print("My dog's name is " + my_dog.name.title() + '.')
print("My dog is " + str(my_dog.age) + ' years old. ')
print('')
#three
my_dog.sit()
my_dog.roll_over()
print('')
#four
my_dog = Dog('red', 9)
your_dog = Dog('green', 12)
print("My dog's name is " + my_dog.name.title() + '.')
print("My dog is " + str(my_dog.age) + ' years old. ')
my_dog.sit()
my_dog.roll_over()
print("\nyour dog's name is " + your_dog.name.title() + '.')
print("your dog is " + str(your_dog.age) + ' years old. ')
your_dog.sit()
your_dog.roll_over()
print('')
#five
class Car():
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
self.odmeter_reading = 0
def get_descripitve_name(self):
long_name = str(self.year) + ' ' + self.make + ' ' + self.model
return long_name
def read_odmeter(self):
print("This car has " + str(self.odmeter_reading) + " miles on it. ")
def update_odmeter(self, mileage):
if mileage >=self.odmeter_reading:
self.odmeter_reading = mileage
else:
print("you can't do it !")
def inc_odmeter(self, miles):
self.odmeter_reading += miles
def fill_gas_tank(self):
print("This full !")
pass
my_new_car = Car('mike', 'aeg0', 8462)
print(my_new_car.get_descripitve_name())
#six
my_new_car.read_odmeter()
#seven
my_new_car.odmeter_reading = 292
my_new_car.read_odmeter()
#eight
my_new_car.update_odmeter(84)
my_new_car.read_odmeter()
my_new_car.update_odmeter(56)
my_new_car.read_odmeter()
my_new_car.inc_odmeter(15)
my_new_car.read_odmeter()
#nine and "homework 9-9"
'''from car (five)'''
class Battery():
def __init__(self,battery_size=70):
self.battery_size = battery_size
def describe_battery(self):
print("This car has a " + str(self.battery_size) + "-kWh battery. ")
pass
def get_range(self):
if self.battery_size == 70:
range = 240
elif self.battery_size == 85:
range = 270
message = "This car can go approximately " + str(range)
message += " miles on a full charge."
print(message)
pass
def upgrade(self):
if self.battery_size != 85:
self.battery_size = 85
pass
class ElectricCar(Car):
def __init__(self, make, model, year):
super().__init__(make, model, year)
self.battery = Battery()
def fill_gas_tank(self):
print("This car doesn's have a gas tank! ")
pass
my_tesla = ElectricCar('tesla', 'model s', 2006)
print(my_tesla.get_descripitve_name())
my_tesla.battery.describe_battery()
my_tesla.fill_gas_tank()
my_tesla.battery.get_range()
my_tesla.battery.upgrade()
my_tesla.battery.get_range()
# my_tesla.battery.battery_size = 85
# my_tesla.battery.get_range()
#thirteen
from collections import OrderedDict
flavor_languages = OrderedDict()
flavor_languages['joe'] = 'python'
flavor_languages['mary'] = 'c'
flavor_languages['make'] = 'ruby'
flavor_languages['phil'] = 'python'
for name ,language in flavor_languages.items():
print(name.title() + "'s favorite language is " +
language.title())
download_success: true

blob_id: 8353697b7d39a0ce64c99db7e0eb8752fae0c9be | language: Python | repo_name: nijjumon/py | path: /basic/math.py | src_encoding: UTF-8 | length_bytes: 496 | score: 3.796875 | int_score: 4 | detected_licenses: [] | license_type: no_license
import math
a=input("enter length")
b=input("enter base")
c=input("enter hypotenuse")
a=float(a)
b=float(b)
c=float(c)
if a==0 or b==0 or c==0:
print("invalid input")
elif a+b>c and b+c>a and a+c>b:
perimeter=a+b+c
s=perimeter/2
area=math.sqrt(s*(s-a)*(s-b)*(s-c))
# print("the area and perimeter of your triangle is {} and {}".format(area,perimeter))
print("the area and perimeter of your triangle is %f and %f" %(area,perimeter))
else:
print("invalid triangle")
download_success: true

blob_id: 165e1011593a8b974cff25dbee6fff1f9384f878 | language: Python | repo_name: ultrajedinotreal/pprac | path: /helloworld.py | src_encoding: UTF-8 | length_bytes: 184 | score: 3.3125 | int_score: 3 | detected_licenses: [] | license_type: no_license
a = int(input("Enter the number of hellos you need to fuck off"))
i=0
for i in range ( a):
print("HELLO THERE")
print("General Kenobi")
print("You are a bold one")
download_success: true

blob_id: 0af065722bb5dc739a4cdaefc91613f3dcceb025 | language: Python | repo_name: MohammedAlJaff/1MD120_Deep_Learning_for_Image_Analysis_Assignments | path: /assignment_1/.ipynb_checkpoints/ex_1_4_model_1-checkpoint.py | src_encoding: UTF-8 | length_bytes: 2,009 | score: 3.171875 | int_score: 3 | detected_licenses: [] | license_type: no_license
import numpy as np
import matplotlib.pyplot as plt
from maj_linear_model import LinearRegressionModel, standarize_data
from load_auto import load_auto
if __name__ =='__main__':
# Load automobile data-set
Xraw, y = load_auto()
# Standardize data matrix
X = standarize_data(Xraw)
horsepower_column_j = 2
X1 = X[:,horsepower_column_j].reshape([np.size(X[:,horsepower_column_j]),1])
###
learning_rates = [1, 1e-1, 1e-2, 1e-3, 1e-4]
training_curves = []
for i in range(len(learning_rates)):
#define model
maj_1_model = LinearRegressionModel(data_X=X1, true_label_Y=y)
# fit model with learning rate
lr = learning_rates[i]
w1, b1, j1 = maj_1_model.train_linear_model(X=X1, y_true=y, nr_iter=1000, learning_rate=lr)
training_curves.append(j1)
# save and append training cost trajectories
plt.figure(figsize=[15,5])
for i in range(len(learning_rates)):
plt.plot(training_curves[i], label='$\\alpha$ = '+str(learning_rates[i]))
plt.legend()
plt.legend()
plt.xlabel('iterations')
plt.ylabel('Training Cost/Emperical Risk')
plt.title('Model 1: Using only "Horsepower" as the input data feature')
lr = 0.01
maj_1_model = LinearRegressionModel(data_X=X1, true_label_Y=y)
w1, b1, j1 = maj_1_model.train_linear_model(X=X1, y_true=y, nr_iter=1000, learning_rate=lr)
y_pred = maj_1_model.predict(X1)
plt.figure(figsize=[12,5])
plt.scatter(X1, y)
plt.plot(X1, y_pred, 'kx', label = 'data-point predictions')
plt.plot(X1, y_pred, '-r', label = 'line eq: '+str(w1[0,0])[:6]+'x + ' + str(b1)[:5])
plt.legend()
plt.title(f'Model 1: Horsepower v. MPG a & best-fit line from grad-decent optimization - lr = {lr}')
plt.ylabel('miles per gallon (mpg) ')
plt.xlabel('horsepower (Standardized values)')
plt.savefig('ex_1_4_Model1_horsepower_vs_mpg.png')
plt.show()
download_success: true

blob_id: 766c810eacbf2826e62dd8e2c9c4c7c327f14991 | language: Python | repo_name: nymwa/ante2 | path: /tunimi/tokenizer.py | src_encoding: UTF-8 | length_bytes: 811 | score: 2.984375 | int_score: 3 | detected_licenses: [] | license_type: no_license
import re
from .vocabulary import Vocabulary
class Tokenizer:
def __init__(self):
self.vocab = Vocabulary()
self.proper_pattern = re.compile(r'^([AIUEO]|[KSNPML][aiueo]|[TJ][aueo]|W[aie])n?(([ksnpml][aiueo]|[tj][aueo]|w[aie])n?)*$')
def convert(self, x):
if x in self.vocab.indices:
return self.vocab.indices[x]
elif x.isdecimal():
return self.vocab.number_id
elif self.proper_pattern.match(x) and ('nm' not in x) and ('nn' not in x):
return self.vocab.proper_id
else:
return self.vocab.unk_id
def __call__(self, x, as_str=False):
x = x.split()
x = [self.convert(token) for token in x]
if as_str:
x = ' '.join([self.vocab[token] for token in x])
return x
download_success: true

blob_id: bbbd7dc7131651d5d571194b058f8373c5ffe86a | language: Python | repo_name: superwj1990/AdCo | path: /VOC_CLF/dataset.py | src_encoding: UTF-8 | length_bytes: 1,855 | score: 3.03125 | int_score: 3 | detected_licenses: ["MIT"] | license_type: permissive
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 23:23:51 2019
@author: Keshik
"""
import torchvision.datasets.voc as voc
class PascalVOC_Dataset(voc.VOCDetection):
"""`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset.
Args:
root (string): Root directory of the VOC Dataset.
year (string, optional): The dataset year, supports years 2007 to 2012.
image_set (string, optional): Select the image_set to use, ``train``, ``trainval`` or ``val``
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
(default: alphabetic indexing of VOC's 20 classes).
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, required): A function/transform that takes in the
target and transforms it.
"""
def __init__(self, root, year='2012', image_set='train', download=False, transform=None, target_transform=None):
super().__init__(
root,
year=year,
image_set=image_set,
download=download,
transform=transform,
target_transform=target_transform)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is the image segmentation.
"""
return super().__getitem__(index)
def __len__(self):
"""
Returns:
size of the dataset
"""
return len(self.images)
download_success: true

blob_id: b9f79d7608c0882f7c3f01e64711067438048f94 | language: Python | repo_name: kamillk/Homework2 | path: /resistance.py | src_encoding: UTF-8 | length_bytes: 2,212 | score: 2.734375 | int_score: 3 | detected_licenses: [] | license_type: no_license
import sys
import xml.dom.minidom
import time
from matrixops import floyd_warshall
from copy import deepcopy
doc = xml.dom.minidom.parse(sys.argv[1])
elements = doc.getElementsByTagName('net')
number = elements.length;
d = [[float("+inf") for x in range(number+1)] for y in range(number+1)]
for i in range(1,number+1):
d[i][i] = 0;
capactor = doc.getElementsByTagName('capactor')
resistor = doc.getElementsByTagName('resistor')
diode = doc.getElementsByTagName('diode')
for i in range(capactor.length):
res = float(capactor[i].attributes['resistance'].value)
a = int(capactor[i].attributes['net_from'].value)
b = int(capactor[i].attributes['net_to'].value)
d[a][b] = 1/ (1/d[a][b] + 1/res)
d[b][a] = 1/ (1/d[b][a] + 1/res)
for i in range(resistor.length):
res = float(resistor[i].attributes['resistance'].value)
a = int(resistor[i].attributes['net_from'].value)
b = int(resistor[i].attributes['net_to'].value)
d[a][b] = 1/ (1/d[a][b] + 1/res)
d[b][a] = 1/ (1/d[b][a] + 1/res)
for i in range(diode.length):
res = float(diode[i].attributes['resistance'].value)
res_rev = float(diode[i].attributes['reverse_resistance'].value)
a = int(diode[i].attributes['net_from'].value)
b = int(diode[i].attributes['net_to'].value)
d[a][b] = 1/ (1/d[a][b] + 1/res)
d[b][a] = 1/ (1/d[b][a] + 1/res_rev)
cur_d = deepcopy(d)
start_python = time.time()
for k in range(1,number+1):
for i in range(1,number+1):
for j in range(1,number+1):
if d[i][j] == 0 or d[i][k] == 0 and d[k][j] == 0:
d[i][j] = 0
elif d[i][j] == float("+inf") and d[i][k] == float("+inf") or d[i][j] == float("+inf") and d[k][j] == float("+inf"):
d[i][j] = float("+inf")
else:
d[i][j] = 1/ (1/d[i][j]+ 1/(d[i][k]+d[k][j]))
finish_python = time.time()
start_c = time.time()
d = floyd_warshall(cur_d)
finish_c = time.time()
cur_f = sys.argv[2]
f = open(cur_f, 'w')
for i in range(1,number+1):
for j in range(1,number+1):
f.write("{},".format(round(d[i][j],6)))
f.write("\n")
f.close()
print((finish_python - start_python)/(finish_c - start_c))
download_success: true

blob_id: 5d9cc23c0f49805122d8bfdf4c39154df281af1b | language: Python | repo_name: MoShrank/code-design-python-task | path: /zahlenraten.py | src_encoding: UTF-8 | length_bytes: 309 | score: 4.03125 | int_score: 4 | detected_licenses: [] | license_type: no_license
goal_number = 100
def check_number(x):
if x == goal_number:
print("congratulations. You won, Your number is right")
elif goal_number < x:
print("the number is lower")
else:
print("the number is higher")
inp = input("guess the number: ")
inp = int(inp)
check_number(inp)
download_success: true

blob_id: 472b600312d435e2476fe5977a407e08a6fe5c33 | language: Python | repo_name: MiekoHayasaka/Python_Training | path: /day0210/lesson3.py | src_encoding: UTF-8 | length_bytes: 143 | score: 3.546875 | int_score: 4 | detected_licenses: [] | license_type: no_license
def sumof(n):
if n <= 1:
return n
else:
return n*sumof(n-1)
num=int(input('正の整数>'))
ans=sumof(num)
print(ans)
download_success: true

blob_id: bf8f3c2c8176999a747f9f27bf1ff7891a9c8f80 | language: Python | repo_name: csun87/dworp | path: /tests/test_agent.py | src_encoding: UTF-8 | length_bytes: 1,592 | score: 2.8125 | int_score: 3 | detected_licenses: ["BSD-3-Clause"] | license_type: permissive
# Copyright 2018, The Johns Hopkins University Applied Physics Laboratory LLC
# All rights reserved.
# Distributed under the terms of the Modified BSD License.
from dworp.agent import *
import unittest
class IdentifierHelperTest(unittest.TestCase):
def test(self):
# trivial test that serves as an example
id_gen = IdentifierHelper.get(50)
self.assertEqual(50, next(id_gen))
self.assertEqual([51, 52, 53], [next(id_gen) for x in range(3)])
class AgentTest(unittest.TestCase):
class MockAgent(Agent):
def step(self, now, env):
pass
def test_creation_with_size(self):
a = AgentTest.MockAgent("name", 5)
self.assertEqual(5, a.state.shape[0])
def test_creation_without_size(self):
a = AgentTest.MockAgent("name", 0)
self.assertIsNone(a.state)
class SelfNamingAgentTest(unittest.TestCase):
class MockAgent(SelfNamingAgent):
def step(self, now, env):
pass
def test_id(self):
a1 = SelfNamingAgentTest.MockAgent(5)
a2 = SelfNamingAgentTest.MockAgent(5)
self.assertEqual(1, a1.agent_id)
self.assertEqual(2, a2.agent_id)
class TwoStageAgentTest(unittest.TestCase):
class MockAgent(TwoStageAgent):
def step(self, now, env):
self.next_state[1] = 42
def test_state_switch_on_complete(self):
agent = TwoStageAgentTest.MockAgent(agent_id=1, size=5)
agent.step(0, None)
agent.complete(0, None)
self.assertEqual(0, agent.state[0])
self.assertEqual(42, agent.state[1])
download_success: true

blob_id: 6e42a3363e6a3f4e04f0a9d0d3706fa7d4a313d0 | language: Python | repo_name: mlarkin00/mslarkin-experiments | path: /gae-budget-alert/main.py | src_encoding: UTF-8 | length_bytes: 2,782 | score: 2.65625 | int_score: 3 | detected_licenses: [] | license_type: no_license
import base64
import json
import os
import logging
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
PROJECT_ID = os.getenv('GCP_PROJECT')
APP_NAME = f"{PROJECT_ID}"
#Which alert threshold should trigger the shutdown (e.g. 100% of set budget)
TRIGGER_THRESHOLD = 1.0
def check_app(data, context):
"""
Checks Budget Alert Pub/Sub message and disables App Engine
if costs have exceeded the desired budget
"""
#Extract relevant Pub/Sub message content - (Message format: https://cloud.google.com/billing/docs/how-to/budgets#notification_format)
pubsub_data = base64.b64decode(data['data']).decode('utf-8')
pubsub_json = json.loads(pubsub_data)
cost_amount = pubsub_json['costAmount']
budget_amount = pubsub_json['budgetAmount']
budget_name = pubsub_json['budgetDisplayName']
alert_threshold = pubsub_json['alertThresholdExceeded']
# Check if we've hit the set limit (alert_threshold = .8 for 80%, 1.0 for 100%, etc.)
if alert_threshold < TRIGGER_THRESHOLD:
print(
f'No action necessary at {alert_threshold} for {budget_name}.\n'
f'Current Cost: {cost_amount}\n'
f'Budget Amount: {budget_amount}'
)
return
# Get the Apps object (http://googleapis.github.io/google-api-python-client/docs/dyn/appengine_v1.apps.html)
appengine = discovery.build(
'appengine',
'v1',
cache_discovery=False,
credentials=GoogleCredentials.get_application_default()
)
apps = appengine.apps()
# Get the current servingStatus
current_status = __get_app_status(APP_NAME, apps)
print(f'Current servingStatus: {current_status}')
# If app is serving, disable it
if current_status == "SERVING":
logging.warning(
f'Budget threshold exceeded, disabling app {APP_NAME}\n'
f'Budget Alert: {budget_name}\n'
f'Budget Threshold: {alert_threshold}\n'
f'Budget Amount: {budget_amount}\n'
f'Current Cost: {cost_amount}'
)
__toggle_app(APP_NAME, apps, "USER_DISABLED")
else:
print(
f'Budget threshold exceeded, but {APP_NAME} is already disabled\n'
f'Budget Alert: {budget_name}'
)
return
return
def __get_app_status(app_name, apps):
"""
Get the current serving status of the app
"""
app = apps.get(appsId=app_name).execute()
return app['servingStatus']
def __toggle_app(app_name, apps, set_state):
"""
Enables or Disables the app, depending on set_state
"""
body = {'servingStatus': set_state}
app = apps.patch(appsId=app_name, updateMask='serving_status', body=body).execute()
return
download_success: true

blob_id: 69c5fd7d6874709e24f44ed06c4fd3a008502806 | language: Python | repo_name: AngelLiang/programming-in-python3-2nd-edition | path: /py3book31/py31eg/findduplicates-m.py | src_encoding: UTF-8 | length_bytes: 2,969 | score: 2.515625 | int_score: 3 | detected_licenses: ["MIT", "GPL-3.0-only", "GPL-1.0-or-later", "LGPL-2.0-or-later"] | license_type: permissive
#!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import locale
locale.setlocale(locale.LC_ALL, "")
import collections
import hashlib
import optparse
import os
import multiprocessing
def main():
opts, path = parse_options()
data = collections.defaultdict(list)
if opts.verbose:
print("Creating file list...")
for root, dirs, files in os.walk(path):
for filename in files:
fullname = os.path.join(root, filename)
try:
key = (os.path.getsize(fullname), filename)
except EnvironmentError:
continue
if key[0] == 0:
continue
data[key].append(fullname)
items = []
for key in sorted(data):
if len(data[key]) > 1:
items.append((key[0], tuple(data[key])))
if items:
pool = multiprocessing.Pool()
pool.map_async(check_one_item, items, 1, print_result)
pool.close()
pool.join()
def check_one_item(item):
filenames = item[1]
md5s = collections.defaultdict(set)
for filename in filenames:
try:
md5 = hashlib.md5()
with open(filename, "rb") as fh:
md5.update(fh.read())
md5 = md5.digest()
md5s[md5].add(filename)
except EnvironmentError:
continue
results = []
for filenames in md5s.values():
if len(filenames) == 1:
continue
results.append("Duplicate files ({0:n} bytes):\n\t{1}".format(
item[0], "\n\t".join(sorted(filenames))))
return "\n".join(results)
def print_result(results):
for result in results:
if result:
print(result)
def parse_options():
parser = optparse.OptionParser(
usage=("usage: %prog [options] [path]\n"
"outputs a list of duplicate files in path "
"using the MD5 algorithm\n"
"ignores zero-length files\n"
"path defaults to ."))
parser.add_option("-v", "--verbose", dest="verbose",
default=False, action="store_true")
parser.add_option("-d", "--debug", dest="debug", default=False,
action="store_true")
opts, args = parser.parse_args()
return opts, args[0] if args else "."
if __name__ == "__main__": # This is *vital* on Windows!
main()
download_success: true

blob_id: 6678465514f55498980a1f70ab70d669d8ea8815 | language: Python | repo_name: Johanjimenez97/Estructura-de-Datos-1 | path: /tabla2.py | src_encoding: UTF-8 | length_bytes: 1,139 | score: 2.640625 | int_score: 3 | detected_licenses: [] | license_type: no_license
class Generador:
def generaTabla(self, tabla):
codigo = ""
for t in tabla:
codigo = codigo + "<tr>"
for j in t.split(","):
if j == 'Oro':
j= '<img src="static/img/Oro.jpg" width="50px" heigth="50px">'
elif j== 'Plata':
j = '<img src="static/img/Plata.jpg" width="50px" heigth="50px">'
elif j== 'Bronce':
j = '<img src="static/img/Bronce.jpg" width="50px" heigth="50px">'
elif j== 'Cobre':
j = '<img src="static/img/Cobre.jpg" width="50px" heigth="50px">'
if j == '1':
j= '<img src="static/img/Rugal.jpg" width="50px" heigth="50px">'
codigo = codigo + "<td>" + j + "</td>"
codigo = codigo + "</tr>"
codigo = "<table>" + codigo + "</table>"
return codigo
def generarTituloParrafo(self, titulo, parrafo):
titulo = "<h1 >" + titulo + "</h1>"
parrafo = "<p>" + parrafo + "</p>"
return titulo + parrafo
download_success: true

blob_id: f5714a6f0eff91cbb48a8d20a6409ae100eecf4e | language: Python | repo_name: chishu-amenomoriS/wiktionary-tools | path: /python/research/countlines-BytesIO.py | src_encoding: UTF-8 | length_bytes: 406 | score: 2.75 | int_score: 3 | detected_licenses: ["CC0-1.0"] | license_type: permissive
import bz2, io
with open("streamlen.tsv") as f:
target = f.readline().strip()
slen = [int(line) for line in f.readlines()]
lines = 0
with open(target, "rb") as f:
for length in slen:
with io.BytesIO(f.read(length)) as b:
with bz2.open(b, "rt", encoding="utf-8") as t:
while (line := t.readline()):
lines += 1
print(f"lines: {lines:,}")
download_success: true

blob_id: 8c7a52caf91014fcce8fab8139b0368e0492e748 | language: Python | repo_name: surferek/Machine-Learning | path: /cleaning_data.py | src_encoding: UTF-8 | length_bytes: 2,469 | score: 3.71875 | int_score: 4 | detected_licenses: [] | license_type: no_license
# -*- coding: utf-8 -*-
# Examples of cleaning data methods in Python and some introduction into preprocessing
# Libraries
import numpy as np
import pandas as pd
# And also sklearn
# Reading data to DataFrame ===================================================
dataFrame = pd.read_csv("MyData")
# Detecting missing data ======================================================
pd.isnull(dataFrame)
# Replacing specific values with new ones
dataFrame.replace(to_replace="Old_value", value="New_value")
# Removing all missing data
dataFrame.dropna()
# Removing missing data from specific columns
dataFrame.dropna(subset=['Column_1'])
# Imputing missing data with mean values
from sklearn.preprocessing import Imputer
imput = Imputer(missing_values='NaN', strategy='mean', axis=0)
imput = imput.fit(dataFrame)
imputedData = imput.transform(dataFrame.values)
# Dealing with outliers =======================================================
# Get the 98th and 2nd percentile as the limits of our outliers
upperBoundary = np.percentile(dataFrame.values, 98)
lowerBoundary = np.percentile(dataFrame.values, 2)
# Filter the outliers from the dataframe
AnotherDataFrame["ColName"].loc[dataFrame["ColName"]>upperBoundary] = upperBoundary
AnotherDataFrame["ColName"].loc[dataFrame["ColName"]<lowerBoundary] = lowerBoundary
# Handling with categorical data ==============================================
# Unify the names of categorical data
# Whole string lower case
[i.lower() for i in dataFrame["ColName"]]
# First letter capitalised
[i.capitalize() for i in dataFrame["ColName"]]
# Convert categorical data into integers
from sklearn.preprocessing import LabelEncoder
target_feature = 'Some feature name'
# Using encoder and transform
encoder = LabelEncoder()
enc_Values = encoder.fit_transform(dataFrame[target_feature].values)
dataFrame[target_feature] = pd.Series(enc_Values, index=dataFrame.index)
# One-hot encode categorical features
from sklearn.preprocessing import OneHotEncoder
oneHotEncoder = OneHotEncoder(categorical_features=[0])
dataFrame = oneHotEncoder.fit_transform(dataFrame).toarray()
# Creating dummy features
dataFrame = pd.get_dummies(dataFrame)
# Scaling features ============================================================
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
train_dataFrame= sc.fit_transform(train_dataFrame)
test_dataFrame= sc.transform(test_dataFrame)
| true
|
7df38c62717b882f6a93c359a9771b8fc576c87c
|
Python
|
c0mmand3r3/twitter_covid19
|
/examples/data_split_example.py
|
UTF-8
| 1,987
| 2.78125
| 3
|
[] |
no_license
|
"""
- Author : Anish Basnet
- Email : anishbasnetworld@gmail.com
- Date : Tuesday, July 13, 2021
"""
import os
import pandas as pd
from tweeter_covid19.utils import mkdir
TOTAL_SET = 10
if __name__ == '__main__':
read_path = os.path.join('data', 'original', 'covid19_tweets_refactor.csv')
write_path = os.path.join('data', 'fold_dataset')
data = pd.read_csv(read_path)
positive_label_data = data.query('Label == 1')
negative_label_data = data.query('Label == -1')
neutral_label_data = data.query('Label == 0')
for fold in range(TOTAL_SET):
joiner_path = os.path.join(write_path, 'set_' + str(fold + 1))
mkdir(joiner_path)
# positive split
pos_train_data = positive_label_data.sample(frac=0.7)
pos_test_data = positive_label_data.drop(pos_train_data.index)
# negative split
neg_train_data = negative_label_data.sample(frac=0.7)
neg_test_data = negative_label_data.drop(neg_train_data.index)
# neutral split
neu_train_data = neutral_label_data.sample(frac=0.7)
neu_test_data = neutral_label_data.drop(neu_train_data.index)
train_data = [pos_train_data, neg_train_data, neu_train_data]
test_data = [pos_test_data, neg_test_data, neu_test_data]
train_df = pd.concat(train_data)
test_df = pd.concat(test_data)
train_df.to_csv(os.path.join(joiner_path, 'train.csv'))
test_df.to_csv(os.path.join(joiner_path, 'test.csv'))
print('FOLD - {} // Successfully Created ! Train tweets - {} :: Test tweets - {} -- Pos -'
' {}/{} Neg - {}/{} Neu - {}/{}.'.format(fold + 1, train_df.shape[0], test_df.shape[0],
pos_train_data.shape[0], pos_test_data.shape[0],
neg_train_data.shape[0], neg_test_data.shape[0],
neu_train_data.shape[0], neu_test_data.shape[0]))
| true
|
096ae6b9cf256dc77d1b9ceda8990e03a63e3d15
|
Python
|
ye-spencer/RunTracker
|
/src/runner.py
|
UTF-8
| 7,661
| 2.578125
| 3
|
[] |
no_license
|
from os import path
from os import rename
from os import mkdir
from os import listdir
from re import match
from platform import system
FIELDEVENTS = ["Long Jump", "Triple Jump", "Pole Vault", "Discus", "Shotput", "High Jump"]
global NoneType
NoneType = 56725649176543423.456215
#return None
#param String, String, String, String
def writeToFile(name, event, eType, text):
myFile = open("Runners\\%s\\%s\\%s.txt" % (name,event, eType), "a")
myFile.write(text)
myFile.close()
#return List<String>
#param String, String, String
def readFileLBL(name, event, eType):
myFile = open("Runners\\%s\\%s\\%s.txt" % (name, event, eType), "r")
lines = myFile.readlines()
myFile.close()
return lines
#return boolean
#param String
def fileExists(directs):
return path.exists(getFileName(directs))
#return String
#param String
def getFileName(directs):
fileName = "Runners"
for direct in directs:
fileName += "\\%s" % direct
return fileName
#return String
#param String
def getNotVersion(fileName):
ind = fileName.rindex("\\") + 1
return fileName[:ind] + "!" + fileName[ind:]
class Runner (object):
#return None
#param String
def __init__(self, name):
self.name = name
if not fileExists([self.name]):
mkdir(getFileName([self.name]))
def __str__(self):
return self.name + " does the events " + str(self.getEvents())
def __repr__(self):
return self.__str__()
#return String
#param String
def newEvent(self, eventName):
fileName = getFileName([self.name, eventName])
notV = getNotVersion(fileName)
if self.hasEvent(eventName):
return "Event Already Exists"
elif path.exists(notV):
rename(notV, fileName)
else:
mkdir(fileName)
open(getFileName([self.name, eventName, "goal.txt"]), "x").close()
open(getFileName([self.name, eventName, "time.txt"]), "x").close()
return "Event Added"
#return String
#param String
def removeEvent(self, eventName):
fileName = getFileName([self.name, eventName])
if not self.hasEvent(eventName):
return "Event Already Gone"
rename(fileName, getNotVersion(fileName))
return "Event Removed"
#return List<String>
#param None
def getEvents(self):
return [event for event in listdir(getFileName([self.name])) if "!" not in event]
#return boolean
#param String
def hasEvent(self, eventName):
return eventName in self.getEvents()
#return String
#param String, double
def newTime(self, eventName, time):
if self.hasEvent(eventName):
if ("%.2f" % time) not in self.getTimesEvent(eventName):
writeToFile(self.name, eventName, "time", "%.2f\n" % time)
return "Time Added"
return "Time Already Exists"
return "No Such Event"
#return None
#param String, double
def removeTime(self, eventName, time):
oldTimes = self.getTimesEvent(eventName)
self.clearEvent(eventName, "time")
for oldTime in oldTimes:
print(str(oldTime + 1) + " : " + str(time))
if not oldTime == time:
self.newTime(eventName, oldTime)
#return None
#param String, String
def clearEvent(self, eventName, portion):
myFile = open("Runners\\%s\\%s\\%s.txt" % (self.name, eventName, portion), "w")
myFile.close()
#return String
#param String, double
def newGoal(self, eventName, goal):
if self.hasEvent(eventName):
if goal not in self.getGoalsEvent(eventName):
writeToFile(self.name, eventName, "goal", "%.2f\n" % goal)
return "Goal Added"
return "Goal Already Exists"
return "No Such Event"
#return None
#param String, double
def removeGoal(self, eventName, goal):
goals = self.getGoalsEvent(eventName)
self.clearEvent(eventName, "goal")
for oldGoal in goals:
if oldGoal != goal:
writeToFile(self.name, eventName, "goal", "%.2f\n" % goal)
#return List<double>
#param String
def getGoalsEvent(self, eventName):
return [float(goal.strip()) for goal in readFileLBL(self.name, eventName, "goal")]
#return List<double>
#param String
def getTimesEvent(self, eventName):
return [float(time.strip()) for time in readFileLBL(self.name, eventName, "time")]
#return double
#param String
def getPRFieldEvent(self, eventName):
times = self.getTimesEvent(eventName)
return NoneType if len(times) == 0 else max(times)
#return double
#param String
def getPREvent(self, eventName):
times = self.getTimesEvent(eventName)
if eventName in FIELDEVENTS:
return self.getPRFieldEvent(eventName)
return NoneType if len(times) == 0 else min(times)
#return int
#param String
def getGoalsPassedEvent(self, eventName):
return len([goal for goal in self.getGoalsEvent(eventName) if self.getPREvent(eventName) <= goal])
#return int
#param None
def getAllGoalsPassed(self):
return sum(self.getGoalsPassedEvent(event) for event in self.getEvents())
#return double
#param None
def getAveragePoints(self):
try:
return self.getTotalPoints() / len([event for event in self.getEvents() if self.getPREvent(event) != NoneType])
except ZeroDivisionError:
return 0
#return int
#param None
def getTotalPoints(self):
return sum([self.getPointsEvent(event) for event in self.getEvents()])
#return double
#param double, double, double, double
def calculatePoints(self, a, b, c, time):
if time == NoneType:
score = 0
else:
score = a * pow((b - time), c)
try:
return max(score, 0)
except TypeError:
return 0
#return double
#param String
def getPointsEvent(self, event):
if event == "100m":
return self.calculatePoints(25.43471, 18, 1.81, self.getPREvent("100m"))
elif event == "200m":
return self.calculatePoints(3.32725, 42.5, 1.81, self.getPREvent("200m"))
elif event == "300m":
return self.calculatePoints(2.21152, 61, 1.81, self.getPREvent("300m"))
elif event == "400m":
return self.calculatePoints(1.53775, 82, 1.81, self.getPREvent("400m"))
elif event == "800m":
return self.calculatePoints(0.07462, 254, 1.88, self.getPREvent("800m"))
elif event == "1600m":
return self.calculatePoints(0.029828, 512, 1.85, self.getPREvent("1600m"))
return 0
#return String
#param None
def getAllPoints(self):
text = ""
for event in self.getEvents():
text += "%s: %d\n" % (event, self.getPointsEvent(event))
return text
#return String
#param String
def getAllInfoEvent(self, eventName):
toPrint = ""
pr = self.getPREvent(eventName)
if pr != NoneType:
toPrint += "PR: %.2f\n\n" % pr
else:
toPrint += "PR: N/A\n\n"
toPrint += "Points: %d\n\n" % self.getPointsEvent(eventName)
goals = self.getGoalsEvent(eventName)
goals.sort()
passed = self.getGoalsPassedEvent(eventName)
toPrint += "Goals: %d Passed: %d\n\n" % (len(goals), passed)
times = self.getTimesEvent(eventName)
times.sort()
toPrint += "\nTimes: %d\n" % len(times)
for time in times:
toPrint += "%.2f\n" % time
return toPrint
#return String
#param String
def toHTMLEvent(self, eventName):
text = "<h3> %s </h3>\n\n" % eventName
pr = self.getPREvent(eventName)
if pr != NoneType:
text += "<h5> PR: %s </h5>\n\n" % pr
else:
text += "<h5> PR: N/A </h5>\n\n"
goals = self.getGoalsEvent(eventName)
goals.sort()
text += "<h4> Goals: %d Passed: %d</h4>\n\n" % (len(goals), self.getGoalsPassedEvent(eventName))
for goal in goals:
text += "<p> %.2f </p>\n" % goal
text += "<h4> Times </h4>\n\n"
times = self.getTimesEvent(eventName)
times.sort()
for time in times:
text += "<p> %.2f </p>\n" % time
return text
| true
|
7c6207f137d9b410ee0dacb36ea4eeb281885a9a
|
Python
|
gonzalezcjj/andsp
|
/andsp_dwb_dump.py
|
UTF-8
| 986
| 2.75
| 3
|
[] |
no_license
|
import sqlite3
import json
import codecs
conn = sqlite3.connect('content.sqlite')
cur = conn.cursor()
cur.execute('''SELECT d.year,
d.population_value
FROM Country AS c,
Indicator AS i,
Data AS d
WHERE i.indicator_id = d.indicator_id
AND c.country_id = d.country_id
ORDER BY d.year''')
fhand = codecs.open('andsp_dwb_sppopt.js', 'w', "utf-8")
fhand.write("sppopt = [ \n")
fhand.write("['Year','Population'],\n")
count = 0
for data_row in cur :
val = str(data_row[0])
data = val,data_row[1]
try:
js = json.dumps(data)
count = count + 1
if count > 1 : fhand.write(",\n")
output = js
fhand.write(output)
except:
continue
fhand.write("\n];\n")
cur.close()
fhand.close()
print(count, "Output written to andsp_dwb_sppopt.js")
print("Open andsp_dwb_view.htm to visualize the data in a browser")
| true
|
aef56cccf3fb49eab9e9a7d705769bf4d35b1f8c
|
Python
|
ljm516/python-repo
|
/algorithm/knn/knn_algorithm.py
|
UTF-8
| 3,439
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
import csv
import math
import operator
import sys
from random import random
'''
Implementation of the kNN algorithm:
1. Data handling: open the csv file, read the data, and split it into training and test sets
2. Similarity: compute the distance between every pair of data instances
3. Neighbour lookup: find the k neighbours closest to the current instance
4. Response: derive the predicted result from the nearest neighbours
5. Accuracy: evaluate the prediction accuracy
'''
# Load the data set from a file
def load_data_set(data_file, split_rate):
training_set = []
test_set = []
with open(data_file, 'r') as fr:
lines = csv.reader(fr)
data_set = list(lines)
for x in range(len(data_set) - 1):
iter_time = len(data_set[x]) - 1
for y in range(iter_time):
data_set[x][y] = float(data_set[x][y])
# data = [float(d) for d in data_set[x]]
if random() < split_rate:
training_set.append(data_set[x])
# training_set.append(data)
else:
test_set.append(data_set[x])
# test_set.append(data)
return training_set, test_set
# Euclidean distance between two points
def euclidean_distance(instace_1, instace_2, length):
distance = 0
for x in range(length):
distance += pow((instace_1[x] - instace_2[x]), 2)
return math.sqrt(distance)
# Get the k nearest neighbours of a test instance
def get_neighbors(training_set, test_instance, k):
distance = []
length = len(test_instance) - 1
for x in range(len(training_set)):
dist = euclidean_distance(instace_1=test_instance, instace_2=training_set[x], length=length)
distance.append((training_set[x], dist))
distance.sort(key=operator.itemgetter(1))
neighbors = []
for x in range(k):
neighbors.append(distance[x][0])
return neighbors
# Get the predicted result: the class that occurs most often among the neighbours
def get_response(neighbors):
class_votes = {}
for x in range(len(neighbors)):
response = neighbors[x][-1]
if response in class_votes:
class_votes[response] += 1
else:
class_votes[response] = 1
sorted_votes = sorted(class_votes.items(), key=operator.itemgetter(1), reverse=True)
return sorted_votes[0][0]
# Accuracy
def get_accuracy(test_set, predictions):
correct = 0
for x in range(len(test_set)):
if test_set[x][-1] == predictions[x]:
correct += 1
return (correct / float(len(test_set))) * 100.0
if __name__ == '__main__':
data_file = sys.argv[1]
split_rate = 0.8
training_set, test_set = load_data_set(data_file=data_file, split_rate=split_rate)
print('training set: {s}'.format(s=len(training_set)))
print('test set: {s}'.format(s=len(test_set)))
predictions = []
k = 3
for x in range(len(test_set)):
neighbors = get_neighbors(training_set=training_set, test_instance=test_set[x], k=k)
print('neighbors: {n}'.format(n=neighbors))
result = get_response(neighbors=neighbors)
print('result: {r}'.format(r=result))
predictions.append(result)
print('predict: {p}, actual: {a}'.format(p=int(float(result)), a=int(float(test_set[x][-1]))))
print('predictions: {p}'.format(p=predictions))
accuracy = get_accuracy(test_set=test_set, predictions=predictions)
print('accuracy: {a}'.format(a=accuracy))
print('+Done')
| true
|
6e506a56c67263268fdaedd54e8387c66b5e0808
|
Python
|
Emerson53na/exercicios-python-3
|
/029 Radar eletrônico.py
|
UTF-8
| 288
| 3.78125
| 4
|
[] |
no_license
|
n = float(input('Qual é a velocidade atual do carro? km/h'))
valor = n*7-80*7
if n <= 80:
print('\033[32m Tenha um bom dia! Dirija com segurança.\033[m')
else:
print('\033[33m Você está indo muito rápido.\n\033[31mSua multa é de R${:.2f}\033[m'.format(valor))
| true
|
224586cac64daa3ad807d60cdd46ea31c526ea5c
|
Python
|
Voprzybyo/Python
|
/Classes/Calculator/ComplexCalculator_V1.py
|
UTF-8
| 2,795
| 4.28125
| 4
|
[] |
no_license
|
#! /usr/bin/env python
import math
class Complex:
# Constructor
def __init__(self, realpart=0.0, imagpart=0.0):
self.r = realpart
self.i = imagpart
# Conjugate of complex number (imaginary part negation)
def conjugate(self):
self.i = -self.i
# Method that returns complete form of complex number
def val(self):
return '{} + ({})j'.format(self.r, self.i)
# Method adding two complex numbers
def add(self, other):
return Complex(self.r + other.r,
self.i + other.i)
# Method subtracting two complex numbers
def sub(self, other):
return Complex(self.r - other.r,
self.i - other.i)
# Method multiply two complex numbers
def mul(self, other):
return Complex(self.r*other.r - self.i*other.i,
self.i*other.r + self.r*other.i)
# Method returns absolute value of complex number
def abs(self):
return math.sqrt(self.r**2 + self.i**2)
# Create object of Complex class
x = Complex(2.0, -1.0)
y = Complex(2.0, -2.0)
# Test val method (get full form of complex number)
print("Complex number: ", x.val())
# Test add method
z = x.add(y)
print("Complex number adding: (", x.val(), ") + (", y.val(), ") =", z.val())
# Test subtracting method
z = x.sub(y)
print("Complex number adding: (", x.val(), ") - (", y.val(), ") =", z.val())
# Test multiplication method
z = x.mul(y)
print("Complex number multiplication: (", x.val(), ") * (", y.val(), ") =", z.val())
# Test abs method
z = x.abs()
print("Absolute value of ", x.val(), " is: ", z)
print("Starting calculator mode!")
# Enter first complex number
complex_num = input("Enter first complex number(f.e. 1.0 -3.5j) : ")
complex_num = complex_num.split()
complex_num[1] = complex_num[1].replace('j', '') # No matter if user put "j" at imaginary part or not
p = Complex(float(complex_num[0]), float(complex_num[1]))
print(p.val())
# Enter second complex number
complex_num = input("Enter second complex number(f.e. 2.0 -4.25j) : ")
complex_num = complex_num.split()
complex_num[1] = complex_num[1].replace('j', '') # No matter if user put "j" at imaginary part or not
q = Complex(float(complex_num[0]), float(complex_num[1]))
print(q.val())
# Choose operation on given complex numbers
complex_oper = input("Enter operation( \"+\" \"-\" \"*\" ) : ")
if complex_oper == '+':
z = p.add(q)
print("Complex number adding: (", p.val(), ") + (", q.val(), ") =", z.val())
if complex_oper == '-':
z = p.sub(q)
print("Complex number subtracting: (", p.val(), ") - (", q.val(), ") =", z.val())
if complex_oper == '*':
z = p.mul(q)
print("Complex number multiplication: (", p.val(), ") * (", q.val(), ") =", z.val())
| true
|
5cdc90a578a8ba5e2c362ab7ff84242cb90c3109
|
Python
|
mbouthemy/datathon-energy
|
/src/get_score.py
|
UTF-8
| 541
| 3.46875
| 3
|
[] |
no_license
|
# Calculate the score of the index of each dataframe
# Assign the score A+ for the top 20%, A for 30%, B for 30% and C for 20%
def get_score_column(df, column_name):
"""
Get the score of the dataframe based on the column name.
"""
# Get the rank of the consumption.
rank = 'Rank' + column_name
df_2 = df.copy()
df[rank] = df[column_name].rank(ascending=False)
df_2['Score_' + column_name] = ((df.shape[0] + 1 - df[rank]) / df.shape[0]) * 100
df_2 = df_2.drop(columns=column_name)
return df_2
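# Illustrative usage sketch (not part of the original file): the DataFrame and
# column name below are made-up examples.
if __name__ == "__main__":
    import pandas as pd
    sample = pd.DataFrame({"consumption": [120, 80, 200, 50]})
    print(get_score_column(sample, "consumption"))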
| true
|
9ae875fe31bbefc35e35ce1ca97afd447c7c82e1
|
Python
|
Impresee/lar-annotator
|
/pai/utils.py
|
UTF-8
| 5,199
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 13:20:00 2018
@author: jsaavedr
"""
from . import basic
from . import bw
import skimage.measure as measure
import skimage.morphology as morph
import cv2
import numpy as np
#%%
def extractLAR(check_image):
"""
check_image must come in grayscale format
"""
image = cv2.resize(check_image, (1200, 510))
    #these values are estimated from real checks
lar_image = image[210:300, 80:1150]
#reduce noise by median filter
lar_image = cv2.medianBlur(lar_image,3)
bw_image= cv2.adaptiveThreshold(lar_image,1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,31,15)
bw_image[0:20, :] = 0
    #estimating the best row representing the center of the lar text
    #computing the profile using 10% of the lar width
sub_width = int(0.10 * lar_image.shape[1])
sub_image = bw_image[:, 0:sub_width]
v_profile = basic.getVerticalProfile(sub_image)
bin_v_profile = basic.threshold(v_profile, sub_width*0.4)
bin_v_profile = np.reshape(bin_v_profile, (1, len(bin_v_profile)))
cc_profile = bw.getCC(bin_v_profile)
#
list_comps = []
for idx, comp in enumerate(cc_profile) :
list_comps.append((idx, comp['size']))
#max_comp = max (list_comps, key = lambda x : x[1])
best_row = int(cc_profile[0]['center_x'])
lar_image = lar_image[max(0,best_row-40):best_row+20, sub_width-10::]
return lar_image
#%%
def findCandidateWords(cc_gaps, minimum_gap_size = 2, minimum_word_size = 60):
"""
    This function detects splitting points based on the separation between words, which are called gaps
    input: a list of gaps, each one represented as a connected component
output: a list of candidate regions in the form of (start, end)
"""
#1. compose a list of gap_size
list_of_gap_size = []
for idx, gap in enumerate(cc_gaps) :
list_of_gap_size.append((idx, gap['size']))
#2. looking for regions
stack=[(0,len(cc_gaps)-1)]
candidate_words = []
while len(stack) > 0 :
p=stack.pop()
_start=p[0]
_end=p[1]
print(p)
if (cc_gaps[_end]['center_x'] - cc_gaps[_start]['center_x']) > minimum_word_size and (_end - _start) > 0 :
p_max=max(list_of_gap_size[_start:_end+1], key = lambda x: x[1])
split_gap_id = p_max[0]
split_gap_size = p_max[1]
if split_gap_size > minimum_gap_size :
if split_gap_id - _start > 1 :
stack.append((_start, split_gap_id-1))
candidate_words.append((cc_gaps[_start]['center_x'],cc_gaps[split_gap_id]['min_x']))
if _end - split_gap_id > 1 :
stack.append((split_gap_id+1,_end))
candidate_words.append((cc_gaps[split_gap_id]['max_x'], cc_gaps[_end]['center_x']))
return candidate_words
#%%
def createSetOfGaps(lar_image) :
"""
    This is based on the horizontal profile of the binary lar_image;
    we compute the gap components using the connected-components function (bw.getCC).
    A component with height > 0.1 of the lar's height is considered a non-gap component.
"""
th_otsu = basic.getOtsu(lar_image)
bw_image = 1 - basic.threshold(lar_image, th_otsu)
bw_image[0:10, :] = 0
#bw_image[-10::, :] = 0
for i in range(1) :
bw_image = morph.dilation(bw_image, morph.square(3))
labels = measure.label(bw_image)
regionprops = measure.regionprops(labels)
for ccomp in regionprops :
if ccomp['area'] < 30 :
rows, cols = zip(*ccomp['coords'])
bw_image[rows, cols] = 0
#cv2.imshow("binaria", bw_image*255)
h_profile = basic.getHorizontalProfile(bw_image)
h_profile = 1 - basic.threshold(h_profile, 0.1*bw_image.shape[0])
#reshap h_profile in order it be a 2D array
h_profile = np.reshape(h_profile, [1, -1])
cc_profile = bw.getCC(h_profile)
return cc_profile
#%%
def filterCandidateWords(candidate_words, minimum_size, maximum_size) :
"""
    Filters the candidate words with respect to their size.
    We could also incorporate constraints on the content,
    such as the proportion of text in each word; in that case we would need the lar_image.
"""
filtered_list = []
for word in candidate_words:
word_size = word[1] - word[0]
if word_size > minimum_size and word_size < maximum_size :
filtered_list.append(word)
return filtered_list
def getCandidateWordsFromLAR(check_image):
"""
check_image must come in grayscale format
output 1: lar_image
output 2: tuples (start, end) defining each candidate word in lar_image
"""
lar_image = extractLAR(check_image)
cc_gaps = createSetOfGaps(lar_image)
candidate_words = findCandidateWords(cc_gaps)
candidate_words = filterCandidateWords(candidate_words, minimum_size = lar_image.shape[1]*0.05, maximum_size = lar_image.shape[1]*0.5)
return lar_image, candidate_words
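#%%
# Illustrative usage sketch (not part of the original module); the image path is
# hypothetical and the package must be importable (this module uses relative imports).
# import cv2
# from pai import utils
# check_image = cv2.imread("sample_check.png", cv2.IMREAD_GRAYSCALE)
# lar_image, candidate_words = utils.getCandidateWordsFromLAR(check_image)
# for start, end in candidate_words:
#     word_image = lar_image[:, start:end]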
| true
|
817a7fcd4bb1f71b21e68150b9d4543d08b8bd79
|
Python
|
yenertuz/push_swap
|
/checker.py
|
UTF-8
| 674
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/local/bin/python3.7
import ps_functions as ps
try:
f = open("numbers")
numbers_string = f.read()
f.close()
except:
print("checker.py: could not open \"numbers\"")
exit(-1)
try:
f = open("ops")
ops_string = f.read()
    f.close()
except:
print("checker.py: could not open \"ops\"")
exit(-1)
numbers_list = numbers_string.split(" ")
ops_list = ops_string.split(" ")
stack_a = []
stack_b = []
try:
for x in numbers_list:
stack_a.append(int(x))
except:
print("checker.py: failed to read number strings")
for x in ops_list:
ps.run_command(x, stack_a, stack_b)
if ps.is_sorted(stack_a) and len(stack_b) == 0:
print("OK")
else:
print("KO")
print(stack_a)
| true
|
bd5459fbe7edd3564b4b9aedc604456976ee7a3c
|
Python
|
junpenglao/Planet_Sakaar_Data_Science
|
/Miscellaneous/twitter_demo.py
|
UTF-8
| 2,120
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
"""
https://twitter.com/junpenglao/status/928206574845399040
"""
import pymc3 as pm
import numpy as np
import matplotlib.pylab as plt
L = np.array([[2, 1]]).T
Sigma = L.dot(L.T) + np.diag([1e-2, 1e-2])
L_chol = np.linalg.cholesky(Sigma)
with pm.Model() as model:
y = pm.MvNormal('y', mu=np.zeros(2), chol=L_chol, shape=2)
tr0 = pm.sample(500, chains=1)
tr1 = pm.fit(method='advi').sample(500)
tr2 = pm.fit(method='fullrank_advi').sample(500)
tr3 = pm.fit(method='svgd').sample(500)
plt.figure()
plt.plot(tr0['y'][:,0], tr0['y'][:,1], 'o', alpha=.1, label='NUTS')
plt.plot(tr1['y'][:,0], tr1['y'][:,1], 'o', alpha=.1, label='ADVI')
plt.plot(tr2['y'][:,0], tr2['y'][:,1], 'o', alpha=.1, label='FullRank')
plt.plot(tr3['y'][:,0], tr3['y'][:,1], 'o', alpha=.1, label='SVGD')
plt.legend();
"""
https://twitter.com/junpenglao/status/930826259734638598
"""
import matplotlib.pylab as plt
from mpl_toolkits import mplot3d
import numpy as np
import pymc3 as pm
def cust_logp(z):
return -(1.-z[0])**2 - 100.*(z[1] - z[0]**2)**2
grid = np.mgrid[-2:2:100j,-1:3:100j]
Z = -np.asarray([cust_logp(g) for g in grid.reshape(2, -1).T])
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(grid[0], grid[1], Z.reshape(100,100), cmap='viridis',
linewidth=0, antialiased=False)
with pm.Model():
pm.DensityDist('pot1', logp=cust_logp, shape=(2,))
tr1 = pm.sample(500, step=pm.NUTS())['pot1']
tr2 = pm.sample(500, step=pm.Metropolis())['pot1']
    tr3 = pm.fit(n=50000, method='fullrank_advi').sample(500)['pot1'] #VI, cause whynot
import matplotlib.pylab as plt
_, ax = plt.subplots(1,3,figsize=(15,5), sharex=True, sharey=True)
ax[0].imshow(Z.reshape(100,100), extent=[-1,3,-2,2,]);
ax[0].plot(tr1[:,1], tr1[:,0], 'ro-',alpha=.1)
ax[1].imshow(Z.reshape(100,100), extent=[-1,3,-2,2,]);
ax[1].plot(tr2[:,1], tr2[:,0], 'ro-',alpha=.1)
ax[2].imshow(Z.reshape(100,100), extent=[-1,3,-2,2,]);
ax[2].plot(tr3[:,1], tr3[:,0], 'ro', alpha=.1)
plt.tight_layout()
with pm.Model():
pm.DensityDist('pot1', logp=cust_logp, shape=(2,))
minimal=pm.find_MAP()
| true
|
cab2d5f6fc61c42887737cee1361004ee4fe5b06
|
Python
|
Busymeng/MyPython
|
/Python for Research/2.2_NumPy-Student.py
|
UTF-8
| 8,870
| 3.96875
| 4
|
[] |
no_license
|
#####################################################################
## Introduction to NumPy Arrays
##
"""
* NumPy is a Python module designed for scientific computation.
* NumPy arrays are n-dimensional array objects.
- They are used for representing vectors and matrices.
- NumPy arrays have a size that is fixed when they are constructed.
- Elements of NumPy arrays are also all of the same data type leading
to more efficient and simpler code than using Python's standard
data types.
* np.zeros(), np.ones(), np.empty()
* Linear algebra, Fourier transform, random number capabilities
* Building block for other packages (e.g. Scipy)
"""
##-------------------------------------------------------------------
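## Illustrative sketch (not part of the original student notes): constructing
## arrays with the functions mentioned above.
import numpy as np
zero_vector = np.zeros(5)         # 1-D array of five 0.0 values
zero_matrix = np.zeros((2, 3))    # 2 x 3 array of zeros
ones_matrix = np.ones((2, 3))     # 2 x 3 array of ones
empty_matrix = np.empty((2, 3))   # uninitialised memory; the values are arbitrary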
##-------------------------------------------------------------------
##-------------------------------------------------------------------
##-------------------------------------------------------------------
##-------------------------------------------------------------------
#####################################################################
## Slicing NumPy Arrays
##
"""
* With one-dimension arrays, we can index a given element by its position,
keeping in mind that indices start at 0.
* With two-dimensional arrays, the first index specifies the row of the
array and the second index specifies the column of the array.
* With multi-dimensional arrays, you can use the colon character in place
of a fixed value for an index, which means that the array elements
corresponding to all values of that particular index will be returned.
* For a two-dimensional array, using just one index returns the given row
which is consistent with the construction of 2D arrays as lists of lists,
where the inner lists correspond to the rows of the array.
"""
##-------------------------------------------------------------------
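## Illustrative sketch (not part of the original student notes): slicing 1-D and
## 2-D arrays as described above.
import numpy as np
x = np.array([1, 2, 3])
X = np.array([[1, 2, 3], [4, 5, 6]])
x[2]        # 3, the third element of the 1-D array
X[1, 2]     # 6, row 1, column 2
X[:, 1]     # array([2, 5]), column 1 for all rows
X[1]        # array([4, 5, 6]), row 1, consistent with lists of lists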
#####################################################################
## Indexing NumPy Arrays
##
"""
* NumPy arrays can also be indexed with other arrays or other
sequence-like objects like lists.
* Index can be defined as a Python list, but we could also have defined
that as a NumPy array.
* When you slice an array using the colon operator, you get a view of
the object.
- This means that if you modify it, the original array will
also be modified.
- This is in contrast with what happens when you index an array,
in which case what is returned to you is a copy of the original data.
"""
##-------------------------------------------------------------------
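## Illustrative sketch (not part of the original student notes): indexing with a
## list returns a copy, whereas slicing returns a view of the same data.
import numpy as np
z = np.array([1, 3, 5, 7, 9])
ind = [0, 2, 3]
w = z[ind]        # indexing: w is a copy, so changing w leaves z unchanged
w[0] = 100        # z is still [1, 3, 5, 7, 9]
v = z[1:4]        # slicing: v is a view, so changing v also changes z
v[0] = 100        # z is now [1, 100, 5, 7, 9]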
##-------------------------------------------------------------------
##-------------------------------------------------------------------
##-------------------------------------------------------------------
#####################################################################
## Building and Examining NumPy Arrays
##
"""
* To construct an array of 10 linearly spaced elements starting with 0
and ending with 100, we can use the NumPy linspace function.
 * To construct an array of 10 logarithmically spaced elements between
10 and 100, we can use the NumPy logspace command.
"""
##-------------------------------------------------------------------
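## Illustrative sketch (not part of the original student notes): linearly and
## logarithmically spaced arrays matching the description above.
import numpy as np
np.linspace(0, 100, 10)   # 10 evenly spaced values from 0 to 100
np.logspace(1, 2, 10)     # 10 logarithmically spaced values from 10**1 to 10**2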
##-------------------------------------------------------------------
##-------------------------------------------------------------------
# Finds whether x is prime
##-------------------------------------------------------------------
# save to file
# read from file
#####################################################################
## Datatypes
##
"""
* Every numpy array is a grid of elements of the same type.
* Numpy provides a large set of numeric datatypes that you can use to
construct arrays.
* Numpy tries to guess a datatype when you create an array, but functions
that construct arrays usually also include an optional argument to
explicitly specify the datatype.
"""
##-------------------------------------------------------------------
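## Illustrative sketch (not part of the original student notes): NumPy guesses a
## dtype from the data, and the dtype argument overrides that guess.
import numpy as np
np.array([1, 2]).dtype                      # an integer dtype (platform dependent)
np.array([1.0, 2.0]).dtype                  # float64
np.array([1, 2], dtype=np.float64).dtype    # float64, forced explicitly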
#####################################################################
## Array math
##
"""
* Basic mathematical functions operate elementwise on arrays, and are
available both as operator overloads and as functions in the numpy module.
"""
##-------------------------------------------------------------------
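## Illustrative sketch (not part of the original student notes): elementwise
## operations as operator overloads and as numpy functions.
import numpy as np
a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[5.0, 6.0], [7.0, 8.0]])
a + b          # elementwise sum, same as np.add(a, b)
a * b          # elementwise product, same as np.multiply(a, b)
a / b          # elementwise division, same as np.divide(a, b)
np.sqrt(a)     # elementwise square root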
##-------------------------------------------------------------------
"""
* We use the dot() function to compute inner products of vectors,
to multiply a vector by a matrix, and to multiply matrices.
- dot() is available both as a function in the numpy module and
as an instance method of array objects.
"""
##-------------------------------------------------------------------
# Inner product of vectors
# Matrix / vector product
# Matrix / matrix product
##-------------------------------------------------------------------
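## Illustrative sketch (not part of the original student notes): dot() as a
## function and as an array method.
import numpy as np
v = np.array([9, 10])
w = np.array([11, 12])
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])
v.dot(w)       # inner product of vectors, same as np.dot(v, w)
A.dot(v)       # matrix / vector product
A.dot(B)       # matrix / matrix product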
# Vector Operations
##-------------------------------------------------------------------
"""
* Operations along axes
"""
##-------------------------------------------------------------------
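## Illustrative sketch (not part of the original student notes): reductions along
## a chosen axis.
import numpy as np
A = np.array([[1, 2], [3, 4]])
A.sum()          # 10, sum over all elements
A.sum(axis=0)    # array([4, 6]), sum of each column
A.sum(axis=1)    # array([3, 7]), sum of each row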
#####################################################################
## Broadcasting
##
"""
* Broadcasting is a powerful mechanism that allows numpy to work with
arrays of different shapes when performing arithmetic operations.
* Frequently we have a smaller array and a larger array, and we want to
use the smaller array multiple times to perform some operation on the
larger array.
"""
##-------------------------------------------------------------------
##-------------------------------------------------------------------
# Real Numpy broadcasting
"""
* Rule of broadcasting
1. If the arrays do not have the same rank, prepend the shape of the
lower rank array with 1s until both shapes have the same length.
2. The two arrays are said to be compatible in a dimension if they have
the same size in the dimension, or if one of the arrays has size 1 in
that dimension.
3. The arrays can be broadcast together if they are compatible in all
dimensions.
4. After broadcasting, each array behaves as if it had shape equal to
the elementwise maximum of shapes of the two input arrays.
5. In any dimension where one array had size 1 and the other array had
size greater than 1, the first array behaves as if it were copied
along that dimension
"""
##-------------------------------------------------------------------
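## Illustrative sketch (not part of the original student notes): broadcasting a
## smaller array against a larger one, following the rules listed above.
import numpy as np
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])   # shape (3, 3)
row = np.array([1, 0, 1])                         # shape (3,), treated as (1, 3)
X + row          # row is virtually copied along axis 0 and added to every row
col = np.array([[10], [20], [30]])                # shape (3, 1)
X + col          # col is virtually copied along axis 1 and added to every column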
#####################################################################
## Other Matrix Operations
##
"""
* import numpy.linalg
eye(3) #Identity matrix
trace(A) #Trace
column_stack((A,B)) #Stack column wise
row_stack((A,B,A)) #Stack row wise
* Linear Algebra
import numpy.linalg
qr Computes the QR decomposition
cholesky Computes the Cholesky decomposition
inv(A) Inverse
solve(A,b) Solves Ax = b for A full rank
lstsq(A,b) Solves arg minx kAx − bk2
eig(A) Eigenvalue decomposition
        eigh(A) Eigenvalue decomposition for symmetric or Hermitian matrices
eigvals(A) Computes eigenvalues.
svd(A, full) Singular value decomposition
pinv(A) Computes pseudo-inverse of A
* Fourier Transform
import numpy.fft
fft 1-dimensional DFT
fft2 2-dimensional DFT
fftn N-dimensional DFT
ifft 1-dimensional inverse DFT (etc.)
rfft Real DFT (1-dim)
ifft Imaginary DFT (1-dim)
* Random Sampling
import numpy.random
rand(d0,d1,...,dn) Random values in a given shape
randn(d0, d1, ...,dn) Random standard normal
randint(lo, hi, size) Random integers [lo, hi)
choice(a, size, repl, p) Sample from a
shuffle(a) Permutation (in-place)
permutation(a) Permutation (new array)
* Distributions in Random
import numpy.random
The list of distributions to sample from is quite long, and includes
beta
binomial
chisquare
exponential
dirichlet
gamma
laplace
lognormal
pareto
poisson
power
"""
##-------------------------------------------------------------------
| true
|
015edec03648055fb46698e036e9c1d3829135e2
|
Python
|
itsolutionscorp/AutoStyle-Clustering
|
/all_data/exercism_data/python/hamming/631c999c8eae43dba437e8ef0ba97c7d.py
|
UTF-8
| 321
| 3.671875
| 4
|
[] |
no_license
|
def hamming(first, second):
hamming = 0
if(len(first) < len(second)):
a = first
b = second
elif(len(first) > len(second)):
b = first
a = second
else:
a = first
b = second
hamming = len(b) - len(a)
for i in range(len(a)):
if(a[i] != b[i]):
hamming += 1
return hamming
| true
|
7b023de700615ef82a8f8d968652cd3eba2b250e
|
Python
|
belenalegre/Santander
|
/src/EjercicioPython.py
|
UTF-8
| 1,399
| 3.21875
| 3
|
[] |
no_license
|
import csv
class Parser():
def __init__(self, srcPath):
self.srcPath = srcPath
self.filename = srcPath.split('.')[0]
def analyseLines(self, lines):
cols = len(lines[0])
correct_lines = [ l for l in lines if len(l)==cols]
wrong_lines = [l for l in lines if len(l) != cols]
return correct_lines, wrong_lines
def readTSV(self):
with open(self.srcPath,'rb') as tsv_file:
self.lines = tsv_file.read().decode("utf-16-le").encode("utf-8")
return
def convertTSVtoCSV(self):
lines = [l.split('\t') for l in self.lines.split('\n')]
self.lines, self.wrong_lines = self.analyseLines(lines)
return
def exportCSV(self, dstPath=None, expWrongLines=False, errPath=None):
if dstPath == None:
dstPath='{0}.csv'.format(self.filename)
with open(dstPath, 'wb') as f:
write = csv.writer(f, delimiter='|')
write.writerows(self.lines)
if expWrongLines:
with open(errPath, 'wb') as f:
write = csv.writer(f, delimiter='|')
write.writerows(self.wrong_lines)
return
def runParser(self, output_path=None):
self.readTSV()
self.convertTSVtoCSV()
self.exportCSV(output_path)
return
| true
|
eb32f2c307eb4c18721b072bdb30a61754fedfc6
|
Python
|
MrTrustworthy/game_of_life
|
/gol/grid.py
|
UTF-8
| 3,674
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
__author__ = 'MrTrustworthy'
from gol.x_utils import Position
from typing import List, Union
class Field:
def __init__(self, position: Position, passable: bool, passing_cost: Union[int, None], objects):
self.passable = passable
self.passing_cost = passing_cost if passable else None
if not isinstance(position, Position):
raise ValueError("position must be a Position-object")
if not isinstance(objects, list):
raise ValueError("Objects must be a list")
self.objects = objects if isinstance(objects, list) else [objects]
self.position = position
class Grid:
def __init__(self, fields: List[List[Field]]) -> None:
self.fields = fields
def get(self, *args: List[Union[Position, tuple, int]]) -> Field:
"""
Returns a given field based on a X-Y value or tuple
:return:
"""
if len(args) == 1:
if isinstance(args[0], Position):
x, y = args[0].tuple
else:
x, y = args[0]
else:
x, y = (args[0], args[1])
return self.fields[x][y]
def get_path(self, pos_a: Position, pos_b: Position) -> List[Field]:
"""
Calculates a passable path between the two given positions
:param pos_a:
:param pos_b:
:return:
"""
class Node:
def __init__(self, node, cost, expected, parent):
self.node = node
# same position reference that Field-Object has
self.pos = node.position
self.cost = cost
self.expected = expected
self.total = self.cost + self.expected
self.parent = parent
def __eq__(self, other):
return self.pos == other.pos
current_field = Node(self.get(pos_a.tuple), 0, pos_a.distance_to(pos_b), None)
path = [current_field]
already_checked = []
while current_field.pos != pos_b:
valid_neighbours = filter(
lambda x: x.passable,
self._get_neighbours(current_field.pos)
)
for field in valid_neighbours:
# calculate the values of each field
cost = current_field.pos.distance_to(field.position)
h = field.position.distance_to(pos_b)
node = Node(field, cost, h, current_field)
                # don't want duplicates
if node not in path and node not in already_checked:
already_checked.append(node)
already_checked.sort(key=lambda x: x.total)
current_field = already_checked[0]
already_checked.remove(current_field)
path.append(current_field)
ordered_path = []
while current_field is not None:
ordered_path.append(current_field.node)
current_field = current_field.parent
ordered_path.reverse()
return ordered_path
def _get_neighbours(self, position: Position) -> List[Field]:
"""
Calculates and returns a list of all Fields right next to the given position
:param position:
:return: List(Field) List of neighbouring fields
"""
x, y = (position.x, position.y)
neighbours = []
for i in [x - 1, x, x + 1]:
for j in [y - 1, y, y + 1]:
if i < 0 or j < 0:
continue
if i == x and j == y:
continue
else:
neighbours.append(self.get(i, j))
return neighbours
| true
|
8b83d67fc90e20e95da8fb3e166c7bf1fe2926ae
|
Python
|
jobby/project-euler
|
/python/problem56.py
|
UTF-8
| 144
| 2.96875
| 3
|
[] |
no_license
|
def digitsum(n):
return sum(map(lambda s:int(s), str(n)))
print max(map(digitsum, [(a ** b) for a in range(1,100) for b in range(1,100)]))
| true
|
051bdb55ec8ca1b1fee7886c0b2f8b9935dc4799
|
Python
|
frairefm/UOC_DataScience_TipologiaCicleDades
|
/PRAC1_code.py
|
UTF-8
| 7,998
| 2.78125
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup, NavigableString
import requests
import pandas as pd
def get_soup(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
# Extracts the links to every review edition
def get_links():
links = list()
h2s = soup.findAll('h2', class_='teaser_title')
for h2 in h2s:
links.append(h2.a['href'])
return links
# Gets into each review edition and into every single article to draw their data out
def extract_items(links):
for link in links:
soup = get_soup(link)
## html structure of 'quaderns' pages
if("QuadernsICA" not in link):
# get review title
print()
review_title = soup.find('div', class_='page page_issue').h1
print(review_title.text.strip())
# Gets review description
review_description = soup.find('div', class_='description').p
# Gets url articles & pages
ref = soup.findAll('div', class_='title')
pages = soup.findAll('div', class_='pages')
for a, pag in zip(ref, pages):
# Appends review_title & review_description to their respective lists for each article
review_title_list.append(review_title.text.strip())
review_description_list.append(review_description.text.strip())
# Appends article pages to article_pages_list
article_pages_list.append(pag.text.strip())
# Explores page article
inner_soup = get_soup(a.a['href'])
# Gets article title
article_title = inner_soup.find('h1', class_='page_title')
article_title_list.append(article_title.text.strip())
# Get article authors
authors = list()
article_authors = inner_soup.findAll('span', class_='name')
for author in article_authors:
authors.append(author.text.strip())
joined_string = ",".join(authors)
article_authors_list.append(joined_string)
# Get article keywords
article_keywords = inner_soup.find('span', class_='value')
if(article_keywords):
joined_string = " ".join(article_keywords.text.split())
article_keywords_list.append(joined_string)
else:
article_keywords_list.append(None)
# Get article abstract
article_abstract = inner_soup.find('div', class_='item abstract')
article_abstract = article_abstract.text.replace('Resum', '').strip()
article_abstract_list.append(article_abstract)
# Get article pdf
article_pdf=inner_soup.find('a', class_='obj_galley_link pdf')['href']
article_pdf_list.append(article_pdf)
print("-" + article_title.text.strip())
## html structure of 'QuadernsICA' pages
elif ("QuadernsICA" in link):
# Gets review title
print()
review_title = soup.find('li', class_='active')
print(review_title.text.strip())
# Gets review description
review_description = soup.find('div', class_='description')
# Gets url articles & pages
ref = soup.findAll('h3', class_='media-heading')
pages = soup.findAll('p', class_='pages') # ok
for a, pag in zip(ref, pages):
# Appends review_title to review_title_list for each article
if(review_title):
review_title_list.append(review_title.text.strip())
else:
review_title_list.append(None)
# Appends review_description to review_description_list for each article
if(review_description):
review_description_list.append(review_description.text.strip())
else:
review_description_list.append(None)
# Appends article pages to article_pages_list
article_pages_list.append(pag.text.strip())
# Explores page article
inner_soup = get_soup(a.a['href'])
# Gets article title
article_title = inner_soup.find('h1', class_='page-header')
if(article_title):
article_title_list.append(article_title.text.strip())
else:
article_title_list.append(None)
# Gets article authors
authors = list()
article_authors = inner_soup.findAll('div', class_='author')
if(article_authors):
for author in article_authors:
authors.append(author.find('strong').text)
joined_string = ",".join(authors)
article_authors_list.append(joined_string)
else:
article_authors_list.append(None)
# Gets article abstract & keywords
article_abstract = inner_soup.find('div', class_='article-abstract')
if(article_abstract):
article_abstract_list.append(article_abstract.text.strip())
article_keywords_list.append(article_abstract.text.strip().partition("Keywords: ")[2])
else:
article_abstract_list.append(None)
article_keywords_list.append(None)
# Gets article pdf
article_pdf=inner_soup.find('a', class_='galley-link btn btn-primary pdf')
if(article_pdf):
article_pdf_list.append(article_pdf['href'])
else:
article_pdf_list.append(None)
print("-" + article_title.text.strip())
# initialization of fields lists
review_title_list = list()
review_description_list = list()
article_title_list = list()
article_pages_list = list()
article_authors_list = list()
article_keywords_list = list()
article_abstract_list = list()
article_pdf_list = list()
print("Start")
# first page
url_page="https://www.antropologia.cat/publicacions-ica/quaderns/"
soup = get_soup(url_page)
links = get_links()
extract_items(links)
next_page = soup.find('a', class_='next page-numbers')
# following pages loop
while(next_page):
url_page = next_page['href']
soup = get_soup(url_page)
links = get_links()
extract_items(links)
next_page = soup.find('a', class_='next page-numbers') # ok
print("Extraction done")
print("Setting dataset\n")
print(str(len(article_title_list)) + " articles from " + str(len(set(review_title_list))) + " were retrieved")
# Creates a dataset creation and populates it with field lists data
df = pd.DataFrame({'Review title':review_title_list,
'Review description':review_description_list,
'Article title':article_title_list,
'Article pages':article_pages_list,
'Article authors':article_authors_list,
'Article keywords':article_keywords_list,
'Article abstract':article_abstract_list,
'Article pdf':article_pdf_list})
# Writes the files
df.to_csv('Quaderns_ICA.csv', sep='|')
df.to_excel('Quaderns_ICA.xlsx')
print("Dataset written into 'Quaderns_ICA.csv' file")
print("Dataset written into 'Quaderns_ICA.xlsx' file")
print("\nEnd")
| true
|
fabcd7adde7d619666b4b9b2566c1e83d628d7d2
|
Python
|
gleisonbs/trackings-report
|
/reports/mau_report.py
|
UTF-8
| 1,285
| 2.84375
| 3
|
[] |
no_license
|
from trackings import Trackings
from utils.date import get_date_range, get_month_from_date, get_months
from utils.logger import log_error
from dateutil.relativedelta import relativedelta
from pprint import pprint
from time import sleep
from datetime import datetime
from collections import defaultdict
class MAUReport:
def __init__(self):
self.trackings = Trackings()
self.rows = []
def add_header(self, rows):
updated_at = f'Atualizado em: {datetime.now().strftime("%d/%m/%Y %H:%M:%S")}'
empty_line = []
rows.insert(0, empty_line)
rows.insert(0, [updated_at])
return rows
def generate(self):
print('Running the "MAU" report...')
begin_date, end_date = get_date_range()
months = get_months(begin_date.month, end_date.month)
oneMonth = relativedelta(months = +1)
oneDay = relativedelta(days = +1)
for month_name, month_number in months:
total_MAU = self.trackings.getMAU(begin_date, (begin_date + oneMonth) - oneDay)
begin_date += oneMonth
self.rows.append([month_name] + [total_MAU])
print(f'{month_name}: {total_MAU}')
self.rows = self.add_header(self.rows)
return self.rows
| true
|
35a8ae6d20ebc0d2e05f8f3f469c358a6761485e
|
Python
|
anandav/NSE-OptionChain-Importer
|
/mydatabase.py
|
UTF-8
| 2,088
| 2.6875
| 3
|
[] |
no_license
|
import sqlite3
import os
import database
import configparser
from config import AppConfig
class databaseprovider:
def __init__(self, data):
self.data = data
# self.config = configparser.ConfigParser()
# self.config.read("config.ini")
def GetConnection(self):
conn = sqlite3.connect(AppConfig().ConnectionString())
return conn
def CreateOptionChainTable(self, tableName):
fl = open(AppConfig().ScriptCreateOptionChainTable(), "r")
tblcontent = fl.read()
tblname = AppConfig().TableName()
tblcontent = tblcontent.replace("TABLENAME", tblname)
conn = self.GetConnection()
conn.execute(tblcontent)
conn.close()
def SaveOptionChainData(self):
data = self.PrepareData()
conn = self.GetConnection()
fl = open(AppConfig().ScriptInsertOptionChain(), "r")
tbl = fl.read()
fl.close()
if(len(data) > 0):
print("Writing to database")
conn.executemany(tbl, data)
conn.commit()
conn.close()
    def GetData(self, query):
        conn = self.GetConnection()
        # Completed stub (assumed intent): run the query and return all rows
        return conn.execute(query).fetchall()
def PrepareData(self):
result = []
for item in self.data["OptionChain"]:
result.append((self.data["Symbol"], self.data["Date"], self.data["SpotPrice"], 'call', item["StrikePrice"], item["Calls"]["AskPrice"], item["Calls"]["AskQty"], item["Calls"]
["BidPrice"], item["Calls"]["BidQty"], item["Calls"]["Chng in OI"], item["Calls"]["IV"], item["Calls"]["LTP"], item["Calls"]["Net Chng"], item["Calls"]["OI"], item["Calls"]["Volume"]))
result.append((self.data["Symbol"], self.data["Date"], self.data["SpotPrice"], 'put', item["StrikePrice"], item["Puts"]["AskPrice"], item["Puts"]["AskQty"],
item["Puts"]["BidPrice"], item["Puts"]["BidQty"], item["Puts"]["Chng in OI"], item["Puts"]["IV"], item["Puts"]["LTP"], item["Puts"]["Net Chng"], item["Puts"]["OI"], item["Puts"]["Volume"]))
return result
| true
|
3299ad33b4c616d338a11c74af5763850507dfba
|
Python
|
rafaelbaur/SistemaPPGI
|
/ppgi/util.py
|
UTF-8
| 1,062
| 2.625
| 3
|
[] |
no_license
|
from django.utils.datetime_safe import datetime
def getPeriodo(mes):
if mes >= 1 and mes <= 3:
periodo = 1
elif mes >= 4 and mes <=6:
periodo = 2
elif mes >= 7 and mes <= 9:
periodo = 3
elif mes >= 10 and mes <= 12:
periodo = 4
return periodo
def getPeriodosDecorridos(anoIngresso, periodoIngresso):
mesAtual = datetime.now().month
anoAtual = datetime.now().year
periodosAnoAtual = getPeriodo(mesAtual)
    #disregard the current period (periodoAtual)
numPeriodosDecorridos = ((anoAtual - anoIngresso)*4 - (periodoIngresso-1) + (periodosAnoAtual-1))
return numPeriodosDecorridos
def htmlIconTrue():
return "<center> <img src='/static/admin/img/icon-yes.gif' alt='True' /> </center>"
def htmlIconFalse():
return "<center> <img src='/static/admin/img/icon-no.gif' alt='False' /> </center>"
def handle_uploaded_file(f, dest):
destino = open(dest+'/'+f.name, 'wb+')
for chunk in f.chunks():
destino.write(chunk)
destino.close()
| true
|
03a755dc1b00735e2f659ccc6aa0314e7342f0eb
|
Python
|
bhatiakomal/pythonpractice
|
/Udemy/Hierarchical_Inheritance.py
|
UTF-8
| 869
| 3.3125
| 3
|
[] |
no_license
|
'''class Father:
def showF(self):
print("Father Class method")
class Son(Father):
def showS(self):
print("Son Class method")
class Daughter(Father):
def showD(self):
print("Daughter Class method")
s=Son()
s.showS()
s.showF()
d=Daughter()
d.showF()
d.showD()'''
class Father:
def __init__(self):
print('Father class Constructor')
def showF(self):
print("Father Class method")
class Son(Father):
def __init__(self):
super().__init__() #Calling Father Class Constructor
print('Son class Constructor')
def showS(self):
print("Son Class method")
class Daughter(Father):
def __init__(self):
super().__init__()
print('Daughter class Constructor')
def showD(self):
print("Daughter Class method")
s=Son()
print()
d=Daughter()
| true
|
82d2d58bd1b45e852647f892abc365ebd46e869b
|
Python
|
ophidianwang/PyWorkspace
|
/od_package/od_module01.py
|
UTF-8
| 1,104
| 3.5625
| 4
|
[] |
no_license
|
# encoding: utf-8
class od_class01(object):
"""Summary of class here.
Longer class information....
Longer class information....
Attributes:
likes_spam: A boolean indicating if we like SPAM or not.
eggs: An integer count of the eggs we have laid.
name: name of instance
"""
count = 0
def __init__(self, name):
"""Inits SampleClass with blah."""
self.name = name
od_class01.count += 1
def __str__(self):
return str(self.name)
def go(self):
"test, print go! and self.__str__"
print("go! " + self.__str__())
@classmethod
def getX(cls):
"get how many class instance is created"
return cls.count
@classmethod
def class_foo(cls,x):
"testing classmethod"
print "executing class_foo(%s,%s)"%(cls,x)
@staticmethod
def static_foo(x):
"testing staticmethod"
print "executing static_foo(%s)"%x
@classmethod
def oInstanceByClass(cls,name):
"testing get instance with classmethod"
return od_class01(name)
@staticmethod
def oInstanceByStatic(name):
"testing get instance with staticmethod"
return od_class01(name)
| true
|
8114270c6aad87ac9ea7891791ff58fa37427f8d
|
Python
|
kkrauss2/qbb2016-answers
|
/week-11/comparison.py
|
UTF-8
| 3,045
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from __future__ import division
import sys
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage, cophenet, leaves_list
from scipy.cluster.hierarchy import leaves_list as leafy
from scipy.spatial.distance import pdist
from scipy.cluster.vq import kmeans2 as kmeans
from scipy.stats import ttest_ind, ttest_ind_from_stats
from scipy.special import stdtr
from scipy import stats
import itertools
import numpy as np
import csv
import pydendroheatmap as pdh
try: import cPickle as pickle
except: import pickle
import pandas as pd
from statsmodels.stats.weightstats import ttest_ind as ttest
data = sys.argv[1]
f = open(data)
##Early stages - CFU, mys
##Late stages - Poly, unk
gene_names = []
gene_positions = []
cfu = []
mys = []
poly = []
unk = []
genes = {}
for i, line in enumerate(f):
if i == 0:
continue
else:
field = line.split('\t')
gene_names.append(field[0])
cfu.append(field[1])
mys.append(field[5])
poly.append(field[2])
unk.append(field[3])
gene_positions.append(i)
genes = dict(itertools.izip(gene_positions, gene_names))
# print genes
early = []
late = []
cfu_array = np.array(cfu, dtype = np.float)
mys_array = np.array(mys, dtype = np.float)
avg_early = (cfu_array + mys_array)/2
early.append(avg_early)
poly_array = np.array(poly, dtype = np.float)
unk_array = np.array(unk, dtype = np.float)
avg_late = (poly_array + unk_array)/2
late.append(avg_late)
early_array = np.array(early, dtype = np.float)
late_array = np.array(late, dtype = np.float)
ratio = []
temp_ratio = (early_array / late_array)
ratio.append(temp_ratio)
# print ratio
up_genes = []
up_genes_position = []
down_genes = []
down_genes_positions = []
not_sig_genes = []
not_sig_genes_positions = []
for position, value in enumerate(np.nditer(ratio)):
if value >= 2.0:
up_genes.append(value)
up_genes_position.append(position)
elif value <= 0.5:
down_genes.append(value)
down_genes_positions.append(position)
else:
not_sig_genes.append(value)
not_sig_genes_positions.append(position)
# print up_genes
up_genes_array = np.array(up_genes, dtype = np.float)
down_genes_array = np.array(down_genes, dtype = np.float)
# print up_genes_array
up_gene_names = []
down_gene_names = []
not_sig_names = []
for position, gene, in genes.items():
if position in up_genes_position:
up_gene_names.append(gene)
elif position in down_genes_positions:
down_gene_names.append(gene)
else:
not_sig_names.append(gene)
# print up_gene_names
t, p = ttest_ind(early_array, late_array, equal_var=False)
print t
##I have been trying to get this t-test to work. I know that the problem is that I am giving it an array of averages and it wants to be able to find the averages on its own, but I cannot figure out how to get around this. Since I couldn't get past this, I couldn't perform the Panther part of this exercise.
| true
|
9fc4f0028a8ecdf623b1459246d9ee431f992fe4
|
Python
|
Muskelbieber/PS2_remote_to_arduino
|
/PS2_remote_turtle.py
|
UTF-8
| 1,834
| 3.5
| 4
|
[] |
no_license
|
##############
## Script listens to serial port and does stuff
##############
## requires pySerial to be installed
import serial
import turtle
from PS2_remote_data import serial_port,\
baud_rate,\
button_to_signal,\
signal_to_button, signal_to_int
ser = serial.Serial(serial_port, baud_rate)
#The Information function to print all button uses
def info():
print('OPEN/CLOSE: Terminate the whole programm by exit()')
print('PLAY: Activates/deactivates Turtle gamemode')
print('Triangle: Display Turtle shape')
print('Up arrow: Turtle move 25 in forward facing direction')
print('Left arrow: Turtle rotates left 5 degrees')
print('Right arrrow: Turtle rotates right 5 degrees')
print('Display: Shows this information again in the terminal')
turtle_val=False;
info();
while(True):
line = ser.readline();
#ser.readline returns a binary, convert to string
line = line.decode("utf-8");
#Terminal output of what one pressed
print(line);
print(signal_to_button[line]);
print(signal_to_int[line]);
#System commands
#if(signal_to_int[line]==int('0x68B5B', 0)):exit();#OPEN/CLOSE
if(signal_to_int[line]==signal_to_int[button_to_signal['OPEN/CLOSE']]):
exit();#OPEN/CLOSE
#Help Information again displaying
if(signal_to_int[line]==signal_to_int[button_to_signal['DISPLAY']]):
info();
#Draw play Turtle
if(signal_to_int[line]==signal_to_int[button_to_signal['PLAY']]):
turtle_val = not turtle_val;
if(turtle_val==True):
if(signal_to_int[line]==signal_to_int[button_to_signal['Triangle']]):
turtle.shape("turtle")
if(signal_to_int[line]==signal_to_int[button_to_signal['Up arrow']]):
turtle.forward(25);
if(signal_to_int[line]==signal_to_int[button_to_signal['Right arrow']]):
turtle.right(5);
if(signal_to_int[line]==signal_to_int[button_to_signal['Left arrow']]):
turtle.left(5);
| true
|
2af04bd9ccaa403694885001514b96d2adb256d4
|
Python
|
devkumar24/30-Days-of-Code
|
/Day 6 Review/code.py
|
UTF-8
| 399
| 3.4375
| 3
|
[] |
no_license
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
test_cases = int(input())
for i in range(test_cases):
input_str = input()
for j in range(len(input_str)):
if j%2 == 0:
print(input_str[j],end = "")
print(end = " ")
for j in range(len(input_str)):
if j%2 != 0:
print(input_str[j],end = "")
print(end = "\n")
| true
|
9f69c856885d9b39cc390da189f61b1674c9a63c
|
Python
|
MariaLitvinova/autotesting-with-python
|
/module2/test7_explicit_wait.py
|
UTF-8
| 949
| 3.140625
| 3
|
[] |
no_license
|
from selenium import webdriver
import time
import math
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
try:
link = "http://suninjuly.github.io/explicit_wait2.html"
browser = webdriver.Chrome()
browser.get(link)
text = WebDriverWait(browser, 20).until(
EC.text_to_be_present_in_element((By.ID, "price"), "100")
)
button = browser.find_element_by_id("book")
button.click()
x_element = WebDriverWait(browser, 5).until(
EC.presence_of_element_located((By.ID, "input_value"))
)
x = x_element.text
y = calc(x)
y_element = browser.find_element_by_id("answer")
y_element.send_keys(y)
button = browser.find_element_by_id("solve")
button.click()
finally:
time.sleep(20)
browser.quit()
| true
|
ec70c6d087b91c4f0b4f253849950fa4c4308236
|
Python
|
jacklisp/motion-planning-playground
|
/algorithms/Probablistics Planners/rrt_family_algorithms.py
|
UTF-8
| 10,489
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
import random
import numpy as np
import math
import copy
import matplotlib.pyplot as plt
show_animation = True
class RRTFamilyPlanners():
def __init__(self, start, goal, obstacleList, randArea, expandDis=0.5, goalSampleRate=10, maxIter=200):
self.start = Node(start[0], start[1])
self.goal = Node(goal[0], goal[1])
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.expandDis = expandDis
self.goalSampleRate = goalSampleRate
self.maxIter = maxIter
self.obstacleList = obstacleList
##################################################################################
def RRTSearch(self, animation=True):
self.nodeList = [self.start]
while True:
# get random point in the free space
rnd = self.sampleFreeSpace()
# find closest node in the tree
nind = self.getNearestListIndex(self.nodeList, rnd)
nearestNode = self.nodeList[nind]
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
# compute the position of the new node
newNode = self.getNewNode(theta, nind, nearestNode)
# collision check
if not self.__CollisionCheck(newNode, self.obstacleList):
continue
# if collision doesn't happen in extending the nearest node to the new node
# add it to the tree
self.nodeList.append(newNode)
#check if we reached the goal
if self.isNearGoal(newNode):
break
if animation:
self.drawGraph(rnd)
# compute the path
lastIndex = len(self.nodeList) -1
path = self.getFinalCourse(lastIndex)
return path
def sampleFreeSpace(self):
if random.randint(0,100) > self.goalSampleRate:
rnd = [random.uniform(self.minrand, self.maxrand),
random.uniform(self.minrand, self.maxrand)]
else:
rnd = [self.goal.x, self.goal.y]
return rnd
def getNearestListIndex(self, nodes, rnd):
dList = [(node.x - rnd[0])**2 +
(node.y - rnd[1])**2 for node in nodes]
minIndex = dList.index(min(dList))
return minIndex
def getNewNode(self, theta, nind, nearestNode):
newNode = copy.deepcopy(nearestNode)
newNode.x += self.expandDis * math.cos(theta)
newNode.y += self.expandDis * math.sin(theta)
newNode.cost += self.expandDis
newNode.parent = nind
return newNode
def __CollisionCheck(self, newNode, obstacleList):
for (ox, oy, size) in obstacleList:
dx = ox - newNode.x
dy = oy - newNode.y
d = dx * dx + dy * dy
if d <= 1.1 * size**2:
return False #collision
return True # safe
def isNearGoal(self, node):
d = self.lineCost(node, self.goal)
if d < self.expandDis:
return True
return False
##################################################################################
def RRTStarSearch(self, animation=True):
self.nodeList = [self.start]
iter = 1
while True:
rnd = self.sampleFreeSpace()
nind = self.getNearestListIndex(self.nodeList, rnd)
nearestNode = self.nodeList[nind]
# steer
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
newNode = self.getNewNode(theta, nind, nearestNode)
if self.__CollisionCheck(newNode, self.obstacleList):
nearinds = self.findNearNodes(newNode)
newNode = self.chooseParent(newNode, nearinds)
self.nodeList.append(newNode)
self.rewire(newNode, nearinds)
iter += 1
if(iter == self.maxIter):
break
if animation:
self.drawGraph(rnd)
if self.isNearGoal(newNode):
break
# get path
lastIndex = len(self.nodeList) -1
path = self.getFinalCourse(lastIndex)
return path
def rewire(self, newNode, nearInds):
nnode = len(self.nodeList)
for i in nearInds:
nearNode = self.nodeList[i]
d = math.sqrt((nearNode.x - newNode.x)**2 +
(nearNode.y - newNode.y)**2)
scost = newNode.cost + d
if nearNode.cost > scost:
theta = math.atan2(newNode.y - nearNode.y ,
newNode.x - nearNode.x)
if self.check_collision_extend(nearNode, theta, d):
nearNode.parent = nnode - 1
nearNode.cost = scost
def check_collision_extend(self, nearNode, theta, d):
tmpNode = copy.deepcopy(nearNode)
for i in range(int(d / self.expandDis)):
tmpNode.x += self.expandDis * math.cos(theta)
tmpNode.y += self.expandDis * math.sin(theta)
if not self.__CollisionCheck(tmpNode, self.obstacleList):
return False
return True
def findNearNodes(self, newNode):
nnode = len(self.nodeList)
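        # RRT* shrinking-ball neighbourhood: the radius scales like sqrt(log(n)/n) for a
        # 2-D space, so candidate parents are searched in a ball that tightens as the tree grows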
r = 50.0 * math.sqrt((math.log(nnode) / nnode))
dlist = [(node.x - newNode.x) ** 2 +
(node.y - newNode.y) ** 2 for node in self.nodeList]
nearinds = [dlist.index(i) for i in dlist if i <= r ** 2]
return nearinds
def chooseParent(self, newNode, nearInds):
if len(nearInds) == 0:
return newNode
dList = []
for i in nearInds:
dx = newNode.x - self.nodeList[i].x
dy = newNode.y - self.nodeList[i].y
d = math.sqrt(dx ** 2 + dy ** 2)
theta = math.atan2(dy, dx)
if self.check_collision_extend(self.nodeList[i], theta, d):
dList.append(self.nodeList[i].cost + d)
else:
dList.append(float('inf'))
minCost = min(dList)
minInd = nearInds[dList.index(minCost)]
if minCost == float('inf'):
print("mincost is inf")
return newNode
newNode.cost = minCost
newNode.parent = minInd
return newNode
def getFinalCourse(self, lastIndex):
path = [[self.goal.x, self.goal.y]]
while self.nodeList[lastIndex].parent is not None:
node = self.nodeList[lastIndex]
path.append([node.x, node.y])
lastIndex = node.parent
path.append([self.start.x, self.start.y])
return path
def getBestLastIndex(self):
disgList = [self.calcDistToGoal(node.x, node.y)
for node in self.nodeList]
goalInds = [disgList.index(i) for i in disgList if i <= self.expandDis]
if len(goalInds) == 0:
return None
minCost = min([self.nodeList[i].cost for i in goalInds])
for i in goalInds:
if self.nodeList[i].cost == minCost:
return i
return None
def calcDistToGoal(self, x, y):
return np.linalg.norm([x - self.goal.x, y - self.goal.y])
##################################################################################
def InformedRRTStarSearch(self, animation=True):
self.nodeList = [self.start]
# max length we expect to find in our 'informed' sample space, starts as infinite
cBest = float('inf')
pathLen = float('inf')
treeSize = 0
pathSize = 0
solutionSet = set()
path = None
# Computing the sampling space
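        # cMin is the straight-line start-goal distance (the focal distance of the sampling
        # ellipse), xCenter its midpoint, and C (built below from an SVD) is the rotation that
        # maps the ellipse-aligned frame into the world frame, as in Informed RRT* (Gammell et al.)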
cMin = math.sqrt(pow(self.start.x - self.goal.x, 2) + pow(self.start.y - self.goal.y, 2))
xCenter = np.matrix([[(self.start.x + self.goal.x) / 2.0], [(self.start.y + self.goal.y) / 2.0], [0]])
a1 = np.matrix([[(self.goal.x - self.start.x) / cMin], [(self.goal.y - self.start.y) / cMin], [0]])
        id1_t = np.matrix([1.0, 0.0, 0.0]) # first column of identity matrix transposed
M = np.dot(a1 , id1_t)
U, S, Vh = np.linalg.svd(M, 1, 1)
C = np.dot(np.dot(U, np.diag([1.0, 1.0, np.linalg.det(U) * np.linalg.det(np.transpose(Vh))])), Vh)
for i in range(self.maxIter):
# Sample space is defined by cBest
# cMin is the minimum distance between the start point and the goal
# xCenter is the midpoint between the start and the goal
# cBest changes when a new path is found
rnd = self.sample(cBest, cMin, xCenter, C)
nind = self.getNearestListIndex(self.nodeList, rnd)
nearestNode = self.nodeList[nind]
# steer
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
newNode = self.getNewNode(theta, nind, nearestNode)
d = self.lineCost(nearestNode, newNode)
if self.__CollisionCheck(newNode, self.obstacleList) and self.check_collision_extend(nearestNode, theta, d):
nearInds = self.findNearNodes(newNode)
newNode = self.chooseParent(newNode, nearInds)
self.nodeList.append(newNode)
self.rewire(newNode, nearInds)
if self.isNearGoal(newNode):
solutionSet.add(newNode)
lastIndex = len(self.nodeList) -1
tempPath = self.getFinalCourse(lastIndex)
tempPathLen = self.getPathLen(tempPath)
if tempPathLen < pathLen:
path = tempPath
cBest = tempPathLen
if animation:
self.drawGraph(rnd)
return path
def sample(self, cMax, cMin, xCenter, C):
if cMax < float('inf'):
r = [cMax /2.0, math.sqrt(cMax**2 - cMin**2)/2.0,
math.sqrt(cMax**2 - cMin**2)/2.0]
L = np.diag(r)
xBall = self.sampleUnitBall()
rnd = np.dot(np.dot(C, L), xBall) + xCenter
rnd = [rnd[(0,0)], rnd[(1,0)]]
else:
rnd = self.sampleFreeSpace()
return rnd
def sampleUnitBall(self):
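        # draws a point uniformly at random inside the unit disk: after the swap, b is the
        # larger of two uniforms (its CDF is r**2, the correct radial law) and a/b gives a
        # uniform angle fraction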
a = random.random()
b = random.random()
if b < a:
a, b = b, a
sample = (b * math.cos(2 * math.pi * a / b),
b * math.sin(2 * math.pi * a / b))
return np.array([[sample[0]], [sample[1]], [0]])
def getPathLen(self, path):
pathLen = 0
for i in range(1, len(path)):
node1_x = path[i][0]
node1_y = path[i][1]
node2_x = path[i-1][0]
node2_y = path[i-1][1]
pathLen += math.sqrt((node1_x - node2_x)**2 + (node1_y - node2_y)**2)
return pathLen
def lineCost(self, node1, node2):
return math.sqrt((node1.x - node2.x)**2 + (node1.y - node2.y)**2)
##################################################################################
def drawGraph(self, rnd=None):
plt.clf()
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
for node in self.nodeList:
if node.parent is not None:
                if node.x is not None and node.y is not None:
plt.plot([node.x, self.nodeList[node.parent].x], [
node.y, self.nodeList[node.parent].y], "-g")
for (ox, oy, size) in self.obstacleList:
plt.plot(ox, oy, "ok", ms = 30 * size)
plt.plot(self.start.x, self.start.y, "xr")
plt.plot(self.goal.x, self.goal.y, "xr")
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
class Node():
def __init__(self, x, y):
self.x = x
self.y = y
self.cost = 0.0
self.parent = None
def main():
print("Start rrt planning")
# ====Search Path with RRT====
obstacleList = [
(5, 5, 0.5),
(9, 6, 1),
(7, 5, 3),
(1, 5, 1),
(2, 2, 1),
(7, 9, 1)
] # [x,y,size(radius)]
# Set Initial parameters
rrt = RRTFamilyPlanners(start = [0, 0], goal = [5, 10],
randArea = [-2, 15], obstacleList = obstacleList)
path = rrt.RRTStarSearch(animation = show_animation)
# Draw final path
if show_animation:
rrt.drawGraph()
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.01) # Need for Mac
plt.show()
if __name__ == '__main__':
main()
| true
|
49a96a36ac7a962c1e0d00b5747699f62f4d9999
|
Python
|
MarioMiranda98/Curso-Python
|
/Interfaces/PrimeraInterfaz.pyw
|
UTF-8
| 344
| 3.0625
| 3
|
[] |
no_license
|
from tkinter import *
# first build the root (frame)
raiz = Tk()
raiz.title("Ventana de prueba") # Assign a title
raiz.resizable(0, 0) # Prevent it from being resizable
#raiz.iconbitmap("Ruta") # To set a different icon
raiz.geometry("650x350") # To set the dimensions
raiz.config(bg = "blue") # To change the background
raiz.mainloop() # Infinite loop
| true
|
5caf6e3dfee856906d3c146afcd31f475d5f2b8f
|
Python
|
MrShashankBisht/Python-basics-
|
/control_Statement/nestedForloop.py
|
UTF-8
| 67
| 2.953125
| 3
|
[] |
no_license
|
for i in range(1,50,5):
for j in range(i,30):
print (j)
| true
|
728d8ddf06cb13b425684e1fb57ac904bf5938f0
|
Python
|
wanghq/oss-copy
|
/functions/initMultipartUpload/test_index.py
|
UTF-8
| 1,034
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
import logging
import os
import string
import unittest
from .index import calc_groups
class TestIndex(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestIndex, self).__init__(*args, **kwargs)
def test_calc_groups(self):
cases = [
# total_size, part_size, max_total_part_size
[[100, 10, 40], [10, 3, 4]],
[[100, 10, 50], [10, 2, 5]],
[[101, 10, 40], [11, 3, 4]],
[[101, 10, 50], [11, 3, 5]],
[[99, 10, 40], [10, 3, 4]],
[[99, 10, 50], [10, 2, 5]],
[[100, 15, 40], [7, 4, 2]],
[[100, 15, 50], [7, 3, 3]],
]
for c in cases:
input = c[0]
expected = c[1]
t, g, p = calc_groups(input[0], input[1], input[2])
self.assertEqual(t, expected[0], input)
self.assertEqual(g, expected[1], input)
self.assertEqual(p, expected[2], input)
if __name__ == '__main__':
unittest.main()
| true
|
29c20047994da6047c4e916c44b267bf35cdc3c7
|
Python
|
Denisov-AA/Python_courses
|
/HomeWork/Lection8_TestWork/Task_2.py
|
UTF-8
| 135
| 3.609375
| 4
|
[] |
no_license
|
def my_reversed(somelist:list):
reversed_list = somelist[::-1]
return reversed_list
print(my_reversed([1, 2, 3, 4, 5, 6, 7]))
| true
|
0ef54a279c101714b03f23beb739734dc5cee4de
|
Python
|
Yeshwanthyk/algorithms
|
/leetcode/253-meeting-rooms/253_meeting_rooms_ii.py
|
UTF-8
| 824
| 3.96875
| 4
|
[] |
no_license
|
"""Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms required.
Example 1:
Input: [[0, 30],[5, 10],[15, 20]]
Output: 2
Example 2:
Input: [[7,10],[2,4]]
Output: 1
"""
import heapq
def meeting_room(intervals):
if len(intervals) < 1:
return 0
intervals = sorted(intervals, key=lambda x: x[0])
heap = []
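    # min-heap of the end times of meetings currently holding a room; a push allocates a new
    # room, heapreplace reuses the room of the earliest-ending meeting, so len(heap) is the answer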
for interval in intervals:
start_time = interval[0]
end_time = interval[1]
if not heap:
heapq.heappush(heap, end_time)
elif heap[0] > start_time:
heapq.heappush(heap, end_time)
else:
heapq.heapreplace(heap, end_time)
return len(heap)
intervals = [[7, 10], [2, 4]]
ans = meeting_room(intervals)
print(ans)
| true
|
f3882cde49fef82cd62c84d33c14936580b0a0c5
|
Python
|
ammumal/2021-1_Learning_Study
|
/Stack과 Queue/9012 괄호.py
|
UTF-8
| 797
| 3.453125
| 3
|
[] |
no_license
|
# read the number of test cases
n = int(input())
PS = [input() for i in range(n)]
# create lists for the bracket check and for storing the answers
stack = []
answer = []
# check the parentheses
for i in range(n):
for j in range(len(PS[i])):
        # if the character in PS is '(', push it onto the stack
if PS[i][j] == '(':
stack.append('(')
        # if it is ')', delete a '(' stored on the stack; if the stack is empty, append ')' and break so the line is judged NO
else:
if not stack:
stack.append(')')
break
else:
del stack[-1]
    # if the stack is empty, it is a VPS (valid parenthesis string)
if not stack:
answer.append('YES')
stack = []
else:
answer.append('NO')
stack = []
# print the results
for i in range(n):
print(answer[i])
| true
|
a5e5c80ed38558f08ecb7c3eb1c4a166a1bed0d0
|
Python
|
RShveda/pygame-practice
|
/catch-ball-game/catch_ball.py
|
UTF-8
| 658
| 2.53125
| 3
|
[] |
no_license
|
import pygame
from models import load_scores
from views import blank_screen
from controllers import user_controller_tick, system_controller_tick
import constants as cons
def main():
"""
Main function of the module which is responsible for variables initialisation
and game event loop.
"""
pygame.init()
clock = pygame.time.Clock()
load_scores()
is_finished = False
while not is_finished:
clock.tick(cons.FPS)
is_finished = user_controller_tick(is_finished)
system_controller_tick()
pygame.display.update()
blank_screen()
pygame.quit()
if __name__ == '__main__':
main()
| true
|
03cd03a9253731fec3e7f3745e841a8e537c3f7f
|
Python
|
efvaldez1/Advanced-Deep-Learning-with-Keras
|
/chapter9-drl/dqn-cartpole-9.3.1.py
|
UTF-8
| 8,505
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
"""Trains a DQN to solve CartPole-v0 problem
"""
from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import Adam, RMSprop
from collections import deque
import heapq
import numpy as np
import random
import argparse
import sys
import gym
from gym import wrappers, logger
class DQNAgent(object):
def __init__(self, state_space, action_space, args):
self.action_space = action_space
self.state_space = state_space
        self.memory = []
        self.gamma = 0.9 # discount rate
        self.epsilon = 1.0 # exploration rate
        self.epsilon_min = 0.1
        self.epsilon_decay = 0.99
        optimizer = Adam()
        self.weights_file = 'dqn_cartpole.h5'
        # online Q-network
        self.q_model = self.build_model()
self.q_model.compile(loss='mse', optimizer=optimizer)
self.target_q_model = self.build_model()
self.update_weights()
self.replay_counter = 0
self.enable_ddqn = True if args.enable_ddqn else False
self.prioritized_replay = True if args.prioritized_replay else False
if self.enable_ddqn:
print("DDQN---------------------------------------------------")
else:
print("----------------------------------------------------DQN")
if self.prioritized_replay:
print("PRIORITIZED REPLAY-------------------------------------")
self.priority = 0
def build_model(self):
inputs = Input(shape=(self.state_space.shape[0], ), name='state')
x = Dense(256, activation='relu')(inputs)
x = Dense(256, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Dense(self.action_space.n, activation='linear', name='action')(x)
q_model = Model(inputs, x)
q_model.summary()
return q_model
def save_weights(self):
self.q_model.save_weights(self.weights_file)
def update_weights(self):
self.target_q_model.set_weights(self.q_model.get_weights())
def act(self, state):
if np.random.rand() <= self.epsilon:
# explore - do random action
return self.action_space.sample()
# exploit
q_values = self.q_model.predict(state)
# select the action with max acc reward (Q-value)
return np.argmax(q_values[0])
    def get_td_error(self, state, action, reward, next_state):
        # TD error = |target Q - current Q|; eps keeps the priority strictly positive
        eps = random.uniform(1e-4, 1e-3)
        q_value = self.get_target_q_value(next_state, reward)
        q_value -= self.q_model.predict(state)[0][action]
        return abs(q_value) + eps
def remember(self, state, action, reward, next_state, done):
# self.memory.append([state, action, reward, next_state, done])
self.priority += 1
if self.prioritized_replay:
            self.priority = self.get_td_error(state, action, reward, next_state)
item = (self.priority, state, action, reward, next_state, done)
heapq.heappush(self.memory, item)
    def get_target_q_value(self, next_state, reward):
# TD(0) Q-value using Bellman equation
# to deal with non-stationarity, model weights are fixed
if self.enable_ddqn:
# DDQN
# current q network selects the action
action = np.argmax(self.q_model.predict(next_state)[0])
# target q network evaluate the action
q_value = self.target_q_model.predict(next_state)[0][action]
else:
# DQN chooses the q value of the action with max value
q_value = np.amax(self.target_q_model.predict(next_state)[0])
q_value *= self.gamma
q_value += reward
return q_value
def replay(self, batch_size):
"""Experience replay removes correlation between samples that
is causing the neural network to diverge
"""
# get a random batch of sars from replay memory
# sars = state, action, reward, state' (next_state)
if self.prioritized_replay:
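            # greedy prioritization: order transitions by TD-error priority and draw the batch
            # uniformly from the top slice (at most 16 * batch_size transitions)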
self.memory = heapq.nlargest(len(self.memory), self.memory, key=lambda m:m[0])
indexes = np.random.choice(min(len(self.memory), 16*batch_size), batch_size, replace=False)
sars_batch = []
for index in indexes:
sars_batch.append(self.memory[index])
else:
sars_batch = random.sample(self.memory, batch_size)
state_batch, q_values_batch = [], []
index = 0
for _, state, action, reward, next_state, done in sars_batch:
# policy prediction for a given state
q_values = self.q_model.predict(state)
            q_value = self.get_target_q_value(next_state, reward)
# correction on the Q-value for the given action
q_values[0][action] = reward if done else q_value
# collect batch state-q_value mapping
state_batch.append(state[0])
q_values_batch.append(q_values[0])
if self.prioritized_replay:
                priority = self.get_td_error(state, action, reward, next_state)
i = indexes[index]
self.memory[i] = (priority, state, action, reward, next_state, done)
index += 1
# train the Q-network
self.q_model.fit(np.array(state_batch),
np.array(q_values_batch),
batch_size=batch_size,
epochs=1,
verbose=0)
# update exploration-exploitation probability
if self.replay_counter % 4 == 0:
self.update_epsilon()
# copy new params on old target after every x training updates
if self.replay_counter % 2 == 0:
self.update_weights()
self.replay_counter += 1
def update_epsilon(self):
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
parser.add_argument('env_id',
nargs='?',
default='CartPole-v0',
help='Select the environment to run')
parser.add_argument("-d",
"--enable-ddqn",
action='store_true',
help="Enable double DQN")
parser.add_argument("-p",
"--prioritized-replay",
action='store_true',
help="Enable prioritized experience replay")
args = parser.parse_args()
if args.enable_ddqn:
print("Using DDQN")
else:
print("Using default DQN")
win_trials = 100
win_reward = { 'CartPole-v0' : 195.0 }
scores = deque(maxlen=win_trials)
# You can set the level to logging.DEBUG or logging.WARN if you
# want to change the amount of output.
logger.setLevel(logger.ERROR)
env = gym.make(args.env_id)
outdir = "/tmp/dqn-%s" % args.env_id
env = wrappers.Monitor(env, directory=outdir, force=True)
env.seed(0)
agent = DQNAgent(env.observation_space, env.action_space, args)
episode_count = 3000
state_size = env.observation_space.shape[0]
batch_size = 64
for i in range(episode_count):
state = env.reset()
state = np.reshape(state, [1, state_size])
t = 0
done = False
while not done:
# in CartPole, action=0 is left and action=1 is right
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
# in CartPole:
# state = [pos, vel, theta, angular speed]
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, done)
state = next_state
t += 1
if len(agent.memory) >= batch_size:
agent.replay(batch_size)
scores.append(t)
mean_score = np.mean(scores)
if mean_score >= win_reward[args.env_id] and i >= win_trials:
print("Solved in episode %d: Mean survival = %0.2lf in %d episodes"
% (i, mean_score, win_trials))
print("Epsilon: ", agent.epsilon)
agent.save_weights()
break
if i % win_trials == 0:
print("Episode %d: Mean survival = %0.2lf in %d episodes" %
(i, mean_score, win_trials))
# close the env and write monitor result info to disk
env.close()
| true
|
f567e81824b0485212695e4e5f0fff322cdce0ec
|
Python
|
WihlkeJulius/JWLTcolab
|
/m_engine.py
|
UTF-8
| 2,154
| 3.203125
| 3
|
[] |
no_license
|
#
# Mongoengine is the package that handles the connection to MongoDB
from mongoengine import *
# This creates a connection to the database 'systemet2' locally on your computer
connect('systemet2')
# This is a definition of what a document of type Vara looks like; I chose seven of the 30 fields that exist in the file
class Vara(Document):
nr= StringField()
Artikelid = StringField()
Varunummer = StringField()
Namn = StringField()
Namn2 = StringField()
Prisinklmoms = StringField()
Volymiml = StringField()
PrisPerLiter = StringField()
# This monster opens the file 'testfil.txt', then reads it line by line and first checks whether the article id
# already exists in the database. If it does not, it creates a new document in the database and saves it. If a
# document with the same article id already exists, it checks whether the price in the database differs from the one
# in the text file; if so, it prints the difference in the console window and then saves the new value in the database. If the price is the same as in the database, it just moves on to the next line.
def load_file():
    testdata = open('testfil.txt','r', encoding='utf-8', errors='ignore') # here you can change the name of the file you want to open; right now it is 'testfil.txt'
for rad in testdata:
radlista = rad.split('\t')
found_doc = Vara.objects(Artikelid=radlista[1])
if not found_doc:
newdoc = Vara(
nr=radlista[0],
Artikelid=radlista[1],
Varunummer=radlista[2],
Namn=radlista[3],
Namn2=radlista[4],
Prisinklmoms=radlista[5],
Volymiml=radlista[7],
PrisPerLiter=radlista[8]
)
newdoc.save()
elif found_doc:
if found_doc[0].Prisinklmoms != radlista[5]:
                print('New price')
                print(str(found_doc[0]['Namn'])+', Price change: '+(str(float(found_doc[0]['Prisinklmoms']) - float(radlista[5]))))
found_doc.update(Prisinklmoms=radlista[5])
testdata.close()
| true
|
06877c1852dd0e363395a63ce8ba0d671398d49b
|
Python
|
CSUBioinformatics1801/Python_Bioinformatics_ZYZ
|
/Exp6/list_test.py
|
UTF-8
| 561
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
a=input('input multi nums split by ",":')
ori_list=a.split(',')
n=0
for c in ori_list:
    ori_list[n]=int(c)
    n+=1
print("origin list:",ori_list)
x=eval(input('input a num:'))
if x in ori_list:
    x_index=ori_list.index(x)
    if 0<x_index<len(ori_list)-1:
        print('adjacent nums:',ori_list[x_index-1],ori_list[x_index+1])
    print(x,"'s index of the list is",x_index)
    ori_list.remove(x)
    print("Delete %s successfully!"%x)
else:
    ori_list.append(x)
    print("%s has been added"%x)
ori_list.sort()
print("Sorted:",ori_list)
| true
|
08bbbdcba129130a0f20af201af09a256bcf9461
|
Python
|
Amada91/Valentines-with-Python
|
/valentine.py
|
UTF-8
| 4,172
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
# =====================================================================
# Title: Valentines with Python
# Author: Niraj Tiwari
# =====================================================================
import os
import numpy as np
from wordcloud import WordCloud, STOPWORDS
import imageio
import matplotlib.pyplot as plt
from PIL import Image
import glob
# =====================================================================
# required variables
WORD_CLOUD_PNG = 'word_cloud.png'
WORD_CLOUD_GIF = 'word_cloud.gif'
HEART_IMAGE = 'heart1.png'
HEART_X_COORDS = np.array([ 0, 44, 115, 170, 209, 250, 263, 245, 183, 123, 68, 0])
HEART_Y_COORDS = np.array([ 145, 178, 195, 184, 161, 126, 73, -25, -91, -149, -187, -223])
TEXT_POS = (-110, -254)
DRAW_SPEED = 3 # from 0 to 10
DRAW_WIDTH = 5 # width of the pen
SCREEN_SIZE = (720, 576) # size of the screen
# =====================================================================
# convert png image to gif and save
def to_gif(gif_file_name, png_file_name):
# Create the frames
frames = []
imgs = glob.glob(png_file_name)
for i in imgs:
new_frame = Image.open(i)
frames.append(new_frame)
# Save into a GIF file that loops forever
frames[0].save(gif_file_name, format='GIF',
append_images=frames[1:],
save_all=True,
duration=300, loop=0)
# =====================================================================
# random color for word cloud
def random_red_color_func(word=None, font_size=None, position=None,
orientation=None, font_path=None, random_state=None):
h = 0
s = 100
l = int(50 * (float(random_state.randint(60, 120))/100.0))
return "hsl({}, {}%, {}%)".format(h, s, l)
# =====================================================================
# generate word cloud
def generate_word_cloud(words, image_file, saved_name, gif_file_name):
mask = imageio.imread(image_file)
word_cloud = WordCloud(width = 400,
height = 400,
color_func = random_red_color_func,
background_color = 'white',
stopwords = STOPWORDS,
mask = mask, repeat=True).generate(words)
plt.figure(figsize = (10,8), facecolor = 'white', edgecolor='blue')
plt.imshow(word_cloud)
plt.axis('off')
plt.tight_layout(pad=0)
plt.savefig(saved_name)
to_gif(gif_file_name, saved_name)
# =====================================================================
# draw animation
def draw_boundary():
from turtle import Turtle, Screen, bye, getcanvas, ontimer
t = Turtle()
t.speed(1)
s = Screen()
s.setup(SCREEN_SIZE[0], SCREEN_SIZE[1])
s.bgpic('word_cloud.gif')
# t.shape('circle')
xs = HEART_X_COORDS
ys = HEART_Y_COORDS
xs = np.flip(xs)
ys = np.flip(ys)
t.penup()
t.speed(0)
t.left(135)
t.pensize(DRAW_WIDTH)
t.goto(xs[0], ys[0])
t.pendown()
t.speed(DRAW_SPEED)
for i in range(12):
t.color("red")
# t.fd(20)
t.goto(xs[i], ys[i])
xs = -np.flip(xs)
ys = np.flip(ys)
for i in range(12):
t.color("red")
# t.fd(20)
t.goto(xs[i], ys[i])
t.penup()
t.speed(0)
t.pensize(DRAW_WIDTH)
t.goto(TEXT_POS[0], TEXT_POS[1])
t.pendown()
t.speed(DRAW_SPEED)
t.write("HAPPY VALENTINE'S DAY", font=('Arial', 16, 'bold'))
s.exitonclick()
bye()
# =====================================================================
if __name__ == '__main__':
name = input('Enter your valentines name: ')
words = ', '.join(name.split())
generate_word_cloud(words, HEART_IMAGE, WORD_CLOUD_PNG, WORD_CLOUD_GIF)
draw_boundary()
os.remove(WORD_CLOUD_GIF)
os.remove(WORD_CLOUD_PNG)
| true
|
19cebd43cf45d31d4ddd4e2fa926ea32265b3290
|
Python
|
cltrudeau/purdy
|
/purdy/colour/urwidco.py
|
UTF-8
| 5,657
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace, Punctuation, Text, Literal
from purdy.parser import FoldedCodeLine, token_ancestor
# =============================================================================
# Urwid Colourizer
_code_palette = {
# urwid colour spec supports both 16 and 256 colour terminals
# fg16 bg16 fg256 bg256
Token: ('', '', '', '', ''),
Whitespace: ('', '', '', '', ''),
Comment: ('dark cyan', '', '', '#6dd', ''),
Keyword: ('brown', '', '', '#d8d', ''),
Operator: ('brown', '', '', '#aaa', ''),
Punctuation: ('dark cyan', '', '', '#8df', ''),
Text: ('dark cyan', '', '', '#ddd', ''),
Name: ('light gray', '', '', '#ddd', ''),
Name.Builtin: ('dark cyan', '', '', '#8af', ''),
Name.Builtin.Pseudo:('dark cyan', '', '', '#a66,bold', ''),
Name.Function: ('dark cyan', '', '', '#adf', ''),
Name.Class: ('dark cyan', '', '', '#adf', ''),
Name.Exception: ('dark green', '', '', '#fd6,bold', ''),
Name.Decorator: ('dark cyan', '', '', '#fd6,bold', ''),
String: ('dark magenta', '', '', '#ddd', ''),
Number: ('dark magenta', '', '', '#f86', ''),
Generic.Prompt: ('dark blue', '', '', '#fff,bold', ''),
Generic.Error: ('dark green', '', '', '#fd6,bold', ''),
Generic.Traceback: ('', '', '', '#ddd', ''),
Error: ('dark green', '', '', '#fd6,bold', ''),
}
_xml_palette = dict(_code_palette)
_xml_palette.update({
Name.Attribute: ('brown', '', '', 'brown', ''),
Keyword: ('dark cyan', '', '', '#8af', ''),
Name.Tag: ('dark cyan', '', '', '#8af', ''),
Punctuation: ('dark cyan', '', '', '#8af', ''),
})
_doc_palette = dict(_code_palette)
_doc_palette.update({
Name.Tag: ('brown', '', '', 'brown', ''),
Name.Attribute: ('brown', '', '', 'brown', ''),
Literal: ('dark cyan', '', '', '#8af', ''),
Generic.Heading:('brown', '', '', 'brown', ''),
Generic.Subheading:('brown', '', '', 'brown', ''),
Generic.Emph: ('dark blue', '', '', 'dark blue', ''),
Generic.Strong: ('dark green', '', '', 'dark green', ''),
String: ('dark magenta', '', '', 'dark magenta', ''),
})
class UrwidColourizer:
palettes = {
'code':_code_palette,
'xml':_xml_palette,
'doc':_doc_palette,
}
@classmethod
def create_urwid_palette(cls):
"""Returns a list of colour tuples that Urwid uses as its palette. The
list is based on the UrwidColourizer.colours with a couple extra items
"""
urwid_palette = []
for name, palette in cls.palettes.items():
for key, value in palette.items():
# for each item in our colours hash create a tuple consisting of
# the token name and its values
item = (f'{name}_{key}', ) + value
urwid_palette.append( item )
# do it again for highlighted tokens, for 16 colour mode change
# both the fg and bg colour, for 256 colour mode just change the
# background
item = (f'{name}_{key}_highlight', 'black', 'light gray', '',
value[3], 'g23')
urwid_palette.append( item )
# add miscellaneous other palette items
urwid_palette.extend([
('reverse', 'black', 'white', '', 'black', 'white'),
('bold', 'white,bold', '', '', 'white,bold', ''),
('title', 'white,underline', '', '', 'white,underline', ''),
('folded', 'white', '', '', 'white', ''),
('line_number', 'dark gray', '', '', 'dark gray', ''),
('empty', '', '', '', '', ''),
('empty_highlight', '', 'light gray', '', '', 'g23'),
])
return urwid_palette
@classmethod
def colourize(cls, code_line):
"""Returns a list containing markup tuples as used by urwid.Text
widgets.
:param code_line: a :class:`CodeLine` object to colourize
"""
if isinstance(code_line, FoldedCodeLine):
return ('folded', ' ⋮')
palette = code_line.lexer.palette
ancestor_list = cls.palettes[palette].keys()
output = []
if code_line.line_number >= 0:
output.append( cls.line_number(code_line.line_number) )
for part in code_line.parts:
ancestor = token_ancestor(part.token, ancestor_list)
key = f'{palette}_{ancestor}'
if code_line.highlight:
key += '_highlight'
# Urwid uses a palette which has been built as a hash using the
# names of the ancestor tokens as keys and the fg/bg colour
# choices as values, each piece of marked up text is a tuple of
# the palette key and the text to display
markup = (key, part.text)
output.append(markup)
return output
@classmethod
def line_number(cls, num):
"""Returns a colourized version of a line number"""
return ('line_number', f'{num:3} ')
| true
|
64a15837d59be63689799da54c288c3ee7aaa988
|
Python
|
zhy0/dmarket_rl
|
/dmarket/agents.py
|
UTF-8
| 8,913
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
import numpy as np
class MarketAgent:
"""
Market agent implementation to be used in market environments.
Attributes
----------
role: str, 'buyer' or 'seller'
reservation_price: float
Must be strictly positive.
name: str, optional (default=None)
Name of the market agent. If not given, a random one will be generated.
Note: this will usually not be the agent id used in the market engine.
"""
def __init__(self, role, reservation_price, name=None):
if not role in ['buyer', 'seller']:
raise ValueError("Role must be either buyer or seller")
if reservation_price <= 0:
raise ValueError("Reservation price must be positive")
self.role = role
self.reservation_price = reservation_price
if not name:
randstring = "%04x" % np.random.randint(16**4)
cls = type(self).__name__[0:4]
letter = role[0].upper()
name = f"{cls}_{letter}{reservation_price}_{randstring}"
self.name = name
def get_offer(self, observation):
"""
Returns offer given an observations.
Parameters
----------
observation: array_like
An element of some observation space defined by the used
information setting.
Returns
-------
offer: float
Offer to made to the market.
"""
raise NotImplementedError
class ConstantAgent(MarketAgent):
"""Agent that always offers its reservation price."""
def get_offer(self, observation):
return self.reservation_price
class FactorAgent(MarketAgent):
"""
Abstract agent class that determines an offer range based on a
multiplicative factor.
Children of this class will take an argument ``max_factor`` and use this
together with its reservation price to determine an interval to offer in.
For a seller, the agent will have the range ``[r, (1+max_factor)*r]`` with
``r`` the reservation price. For a buyer, this interval would be
``[(1-max_factor)*r, r]``.
Parameters
----------
max_factor: float, optional (default=0.5)
Must be between 0 and 1.
Attributes
----------
_s: int
A sign derived from the role of the agent, +1 means seller, -1 means
buyer.
_c: float
A factor used to compute the interval range.
_a: float
Lower bound of the offer range.
_b: float
Upper bound of the offer range.
"""
def __init__(self, role, reservation_price, name=None, max_factor=0.5):
self.max_factor = max_factor
super().__init__(role, reservation_price, name)
r = reservation_price
self._s = (-1 if role == 'buyer' else 1)
self._c = (1 + self._s * max_factor)
self._a = min(r, self._c*r) # minimum agent can offer
self._b = max(r, self._c*r) # maximum agent can offer
class UniformRandomAgent(FactorAgent):
"""
Random agent that offers uniformly random prices.
This agent will take an argument ``max_factor`` and use this together
with its reservation price to determine the interval to use for sampling
offers. For a seller, the agent will make uniform random offers in the
range ``[r, (1+max_factor)*r]`` with ``r`` the reservation price. For a
buyer, this interval would be ``[(1-max_factor)*r, r]``.
Parameters
----------
max_factor: float, optional (default=0.5)
Must be between 0 and 1.
"""
def get_offer(self, observation):
return np.random.uniform(self._a, self._b)
class TimeDependentAgent(FactorAgent):
"""
Abstract helper class to create agents that have time-dependent strategies.
"""
def get_offer(self, observation):
if not isinstance(observation, tuple):
raise ValueError("Expected tuple observation!")
obs = observation[0]
time = observation[1]
return self.compute_offer(obs, time)
def compute_offer(self, observation, time):
"""
Compute the offer based on observation and time.
"""
raise NotImplementedError
class TimeLinearAgent(TimeDependentAgent):
"""
Agent that linearly decreases/increases its offer price.
This agent starts with a high offer and linearly decreases/increases its
price until it reaches its reservation price.
Parameters
----------
max_factor: float, optional (default=0.5)
Must be between 0 and 1, determines the offer range of the agent.
noise: float, optional (default=1.0)
The standard deviation of the noise added to the computed price.
max_steps: int, optional (default=20)
Number of steps until the agent offers its reservation price. This
determines how quickly the agent lowers/increases his price.
"""
def __init__(self, role, reservation_price, name=None, max_factor=0.5,
noise=1.0, max_steps=20):
super().__init__(role, reservation_price, name, max_factor)
self.max_steps = max_steps
self.noise = noise
self._slope = -self._s * (self._b - self._a)/self.max_steps
def compute_offer(self, observation, time):
t = min(time, self.max_steps)
noise = np.random.normal(scale=self.noise)
return self._c*self.reservation_price + t*self._slope + noise
class GymRLAgent(FactorAgent):
"""
A market agent with reinforcement learning model.
This class serves as a wrapper for gym RL models and serves two purposes:
1. Standardize action space for RL models;
2. Make trained RL models applicable under different market situations.
The second point is achieved through normalization of input observations.
This makes it possible for an agent that was trained as a seller to
operate as a buyer. It also enables agents to function properly across
markets with different price scales.
Parameters
----------
model: object, optional (default=None)
Trained baselines model to use for predictions. It needs to have the
method ``predict(observation) -> (action, state)``.
discretization: int, optional (default=20)
The number of different offers the agent can make. This determines the
action space of the agent.
max_factor: int
A factor of the reservation price that determines the range of prices
the agent can offer. See ``UniformRandomAgent``.
"""
def __init__(self, role, reservation_price, name=None, model=None,
discretization=20, max_factor=0.5):
self.model = model
self.discretization = discretization
self._N = discretization
super().__init__(role, reservation_price, name, max_factor)
def get_offer(self, observation):
if not self.model:
raise RuntimeError("Current agent does not have a model")
action = self.model.predict(self.normalize(observation))[0]
return self.action_to_price(action)
def normalize(self, observation):
"""
Normalize the prices in observations according to reservation price.
This function will serve to scale all observations based on the agent's
reservation price and role. An observation should contain nonnegative
prices. A small value corresponds to prices close to the agent's
reservation price, while large values correspond to attractive offers.
To preserve symmetry between sellers and buyers, this function is
discontinuous in 0, since the information settings represent no
offers/no information with zero.
Parameters
----------
observation: array_like
An element of the observation space determined by the information
setting.
Returns
-------
normalized_observation: array_like
Scaled observation based on agent's reservation price and role.
"""
return np.heaviside(observation, 0) * self._s * \
(observation - self.reservation_price)/self.reservation_price
def action_to_price(self, action):
"""
Convert an action in the action space of the agent to a market price.
This function uniformly discretizes the price range of the agent. An
action close to zero should yield a conservative offer, i.e., close
to the reservation price, while a large value for action gives more
aggressive offers.
Parameters
----------
action: int
The action is an integer ``0 <= action < discretization``.
Returns
-------
price: float
The price corresponding to the action.
"""
l = action - self._N/2
m = self._N/2
return ((m - l*self._s)*self._a + (m + l*self._s)*self._b)/self._N
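# Minimal usage sketch (illustrative, not part of the module): a buyer with reservation price
# 100 and max_factor=0.5 offers uniformly in [50, 100]; this agent ignores its observation.
# buyer = UniformRandomAgent('buyer', 100, max_factor=0.5)
# offer = buyer.get_offer(observation=None)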
| true
|
daa11d5d9354b5a92e86165000b5cd0d5ab4465f
|
Python
|
yufengvac/one
|
/test/one.py
|
UTF-8
| 148
| 3.109375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
file = open("test.txt", "r")
count = 0
for line in file.readlines():
count = count + 1
print(count)
print(line)
| true
|
5d83caf939bbb00d2ff85f7c63dd60e956b3ccb7
|
Python
|
ym0179/bit_seoul
|
/ml/m13_kfold_estimators2.py
|
UTF-8
| 5,037
| 2.890625
| 3
|
[] |
no_license
|
#Day12
#2020-11-24
# extract the regressor models
import pandas as pd
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import r2_score
from sklearn.utils.testing import all_estimators
import warnings
warnings.filterwarnings('ignore')
boston = pd.read_csv('./data/csv/boston_house_prices.csv', header=1, index_col=0)
x = boston.iloc[:,0:12]
y = boston.iloc[:,12]
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2,random_state=44)
allAlgorithms = all_estimators(type_filter='regressor') # extract all regressor models
for (name, algorithm) in allAlgorithms: # loop over every model's algorithm
try:
kfold = KFold(n_splits=7, shuffle=True)
model = algorithm()
scores = cross_val_score(model, x_train, y_train, cv=kfold)
print(name,' : ',scores, " / ", scores.mean())
# print(name,' : ',scores)
# model.fit(x_train,y_train)
# y_pred = model.predict(x_test)
        # print(name, ' accuracy : ', r2_score(y_test,y_pred))
except:
pass
import sklearn
print(sklearn.__version__) # version 0.22.1 has an issue so nothing prints -> need to downgrade the version
'''
ARDRegression : [0.63472339 0.73699293 0.72638401 0.65501959 0.69614065]
AdaBoostRegressor : [0.81182737 0.83379332 0.84245069 0.77756577 0.85166409]
BaggingRegressor : [0.79138811 0.88575358 0.82851186 0.73332175 0.87658863]
BayesianRidge : [0.69691645 0.71099764 0.67201277 0.70536539 0.68568366]
CCA : [0.5806552 0.48904534 0.70327595 0.62285991 0.79428391]
DecisionTreeRegressor : [0.80223336 0.7540664 0.5059052 0.4440844 0.65935755]
DummyRegressor : [-0.00137973 -0.00129879 -0.04407133 -0.00123083 -0.0159433 ]
ElasticNet : [0.59493706 0.56272884 0.73365092 0.69541029 0.69606088]
ElasticNetCV : [0.60860904 0.67915763 0.57426626 0.62888366 0.73562787]
ExtraTreeRegressor : [0.86450746 0.74297366 0.53432261 0.59945686 0.64678139]
ExtraTreesRegressor : [0.85225421 0.90500975 0.86617287 0.85753211 0.87393061]
GammaRegressor : [-0.02701651 -0.00030033 -0.08118306 -0.00745336 -0.01016455]
GaussianProcessRegressor : [-6.78859393 -6.07186421 -7.37628514 -4.80213878 -6.2481255 ]
GeneralizedLinearRegressor : [0.54524608 0.7250886 0.62371454 0.68799271 0.61748805]
GradientBoostingRegressor : [0.92940103 0.7175701 0.87544309 0.7556643 0.87608506]
HistGradientBoostingRegressor : [0.87467476 0.87732726 0.75257654 0.7248113 0.89037996]
HuberRegressor : [0.69471431 0.67857872 0.70739335 0.65091386 0.47298981]
IsotonicRegression : [nan nan nan nan nan]
KNeighborsRegressor : [0.5832549 0.2546547 0.39651904 0.52064014 0.5169459 ]
KernelRidge : [0.43205815 0.73256777 0.78110944 0.64994815 0.58164087]
Lars : [0.79477887 0.75460525 0.58492768 0.76527814 0.60013652]
LarsCV : [0.49319365 0.81419339 0.79302459 0.53879881 0.72506674]
Lasso : [0.73905339 0.70775097 0.64044895 0.59981717 0.50907817]
LassoCV : [0.74348116 0.67173894 0.67785273 0.51023048 0.58180958]
LassoLars : [-0.00796666 -0.00699375 -0.00058403 -0.00392075 -0.00127822]
LassoLarsCV : [0.56913906 0.77362298 0.70654728 0.76041944 0.73332494]
LassoLarsIC : [0.7244167 0.7158275 0.69406338 0.75834145 0.57829223]
LinearRegression : [0.771467 0.64618242 0.70935878 0.64652762 0.69451981]
LinearSVR : [0.49898865 0.41101108 0.57294219 0.60247352 0.7069553 ]
MLPRegressor : [ 0.66048977 -0.35963132 0.47638036 0.49526194 0.29457928]
MultiTaskElasticNet : [nan nan nan nan nan]
MultiTaskElasticNetCV : [nan nan nan nan nan]
MultiTaskLasso : [nan nan nan nan nan]
MultiTaskLassoCV : [nan nan nan nan nan]
NuSVR : [0.1371885 0.22346531 0.21508614 0.09388968 0.25175281]
OrthogonalMatchingPursuit : [0.56054326 0.55417602 0.52423729 0.53617177 0.46495364]
OrthogonalMatchingPursuitCV : [0.65705797 0.64562383 0.78680437 0.55099728 0.55431573]
PLSCanonical : [-2.47160649 -0.79342764 -2.41959199 -1.97057166 -2.41194385]
PLSRegression : [0.66585045 0.59597438 0.68465931 0.73627246 0.70289204]
PassiveAggressiveRegressor : [ 0.12602799 -0.02234138 0.17265532 0.18889428 -0.08999591]
PoissonRegressor : [0.7531553 0.70513508 0.8252041 0.73214539 0.68064051]
RANSACRegressor : [0.55351915 0.53909779 0.57947924 0.60876912 0.75013641]
RandomForestRegressor : [0.80693943 0.91920546 0.65201945 0.90496118 0.89685453]
Ridge : [0.66198069 0.6906941 0.65923349 0.7292856 0.72547757]
RidgeCV : [0.69031587 0.73322112 0.73959015 0.62832114 0.65868779]
SGDRegressor : [-1.74841079e+24 -4.33056960e+26 -6.08606957e+26 -2.10602576e+27
-1.83006918e+26]
SVR : [0.1462815 0.18366299 0.03295133 0.21950759 0.36641889]
TheilSenRegressor : [0.61424655 0.60865322 0.75293689 0.73932156 0.619508 ]
TransformedTargetRegressor : [0.73375329 0.67349494 0.7734167 0.56167802 0.702008 ]
TweedieRegressor : [0.60150164 0.69212087 0.67875117 0.67929497 0.56801638]
_SigmoidCalibration : [nan nan nan nan nan]
'''
| true
|
1d057bc95b84a9bd20c65312b9932a3788e21286
|
Python
|
kanwalbir/poker_sols
|
/main.py
|
UTF-8
| 3,545
| 3.859375
| 4
|
[] |
no_license
|
#-----------------------------------------------------------------------------#
# PACKAGE AND MODULE IMPORTS #
#-----------------------------------------------------------------------------#
"""
Other Python file imports.
"""
from create_deck import create_deck
from deal import deal_cards
from wild_hand import best_wild_hand
#-----------------------------------------------------------------------------#
"""
Check if the cards have been provided beforehand. If not, then create a deck
of cards and let the system deal some cards to the players. Find the rank of
every hand and print out some information about the hand. Determine the
winner of the poker match.
Args: (i) Dealt hand (optional) - if absent, system will deal cards
(ii) Number of players (optional) - default is 4 players
(iii) Deck of cards (optional) - default is 'standard' 52 deck
- or 'joker' which adds 2 jokers to 'standard'
- see create_deck.py for more details
Returns: (i) Winner of the poker round
"""
def poker(deal=[], num_players=4, deck='standard'):
if deal:
newdeal = deal
else:
if num_players > 10: # Maximum 10 players can play using 1 deck of cards
num_players = 10
mydeck = create_deck(deck)
newdeal = deal_cards(mydeck, num_players, 5)
print '\n', 'Following hands were dealt:'
poker_results, winner, max_value, i = {}, [], (), 0
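    # poker_results maps each hand's index to [hand, best_wild_hand(hand)]; winner collects every
    # hand tied at the best hand value seen so far, so a single hand or a list of ties is returned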
for hand in newdeal:
poker_results[i] = [hand, best_wild_hand(hand)]
print poker_results[i][0], '---->', poker_results[i][1][1]
if poker_results[i][1][0] > max_value:
max_value = poker_results[i][1][0]
winner = [poker_results[i][0]]
win_type = poker_results[i][1][1]
elif poker_results[i][1][0] == max_value:
if poker_results[i][0] not in winner:
winner += [poker_results[i][0]]
i += 1
if len(winner) == 1:
winner = winner[0]
print '\n', 'The winner is:', winner, '---->', win_type
print '---------------------------------------------------------------'
return winner
#-----------------------------------------------------------------------------#
"""
Test values and assert statements for above function.
"""
def test1():
sf = ['6C', '7C', '8C', '9C', 'TC'] # Straight Flush
fk = ['9C', '9D', '9H', '9S', '7D'] # Four of a Kind
fh = ['TC', 'TD', 'TH', '7C', '7D'] # Full House
sf1 = ['6C', '7C', '8C', '9C', 'TC'] # Straight Flush
sf2 = ['6D', '7D', '8D', '9D', 'TD'] # Straight Flush
sf3 = ['6S', '7S', '8S', '9S', 'TS'] # Straight Flush
assert poker([sf] + 99*[fh]) == sf
assert poker([sf, fk, fh]) == sf
assert poker([fk, fh]) == fk
assert poker([fh, fh]) == fh
assert poker([sf]) == sf
assert poker([sf1, sf2, fk, fh]) == [sf1, sf2]
assert poker([sf1, sf2, sf3, fk, fh]) == [sf1, sf2, sf3]
return 'All Tests 1 Passed'
def test2():
bj1 = ['7C', '8C', '9D', 'TD', '?B'] # Straight
rj1 = ['7C', '8C', '9D', 'TD', '?R'] # Straight
brj1 = ['TC', 'TD', '7C', '?R', '?B'] # Four of a Kind
assert poker([bj1, rj1, brj1]) == brj1
return 'All Tests 2 Passed'
print poker()
print poker([])
print poker([], 2)
print poker([], 8, 'standard')
print poker([], 8, 'joker')
print poker([], 11, 'standard')
print test1()
print test2()
#-----------------------------------------------------------------------------#
| true
|
ed51c7733c5c43339625e26a53329df0e2c05fbe
|
Python
|
rodolforicardotech/pythongeral
|
/pythonparazumbis/Lista01/PPZ01.py
|
UTF-8
| 208
| 4.09375
| 4
|
[] |
no_license
|
# 1) Write a program that asks for two
# integers and prints the sum of those two numbers
n1 = int(input('Enter the first number: '))
n2 = int(input('Enter the second number: '))
print(n1 + n2)
| true
|
c8acbf969aa0275cbfd9291653e79cb07e2cd365
|
Python
|
rodrigodg1/redes
|
/Sockets/Python/TCP-Server.py
|
UTF-8
| 1,263
| 3.671875
| 4
|
[] |
no_license
|
from socket import *
# Define the server port
serverPort = 12000
# Create a new TCP socket (SOCK_STREAM) with IPv4 addressing (AF_INET)
serverSocket = socket(AF_INET, SOCK_STREAM)
# Bind the socket to the specified IP address and port
serverSocket.bind(("10.62.9.237", serverPort))
# Set the socket to listen for connections, with a backlog of at most 1 pending connection
serverSocket.listen(1)
print("The server is ready to receive connections")
# Infinite loop to handle client connections
while True:
    # Accept a new client connection, returning a new socket and the client's address
    connectionSocket, addr = serverSocket.accept()
    print("Connected with: [", addr[0], "Port:", addr[1], "]")
    # Receive up to 1024 bytes from the client and decode the message as a string
    sentence = connectionSocket.recv(1024).decode()
    # Convert the received message to uppercase
    capitalizedSentence = sentence.upper()
    # Send the uppercase message back to the client, encoded as bytes
    connectionSocket.send(capitalizedSentence.encode())
    # Close the connection with the client
    connectionSocket.close()
    print("Connection with: [", addr[0], "Port:", addr[1], "] was closed")
| true
|
bb354cf209cf2120bbda46c37c51e1a8893d15c2
|
Python
|
NewWisdom/Algorithm
|
/파이썬으로 시작하는 삼성 SW역량테스트/2. 정렬/11651.py
|
UTF-8
| 839
| 3.75
| 4
|
[] |
no_license
|
"""
문제
2차원 평면 위의 점 N개가 주어진다. 좌표를 y좌표가 증가하는 순으로, y좌표가 같으면 x좌표가 증가하는 순서로 정렬한 다음 출력하는 프로그램을 작성하시오.
입력
첫째 줄에 점의 개수 N (1 ≤ N ≤ 100,000)이 주어진다. 둘째 줄부터 N개의 줄에는 i번점의 위치 xi와 yi가 주어진다. (-100,000 ≤ xi, yi ≤ 100,000) 좌표는 항상 정수이고, 위치가 같은 두 점은 없다.
출력
첫째 줄부터 N개의 줄에 점을 정렬한 결과를 출력한다.
예제 입력 1
5
0 4
1 2
1 -1
2 2
3 3
예제 출력 1
1 -1
1 2
2 2
3 3
0 4
"""
import sys
input = lambda : sys.stdin.readline()
n = int(input())
arr = [list(map(int,input().split())) for _ in range(n)]
arr.sort(key = lambda x: (x[1],x[0]))
for i in arr:
print(i[0],i[1])
| true
|
b34bbd88665e2959f184a80fe461ce314895b2e1
|
Python
|
Richard-D/python_excrise
|
/类和实例.py
|
UTF-8
| 1,273
| 3.6875
| 4
|
[] |
no_license
|
class Student(object):
def __init__(self,name,score):
self.name = name
self.score = score
def print_score(self):
print("%s: %s" %(self.name, self.score))
bart = Student("denghuang","97")
print("我们来看看未实例化的信息 ", Student) #一个类
print("我们来看看实例化后的信息 ", bart) #一个对象
lisa = Student("lisa","99")
bart.print_score()
print("我们来看看类中方法的地址:" ,Student("denghuang","97").print_score) ##我想打印出方法的地址
## 访问限制与数据封装
# 外部无法访问的name与score
class Student_fix(object):
def __init__(self,name,score):
self.__name = name
self.__score = score
def print_score(self):
print('%s: %s' % (self.__name, self.__score))
def get_name(self):
return self.__name
def get_score(self):
return self.__score
def set_score(self,score):
if 0 <= score <= 100:
self.__score = score
else:
raise ValueError("Bad Score")
bart = Student_fix("J",99)
bart.set_score(80)
print(bart.get_name())
print(bart.get_score())
print(bart._Student_fix__name) # it can still be accessed this way
bart._Student_fix__name = "K"
print(bart._Student_fix__name)
| true
|
562acf55734f4d1215d5100d24027565b2079038
|
Python
|
davidvaguilar/FundamentosPython
|
/src/basico/ejercicio020/Ejercicio020.py
|
UTF-8
| 378
| 3.53125
| 4
|
[] |
no_license
|
'''
Created on 05-05-2016
@author: David
'''
if __name__ == '__main__':
print ("ESTE PROGRAMA CALCULA SU SALARIO SEMANAL ")
print ("Ingrese el valor hora")
valorHora = int(input())
print("Ingrese la cantidad de Horas trabajadas")
cantidadHora = int(input())
salario = valorHora * cantidadHora
print ("Su salario semanal es : ",salario)
| true
|
cb2ea93b9fe8a8db3234d14e1b7b25219996b733
|
Python
|
mkachuee/sentiment-discovery
|
/model/model.py
|
UTF-8
| 14,186
| 2.546875
| 3
|
[] |
no_license
|
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from apex import RNN
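# NOTE: one_hot and get_valid_outs are used below but are not defined in this file; the
# helpers here are minimal sketches (assumptions, not the original implementations).
def one_hot(indices, num_classes):
    # expand integer token indices (e.g. shape (seq, batch)) into (seq, batch, num_classes)
    shape = tuple(indices.size()) + (num_classes,)
    out = torch.zeros(shape, device=indices.device)
    return out.scatter_(len(shape) - 1, indices.unsqueeze(-1), 1)
def get_valid_outs(timestep, seq_lens, out, last_out):
    # keep the new features for sequences still within their length; carry forward the
    # features captured at the final valid step for sequences that have already ended
    valid = (seq_lens > timestep).type_as(out).unsqueeze(-1)
    return valid * out + (1 - valid) * last_out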
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.decoder = nn.Linear(nhid, ntoken)
self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.decoder.bias.data.fill_(0)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input, reset_mask=None):
emb = self.drop(self.encoder(input))
self.rnn.detach_hidden()
output, hidden = self.rnn(emb, reset_mask=reset_mask)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd = {'encoder': sd}
sd['decoder'] = self.decoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
if 'decoder' in state_dict:
self.decoder.load_state_dict(state_dict['decoder'], strict=strict)
self.encoder.load_state_dict(state_dict['encoder']['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['encoder']['rnn'], strict=strict)
class RNNModelNoEmbed(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModelNoEmbed, self).__init__()
self.drop = nn.Dropout(dropout)
#self.encoder = nn.Embedding(ntoken, ninp)
self.decoder = nn.Linear(nhid, ntoken)
self.rnn=getattr(RNN, rnn_type)(ntoken, nhid, nlayers, dropout=dropout)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
raise ValueError('Not supported!')
self.decoder.bias.data.fill_(0)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def forward(self, input, reset_mask=None):
#emb = self.drop(self.encoder(input))
self.rnn.detach_hidden()
#input = input.type(torch.FloatTensor).cuda()
emb = one_hot(input, 256).type(torch.FloatTensor).cuda()
#pdb.set_trace()
output, hidden = self.rnn(emb, reset_mask=reset_mask)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
#sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd = {'encoder': sd}
sd['decoder'] = self.decoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
if 'decoder' in state_dict:
self.decoder.load_state_dict(state_dict['decoder'], strict=strict)
#self.encoder.load_state_dict(state_dict['encoder']['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['encoder']['rnn'], strict=strict)
class RNNModelPreTrain(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False, nvec=300):
super(RNNModelPreTrain, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
#self.decoder = nn.Linear(nhid, ntoken)
self.decoder_vec = nn.Linear(nhid, nvec)
self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
raise ValueError('Not Supported: When using the tied flag, nhid must be equal to emsize')
self.decoder_vec.bias.data.fill_(0)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.nvec = nvec
self.hidden = self.init_hidden()
def forward(self, input_seq, reset_mask=None):
emb = self.drop(self.encoder(input_seq))
#self.rnn.detach_hidden()
output, self.hidden = self.rnn(emb, self.hidden)#, reset_mask=reset_mask)
output = self.drop(output)
decoded = self.decoder_vec(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), self.hidden
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd = {'encoder': sd}
sd['decoder_vec'] = self.decoder_vec.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
if 'decoder' in state_dict:
self.decoder.load_state_dict(state_dict['decoder'], strict=strict)
self.encoder.load_state_dict(state_dict['encoder']['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['encoder']['rnn'], strict=strict)
    def init_hidden(self):
        # Return the fresh (h, c) state as well, so that `self.hidden = self.init_hidden()`
        # in __init__ does not overwrite the state with None.
        self.hidden = (torch.zeros(1, 1, self.nhid), torch.zeros(1, 1, self.nhid))
        return self.hidden
class RNNFeaturizer(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, all_layers=False):
super(RNNFeaturizer, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.all_layers = all_layers
self.output_size = self.nhid if not self.all_layers else self.nhid * self.nlayers
def forward(self, input, seq_len=None):
self.rnn.detach_hidden()
if seq_len is None:
for i in range(input.size(0)):
emb = self.drop(self.encoder(input[i]))
_, hidden = self.rnn(emb.unsqueeze(0), collectHidden=True)
cell = self.get_cell_features(hidden)
else:
last_cell = 0
for i in range(input.size(0)):
emb = self.drop(self.encoder(input[i]))
_, hidden = self.rnn(emb.unsqueeze(0), collectHidden=True)
cell = self.get_cell_features(hidden)
if i > 0:
cell = get_valid_outs(i, seq_len, cell, last_cell)
last_cell = cell
return cell
def get_cell_features(self, hidden):
cell = hidden[1]
#get cell state from layers
if self.all_layers:
cell = torch.cat(cell, -1)
else:
cell = cell[-1]
return cell[-1]
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
self.encoder.load_state_dict(state_dict['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['rnn'], strict=strict)
class RNNFeaturizerHist(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, all_layers=False):
super(RNNFeaturizerHist, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn=getattr(RNN, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.all_layers = all_layers
self.output_size = self.nhid if not self.all_layers else self.nhid * self.nlayers
def forward(self, input, seq_len=None, frame_width=64):
self.rnn.detach_hidden()
#pdb.set_trace()
hist = []
if seq_len is None:
for i in range(input.size(0)):
emb = self.drop(self.encoder(input[i]))
_, hidden = self.rnn(emb.unsqueeze(0), collectHidden=True)
cell = self.get_cell_features(hidden)
else:
last_cell = 0
for i in range(input.size(0)):
emb = self.drop(self.encoder(input[i]))
_, hidden = self.rnn(emb.unsqueeze(0), collectHidden=True)
cell = self.get_cell_features(hidden)
if i > 0:
cell = get_valid_outs(i, seq_len, cell, last_cell)
last_cell = cell
if i % (input.size(0)//frame_width) == 0:
hist.append(cell)
#pdb.set_trace()
hist = torch.stack(hist[-frame_width:]).permute(1, 2, 0).view(cell.size(0), 1, cell.size(1), -1)
#pdb.set_trace()
return cell, hist
def get_cell_features(self, hidden):
cell = hidden[1]
#get cell state from layers
if self.all_layers:
cell = torch.cat(cell, -1)
else:
cell = cell[-1]
return cell[-1]
def state_dict(self, destination=None, prefix='', keep_vars=False):
sd = {}
sd['encoder'] = self.encoder.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
sd['rnn'] = self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
return sd
def load_state_dict(self, state_dict, strict=True):
self.encoder.load_state_dict(state_dict['encoder'], strict=strict)
self.rnn.load_state_dict(state_dict['rnn'], strict=strict)
class LRClassifier(nn.Module):
def __init__(self):
super(LRClassifier, self).__init__()
self.fc1 = nn.Linear(4096, 1)
self.out_act = nn.Sigmoid()
def forward(self, x):
x = x[:,0,:,-1]
logits = self.fc1(x)
out = self.out_act(logits).view(-1)
return out
class LSTMClassifier(nn.Module):
def __init__(self, nhid=32, batch_size=128):
super(LSTMClassifier, self).__init__()
self.nhid = nhid
self.batch_size = batch_size
#self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=11)
self.lstm = nn.LSTM(4096, nhid)
self.hidden = self.init_hidden()
self.fc1 = nn.Linear(nhid, 1)
self.out_act = nn.Sigmoid()
def forward(self, x):
self.batch_size = x.size(0)
self.hidden = self.init_hidden()
self.hidden[0].detach_()
self.hidden[1].detach_()
x = x.permute(3,0,2,1)[:,:,:,0] #torch.randn(128, 4096, requires_grad=True) #x.contiguous().view(-1)#x[:,0,:,-1]
lstm_out, self.hidden = self.lstm(x, self.hidden)
logits = self.fc1(lstm_out[-1])
out = self.out_act(logits).view(-1)
#pdb.set_trace()
return out
def init_hidden(self):
return (torch.zeros(1, self.batch_size, self.nhid).cuda(),
torch.zeros(1, self.batch_size, self.nhid).cuda())
def get_valid_outs(timestep, seq_len, out, last_out):
    # For sequences that have already ended (timestep >= seq_len), keep the previous
    # valid output instead of the newly computed one.
    invalid_steps = timestep >= seq_len
    if (invalid_steps.long().sum() == 0):
        return out
    return selector_circuit(out, last_out, invalid_steps)
def selector_circuit(val0, val1, selections):
    # Elementwise selector: returns val0 where selections == 0 and val1 where selections == 1.
    selections = selections.type_as(val0.data).view(-1, 1).contiguous()
    return (val0*(1-selections)) + (val1*selections)
def one_hot(seq_batch,depth):
# seq_batch.size() should be [seq,batch] or [batch,]
# return size() would be [seq,batch,depth] or [batch,depth]
out = torch.zeros(seq_batch.size()+torch.Size([depth]), dtype=torch.long).cuda()
dim = len(out.size()) - 1
#pdb.set_trace()
index = seq_batch.view(seq_batch.size()+torch.Size([1]))
return out.scatter_(dim,index,1)
| true
|
667a8cd5709651c9a48e02dbe9fafd57d7648c1f
|
Python
|
maddox/home-assistant
|
/tests/helpers/test_entity.py
|
UTF-8
| 2,258
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
"""
tests.test_helper_entity
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the entity helper.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
import homeassistant.core as ha
import homeassistant.helpers.entity as entity
from homeassistant.const import ATTR_HIDDEN
class TestHelpersEntity(unittest.TestCase):
""" Tests homeassistant.helpers.entity module. """
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.entity = entity.Entity()
self.entity.entity_id = 'test.overwrite_hidden_true'
self.hass = self.entity.hass = ha.HomeAssistant()
self.entity.update_ha_state()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
entity.Entity.overwrite_attribute(self.entity.entity_id,
[ATTR_HIDDEN], [None])
def test_default_hidden_not_in_attributes(self):
""" Test that the default hidden property is set to False. """
self.assertNotIn(
ATTR_HIDDEN,
self.hass.states.get(self.entity.entity_id).attributes)
    def test_setting_hidden_to_true(self):
        """ Test that setting the hidden property to True shows up in the attributes. """
self.entity.hidden = True
self.entity.update_ha_state()
state = self.hass.states.get(self.entity.entity_id)
self.assertTrue(state.attributes.get(ATTR_HIDDEN))
def test_overwriting_hidden_property_to_true(self):
""" Test we can overwrite hidden property to True. """
entity.Entity.overwrite_attribute(self.entity.entity_id,
[ATTR_HIDDEN], [True])
self.entity.update_ha_state()
state = self.hass.states.get(self.entity.entity_id)
self.assertTrue(state.attributes.get(ATTR_HIDDEN))
def test_overwriting_hidden_property_to_false(self):
""" Test we can overwrite hidden property to True. """
entity.Entity.overwrite_attribute(self.entity.entity_id,
[ATTR_HIDDEN], [False])
self.entity.hidden = True
self.entity.update_ha_state()
self.assertNotIn(
ATTR_HIDDEN,
self.hass.states.get(self.entity.entity_id).attributes)
| true
|
64051e1b30d8065f8b47acb58fa10ff65011d094
|
Python
|
VictorCastao/Curso-em-Video-Python
|
/Desafio01.py
|
UTF-8
| 112
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
print ("============Desafio 1============")
nome = input ("Digite seu nome: ")
print ("Seja bem vindx," , nome)
| true
|
a3e023676f2702aaf8d3907eca310462ecc45403
|
Python
|
luvkrai/learnings
|
/custom_exceptions.py
|
UTF-8
| 224
| 3.6875
| 4
|
[] |
no_license
|
class myexception(Exception):
def __init__(self, message, errors):
super().__init__(message)
self.errors = errors
try:
raise myexception("hello","my error")
except myexception as e:
print(e)
print(e.errors)
| true
|
24e27095d424238016503bf239e515f5e70765be
|
Python
|
flyteorg/flytesnacks
|
/examples/type_system/type_system/typed_schema.py
|
UTF-8
| 1,939
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
# %% [markdown]
# (typed_schema)=
#
# # Typed Columns in a Schema
#
# ```{eval-rst}
# .. tags:: DataFrame, Basic, Data
# ```
#
# This example explains how a typed schema can be used in Flyte and declared in flytekit.
# %%
import pandas
from flytekit import kwtypes, task, workflow
# %% [markdown]
# Flytekit consists of some pre-built type extensions, one of them is the FlyteSchema type
# %%
from flytekit.types.schema import FlyteSchema
# %% [markdown]
# FlyteSchema is an abstract Schema type that can be used to represent any structured dataset which has typed
# (or untyped) columns
# %%
out_schema = FlyteSchema[kwtypes(x=int, y=str)]
# %% [markdown]
# To write to a schema object refer to `FlyteSchema.open` method. Writing can be done
# using any of the supported dataframe formats.
#
# ```{eval-rst}
# .. todo::
#
# Reference the supported dataframe formats here
# ```
# %%
@task
def t1() -> out_schema:
w = out_schema()
df = pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})
w.open().write(df)
return w
# %% [markdown]
# To read a Schema, one has to invoke the `FlyteSchema.open`. The default mode
# is automatically configured to be `open` and the default returned dataframe type is {py:class}`pandas.DataFrame`
# Different types of dataframes can be returned based on the type passed into the open method
# %%
@task
def t2(schema: FlyteSchema[kwtypes(x=int, y=str)]) -> FlyteSchema[kwtypes(x=int)]:
assert isinstance(schema, FlyteSchema)
df: pandas.DataFrame = schema.open().all()
return df[schema.column_names()[:-1]]
@workflow
def wf() -> FlyteSchema[kwtypes(x=int)]:
return t2(schema=t1())
# %% [markdown]
# Local execution will convert the data to and from the serialized representation, thus mimicking a complete
# distributed execution.
#
# %%
if __name__ == "__main__":
print(f"Running {__file__} main...")
print(f"Running wf(), returns columns {wf().columns()}")
| true
|
85a8b55ea656520d8c6b904cf39af474bf2cfc83
|
Python
|
ZCCFighting/picture
|
/Pca.py
|
UTF-8
| 2,656
| 2.6875
| 3
|
[] |
no_license
|
import cv2 as cv
import numpy as np
img=cv.imread('DJI_0024binary0.tif')
h, w, _ = img.shape
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
ret, binary = cv.threshold(gray, 150, 255, cv.THRESH_BINARY)
image, contours, hierarchy = cv.findContours(binary,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)
#cv.drawContour(image,contours,-1,(0,255,0),3)
def eigValPct(eigVals,percentage):
    sortArray=np.sort(eigVals) # sort the eigenvalues in ascending order with numpy's sort()
    sortArray=sortArray[-1::-1] # reverse so the eigenvalues run from largest to smallest
    arraySum=sum(sortArray) # total variance of the data
tempSum=0
num=0
for i in sortArray:
tempSum+=i
num+=1
if tempSum>=arraySum*percentage:
return num
'''The pca function takes two parameters: dataMat is the data set already converted to matrix form,
with columns representing features; percentage is the share of total variance the leading components must explain, default 0.9.'''
def pca(dataMat,percentage=0.9):
#print(dataMat.shape)
dataMat_re=np.reshape(dataMat,(-1,2))
    meanVals=np.mean(dataMat_re,axis=0) # column means; the covariance computation subtracts the mean
#print(meanVals)
meanRemoved=dataMat_re-meanVals
    covMat=np.cov(meanRemoved) # cov() computes the covariance matrix
    eigVals,eigVects=np.linalg.eig(np.mat(covMat)) # eigenvalues and eigenvectors via numpy.linalg.eig()
    k=eigValPct(eigVals,percentage) # number k of leading components needed to reach the variance share percentage
    eigValInd=np.argsort(eigVals) # indices that sort the eigenvalues eigVals in ascending order
    eigValInd=eigValInd[:-(k+1):-1] # take the last k indices, i.e. the k largest eigenvalues
    redEigVects=eigVects[:,eigValInd]
    # redEigVects: eigenvectors corresponding to the selected eigenvalues (the principal components)
    lowDDataMat=meanRemoved.T*redEigVects # project the original data onto the principal components to get lowDDataMat
    reconMat=(lowDDataMat*redEigVects.T).T+meanVals # reconstructed data reconMat
return lowDDataMat,reconMat
k=0
rec=[]
for i in range(len(contours)):
cnt = contours[i]
    area = cv.contourArea(cnt) # drop small contour regions; the size threshold is chosen by the user
    if(area <1e2 or 1e5 <area): continue # when thickness is not -1, the contour outline is drawn and thickness sets the line width
cv.drawContours(img,contours,i,(255,0,0),2,8,hierarchy,0)
lowDDataMat,reconMat=pca(contours[i],percentage=0.9)
recon_mean=np.mean(reconMat,axis=0)
print(recon_mean)
rec.append(recon_mean)
k+=1
#print(rec)
pos=np.mean(recon_mean)
#cv.circle(img,(a,b),3,(255,0,0),2,lineType=8,shift=0)
img=cv.line(img,(3210,1649),(1243,325),(0,0,255),2)
cv.imshow('img',img)
cv.imwrite('res.jpg',img)
cv.waitKey(1000)
| true
|
4b14803e4fa4e38ddaf6e3c95bf3d742309916f5
|
Python
|
lakshmana8121/hire_lakshman
|
/Basepage/basepage01.py
|
UTF-8
| 1,501
| 2.578125
| 3
|
[] |
no_license
|
from selenium import webdriver
import time
class base:
Select_vaccination_service_xpath="//button[text()='Vaccination Services']"
Select_search_vaccination_center_xpath='//*[@id="mat-menu-panel-0"]/div/ul/li[2]/a'
search_District_id='mat-tab-label-0-1'
select_state_button_id="mat-select-0"
Select_state_xpath='//*[text()=" Andhra Pradesh "]'
Select_District_button_xpath="//*[@id='mat-select-2']"
Select_districts_xpath="//span[text()=' Anantapur ']"
Select_Search_button_xpath="//*[text()='Search']"
def __init__(self,driver):
self.driver=driver
def clickvaccineservice(self):
self.driver.find_element_by_xpath(self.Select_vaccination_service_xpath).click()
def clicksearchvaccine(self):
self.driver.find_element_by_xpath(self.Select_search_vaccination_center_xpath).click()
def clickDistrict(self):
self.driver.find_element_by_id(self.search_District_id).click()
def clickstatebutton(self):
self.driver.find_element_by_id(self.select_state_button_id).click()
def clickstate(self):
self.driver.find_element_by_xpath(self.Select_state_xpath).click()
def clickDistrictbutton(self):
self.driver.find_element_by_xpath(self.Select_District_button_xpath).click()
def clickdistrict(self):
self.driver.find_element_by_xpath(self.Select_districts_xpath).click()
def clicksearchbutton(self):
self.driver.find_element_by_xpath(self.Select_Search_button_xpath).click()
| true
|
3d4e2407c8a4699373293d01861a06912a19e31c
|
Python
|
TaigoKuriyama/atcoder
|
/problem/abc150/c/main.py
|
UTF-8
| 331
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import itertools
l = list(range(1, int(input()) + 1))
p = list(map(int, input().split()))
q = list(map(int, input().split()))
cnt_a = 1
cnt_b = 1
for i in itertools.permutations(l):
if list(i) == p:
a = cnt_a
if list(i) == q:
b = cnt_b
cnt_a += 1
cnt_b += 1
print(abs(a - b))
| true
|
1ccb54f74d7fa36a0e2f4aadb2a80b4b90fbf57a
|
Python
|
alpha-kwhn/Baekjun
|
/GONASOO/8611.py
|
UTF-8
| 346
| 3.203125
| 3
|
[] |
no_license
|
def conv(k,m):
r = ""
while True:
a = k % m
k //= m
r = str(a) + r
if k < m: r = str(k) + r
if k//m < 1: return int(r)
n = int(input()); flag = True
for i in range(2,11):
t = str(conv(n, i))
if t[::-1] == t:
print(i, t)
flag = False
if flag: print("NIE")
| true
|
386b576c4da9740e1ba7a7fc58b4152d81bfd1c3
|
Python
|
globocom/dojo
|
/2021_01_06/dojo_test.py
|
UTF-8
| 2,098
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from dojo import get_dimensions, build_matrix
class DojoTest(unittest.TestCase):
def test_get_dimensions1(self):
self.assertEquals(get_dimensions("ifmanwasmeanttostayonthegroundgodwouldhavegivenusroots"), (7,8))
def test_get_dimensions2(self):
self.assertEquals(get_dimensions("feedthedog"), (3,4))
    def test_get_dimensions3(self):
self.assertEquals(get_dimensions("chillout"), (3,3))
def test_build_matrix(self):
self.assertEquals(build_matrix("if man was me ant to stay on the ground god would have given us roots"),
[
['i','f','m','a','n','w','a','s'],
['m','e','a','n','t','t','o','s'],
['t','a','y','o','n','t','h','e'],
['g','r','o','u','n','d','g','o'],
['d','w','o','u','l','d','h','a'],
['v','e','g','i','v','e','n','u'],
['s','r','o','o','t','s','','']
])
def test_build_matrix2(self):
self.assertEqual(build_matrix("feed the dog"),
[
['f','e','e','d'],
['t','h','e','d'],
['o','g','',''],
])
def test_build_matrix3(self):
self.assertEqual(build_matrix("chill out"),
[
['c','h','i'],
['l','l','o'],
['u','t','']
])
if __name__ == '__main__':
unittest.main()
# Allan - Carreira - Elen - Lucas - Bruna - Lara - Mateus - TiagoDuarte - Ighor
'''
'm','e','a','n','t','t','o','s'
't','a','y','o','n','t','h','e'
'g','r','o','u','n','d','g','o'
'd','w','o','u','l','d','h','a'
'v','e','g','i','v','e','n','u'
's','r','o','o','t','s'
','
feed the dog
[
['f','e','e','d'],
['t','h','e','d'],
['o','g','',''],
]
['c','h','i']
['l','l','o']
['u','t',']
'
'
def bla(str):
'''
# I'M ALREADY OVER YOU, I'M ALREADY OVER YOU
# but don't message me again OR I'LL RELAPSE (AN ANTHEM!) #love
# 1 - Remove the spaces from the string
# 2 - Matrix dimensions based on the string (without spaces) - DONE
# 3 - Build the matrix
# 4 - Assemble the string
| true
|
ffd68ef1dc65319700d680a038714eb3ae2d0fd9
|
Python
|
qmnguyenw/python_py4e
|
/geeksforgeeks/python/easy/29_5.py
|
UTF-8
| 2,677
| 3.40625
| 3
|
[] |
no_license
|
Program to calculate the Round Trip Time (RTT)
**Round trip time (RTT)** is the length of time it takes for a signal to be
sent plus the length of time it takes for an acknowledgement of that signal to
be received. This time therefore consists of the propagation times between the
two endpoints of the signal path.
On the Internet, an end user can determine the RTT to and from an IP (Internet
Protocol) address by pinging that address. The result depends on various
factors:
* The data transfer rate of the source’s internet connection.
* The nature of the transmission medium.
* The physical distance between source and destination.
* The number of nodes between source and destination.
* The amount of traffic on the LAN (Local Area Network) to which the end user is connected.
* The number of other requests being handled by intermediate nodes and the remote server.
* The speed with which intermediate nodes and the remote server function.
* The presence of interference in the circuit.
Examples:
Input : www.geeksforgeeks.org
Output : Time in seconds : 0.212174892426
Input : www.cricbuzz.com
Output : Time in seconds : 0.55425786972
# Python program to calculate RTT
import time
import requests
# Function to calculate the RTT
def RTT(url):
# time when the signal is sent
t1 = time.time()
r = requests.get(url)
# time when acknowledgement of signal
# is received
t2 = time.time()
# total time taken
tim = str(t2-t1)
print("Time in seconds :" + tim)
# driver program
# url address
url = "http://www.google.com"
RTT(url)
Output:
Time in seconds :0.0579478740692
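A single measurement is easily skewed by transient network load, so a natural
extension (not part of the original article) is to repeat the request a few times
and average the result. The helper name average_rtt below is purely illustrative.
# Python program to average the RTT over several requests
import time
import requests
def average_rtt(url, tries=5):
    total = 0.0
    for _ in range(tries):
        t1 = time.time()
        requests.get(url)
        t2 = time.time()
        total += t2 - t1
    return total / tries
print("Average time in seconds :" + str(average_rtt("http://www.google.com")))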
This article is contributed by **Pramod Kumar**.
| true
|
19013691b0f53d265f11ecfe850f1af6d15e0c6e
|
Python
|
ajstocchetti/apartment-temps
|
/test.py
|
UTF-8
| 1,893
| 2.96875
| 3
|
[] |
no_license
|
import time
import board
import adafruit_dht
from influxdb import InfluxDBClient
# Initialize the DHT device, with the data pin connected to:
DHT_TYPE = adafruit_dht.DHT22
DHT_PIN = board.D4
dhtDevice = DHT_TYPE(DHT_PIN)
minF = 65
lowFreq = 30 # seconds
regFreq = 240 # seconds
errorFreq = 6 # seconds
client = InfluxDBClient(host='127.0.0.1', port=8086, database='apartmenttemp')
def getFrequency(tempReading, isValid):
if isValid is not True:
return errorFreq
elif tempReading > minF:
return regFreq
else:
return lowFreq
def getVals():
resp = {
"temp": None,
"humidity": None
}
try:
        # Read the temperature and humidity from the sensor
temperature_c = dhtDevice.temperature
temperature_f = temperature_c * (9 / 5) + 32
temperature_f = round(temperature_f, 3)
resp["temp"] = temperature_f
humidity = dhtDevice.humidity
resp["humidity"] = humidity
except RuntimeError as error:
# Errors happen fairly often, DHT's are hard to read, just keep going
print(error.args[0])
finally:
return resp
def handleResp(vals, isValid):
try:
if isValid:
insertTsdb(vals)
print("Temp: {:.1f} F - Humidity: {}% "
.format(vals.get("temp"), vals.get("humidity")))
except:
pass
def run():
vals = getVals()
temp = vals.get("temp")
isValidRead = temp is not None
handleResp(vals, isValidRead)
nextFreq = getFrequency(temp, isValidRead)
print("Sleeping for", nextFreq, "seconds")
time.sleep(nextFreq)
def insertTsdb(vals):
body = [
{
"measurement": "climate",
"tags": {
"location": "bedroom",
"device": "pi3b"
},
"fields": vals
}
]
client.write_points(body)
while True:
run()
| true
|
2eb81e2e3046ca036f17160fd83ea4ddd906dfcb
|
Python
|
Eomys/SciDataTool
|
/SciDataTool/Methods/DataND/_set_values.py
|
UTF-8
| 532
| 2.671875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
from SciDataTool.Classes._check import check_dimensions, check_var
from numpy import squeeze, array
def _set_values(self, value):
"""setter of values"""
if type(value) is int and value == -1:
value = array([])
elif type(value) is list:
try:
value = array(value)
except:
pass
check_var("values", value, "ndarray")
# Check dimensions
if value is not None:
value = squeeze(value)
value = check_dimensions(value, self.axes)
self._values = value
| true
|
f3a714916f77449708e44052e23162373c2daad1
|
Python
|
YunHao-Von/Mathematical-Modeling
|
/手写代码/第2章 数据处理与可视化/Pex2_48_1.py
|
UTF-8
| 268
| 2.609375
| 3
|
[] |
no_license
|
from scipy.stats import binom
import matplotlib.pyplot as plt
import numpy as np
n,p=5,0.4
x=np.arange(6);y=binom.pmf(x,n,p)
plt.subplot(1,2,1);plt.plot(x,y,'ro')
plt.vlines(x,0,y,'k',lw=3,alpha=0.5)
plt.subplot(1,2,2);plt.stem(x,y,use_line_collection=True)
plt.show()
| true
|
4950661cd5b799efb99e2f6717f21e3ce1a804cb
|
Python
|
kaphka/catconv
|
/catconv/stabi.py
|
UTF-8
| 4,337
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""This modules provides functions to process the music catalog provided by
the Staatsbibliothek Berlin"""
import copy
import os
import re
import ocrolib
import ujson
import glob as g
import os.path as op
# import ujson
TIF_PAGES_GLOB = "{name}{batch}/TIF/????????{ext}"
PAGES_GLOB = "{name}{batch}/????????{ext}"
class Catalog(object):
"""collection of catalog cards"""
def __init__(self, path):
self.name = op.basename(path)
self.path = path
def split_path(path):
"""splits path into data_dir, cat_name, batch_name, page_name"""
norm = os.path.normpath(path)
# -> /catalogs/S/S001/00001.tif
pages_dir, file_name = op.split(norm)
# -> /catalogs/S/S001/TIF/ 00001.tif
# -> /catalogs/S/S001 00001.tif
# page file names may contain multiple dots
page_name = re.sub(r"(\.\w+)+$", "", file_name)
batch_dir, batch_name = op.split(pages_dir)
# -> /catalogs/S/S001 TIF
# -> /catalogs/S S001
if batch_name == "TIF":
batch_dir, batch_name = op.split(batch_dir)
data_dir, cat_name = op.split(batch_dir)
# -> /catalogs S
return data_dir, cat_name, batch_name, page_name
def change_path(path, cat=None, ext="", remove_type=False, rel_path=None, to_cat=None):
"""change catalog paths to a simpler folder structure"""
data_dir, cat_name, batch_name, page_name = split_path(path)
if cat:
cat_name = cat
batch_name = cat_name + batch_name[-3:]
if to_cat:
data_dir = to_cat
changed_path = op.join(data_dir, cat_name, batch_name, page_name + ext)
if rel_path:
return op.relpath(changed_path, op.normpath(rel_path))
else:
return changed_path
def convert_page_path(page, conversion):
"""create a copy of a page and changes the path"""
new_path = change_path(page['path'], **conversion)
new_page = copy.deepcopy(page)
new_page['path'] = new_path
return new_page
def page_dir(page_path):
"""directory named like the image"""
pagename = op.basename(change_path(page_path))
return op.join(op.split(page_path)[0], pagename)
def catalog_pages(cat_path, batch='*', ext='.png', amount=None):
# pattern = op.join(cat_path, '{}/????????{}'.format(batch, ext))
path, name = op.split(cat_path)
if ext == ".tif":
pattern = TIF_PAGES_GLOB
else:
pattern = PAGES_GLOB
page_glob = op.join(path, name, pattern.format(name=name, batch=batch, ext=ext))
return g.glob(page_glob)
def batches(cat_path):
pattern = op.join(cat_path, '*')
batches = sorted(map(op.basename,g.glob(pattern)))
return batches
def line_index_to_name(idx):
return '0'+hex((0x010000+(idx)))[2:]
def read_line_boxes(page):
"""read the dimensions of each text line"""
path = change_path(page['path'], ext='.pseg.png')
path = path.encode()
try:
pseg = ocrolib.read_page_segmentation(path)
except IOError:
return []
regions = ocrolib.RegionExtractor()
regions.setPageLines(pseg)
lines = []
for i in range(1, regions.length()):
y0, x0, y1, x1 = regions.bboxMath(i)
lines.append({'name': line_index_to_name(i),
'position': [x0, y0, x1, y1]})
return lines
def load_box_positions(page):
segments = read_line_boxes(page)
page['lines'] = segments
return page
def page_from_path(path):
return {'path': path}
def read_text(page):
if not 'lines' in page:
return page
text_dir = page_dir(page['path'])
for line in page['lines']:
path = op.join(text_dir, line['name']) + '.txt'
text = ""
if op.isfile(path):
with open(path, 'rb') as sfile:
text = sfile.read()
line['text'] = text
def get_cat_name(batch_name):
return re.match('([A-S]+)', batch_name).group(0)
def load_catalog(path, selection={}, text_box=False, text=False):
name = op.basename(path)
pages = sorted(map(page_from_path, catalog_pages(path, **selection)),key=lambda page: page['path'])
for page in pages:
if text_box:
load_box_positions(page)
if text:
read_text(page)
return {'name': name, 'path': path, 'pages': pages}
def change_paths(cat, conv):
cat['pages'] = map(lambda page: convert_page_path(page,conv), cat['pages'])
| true
|
6894e381690c5f063c351917c8f9edaf6603c778
|
Python
|
process-intelligence-research/SFILES2
|
/Flowsheet_Class/nx_to_sfiles.py
|
UTF-8
| 40,844
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
import random
import networkx as nx
import re
import numpy as np
random.seed(1)
"""
Exposes functionality for writing SFILES (Simplified flowsheet input line entry system) strings
Based on
- d’Anterroches, L. Group contribution based process flowsheet synthesis, design and modelling, Ph.D. thesis.
Technical University of Denmark, 2006.
- Zhang, T., Sahinidis, N. V., & Siirola, J. J. (2019). Pattern recognition in chemical process flowsheets.
AIChE Journal, 65(2), 592-603.
- Weininger, David (February 1988). "SMILES, a chemical language and information system. 1. Introduction to
methodology and encoding rules". Journal of Chemical Information and Computer Sciences. 28 (1): 31–6.
"""
def nx_to_SFILES(flowsheet, version, remove_hex_tags, canonical=True):
"""Converts a networkx graph to its corresponding SFILES notation.
Parameters
----------
flowsheet: networkx graph
Process flowsheet as networkx graph.
version: str, default='v1'
SFILES version, either 'v1' or 'v2'.
remove_hex_tags: bool
Whether to show the 'he' tags in the SFILES_v2 (Conversion back and merging of hex nodes is not possible if
this is set to true).
Returns
----------
sfiles_gen: list [str]
Generalized SFILES representation of the flowsheet (parsed).
sfiles_string_gen: str
Generalized SFILES representation of flowsheet.
"""
# Signal edges are removed from flowsheet graph as they are inserted later with recycle notation to SFILES.
# Remove signal nodes before ranking, otherwise interoperability with SFILES2.0 cannot be ensured.
# Edges of signals connected directly to the next unit operation shall not be removed, since they represent both
# material stream and signal connection.
flowsheet_wo_signals = flowsheet.copy()
edge_information = nx.get_edge_attributes(flowsheet, 'tags')
edge_information_signal = {k: flatten(v['signal']) for k, v in edge_information.items() if 'signal' in v.keys()
if v['signal']}
edges_to_remove = [k for k, v in edge_information_signal.items() if v == ['not_next_unitop']]
flowsheet_wo_signals.remove_edges_from(edges_to_remove)
# Calculation of graph invariant / node ranks
ranks = calc_graph_invariant(flowsheet_wo_signals)
# Find initial nodes of graph. Initial nodes are determined by an in-degree of zero.
init_nodes = [n for n, d in flowsheet_wo_signals.in_degree() if d == 0]
# Sort the possible initial nodes for traversal depending on their rank.
init_nodes = sort_by_rank(init_nodes, ranks, canonical=True)
# Add an additional virtual node, which is connected to every initial node. Thus, one graph traversal is sufficient
# to access every node in the graph.
flowsheet_wo_signals.add_node('virtual')
virtual_edges = [('virtual', i) for i in init_nodes]
flowsheet_wo_signals.add_edges_from(virtual_edges)
current_node = 'virtual'
ranks['virtual'] = 0
# Nodes in cycle-processes are not determined since their in_degree is greater than zero.
# Thus, as long as not every node of flowsheet is connected to the virtual node, the node with the lowest rank
# (which is not a outlet node) is connected to the virtual node.
flowsheet_undirected = nx.to_undirected(flowsheet_wo_signals)
connected_to_virtual = set(nx.node_connected_component(flowsheet_undirected, 'virtual'))
not_connected = set(flowsheet_wo_signals.nodes) - connected_to_virtual
while not_connected:
rank_not_connected = sort_by_rank(not_connected, ranks, canonical=True)
rank_not_connected = [k for k in rank_not_connected if flowsheet_wo_signals.out_degree(k) > 0]
flowsheet_wo_signals.add_edges_from([('virtual', rank_not_connected[0])])
connected_to_virtual = set(nx.node_connected_component(flowsheet_undirected, 'virtual'))
not_connected = set(flowsheet_wo_signals.nodes) - connected_to_virtual
# Initialization of variables.
visited = set()
sfiles_part = []
nr_pre_visited = 0
nodes_position_setoffs = {n: 0 for n in flowsheet_wo_signals.nodes}
nodes_position_setoffs_cycle = {n: 0 for n in flowsheet_wo_signals.nodes}
special_edges = {}
# Graph traversal (depth-first-search dfs).
sfiles_part, nr_pre_visited, node_insertion, sfiles = dfs(visited, flowsheet_wo_signals, current_node, sfiles_part,
nr_pre_visited, ranks, nodes_position_setoffs,
nodes_position_setoffs_cycle, special_edges,
edge_information_signal, first_traversal=True, sfiles=[],
node_insertion='', canonical=canonical)
# Flatten nested list of sfile_part
sfiles = flatten(sfiles)
# SFILES Version 2.0:
if version == 'v2':
sfiles = SFILES_v2(sfiles, special_edges, edge_information, remove_hex_tags)
# Generalization of SFILES (remove node numbering) as last step
sfiles_gen = generalize_SFILES(sfiles)
sfiles_string_gen = ''.join(sfiles_gen)
return sfiles_gen, sfiles_string_gen
def dfs(visited, flowsheet, current_node, sfiles_part, nr_pre_visited, ranks, nodes_position_setoffs,
nodes_position_setoffs_cycle, special_edges, edge_information, first_traversal, sfiles, node_insertion,
canonical=True):
"""Depth first search implementation to traverse the directed graph from the virtual node.
Parameters
----------
visited: set
Keeps track of visited nodes.
flowsheet: networkx graph
Process flowsheet as networkx graph.
current_node: str
Current node in depth first search.
edge_information: dict
Stores information about edge tags.
sfiles_part: list [str]
SFILES representation of a single traversal of the flowsheet.
nr_pre_visited: int
Counter variable for cycles.
ranks: dict
Ranks of nodes required for branching decisions.
nodes_position_setoffs: dict
Counts the occurrences of outgoing and incoming cycles per node.
nodes_position_setoffs_cycle: dict
Counts the occurrences only of outgoing cycles per node.
special_edges: dict
        Saves whether an edge (in, out) is a cycle (number>1) or not (number=0).
first_traversal: bool
Saves, whether the graph traversal is the first (True) or a further traversal (False).
sfiles: list [str]
SFILES representation of the flowsheet (parsed).
node_insertion: str
Node of previous traversal(s) where branch (first) ends, default is an empty string.
canonical: bool, default=True
Whether the resulting SFILES should be canonical (True) or not (False).
Returns
-------
sfiles: list
SFILES representation of the flowsheet (parsed).
sfiles_part: list
SFILES representation of the flowsheet of a single traversal.
node_insertion: list
Node of previous traversal(s) where branch (first) ends.
nr_pre_visited: int
Counter variable for cycles.
"""
if current_node == 'virtual':
visited.add(current_node)
# Traversal order according to ranking of nodes.
neighbours = sort_by_rank(flowsheet[current_node], ranks, visited, canonical=True)
for neighbour in neighbours:
# Reset sfiles_part for every new traversal starting from 'virtual', since new traversal is started.
sfiles_part = []
sfiles_part, nr_pre_visited, node_insertion, sfiles = dfs(visited, flowsheet, neighbour, sfiles_part,
nr_pre_visited, ranks, nodes_position_setoffs,
nodes_position_setoffs_cycle, special_edges,
edge_information, first_traversal, sfiles,
node_insertion='', canonical=canonical)
# First traversal: sfiles_part is equal to sfiles.
# Further traversals: traversals, which are connected to the first traversal are inserted with '<&|...&|'
# and independent subgraphs are inserted with 'n|'.
if first_traversal:
sfiles.extend(sfiles_part)
first_traversal = False
else:
if not node_insertion == '':
sfiles_part.append('|')
sfiles_part.insert(0, '<&|')
pos = position_finder(nodes_position_setoffs, node_insertion, sfiles, nodes_position_setoffs_cycle,
cycle=False)
# Insert the branch next to node_insertion.
insert_element(sfiles, pos, sfiles_part)
else:
sfiles.append('n|')
sfiles.extend(sfiles_part)
# After last traversal, insert signal connections with recycle notation.
if neighbour == neighbours[-1]:
sfiles = insert_signal_connections(edge_information, sfiles, nodes_position_setoffs_cycle,
nodes_position_setoffs, special_edges)
if current_node not in visited and not current_node == 'virtual':
successors = list(flowsheet.successors(current_node))
# New branching if current_node has more than one successor.
if len(successors) > 1:
sfiles_part.append('(' + current_node + ')')
visited.add(current_node)
# Branching decision according to ranking of nodes.
neighbours = sort_by_rank(flowsheet[current_node], ranks, visited, canonical)
for neighbour in neighbours:
if not neighbour == neighbours[-1]:
sfiles_part.append('[')
if neighbour not in visited:
sfiles_part, nr_pre_visited, node_insertion, sfiles = dfs(visited, flowsheet, neighbour,
sfiles_part, nr_pre_visited,
ranks, nodes_position_setoffs,
nodes_position_setoffs_cycle,
special_edges, edge_information,
first_traversal, sfiles, node_insertion,
canonical=canonical)
if not neighbour == neighbours[-1]:
sfiles_part.append(']')
# If neighbor is already visited, that's a direct cycle. Thus, the branch brackets can be removed.
elif first_traversal:
if sfiles_part[-1] == '[':
sfiles_part.pop()
# A material cycle is represented using the recycle notation with '<#' and '#'.
nr_pre_visited, special_edges, sfiles_part, sfiles = insert_cycle(nr_pre_visited, sfiles_part,
sfiles, special_edges,
nodes_position_setoffs,
nodes_position_setoffs_cycle,
neighbour, current_node,
inverse_special_edge=False)
elif not first_traversal: # Neighbour node in previous traversal.
if sfiles_part[-1] == '[':
sfiles_part.pop()
# Only insert sfiles once. If there are multiple backloops to previous traversal,
# treat them as cycles. Insert a & sign where branch connects to node of previous traversal.
if node_insertion == '' and '(' + neighbour + ')' not in flatten(sfiles_part):
node_insertion = neighbour
pos = position_finder(nodes_position_setoffs, current_node, sfiles_part,
nodes_position_setoffs_cycle, cycle=True)
insert_element(sfiles_part, pos, '&')
# Additional info: edge is a new incoming branch edge in SFILES.
special_edges[(current_node, neighbour)] = '&'
else:
nr_pre_visited, special_edges, sfiles_part, sfiles = insert_cycle(nr_pre_visited, sfiles_part,
sfiles, special_edges,
nodes_position_setoffs,
nodes_position_setoffs_cycle,
neighbour, current_node,
inverse_special_edge=False)
# Node has only one successor, thus no branching.
elif len(successors) == 1:
sfiles_part.append('(' + current_node + ')')
visited.add(current_node)
sfiles_part, nr_pre_visited, node_insertion, sfiles = dfs(visited, flowsheet, successors[0], sfiles_part,
nr_pre_visited, ranks, nodes_position_setoffs,
nodes_position_setoffs_cycle, special_edges,
edge_information, first_traversal, sfiles,
node_insertion, canonical=canonical)
# Dead end.
elif len(successors) == 0:
visited.add(current_node)
sfiles_part.append('(' + current_node + ')')
    # This elif case is reached when the current node belongs to a previous traversal (already visited, no branching).
elif not current_node == 'virtual':
# Incoming branches are inserted at mixing point in SFILES surrounded by '<&|...&|'.
# Only insert sfiles once. If there are multiple backloops to previous traversal, treat them as cycles.
if node_insertion == '' and '(' + current_node + ')' in flatten(sfiles) and not first_traversal:
# Insert a & sign where branch connects to node of previous traversal.
node_insertion = current_node
last_node = last_node_finder(sfiles_part)
pos = position_finder(nodes_position_setoffs, last_node, sfiles_part, nodes_position_setoffs_cycle,
cycle=True)
insert_element(sfiles_part, pos, '&')
# Additional info: edge is a new incoming branch edge in SFILES.
special_edges[(last_node, current_node)] = '&'
else: # Incoming branches are referenced with the recycle notation, if there already is a node_insertion.
nr_pre_visited, special_edges, sfiles_part, sfiles = insert_cycle(nr_pre_visited, sfiles_part, sfiles,
special_edges, nodes_position_setoffs,
nodes_position_setoffs_cycle,
current_node, node2='last_node',
inverse_special_edge=False)
return sfiles_part, nr_pre_visited, node_insertion, sfiles
def insert_cycle(nr_pre_visited, sfiles_part, sfiles, special_edges, nodes_position_setoffs,
nodes_position_setoffs_cycle, node1, node2, inverse_special_edge, signal=False):
"""Inserts the cycle numbering of material recycles and signal connections according to the recycle notation.
Parameters
----------
nr_pre_visited: int
Counter variable for cycles.
sfiles_part: list [str]
SFILES representation of a single traversal of the flowsheet.
sfiles: list [str]
SFILES representation of the flowsheet (parsed).
special_edges: dict
        Saves whether an edge (in, out) is a cycle (number>1) or not (number=0).
nodes_position_setoffs: dict
Counts the occurrences of outgoing and incoming cycles per node.
nodes_position_setoffs_cycle: dict
Counts the occurrences only of outgoing cycles per node.
node1: str
Node name of connection to incoming cycle.
node2: str
Node name of connection to outgoing cycle.
inverse_special_edge: bool
Inverts the entry in special_edges.
signal: bool, default=False
If true signal connection notation ('<_#' and '_#')is used.
Returns
----------
nr_pre_visited: int
Counter variable for cycles.
special_edges: dict
        Saves whether an edge (in, out) is a cycle (number>1) or not (number=0).
sfiles_part: list [str]
SFILES representation of a single traversal of the flowsheet.
sfiles: list [str]
SFILES representation of the flowsheet (parsed).
"""
# Check if incoming cycle is connected to node of current traversal or previous traversal.
if '(' + node1 + ')' not in flatten(sfiles_part):
pos1 = position_finder(nodes_position_setoffs, node1, sfiles, nodes_position_setoffs_cycle, cycle=False)
nr_pre_visited += 1
insert_element(sfiles, pos1, '<' + ('_' if signal else '') + str(nr_pre_visited))
else:
pos1 = position_finder(nodes_position_setoffs, node1, sfiles_part, nodes_position_setoffs_cycle, cycle=False)
nr_pre_visited += 1
insert_element(sfiles_part, pos1, '<' + ('_' if signal else '') + str(nr_pre_visited))
if node2 == 'last_node':
node2 = last_node_finder(sfiles_part)
pos2 = position_finder(nodes_position_setoffs, node2, sfiles_part, nodes_position_setoffs_cycle, cycle=True)
# According to SMILES notation, for two digit cycles a % sign is put before the number (not required for signals).
if nr_pre_visited > 9 and not signal:
insert_element(sfiles_part, pos2, '%' + str(nr_pre_visited))
else:
insert_element(sfiles_part, pos2, ('_' if signal else '') + str(nr_pre_visited))
# Additional info: edge is marked as a cycle edge in SFILES.
if inverse_special_edge:
special_edges[(node1, node2)] = ('%' if nr_pre_visited > 9 else '') + str(nr_pre_visited)
else:
special_edges[(node2, node1)] = ('%' if nr_pre_visited > 9 else '') + str(nr_pre_visited)
return nr_pre_visited, special_edges, sfiles_part, sfiles
def SFILES_v2(sfiles, special_edges, edge_information, remove_hex_tags=False):
"""Method to construct the SFILES 2.0: Additional information in edge attributes regarding connectivity
(Top or bottom in distillation, absorption, or extraction columns, signal connections)
Parameters
----------
sfiles: list [str]
SFILES representation of the flowsheet (parsed).
special_edges: dict
Contains edge and cycle number>0 -> different notation of tags.
edge_information: dict
Stores information about edge tags.
remove_hex_tags: bool
Whether to show the 'he' tags in the SFILES_v2
(Conversion back and merging of hex nodes is not possible if this is set to true).
Returns
-------
sfiles_v2: list [str]
SFILES representation (2.0) of the flowsheet (parsed).
"""
sfiles_v2 = sfiles.copy()
if remove_hex_tags: # Only save the column related tags.
edge_information = {k: {'col': v['col']} for k, v in edge_information.items() if 'col' in v.keys()}
edge_information = {k: flatten(v.values()) for k, v in edge_information.items()} # Merge he and col tags.
edge_information = {k: v for k, v in edge_information.items() if v} # Filter out empty tags lists.
if edge_information:
# First assign edge attributes to nodes.
for e, at in edge_information.items():
# e: edge-tuple (in_node name, out_node name); at: attribute
if type(at) == str:
at = [at]
in_node = e[0]
out_node = e[1]
if e in special_edges:
edge_type = str(special_edges[e])
else:
edge_type = 'normal'
tags = '{' + '}{'.join(at) + '}' # Every single tag of that stream inserted in own braces.
# Search position where to insert tag.
if edge_type == 'normal':
for s_idx, s in enumerate(sfiles_v2):
if s == '(' + out_node + ')':
sfiles_v2.insert(s_idx, tags)
break
# Search the right & sign.
elif edge_type == '&':
search_and = False
for s_idx, s in enumerate(sfiles_v2):
if s == '(' + in_node + ')':
search_and = True
counter = 0
if search_and:
if s == '&' and counter == 0: # No second branch within branch with <&| notation.
sfiles_v2.insert(s_idx, tags)
break
if s == '&' and counter > 0:
counter -= 1
if s == '<&|':
counter += 1
else: # Edge_type > 0 recycle edge, so we search for the corresponding recycle number.
for s_idx, s in enumerate(sfiles_v2):
if s == edge_type:
sfiles_v2.insert(s_idx, tags)
break
# Heat integration tags: Heat integration is noted with a mix between recycle and connectivity notation,
# e.g. (hex){1}...(hex){1}. Networkx node names indicate heat integration with slash, e.g. hex-1/1 and hex-1/2.
HI_eqs = [] # Heat integrated heat exchangers
for s_idx, s in enumerate(sfiles_v2):
if 'hex' in s and '/' in s:
heatexchanger = s.split(sep='/')[0][1:]
if heatexchanger not in HI_eqs:
HI_eqs.append(heatexchanger)
_HI_counter = 1
for heatexchanger in HI_eqs:
indices = [i for i, x in enumerate(sfiles_v2) if x.split(sep='/')[0][1:] == heatexchanger]
for i in indices:
previous = sfiles_v2[i]
sfiles_v2[i] = [previous, '{' + str(_HI_counter) + '}']
sfiles_v2 = flatten(sfiles_v2)
_HI_counter += 1
# Store information about control structure in stream tag.
for s_idx, s in enumerate(sfiles_v2):
if 'C' in s and '/' in s:
insert_element(sfiles_v2, [s_idx], '{' + str(s.split(sep='/')[1][:-1]) + '}')
sfiles_v2[s_idx] = s.split(sep='/')[0] + ')'
return sfiles_v2
def generalize_SFILES(sfiles):
"""Method to construct the generalized SFILES 2.0: Unit numbers (necessary in graph node names) are removed.
Parameters
----------
sfiles: list [str]
SFILES representation of the flowsheet.
Returns
-------
sfiles_gen: list [str]
Generalized SFILES representation of the flowsheet.
"""
sfiles_gen = sfiles.copy()
for i, s in enumerate(sfiles_gen):
if bool(re.match(r'\(.*?\)', s)):
sfiles_gen[i] = s.split(sep='-')[0] + ')'
return sfiles_gen
def sort_by_rank(nodes_to_sort, ranks, visited=[], canonical=True):
"""Method to sort the nodes by their ranks.
Parameters
----------
nodes_to_sort: list [str]
List of nodes which will be sorted according to their rank.
ranks: dict
Node ranks calculated in calc_graph_invariant().
visited: set
List of already visited nodes.
canonical: bool, default=True
Whether the resulting SFILES should be canonical (True) or not (False).
Returns
-------
nodes_sorted: list [str]
Contains certain neighbour nodes in a sorted manner.
"""
nodes_sorted_dict = {}
nodes_sorted_dict_cycle = {}
for n in nodes_to_sort:
if n in ranks:
if n in visited:
nodes_sorted_dict_cycle[n] = ranks[n]
else:
nodes_sorted_dict[n] = ranks[n]
nodes_sorted_dict = dict(sorted(nodes_sorted_dict.items(), key=lambda item: item[1]))
nodes_sorted_dict_cycle = dict(sorted(nodes_sorted_dict_cycle.items(), key=lambda item: item[1]))
# Concatenate -> direct cycle nodes are visited first.
all_nodes_sorted = dict(nodes_sorted_dict_cycle, **nodes_sorted_dict)
# Only take the sorted keys as list.
nodes_sorted = list(all_nodes_sorted.keys())
if not canonical:
random.shuffle(nodes_sorted)
return nodes_sorted
def calc_graph_invariant(flowsheet):
"""Calculates the graph invariant, which ranks the nodes for branching decisions in graph traversal.
1. Morgan Algorithm based on: Zhang, T., Sahinidis, N. V., & Siirola, J. J. (2019).
Pattern recognition in chemical process flowsheets. AIChE Journal, 65(2), 592-603.
2. Equal ranks (e.g. two raw material nodes) are ranked by additional rules in function rank_by_dfs_tree.
Parameters
----------
flowsheet: networkx graph
Process flowsheet as networkx graph.
Returns
-------
Ranks: dict
Ranks of graph nodes.
"""
# First generate subgraphs (different mass trains in flowsheet).
_sgs = [flowsheet.subgraph(c).copy() for c in nx.weakly_connected_components(flowsheet)]
# Sort subgraphs, such that larger subgraphs are used first.
_sgs.sort(key=lambda x: -len(list(x.nodes)))
rank_offset = 0
all_unique_ranks = {}
for sg in _sgs:
# Morgan algorithm
# Elements of the adjacency matrix show whether nodes are connected in the graph (1) or not (0).
# Summing over the rows of the adjacency matrix results in the connectivity number of each node.
# The Morgan algorithm is performed via a matrix multiplication of the connectivity and the adjacency matrix.
# This equals a summing of the connectivity values of the neighbour nodes for each node in a for-loop.
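        # Hypothetical worked example (not part of the original code): for an undirected path
        # graph a-b-c-d the connectivity vector is [1, 2, 2, 1]; one multiplication with the
        # adjacency matrix yields [2, 3, 3, 2], i.e. every node now carries the sum of its
        # neighbours' connectivities. Topologically equivalent nodes (a and d) keep equal
        # values, which is why remaining ties are broken later in rank_by_dfs_tree().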
undirected_graph = nx.to_undirected(sg)
adjacency_matrix = nx.to_numpy_array(undirected_graph, dtype=np.int64)
connectivity = sum(adjacency_matrix)
node_labels = list(sg)
unique_values_temp = 0
counter = 0
morgan_iter_dict = {}
morgan_iter = connectivity @ adjacency_matrix
# Morgan algorithm is stopped if the number of unique values is stable.
while counter < 5:
morgan_iter = morgan_iter @ adjacency_matrix
unique_values = np.unique(morgan_iter).size
if unique_values > unique_values_temp:
unique_values_temp = unique_values
morgan_iter_dict = dict(zip(node_labels, morgan_iter))
else:
counter += 1
# Assign ranks based on the connectivity values.
r = {key: rank for rank, key in enumerate(sorted(set(morgan_iter_dict.values())), 1)}
ranks = {k: r[v] for k, v in morgan_iter_dict.items()}
# Use rank as keys. Nodes with the same rank are appended to a list.
k_v_exchanged = {}
for key, value in ranks.items():
if value not in k_v_exchanged:
k_v_exchanged[value] = [key]
else:
k_v_exchanged[value].append(key)
# 1. We first sort (ascending) the dict and afterwards create a nested list.
k_v_exchanged_sorted = {k: k_v_exchanged[k] for k in sorted(k_v_exchanged)}
ranks_list = []
for key, value in k_v_exchanged_sorted.items():
ranks_list.append(value)
edge_information = nx.get_edge_attributes(flowsheet, 'tags')
edge_information_col = {k: flatten(v['col']) for k, v in edge_information.items() if 'col' in v.keys() if
v['col']}
# 2. We afterwards sort the nested lists (same rank). This is the tricky part of breaking the ties.
for pos, eq_ranked_nodes in enumerate(ranks_list):
# eq_ranked_nodes is a list itself. They are sorted, so the unique ranks depend on their names.
dfs_trees = []
# Sorting rules to achieve unique ranks are described in the SFILES documentation.
if len(eq_ranked_nodes) > 1:
for n in eq_ranked_nodes:
# Construct depth first search tree for each node.
dfs_tr = nx.dfs_tree(sg, source=n)
dfs_trees.append(dfs_tr)
# Edges of DFS tree are sorted alphabetically. The numbering of the nodes is removed first (since it
# should not change the generalized SFILES).
sorted_edges = []
for k in range(0, len(eq_ranked_nodes)):
edges = sorted(list(dfs_trees[k].edges), key=lambda element: (element[0], element[1]))
edges = [(k.split(sep='-')[0], v.split(sep='-')[0]) for k, v in edges]
sorted_edge = sorted(edges, key=lambda element: (element[0], element[1]))
sorted_edge = [i for sub in sorted_edge for i in sub]
edge_tags = []
for edge, tag in edge_information_col.items():
if edge[0] == eq_ranked_nodes[k] or edge[1] == eq_ranked_nodes[k]:
edge_tags.append(tag[0])
edge_tags = ''.join(sorted(edge_tags))
if edge_tags:
sorted_edge.insert(0, edge_tags)
sorted_edges.append(sorted_edge)
dfs_trees_generalized = {eq_ranked_nodes[i]: sorted_edges[i] for i in range(0, len(eq_ranked_nodes))}
# We sort the nodes by 4 criteria: Input/output/signal/other node, number of successors in dfs_tree,
# successors names (without numbering), node names with numbering.
sorted_eq_ranked_nodes = rank_by_dfs_tree(dfs_trees_generalized)
else:
sorted_eq_ranked_nodes = sorted(eq_ranked_nodes)
ranks_list[pos] = sorted_eq_ranked_nodes
# 3. We flatten the list and create the new ranks dictionary with unique ranks
# (form: node:rank) starting with rank 1.
flattened_ranks_list = flatten(ranks_list)
unique_ranks = {n: r + 1 + rank_offset for r, n in enumerate(flattened_ranks_list)}
# All unique ranks in separate dict.
all_unique_ranks.update(unique_ranks)
# Change rank offset in case there are subgraphs.
rank_offset += len(list(sg.nodes))
return all_unique_ranks
def position_finder(nodes_position_setoffs, node, sfiles, nodes_position_setoffs_cycle, cycle=False):
"""Returns position where to insert a certain new list element in sfiles list, adjusted by position setoffs.
Parameters
----------
nodes_position_setoffs: dict
Counts the occurrences of outgoing and incoming cycles per node.
node: str
Node name for which position is searched.
sfiles: list [str]
SFILES representation of the flowsheet.
nodes_position_setoffs_cycle: dict
Counts the occurrences only of outgoing cycles per node.
cycle: boolean, default=False
Whether the format is of form # (outgoing cycle)
Returns
----------
pos: int
Position where to insert new element.
"""
# If the node is not found, it is in a nested list: Function to find positions in nested list is utilized.
indices = find_nested_indices(sfiles, '(' + node + ')')
if cycle:
# This ensures that # are always listed before <#.
indices[-1] += nodes_position_setoffs_cycle[node]
# This updates the node position setoffs for cycles only.
nodes_position_setoffs_cycle[node] += 1
# This updates the overall node position setoffs.
nodes_position_setoffs[node] += 1
else:
indices[-1] += nodes_position_setoffs[node]
# This updates the overall node position setoffs.
nodes_position_setoffs[node] += 1
return indices
def last_node_finder(sfiles):
"""Returns the last node in the sfiles list.
Parameters
----------
sfiles: list [str]
SFILES representation of the flowsheet.
Returns
----------
last_node: str
Name of last node.
"""
last_node = ''
for element in reversed(sfiles):
if element.startswith('(') and element.endswith(')'):
last_node = element[1:-1]
break
return last_node
def flatten(nested_list):
"""Returns a flattened list.
Parameters
----------
nested_list: list
List of lists.
Returns
----------
l_flat: list
Flat list without nested lists.
"""
flat_list = []
for i in nested_list:
if isinstance(i, list):
flat_list.extend(flatten(i))
else:
flat_list.append(i)
return flat_list
def find_nested_indices(nested_list, node):
"""Returns index of node in nested list.
Parameters
----------
nested_list: list
List of lists.
node: str
Name of node.
Returns
----------
indices: list
Flat list without nested lists.
"""
temp_list = nested_list.copy()
indices = []
if node not in flatten(nested_list):
raise KeyError('Node not in nested list!')
while True:
try:
pos = temp_list.index(node)
indices.append(pos)
break
except:
for idx, i in enumerate(temp_list):
if node in flatten(i):
temp_list = i.copy()
indices.append(idx)
return indices
def insert_element(lst, indices, value):
if len(indices) == 1:
lst.insert(indices[0] + 1, value)
else:
insert_element(lst[indices[0]], indices[1:], value)
def rank_by_dfs_tree(dfs_trees_generalized):
"""Sorts the nodes with equal ranks (after application of morgan algorithm) according to the following criteria:
1. Ranks: Signal node < Output node < Input node < All other nodes
2.1. Input nodes: The higher the number of successors in dfs_tree the lower the rank. First build long SFILES parts.
(if 1. did not yield unique ranks)
2.2. Other nodes: The lower the number of successors in dfs_tree the lower the rank. Short branches in brackets.
(if 1. did not yield unique ranks)
3. Alphabetical comparison of successor names (if 1. & 2. did not yield unique ranks).
4. Unit operations of equally ranked nodes are the same. Considering node numbers of equally ranked nodes.
(if 1. & 2. & 3. did not yield unique ranks)
Note: Criteria 4 implies that the node numbering matters in SFILES construction.
Nevertheless, if we remove the numbers in SFILES (generalized SFILES), the SFILES will be independent of
numbering. This is based on criteria 3, which implies that all the successors are the same.
Parameters
----------
dfs_trees_generalized: dict
Equally ranked nodes with their respective dfs_trees (node names without unit numbers) in the flowsheet graph.
Returns
-------
sorted_nodes: list
List of sorted nodes with previously equal ranks.
"""
output_nodes = {}
input_nodes = {}
signal_nodes = {}
other_nodes = {}
for n, s in dfs_trees_generalized.items():
succ_str = ''.join(list(s))
if 'prod' in n:
output_nodes[n] = (len(dfs_trees_generalized[n]), succ_str)
elif 'raw' in n:
input_nodes[n] = (len(dfs_trees_generalized[n]), succ_str)
elif bool(re.match(r'C-\d+', n)):
signal_nodes[n] = (len(dfs_trees_generalized[n]), succ_str)
else:
other_nodes[n] = (len(dfs_trees_generalized[n]), succ_str)
    # Sort all dicts first according to list length (input/output: long is better; other nodes: short is better
    # -> less in brackets), then by the generalized string alphabetically, then by the real node name
    # (i.e. node number). The real node name with its numbering is only used if the generalized string
    # (graph structure) is the same.
sorted_nodes = []
for d in [signal_nodes, output_nodes, input_nodes]:
        # Three sort criteria, in that order: list length (- sign), then generalized string alphabetically, then node number.
sorted_nodes_sub = sorted(d, key=lambda k: (-d[k][0], d[k][1], int(re.split('[-/]', k)[1])))
sorted_nodes.extend(sorted_nodes_sub) # Implies the order of first signal then output and input nodes.
    # Other nodes: three sort criteria, in that order: list length (+ sign), then generalized string
    # alphabetically, then node number.
sorted_nodes_sub = sorted(other_nodes,
key=lambda k: (other_nodes[k][0], other_nodes[k][1], int(re.split('[-/]', k)[1])))
sorted_nodes.extend(sorted_nodes_sub) # Implies the order of first signal, then output, input, and other nodes.
return sorted_nodes
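# Illustrative (hypothetical) tie-break between three equally ranked nodes:
# >>> rank_by_dfs_tree({'C-1': {'C'}, 'prod-2': {'prod'}, 'raw-1': {'raw', 'hex', 'mix'}})
# ['C-1', 'prod-2', 'raw-1']
# The signal node comes first, then the output node, then the input node (criterion 1).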
def insert_signal_connections(edge_infos_signal, sfiles, nodes_position_setoffs_cycle, nodes_position_setoffs,
special_edges):
"""Inserts signal connections in SFILES.
Parameters
----------
edge_infos_signal: dict
Contains information about signal edges.
sfiles: list [str]
SFILES representation of the flowsheet (parsed).
nodes_position_setoffs: dict
Counts the occurrences of outgoing and incoming cycles per node.
nodes_position_setoffs_cycle: dict
Counts the occurrences only of outgoing cycles per node.
special_edges: dict
Saves, whether an edge (in,out) is a cycle (number>1) or not (number=0).
Returns
----------
sfiles: list
SFILES list including signal connections.
"""
nr_pre_visited_signal = 0
signal_nodes = [k[0] for k in edge_infos_signal.keys()]
sfiles_flattened = flatten(sfiles)
pos = {}
if signal_nodes:
nodes_position_setoffs_temp = nodes_position_setoffs.copy()
nodes_position_setoffs_cycle_temp = nodes_position_setoffs_cycle.copy()
for k in signal_nodes:
pos.update({position_finder(nodes_position_setoffs, k, sfiles_flattened,
nodes_position_setoffs_cycle)[0]: k})
# Reset node_position_setoffs since they are manipulated by position_finder.
nodes_position_setoffs_cycle = nodes_position_setoffs_cycle_temp.copy()
nodes_position_setoffs = nodes_position_setoffs_temp.copy()
# TODO: Check if this works!
#nodes_position_setoffs_cycle = nodes_position_setoffs_cycle.fromkeys(nodes_position_setoffs_cycle, 0)
#nodes_position_setoffs = nodes_position_setoffs_cycle.fromkeys(nodes_position_setoffs, 0)
for k, v in special_edges.items():
if v == '&':
nodes_position_setoffs[k[1]] = 0
# Sort the signal nodes according to their position in the SFILES.
signal_nodes_sorted = dict(sorted(pos.items()))
signal_nodes_sorted = list(signal_nodes_sorted.values())
edge_infos_signal = dict(sorted(edge_infos_signal.items(), key=lambda x: signal_nodes_sorted.index(x[0][0])))
    for k, v in edge_infos_signal.items():
nr_pre_visited_signal, special_edges, sfiles_part, sfiles = insert_cycle(nr_pre_visited_signal, sfiles, sfiles,
special_edges, nodes_position_setoffs,
nodes_position_setoffs_cycle, v, k,
inverse_special_edge=False,
signal=True)
return sfiles
| true
|
7777a0c5b220d4aa3a7c2664f562c8890c3f1287
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2847/60724/237142.py
|
UTF-8
| 198
| 3
| 3
|
[] |
no_license
|
s=int(input())
numbers=input().split()
numbers=[int(x) for x in numbers]
rank=input().split()
rank=[int(y) for y in rank]
res=0
for k in range(rank[0]-1,rank[1]-1):
res=res+numbers[k]
print(res)
| true
|
598ea3382cef27b48e73ecb7985ffa521221b402
|
Python
|
irvalchev/3MW-Simple-App
|
/site_summary/views.py
|
UTF-8
| 1,594
| 2.578125
| 3
|
[] |
no_license
|
from django.shortcuts import render
from site_summary.models import Site, SiteEntry
def sites(request):
sites_list = Site.objects.all()
context = {'sites_list': sites_list}
return render(request, 'sites.html', context)
def site_details(request, site_id):
site = Site.objects.filter(id=site_id)
site_entries = None
if site:
site_entries = SiteEntry.objects.filter(site=site)
context = {'site': site, 'site_entries': site_entries}
return render(request, 'site_details.html', context)
def summary_sum(request):
aggregation = "sum"
all_entries = SiteEntry.objects.all()
summary_entries = []
for site in Site.objects.all():
summary_entry = SiteEntry()
summary_entry.site = site
summary_entry.a_value = sum(entry.a_value for entry in
all_entries if entry.site == site)
summary_entry.b_value = sum(entry.b_value for entry in
all_entries if entry.site == site)
summary_entries.append(summary_entry)
context = {'summary_entries': summary_entries, "aggregation": aggregation}
return render(request, 'summary.html', context)
def summary_average(request):
aggregation = "average"
summary_entries = SiteEntry.objects.raw("""
SELECT 0 as id, site_id, null as date, avg(a_value) a_value,
avg(b_value) b_value
FROM site_entries
GROUP BY site_id""")
context = {'summary_entries': summary_entries, "aggregation": aggregation}
return render(request, 'summary.html', context)
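    # A rough ORM-based equivalent of the raw SQL above (illustrative sketch only,
    # not used by the view; the names avg_a / avg_b are hypothetical and would
    # need matching template changes):
    # from django.db.models import Avg
    # SiteEntry.objects.values('site_id').annotate(avg_a=Avg('a_value'),
    #                                              avg_b=Avg('b_value'))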
| true
|
575ef4a34c23afaf7ded8c466559f5ca371a7799
|
Python
|
correosdelbosque/tsl
|
/utils/distances.py
|
UTF-8
| 17,477
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import json
import math
import matplotlib.pyplot as plt
import networkx as nx
import numpy
import os
import sys
# Read in movie JSON files.
movies_dir = "../example-scripts/parsed"
outdir = "/wintmp/movie/graph5/"
def get_movies( movies_dir ):
    '''Returns a hash keyed on movie title whose value is the Python
    data structure parsed from the _metrics.json for that film in the
    movies_dir.'''
movies = {}
for dirpath, dirnames, filenames in os.walk( movies_dir):
for directory in dirnames:
metrics_files = [ x for x in os.listdir( os.path.join( dirpath, directory ) ) if x.endswith( '_metrics.json' ) ]
if len( metrics_files ) == 0:
print "Skipping %s/%s" % ( dirpath, directory )
continue
metrics = json.load( open( os.path.join( dirpath, directory, metrics_files[0] ) ) )
movies[metrics['title']] = metrics
return movies
def default_dist( a, b ):
return abs( a-b )
def register_dist_funcs( dist_funcs ):
def log_dist( a, b ):
return abs( math.log( a ) - math.log( b ) )
dist_funcs[ dimensions[2] ] = log_dist
dist_funcs[ dimensions[7] ] = log_dist
def five_vect( a, b, lookup ):
result_dist = 0
for i in range( 0, 5 ):
a_val = None
if i >= len( a ):
a_val = 0
else:
if i == 0:
a_val = 3*a[i][lookup]
else:
a_val = a[i][lookup]
b_val = None
if i >= len( b ):
b_val = 0
else:
if i == 0:
b_val = 3*b[i][lookup]
else:
b_val = b[i][lookup]
result_dist += default_dist( a_val, b_val )**2
return result_dist**0.5
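    # Illustrative (hypothetical) example of the weighted five-slot distance:
    # the lead entry (index 0) counts three times as much as the others.
    # >>> five_vect([{'speakers': 2}, {'speakers': 1}], [{'speakers': 1}], 'speakers')
    # ((6 - 3)**2 + (1 - 0)**2)**0.5 = 10**0.5, roughly 3.16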
def character_x_speakers( a, b ):
return five_vect( a, b, 'speakers' )
dist_funcs[ dimensions[9] ] = character_x_speakers
def scenes_percentage_for_characters( a, b ):
return five_vect( a, b, 'percentage_of_scenes' )
dist_funcs[ dimensions[10] ] = scenes_percentage_for_characters
def percent_dialog_by_character( a, b ):
return five_vect( a, b, 'percent_dialog' )
dist_funcs[ dimensions[11] ] = percent_dialog_by_character
def dialog_words_score( a, b ):
return ( ( a[0] - b[0] )**2 + ( a[1] - b[1] )**2 )**0.5
dist_funcs[ dimensions[13] ] = dialog_words_score
'''
def mcic( a, b ):
return abs( a-b )
dist_funcs[ dimensions[0] ] = mcic
def poswmc( a, b ):
return 50*abs( a-b )
dist_funcs[ dimensions[1] ] = poswmc
'''
def cartesian_distance( dists ):
'''Takes in an array of distances between coordinates, and
aggregates them into a single distance function. Here we use
Cartesian distance.'''
total_dist = 0
for dist in dists:
total_dist += dist**2
return total_dist**0.5
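# Quick sanity example: cartesian_distance([3, 4]) returns 5.0.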
def compute_distances( movies, dist_funcs, distance_func ):
    '''Returns a hash of hashes. The keys are every pair of movies, and
    the value is the distance between them.'''
distances = {}
for k1 in sorted( movies.keys() ):
for k2 in sorted( movies.keys() ):
m1 = movies[k1]
m2 = movies[k2]
dists = []
for dim in dimensions:
if dim in dist_funcs:
dists.append( dist_funcs[dim]( m1[dim], m2[dim] ) )
else:
dists.append( default_dist( m1[dim], m2[dim] ) )
distance = distance_func( dists )
if k1 in distances:
distances[k1][k2] = distance
else:
distances[k1] = { k2 : distance }
return distances
def eccentricity( distances ):
'''A hash of movie, eccentricity.'''
result = {}
denominator = len( distances.keys() )
for k1 in sorted( distances.keys() ):
numerator = 0
for k2, distance in distances[k1].items():
numerator += distance
result[k1] = numerator / denominator
return result
def density( distances ):
'''A hash of movie, density.'''
result = {}
for k1 in sorted( distances.keys() ):
numerator = 0
for k2, distance in distances[k1].items():
try:
numerator += 1 / math.e**( distance**2 )
            except OverflowError:
# If we have an overflow don't worry about it, just
# add nothing.
pass
result[k1] = numerator
return result
def compute_projection( distances, projection_func ):
return projection_func( distances )
def get_inverse_covering( projection, covering ):
'''Given a covering, which is defined as an array of tuples, the
elements a, b of which define the interval: [a, b], and a
projection data structure, return:
An array of hashes, the i'th element of which corresponds to the
inverse image of the things in the projection for the i'th tuple.
The format of these hashes is:
{ range: ( a, b ), movies: { 'Movie 1': True, 'Movie 2': True, ... } }'''
inverse_covering = []
for interval in covering:
start = interval[0]
end = interval[1]
current_inverse = { 'range' : interval, 'movies' : {} }
for movie, value in projection.items():
if start <= value and value <= end:
current_inverse['movies'][movie] = True
inverse_covering.append( current_inverse )
return inverse_covering
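# Illustrative (hypothetical) example:
# >>> get_inverse_covering({'A': 1.0, 'B': 3.5}, [(0, 2), (2, 4)])
# [{'range': (0, 2), 'movies': {'A': True}},
#  {'range': (2, 4), 'movies': {'B': True}}]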
def get_clusters( movies_input, distances, epsilon ):
    '''Given a hash of movie keys, the distances data structure, and an
    epsilon threshold distance, returns an array of hashes of movie
    keys, where each hash is a cluster: a subset of the input movies
    containing the points which are within the transitive closure of
    epsilon of one another.'''
# Don't change the input value.
movies = {}
for movie in movies_input.keys():
movies[movie] = True
clusters = []
#import pdb
#pdb.set_trace()
while len( movies ):
current_cluster = {}
cluster_changed = True
while cluster_changed:
cluster_changed = False
movie_keys = movies.keys()
for movie in movie_keys:
if len( current_cluster ) == 0:
cluster_changed = True
current_cluster[movie] = True
del movies[movie]
else:
for cluster_movie in current_cluster.keys():
if distances[cluster_movie][movie] <= epsilon:
cluster_changed = True
current_cluster[movie] = True
if movie in movies:
del movies[movie]
#for movie in movie_keys:
# if len( current_cluster ) == 0:
# current_cluster[movie] = True
# del movies[movie]
# else:
# for cluster_movie in current_cluster.keys():
# if distances[cluster_movie][movie] <= epsilon:
# current_cluster[movie] = True
# if movie in movies:
# del movies[movie]
clusters.append( current_cluster )
return clusters
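# Illustrative (hypothetical) example: with distances A-B = 1, A-C = B-C = 10
# and epsilon = 2, A and B end up in one cluster and C in its own:
# >>> get_clusters({'A': True, 'B': True, 'C': True},
# ...              {'A': {'B': 1, 'C': 10}, 'B': {'A': 1, 'C': 10},
# ...               'C': {'A': 10, 'B': 10}}, 2)
# [{'A': True, 'B': True}, {'C': True}]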
def cluster_epsilon_finder( movies, distances ):
'''Calculates epsilon via the following algorithm:
1. Clusters are defined to be a non-empty set of points, initially
we have one cluster per point.
2. Distance between clusters is defined to be the minimum distance
between any two points in either cluster.
3. We iteratively aggregate the two clusters having the minimum
distance until there is only one cluster, while recording the
distances involved.
4. We select the median distance recorded in step 3.
'''
# Handle pathological cases
if not len( movies ):
raise Exception("Expected at least one movie in cluster_epsilon_finder.")
elif len( movies ) == 1:
return [0]
cluster_distances = []
# Create the initial cluster.
min_i = None
min_j = None
min_dist = None
for i in movies.keys():
for j in movies.keys():
if i == j:
continue
else:
if min_dist is None or distances[i][j] < min_dist:
min_dist = distances[i][j]
min_i = i
min_j = j
clusters = [ { min_i : True, min_j : True } ]
cluster_distances.append( distances[min_i][min_j] )
for movie in movies.keys():
if movie != min_i and movie != min_j:
clusters.append( { movie : True } )
# Process the rest of the points.
while len( clusters ) > 1:
min_dist = None
min_i = None
min_j = None
for i_idx, cluster_i in enumerate( clusters ):
for j_idx, cluster_j in enumerate( clusters ):
if i_idx == j_idx:
continue
else:
for i in cluster_i.keys():
for j in cluster_j.keys():
if min_dist is None or distances[i][j] < min_dist:
min_dist = distances[i][j]
min_i_idx = i_idx
min_j_idx = j_idx
min_cluster_i = cluster_i
min_cluster_j = cluster_j
min_i = i
min_j = j
# There are a few cases:
#
# 1. min_cluster_i and j are in singleton clusters := make a
# new cluster of the two of them.
#
# 2. min_cluster_i or j is a singleton, but the other is not
# := add the singleton to the larger cluster.
#
# 3. Neither min_cluster_i or j is a singleton := merge the
# two.
cluster_distances.append( min_dist )
if len( min_cluster_i.keys() ) == 1 and len( min_cluster_j.keys() ) == 1:
min_cluster_i[min_j] = True
clusters = clusters[:min_j_idx] + clusters[min_j_idx+1:]
elif len( min_cluster_i.keys() ) == 1 and len( min_cluster_j.keys() ) > 1:
min_cluster_j[min_i] = True
clusters = clusters[:min_i_idx] + clusters[min_i_idx+1:]
elif len( min_cluster_i.keys() ) > 1 and len( min_cluster_j.keys() ) == 1:
min_cluster_i[min_j] = True
clusters = clusters[:min_j_idx] + clusters[min_j_idx+1:]
else:
for j_point in min_cluster_j.keys():
min_cluster_i[j_point] = True
clusters = clusters[:min_j_idx] + clusters[min_j_idx+1:]
return cluster_distances
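# Illustrative (hypothetical) example: for three movies with pairwise distances
# A-B = 1, B-C = 5 and A-C = 10, A and B merge first (distance 1) and C joins
# afterwards (distance 5), so the recorded distances are [1, 5] and their
# median -- the epsilon candidate -- is 3.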
movies = get_movies( movies_dir )
# Dimensions
#
# Don't change the order of things here unless you also change the
# dist_funcs key lookups in register_dist_funcs
#dimensions = [ 'main_character_interlocutor_count', 'percentage_of_scenes_with_main_character' ]
dimensions = [
'named_characters',
'distinct_locations',
'location_changes',
'percent_dialog',
'distinct_words',
'dramatic_units',
'adj-adv_noun-verb_ratio',
'supporting_characters',
'hearing',
'character_x_speakers',
'scenes_percentage_for_characters',
'percent_dialog_by_character',
'scene_dialog_score',
'dialog_words_score'
]
dist_funcs = {}
register_dist_funcs( dist_funcs )
import pprint
pp = pprint.PrettyPrinter( indent=4 )
# We could in principle have difference means of calculating our
# distance.
distance_func = cartesian_distance
distances = compute_distances( movies, dist_funcs, distance_func )
print "Distances:"
pp.pprint( distances )
projection_func = eccentricity
#projection_func = density
projection = compute_projection( distances, projection_func )
print "Eccentricities:"
pp.pprint( projection )
def make_covering( low, high, width, overlap ):
step = float( width ) / overlap
current = low
covering = []
while current < high:
covering.append( ( current, current + width ) )
current += step
return covering
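# Illustrative example: intervals of width 4 whose start points advance by
# width / overlap = 2, so each interval overlaps half of the next one.
# >>> make_covering(0, 10, 4, 2)
# [(0, 4), (2.0, 6.0), (4.0, 8.0), (6.0, 10.0), (8.0, 12.0)]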
def output_d3( filename, vertices, edges, header ):
f = open( outdir+filename+".json", 'w' )
json.dump( { "nodes" : vertices, "links" : edges }, f )
f.close()
f = open( outdir+"graphs.html", 'a' )
html_body = '''
<p>
%s
</p>
<script>
script_graph( "%s", width=768, height=432 );
</script>
''' % ( header, filename+".json" )
f.write( html_body )
f.close()
def make_graph( low, high, width, overlap, epsilon ):
covering = make_covering( low, high, width, overlap )
print "Covering is:", covering
inverse_covering = get_inverse_covering( projection, covering )
# Array of { "name":"Foo","group":cluster_idx }
vertices = []
# Array of { "source":idx of thing in vertices, "target":idx of thing in vertices", value:1 }
edges = []
graph = nx.Graph()
label_to_vertex = {}
for p_idx, partition in enumerate( inverse_covering ):
partition_clusters = get_clusters( partition['movies'], distances, epsilon )
print "Range from %s to %s had %s movies, which formed the following clusters:" % ( partition['range'][0], partition['range'][1], len( partition['movies'].keys() ) )
for idx, cluster in enumerate( partition_clusters ):
print "\tCluster %s" % idx
label = 'Cover %s: ' % ( p_idx ) + ', '.join( sorted( cluster.keys() ) )
#graph.add_node( label )
#vertices.append( { "name" : label, "group" : p_idx } )
#label_to_vertex[label] = len( vertices ) - 1
#import pdb
#pdb.set_trace()
add_to_graph = True
for node, data in graph.nodes( data=True ):
same_as_existing = True
for movie in cluster.keys():
if movie not in data:
same_as_existing = False
for movie in data.keys():
if movie not in cluster:
same_as_existing = False
if same_as_existing:
add_to_graph = False
print "Skipping cluster: %s as identical to %s" % ( label, node )
break
if add_to_graph:
graph.add_node( label )
vertices.append( { "name" : label, "group" : p_idx, "elements" : len( cluster.keys() ), "shading" : float( partition['range'][0] ) / high } )
label_to_vertex[label] = len( vertices ) - 1
for movie in sorted( cluster.keys() ):
if add_to_graph:
graph.node[label][movie] = True
print "\t\t%s" % movie
for node, data in graph.nodes( data=True ):
if movie in data and node != label and add_to_graph:
graph.add_edge( node, label )
edges.append( { "source" : label_to_vertex[node], "target" : label_to_vertex[label], "value" : 1 } )
#nx.write_dot( graph, 'file.dot' )
#positions = nx.graphviz_layout( graph, prog='neato' )
#positions = nx.spring_layout( graph )
#nx.draw( graph, pos=positions )
#nx.draw_random( graph )
#plt.show()
#nx.draw_circle( graph )
'''
positions = nx.spring_layout( graph, scale=1024 )
plt.figure( 1, figsize=(16,16) )
nx.draw( graph, positions, font_size=8 )
plt.show()
#plt.figure( num=None, figsize=( 8, 8 ), facecolor='w', edgecolor='k' )
#plt.savefig( "8x8_cover_width_%s_overlap_%s_epsilon_%0.02f.png" % ( width, overlap, epsilon ) )
#plt.figure( num=None, figsize=( 16, 16 ) )
#plt.savefig( "16x16_cover_width_%s_overlap_%s_epsilon_%0.02f.png" % ( width, overlap, epsilon ) )
'''
#import pdb
#pdb.set_trace()
plt.clf()
positions = nx.spring_layout( graph, k=.1, iterations=100 )
plt.figure( figsize=(16,9) )
nx.draw( graph, pos=positions )
filename = "cover_width_%s_overlap_%s_epsilon_%0.02f" % ( width, overlap, epsilon )
plt.savefig( outdir+"%s.png" % ( filename ) )
output_d3( filename, vertices, edges, "Cover width: %s, Overlap: %s, Epsilon: %0.02f" % ( width, overlap, epsilon ) )
epsilon_candidates = cluster_epsilon_finder( movies, distances )
print "Cluster epsilon candidates", epsilon_candidates
epsilon = numpy.median( epsilon_candidates )*1.01
#epsilon = 10
print "Epsilon selected as: (multiplied by 1.01 to handle rounding errors)", epsilon
f = open( outdir+"graphs.html", 'w' )
html_front = '''
<!DOCTYPE html>
<meta charset="utf-8">
<style>
.node {
stroke: #fff;
stroke-width: 1.5px;
}
.link {
stroke: #999;
stroke-opacity: .6;
}
</style>
<body>
<script src="http://d3js.org/d3.v3.min.js"></script>
<script src="graph.js"></script>
'''
f.write( html_front )
f.close()
#epsilon = 10
for width in [65536, 32768, 16384, 8192, 4096, 2048, 1024, 512, 256, 128]:
#for width in [ 128, 64, 32, 16, 8, 4, 2, 1, .5, .25 ]:
# make_graph( 0, 74, width, 2, epsilon )
make_graph( 13000, 33950, width, 4, epsilon )
f = open( outdir+"graphs.html", 'a' )
html_back = '''
</body>
'''
f.write( html_back )
f.close()
| true
|
fc91d4a9f06a02e3cd50fe9df396376898dd4bdb
|
Python
|
ricard0ff/stuffy
|
/random/compare and sum.py
|
UTF-8
| 634
| 3.84375
| 4
|
[] |
no_license
|
alist1 = [1,4,5,6]
alist2 = [1,10,3,4,5,6]
def get_sum(alist, number_sum):
    item = set()
    alist.sort()
    for valueouter in alist:
        # Check before adding the current value, so a single occurrence of
        # number_sum / 2 is not incorrectly paired with itself.
        if (number_sum - valueouter) in item:
            # then we have an item that will get us to the sum
            print("we have found the number")
            print("the number is %d that can be used with %d" % (valueouter, (number_sum - valueouter)))
        item.add(valueouter)
def main():
get_sum(alist2, 6)
if __name__ == "__main__":
main()
| true
|
b5130fca37f06040a0061eb7989f426c1324bd25
|
Python
|
nimbis/cmsplugin-tabs
|
/cmsplugin_tabs/tests.py
|
UTF-8
| 411
| 2.546875
| 3
|
[] |
no_license
|
from django.test import TestCase
from models import Tab, TabHeader
class TabsTest(TestCase):
"""
Simple CRUD test for cmsplugin-tabs
"""
def setUp(self):
self.tab = Tab()
self.tab.title = "Test Tab"
self.header = TabHeader()
def test_plugin(self):
self.assertEquals(unicode(self.header), "0 tabs")
self.assertEquals(unicode(self.tab), "Test Tab")
| true
|
511cb208914563a65538d1bb00da1d6d4b297901
|
Python
|
windorchidwarm/py_test_project
|
/hugh/cyan/test/test_fun.py
|
UTF-8
| 406
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : test_fun.py
# Author: chen
# Date : 2020-04-27
def adder(x):
def wrapper(y):
return x + y
return wrapper
adder5 = adder(5)
print(adder5(adder5(6)))
def f(): pass
print(type(f()))
s = "I love Python"
ls = s.split()
ls.reverse()
print(ls)
mytuple=3,4,5
print(mytuple)
x,y,z=mytuple
print(x+y+z)
print((10) // (-3))
print(2 ** 2.4)
| true
|
03300abbd0cc6571b4180aab1a83cca8eece40d7
|
Python
|
nextdesusu/Learn-Python
|
/SICP/examples/ex1_27.py
|
UTF-8
| 2,078
| 3.296875
| 3
|
[] |
no_license
|
from random import randrange
def fast_prime(n, times):
    # Probabilistic Fermat test: run `times` trials, each with a fresh
    # random base, and report True only if every trial passes.
    even = lambda x: x % 2 == 0
    remainder = lambda x, y: x % y
    square = lambda x: x * x
    def expmod(base, exp, m):
        if exp == 0:
            return 1
        if even(exp):
            return remainder(square(expmod(base, exp // 2, m)), m)
        else:
            return remainder(base * (expmod(base, exp - 1, m)), m)
    def fermat_test(n):
        try_it = lambda func, a: func(a, n, n) == a
        return try_it(expmod, randrange(1, n))
    if times == 0:
        return True
    if fermat_test(n):
        return fast_prime(n, times - 1)
    else:
        return False
def tester(n):
even = lambda x: x % 2 == 0
remainder = lambda x, y: x % y
square = lambda x: x * x
random = randrange(1, n)
test_it = lambda func, a, n: func(a, n, n) == a
def expmod(base, exp, m):
if exp == 0:
return 1
if even(exp):
return remainder(square(expmod(base, exp / 2, m)), m)
else:
return remainder(base * (expmod(base, exp - 1, m)), m)
def test(a, n):
return expmod(a, n, n) == a
def test_all_from_start(n, start):
if start < n:
if test(start, n):
return test_all_from_start(n, start+1)
return False
return True
return test_all_from_start(n, 1)
Karmikle_numbers = [561, 1105, 1729, 2465, 2821, 6601]  # Carmichael numbers
Simple_numbers = [3, 5, 7, 11, 13, 17]
print("*** Karmikle_numbers ***")
for num in Karmikle_numbers:
print(tester(num))
print("*** Karmikle_numbers ***")
print("*** Simple_numbers ***")
for num in Simple_numbers:
print(tester(num))
print("*** Simple_numbers ***")
| true
|