blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
51cc72a9586eb090d9def75dbeb4258b5f8dae7b | Python | Termoplane/Python__Course | /lambda_mod_checker.py | UTF-8 | 119 | 3.359375 | 3 | [] | no_license | def mod_checker(x, mod = 0):
    # Return a predicate reporting whether y % x equals mod
    # (default 0, i.e. "is y divisible by x").
    return lambda y : y % x == mod
# Demo: a divisibility-by-3 checker.
mod_3 = mod_checker(3)
print(mod_3(5))
print(mod_3(3)) | true |
a4a2e82282922f93c0ba02e4d2d482b239ed3c1b | Python | sjraaijmakers/otolith | /prepare/prepare_stacks.py | UTF-8 | 658 | 2.6875 | 3 | [] | no_license | # Bulk prepare stacks
import os
import sys
import prepare_stack
def has_subdirectories(dir):
    """Return True if *dir* directly contains at least one subdirectory.

    Bug fix: the original scanned the global ``s`` (the caller's loop
    variable) instead of its own ``dir`` argument, so it only worked by
    accident when called exactly as ``has_subdirectories(s)``.
    (The parameter name ``dir`` shadows the builtin but is kept for
    interface compatibility.)
    """
    for entry in os.scandir(dir):
        if entry.is_dir():
            return True
    return False
if __name__ == "__main__":
    # usage: prepare_stacks.py <input_folder> <output_folder>
    args = sys.argv[1:]
    input_folder = args[0]
    output_folder = args[1]
    # os.walk yields (dirpath, dirnames, filenames); keep only the paths
    # and drop the first entry, which is input_folder itself.
    subs = [x[0] for x in os.walk(input_folder)]
    subs = subs[1:]
    for s in subs:
        # Only process leaf directories (the actual image stacks).
        if has_subdirectories(s):
            continue
        name = s.replace(input_folder, "")
        print("Preparing %s" % name)
        # Mirror the input layout under output_folder, minus the leaf name
        # (prepare_stack.run creates the leaf itself).
        basename = os.path.basename(s)
        out = (output_folder + name).replace(basename, "")
        prepare_stack.run(s, out)
| true |
ff59d2469cf2bd83adf5b15de94cfbbf46280c1f | Python | srishtishukla-20/function | /Q4(Prime).py | UTF-8 | 593 | 3.484375 | 3 | [] | no_license | def prime_num(n):
    # Count every divisor of n in [1, n] by trial division.
    i=1
    counter=0
    while i<=n:
        if n%i==0:
            counter+=1
        i+=1
    # Exactly two divisors (1 and n) means n is prime; n <= 1 falls
    # through to "not prime number".
    if counter==2:
        print("prime number")
    else:
        print("not prime number")
n=int(input("enter the num"))
prime_num(n)
#prime num
def prime(num):
i=2
x=0
while i>0:
j=1
count=0
while j<i:
if i%j==0:
count+=1
j+=1
if count==1:
print(i,"prime no")
x+=1
if x==num:
break
i+=1
prime(num=int(input("enter any num=")))
#another method
| true |
a2e48e281ace597f272d71506979008b27d20bdf | Python | chizhdiana/Repository | /my_test/test/Redis_bit.py | UTF-8 | 942 | 2.921875 | 3 | [] | no_license | import redis
import time
conn = redis.Redis()  # default connection: localhost:6379
now = time.time()
print(now)
# BITS: track daily site visits with one bitmap key per day and one bit
# per user id.
days = ['2013-02-25', '2013-02-26', '2013-02-27']  # list of dates (bitmap keys)
# user IDs
big_spender = 1089
tire_kicker = 4045
late_joiner = 550212
# set the bit for a specific date to record one visit by that user
print(conn.setbit(days[0], big_spender, 1))
print(conn.setbit(days[0], tire_kicker, 1))
print(conn.setbit(days[1], big_spender, 1))
print(conn.setbit(days[2], big_spender, 1))
print(conn.setbit(days[2], late_joiner, 1))
# daily visit counter over these three days:
#for day in days:
#conn.bitcount(day)
print(conn.getbit(days[1], tire_kicker))
# How many users visit the site every day? (AND of the daily bitmaps)
# conn.bitop('and', 'everyday', *days)
print(conn.getbit('everyday', big_spender))
| true |
2da9f60f21acff5c59ff46f62fe2746815b12e1c | Python | Cedric-Chan/Script_of_Data_Analysis | /数据分析与机器学习/数据分析实战/图&社交网络/识别欺诈的罪魁祸首.py | UTF-8 | 3,645 | 3 | 3 | [] | no_license | import networkx as nx
import numpy as np
import collections as c

graph_file = 'desktop/fraud.gz'
fraud = nx.read_graphml(graph_file)
print('\nType of the graph: ', type(fraud))  # graph type (directed graph with parallel edges)
# nodes and edges
nodes = fraud.nodes()  # fetch all nodes
nodes_population = [n for n in nodes if 'p_' in n]  # buyer nodes are prefixed with p_
nodes_merchants = [n for n in nodes if 'm_' in n]  # merchant nodes are prefixed with m_
n_population = len(nodes_population)
n_merchants = len(nodes_merchants)
print('\nTotal population: {0}, number of merchants: {1}'.format(n_population, n_merchants))  # node list sizes
# number of transactions
n_transactions = fraud.number_of_edges()
print('Total number of transactions: {0}'.format(n_transactions))  # total transactions (edge count)
# what do we know about a transaction
p_1_transactions = fraud.out_edges('p_1', data=True)  # out_edges() fetches all of p_1's transactions
print('\nMetadata for a transaction: ', list(p_1_transactions))
print('Total value of all transactions: {0}'.format(np.sum([t[2]['amount'] for t in fraud.edges(data=True)])))  # total transaction value
# identify consumers whose credit cards were compromised
all_disputed_transactions = [dt for dt in fraud.edges(data=True) if dt[2]['disputed']]
print('Total number of disputed transactions: {0}'.format(len(all_disputed_transactions)))  # number of disputed transactions
print('Total value of disputed transactions: {0}'.format(np.sum([dt[2]['amount'] for dt in all_disputed_transactions])))  # value involved in the fraud
# list of victims
people_scammed = list(set([p[0] for p in all_disputed_transactions]))  # set() deduplicates
print('Total number of people scammed: {0}'.format(len(people_scammed)))  # number of victims
# list every disputed transaction
print('All disputed transactions:')
for dt in sorted(all_disputed_transactions, key=lambda e: e[0]):
    # BUG FIX: the original passed dt[2]['amount'] twice, so the "time"
    # field actually printed the amount.
    print('({0}, {1}: {{time:{2}, amount:{3}}})'.format(dt[0], dt[1], dt[2]['time'], dt[2]['amount']))
# each person's losses
transactions = c.defaultdict(list)  # defaultdict: dict with a list default factory
for p in all_disputed_transactions:
    transactions[p[0]].append(p[2]['amount'])
for p in sorted(transactions.items(), key=lambda e: np.sum(e[1]), reverse=True):  # victims from most to least affected
    print('Value lost by {0}: \t{1}'.format(p[0], np.sum(p[1])))
# identify the consumers whose card details leaked
people_scammed = c.defaultdict(list)
for (person, merchant, data) in fraud.edges(data=True):
    if data['disputed']:
        people_scammed[person].append(data['time'])
print('\nTotal number of people scammed: {0}'.format(len(people_scammed)))
# time of each victim's first fraudulent transaction
# scammed person
stolen_time = {}
for person in people_scammed:
    stolen_time[person] = np.min(people_scammed[person])  # earliest disputed-transaction time per victim
# find the merchants the fraud cases have in common
merchants = c.defaultdict(list)
for person in people_scammed:
    edges = fraud.out_edges(person, data=True)
    for (person, merchant, data) in edges:
        if stolen_time[person] - data['time'] <= 5 and stolen_time[person] - data['time'] >= 0:  # >= 0 keeps purchases made before the first fraud; the upper bound is how many days back to search for a common merchant
            merchants[merchant].append(person)
merchants = [(merch, len(set(merchants[merch]))) for merch in merchants]  # deduplicate buyers per merchant
print('\nTop 5 merchants where people made purchases')
print('shortly before their credit cards were stolen')
print(sorted(merchants, key=lambda e: e[1], reverse=True)[:5])  # all 33 victims shopped at merchant 4 the day before their first fraud
| true |
a8795e9ac091974e528cb30cff2e38826a8bdda1 | Python | ShreyasKadiri/Machine_Learning | /corelation.py | UTF-8 | 416 | 2.921875 | 3 | [] | no_license | import pandas as pd
import numpy as np  # BUG FIX: np.sqrt is used below but numpy was never imported
import pandas as pd
from sklearn.datasets import fetch_california_housing

# fetch a regression dataset
data = fetch_california_housing()
X = data["data"]
col_names = data["feature_names"]
y = data["target"]
# convert to pandas dataframe
df = pd.DataFrame(X, columns=col_names)
# introduce a highly correlated column
df.loc[:, "MedInc_Sqrt"] = df.MedInc.apply(np.sqrt)
# get correlation matrix (pearson); note the result is only displayed when
# run interactively (e.g. as the last expression of a notebook cell)
df.corr()
| true |
6f5194d4b67492e6e95277d9fbff27c6b0acdf40 | Python | ankawm/NowyProjektSages | /type_str_lit_emo.py | UTF-8 | 899 | 3.859375 | 4 | [] | no_license | """
* Assignment: Str Literals Emoticon
* Required: yes
* Complexity: easy
* Lines of code: 2 lines
* Time: 3 min
English:
1. Define `name` with value `Mark Watney`
2. Print `Hello World EMOTICON`, where:
3. EMOTICON is Unicode Codepoint "\U0001F600"
4. Run doctests - all must succeed
Polish:
1. Zdefiniuj `name` z wartością `Mark Watney`
2. Wypisz `Hello World EMOTICON`
3. EMOTICON to Unicode Codepoint "\U0001F600"
4. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> assert result is not Ellipsis, \
'Assign result to variable: `result`'
>>> assert type(result) is str, \
'Variable `result` has invalid type, should be str'
>>> '\U0001F600' in result
True
>>> result
'Hello World 😀'
"""
EMOTICON = '\U0001F600'  # U+1F600 "grinning face"
# str: Hello World EMOTICON
result = 'Hello World ' + EMOTICON
75322357dfa7a345795471be1b152fe9f3ae5c80 | Python | niteesh2268/coding-prepation | /leetcode/Problems/138--Copy-List-with-Random-Pointer-Medium.py | UTF-8 | 1,157 | 3.453125 | 3 | [] | no_license | """
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes carry an extra ``random``
        pointer (LeetCode 138).

        Replaces the original O(n^2) index-matching scan with two O(n)
        passes over a node -> clone mapping.
        """
        if not head:
            return None
        # Map each original node to its clone; None maps to None so that
        # next/random pointers falling off the list copy cleanly.
        clone = {None: None}
        node = head
        while node:
            clone[node] = Node(node.val)
            node = node.next
        # Second pass: wire up next and random through the mapping.
        node = head
        while node:
            clone[node].next = clone[node.next]
            clone[node].random = clone[node.random]
            node = node.next
        return clone[head]
e7fcdb4143d53ed5d274f8238a45df4346e91363 | Python | xfgao/VRKitchen | /Script/tool_pos.py | UTF-8 | 3,919 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | tool_pos = {}
def _pose(x, y, z, yaw=0.0):
    """Build one actor placement record: a world-space location plus a
    yaw-only rotation (pitch and roll are always zero in this table)."""
    return {"Actor": {"Loc": {"X": x, "Y": y, "Z": z},
                      "Rot": {"Pitch": 0.0, "Yaw": yaw, "Roll": 0.0}}}


# Per-level placement tables, keyed by level id, then by tool/appliance name.
tool_pos = {}
tool_pos["2"] = {
    "Orig": _pose(0.0, 0.0, 0.0),
    "Grater": _pose(60.0, -24.0, 0.0),
    "SauceBottle": _pose(54.0, 20.0, 0.0),
    "Knife": _pose(60.0, 89.0, 0.0),
    "Peeler": _pose(59.0, 200.0, 0.0),
    "Juicer": _pose(41.0, -42.0, 0.0, yaw=-90.0),
    "Oven": _pose(-20.0, -0.0, 0.0, yaw=-90.0),
    "Stove": _pose(-20.0, -42.0, 0.0, yaw=-90.0),
    "Fridge": _pose(-27.0, 125.0, 0.0, yaw=90.0),
}
tool_pos["3"] = {
    "Orig": _pose(0.0, 0.0, 0.0),
    "Grater": _pose(158.0, -205.0, 0.0),
    "SauceBottle": _pose(155.0, 92.0, 0.0),
    "Knife": _pose(95.0, 99.0, 0.0, yaw=90.0),
    "Juicer": _pose(155.0, 34.0, 0.0),
    "Oven": _pose(130.0, -44.0, 0.0),
    "Stove": _pose(158.0, -36.0, 0.0),
    "Fridge": _pose(27.0, 78.0, 0.0, yaw=90.0),
}
tool_pos["5"] = {
    "Orig": _pose(0.0, 0.0, 5.0, yaw=90.0),
    "Grater": _pose(85.0, 204.0, 5.0, yaw=90.0),
    "SauceBottle": _pose(-81.0, 204.0, 5.0, yaw=90.0),
    "Knife": _pose(-147.0, 199.0, 5.0, yaw=180.0),
    "Peeler": _pose(104.0, 146.0, 5.0),
    "Juicer": _pose(-143.0, 202.0, 5.0, yaw=90.0),
    "Oven": _pose(-123.0, 125.0, 5.0, yaw=180.0),
    "Stove": _pose(-145.0, 111.0, 5.0, yaw=180.0),
    "Fridge": _pose(69.0, 69.0, 5.0),
}
tool_pos["6"] = {
    "Orig": _pose(0.0, 0.0, 5.0, yaw=-90.0),
    "Grater": _pose(80.0, -61.0, 5.0, yaw=-90.0),
    "SauceBottle": _pose(-134.0, -61.0, 5.0, yaw=-90.0),
    "Knife": _pose(227.0, -61.0, 5.0, yaw=-90.0),
    "Juicer": _pose(-74.0, -61.0, 5.0, yaw=-90.0),
    "Oven": _pose(-12.0, -40.0, 5.0, yaw=-90.0),
    "Stove": _pose(-2.0, -60.0, 5.0, yaw=-90.0),
    "Fridge": _pose(150.0, 32.0, 5.0),
}
756c30e1a019f04f9bddf6ec5d51e5a01c5ebf97 | Python | CamachoBry/abm-environment | /agents.py | UTF-8 | 3,591 | 3.328125 | 3 | [] | no_license | from mesa import Agent
from random_walk import RandomWalk
class GrassPatch(Agent):
    """A patch of grass that regrows after a fixed delay once eaten."""

    def __init__(self, unique_id, pos, model, fully_grown, countdown):
        super().__init__(unique_id, model)
        self.pos = pos
        self.fully_grown = fully_grown  # True while the patch is edible
        self.countdown = countdown      # steps remaining until regrowth

    def step(self):
        """Advance regrowth by one tick; no-op while fully grown."""
        if self.fully_grown:
            return
        if self.countdown > 0:
            self.countdown -= 1
        else:
            # Regrown: become edible and reset the regrowth timer.
            self.fully_grown = True
            self.countdown = self.model.grass_regrowth_time
class Bunny(RandomWalk):
    '''
    Bunny walks around, reproduces and eats
    '''
    # Class-level default; each instance gets its own value in __init__.
    energy = None
    def __init__(self, unique_id, pos, model, moore, energy=None):
        super().__init__(unique_id, pos, model, moore=moore)
        self.energy = energy
    def step(self):
        '''
        A model step. Move, then eat, then reproduce
        '''
        self.random_move()
        living = True
        # Energy/eating/death only apply when the model tracks grass.
        if self.model.grass:
            # Reduce energy (metabolic cost of the move)
            self.energy -= 1
            # If there is grass available, eat
            this_cell = self.model.grid.get_cell_list_contents([self.pos])
            # NOTE(review): assumes every cell holds a GrassPatch agent;
            # [0] would raise IndexError otherwise — confirm model setup.
            grass_patch = [obj for obj in this_cell if isinstance(obj, GrassPatch)][0]
            if grass_patch.fully_grown:
                self.energy += self.model.bunny_gain_from_food
                grass_patch.fully_grown = False
            # Death: starved bunnies leave the grid and the scheduler.
            if self.energy < 0:
                self.model.grid._remove_agent(self.pos, self)
                self.model.schedule.remove(self)
                living = False
        # Survivors reproduce with probability bunny_reproduce.
        if living and self.random.random() < self.model.bunny_reproduce:
            # Create baby bunny
            if self.model.grass:
                # Parent splits its energy with the offspring.
                self.energy /= 2
            baby_bunny = Bunny(self.model.next_id(), self.pos, self.model, self.moore, self.energy)
            self.model.grid.place_agent(baby_bunny, self.pos)
            self.model.schedule.add(baby_bunny)
class Fox(RandomWalk):
    '''
    A fox that walks around, eats bunnies and reproduces
    '''
    # Class-level default; each instance gets its own value in __init__.
    energy = 0

    def __init__(self, unique_id, pos, model, moore, energy=None):
        super().__init__(unique_id, pos, model, moore=moore)
        self.energy = energy

    def step(self):
        """Move, pay metabolic cost, hunt, then die of starvation or
        possibly reproduce. (Removed the original's unused
        ``x, y = self.pos`` unpacking.)"""
        self.random_move()
        self.energy -= 1
        # If there are bunnies present, eat one and remove it from the
        # grid and the scheduler.
        this_cell = self.model.grid.get_cell_list_contents([self.pos])
        bunny = [obj for obj in this_cell if isinstance(obj, Bunny)]
        # If more than one bunny around, randomly choose one
        if len(bunny) > 0:
            bunny_to_eat = self.random.choice(bunny)
            self.energy += self.model.fox_gain_from_food
            # Kill/remove the bunny from scheduler and grid
            self.model.grid._remove_agent(self.pos, bunny_to_eat)
            self.model.schedule.remove(bunny_to_eat)
        # If fox starves it leaves the simulation; otherwise it may breed.
        if self.energy < 0:
            self.model.grid._remove_agent(self.pos, self)
            self.model.schedule.remove(self)
        else:
            if self.random.random() < self.model.fox_reproduce:
                # Create baby fox; parent splits its energy with it.
                self.energy /= 2
                foxlet = Fox(self.model.next_id(), self.pos, self.model, self.moore, self.energy)
                self.model.grid.place_agent(foxlet, foxlet.pos)
                self.model.schedule.add(foxlet)
eeda8ead315d8ee8c102481e963a0a99d600103d | Python | liliarose/ComputerScienceforInsight | /hw3/old_hw3pr2.py | UTF-8 | 10,885 | 3.5625 | 4 | [] | no_license | #
# hw3pr2.py
#
# Person or machine? The rps-string challenge...
#
# This file should include your code for
# + extract_features( rps ), returning a dictionary of features from an input rps string
# + score_features( dict_of_features ), returning a score (or scores) based on that dictionary
# + read_data( filename="rps.csv" ), returning the list of datarows in rps.csv
#
# Be sure to include a short description of your algorithm in the triple-quoted string below.
# Also, be sure to include your final scores for each string in the rps.csv file you include,
# either by writing a new file out or by pasting your results into the existing file
# And, include your assessment as to whether each string was human-created or machine-created
#
#
"""
Short description of (1) the features you compute for each rps-string and
(2) how you score those features and how those scores relate to "humanness" or "machineness"
because
"""
# Here's how to machine-generate an rps string.
# You can create your own human-generated ones!
import random
import csv
import re
import math
def gen_rps_string(num_characters):
""" return a uniformly random rps string with num_characters characters """
result = ''
for i in range( num_characters ):
result += random.choice( 'rps' )
return result
# Here are two example machine-generated strings:
rps_machine = [gen_rps_string(200) for i in range(500)]
# rps_machine1 = gen_rps_string(200)
# rps_machine2 = gen_rps_string(200)
# print those, if you like, to see what they are...
# from geeksforgeeks, didn't have enough t
def longestRepeatedSubstring(str):
n = len(str)
LCSRe = [[0 for x in range(n + 1)] for y in range(n + 1)]
res = "" # To store result
res_length = 0 # To store length of result
index = 0
for i in range(1, n + 1):
for j in range(i + 1, n + 1):
if (str[i - 1] == str[j - 1] and
LCSRe[i - 1][j - 1] < (j - i)):
LCSRe[i][j] = LCSRe[i - 1][j - 1] + 1
if (LCSRe[i][j] > res_length):
res_length = LCSRe[i][j]
index = max(i, index)
else:
LCSRe[i][j] = 0
if (res_length > 0):
for i in range(index - res_length + 1, index + 1):
res = res + str[i - 1]
return res
from collections import defaultdict
def mRSinLRS(string):
substring = string
cString = string
while len(substring) > 1:
cString = substring
substring = longestRepeatedSubstring(cString)
if(string.count(cString) > 1):
return (cString, string.count(cString))
return False
def score_find(data, times=1):
# input: just the strings themselves & the number of times mRSinLRS should be found
scores = [0] * len(data)
subStrings = [[0] for i in range(len(data))]
print(subStrings)
for i in range(len(data)):
cString = data[i]
for j in range(times):
if j != 0 and len(subStrings) == j and subStrings[i][j-1][1]>1:
t = mRSinLRS(cString)
if t:
subStrings[i].append(mRSinLRS(cString))
scores[i] += subStrings[i][j][0] * subStrings[i][j][1]
cString = re.sub(subStrings[i][j][0], 'y', cString)
print(scores[i])
return scores
# (scores, subStrings)
"""
def removeLRS(data):
listOfLRS = [0] * len(data)
data2 = [0] * len(data)
flag = 0
for i in range(len(data)):
cLRS = longestRepeatedSubstring(data[i])
listOfLRS[i] = [(cLRS, data[i].count(cLRS))]
# print(cLRS)
if len(cLRS) > 2:
data2[i] = re.sub(cLRS, '', data[i])
else:
flag +=1
while flag < len(data2):
for i in range(len(data2)):
if len(listOfLRS[i][-1][0]) > 2:
cLRS = longestRepeatedSubstring(data2[i])
listOfLRS[i].append((cLRS, data2[i].count(cLRS))) #(cLRS, data2[i].count(cLRS)))
# print(cLRS)
if(len(cLRS) > 2):
data2[i] = re.sub(cLRS, '', data2[i])
else:
flag += 1
print("flag:", flag)
return (listOfLRS, data2)
"""
"""
def removeLRS(data, time = 10):
listOfLRS = [0] * len(data)
data2 = [0] * len(data)
for i in range(len(data)):
cLRS = longestRepeatedSubstring(data[i])
listOfLRS[i] = [(cLRS, data[i].count(cLRS))]
data2[i] = re.sub(cLRS, '', data[i])
for j in range(time):
for i in range(len(data2)):
if len(listOfLRS[i][-1][0]) > 2:
cLRS = longestRepeatedSubstring(data2[i])
listOfLRS[i].append((cLRS, data2[i].count(cLRS))) #(cLRS, data2[i].count(cLRS)))
#print(listOfLRS[i])
if(len(cLRS) > 2):
data2[i] = re.sub(cLRS, '', data2[i])
print(j)
return (listOfLRS, data2)
def score_features(data1, data2):
score = [0] * len(data1)
for i in range(len(data1)):
score[i] = (len(data1[2]) - len(data2[1][i])) # + len(data2[0][i]))/len(data1[2]), 2)
return score # return a humanness or machineness score
def score_features2(data1, data2):
patternLengths = [0] * len(data2[0])
for i in range(len(data2[0])):
for s in data2[0][i]:
patternLengths[i] += len(s)
score = [0] * len(data1)
for i in range(len(data1)):
score[i] = (len(data1[2]) - len(data2[1][i]) - patternLengths[i])
return score
def score_features4(data1, data2):
score = [0] * len(data1)
for i in range(len(data1)):
for s in data2[0][i]:
score[i] += len(s[0])* (s[1]**2)
return score
def editDistDP(str1, str2, m, n):
dp = [[0 for x in range(n+1)] for x in range(m+1)]
for i in range(m+1):
for j in range(n+1):
if i == 0:
dp[i][j] = j # Min. operations = j
elif j == 0:
dp[i][j] = i # Min. operations = i
elif str1[i-1] == str2[j-1]:
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = 1 + min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1]) # Replace
return dp[m][n]
def score_features4(data1, data2, )
def score_features3(data1, data2, times=5, times2=3):
scoreT = [[0] for i in data1]
for i in range(len(data2[0])):
for j in range(max(min(len(data2[0][i])-1, times), 0)):
for k in range(j+1, min(len(data2[0][i]), times)):
x = editDistDP(data2[0][i][j][0], data2[0][i][k][0], len(data2[0][i][j][0]), len(data2[0][i][k][0]))
# data2[0][i][k][1]
#print(i, ": ", x, type(x), data2[0][i][k][1], type(data2[0][i][k][1]))
scoreT[i].append(pow(x, data2[0][i][k][1]/10))
#score[i] += editDistDP(data2[0][i][j][0], data2[0][i][k][0], len(data2[0][i][j][0]), len(data2[0][i][k][0]))*data2[0][i][k][1]
#print(scoreT)
score = [0] * len(data1)
for i in range(len(data2[0])):
for j in range(times):
if len(scoreT[i]) > 0:
t = max(scoreT[i])
#print(score[i])
score[i] += t #(times2-j)*(times2-j(times2-j))
scoreT[i].remove(t)
score[i] = int(score[i]*(10**1))
print(score[i])
return score
"""
def readcsv(csv_file_name):
""" readcsv takes as
+ input: csv_file_name, the name of a csv file
and returns
+ output: a list of lists, each inner list is one row of the csv
all data items are strings; empty cells are empty strings
in this format:
[ item1, item2...]
"""
try:
csvfile = open( csv_file_name, newline='' ) # open for reading
csvrows = csv.reader( csvfile ) # creates a csvrows object
all_rows = [] # we need to read the csv file
for row in csvrows: # into our own Python data structure
all_rows.append(row) # adds only the word to our list
del csvrows # acknowledge csvrows is gone!
csvfile.close() # and close the file
return all_rows # return the list of lists
except FileNotFoundError as e:
print("File not found: ", e)
return []
def write_to_csv(filename, d):
""" readcsv takes as
+ input: csv_file_name, the name of a csv file
and returns
+ output: a list of lists, each inner list is one row of the csv
all data items are strings; empty cells are empty strings
"""
try:
csvfile = open( filename, "w", newline='' )
filewriter = csv.writer( csvfile, delimiter=",")
for row in d:
filewriter.writerow(row)
csvfile.close()
except Exception as e:
print(e)
print("File", filename, "could not be opened for writing...")
#
# you'll use these three functions to score each rps string and then
# determine if it was human-generated or machine-generated
# (they're half and half with one mystery string)
#
# Be sure to include your scores and your human/machine decision in the rps.csv file!
# And include the file in your hw3.zip archive (with the other rows that are already there)
#
def calcMach(scores, cutoff):
machine = 0
for score in scores:
if score > cutoff:
machine += 1
return machine
def calcDist(scores):
dist = defaultdict(int)
for score in scores:
dist[score] += 1
return dist
data = readcsv("cs35rps.csv")
actualData = [data[i][2] for i in range(len(data))]
"""
times = 10
data2 = removeLRS(actualData)
scores = score_features3(actualData, data2)
maxTimes = [0] * len(scores)
for i in range(len(scores)):
for j in range(len(data2[0][i])):
if data2[0][i][j][1] > maxTimes[i]:
maxTimes[i] = data2[0][i][j][1]
result = [[data[i][2], data2[1][i], maxTimes[i], scores[i] ] for i in range(len(scores))]
# result = [ [data[i][0], data[i][2], data2[1][i], len(data2[1][i]), scores[i] ] for i in range(len(data2[1]))]
print(calcDist(scores))
tmp = removeLRS(rps_machine)
scores = score_features3(rps_machine, tmp)
result2 = [ [ rps_machine[i], scores[i] ] for i in range(len(scores))]
calc = calcMach(scores, 200)
print(calcDist(scores))
print(calc, len(scores) - calc)
"""
times = 1
scores = score_find(actualData, 3)
# scores = data2[0]
# print(scores)
result = [[data[i][2], scores[i] ] for i in range(len(scores))]
scores = score_find(rps_machine)
# scores = tmp[0]
# print(scores)
result2 = [ [ rps_machine[i], scores[i] ] for i in range(len(scores))]
write_to_csv("result.csv", result)
write_to_csv("result2.csv", result2)
| true |
9a7fb4309d1d4ef517ff09d58faae7666ab11f5e | Python | Solanar/CMPUT313_Asn1 | /ESIM/main.py | UTF-8 | 7,145 | 2.703125 | 3 | [] | no_license | import sys
from transmitter import Transmitter
from simulate_transmission import Simulator
from receiver import Receiver, OneBitError, MultipleBitErrors
from statistics import Statistics
# Keys into parameter_dict, in the same order the CLI expects them.
A = 'A' # Response overhead bit time units
K = 'K' # Number of blocks frame is broken into num blocks
F = 'F' # Frame size (in bits) frame size
E = 'E' # Probability of a bit error bit error probability
R = 'R' # Simulation length in bit_trials_time_units length in bit time units
T = 'T' # Trials num trials
TSeeds = "T Seeds" # t seeds for trials
# Default simulation parameters; overwritten from argv by get_arguments().
parameter_dict = {
    A: 500,
    K: 400, # 0, 1, 2, > 2 (but multiple of R)
    F: 4000,
    E: 0.0001,
    R: 400000,
    T: 5,
    TSeeds: [1534546, 2133323, 377, 456548, 59998]
}
def start():
    """Run the ESIM simulation.

    Parses CLI parameters, then runs T independent trials of R bit-time
    units each; every trial repeatedly transmits frames of K blocks
    (or one block when K == 0), accumulating throughput and
    frames-per-success statistics, and finally prints confidence
    intervals.
    """
    get_arguments()
    # Transmitter.transmit returns the new size of a block
    new_block_size = Transmitter.transmit(parameter_dict[K],
                                          parameter_dict[F])
    # for T trials, repeat the simulation
    for i in range(parameter_dict[T]):
        # clear this trial's variables
        trials_time = 0
        trials_received_frames = 0
        trials_failed_frames = 0
        # Set the first seed for the simulation
        Simulator.set_seed(parameter_dict[TSeeds][i])
        while (trials_time <= parameter_dict[R]):
            trials_received_blocks = 0 # new frame
            trials_failed_blocks = 0 # new frame
            # set the number of blocks to be transmitted in this frame
            transmissions = parameter_dict[K]
            if (parameter_dict[K] == 0):
                transmissions = 1
            # For K blocks (or 1 if K == 0), simulate the transmission
            for j in range(transmissions): # range starts at 0
                # block_failure = 0 if the block was transmitted
                # successfully (or its single-bit error was corrected)
                block_failure = handle_block(new_block_size,
                                             parameter_dict[E],
                                             parameter_dict[K])
                # record block success or failure
                if (block_failure > 0):
                    trials_failed_blocks += 1
                else:
                    trials_received_blocks += 1
            # advance time by the frame bits, per-block CRC overhead and
            # the response overhead
            trials_time += (parameter_dict[F] +
                            (parameter_dict[K] * Transmitter.r)
                            + parameter_dict[A])
            # update number of transmitted frames
            Statistics.update(Statistics.total_frames)
            # frame failed, resend the frame
            if(trials_failed_blocks >= 1):
                trials_failed_frames += 1
            # the last frame being sent (no longer needed) see forums
            #elif(trials_time > parameter_dict[R]):
            #    pass
            # successful transmition
            else:
                Statistics.update(Statistics.correctly_received_frames)
                trials_received_frames += 1
        #a print("Trial number:", i)
        #a print("Received Frames", trials_received_frames)
        #a print("Failed Frames", trials_failed_frames)
        # Assume: Take all K*(F+r) trials_time units into account
        # even if in last frame
        Statistics.append(Statistics.throughput_averages,
                          ((parameter_dict[F] * trials_received_frames)
                           / trials_time))
        if(trials_received_frames != 0):
            # Assume: Take all frames into account, even last frame
            Statistics.append(Statistics.frame_averages,
                              (trials_received_frames + trials_failed_frames) /
                              trials_received_frames)
        else:
            Statistics.append(Statistics.frame_averages, 0)
        # Add to total time
        Statistics.statistics_dict[Statistics.total_time] += trials_time
    # Call Print Statements
    #a print()
    #a print("----------------------------------------------")
    print_input(sys.argv)
    Statistics.set_final_values(parameter_dict[F], parameter_dict[R])
    Statistics.print_frame_ci()
    Statistics.print_throughput_ci()
    # stat_dict = Statistics.statistics_dict
    # ci_high = stat_dict[Statistics.final_frame_ci].split()[1][:-1]
    # print(parameter_dict[E],
    #       parameter_dict[K],
    #       Statistics.statistics_dict[Statistics.final_frame_average],
    #       ci_high)
    # ci_high = stat_dict[Statistics.final_throughput_ci].split()[1][:-1]
    # print(parameter_dict[E],
    #       parameter_dict[K],
    #       Statistics.statistics_dict[Statistics.final_throughput],
    #       str(float(ci_high) -
    #           Statistics.statistics_dict[Statistics.final_throughput]))
    #a print("----------------------------------------------")
    #Statistics.print_block_ci()
    #a print()
    #Statistics.print_all()
def get_arguments():
    """Overwrite parameter_dict from the command line.

    Expected argv (after the script name): A K F E R T seed_1 ... seed_T.

    Bug fixes vs. the original:
    * ``is``/``is not`` were used to compare ints (identity, not equality);
    * TSeeds was "resized" with ``[] * n``, which is always an empty list,
      so the subsequent index assignment raised IndexError whenever T
      differed from the default of 5.
    """
    # NOTE: <= 8 also rejects T == 1 runs; preserved from the original.
    if (len(sys.argv) <= 8):
        print("Not enough arguments.")
        return
    # overwrite parameter_dict with arguments; argv[0] is the file name
    parameter_dict[A] = int(sys.argv[1])
    parameter_dict[K] = int(sys.argv[2])
    parameter_dict[F] = int(sys.argv[3])
    parameter_dict[E] = float(sys.argv[4])
    parameter_dict[R] = int(sys.argv[5])
    num_trials = int(sys.argv[6])
    if len(sys.argv) != num_trials + 7:
        print("Incorrect number of seed arguments.")
        return
    parameter_dict[T] = num_trials
    parameter_dict[TSeeds] = [int(sys.argv[7 + i]) for i in range(num_trials)]
def handle_block(new_block_size, E, K):
    """Simulate one block transmission.

    Returns 0 when the block arrives intact (or its single-bit error is
    correctable because K != 0); otherwise returns the bit-error count.
    Updates the shared Statistics counters as a side effect.
    """
    # Simulator.simulate returns the number of bit errors in each block
    bit_errors = Simulator.simulate(new_block_size, E)
    Statistics.update(Statistics.total_transmitions)
    if (bit_errors != 0):
        Statistics.update(Statistics.block_errors)
    try:
        # Receiver.receive raises OneBitError / MultipleBitErrors as needed.
        Receiver.receive(bit_errors)
        Statistics.update(Statistics.no_error)
        Statistics.update(Statistics.correctly_received_blocks)
        return 0
    except OneBitError:
        Statistics.update(Statistics.one_bit_error)
        # With K != 0 the per-block code can correct a single bit error.
        if (K != 0):
            Statistics.update(Statistics.correctly_received_blocks)
            # Assume: Fixing the error requires 0 trials_time units
            return 0
        return bit_errors
    except MultipleBitErrors:
        Statistics.update(Statistics.multiple_bit_errors)
        return bit_errors
def print_input(args):
    """Print the command-line arguments (excluding the program name),
    separated by single spaces.

    Fix vs. the original: no longer mutates *args* (the original popped
    the first element off the caller's list, i.e. off sys.argv) and no
    longer builds the line with repeated string concatenation.
    """
    print(" ".join(str(arg) for arg in args[1:]))
if __name__ == "__main__":
start()
| true |
8332ef089fea92ba25e044eba58d13c4b5d3521c | Python | ChristopherStavros/Python_Study | /Projects/OOP_and_Postgres/movie-system/app.py | UTF-8 | 2,341 | 3.78125 | 4 | [] | no_license | from user import User
import json, os
def menu():
    """Interactive movie-list menu.

    Loads the user's saved data from ``<name>.json`` when present
    (otherwise starts a fresh User), then loops over single-letter
    commands until 'q'.

    Fixes vs. the original: the long prompt literal was duplicated
    (before the loop and at the bottom of it), and the in-loop
    ``elif user_input == 'q'`` branch was unreachable because the loop
    condition had already excluded 'q'.
    """
    # Ask for the user's name
    name = input("Enter your name: ")
    # Check if a file exists for that user: welcome back + load data,
    # otherwise create a fresh User object.
    filename = "{}.json".format(name)
    if file_exists(filename):
        with open(filename, 'r') as f:
            try:
                json_data = json.load(f)
            except json.decoder.JSONDecodeError:
                print("Invalid JSON file")
                return
        user = User.from_json(json_data)
    else:
        user = User(name)

    while True:
        user_input = input('''Enter:
'a' to add a movie,
'm' to see the list of movies,
'w' to set a movie as watched,
'd' to delete a movie,
'l' to see a list of watched movies,
's' to save,
'q' to quit
''')
        if user_input == 'q':
            return
        if user_input == 'a':
            movie_name = input("Enter the movie name: ")
            movie_genre = input("Enter the genre: ")
            user.add_movie(movie_name, movie_genre)
        elif user_input == 'm':
            for movie in user.movies:
                print("Name: {name}, Genre: {genre}, Watched: {watched}".format(**movie.json()))
        elif user_input == 'w':
            movie_name = input("Enter the movie name to set as watched: ")
            user.set_watched(movie_name)
        elif user_input == 'd':
            movie_name = input("Enter the movie name to delete: ")
            user.delete_movie(movie_name)
        elif user_input == 'l':
            for movie in user.watched_movies():
                print("Name: {name}, Genre: {genre}, Watched: {watched}".format(**movie.json()))
        elif user_input == 's':
            with open(filename, 'w') as f:
                json.dump(user.json(), f)
        else:
            print("That is not a valid choice")
def file_exists(filename):
    """Return True when *filename* names an existing regular file."""
    return os.path.isfile(filename)
# Start the interactive menu (note: runs on import as well as direct execution).
menu()
| true |
4a2c1f16e25b32e4cf32315a356423d762e5385d | Python | atg-abhijay/LeetCode_problems | /binary_gap_868.py | UTF-8 | 739 | 3.625 | 4 | [] | no_license | """
URL of problem:
https://leetcode.com/problems/binary-gap/description/
"""
def main(num):
    """Print and return the "binary gap" of *num*.

    The binary gap is the longest run of zeros between two 1-bits in the
    binary representation of *num* (0 when fewer than two 1-bits exist).
    The result is printed (preserving the original CLI behaviour) and also
    returned so the function is usable programmatically.
    """
    bin_num = bin(num)[2:]
    max_dist = 0
    prev_one = None  # index of the most recently seen '1' bit
    for idx, digit in enumerate(bin_num):
        if digit == '1':
            if prev_one is not None:
                max_dist = max(max_dist, idx - prev_one)
            prev_one = idx
    print("Max distance:", max_dist)
    return max_dist
if __name__ == '__main__':
    # Read one integer from the user and report its binary gap.
    user_value = int(input("Give a number: "))
    main(user_value)
| true |
8b5b8edcaa925fa786c09159b76aee8511c8a12e | Python | gonrodri18/Python | /Listas y tuplas/Ejercicio13.py | UTF-8 | 542 | 4.34375 | 4 | [] | no_license | #Escribir un programa que pregunte por una muestra de números, separados por comas, los guarde en una tupla y muestre por pantalla su media y desviación típica.
# Read a comma-separated sample of numbers, keep them as a tuple and print
# the mean and the (population) standard deviation.
numeros = input('introduce un muestra de númros separada por comas:')
# Renamed accumulators: the original used `sum`, shadowing the builtin.
valores = tuple(int(x) for x in numeros.split(','))
n = len(valores)
total = sum(valores)
total_cuadrados = sum(v ** 2 for v in valores)
mean = total / n
# Population std-dev: sqrt(E[x^2] - E[x]^2)
stdev = (total_cuadrados / n - mean ** 2) ** 0.5
print('La media es', mean, ', y la desviación típica es', stdev)
| true |
6a3f9f968da8db0c591cc87e12dd773a525b8796 | Python | shocker8786/scripts | /python_scripts/fastq.py | UTF-8 | 182 | 2.5625 | 3 | [] | no_license | import sys
# Repair a FASTQ-ish stream read from stdin:
#   - lines starting with 'HWI' are read headers -> prefix '@'
#   - blank lines become the '+' separator line
#   - everything else (sequence / quality lines) passes through unchanged.
# Fix: the original used Python-2-only `print x` statements; single-argument
# `print(x)` produces identical output on Python 2 and is valid Python 3.
for line in sys.stdin:
    line = line.strip()
    if line[0:3] == 'HWI':
        line = '@' + line
        print(line)
    elif not line.strip():
        line = '+'
        print(line)
    else:
        print(line)
| true |
8c03ce71210d1ea732a61402ac527d807ce72e8f | Python | standbyme227/project_with_jtlim | /first.py | UTF-8 | 2,247 | 3.640625 | 4 | [] | no_license | class Human:
# success = 0
# failure = 0
    def __init__(self, id, height, weight, fatigue):
        """Store the trainee's basic stats; BMI starts unset."""
        self.id = id
        self.height = height  # centimetres (set_bmi divides by 100)
        self.weight = weight  # kilograms (per the BMI formula below)
        self.fatigue = fatigue
        self.bmi = None  # computed lazily by set_bmi()
def set_bmi(self):
self.bmi = round(self.weight / ((self.height / 100) ** 2), 1)
print("{}의 BMI는 {}".format(self.id, self.bmi))
# def add_success(self):
# if round(self.bmi, 1) == 23:
# self.success += 1
#
# def add_failure(self):
# if self.fatigue == 100:
# self.failure += 1
class Workout:
    """Tracks a block of personal-training sessions and nudges a trainee's
    weight toward a rounded BMI of 23."""
    def __init__(self, pt_count):
        """*pt_count*: number of training sessions remaining."""
        self.human = None
        self.pt_count = pt_count
    def exercise(self, human):
        """One session: move weight 0.2 kg toward the target, add fatigue."""
        human.set_bmi()
        bmi_now = round(human.bmi)
        if bmi_now == 23:
            # Target reached - nothing to adjust.
            print("운동 끝!!!!!!")
            return
        delta = 0.2 if bmi_now < 23 else -0.2
        human.weight += delta
        human.fatigue += 10
        human.set_bmi()
        print(round(human.weight))
    def rest(self, human):
        """A rest day: recompute BMI and shed 20 fatigue."""
        human.set_bmi()
        human.fatigue -= 20
        print(round(human.weight))
# class Scheduler:
# def __init__(self, human, weekdays=None):
# self.human = human
# if not weekdays:
# print('요일을 지정해주세요')
# pass
# else:
# self.weekdays = weekdays
#
# def set_schedule(self, human):
if __name__ == '__main__':
    # Demo run: create a trainee and train until either the rounded BMI hits
    # 23 or the session budget (10) runs out; rest when fatigue is too high.
    human = Human(1, 177, 75, fatigue=20)
    print(human.height)
    human.set_bmi()
    print(human.bmi)
    print(human.weight)
    workout = Workout(10)
    while workout.pt_count > 0:
        if human.fatigue < 90:
            # Fresh enough to train - consumes one session.
            workout.pt_count -= 1
            print("운동을 시작하지")
            workout.exercise(human)
        elif round(human.bmi) == 23:
            # Exhausted but already at target: one final check, then stop.
            workout.exercise(human)
            print(workout.pt_count)
            break
        else:
            # Too tired and not at target: rest (does not consume a session).
            print("오늘은 좀 쉬어보자")
            workout.rest(human)
    human.set_bmi()
    print(human.bmi)
| true |
e26f4b4cdb025fbcec07385104516470bc4457bc | Python | lordjuacs/ICC-Trabajos | /Ciclo 1/Lab ICC/PC/mayor_menor.py | UTF-8 | 382 | 4.1875 | 4 | [] | no_license | n = int(input("Ingrese N: "))
# Track the oldest and youngest ages within the accepted 30-65 range.
# Renamed from max/min: the original shadowed the builtins. The sentinels
# (29 and 66) sit just outside the valid range so any accepted age wins.
mayor = 29
menor = 66
imprime = False
for i in range(1, n + 1):
    edad = int(input("Ingrese edad " + str(i) + ": "))
    if 30 <= edad <= 65:
        imprime = True
        mayor = max(mayor, edad)
        menor = min(menor, edad)
# bool * str prints an empty line when no age fell inside the range.
print(imprime * ("El mayor es: " + str(mayor)))
print(imprime * ("El menor es: " + str(menor)))
| true |
35d0dac1eb6679195d4dd24ce2aff5285987b555 | Python | psm651/python-algorithm | /baekjoon10996.py | UTF-8 | 380 | 3.5 | 4 | [] | no_license | val = int(input())
# BOJ 10996: print a checkerboard of '*' cells. Each of the val passes emits
# a '*'-first line and, when val > 1, a ' '-first line (for val == 1 the
# single '*' line is all that is printed).
for _row in range(val):
    first_line = ''.join('*' if col % 2 == 1 else ' ' for col in range(1, val + 1))
    print(first_line)
    if val > 1:
        second_line = ''.join(' ' if col % 2 == 1 else '*' for col in range(1, val + 1))
        print(second_line)
| true |
8f8a77cba95ad57fc05346f59f41e42febe41230 | Python | pdaian/mev | /parse_output.py | UTF-8 | 415 | 2.921875 | 3 | [] | no_license | import os
# Parse model-checker output: count explored states, find the maximum miner
# balance reported on "0 in 0 |->" lines, then show that line's context.
# Fix: read the file via a context manager so the handle is always closed.
with open('out2') as fh:  # NOTE(review): reads 'out2' but greps 'out' below - confirm intentional
    out = fh.read()
states = out.count("#Or")
print("Found %d states." % (states))
max_amt = -1
for line in out.splitlines():
    # Balance lines carry the "0 in 0 |-" marker starting at column 8.
    if "0 in 0 |-" in line and line.index("0 in 0 |-") == 8:
        amt = int(line.split()[-1])
        max_amt = max(max_amt, amt)
        print(amt)
print("miner makes at most %d" % (max_amt))
os.system("grep -C 20 '0 in 0 |-> %d' out" % (max_amt))
| true |
2fb3894a3eb5aa81782e31b46e0fc5d32e451ba0 | Python | matiasandina/useful_functions | /listdir_fullpath.py | UTF-8 | 871 | 2.90625 | 3 | [] | no_license | # This function returns the full path
# It tries to be an analogous of list.files in R...still work to do
import os
import numpy as np
def listdir_fullpath(root_dir, file_pattern=None, file_extension=None, exclude_dir=True):
    """Return the sorted full paths of entries inside *root_dir*.

    :param file_pattern: if given, keep only paths containing this substring
        (matched against the full joined path, as before).
    :param file_extension: if given, keep only names ending with it.
    :param exclude_dir: drop sub-directories from the result (default True).
    :returns: sorted list of path strings.

    Rewritten without numpy: the original converted to an ndarray and back
    just to apply a boolean mask, and shadowed the builtins ``file``/``files``.
    """
    names = os.listdir(root_dir)
    if file_extension is not None:
        names = [n for n in names if n.endswith(file_extension)]
    file_list = [os.path.join(root_dir, n) for n in names]
    if file_pattern is not None:
        file_list = [p for p in file_list if file_pattern in p]
    if exclude_dir:
        file_list = [p for p in file_list if not os.path.isdir(p)]
    return sorted(file_list)
717d938ab7985e2f0adfa81c37e883bcc6f3f206 | Python | akshat12000/Python-Run-And-Learn-Series | /Codes/80) Functions_returning_two_values.py | UTF-8 | 300 | 3.96875 | 4 | [] | no_license | # Functions returning two values
def operations(a, b):
    """Return the sum and the product of *a* and *b* as a 2-tuple."""
    return a + b, a * b
# Demo: the two return values arrive packed in a tuple and can be unpacked.
a, b = input("Enter two numbers ").split()
res = operations(int(a), int(b))  # res is the (sum, product) tuple
add, mul = res
print(type(res))
print(res)
print(add)
print(mul)
9dbfba671391c99d1dc714c5fe0a1241a79a02ae | Python | Bumskee/-Part-2-Week-2-assignment-21-09-2020 | /Problem 1.py | UTF-8 | 569 | 4.6875 | 5 | [] | no_license | """Problem 1 Assigning angle's value to the valuable degrees then converting that value to radian and then assigning the value to the variable radian"""
#A function that assigns an angle as a value for degrees then converting it to a radian value then printing the values of degrees and radians
def degToRad(angle, pi=3.14):
    """Convert *angle* (degrees) to radians and print both values.

    Keeps the original module-global side effect (sets ``degrees`` and
    ``radians``, as the exercise requires) and additionally returns the
    ``(degrees, radians)`` tuple so the function is usable programmatically.
    """
    global degrees, radians
    degrees = angle
    radians = degrees * pi / 180
    print("Degrees =", str(degrees))
    print("Radians =", str(radians))
    return degrees, radians
# Demo call: converts 150 degrees, printing both the degree and radian values.
degToRad(150)
| true |
bdc73dd17ae16343913f58888bb2f67a3ce001b3 | Python | wtsai92/mycode | /python/python_buitin_module/use_collections.py | UTF-8 | 1,763 | 4.34375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import namedtuple, deque, defaultdict, OrderedDict, Counter
"""
namedtuple
namedtuple是一个函数,它用来创建一个自定义的tuple对象,并且规定了tuple元素的个数,
并可以用属性而不是索引来引用tuple的某个元素。
这样一来,我们用namedtuple可以很方便地定义一种数据类型,它具备tuple的不变性,又可以根据属性来引用,使用十分方便。
"""
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p.x)
# Point对象是tuple的一种子类
print(isinstance(p, Point))
print(isinstance(p, tuple))
"""
deque
使用list存储数据时,按索引访问元素很快,但是插入和删除元素就很慢了,因为list是线性存储,数据量大的时候,插入和删除效率很低。
deque是为了高效实现插入和删除操作的双向列表,适合用于队列和栈:
deque除了实现list的append()和pop()外,还支持appendleft()和popleft(),这样就可以非常高效地往头部添加或删除元素。
"""
q = deque(['a', 'b', 'c'])
q.append('x')
q.appendleft('y')
print(q)
"""
defaultdict
使用dict时,如果引用的Key不存在,就会抛出KeyError。如果希望key不存在时,返回一个默认值,就可以用defaultdict:
注意默认值是调用函数返回的,而函数在创建defaultdict对象时传入。
除了在Key不存在时返回默认值,defaultdict的其他行为跟dict是完全一样的。
"""
dd = defaultdict(lambda: 'N/A')
dd['key1'] = 'abc'
print(dd['key1'])
print(dd['key2'])
"""
Counter
Counter是一个简单的计数器,例如,统计字符出现的个数:
"""
c = Counter()
for ch in 'programing':
c[ch] = c[ch] + 1
print(c)
| true |
0c7217f3dd8d360b50173f6bde54532489e95103 | Python | amadeusantos/Mundo_1 | /desafio09025.py | UTF-8 | 240 | 3.78125 | 4 | [] | no_license | nome = str(input('Qual seu nome completo: ')).strip().lower()
# Report (in Portuguese) whether "silva" occurs in the lower-cased name.
# The original rendered the boolean and then str.replace()d True/False;
# picking the word directly yields the identical sentence.
tem_silva = "silva" in nome
print(f'Você possui Silva no nome: {"Sim" if tem_silva else "Não"}.')
| true |
dac3bfeb697e0417983a7308e34525918e22921b | Python | sayed6201/sayeds_django_library | /2.views/view_html_return.py | UTF-8 | 846 | 3.203125 | 3 | [] | no_license | ========================================================================
returning HTML from view
========================================================================
# Month abbreviation -> challenge text, rendered as links by index() below.
# NOTE(review): the name misspells "dictionary"; kept because index() uses it.
monthly_challenges_dictioinary = {
    "jan": "Eat no meat for entire month",
    "feb": "Walk 20 min",
    "mar": "Learn django"
}
def index(request):
    """Django view: render an HTML <ul> linking every month to its challenge page."""
    items = []
    for month in monthly_challenges_dictioinary:
        label = month.capitalize()
        path = reverse("month-challenge", args=[month])
        items.append(f'<li><a href="{path}">{label}</a></li>')
    return HttpResponse(f"<ul>{''.join(items)}</ul>")
ed95c5e96c791267ff6a41f713eefc9e6b57a8db | Python | agupta13/sdx | /player/player_interface.py | UTF-8 | 696 | 2.734375 | 3 | [] | no_license | __author__ = 'arpit'
import sys, socket
exchangeIp = "127.0.0.1"
exchangePort = 9006
def main():
    """Send one sdx_offload request to the exchange and echo the reply.

    Fix: the original used Python-2-only ``print`` statements; single-argument
    ``print(...)`` behaves identically on Python 2 and is valid Python 3 syntax.
    NOTE(review): on Python 3 `sendall` would also need bytes (``data.encode()``);
    this module targets Python 2, so the string payload is kept.
    """
    print("Started the player interface")
    HOST, PORT = exchangeIp, exchangePort
    data = "sdx_offload:asB,{asA:asC}"
    # Create a socket (SOCK_STREAM means a TCP socket)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Connect to server and send data
        sock.connect((HOST, PORT))
        sock.sendall(data)
        # Receive data from the server and shut down
        received = sock.recv(1024)
    finally:
        sock.close()
    print("Sent: {}".format(data))
    print("Received: {}".format(received))
if __name__=="__main__":
main() | true |
dd112ea1f8a8886466e23f2d7653e61b7958ca0a | Python | LyaxKing/My_Printor | /2.0/Main.py | UTF-8 | 1,488 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 16:24:35 2019
@author: HP
"""
import Printor_control
import socketio
import serial
portname = "COM6"
baudrate = 115200
printid = '1'
tem_position = [0, 2]
sio = socketio.Client()
ps = Printor_control.print_state(printid, portname, baudrate, sio, tem_position)
sio.connect('tcp://47.96.95.75:7002')
@sio.on('connect')
def on_connect():
    """On connect: try to start the printer and report its status upstream."""
    print('连接成功')  # connected successfully
    if ps.start():
        statue_json = ps.get_statue_json()
        sio.emit('status', statue_json)
        print("打印机上线,发送打印机状态:")  # printer online, sending status
    else:
        statue_json = ps.get_statue_json()
        sio.emit('status', statue_json)
        print("打印机连接失败")  # printer connection failed
    print(statue_json)
@sio.on('file')
def on_file(file):
    """Receive G-code text from the server, persist it and start the print.

    ``file`` holds the raw G-code payload (the name shadows the builtin; it
    is kept for compatibility with the event-handler registration).
    """
    print("接收到打印文件")  # received the print file
    # Fix: write via a context manager so the file is closed even when
    # writelines() raises (the original left it open on error).
    with open("printfile.gcode", 'w') as fp:
        fp.writelines(file)
    gcodelist = ps.read_gcode()
    ps.startprint = 1
    ps.printing = 1
    statue_json = ps.get_statue_json()
    sio.emit('status', statue_json)
    print(statue_json)
    ps.startprint = 0
    print("开始打印")  # starting print
    ps.print_model(gcodelist)
@sio.on('status')
def on_state():
    """On a 'status' request: refresh temperatures and report printer state."""
    ps.tem_get()
    statue_json = ps.get_statue_json()
    sio.emit('status', statue_json)
    print("打印机状态发送state")  # printer status sent
    print(statue_json)
@sio.on('disconnect')  # sending a message under 'disconnect' drops the link at once
def on_disconnect():
    """Called when the socket drops; the client library reconnects."""
    print('重新连接')  # reconnecting
sio.wait()  # block forever, processing incoming events
| true |
a4212f7d32783bf9b79fd905f34f7a45331d3148 | Python | ribeiro3115/Movie-Trailer-Website | /services.py | UTF-8 | 1,151 | 3.171875 | 3 | [
"MIT"
] | permissive | import urllib2
import xml.etree.ElementTree as ET
import media
# This file has a function with a responsability to connect a API that i found in the internet that return a webservice in XML with information about Movies.
def downloadMovies(id_page):
    """Fetch page *id_page* of the trailer API and return a list of media.Movie.

    Each Movie carries the title, storyline, poster URL and the Dailymotion
    video id ('did') parsed from the service's XML response.
    """
    # `response` renamed from `file`, which shadowed the builtin.
    response = urllib2.urlopen('http://trailerapi.com/api/api.php?page='+id_page+'&language=en')
    xml_payload = response.read()
    response.close()
    root = ET.fromstring(xml_payload)
    movies = []
    for movie_info in root.findall('movie'):
        movies.append(media.Movie(
            movie_info.find('name').text.encode('utf8'),
            movie_info.find('description').text.encode('utf8'),
            movie_info.find('poster').text.encode('utf8'),
            movie_info.find('did').text.encode('utf8')))
    return movies
9dac8023b03c2f66f8f573ce2d2f9f2859c5d4e2 | Python | wufenglun/TravelPlanner | /anytime_algo.py | UTF-8 | 1,233 | 2.59375 | 3 | [] | no_license | from DirectedGraph import *
from search import * #for search engines
from hotelAndScenery import *
def heur_zero(state):
    """Null heuristic: estimate zero remaining cost for every state."""
    return 0
def tsp_goal_state(state):
    """Goal test for the TSP search: only one vertex is left to visit."""
    remaining = state.get_vertices()
    return len(remaining) == 1
def fval_function(sN, weight):
    """Weighted evaluation f(n) = g(n) + weight * h(n) for search node *sN*."""
    g_val = sN.gval
    h_val = sN.hval
    return g_val + weight * h_val
def anytime_gbfs(initial_state, heur_fn, timebound = 10):
    """Anytime greedy best-first search: re-search with a shrinking time
    budget, tightening the cost bound whenever a better solution appears.

    Returns the best solution found, or False when none was found.
    """
    se = SearchEngine('best_first', 'none')
    se.init_search(initial_state, goal_fn=tsp_goal_state, heur_fn=heur_fn)
    # os.times()[0] is user CPU time; `os` is assumed to arrive via one of
    # the star imports above - TODO confirm.
    cur_time = os.times()[0]
    start_time = os.times()[0]
    solutions = [None]  # single-slot holder for the best solution so far
    costbound = (float('inf'), float('inf'), float('inf'))
    while timebound > 0:
        solution = se.search(timebound, costbound)
        if solution:
            if solution.gval < costbound[0]:
                # Better solution: tighten the g-bound and replace the holder.
                costbound = (solution.gval, float('inf'), float('inf'))
                solutions.pop()
                solutions.append(solution)
        else:
            # No further solution inside the bound/budget: report and stop.
            print("=======================================================")
            print("Solution found in {} secs.".format(os.times()[0] - start_time))
            break
        # Charge the elapsed CPU time against the remaining budget.
        timebound = timebound - (os.times()[0] - cur_time)
        cur_time = os.times()[0]
    return False if solutions[0] is None else solutions[0]
| true |
f5bd9642a028264318b9a6e3d3e1e22b43d1d7ba | Python | Phillgb/ViSTA_GrAM | /scripts/2.2/GrAM/schedule.py | UTF-8 | 2,436 | 3.21875 | 3 | [] | no_license | # schedule.py Phillipe Gauvin-Bourdon
'''
This script is describing the scheduler for the GrAM module. This scheduler is
making sure the agents are activated one type at the time. Each agents of the
same type are activated at random.
'''
# --------------------------IMPORT MODULES-------------------------------------
import random
from mesa.time import RandomActivation
from collections import defaultdict
# -----------------------RANDOM ACTIVATION BY BREED----------------------------
class RandomActivationByBreed(RandomActivation):
    '''
    A scheduler that activates each type ("breed") of agent once per step,
    in random order, with the order reshuffled every step.

    Inspired by the Mesa WolfSheepPredation example and NetLogo's `ask breed`.
    All agents must have a step() method.
    '''
    # NOTE(review): class-level mutable default shared by all instances; it is
    # shadowed per-instance in __init__, so it only matters when accessed on
    # the class itself - consider removing.
    agents_by_breed = defaultdict(list)
    def __init__(self, model):
        super().__init__(model)
        self.agents_by_breed = defaultdict(list)
    def add(self, agent):
        '''
        Add an Agent object to the schedule.

        Args:
            agent: An agent to be added to the schedule.
        '''
        self.agents.append(agent)
        agent_class = type(agent)
        self.agents_by_breed[agent_class].append(agent)
    def remove(self, agent):
        '''
        Remove all instances of a given agent from the schedule.
        '''
        while agent in self.agents:
            self.agents.remove(agent)
        agent_class = type(agent)
        while agent in self.agents_by_breed[agent_class]:
            self.agents_by_breed[agent_class].remove(agent)
    def step(self, by_breed=True):
        '''
        Execute one scheduler step.

        Args:
            by_breed: If True, run all agents of one breed before the next
                      breed; otherwise defer to the base-class ordering.
        '''
        if by_breed:
            for agent_class in self.agents_by_breed:
                self.step_breed(agent_class)
            self.steps += 1
            self.time += 1
        else:
            super().step()
    def step_breed(self, breed):
        '''
        Shuffle order and run all agents of a given breed.

        Args:
            breed: Class object of the breed to run.
        '''
        agents = self.agents_by_breed[breed]
        random.shuffle(agents)
        for agent in agents:
            agent.step()
fa1c398ffd16beb58d2828806303c25ed70e6733 | Python | eavanvalkenburg/brunt-api | /src/brunt/http.py | UTF-8 | 5,580 | 2.578125 | 3 | [
"MIT"
] | permissive | """Main code for brunt http."""
from __future__ import annotations
import json
import logging
from abc import abstractmethod, abstractproperty
from datetime import datetime
from typing import Final
import requests
from aiohttp import ClientSession
from .const import COOKIE_DOMAIN, DT_FORMAT_STRING
from .utils import RequestTypes
_LOGGER = logging.getLogger(__name__)
# Headers sent with every request; mimics the Brunt mobile app's web client.
DEFAULT_HEADER: Final = {
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Origin": "https://sky.brunt.co",
    "Accept-Language": "en-gb",
    "Accept": "application/vnd.brunt.v1+json",
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) \
AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E216",
}
class BaseBruntHTTP:
    """Base class for Brunt HTTP backends (sync and async)."""

    @staticmethod
    def _prepare_request(data: dict) -> dict:
        """Prepare the url/payload/headers kwargs; the payload may be empty.

        Builds the target URL from ``data['host'] + data['path']`` and, when a
        JSON body is present, *adds* its Content-Length to the default headers.
        Fix: the previous code replaced the default headers with a bare
        ``{"Content-Length": ...}`` dict, dropping Content-Type/Accept/etc.,
        although its own docstring said "add the length to the header".
        """
        payload = ""
        headers = DEFAULT_HEADER.copy()
        if "data" in data:
            payload = json.dumps(data["data"])
            headers["Content-Length"] = str(len(payload))
        return {"url": data["host"] + data["path"], "data": payload, "headers": headers}

    @abstractmethod
    def request(self, data: dict, request_type: RequestTypes) -> dict | list:
        """Return the request response - abstract."""

    @abstractmethod
    async def async_request(
        self, data: dict, request_type: RequestTypes
    ) -> dict | list:
        """Return the request response - abstract."""

    # `abstractproperty` is deprecated; `property` + `abstractmethod` is the
    # documented modern spelling and behaves identically for subclasses.
    @property
    @abstractmethod
    def is_logged_in(self) -> bool:
        """Return True if there is a session and the cookie is still valid."""
class BruntHttp(BaseBruntHTTP):
    """Synchronous Brunt HTTP backend built on the requests package."""

    def __init__(self, session: requests.Session = None):
        """Wrap an existing requests session, or create a fresh one."""
        self.session = session if session else requests.Session()

    @property
    def is_logged_in(self) -> bool:
        """Return True if there is a session and the cookie is still valid."""
        if not self.session.cookies:
            return False
        for cookie in self.session.cookies:
            if cookie.domain == COOKIE_DOMAIN and cookie.expires is not None:
                expiry = datetime.strptime(str(cookie.expires), DT_FORMAT_STRING)
                return expiry > datetime.utcnow()
        return False

    async def async_request(
        self, data: dict, request_type: RequestTypes
    ) -> dict | list:
        """Raise error for using this call with sync."""
        raise NotImplementedError("You are using the sync version, please use request.")

    def request(self, data: dict, request_type: RequestTypes) -> dict | list:
        """Perform the HTTP call and return the decoded JSON body.

        :param data: internal data of your API call
        :param request_type: the type of request, from the RequestTypes enum
        :returns: the decoded JSON body, or {"result": "success"} when the
            response body is empty (e.g. for PUT)
        :raises: requests' HTTP errors via raise_for_status()
        """
        response = self.session.request(
            request_type.value, **BaseBruntHTTP._prepare_request(data)
        )
        response.raise_for_status()
        if not response.text:
            return {"result": "success"}
        return response.json()
class BruntHttpAsync(BaseBruntHTTP):
    """Asynchronous Brunt HTTP backend built on aiohttp."""

    def __init__(self, session: ClientSession = None):
        """Wrap an existing aiohttp session, or create a fresh one."""
        self.session = session if session else ClientSession()

    @property
    def is_logged_in(self) -> bool:
        """Return True if there is a session and the cookie is still valid."""
        if not self.session.cookie_jar:
            return False
        for cookie in self.session.cookie_jar:
            if cookie.get("domain") == COOKIE_DOMAIN and cookie.get("expires") is not None:
                expiry = datetime.strptime(str(cookie.get("expires")), DT_FORMAT_STRING)
                return expiry > datetime.utcnow()
        return False

    def request(self, data: dict, request_type: RequestTypes) -> dict | list:
        """Raise error for using this call with async."""
        raise NotImplementedError(
            "You are using the Async version, please use async_request."
        )

    async def async_request(
        self, data: dict, request_type: RequestTypes
    ) -> dict | list:
        """Perform the HTTP call and return the decoded JSON body.

        :param data: internal data of your API call
        :param request_type: the type of request, from the RequestTypes enum
        :returns: the decoded JSON body, or {"result": "success"} when the
            body does not decode as JSON
        :raises: aiohttp's HTTP errors (raise_for_status=True)
        """
        async with self.session.request(
            request_type.value,
            **BaseBruntHTTP._prepare_request(data),
            raise_for_status=True,
        ) as resp:
            try:
                return await resp.json(content_type=None)
            except json.JSONDecodeError:
                return {"result": "success"}
| true |
7f131c9079ac014c3b4f7f2a0637c670ed1dd6e6 | Python | EduardoLPaez/spanish-twitter-Sentiment-Analysis | /stream_app.py | UTF-8 | 1,427 | 3.3125 | 3 | [] | no_license | import pandas as pd
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
from main import twitter_query
import altair as alt
def overall(frame):
    """Classify the mean of frame['sentiment']: >=6 positive, >=3.1 neutral,
    anything lower negative."""
    mean_score = frame['sentiment'].mean()
    if mean_score >= 6:
        return 'positive'
    if mean_score >= 3.1:
        return 'neutral'
    return 'negative'
def main():
    """Build the Streamlit page: title, project notes, keyword input, chart."""
    st.title('Twitter Sentiment Analysis(spanish)')
    # VV pass bellow through spellcheck.
    st.markdown('''This project utilizes the twitter API tweepy to look up the latest 200 tweets
    for a given keyword. at current the program utilizes aylotts's sentipy model for
    classifying the tweets' sentiment. \n
    - The program will determine which if any tweets are in Spanish \n
    for the given keyword. then does a sentiment analysis on \n
    those that are in Spanish. \n
    - Future plans include adding analysis options for other languages.\n
    ''')
    text_in = st.text_input('please write a keyword.', 'keyword')
    if text_in != 'keyword':
        # Only query once the user has replaced the placeholder text.
        df = twitter_query(text_in)
        st.text(f'\nthe overall sentiment for {text_in} is {overall(df)} ')
        # need more perty graphics.......I regret nothing......
        st.altair_chart(
            alt.Chart(df).mark_bar().encode(
                alt.X("sentiment", bin=True),
                y='count()',
            )
        )
# Streamlit runs the script top-to-bottom; the guard also allows direct runs.
if __name__ == '__main__':
    main()
9da9c0012eea2cde05759892586b75908216b9fb | Python | icebert/clinvar_norm | /utils/format.py | UTF-8 | 191 | 2.65625 | 3 | [] | no_license | #!/bin/env python
import sys
import hgvs.parser
hp = hgvs.parser.Parser()
for var in sys.stdin:
var = var.rstrip('\n')
var_i = hp.parse_hgvs_variant(var)
print(str(var_i))
| true |
a01b7a97309e5bb5ac8c8a5a6628855b2a0c0196 | Python | HBinhCT/Q-project | /hackerearth/Data Structures/Advanced Data Structures/Trie (Keyword Tree)/Yet another problem with Strings/solution.py | UTF-8 | 852 | 2.796875 | 3 | [
"MIT"
] | permissive | from sys import stdin
def get_deciphered(string, last_yes_decipher):
res = ''
for c in string:
res += chr((ord(c) - 97 + last_yes_decipher) % 26 + 97) # 97 = ord('a')
return res
n, q = map(int, stdin.readline().strip().split())
strings = []
for _ in range(n):
s = stdin.readline().strip()
strings.append(s)
last_yes = 0
for idx in range(q):
p, *query = stdin.readline().strip().split()
if p == '1':
t = query[0]
if last_yes:
t = get_deciphered(t, last_yes)
for s in strings:
if s in t:
last_yes = idx
print('YES')
break
else:
print('NO')
else:
i, alpha = map(int, query)
alpha = (alpha + last_yes) % 26
strings[(i + last_yes) % n] += chr(alpha + 97) # 97 = ord('a')
| true |
9efc35382897fd9ae1a4e47a3efe15e07249f3b6 | Python | s3rvac/talks | /2017-03-07-Introduction-to-Python/examples/23-override.py | UTF-8 | 119 | 3.078125 | 3 | [
"BSD-3-Clause"
] | permissive | class A:
def foo(self):
print('A')
class B(A):
def foo(self):
print('B')
x = B()
x.foo() # B
| true |
6d4e1072a09915e25b9dfa3ae529c797cfc4743b | Python | qgladis45/Dinner | /new 2.py | UTF-8 | 10,570 | 2.78125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import showinfo
import webbrowser
from PIL import Image, ImageTk
from urllib.request import urlopen
import io
import sys
#網路連線檢查
def check_internet():
try:
_ = requests.get('http://www.google.com/', timeout=5) #以谷歌測試
return True
except requests.ConnectionError:
showinfo("溫馨小提示", "NO INTERNET CONNECTION") #如果沒網路,以視窗顯示
#root = tk.Tk()
#root.withdraw()
sys.quit()
return False
check_internet()
'''爬蟲'''
url = "https://kma.kkbox.com/charts/daily/song?cate="
song_list = ['297', '390', '308', '314'] # 華語man, 英文eng, 日文jap, 韓文kor
song_index = -1
man_rank = []
eng_rank = []
jap_rank = []
kor_rank = []
# 獲得專輯照片&其他排名的歌名資料
# 把下面的code插入在16行那邊
# 目前我只做華語歌曲前十名的專輯照片網址
man_url = url + song_list[0]
r_man = requests.get(man_url)
soup = BeautifulSoup(r_man.text, 'html.parser')
all_scripts = soup.find_all('script')
song_scripts = all_scripts[-2].text[:-30000] # 後面一大段都不重要
man_cover = [] # 前十名的專輯照片網址
for i in range(10):
#處理專輯照片
start = song_scripts.find('small')
end = song_scripts.find('160x160.jpg')
cover_url = song_scripts[start+8:end+11]
cover_url = cover_url.replace('\\' , '')
man_cover.append(cover_url)
# 把文字檔精簡
song_scripts = song_scripts[end+12:]
for o in (man_rank, eng_rank, jap_rank, kor_rank):
song_index += 1
song_url = url + song_list[song_index]
r = requests.get(song_url)
soup = BeautifulSoup(r.text, 'html.parser')
attr = {'name': 'description'}
rank = soup.find_all('meta', attrs=attr) # 找到html裡面的meta標籤
rank_str = rank[0]['content'] # 找到排行榜的部分
rank_str = rank_str[(rank_str.find(':')+1):] # 只抓取歌單的部分
rank_list = rank_str.split('、') # 把str轉成list
# list中0,2,4,6,8為歌名; 1,3,5,7,9為歌手
for i in rank_list:
# rank = i.strip()
title = i[:i.find('-')] # 把歌名整理一下
singer = i[(i.find('-')+1):] # 把歌手整理一下
if title.find('('): # 如果歌名有(像是歌名的英文名稱)
o.append(title[:title.find('(')]) # 只保留中文的部分
else:
o.append(title)
if singer.find('-'):
o.append(singer[(singer.find('-')+1):])
else:
o.append(singer)
# 把前後有空格的整理乾淨
for i in range(10):
o[i] = o[i].strip()
'''視窗'''
class Ranking(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.create_widgets()
self.click(man_rank, man_cover)
def input_pic1(self, cover_name):
self.url = requests.get(man_cover[0])
self.imagebyte = io.BytesIO(self.url.content)
self.imagepil = Image.open(self.imagebyte)
self.imagepil = self.imagepil.resize((80, 80), Image.ANTIALIAS) # 重設大小
self.image1 = ImageTk.PhotoImage(self.imagepil)
return self.image1
def input_pic2(self, cover_name):
self.url = requests.get(man_cover[1])
self.imagebyte = io.BytesIO(self.url.content)
self.imagepil = Image.open(self.imagebyte)
self.imagepil = self.imagepil.resize((80, 80), Image.ANTIALIAS) # 重設大小
self.image2 = ImageTk.PhotoImage(self.imagepil)
return self.image2
def input_pic3(self, cover_name):
self.url = requests.get(man_cover[2])
self.imagebyte = io.BytesIO(self.url.content)
self.imagepil = Image.open(self.imagebyte)
self.imagepil = self.imagepil.resize((80, 80), Image.ANTIALIAS) # 重設大小
self.image3 = ImageTk.PhotoImage(self.imagepil)
return self.image3
def input_pic4(self, cover_name):
self.url = requests.get(man_cover[3])
self.imagebyte = io.BytesIO(self.url.content)
self.imagepil = Image.open(self.imagebyte)
self.imagepil = self.imagepil.resize((80, 80), Image.ANTIALIAS) # 重設大小
self.image4 = ImageTk.PhotoImage(self.imagepil)
return self.image4
def input_pic5(self, cover_name):
self.url = requests.get(man_cover[4])
self.imagebyte = io.BytesIO(self.url.content)
self.imagepil = Image.open(self.imagebyte)
self.imagepil = self.imagepil.resize((80, 80), Image.ANTIALIAS) # 重設大小
self.image5 = ImageTk.PhotoImage(self.imagepil)
return self.image5
# 建立主題按鈕&名次
def create_widgets(self):
# 主題(button)
self.manbut = tk.Button(self, text="華語", font='微軟正黑體', bg='Black', fg='White', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click(man_rank, man_cover)))
self.manbut.grid(row=0, column=2, ipadx=15, pady=2, sticky=(tk.NW+tk.SE))
self.engbut = tk.Button(self, text="西洋", font='微軟正黑體', bg='Black', fg='White', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click(eng_rank, man_cover)))
self.engbut.grid(row=0, column=3, ipadx=15, pady=2, sticky=(tk.NW+tk.SE))
self.japbut = tk.Button(self, text="日語", font='微軟正黑體', bg='Black', fg='White', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click(jap_rank, man_cover)))
self.japbut.grid(row=0, column=4, ipadx=15, pady=2, sticky=(tk.NW+tk.SE))
self.korbut = tk.Button(self, text="韓語", font='微軟正黑體', bg='Black', fg='White', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click(kor_rank, man_cover)))
self.korbut.grid(row=0, column=5, ipadx=15, pady=2, sticky=(tk.NW+tk.SE))
# 名次(label)
self.rank1 = tk.Label(self, text=' 1st ', font='微軟正黑體', bg='Black', fg='Gold')
self.rank1.grid(row=1, column=0, padx=10, pady=5, sticky=(tk.NW+tk.SE))
self.rank2 = tk.Label(self, text=' 2nd ', font='微軟正黑體', bg='Black', fg='Gold')
self.rank2.grid(row=2, column=0, padx=10, pady=5, sticky=(tk.NW+tk.SE))
self.rank3 = tk.Label(self, text=' 3rd ', font='微軟正黑體', bg='Black', fg='Gold')
self.rank3.grid(row=3, column=0, padx=10, pady=5, sticky=(tk.NW+tk.SE))
self.rank4 = tk.Label(self, text=' 4th ', font='微軟正黑體', bg='Black', fg='Gold')
self.rank4.grid(row=4, column=0, padx=10, pady=5, sticky=(tk.NW+tk.SE))
self.rank5 = tk.Label(self, text=' 5th ', font='微軟正黑體', bg='Black', fg='Gold')
self.rank5.grid(row=5, column=0, padx=10, pady=5, sticky=(tk.NW+tk.SE))
# 離開(button)
self.exitbut = tk.Button(self, width=2, text='Ⓧ', font=('微軟正黑體', 12), bg='Black', fg='Gray55', activebackground='Black', activeforeground='red', relief='flat', command=(lambda: self.quit()))
self.exitbut.grid(row=0, column=0, sticky=tk.NW)
# function: 各主題的排行(button)
def click(self, rank_name, cover_name):
self.but1 = tk.Button(self, text=(rank_name[0] + " - " + rank_name[1]), font='微軟正黑體', bg='Black', fg='Snow2', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click_lan(rank_name)))
self.but1.grid(row=1, column=2, columnspan=6, sticky=(tk.NW+tk.SE))
self.but2 = tk.Button(self, text=(rank_name[2] + " - " + rank_name[3]), font='微軟正黑體', bg='Black', fg='Snow2', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click_lan(rank_name)))
self.but2.grid(row=2, column=2, columnspan=6, sticky=(tk.NW+tk.SE))
self.but3 = tk.Button(self, text=(rank_name[4] + " - " + rank_name[5]), font='微軟正黑體', bg='Black', fg='Snow2', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click_lan(rank_name)))
self.but3.grid(row=3, column=2, columnspan=6, sticky=(tk.NW+tk.SE))
self.but4 = tk.Button(self, text=(rank_name[6] + " - " + rank_name[7]), font='微軟正黑體', bg='Black', fg='Snow2', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click_lan(rank_name)))
self.but4.grid(row=4, column=2, columnspan=6, sticky=(tk.NW+tk.SE))
self.but5 = tk.Button(self, text=(rank_name[8] + " - " + rank_name[9]), font='微軟正黑體', bg='Black', fg='Snow2', activebackground='LightSteelBlue4', activeforeground='White', command=(lambda: self.click_lan(rank_name)))
self.but5.grid(row=5, column=2, columnspan=6, sticky=(tk.NW+tk.SE))
self.pic1 = tk.Label(self, image=self.input_pic1(cover_name))
self.pic1.grid(row=1, column=1, sticky=(tk.NW+tk.SE))
self.pic2 = tk.Label(self, image=self.input_pic2(cover_name))
self.pic2.grid(row=2, column=1, sticky=(tk.NW+tk.SE))
self.pic3 = tk.Label(self, image=self.input_pic3(cover_name))
self.pic3.grid(row=3, column=1, sticky=(tk.NW+tk.SE))
self.pic4 = tk.Label(self, image=self.input_pic4(cover_name))
self.pic4.grid(row=4, column=1, sticky=(tk.NW+tk.SE))
self.pic5 = tk.Label(self, image=self.input_pic5(cover_name))
self.pic5.grid(row=5, column=1, sticky=(tk.NW+tk.SE))
'''
self.url = requests.get(cover)
self.imagebyte = io.BytesIO(self.url.content)
self.imagepil = Image.open(self.imagebyte)
self.imagepil = self.imagepil.resize((80, 80), Image.ANTIALIAS) # 重設大小
self.image = ImageTk.PhotoImage(self.imagepil)
'''
# function: 按下歌曲
def click_lan(self, language, rank):
webbrowser.open_new_tab("https://www.youtube.com/results?search_query=" + language[rank*2 - 2] + "+" + language[rank*2 - 1]) # 開啟Youtube搜尋頁面
# Build and run the ranking window (Ranking is defined earlier in this file).
ranking = Ranking()
ranking.master.title("KKbox Ranking")
ranking.master.geometry('-30-50') # place the window at the bottom-right corner
ranking.master.attributes('-alpha', 1) # fully opaque
ranking.master.resizable(0, 0) # lock the window size
ranking.configure(bg='Black') # background colour
ranking.master.overrideredirect(True) # remove the title bar
ranking.mainloop()
| true |
252d739afbf8adcc337598418688502b7263c125 | Python | nikollson/AIAnimation | /AlphaGoZeroBase/AlphaGoZeroBase/Environment/MujocoModel.py | UTF-8 | 1,263 | 2.609375 | 3 | [] | no_license |
from mujoco_py import load_model_from_path
import numpy as np
class MujocoModel:
    """Wraps a MuJoCo model and maps discrete action ids onto actuator torques."""
    def __init__(self, modelPath : str):
        self.MujocoModel = load_model_from_path(modelPath)
        self.JointList = self.GetJointList()
        self.NActuator = len(self.MujocoModel.actuator_names)
        # One negative and one positive torque action per actuator, plus a
        # final "do nothing" action.
        self.NAction = self.NActuator * 2 + 1
        self.NoneAction = self.NAction - 1
        self.TorqueCofficient = 1
    def GetActionTorque(self, actionNum):
        """Return the torque vector for discrete action *actionNum*.

        Even ids drive actuator actionNum//2 negatively, odd ids positively;
        NoneAction produces an all-zero vector.
        """
        torques = np.zeros(self.NActuator)
        if actionNum == self.NoneAction:
            return torques
        actuator, parity = divmod(actionNum, 2)
        direction = 1 if parity else -1
        torques[actuator] += self.TorqueCofficient * direction
        return torques
    def GetJointList(self):
        """Base implementation: no joints."""
        return []
class Joint:
    """Plain container bundling a joint with its sensor handles/readings."""
    def __init__(self, joint, site, jointPosition, jointVelocity,
                 accel, velocity, gyro, force, torque):
        # Store every constructor argument on an attribute of the same role.
        (self.Joint, self.Site, self.JointPosition, self.JointVelocity,
         self.Accel, self.Velocity, self.Gyro, self.Force, self.Torque) = (
            joint, site, jointPosition, jointVelocity,
            accel, velocity, gyro, force, torque)
| true |
1b847fe2c3452a0c3d6e1a45ba12c872b477fbef | Python | SciLifeLab/scilifelab | /scilifelab/utils/slurm.py | UTF-8 | 750 | 2.5625 | 3 | [
"MIT"
] | permissive | """Useful functions for interacting with the slurm manager
"""
import subprocess
import getpass
try:
import drmaa
except:
pass
def get_slurm_jobid(jobname, user=None):
    """Attempt to get the job ids for a slurm job name.

    Returns a list of integer job ids; an empty list if squeue is missing,
    fails, or returns unparsable output.
    """
    if user is None:
        # Resolved at call time; the old getpass.getuser() default was frozen
        # at import time.
        user = getpass.getuser()
    jobids = []
    cmd = ['/usr/bin/squeue','-h','-o','%i','-n',jobname,'-u',user]
    try:
        # Bug fix: check_output() returns bytes and str(bytes) yields the repr
        # ("b'...\\n'"), so every int() failed and the bare except silently
        # returned [].  Decode instead, and skip blank lines from split().
        retval = subprocess.check_output(cmd).decode()
        for val in retval.split("\n"):
            if val.strip():
                jobids.append(int(val))
    except (OSError, subprocess.CalledProcessError, ValueError):
        # squeue unavailable/failing or unexpected output: behave as "no jobs".
        pass
    return jobids
def get_slurm_jobstatus(jobid):
    """Get the status for a jobid via a DRMAA session.

    Bug fix: the session is now closed in a finally block, so it is no longer
    leaked when jobStatus() raises.
    """
    s = drmaa.Session()
    s.initialize()
    try:
        status = s.jobStatus(str(jobid))
    finally:
        s.exit()
    return status
| true |
1709805c7b31aa7bc000947822d762e707b03d31 | Python | safciezgi/Python-Ubuntu-OS-Trial | /.vscode/DENEME.py | UTF-8 | 2,011 | 2.578125 | 3 | [] | no_license | import os
import psutil
import shutil
import netifaces
import pprint
import platform
print('')
print("="*40, "Ip Addresses", "="*40)
print('')
ip_ = os.popen("ip a").readlines()
from pprint import pprint
pprint(ip_)
print('')
print("="*40, "Network Interfaces Names", "="*40)
print('')
addrs = psutil.net_if_addrs()
eth = list(addrs.keys())
print(str(eth))
print('')
print("="*40, "Network Interfaces Ip & Names", "="*40)
print('')
netifaces.interfaces()
for i in range(len(eth)):
    eth_ = str(eth[i])
    print(eth_)
    print(netifaces.ifaddresses(eth_))
    print('')
print('')
# Bug fix: the original printed eth[0] and eth[1] unconditionally, which
# raises IndexError on hosts with fewer than two interfaces and KeyError for
# interfaces without an IPv4 (AF_INET) address.  Print every interface that
# actually has an IPv4 address instead.
for name in eth:
    info = netifaces.ifaddresses(name)
    if netifaces.AF_INET in info:
        print(name + ' ' + info[netifaces.AF_INET][0]['addr'])
print('')
print("="*40, "Disk Usage", "="*40)
print('')
total, used, free = shutil.disk_usage("/")
print("Total: %d GiB" % (total // (2**30)))
print("Used: %d GiB" % (used // (2**30)))
print("Free: %d GiB" % (free // (2**30)))
print('')
# let's print CPU information
print("="*40, "CPU Info", "="*40)
print('')
# number of cores
print("Physical cores:", psutil.cpu_count(logical=False))
print("Total cores:", psutil.cpu_count(logical=True))
# CPU frequencies
cpufreq = psutil.cpu_freq()
print(f"Max Frequency: {cpufreq.max:.2f}Mhz")
print(f"Min Frequency: {cpufreq.min:.2f}Mhz")
print(f"Current Frequency: {cpufreq.current:.2f}Mhz")
# CPU usage
print("CPU Usage Per Core:")
for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
    print(f"Core {i}: {percentage}%")
print(f"Total CPU Usage: {psutil.cpu_percent()}%")
print('')
print("="*40, "System Information", "="*40)
print('')
uname = platform.uname()
print(f"System: {uname.system}")
print(f"Node Name: {uname.node}")
print(f"Release: {uname.release}")
print(f"Version: {uname.version}")
print(f"Machine: {uname.machine}")
print(f"Processor: {uname.processor}")
| true |
3dfb99a05589297eadf6686bd29c80d641f5a7bd | Python | Volerous/PACalendar | /FlaskApp/FlaskApp/classes.py | UTF-8 | 5,492 | 2.515625 | 3 | [
"MIT"
] | permissive | from sqlalchemy import String, Column, Table, Integer, ForeignKey, create_engine, DateTime, Boolean, Float, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
import datetime
from sqlalchemy.sql import select
Base = declarative_base()
# Many-to-many association tables linking events/tasks to their tags.
Event_has_Tags = Table('event_has_tags', Base.metadata,
                       Column("event_id", Integer, ForeignKey("event.id")),
                       Column("tag_id", Integer, ForeignKey("tag.id")))
Task_has_Tags = Table('task_has_tags', Base.metadata,
                      Column("task_id", Integer, ForeignKey("task.id")),
                      Column("tag_id", Integer, ForeignKey("tag.id")))
class Event(Base):
    """Calendar event model; `str_tags` exposes the Tag relation as a list of
    plain title strings."""
    __tablename__ = "event"
    attrs = ["title", "id", "begin_date", "end_date", "all_day", "event_color",
             "description", "busy_level", "contact", "location", "str_tags"]
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(100), nullable=False)
    begin_date = Column(DateTime, nullable=False)
    end_date = Column(DateTime, nullable=False)
    all_day = Column(Boolean, nullable=False)
    event_color = Column(String(6), nullable=False)
    description = Column(Text)
    busy_level = Column(Integer)
    contact = Column(String(50))
    location = Column(ForeignKey("location.id"))
    tags = relationship("Tag", secondary=Event_has_Tags, backref="event")
    def _find_or_create_tag(self, tag):
        # Bug fix: this used `Tag.query` (a Flask-SQLAlchemy API not available
        # on a plain declarative model), filtered on a nonexistent `name`
        # column and constructed Tag positionally.  Use the same Session-based
        # lookup as Task._find_or_create_tag and the real `title` column.
        q = Session.query(Tag).filter_by(title=tag)
        t = q.first()
        if not(t):
            t = Tag(title=tag)
        return t
    def _get_tags(self):
        # Bug fix: Tag has no `name` attribute; its column is `title`.
        return [x.title for x in self.tags]
    def _set_tags(self, value):
        # clear the list first
        while self.tags:
            del self.tags[0]
        # add new tags
        for tag in value:
            self.tags.append(self._find_or_create_tag(tag))
    str_tags = property(_get_tags,
                        _set_tags,
                        "Property str_tags is a simple wrapper for tags relation")
class Tag(Base):
    """Label that can be attached to both events and tasks."""
    attrs = ["title", "color"]
    __tablename__ = "tag"
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(100), nullable=False)
    color = Column(String(6))  # 6-char colour value (presumably RRGGBB hex — verify)
class Location(Base):
    """Named place with a street address and latitude/longitude coordinates."""
    __tablename__ = "location"
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(60), nullable=False)
    address = Column(String(80), nullable=False)
    lat = Column(Float, nullable=False)
    lng = Column(Float, nullable=False)
    data_type = Column(String(30), nullable=False)
class Task(Base):
    """To-do item model; `str_tags` / `str_subtasks` wrap the relations."""
    attrs = ["title", "due_date", "completed",
             "priority", "description", "color", "tags"]
    __tablename__ = "task"
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(100), nullable=False)
    due_date = Column(DateTime)
    completed = Column(Boolean, nullable=False)
    priority = Column(Integer, nullable=False)
    description = Column(Text)
    color = Column(String(20))
    tags = relationship("Tag", secondary=Task_has_Tags, backref="task")
    tasklist_id = Column(Integer, ForeignKey("tasklist.id"), nullable=False)
    sub_tasks = relationship("SubTask")
    def _find_or_create_tag(self, tag):
        # *tag* is a dict like {"title": ..., "color": ...}.
        q = Session.query(Tag).filter_by(title=tag["title"])
        t = q.first()
        if not(t):
            # Bug fix: `title=tag` stored the whole dict instead of its title.
            t = Tag(title=tag["title"], color=tag["color"])
        return t
    def _get_tags(self):
        # Bug fix: Tag's column is `title`, not `name`.
        return [x.title for x in self.tags]
    def _set_tags(self, value):
        if not value:
            return
        # clear the list first
        while self.tags:
            del self.tags[0]
        # add new tags
        for tag in value:
            self.tags.append(self._find_or_create_tag(tag))
    str_tags = property(_get_tags,
                        _set_tags,
                        "Property str_tags is a simple wrapper for tags relation")
    def _get_subtasks(self):
        return self.sub_tasks
    def _set_subtasks(self, value):
        if not value:
            return
        # clear the list first
        self.sub_tasks.clear()
        # add new subtasks
        for subtask in value:
            # Bug fix: referenced the undefined name `subtasks` (NameError).
            self.sub_tasks.append(self._find_or_create_subtask(subtask))
    def _find_or_create_subtask(self, subtask):
        # find first with the title of the subtask
        t = Session.query(SubTask).filter_by(title=subtask["title"]).first()
        if not(t):
            # Bug fix: `title=subtask` stored the whole dict instead of the
            # title string; insert the new row with just the title.
            t = SubTask(title=subtask["title"], parent_task=self.id)
        # otherwise just return the found value
        return t
    # Property setting for getters and setters
    str_subtasks = property(_get_subtasks,_set_subtasks)
class SubTask(Base):
    """Child item of a Task; (id, title) form a composite primary key."""
    __tablename__ = "subtask"
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(100), primary_key=True, nullable=False)
    parent_task = Column(Integer, ForeignKey("task.id"))
class TaskList(Base):
    """Named collection of tasks (tasks reference it via tasklist_id)."""
    __tablename__ = "tasklist"
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(100),nullable=False)
    repeatable = Column(Boolean)
# create the connection and session
# WARNING(security): database credentials are hard-coded in source; move them
# to environment variables or a config file kept out of version control.
engine = create_engine(
    "mysql+mysqldb://volerous:fourarms@localhost/Personal_Assistant")
#engine.execute("USE Personal_Assistant")
Base.metadata.create_all(engine)
session_m = sessionmaker(bind=engine)
Session = session_m()
| true |
d8ec6bab60caaf5fd043d6804a0d6dc02423c8ac | Python | DiogoOliveira111/ProjectoTese | /OpenFiles.py | UTF-8 | 1,309 | 2.859375 | 3 | [] | no_license | import pandas as pd
import pickle
import seaborn as sns
import numpy as np
import easygui
from tkinter import Tk, Label
from WBMTools.sandbox.interpolation import interpolate_data
# Let the user pick a pickled interaction "collection", then extract the
# mouse events into parallel time/x/y lists for interpolation.
path = easygui.fileopenbox()
with open(path, 'rb') as handle:
    collection= pickle.load(handle)
flag=0
MouseTime=[]
MouseX=[]
MouseY=[]
# NOTE(review): this assumes key 0 is iterated first and is a Mouse event;
# otherwise initial_time could be referenced before assignment — verify the
# collection layout produced by the recorder.
for i in collection:
    event=collection[i]
    if( event['Type']=='Mouse'):
        data=event['Data'].split(';')
        if (i==0):
            initial_time = float(data[-1])
            MouseTime.append(initial_time/1000)
        else:
            # timestamps are stored relative to the first event, in seconds
            MouseTime.append((float(data[-1]) - initial_time) / 1000)
        MouseX.append(float(data[2]))
        MouseY.append(float(data[3]))
        flag=1 #Flag to determine if there is Mouse data in the collection
if(flag==0):
    # No mouse data found: show a notice window, then quit.
    root= Tk()
    # Make window 300x150 and place at position (50,50)
    root.geometry("600x300+50+50")
    # Create a label as a child of root window
    my_text = Label(root, text='The Collection chosen has no Mouse Data')
    my_text.pack()
    root.mainloop()
    exit()
MouseDict = dict(t=MouseTime, x=MouseX, y=MouseY)
dM = pd.DataFrame.from_dict(MouseDict)
time_var,space_var=interpolate_data(dM,t_abandon=20)
vars={'time_var': time_var, 'space_var': space_var} | true |
7d0cc4c6fedf1d42d4feaa5aeb6d6002f34b4293 | Python | poojan14/Python-Practice | /Hackerearth/Monk Takes a Walk.py | UTF-8 | 946 | 4.28125 | 4 | [] | no_license | '''
Today, Monk went for a walk in a garden. There are many trees in the garden and each tree has an English alphabet on it. While Monk was
walking, he noticed that all trees with vowels on it are not in good state. He decided to take care of them. So, he asked you to tell him
the count of such trees in the garden.
Note : The following letters are vowels: 'A', 'E', 'I', 'O', 'U' ,'a','e','i','o' and 'u'.
Input:
The first line consists of an integer T denoting the number of test cases.
Each test case consists of only one string, each character of string denoting the alphabet (may be lowercase or uppercase) on a tree in the
garden.
Output:
For each test case, print the count in a new line.
'''
if __name__ == '__main__':
T = int(input())
for _ in range(T):
s = input()
v = ['a','e','i','o','u','A','E','I','O','U']
c = 0
for i in s:
if i in v:
c += 1
print(c)
| true |
6c6053d1bdfd8084dbbd7b7ca10189184dd17cfb | Python | chuzcjoe/Leetcode | /337. House Robber 3.py | UTF-8 | 713 | 3.125 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def rob(self, root):
        """
        Maximum sum of node values such that no two chosen nodes are adjacent
        (parent/child) in the tree.
        :type root: TreeNode
        :rtype: int
        """
        def best(node):
            # Returns (take, skip): the best totals when *node* is robbed
            # versus when it is left alone.
            if not node:
                return (0, 0)
            take_left, skip_left = best(node.left)
            take_right, skip_right = best(node.right)
            take = node.val + skip_left + skip_right
            skip = max(take_left, skip_left) + max(take_right, skip_right)
            return (take, skip)
        if not root:
            return 0
        return max(best(root))
80f9c991bc75b37712ce6dd426fad3fe29d70e09 | Python | mbreault/python | /algorithms/sorting/index.py | UTF-8 | 2,465 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from functools import wraps
from time import time
import numpy as np
# from https://stackoverflow.com/questions/1622943/timeit-versus-timing-decorator
def timing(f):
    """Decorator that reports the wall-clock runtime of every call to *f*."""
    @wraps(f)
    def wrapper(*args, **kw):
        started = time()
        out = f(*args, **kw)
        elapsed = time() - started
        print('func:%r took: %2.4f sec' % (f.__name__, elapsed))
        return out
    return wrapper
@timing
def builtin(inputlist):
    """Reference implementation: Python's built-in Timsort, as a new list."""
    # https://en.wikipedia.org/wiki/Timsort
    return sorted(inputlist)
@timing
def selection(inputlist):
    """Selection sort: repeatedly move the smallest remaining element.

    Note: consumes (empties) *inputlist* and returns a new sorted list,
    exactly like the original implementation.
    """
    ordered = []
    while inputlist:
        # Index of the first minimal element of what is left.
        smallest = min(range(len(inputlist)), key=inputlist.__getitem__)
        ordered.append(inputlist.pop(smallest))
    return ordered
@timing
def bubble(inputlist):
    """Exchange sort, in place; returns the same (now ascending) list."""
    size = len(inputlist)
    for left in range(size):
        for right in range(left, size):
            if inputlist[left] > inputlist[right]:
                # Tuple unpacking swaps without the temporary variable.
                inputlist[left], inputlist[right] = inputlist[right], inputlist[left]
    return inputlist
@timing
def mergesort(inputlist):
    """Timed wrapper around the recursive merge() sort (sorts in place)."""
    return merge(inputlist)
def merge(inputlist):
    """Top-down merge sort: sorts *inputlist* in place and returns it."""
    # based on https://www.geeksforgeeks.org/merge-sort/
    if len(inputlist) > 1:
        mid = len(inputlist) // 2
        lo, hi = inputlist[:mid], inputlist[mid:]
        merge(lo)
        merge(hi)
        lo_idx = hi_idx = 0
        # Write the merged result straight back over inputlist: at each slot
        # take the smaller head, falling back to whichever half remains.
        for out_idx in range(len(inputlist)):
            take_lo = lo_idx < len(lo) and (
                hi_idx >= len(hi) or lo[lo_idx] < hi[hi_idx])
            if take_lo:
                inputlist[out_idx] = lo[lo_idx]
                lo_idx += 1
            else:
                inputlist[out_idx] = hi[hi_idx]
                hi_idx += 1
    return inputlist
def main():
    """Cross-check every sort implementation against the built-in sort."""
    size = 10**4
    sample = np.random.randint(size, size=size).tolist()
    # Each sort receives its own slice-copy, since some implementations
    # mutate or consume their input.
    expected = builtin(sample[:])
    for sort_fn in (bubble, selection, mergesort):
        assert sort_fn(sample[:]) == expected

if __name__ == '__main__':
    main()
| true |
9b78d2f4624390257522e511f0472618e1377405 | Python | xbb66kw/Bandit | /bandit_experiment/UCB1.py | UTF-8 | 8,847 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import gzip
import re
import random
from logistic_high_di import HighDimensionalLogisticRegression
class DataReader(object):
    """Streams display/click events out of the news-log files.

    Each come() call parses one log line into the shown article, the click
    outcome, a 136-dim binary user-feature vector, and the diff of the
    currently available article pool.
    """
    def __init__(self):
        self.articles_old = set()
        self.articles_new = set()
        self.line = None
        self.files_list = ['/Users/xbb/Desktop/bandit_experiment/r6b_18k_7day.txt']
        self. T = 0
        self.fin = open(self.files_list[self.T],'r')
    def come(self):
        '''
        extra, delete, key are string
        '''
        extra = set()
        delete = set()
        click = 0
        self.line = self.fin.readline()
        if not self.line:
            # Current file exhausted: advance to the next file in the list.
            # NOTE(review): later files are opened with gzip.open while the
            # first used plain open() — confirm the on-disk formats differ.
            self.T += 1
            self.fin = gzip.open(self.files_list[self.T], 'r')
            self.line = self.fin.readline()
            'If self.T >= 13, we are running out of the data.'
        cha = self.line
        # Line layout: "...id-<article> <click> ... user <feature ids> <pool>"
        matches = re.search(r"id-(\d+)\s(\d).+user\s([\s\d]+)(.+)", cha)
        article = int(matches.group(1))
        click = int(matches.group(2))
        # One-hot encode the user's feature ids into a 136-wide vector.
        covariates = np.zeros(136).astype(int)
        covariates[[int(elem) - 1 for elem in matches.group(3).split(' ') if elem != '']] = 1
        key = ''.join(map(str, covariates))
        ####guest
        # A single active feature marks a guest user (mab flag).
        mab = False
        if sum(covariates) == 1:
            mab = True
        finder = re.findall(r'\|id-(\d+)', matches.group(4))
        self.articles_new = set([int(result) for result in finder])
        if self.articles_new != self.articles_old:
            # Article pool changed: report additions and removals.
            extra = self.articles_new - self.articles_old
            delete = self.articles_old - self.articles_new
            self.articles_old = self.articles_new
        return {'covariates':covariates, 'article':article, 'click':click, 'extra':extra, 'delete':delete, 'mab':mab, 'key':key}
class Environment(object):
    """Replays the logged display data against the agents and records each
    agent's running average-reward curve."""
    def run(self, agents, data_reader, timestamp = 70000):
        # reward_curves[t, i]: agent i's average reward after its t-th matched event
        self.reward_curves = np.zeros((timestamp, len(agents)))
        self.timer = np.zeros(len(agents)).astype(int)
        self.agents = agents
        times = 0
        while np.min(self.timer) < timestamp:
            #Also in this step, arms will be refreshed
            stuff = data_reader.come()
            times += 1
            for i, agent in enumerate(agents):# agents can be [...]
                if int(np.sqrt(times)) == np.sqrt(times):
                    # progress trace, emitted on perfect-square event counts
                    print(np.sqrt(times), times, self.timer, agent.acc_reward, '714')
                if self.timer[i] < timestamp:
                    agent.update_arms(stuff)
                    agent.last_action = agent.recommend(stuff)
                    if agent.last_action == stuff['article']:
                        # Replay evaluation: the event only counts (and the
                        # agent only learns) when its choice matches the
                        # article actually shown in the log.
                        reward = stuff['click']
                        agent.update(reward, stuff)
                        agent.acc_reward += reward
                        self.reward_curves[self.timer[i], i] = agent.acc_reward / (self.timer[i] + 1)
                        self.timer[i] += 1
        print('final', times, self.timer)
    def plot(self, number_of_agents):
        # NOTE(review): label_list is only defined for 1 or 2 agents; any
        # other count raises NameError at the plt.plot call below.
        if number_of_agents == 1:
            label_list = ['Logistic']
        elif number_of_agents == 2:
            label_list = ['Logistic', 'ucb1']
        collect = {}
        for j in range(len(self.reward_curves[0,:])):
            collect[j], = plt.plot(self.reward_curves[:,j], label=label_list[j])
            # hard-coded output path for agent j's saved reward curve
            mid_ = "/Users/xbb/Desktop/bandit_experiment/model_selection_clustering/third" + str(j)
            np.save(mid_, self.reward_curves[:,j])
        if number_of_agents == 1:
            plt.legend(handles=[collect[0]])
        elif number_of_agents == 2:
            plt.legend(handles=[collect[0], collect[1]])
        else:
            plt.legend(handles=[collect[0], collect[1], collect[2]])
        x1,x2,y1,y2 = plt.axis()
        plt.axis((x1,x2,0,0.1))
        plt.show()
class ArticlesCollector(object):
    """Tracks the set of active article ids plus the most recent additions
    (extras) and removals (deletes).

    This object will be assigned to Groups and MAB object.
    """
    def __init__(self):
        self.__active_articles = set()
        self.__extras = set()
        self.__deletes = set()
    def update(self, extra, delete):
        """Apply one pool diff: drop *delete*, add *extra*, remember both."""
        self.__active_articles = (self.__active_articles - delete).union(extra)
        self.__extras = extra
        self.__deletes = delete
    @property
    def active_articles(self):
        return self.__active_articles
    @property
    def extras(self):
        return self.__extras
    @property
    def deletes(self):
        return self.__deletes
    def reset(self):
        """Forget the last diff; the active pool itself is left untouched."""
        self.__extras = set()
        self.__deletes = set()
###########
####MAB####
class MAB(object):
    '''
    UCB1 bandit over the currently active articles.  An ArticlesCollector
    object keeps this MAB's arm set in sync with the article pool.
    '''
    def __init__(self, articles_collector, alpha=0.2):
        self.articles_collector = articles_collector
        self.clicks = {}   # article id -> accumulated click reward
        self.counts = {}   # article id -> number of times the arm was played
        self.alpha = alpha # exploration weight of the UCB bonus term
    def recommend(self):
        '''Return the article with the highest UCB index.'''
        values = []
        articles = []
        # Hoisted out of the loop: the total is invariant per call (the old
        # code recomputed it for every arm).  A plain list also replaces the
        # O(n^2) np.append growth.
        # NOTE(review): UCB1 normally uses total play count here; this code
        # uses total clicks, so np.log(0) yields -inf/nan before any click.
        total_clicks = sum(self.clicks.values())
        for article in self.counts.keys():
            score = self.clicks[article] / self.counts[article] \
                + self.alpha * np.sqrt(np.log(total_clicks) / (self.counts[article] + 1))
            values.append(score)
            articles.append(article)
        return articles[np.argmax(values)]
    def update(self, reward, article):
        '''
        Record one play of *article* (a number) and its observed reward.
        '''
        self.counts[article] += 1
        self.clicks[article] += reward
    # While the node hasn't made its own decision, it still requires arm updates.
    def articles_update(self, articles_collector):
        '''Sync the arm set with the collector: add new arms, drop retired ones.'''
        current_articles_set = self.counts.keys()#set of articles(string)
        extra_articles = articles_collector.active_articles - current_articles_set
        delete_articles = current_articles_set - articles_collector.active_articles
        for article in extra_articles:
            self.counts[article] = 1
            self.clicks[article] = 0
        for article in delete_articles:
            del self.counts[article]
            del self.clicks[article]
class Agent(object):
    """Bridges the environment and the bandit: forwards arm syncs, rewards and
    recommendation requests to the underlying MAB object."""
    def __init__(self, mab_object, articles_collector):
        """Start with zero accumulated reward and no previous action.

        articles_collector is the same collector instance the MAB syncs with.
        """
        self.acc_reward = 0
        self.mab_object = mab_object              # shared bandit (used for guests)
        self.articles_collector = articles_collector
        self.last_action = ''                     # article id of the last recommendation
    def update(self, reward, stuff):
        """Feed the reward observed for the last recommendation back to the MAB."""
        # Unused lookups kept so a malformed `stuff` still raises KeyError
        # here, matching the original behaviour.
        _key = stuff['key']
        _covariates = stuff['covariates']
        self.mab_object.update(reward, self.last_action)
    def recommend(self, stuff):
        """Sync the MAB's arms, ask it for an article, and remember the choice."""
        _key = stuff['key']
        _covariates = stuff['covariates']
        self.mab_object.articles_update(self.articles_collector)
        self.last_action = self.mab_object.recommend()
        return self.last_action
    def update_arms(self, stuff):
        """Apply the article-pool diff carried in *stuff* to the collector."""
        self.articles_collector.update(stuff['extra'], stuff['delete'])
def main():
    """Wire up collector, data reader, environment and one UCB1 agent, then
    run the replay and plot the reward curve."""
    A = ArticlesCollector()
    DR = DataReader()
    E = Environment()
    ##MAB
    M = MAB(A, 0.2)  # UCB1 bandit with exploration weight alpha=0.2
    Ag = Agent(M, A)
    E.run([Ag], DR)
    E.plot(len([Ag]))
if __name__ == '__main__':
main() | true |
30a017b4248cc1248625418e10040a0e542a0e19 | Python | sohailADev/keygen | /gen.py | UTF-8 | 297 | 3.046875 | 3 | [
"MIT"
] | permissive | import random
import hashlib
def generate_key():
    """Return the SHA-256 hex digest of the fixed prefix bytes 1,2,3 plus one
    random salt byte drawn from {11, 22, 33, 44, 55}.

    Only five distinct keys are possible.  Security fix: the salt is drawn
    with the `secrets` CSPRNG; the `random` module is not suitable for
    security-sensitive values.
    """
    import secrets  # local import keeps the module's top-level imports unchanged
    salt_options = [11, 22, 33, 44, 55]
    payload = bytearray(b'\x01\x02\x03')
    payload.append(secrets.choice(salt_options))
    return hashlib.sha256(payload).hexdigest()
print(generate_key())
| true |
1f4756016a52c9b65489b8c3c5126bc0a469b2be | Python | blont714/Project-Euler | /Problem16.py | UTF-8 | 204 | 3.296875 | 3 | [] | no_license | def main():
num_str = str(2**1000)
sum = 0
for i in num_str:
sum += int(i)
print(sum)
if __name__ == "__main__":
    main()
# Output: 1366
# Runtime: 0.103s
| true |
9ef7b0e57332e915efe9051e45fa739a35f343f7 | Python | luilui163/zht | /projects/python_chen/task3.py | UTF-8 | 1,207 | 2.828125 | 3 | [] | no_license | # -*-coding: utf-8 -*-
# Python 3.6
# Author:Zhang Haitao
# Email:13163385579@163.com
# TIME:2018-10-23 09:57
# NAME:zht-task3.py
import requests
from bs4 import BeautifulSoup
def get_baidu_news_title(pages=5):
    """Scrape Baidu News result titles for the hard-coded query (the
    URL-encoded word is Wuhan University) across *pages* result pages and
    write them, one per line, to e:\\a\\titles.txt.
    """
    titles=[]
    for page in range(1,pages+1):
        url=f'http://news.baidu.com/ns?word=%E6%AD%A6%E6%B1%89%E5%A4%A7%E5%AD%A6&pn={page*20}&cl=2&ct=1&tn=news&rn=20&ie=utf-8&bt=0&et=0&rsv_page=1'
        headers={
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
        }
        r=requests.get(url,headers=headers)
        # Translation of the author's note below: without the User-Agent
        # header the crawler is not served the latest news (roughly a two-day
        # lag) — presumably Baidu's guard against other sites scraping its
        # live news feed.
        '''
        trick: 如果没有使用 headers 进行伪装成浏览器的话,获取的不是最新的新闻,
        大概又两天的滞后。 这种设定应该是百度为了防止别家新闻网站直接盗用他们的时事新闻。
        '''
        soup=BeautifulSoup(r.text,'lxml')
        results=soup.find_all('div',attrs={'class':'result',})
        for result in results:
            titles.append(result.find_all('a')[0].text.strip())
    with open(r'e:\a\titles.txt','w') as f:
        f.write('\n'.join(titles))
if __name__ == '__main__':
    get_baidu_news_title()
| true |
589e3c12b7755d38426b1c0df59c0e67990742ef | Python | programparks/Kennesaw-Capstone-Project | /Project Files/Scripts + Installation Instructions/Insert.py | UTF-8 | 11,977 | 2.609375 | 3 | [] | no_license | import json
import pyodbc
import glob
import sys
from Crawler import login
import sys
# from urllib import unquote
from urllib import parse
import requests
import re
from lxml import etree
from bs4 import BeautifulSoup
import os, json, time
from Crawler import crawl
# WARNING(security): credentials and the DB connection string are hard-coded
# in source; move them to environment variables or a config kept out of VCS.
userName = 'zdowning@students.kennesaw.edu '
passWD = 'password1234'
server = 'itcapstone.database.windows.net'
cnxn = pyodbc.connect('Driver={SQL Server};Server=tcp:itcapstone.database.windows.net,1433;Database=CAPSTONE;Uid=capstone@itcapstone;Pwd=Alumnidatabase!;Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;')
cursor = cnxn.cursor()
# Search criteria shared between the insert_* entry points and the matching
# helpers below; they are rebound via `global` statements.
alumni_first = ""
alumni_last = ""
alumni_school = ""
alumni_program = ""
alumni_degree = ""
alumni_graduation = ""
class Alumni:
    """In-memory record of one scraped alumni profile.

    NOTE(review): the class-level attributes below are shared defaults; the
    mutable list ones (alumni_education, job_history, skill_list) would be
    shared across instances if __init__ ever stopped reassigning them.
    """
    first_name = ""
    last_name = ""
    school_name = ""
    id = ""
    degree = ""
    graduation_date = ""
    linked_in = ""
    location = ""
    job_title = ""
    start_date = ""
    end_date = ""
    alumni_education = []
    job_history = []
    skill_list = []
    def __init__(self,first,last,id,linked_in,education_list,job_history,skill_list):
        """Populate the identity fields and the education/job/skill lists."""
        self.first_name = first
        self.last_name = last
        self.id = id
        self.linked_in = linked_in
        self.alumni_education = education_list
        self.job_history = job_history
        self.skill_list = skill_list
class Education:
    """One education record scraped from a profile."""
    degree = ""
    fieldOfStudy = ""
    school_name = ""
    graduation_date = ""
    def __init__(self, degree, field, schoolName, graduation_date):
        """Store the record; stray whitespace is trimmed off the date."""
        cleaned_date = graduation_date.strip()
        self.graduation_date = cleaned_date
        self.school_name = schoolName
        self.fieldOfStudy = field
        self.degree = degree
class Job:
    """One employment record scraped from a profile."""
    # Class-level defaults.  Bug fix: the originals had trailing commas
    # (e.g. `company = "",`) which made them 1-tuples instead of strings.
    title = ""
    company = ""
    startDate = ""
    endDate = ""
    def __init__(self,title,company,startDate,endDate):
        """Store the record; start/end dates are stripped of stray whitespace."""
        self.title = title
        self.company = company
        self.startDate = startDate.strip()
        self.endDate = endDate.strip()
#Reads from search_strings.txt to find one student
def insert_one():
    """Read comma-separated search rows, crawl each person, then insert them.

    Each line of search_strings.txt is "first,last,school,program,degree";
    the fields are published through the alumni_* module globals so the
    matching helpers can see them.
    """
    s = login(userName,passWD)
    search_file = open('search_strings.txt', 'r', encoding='UTF8')
    for line in search_file:
        keywords = []
        keywords = line.split(",")
        global alumni_first
        alumni_first = keywords[0]
        global alumni_last
        alumni_last = keywords[1]
        global alumni_school
        alumni_school = keywords[2]
        global alumni_program
        alumni_program = keywords[3]
        global alumni_degree
        alumni_degree = keywords[4]
        print(alumni_first)
        search_string = alumni_first + " " + alumni_last + " "+ alumni_school + " " + \
                        alumni_program + " "
        crawl(s,search_string,"one")
    Insert("one")
#Takes user input to search for multiple students
def insert_many():
    """Prompt for school/program/degree/year, crawl matches, then insert them."""
    s = login(userName, passWD)
    global alumni_school
    alumni_school = input("Enter the University's name: ")
    global alumni_program
    alumni_program = input("Enter the program name: ")
    global alumni_degree
    alumni_degree = input("Enter the degree type (BS, MS, etc.): ")
    global alumni_graduation
    alumni_graduation = input("Enter the graduation year: ")
    search_string = alumni_degree + " " + alumni_program + " " + alumni_school + " " + " " + \
                    alumni_graduation
    print(search_string)
    crawl(s,search_string,"many")
    Insert("many")
#Check if duplicate education was returned from JSON
def is_education_duplicate(education_object, education_list=()):
    """Return True if *education_object* matches a record in *education_list*.

    Records are duplicates when degree, field of study, school name and
    graduation date are all equal.  The default is an immutable empty tuple
    to avoid the shared-mutable-default pitfall.
    """
    return any(
        education_object.degree == existing.degree
        and education_object.fieldOfStudy == existing.fieldOfStudy
        and education_object.school_name == existing.school_name
        and education_object.graduation_date == existing.graduation_date
        for existing in education_list
    )
#Check if it's the alumni we're looking for
def doesSearchMatch(alumni,num):
    """Decide whether *alumni* matches the module-level search criteria.

    num == "one": require first/last name containment AND an education record
    matching school, program and degree; num == "many": education match only.
    Reads the alumni_* globals set by insert_one()/insert_many().
    """
    education_list = alumni.alumni_education
    if num == "one":
        if (alumni_first.strip() in alumni.first_name.strip()) and (alumni_last.strip() in alumni.last_name):
            for i in range(0,len(education_list)):
                print(alumni_degree[0])  # debug trace: first letter of the searched degree
                degree_stripped = education_list[i].degree.strip().lower()
                alumni_degree_stripped = alumni_degree.strip().lower()
                if len(degree_stripped) > 0:
                    # Degrees compare by first letter only (e.g. 'b' vs 'm').
                    if (alumni_program.strip() in education_list[i].fieldOfStudy) and (alumni_school.strip() in education_list[i].school_name) and \
                            (degree_stripped[0] == alumni_degree_stripped[0]):
                        return True
    if num == "many":
        for i in range(0, len(education_list)):
            degree_stripped = education_list[i].degree.strip().lower()
            alumni_degree_stripped = alumni_degree.strip().lower()
            if len(degree_stripped) > 0:
                if (alumni_program.strip() in education_list[i].fieldOfStudy) and (
                        alumni_school.strip() in education_list[i].school_name) and \
                        (degree_stripped[0] == alumni_degree_stripped[0]):
                    return True
    return False
#Check if a duplicate job was returned from JSON
def is_job_duplicate(job_object, job_list=()):
    """Return True if *job_object* matches an entry already in *job_list*.

    Jobs are duplicates when company, title, start date and end date all
    match.  The default is an immutable tuple to avoid the mutable-default
    pitfall.
    """
    return any(
        job_object.company == existing.company
        and job_object.endDate == existing.endDate
        and job_object.startDate == existing.startDate
        and job_object.title == existing.title
        for existing in job_list
    )
#Check if a duplicate skill was returned from JSON
def is_skill_duplicate(skill_object, skill_list=()):
    """Return True if *skill_object* already appears in *skill_list*."""
    # `in` performs the same element-by-element == comparison as the old loop;
    # the immutable default avoids the mutable-default-argument pitfall.
    return skill_object in skill_list
#Insert into database from JSON
def Insert(num):
    """Load every scraped people_info/*json.txt file into Alumni objects,
    keep the ones matching the current search criteria (see doesSearchMatch),
    and insert them plus their skills/education/jobs into the database.

    WARNING(security): every statement below is built by string concatenation
    of scraped values — this is SQL injection prone and breaks on values
    containing quotes.  It should be rewritten with parameterized queries
    (cursor.execute(sql, params)).
    """
    AlumniList = []
    #Get alumni information for every file
    for filename in glob.glob('people_info\*json.txt'):
        education_list = []
        job_list = []
        skill_list = []
        with open(filename.title()) as json_file:
            data = json.load(json_file)
            # Each loop keeps only the last record; name/id/url come from the
            # final nameUrlId entry.
            for fName in data['nameUrlId']:
                first_name = fName['firstName']
            for lName in data['nameUrlId']:
                last_name = lName['lastName']
            for alumniId in data['nameUrlId']:
                id = alumniId['id']  # NOTE: shadows the builtin id()
            for url in data['nameUrlId']:
                linked_in = url['linkedInUrl']
            for education in data['education']:
                degree = education['degree']
                field = education['field']
                schoolName = education['schoolName']
                graduation_date = education['endDate']
                education_object = Education(degree,field,schoolName,graduation_date)
                duplicate = is_education_duplicate(education_object,education_list)
                if duplicate != True:
                    education_list.append(education_object)
            for position in data['jobHistory']:
                title = position['title']
                company = position['company']
                startDate = position['startDate']
                endDate = position['endDate']
                job_object = Job(title,company,startDate,endDate)
                duplicate = is_job_duplicate(job_object,job_list)
                if duplicate != True:
                    job_list.append(job_object)
            for skill in data['skills']:
                skill_name = skill['skill']
                duplicate = is_skill_duplicate(skill_name,skill_list)
                if duplicate != True:
                    skill_list.append(skill_name)
            json_file.close()
        os.remove(filename)
        AlumniList.append(Alumni(first_name,last_name,id,linked_in,education_list,job_list,skill_list))
    for alumni in AlumniList:
        #Does the search match what we're looking for?
        search_matches = doesSearchMatch(alumni,num)
        if search_matches == True:
            print(alumni_first + " " + alumni_last + " " + alumni_school)
            print("Does the search match? " + str(search_matches))
            try:#
                cursor.execute("Insert Into dbo.Alumni(alumni_id,first_name,last_name,linkedid_link,school,education_name,degree) " +
                               "Values(" + "\'" + alumni.id + "\'" + ","
                               "\'" + alumni.first_name + "\'" + ","
                               "\'" + alumni.last_name + "\'" + ","
                               "\'" + alumni.linked_in + "\'" + ","
                               "\'" + alumni_school + "\'" + ","
                               "\'" + alumni_program + "\'" +","
                               "\'" + alumni_degree + "\'" +
                               ")")
            except pyodbc.IntegrityError:
                # Alumni row already exists: skip this person entirely.
                print("Primary Key Violation")
                continue;
            educations = alumni.alumni_education
            jobs = alumni.job_history
            skills = alumni.skill_list
            for skill in skills:
                cursor.execute("Insert Into dbo.Skills(alumni_id,skill_names) " +
                               "Values(" + "\'" + alumni.id + "\'" + ","
                               "\'" + skill + "\'" +
                               ")")
            for education in educations:
                print(education.degree + education.fieldOfStudy + ' ' + education.school_name + ' ' + education.graduation_date)
                cursor.execute("Insert Into dbo.Education(alumni_id,education_name,school,degree) " +
                               "Values(" + "\'" + alumni.id + "\'" + ","
                               "\'" + education.fieldOfStudy + "\'" + ","
                               "\'" + education.school_name + "\'" + ","
                               "\'" + education.degree + "\'"
                               ")")
                if education.graduation_date != "":
                    cursor.execute("Update dbo.Education Set graduation_date = " + "\'" + education.graduation_date + "\'" + "WHERE alumni_id = " + "\'" +
                                   alumni.id + "\'")
            for job in jobs:
                cursor.execute("Insert Into dbo.Jobs(title,company,alumni_id) " +
                               "Values(" + "\'" + job.title + "\'" + ","
                               "\'" + job.company + "\'" + ","
                               "\'" + alumni.id + "\'" +
                               ")")
                if job.startDate != "":
                    job.startDate = job.startDate.replace(".", "-")#Replace the dot with a dash for the date format
                    job.startDate = job.startDate + "-01"
                    cursor.execute(
                        "Update dbo.Jobs Set startdate = " + "\'" + job.startDate + "\'" + "WHERE alumni_id = " + "\'" +
                        alumni.id + "\'" + "and title =" + "\'" + job.title + "\'" + "and company =" + "\'" + job.company + "\'")
                if job.endDate != "Now":
                    job.endDate = job.endDate.replace(".","-")
                    job.endDate = job.endDate + "-01"
                if job.endDate != "Now":
                    cursor.execute(
                        "Update dbo.Jobs Set enddate = " + "\'" + job.endDate + "\'" + "WHERE startdate = " + "\'" +
                        job.startDate + "\'" + "and title =" + "\'" + job.title + "\'" + "and company =" + "\'" + job.company + "\'" +
                        "and alumni_id =" + "\'" + alumni.id + "\'")
                print(job.title + ' ' + job.company + ' ' + job.startDate + ' ' + job.endDate)
if __name__ == "__main__":
    insert_one()
    cnxn.commit()  # persist all inserts performed by Insert()
| true |
2fbf0cac41e8a9c0ea4d2acd8afed0e1a4201686 | Python | Semal31/Gedcom-parser-group1 | /test_parser.py | UTF-8 | 88,881 | 2.84375 | 3 | [] | no_license | import pytest
from parser import *
# Generic individuals dict that should pass most tests
CORRECT_INDIVIDUALS = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
# Companion fixture to CORRECT_INDIVIDUALS: the @F2@ family linking the four
# individuals above.  NOTE(review): MARR (2020) is *after* DIV (2018), so this
# data would not satisfy a marriage-before-divorce check — it is only meant to
# pass the checks the "correct" tests below use it for.
CORRECT_FAMILIES = {
    "@F2@": {
        "HUSB": "@I3@",
        "WIFE": "@I4@",
        "CHIL": ["@I1@", "@I5@"],
        "MARR": "8 AUG 2020",
        "DIV": "30 DEC 2018",
    }
}
def test_check_marriage_divorce_dates_with_correct_dates():
    """Marriage (8 AUG 1991) precedes divorce (30 DEC 2018): check passes."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {}
    assert check_marriage_divorce_dates(families, individuals) == True
def test_check_marriage_divorce_dates_with_incorrect_dates():
    """@F2@'s marriage (8 AUG 2020) is after its divorce (30 DEC 2018): fails."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 2020",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert check_marriage_divorce_dates(families, individuals) == False
def test_children_before_death_with_correct_families():
    """Both children are born before either parent dies, so the check passes.

    The inline fixtures this test previously built were byte-for-byte copies
    of the module-level CORRECT_FAMILIES / CORRECT_INDIVIDUALS constants, so
    use the shared constants instead of duplicating ~40 lines of data.
    """
    assert children_before_death(CORRECT_FAMILIES, CORRECT_INDIVIDUALS) == True
def test_children_before_death_with_incorrect_families():
    """Child @I8@ (born 2020) arrives after both parents died (1986 / 2009)."""
    families = {
        "@F7@": {"HUSB": "@I13@", "WIFE": "@I14@", "CHIL": ["@I8@"], "MARR": ""}
    }
    individuals = {
        "@I8@": {
            "NAME": "June /Vanderzee/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "4 APR 2020",
            "FAMS": "@F4@",
            "FAMC": "@F7@",
        },
        "@I13@": {
            "NAME": "Peter /Vanderzee/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "10 JUL 1911",
            "DEAT": "Y",
            "DEATH_DATE": "26 NOV 1986",
            "FAMS": "@F7@",
        },
        "@I14@": {
            "NAME": "Olive /Heritage/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "7 JUN 1919",
            "DEAT": "Y",
            "DEATH_DATE": "13 OCT 2009",
            "FAMS": "@F7@",
        },
    }
    assert children_before_death(families, individuals) == False
def test_US05_valid():
    """Marriage (1991) happens before the husband's death (2007): us_05 passes."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_05(families, individuals) == True
def test_US05_invalid():
    """Marriage (2020) is after the husband's death (2007): us_05 fails."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 2020",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_05(families, individuals) == False
def test_US10_valid():
    """Spouses born 1962/1970 marry in 1991 (both well past 14): us_10 passes."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_10(families, individuals) == True
def test_US10_invalid():
    """Husband born 2012 marries in 2020 (younger than 14): us_10 fails."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 2020",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2012",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2020",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_10(families, individuals) == False
def test_check_birth_before_marriage_valid():
    """Both spouses are born (2012 / 1970) before the 2020 marriage: passes."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 2020",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2012",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2020",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert check_birth_before_marriage(families, individuals) == True
def test_check_birth_before_marriage_invalid():
    """Husband's birth (2012) is after the 1802 marriage date: check fails."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1802",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2012",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2020",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert check_birth_before_marriage(families, individuals) == False
def test_check_age_valid():
    """All individuals are younger than 150 years, so check_age passes.

    The original fixture hard-coded the birth date "1 JAN 1872", which made
    the test time-dependent: it passed when written (age 149) but fails once
    the current year advances past 2021.  Build a birth date exactly 149
    years before today so the individual always sits just under the limit.
    """
    from datetime import date

    today = date.today()
    try:
        birth = today.replace(year=today.year - 149)
    except ValueError:  # today is Feb 29 and the target year is not a leap year
        birth = today.replace(year=today.year - 149, day=28)
    # GEDCOM-style date string, e.g. "1 JAN 1872" (upper-case 3-letter month).
    birth_str = f"{birth.day} {birth.strftime('%b').upper()} {birth.year}"
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2012",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2020",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": birth_str,
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert check_age(individuals) == True
def test_check_age_invalid():
    """An individual older than 150 years makes check_age fail.

    The original fixture hard-coded "1 JAN 1871", relying on the then-current
    year to make the age exactly 150.  Build a birth date 151 years before
    today instead so the individual is always over the limit regardless of
    when the suite runs.
    """
    from datetime import date

    today = date.today()
    try:
        birth = today.replace(year=today.year - 151)
    except ValueError:  # today is Feb 29 and the target year is not a leap year
        birth = today.replace(year=today.year - 151, day=28)
    # GEDCOM-style date string, e.g. "1 JAN 1871" (upper-case 3-letter month).
    birth_str = f"{birth.day} {birth.strftime('%b').upper()} {birth.year}"
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2012",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2020",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": birth_str,
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert check_age(individuals) == False
def test_dates_before_current_valid():
    """All dates in the file are in the past; needs myfamily.ged on disk."""
    assert dates_before_current("myfamily.ged") == True
def test_dates_before_current_invalid():
    """The test file contains a future date; needs testUS01_myfamily.ged on disk."""
    assert dates_before_current("testUS01_myfamily.ged") == False
def test_divorce_before_death_bothDead_invalid():
    """Both spouses' deaths (5 APR 1600) precede the divorce (30 DEC 2018).

    Trimmed to the one family actually exercised: only @F2@ records a DIV
    date, so the seven other families and fourteen extra individuals in the
    original fixture never influenced the result.  Kept entries are verbatim
    copies of the originals.
    """
    families = {
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I5@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "29 JUL 1994",
            "FAMS": "@F5@",
            "FAMC": "@F2@",
        },
    }
    assert divorce_before_death(families, individuals) == False
def test_divorce_before_death_husbDead_invalid():
    """The husband's death (5 APR 1600) precedes the divorce (30 DEC 2018).

    Trimmed to the one family actually exercised: only @F2@ records a DIV
    date, so the seven other families and fourteen extra individuals in the
    original fixture never influenced the result.  Kept entries are verbatim
    copies of the originals.
    """
    families = {
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I5@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "29 JUL 1994",
            "FAMS": "@F5@",
            "FAMC": "@F2@",
        },
    }
    assert divorce_before_death(families, individuals) == False
def test_divorce_before_death_wifeDead_invalid():
    """The wife's death (5 APR 1600) precedes the divorce (30 DEC 2018).

    Trimmed to the one family actually exercised: only @F2@ records a DIV
    date, so the seven other families and fourteen extra individuals in the
    original fixture never influenced the result.  Kept entries are verbatim
    copies of the originals.
    """
    families = {
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I5@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "29 JUL 1994",
            "FAMS": "@F5@",
            "FAMC": "@F2@",
        },
    }
    assert divorce_before_death(families, individuals) == False
def test_divorce_before_death_valid():
    """Divorce (30 DEC 2018) with both spouses still alive: check passes.

    Trimmed to the one family actually exercised: only @F2@ records a DIV
    date, so the seven other families and fourteen extra individuals in the
    original fixture never influenced the result.  Kept entries are verbatim
    copies of the originals (neither spouse has a DEAT/DEATH_DATE record).
    """
    families = {
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I5@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "29 JUL 1994",
            "FAMS": "@F5@",
            "FAMC": "@F2@",
        },
    }
    assert divorce_before_death(families, individuals) == True
def test_US03_valid():
    """Birth (1962) precedes death (2007): us_03 passes."""
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_03(individuals) == True
def test_US03_invalid():
    """@I3@'s death (2009) is before his birth (2012): us_03 fails."""
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2012",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2009",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "Diana /Chaney/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_03(individuals) == False
def test_US08_valid():
    """us_08 passes: the only family record carries no MARR date for @I3@'s
    birth (2 DEC 2000) to conflict with."""
    family_records = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
    }
    child_record = {
        "NAME": "Michael /Cooke/",
        "SEX": "M",
        "BIRT": "",
        "DATE": "2 DEC 2000",
        "FAMC": "@F1@",
    }
    assert us_08(family_records, {"@I3@": child_record}) == True
def test_US08_invalid():
    """us_08 fails: @I3@ is born 2 DEC 1998, before @F1@'s marriage on
    15 APR 1999."""
    # NOTE(review): "CHIL" is a bare string here, while other fixtures use a
    # list — confirm us_08 tolerates both forms.
    family_records = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "MARR": "15 APR 1999", "CHIL": "@I3@"}
    }
    child_record = {
        "NAME": "Michael /Cooke/",
        "SEX": "M",
        "BIRT": "",
        "DATE": "2 DEC 1998",
        "FAMC": "@F1@",
    }
    assert us_08(family_records, {"@I3@": child_record}) == False
def test_US14_valid():
    """Five siblings with at most two sharing a birth date: us_14 passes."""
    families = {
        "@F1@": {
            "HUSB": "@I1@",
            "WIFE": "@I2@",
            "MARR": "15 APR 1999",
            "CHIL": ["@I3@", "@I4@", "@I5@", "@I6@", "@I7@"],
        }
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I4@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "3 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I5@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "7 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I6@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I7@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "5 DEC 1962",
            "FAMC": "@F1@",
        },
    }
    assert us_14(families, individuals) == True
def test_US14_invalid():
    """All five siblings share the birth date "2 DEC 1962": us_14 fails."""
    families = {
        "@F1@": {
            "HUSB": "@I1@",
            "WIFE": "@I2@",
            "MARR": "15 APR 1999",
            "CHIL": ["@I3@", "@I4@", "@I5@", "@I6@", "@I7@"],
        }
    }
    individuals = {
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I4@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I5@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I6@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
        "@I7@": {
            "NAME": "Michael /Cooke/",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "FAMC": "@F1@",
        },
    }
    assert us_14(families, individuals) == False
def test_US19_valid():
    """No family lists children, so the @F4@ spouses are not first cousins:
    us_19 passes."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "", "MARR": "15 APR 1999"},
        "@F2@": {"HUSB": "@I2@", "WIFE": "", "MARR": "15 APR 1999"},
        "@F3@": {"HUSB": "@I3@", "WIFE": "", "MARR": "15 APR 1999"},
        "@F4@": {"HUSB": "@I4@", "WIFE": "@I5@", "MARR": "15 APR 1999"},
    }
    individuals = {
        "@I1@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMS": "@F1@",
        },
        "@I2@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F1@",
            "FAMS": "@F2@",
        },
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMS": "@F3@",
        },
        "@I4@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F2@",
            "FAMS": "@F4@",
        },
        "@I5@": {
            "NAME": "Michael /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F3@",
            "FAMS": "@F4@",
        },
    }
    assert us_19(families, individuals) == True
def test_US19_invalid():
    """@I4@ and @I5@ (married in @F4@) are children of siblings @I2@ and @I3@
    — first cousins — so us_19 fails."""
    families = {
        "@F1@": {
            "HUSB": "@I1@",
            "WIFE": "",
            "MARR": "15 APR 1999",
            "CHIL": ["@I2@", "@I3@"],
        },
        "@F2@": {"HUSB": "@I2@", "WIFE": "", "MARR": "15 APR 1999", "CHIL": ["@I4@"]},
        "@F3@": {"HUSB": "@I3@", "WIFE": "", "MARR": "15 APR 1999", "CHIL": ["@I5@"]},
        "@F4@": {"HUSB": "@I4@", "WIFE": "@I5@", "MARR": "15 APR 1999"},
    }
    individuals = {
        "@I1@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMS": "@F1@",
        },
        "@I2@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F1@",
            "FAMS": "@F2@",
        },
        "@I3@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F1@",
            "FAMS": "@F3@",
        },
        "@I4@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F2@",
            "FAMS": "@F4@",
        },
        "@I5@": {
            "NAME": "Michael /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "FAMC": "@F3@",
            "FAMS": "@F4@",
        },
    }
    assert us_19(families, individuals) == False
def test_US16_valid():
    """All male members of the family share the surname /Cooke/: us_16 passes."""
    # Male last names
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I1@",
            "WIFE": "@I4@",
            "CHIL": ["@I2@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I2@": {
            "NAME": "Henry /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I5@": {
            "NAME": "Diana /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 2000",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_16(families, individuals) == True
def test_US16_invalid():
    """Male child @I2@ is /Smith/ while his father is /Cooke/: us_16 fails."""
    # Male last names
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I1@",
            "WIFE": "@I4@",
            "CHIL": ["@I2@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I2@": {
            "NAME": "Henry /Smith/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I5@": {
            "NAME": "Diana /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 2000",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert us_16(families, individuals) == False
def test_US21_valid():
    """Husband is "M" and wife is "F" in every family: us_21 passes."""
    # Correct gender for role
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I1@",
            "WIFE": "@I4@",
            "CHIL": ["@I2@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Michael /Cooke/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I2@": {
            "NAME": "Diana /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 2000",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I4@": {
            "NAME": "Theresa /Fox/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
    }
    assert us_21(families, individuals) == True
def test_US21_invalid():
    """Husband @I1@ is "F" and wife @I4@ is "M": us_21 fails."""
    # Correct gender for role
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I1@",
            "WIFE": "@I4@",
            "CHIL": ["@I2@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Michael /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I2@": {
            "NAME": "Diana /Cooke/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 2000",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I4@": {
            "NAME": "Theresa /Fox/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 2000",
            "DEAT": "",
            "DEATH_DATE": "9 SEP 2007",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
    }
    assert us_21(families, individuals) == False
def test_fewer_than_15_children_correct():
    """A family with a single child passes the fewer-than-15-children check.

    Fix: "CHIL" was a bare string "@I3@" while every other fixture uses a
    list; the original only passed because len("@I3@") == 4 happens to be
    below 15.  Use a one-element list, matching the 16-element list in the
    companion "incorrect" test.
    """
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "MARR": "15 APR 1999", "CHIL": ["@I3@"]}
    }
    assert fewer_than_15_children(families) == True
def test_fewer_than_15_children_incorrect():
    """A family with 16 children (IDs repeat; only the count matters) fails."""
    families = {
        "@F1@": {
            "HUSB": "@I1@",
            "WIFE": "@I2@",
            "MARR": "15 APR 1999",
            "CHIL": [
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
                "@I2@",
                "@I5@",
            ],
        }
    }
    assert fewer_than_15_children(families) == False
def test_uncle_aunts_cannot_marry_nieces_nephews_correct():
    """No marriage pairs an aunt/uncle with a niece/nephew: check passes."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I2@": {
            "NAME": "Alyssa /Bottesi/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "30 APR 1999",
            "FAMS": "@F1@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
    }
    assert uncle_aunts_cannot_marry_nieces_nephews(families, individuals) == True
def test_uncle_aunts_cannot_marry_nieces_nephews_incorrect():
    """@I2@ is @I3@'s sibling (both children of @F3@) and @I3@ is @I1@'s
    parent, so the @F1@ marriage of @I1@ and @I2@ is an aunt/nephew marriage
    and the check fails.

    Trimmed to the three families and seven individuals the scenario actually
    involves; the original fixture carried eleven extra individuals and six
    extra families that never affected the result.  Kept entries are verbatim
    copies of the originals.
    """
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
        "@F3@": {
            "HUSB": "@I11@",
            "WIFE": "@I12@",
            "CHIL": ["@I3@", "@I2@"],
            "MARR": "Y",
        },
    }
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I2@": {
            "NAME": "Alyssa /Bottesi/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "30 APR 1999",
            "FAMS": "@F1@",
            "FAMC": "@F3@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "2 DEC 1962",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "DEAT": "",
            "DEATH_DATE": "5 APR 1600",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I5@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "29 JUL 1994",
            "FAMS": "@F5@",
            "FAMC": "@F2@",
        },
        "@I11@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "8 JAN 1930",
            "DEAT": "Y",
            "DEATH_DATE": "6 JAN 1990",
            "FAMS": "@F3@",
        },
        "@I12@": {
            "NAME": "Leona /Layton/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "5 AUG 1936",
            "FAMS": "@F3@",
        },
    }
    assert uncle_aunts_cannot_marry_nieces_nephews(families, individuals) == False
def test_siblings_spacing_correct():
    """Sibling births in the shared fixture are years apart: check passes."""
    assert siblings_could_be_born(CORRECT_INDIVIDUALS, CORRECT_FAMILIES) == True
def test_siblings_spacing_incorrect():
    """Siblings @I1@ (11 NOV 1999) and @I5@ (16 NOV 1999) are five days apart
    — neither twins nor eight months separated — so the check fails."""
    individuals = {
        "@I1@": {
            "NAME": "Ryan /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "11 NOV 1999",
            "FAMS": "@F9@",
            "FAMC": "@F2@",
        },
        "@I3@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "16 NOV 1999",
            "FAMS": "@F2@",
            "FAMC": "@F3@",
        },
        "@I4@": {
            "NAME": "June /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "1 OCT 1970",
            "FAMS": "@F2@",
            "FAMC": "@F4@",
        },
        "@I5@": {
            "NAME": "Thomas /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "16 NOV 1999",  # Should fail here
            "FAMS": "@F5@",
            "FAMC": "@F2@",
        },
    }
    families = {
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 2020",
            "DIV": "30 DEC 2018",
        }
    }
    assert siblings_could_be_born(individuals, families) == False
def test_incest_among_siblings_correct():
    """The known-good fixture contains no sibling marriages, so the check passes."""
    outcome = siblings_do_not_marry(CORRECT_INDIVIDUALS, CORRECT_FAMILIES)
    assert outcome == True
def test_incest_among_siblings_incorrect():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "16 NOV 1999", # Should fail here
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
families = {
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "8 AUG 2020",
"DIV": "30 DEC 2018",
},
"@F3@": {
"HUSB": "@I1@",
"WIFE": "@I5@",
"CHIL": [],
"MARR": "8 AUG 2020",
"DIV": "30 DEC 2018",
},
}
assert siblings_do_not_marry(individuals, families) == False
# US12
def test_parents_not_too_old_valid():
families = {
"@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "8 AUG 1991",
"DIV": "30 DEC 2018",
},
"@F3@": {"HUSB": "@I11@", "WIFE": "@I12@", "CHIL": ["@I3@"]},
"@F4@": {"HUSB": "@I7@", "WIFE": "@I8@", "CHIL": ["@I4@", "@I9@", "@I10@"]},
"@F5@": {"HUSB": "@I5@", "WIFE": "@I6@", "DATE": "31 JUL 2020"},
"@F6@": {"HUSB": "@I15@", "WIFE": "@I16@", "CHIL": ["@I7@"]},
"@F7@": {"HUSB": "@I13@", "WIFE": "@I14@", "CHIL": ["@I8@"]},
"@F8@": {"HUSB": "@I17@", "WIFE": "@I16@"},
"@F9@": {"HUSB": "@I1@", "CHIL": ["@I18@"]},
}
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Alyssa /Bottesi/",
"SEX": "F",
"BIRT": "",
"DATE": "30 APR 1999",
"FAMS": "@F1@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Felisha /Kissel/",
"SEX": "F",
"BIRT": "",
"DATE": "12 MAY 1994",
"FAMS": "@F5@",
},
"@I7@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "3 NOV 1949",
"FAMS": "@F4@",
"FAMC": "@F6@",
},
"@I8@": {
"NAME": "June /Vanderzee/",
"SEX": "F",
"BIRT": "",
"DATE": "4 APR 1950",
"FAMS": "@F4@",
"FAMC": "@F7@",
},
"@I9@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "27 SEP 1972",
"FAMC": "@F4@",
},
"@I10@": {
"NAME": "Lynn-marie /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "10 AUG 1976",
"FAMC": "@F4@",
},
"@I11@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "8 JAN 1930",
"DEAT": "Y",
"DEATH_DATE": "6 JAN 1990",
"FAMS": "@F3@",
},
"@I12@": {
"NAME": "Leona /Layton/",
"SEX": "F",
"BIRT": "",
"DATE": "5 AUG 1936",
"FAMS": "@F3@",
},
"@I13@": {
"NAME": "Peter /Vanderzee/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUL 1911",
"DEAT": "Y",
"DEATH_DATE": "26 NOV 1986",
"FAMS": "@F7@",
},
"@I14@": {
"NAME": "Olive /Heritage/",
"SEX": "F",
"BIRT": "",
"DATE": "7 JUN 1919",
"DEAT": "Y",
"DEATH_DATE": "13 OCT 2009",
"FAMS": "@F7@",
},
"@I15@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "2 MAR 1916",
"DEAT": "Y",
"DEATH_DATE": "8 JUN 1966",
"FAMS": "@F6@",
},
"@I16@": {
"NAME": "Beatrice /Meyne/",
"SEX": "F",
"BIRT": "",
"DATE": "29 MAY 1914",
"DEAT": "Y",
"DEATH_DATE": "26 APR 2005",
"FAMS": "@F8@",
},
"@I17@": {
"NAME": "Gerrit /Dijkstra/",
"SEX": "M",
"BIRT": "",
"DATE": "13 SEP 1920",
"DEAT": "Y",
"DEATH_DATE": "11 SEP 2001",
"FAMS": "@F8@",
},
"@I18@": {
"NAME": "Sage /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUN 2020",
"FAMC": "@F9@",
},
}
assert parents_not_too_old(families, individuals) == True
def test_parents_not_too_old_invalid():
families = {
"@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "8 AUG 1991",
"DIV": "30 DEC 2018",
},
"@F3@": {"HUSB": "@I11@", "WIFE": "@I12@", "CHIL": ["@I3@"]},
"@F4@": {"HUSB": "@I7@", "WIFE": "@I8@", "CHIL": ["@I4@", "@I9@", "@I10@"]},
"@F5@": {"HUSB": "@I5@", "WIFE": "@I6@", "DATE": "31 JUL 2020"},
"@F6@": {"HUSB": "@I15@", "WIFE": "@I16@", "CHIL": ["@I7@"]},
"@F7@": {"HUSB": "@I13@", "WIFE": "@I14@", "CHIL": ["@I8@"]},
"@F8@": {"HUSB": "@I17@", "WIFE": "@I16@"},
"@F9@": {"HUSB": "@I1@", "CHIL": ["@I18@"]},
}
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Alyssa /Bottesi/",
"SEX": "F",
"BIRT": "",
"DATE": "30 APR 1999",
"FAMS": "@F1@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Felisha /Kissel/",
"SEX": "F",
"BIRT": "",
"DATE": "12 MAY 1994",
"FAMS": "@F5@",
},
"@I7@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "3 NOV 1949",
"FAMS": "@F4@",
"FAMC": "@F6@",
},
"@I8@": {
"NAME": "June /Vanderzee/",
"SEX": "F",
"BIRT": "",
"DATE": "4 APR 1950",
"FAMS": "@F4@",
"FAMC": "@F7@",
},
"@I9@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "27 SEP 1972",
"FAMC": "@F4@",
},
"@I10@": {
"NAME": "Lynn-marie /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "10 AUG 1976",
"FAMC": "@F4@",
},
"@I11@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "8 JAN 1930",
"DEAT": "Y",
"DEATH_DATE": "6 JAN 1990",
"FAMS": "@F3@",
},
"@I12@": {
"NAME": "Leona /Layton/",
"SEX": "F",
"BIRT": "",
"DATE": "5 AUG 1936",
"FAMS": "@F3@",
},
"@I13@": {
"NAME": "Peter /Vanderzee/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUL 1911",
"DEAT": "Y",
"DEATH_DATE": "26 NOV 1986",
"FAMS": "@F7@",
},
"@I14@": {
"NAME": "Olive /Heritage/",
"SEX": "F",
"BIRT": "",
"DATE": "7 JUN 1919",
"DEAT": "Y",
"DEATH_DATE": "13 OCT 2009",
"FAMS": "@F7@",
},
"@I15@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "2 MAR 1837",
"DEAT": "Y",
"DEATH_DATE": "8 JUN 1966",
"FAMS": "@F6@",
},
"@I16@": {
"NAME": "Beatrice /Meyne/",
"SEX": "F",
"BIRT": "",
"DATE": "29 MAY 1914",
"DEAT": "Y",
"DEATH_DATE": "26 APR 2005",
"FAMS": "@F8@",
},
"@I17@": {
"NAME": "Gerrit /Dijkstra/",
"SEX": "M",
"BIRT": "",
"DATE": "13 SEP 1920",
"DEAT": "Y",
"DEATH_DATE": "11 SEP 2001",
"FAMS": "@F8@",
},
"@I18@": {
"NAME": "Sage /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUN 2020",
"FAMC": "@F9@",
},
}
assert parents_not_too_old(families, individuals) == False
# US17
def test_check_marriage_to_descendants_valid():
    """US17 happy path: no family lists a spouse among its own descendants."""
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I4@",
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
        "@F3@": {"HUSB": "@I11@", "WIFE": "@I12@", "CHIL": ["@I3@"]},
        "@F4@": {"HUSB": "@I7@", "WIFE": "@I8@", "CHIL": ["@I4@", "@I9@", "@I10@"]},
        "@F5@": {"HUSB": "@I5@", "WIFE": "@I6@", "DATE": "31 JUL 2020"},
        "@F6@": {"HUSB": "@I15@", "WIFE": "@I16@", "CHIL": ["@I7@"]},
        "@F7@": {"HUSB": "@I13@", "WIFE": "@I14@", "CHIL": ["@I8@"]},
        "@F8@": {"HUSB": "@I17@", "WIFE": "@I16@"},
        "@F9@": {"HUSB": "@I1@", "CHIL": ["@I18@"]},
    }
    assert check_marriage_to_descendants(families) == True
def test_check_marriage_to_descendants_invalid():
    """US17 failure path.

    "@F2@" lists "@I5@" both as WIFE and as one of its own children — a
    marriage to a descendant — so the check must report False.
    """
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
        "@F2@": {
            "HUSB": "@I3@",
            "WIFE": "@I5@",  # spouse is also in this family's CHIL list
            "CHIL": ["@I1@", "@I5@"],
            "MARR": "8 AUG 1991",
            "DIV": "30 DEC 2018",
        },
        "@F3@": {"HUSB": "@I11@", "WIFE": "@I12@", "CHIL": ["@I3@"]},
        "@F4@": {"HUSB": "@I7@", "WIFE": "@I8@", "CHIL": ["@I4@", "@I9@", "@I10@"]},
        "@F5@": {"HUSB": "@I5@", "WIFE": "@I6@", "DATE": "31 JUL 2020"},
        "@F6@": {"HUSB": "@I15@", "WIFE": "@I17@", "CHIL": ["@I7@"]},
        "@F7@": {"HUSB": "@I8@", "WIFE": "@I14@", "CHIL": ["@I8@"]},  # @I8@ is both HUSB and child here
        "@F8@": {"HUSB": "@I17@", "WIFE": "@I16@"},
        "@F9@": {"HUSB": "@I1@", "CHIL": ["@I18@"]},
    }
    assert check_marriage_to_descendants(families) == False
def test_unique_names():
    """All names in the known-good fixture are unique, so the check passes."""
    outcome = names_are_unique(CORRECT_INDIVIDUALS)
    assert outcome == True
def test_unique_names_invalid():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
assert names_are_unique(individuals) == False
def test_unique_names_duplicate():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Thomas /Hartmans/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
assert names_are_unique(individuals) == True
def test_no_deceased():
    """The known-good fixture yields zero deceased individuals."""
    deceased_count = list_deceased(CORRECT_INDIVIDUALS)
    assert deceased_count == 0
def test_deceased():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Thomas /Hartmans/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1995",
"FAMS": "@F5@",
"FAMC": "@F2@",
"DEATH_DATE": "04 APR 2021",
},
}
assert list_deceased(individuals) == 1
def test_list_over_30_and_single_valid():
    """The known-good fixture passes the over-30-and-single listing check."""
    assert list_over_30_and_single(CORRECT_INDIVIDUALS) == True
def test_list_over_30_and_single_invalid():
    """Failure path for the over-30-and-single check (expected False).

    Renamed: this function previously reused the name
    test_list_over_30_and_single_valid, shadowing the happy-path test
    defined just above it, so pytest only ever collected this one and
    the valid case silently never ran.
    """
    individuals = {
        "@I9@": {
            "NAME": "Peter /Lagaveen/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "27 SEP 1972",
            "FAMC": "@F4@",
        },
        "@I10@": {
            "NAME": "Lynn-marie /Lagaveen/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "10 AUG 1976",
            "FAMC": "@F4@",
        },
        "@I18@": {
            "NAME": "Sage /Hartman/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "10 JUN 2020",
            "FAMC": "@F9@",
        },
    }
    assert list_over_30_and_single(individuals) == False
def test_order_siblings_by_age_multiple_siblings():
families = {
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I5@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "8 AUG 1991",
"DIV": "30 DEC 2018",
},
"@F4@": {"HUSB": "@I7@", "WIFE": "@I8@", "CHIL": ["@I4@", "@I9@", "@I10@"]},
}
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Alyssa /Bottesi/",
"SEX": "F",
"BIRT": "",
"DATE": "30 APR 1999",
"FAMS": "@F1@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Felisha /Kissel/",
"SEX": "F",
"BIRT": "",
"DATE": "12 MAY 1994",
"FAMS": "@F5@",
},
"@I7@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "3 NOV 1949",
"FAMS": "@F4@",
"FAMC": "@F6@",
},
"@I8@": {
"NAME": "June /Vanderzee/",
"SEX": "F",
"BIRT": "",
"DATE": "4 APR 1950",
"FAMS": "@F4@",
"FAMC": "@F7@",
},
"@I9@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "27 SEP 1972",
"FAMC": "@F4@",
},
"@I10@": {
"NAME": "Lynn-marie /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "10 AUG 1976",
"FAMC": "@F4@",
},
"@I11@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "8 JAN 1930",
"DEAT": "Y",
"DEATH_DATE": "6 JAN 1990",
"FAMS": "@F3@",
},
"@I12@": {
"NAME": "Leona /Layton/",
"SEX": "F",
"BIRT": "",
"DATE": "5 AUG 1936",
"FAMS": "@F3@",
},
"@I13@": {
"NAME": "Peter /Vanderzee/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUL 1911",
"DEAT": "Y",
"DEATH_DATE": "26 NOV 1986",
"FAMS": "@F7@",
},
"@I14@": {
"NAME": "Olive /Heritage/",
"SEX": "F",
"BIRT": "",
"DATE": "7 JUN 1919",
"DEAT": "Y",
"DEATH_DATE": "13 OCT 2009",
"FAMS": "@F7@",
},
"@I15@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "2 MAR 1916",
"DEAT": "Y",
"DEATH_DATE": "8 JUN 1966",
"FAMS": "@F6@",
},
"@I16@": {
"NAME": "Beatrice /Meyne/",
"SEX": "F",
"BIRT": "",
"DATE": "29 MAY 1914",
"DEAT": "Y",
"DEATH_DATE": "26 APR 2005",
"FAMS": "@F8@",
},
"@I17@": {
"NAME": "Gerrit /Dijkstra/",
"SEX": "M",
"BIRT": "",
"DATE": "13 SEP 1920",
"DEAT": "Y",
"DEATH_DATE": "11 SEP 2001",
"FAMS": "@F8@",
},
"@I18@": {
"NAME": "Sage /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUN 2020",
"FAMC": "@F9@",
},
}
assert order_siblings_by_age(families, individuals) == True
def test_order_siblings_by_age_no_siblings():
families = {
"@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "8 AUG 1991",
"DIV": "30 DEC 2018",
},
"@F3@": {"HUSB": "@I11@", "WIFE": "@I12@", "CHIL": ["@I3@"]},
"@F4@": {"HUSB": "@I7@", "WIFE": "@I8@", "CHIL": ["@I4@", "@I9@", "@I10@"]},
"@F5@": {"HUSB": "@I5@", "WIFE": "@I6@", "DATE": "31 JUL 2020"},
"@F6@": {"HUSB": "@I15@", "WIFE": "@I16@", "CHIL": ["@I7@"]},
"@F7@": {"HUSB": "@I13@", "WIFE": "@I14@", "CHIL": ["@I8@"]},
"@F8@": {"HUSB": "@I17@", "WIFE": "@I16@"},
"@F9@": {"HUSB": "@I1@", "CHIL": ["@I18@"]},
}
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Alyssa /Bottesi/",
"SEX": "F",
"BIRT": "",
"DATE": "30 APR 1999",
"FAMS": "@F1@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "29 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Felisha /Kissel/",
"SEX": "F",
"BIRT": "",
"DATE": "12 MAY 1994",
"FAMS": "@F5@",
},
"@I7@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "3 NOV 1949",
"FAMS": "@F4@",
"FAMC": "@F6@",
},
"@I8@": {
"NAME": "June /Vanderzee/",
"SEX": "F",
"BIRT": "",
"DATE": "4 APR 1950",
"FAMS": "@F4@",
"FAMC": "@F7@",
},
"@I9@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "27 SEP 1972",
"FAMC": "@F4@",
},
"@I10@": {
"NAME": "Lynn-marie /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "10 AUG 1976",
"FAMC": "@F4@",
},
"@I11@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "8 JAN 1930",
"DEAT": "Y",
"DEATH_DATE": "6 JAN 1990",
"FAMS": "@F3@",
},
"@I12@": {
"NAME": "Leona /Layton/",
"SEX": "F",
"BIRT": "",
"DATE": "5 AUG 1936",
"FAMS": "@F3@",
},
"@I13@": {
"NAME": "Peter /Vanderzee/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUL 1911",
"DEAT": "Y",
"DEATH_DATE": "26 NOV 1986",
"FAMS": "@F7@",
},
"@I14@": {
"NAME": "Olive /Heritage/",
"SEX": "F",
"BIRT": "",
"DATE": "7 JUN 1919",
"DEAT": "Y",
"DEATH_DATE": "13 OCT 2009",
"FAMS": "@F7@",
},
"@I15@": {
"NAME": "Peter /Lagaveen/",
"SEX": "M",
"BIRT": "",
"DATE": "2 MAR 1916",
"DEAT": "Y",
"DEATH_DATE": "8 JUN 1966",
"FAMS": "@F6@",
},
"@I16@": {
"NAME": "Beatrice /Meyne/",
"SEX": "F",
"BIRT": "",
"DATE": "29 MAY 1914",
"DEAT": "Y",
"DEATH_DATE": "26 APR 2005",
"FAMS": "@F8@",
},
"@I17@": {
"NAME": "Gerrit /Dijkstra/",
"SEX": "M",
"BIRT": "",
"DATE": "13 SEP 1920",
"DEAT": "Y",
"DEATH_DATE": "11 SEP 2001",
"FAMS": "@F8@",
},
"@I18@": {
"NAME": "Sage /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "10 JUN 2020",
"FAMC": "@F9@",
},
}
assert order_siblings_by_age(families, individuals) == False
def test_check_unique_ids_valid():
    """The main GEDCOM file should contain no duplicate record IDs."""
    outcome = check_unique_ids("myfamily.ged")
    assert outcome == True
def test_check_unique_ids_invalid():
    """The US22 fixture file contains duplicate IDs, so the check fails."""
    outcome = check_unique_ids("testUS22_myfamily.ged")
    assert outcome == False
def test_US27():
    """US27: the known-good fixture passes (expected True)."""
    outcome = us_27(CORRECT_INDIVIDUALS)
    assert outcome == True
def test_US32_valid():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
assert us_32(individuals) == True
def test_US32_invalid():
    """US32: the known-good fixture is expected to yield False here."""
    outcome = us_32(CORRECT_INDIVIDUALS)
    assert outcome == False
def test_US24_valid():
    """US24: families with distinct spouse combinations pass the check."""
    shared_date = "15 APR 1999"
    families = {
        "@F1@": {"HUSB": "@I1@", "WIFE": "", "DATE": shared_date},
        "@F2@": {"HUSB": "@I2@", "WIFE": "", "DATE": shared_date},
        "@F3@": {"HUSB": "@I3@", "WIFE": "", "DATE": shared_date},
        "@F4@": {"HUSB": "@I4@", "WIFE": "@I5@", "DATE": shared_date},
    }
    outcome = us_24(families)
    assert outcome == True
def test_US24_invalid():
    """US24 failure path.

    "@F2@" and "@F3@" are identical (same spouses, same MARR date), so the
    family-uniqueness check is expected to report False.
    """
    families = {
        "@F2@": {
            "HUSB": "@I2@",
            "WIFE": "@I1@",
            "MARR": "15 APR 1999",
            "CHIL": ["@I4@"],
        },
        "@F3@": {
            "HUSB": "@I2@",
            "WIFE": "@I1@",
            "MARR": "15 APR 1999",
            "CHIL": ["@I4@"],
        },
        "@F4@": {"HUSB": "@I4@", "WIFE": "@I5@", "MARR": "15 APR 1999"},
    }
    assert us_24(families) == False
def test_list_upcoming_birthdays_valid():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "21 MAY 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "June /Hartman/",
"SEX": "F",
"BIRT": "",
"DATE": "10 MAY 1970",
"FAMS": "@F4@",
"FAMC": "@F1@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "20 APR 2005",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
}
assert list_upcoming_birthdays(individuals) == True
def test_list_upcoming_birthdays_invalid():
    """The known-good fixture has no upcoming birthdays (expected False)."""
    outcome = list_upcoming_birthdays(CORRECT_INDIVIDUALS)
    assert outcome == False
def test_list_orphans_valid():
families = {
"@F1@": {
"HUSB": "@I1@",
"WIFE": "@I2@",
"CHIL": ["@I3@", "@I4@"],
"DATE": "15 APR 1999",
},
"@F2@": {
"HUSB": "@I4@",
"WIFE": "@I5@",
"MARR": "8 AUG 1991",
"DIV": "30 DEC 2018",
},
}
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1970",
"DEAT": "Y",
"FAMS": "@F1@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Beatrice /Meyne/",
"SEX": "F",
"BIRT": "",
"DATE": "11 NOV 1970",
"DEAT": "Y",
"FAMS": "@F1@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 2005",
"FAMC": "@F1@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "2 DEC 2017",
"FAMC": "@F1@",
},
}
assert list_orphans(families, individuals) == True
def test_list_orphans_invalid():
    """The known-good fixture contains no orphans (expected False)."""
    outcome = list_orphans(CORRECT_FAMILIES, CORRECT_INDIVIDUALS)
    assert outcome == False
def test_list_death_in_last_30_days_valid():
    """Both deaths are well in the past, so nothing falls in the 30-day window.

    NOTE(review): list_death_in_last_30_days presumably compares fixture
    dates against today's date, so this expectation depends on when the
    suite runs; consider deriving the dates from datetime.date.today() to
    keep the test stable over time.
    """
    individuals = {
        "@I13@": {
            "NAME": "Peter /Vanderzee/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "10 JUL 1911",
            "DEAT": "Y",
            "DEATH_DATE": "26 APR 2020",
            "FAMS": "@F7@",
        },
        "@I14@": {
            "NAME": "Olive /Heritage/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "7 JUN 1919",
            "DEAT": "Y",
            "DEATH_DATE": "13 OCT 2009",
            "FAMS": "@F7@",
        },
    }
    assert list_death_in_last_30_days(individuals) == False
def test_list_death_in_last_30_days_invalid():
    """A death inside the trailing 30-day window is expected to be reported.

    NOTE(review): DEATH_DATE "26 APR 2021" was within 30 days only at the
    time this test was written; since the check presumably compares against
    today's date, this test goes stale as time passes. Consider computing a
    recent date relative to datetime.date.today() instead.
    """
    individuals = {
        "@I13@": {
            "NAME": "Peter /Vanderzee/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "10 JUL 1911",
            "DEAT": "Y",
            "DEATH_DATE": "26 APR 2021",
            "FAMS": "@F7@",
        },
        "@I14@": {
            "NAME": "Olive /Heritage/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "7 JUN 1919",
            "DEAT": "Y",
            "DEATH_DATE": "13 OCT 2009",
            "FAMS": "@F7@",
        },
    }
    assert list_death_in_last_30_days(individuals) == True
def test_us37_valid():
    """US37: the known-good fixture is expected to yield False here."""
    outcome = us_37(CORRECT_FAMILIES, CORRECT_INDIVIDUALS)
    assert outcome == False
def test_us37_invalid():
families = {
"@F1@": {"HUSB": "@I1@", "WIFE": "@I2@", "DATE": "15 APR 1999"},
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "8 AUG 1991",
"DIV": "30 DEC 2018",
},
}
individuals = {
"@I1@": {
"NAME": "Wyett /Cooke/",
"SEX": "M",
"BIRT": "",
"DATE": "10 OCT 1998",
"FAM": "@F2@",
},
"@I3@": {
"NAME": "Michael /Cooke/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"DEAT": "",
"DEATH_DATE": "16 APR 2021",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "Diana /Chaney/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Wyett /Cooke/",
"SEX": "M",
"BIRT": "",
"DATE": "10 OCT 1998",
"FAM": "@F2@",
},
}
assert us_37(families, individuals) == True
def test_us42_valid():
    """US42: all dates in the known-good fixture parse as legitimate dates."""
    outcome = us_42(CORRECT_FAMILIES, CORRECT_INDIVIDUALS)
    assert outcome == True
def test_us42_invalid():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "39 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
families = {
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"CHIL": ["@I1@", "@I5@"],
"MARR": "38 AUG 2020",
"DIV": "33 DEC 2018",
}
}
assert us_42(families, individuals) == False
def test_list_anniversaries():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Alyssa /Bottesi/",
"SEX": "F",
"BIRT": "",
"DATE": "15 APR 1999",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "39 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "39 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
families = {
"@F1@": {
"HUSB": "@I1@",
"WIFE": "@I2@",
"MARR": "28 MAY 2020",
},
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"MARR": "28 NOV 1927",
},
"@F3@": {
"HUSB": "@I5@",
"WIFE": "@I6@",
"MARR": "2 MAY 1999",
},
}
assert list_upcoming_anniversaries(individuals, families) == 2
def test_list_large_age_differences():
individuals = {
"@I1@": {
"NAME": "Ryan /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "11 NOV 2020",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I2@": {
"NAME": "Alyssa /Bottesi/",
"SEX": "F",
"BIRT": "",
"DATE": "10 NOV 2019",
"FAMS": "@F9@",
"FAMC": "@F2@",
},
"@I3@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "2 DEC 1962",
"FAMS": "@F2@",
"FAMC": "@F3@",
},
"@I4@": {
"NAME": "June /Lagaveen/",
"SEX": "F",
"BIRT": "",
"DATE": "1 OCT 1970",
"FAMS": "@F2@",
"FAMC": "@F4@",
},
"@I5@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "12 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
"@I6@": {
"NAME": "Thomas /Hartman/",
"SEX": "M",
"BIRT": "",
"DATE": "12 JUL 1994",
"FAMS": "@F5@",
"FAMC": "@F2@",
},
}
families = {
"@F1@": {
"HUSB": "@I1@",
"WIFE": "@I2@",
"MARR": "28 MAY 2020",
},
"@F2@": {
"HUSB": "@I3@",
"WIFE": "@I4@",
"MARR": "28 NOV 1927",
},
"@F3@": {
"HUSB": "@I5@",
"WIFE": "@I6@",
"MARR": "2 MAY 1999",
},
}
assert list_large_age_differences(individuals, families) == 1
def test_US35_valid():
    """US35: "@I14@" has birth DATE "25 APR 2021", recent when this test was
    written, so us_35 was expected to report True.

    NOTE(review): us_35 presumably lists births within a recent window
    relative to today's date — this fixture goes stale as time passes.
    """
    individuals = {
        "@I13@": {
            "NAME": "Peter /Vanderzee/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "10 JUL 1911",
        },
        "@I14@": {
            "NAME": "Olive /Heritage/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "25 APR 2021",
        },
    }
    assert us_35(individuals) == True
def test_US35_invalid():
    """US35: neither birth date is recent, so us_35 is expected to report False.

    NOTE(review): same date-relative caveat as test_US35_valid — the
    expectation depends on when the suite runs.
    """
    individuals = {
        "@I13@": {
            "NAME": "Peter /Vanderzee/",
            "SEX": "M",
            "BIRT": "",
            "DATE": "10 JUL 1911",
        },
        "@I14@": {
            "NAME": "Olive /Heritage/",
            "SEX": "F",
            "BIRT": "",
            "DATE": "25 APR 2020",
        },
    }
    assert us_35(individuals) == False
def test_US40_valid():
    """US40: a real GEDCOM file is accepted."""
    assert us_40("myfamily.ged") == True
def test_US40_invalid():
    """US40: a non-GEDCOM file is rejected."""
    assert us_40("requirements.txt") == False
| true |
019b5d23d15f4b1b28ee9d89112921f4d325375e | Python | TonyZaitsev/Codewars | /7kyu/Sum Factorial/Sum Factorial.py | UTF-8 | 1,148 | 4.8125 | 5 | [
"MIT"
] | permissive | """
https://www.codewars.com/kata/56b0f6243196b9d42d000034/train/python
Sum Factorial
Factorials are often used in probability and are used as an introductory problem for looping constructs. In this kata you will be summing together multiple factorials.
Here are a few examples of factorials:
4 Factorial = 4! = 4 * 3 * 2 * 1 = 24
6 Factorial = 6! = 6 * 5 * 4 * 3 * 2 * 1 = 720
In this kata you will be given a list of values that you must first find the factorial, and then return their sum.
For example if you are passed the list [4, 6] the equivalent mathematical expression would be 4! + 6! which would equal 744.
Good Luck!
Note: Assume that all values in the list are positive integer values > 0 and each value in the list is unique.
Also, you must write your own implementation of factorial, as you cannot use the built-in math.factorial() method.
"""
def factorial(n):
    """Return n! for a non-negative integer n, computed recursively.

    The kata forbids math.factorial, so the product is built by hand.
    The base case covers both 0! and 1! (= 1); the original `n == 1`
    base case recursed forever if 0 was ever passed.
    """
    if n <= 1:
        return 1
    return n * factorial(n - 1)
def sum_factorial(lst):
    """Return the sum of the factorials of every value in lst.

    Example: [4, 6] -> 4! + 6! = 24 + 720 = 744.

    A generator expression replaces the original list(map(lambda ...))
    wrapper, which built a throwaway list for no benefit.
    """
    return sum(factorial(x) for x in lst)
"""
Sample Tests
test.assert_equals(sum_factorial([4,6]), 744)
test.assert_equals(sum_factorial([5,4,1]), 145)
"""
| true |
13fd6dcf6cb638ca81ae9155348eb3a8136120e1 | Python | lgcy/tf-head-pose | /datasets.py | UTF-8 | 6,356 | 2.546875 | 3 | [] | no_license | import os
import numpy as np
from random import randint
import tensorflow as tf
from PIL import Image, ImageFilter
import utils
def get_list_from_filenames(file_path):
    """Read a text file of relative sample names, one per line.

    Returns the lines as a list of strings, without trailing newlines.
    """
    with open(file_path) as handle:
        return handle.read().splitlines()
def rescale(image):
    """Resize a PIL image so its shorter side becomes 240 px, keeping aspect ratio."""
    target = 240
    width, height = image.size
    if width < height:
        new_size = (target, round(height / width * target))
    else:
        new_size = (round(width / height * target), target)
    return image.resize(new_size, Image.BILINEAR)
def random_crop(image):
    """Crop a random 224x224 window from a PIL image.

    Assumes both sides are >= 224 (the upstream rescale() makes the shorter
    side 240), otherwise randint gets an empty range and raises.
    """
    crop_size = 224
    width, height = image.size
    left = randint(0, width - crop_size)
    upper = randint(0, height - crop_size)
    return image.crop((left, upper, left + crop_size, upper + crop_size))
def nomalizing(image, mean_value, std):
    """Scale an HxWx3 image to [0, 1] and standardize each channel.

    (The misspelled name is kept because callers in this module use it.)

    Args:
        image: PIL image or array-like of shape (H, W, 3), values in [0, 255].
        mean_value: per-channel means, length 3.
        std: per-channel standard deviations, length 3.

    Returns:
        A new float ndarray of shape (H, W, 3): (image / 255 - mean) / std.
    """
    scaled = np.asarray(image, dtype=np.float64) / 255.0
    # Broadcasting over the trailing channel axis replaces the original
    # explicit per-channel loop.
    return (scaled - np.asarray(mean_value)) / np.asarray(std)
class Pose_300W_LP():
    # Head pose from 300W-LP dataset
    """Batch loader for head-pose training samples from the 300W-LP dataset.

    Each call to get() returns a batch of cropped, augmented face images
    plus binned pose class labels and continuous (yaw, pitch, roll) angles
    in degrees read from the per-image .mat annotation files.
    """
    def __init__(self, data_dir, filename_path, batch_size,image_size,img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
        # data_dir: root folder holding both images and .mat annotations.
        # filename_path: text file listing relative sample names (no extension).
        self.data_dir = data_dir
        #self.transform = transform
        self.img_ext = img_ext
        self.annot_ext = annot_ext
        self.batch_size=batch_size
        self.image_size=image_size
        filename_list = get_list_from_filenames(filename_path)
        # X_train and y_train alias the SAME list object, so the in-place
        # shuffle in get() keeps image names and annotation names aligned.
        self.X_train = filename_list
        self.y_train = filename_list
        self.image_mode = image_mode
        self.length = len(filename_list)
        # Read position of the next sample; wraps (with a reshuffle) at the end.
        self.cursor=0
        #self.batch_size=16#args.batch_size
    def get(self):
        """Return one augmented batch: (images, binned labels, continuous labels).

        images: (batch, image_size, image_size, 3) normalized floats.
        Llabels: (batch, 3) int32 bin indices for (yaw, pitch, roll).
        Lcont_labels: (batch, 3) continuous (yaw, pitch, roll) in degrees.
        """
        images = np.zeros((self.batch_size,self.image_size, self.image_size, 3))
        Llabels = np.zeros((self.batch_size,3),np.int32)
        Lcont_labels=np.zeros((self.batch_size,3))
        count=0
        while count<self.batch_size:
            img = Image.open(os.path.join(self.data_dir, self.X_train[self.cursor] + self.img_ext))
            #print('img', img.shape)
            img = img.convert(self.image_mode)
            mat_path = os.path.join(self.data_dir, self.y_train[self.cursor] + self.annot_ext)
            # Crop the face loosely
            pt2d = utils.get_pt2d_from_mat(mat_path)
            x_min = min(pt2d[0, :])
            y_min = min(pt2d[1, :])
            x_max = max(pt2d[0, :])
            y_max = max(pt2d[1, :])
            # k = 0.2 to 0.40
            # Randomized margin so the crop varies between epochs.
            k = np.random.random_sample() * 0.2 + 0.2
            # NOTE(review): the top margin uses 2*k while the other three use
            # 0.6*k — presumably intentional (extra headroom above the face),
            # confirm against the upstream Hopenet data pipeline.
            x_min -= 0.6 * k * abs(x_max - x_min)
            y_min -= 2 * k * abs(y_max - y_min)
            x_max += 0.6 * k * abs(x_max - x_min)
            y_max += 0.6 * k * abs(y_max - y_min)
            img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
            # We get the pose in radians
            pose = utils.get_ypr_from_mat(mat_path)
            # Convert radians to degrees.
            pitch = pose[0] * 180 / np.pi
            yaw = pose[1] * 180 / np.pi
            roll = pose[2] * 180 / np.pi
            # Flip? Horizontal mirroring negates yaw and roll (pitch unchanged).
            rnd = np.random.random_sample()
            if rnd < 0.5:
                yaw = -yaw
                roll = -roll
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
            # Blur? Applied with 5% probability as extra augmentation.
            rnd = np.random.random_sample()
            if rnd < 0.05:
                img = img.filter(ImageFilter.BLUR)
            #preprocess: shorter side -> 240, random 224 crop, per-channel normalize
            img = rescale(img)
            img = random_crop(img)
            img = nomalizing(img,[0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            # Bin values: 3-degree-wide bins spanning [-99, 99).
            bins = np.array(range(-99, 102, 3))
            binned_pose = np.digitize([yaw, pitch, roll], bins) - 1
            labels = binned_pose
            cont_labels=[float(yaw),float(pitch),float(roll)]
            images[count, :, :, :] = img
            Llabels[count]=labels
            Lcont_labels[count]=cont_labels
            count+=1
            self.cursor+=1
            if self.cursor >= len(self.X_train):
                # Epoch boundary: reshuffle the (shared) name list and restart.
                np.random.shuffle(self.X_train)
                self.cursor = 0
                print("self.cursor ====0")
                #print(self.X_train[0])
        return images,Llabels,Lcont_labels
class AFLW2000():
    """Indexable head-pose evaluation set backed by the AFLW2000 dataset.

    Unlike Pose_300W_LP this class performs no random augmentation: it is
    the map-style (__getitem__/__len__) counterpart used for evaluation.
    """
    def __init__(self, data_dir, filename_path, transform, img_ext='.jpg', annot_ext='.mat', image_mode='RGB'):
        # transform: optional callable applied to each image in __getitem__.
        self.data_dir = data_dir
        self.transform = transform
        self.img_ext = img_ext
        self.annot_ext = annot_ext
        filename_list = get_list_from_filenames(filename_path)
        self.X_train = filename_list
        self.y_train = filename_list
        self.image_mode = image_mode
        self.length = len(filename_list)
    def __getitem__(self, index):
        """Return (image, binned labels, continuous labels, sample name) for index."""
        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
        img = img.convert(self.image_mode)
        mat_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)
        # Crop the face loosely
        pt2d = utils.get_pt2d_from_mat(mat_path)
        x_min = min(pt2d[0,:])
        y_min = min(pt2d[1,:])
        x_max = max(pt2d[0,:])
        y_max = max(pt2d[1,:])
        # Fixed margin here (no randomness), unlike the training loader.
        # NOTE(review): three sides use 2*k and one uses 0.6*k, which differs
        # from Pose_300W_LP's margins — confirm this asymmetry is intended.
        k = 0.20
        x_min -= 2 * k * abs(x_max - x_min)
        y_min -= 2 * k * abs(y_max - y_min)
        x_max += 2 * k * abs(x_max - x_min)
        y_max += 0.6 * k * abs(y_max - y_min)
        img = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))
        # We get the pose in radians
        pose = utils.get_ypr_from_mat(mat_path)
        # And convert to degrees.
        pitch = pose[0] * 180 / np.pi
        yaw = pose[1] * 180 / np.pi
        roll = pose[2] * 180 / np.pi
        # Bin values: 3-degree-wide bins spanning [-99, 99).
        bins = np.array(range(-99, 102, 3))
        labels = np.digitize([yaw, pitch, roll], bins) - 1
        cont_labels = [yaw, pitch, roll]
        if self.transform is not None:
            img = self.transform(img)
        return img, labels, cont_labels, self.X_train[index]
    def __len__(self):
        # 2,000
        return self.length
#if __name__ =='__main__':
# data_dir = 'D:/300W_LP'
# filename_path = 'D:/300W_LP/300W_LP_filename_filtered.txt'
# transform = None
# data = Pose_300W_LP(data_dir,filename_path,1,224)
| true |
9fac40da3b6f3a2daa77269d9966389a095beeb9 | Python | 2015shanbhvi/flask_sn | /models.py | UTF-8 | 670 | 2.78125 | 3 | [] | no_license | import sqlite3 as sql
from os import path
# Data-access layer: helpers the Flask server uses to talk to the SQLite
# database. ROOT is this module's directory, used to locate database.db.
ROOT = path.dirname(path.relpath(__file__))
def create_post(name, content):
    """Insert a new row (name, content) into the posts table.

    Args:
        name: author name to store.
        content: post body to store.

    The connection is now closed in a finally block; previously an exception
    during execute/commit leaked the SQLite handle.
    """
    con = sql.connect(path.join(ROOT, 'database.db'))
    try:
        cur = con.cursor()
        # Parameterized query: user input is never interpolated into the SQL.
        cur.execute('insert into posts (name, content) values(?, ?)', (name, content))
        con.commit()
    finally:
        con.close()
#pull the posts we want from database
def get_posts():
con = sql.connect(path.join(ROOT, 'database.db'))
cur = con.cursor()
cur.execute('select * from posts')
posts = cur.fetchall()
return posts
| true |
288d970ea0ec55bbd6d3f3601df0f22e36fb0d39 | Python | ELE-22/Monica | /webscrap_index.py | UTF-8 | 2,556 | 2.90625 | 3 | [] | no_license | import pandas as pd
from selenium import webdriver
from read_excel import get_Tags
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import transformdates
#get data from the for loop
modelo_for= list()
warranty_for =list()
df =list()
def scrapping(path):
    """Scrape Dell's warranty site for each service tag listed in the Excel
    file at *path*, collecting model and warranty text into the module-level
    lists and building a summary DataFrame stored in df[0].

    NOTE(review): assumes chromedriver is installed at the hard-coded PATH
    below and that the Dell page layout matches these XPaths -- verify.
    """
    # Webdriver exe
    print(path)
    PATH = 'C:\Program Files (x86)\chromedriver.exe'
    driver = webdriver.Chrome(PATH)
    # Go to the Dell website
    driver.get('https://www.dell.com/support/home/en-us?app=warranty')
    print(driver.title)
    # get data
    TAGS_LIST = get_Tags(path)
    for tag in TAGS_LIST:
        try:
            # Search tag box
            SEARCH_TAG_BOX = WebDriverWait(driver, 20).until(
                EC.presence_of_element_located(
                    (By.XPATH, '//*[@id="inpEntrySelection"]'))
            )
            #SEARCH_TAG_BOX = driver.find_element_by_xpath('//*[@id="inpEntrySelection"]')
            # Type the service tag into the input box
            SEARCH_TAG_BOX.send_keys(tag)
            # Press Enter (key return) to submit the search
            SEARCH_TAG_BOX.send_keys(Keys.RETURN)
        except:
            print('Error al hacer una busqueda')
        try:
            # Fixed wait for the results page to render before querying it
            time.sleep(5)
            car_element = driver.find_element_by_xpath('//*[@id="site-wrapper"]/div/div[5]/div/div[2]/div[1]/div[2]/div/div')
            ## Extract the model and warranty info for the searched tag
            get_TAG_MODEL = car_element.find_element_by_xpath('//*[@id="site-wrapper"]/div/div[5]/div/div[2]/div[1]/div[2]/div/div/div/div[2]/h1')
            get_TAG_Warranty= car_element.find_element_by_xpath('//*[@id="ps-inlineWarranty"]/div[1]/div/p[2]')
            # Append to a list
            warranty_for.append(get_TAG_Warranty.text)
            modelo_for.append(get_TAG_MODEL.text)
            # Go back to the products page, ready for the next tag
            driver.get("https://www.dell.com/support/home/en-us?app=products")
        except:
            print("Un error al guardar las etiquetas")
    driver.quit()
    time.sleep(3)
    # Convert the raw warranty strings into (dates, deltas) for the report.
    warranty_date = transformdates.get_date(warranty_for)
    df.insert(0, pd.DataFrame({'TAGS': TAGS_LIST,'MODEL': modelo_for, 'Warranty': warranty_date[0], 'Delta T': warranty_date[1] }))
    print(df[0])
def save_result(path):
    """Write the scraped summary DataFrame (df[0]) to an Excel file at *path*."""
    print('path: {} ,\ndf:{}'.format(path, df[0]))
    df[0].to_excel(path, index=False)
    # Spanish: "the file was saved at the following path"
    print(' \nEl file se guardo en la siguinete ruta: '+path)
| true |
5be312dd6dadf6f6b0eaf9c87714432fefa83e54 | Python | Inndy/tkinter_samples | /src/text_editor.py | UTF-8 | 1,404 | 2.796875 | 3 | [] | no_license | import os
from tkinter import *
HEIGHT = 32
WIDTH = 80
root = Tk()
root.title("Text editor")
def onlist():
    """Replace the text area contents with a listing of the current directory."""
    onclear()
    file_list = '\n'.join(os.listdir())
    textarea.insert('@0,0', file_list)
def onread():
    """Load the file named in the entry box into the text area.

    On I/O failure an error message is shown in red instead.
    """
    onclear()
    try:
        # Bug fix: 'with' guarantees the handle is closed even when read()
        # raises (the original leaked the file object on a read error).
        with open(txtFile.get(), "r") as fobj:
            textarea.insert("@0,0", fobj.read())
        textarea["fg"] = "#000"
    except IOError:
        textarea.insert("@0,0", "Error: Can't read file\n")
        textarea["fg"] = "#f00"
def onwrite():
    """Save the text area contents to the file named in the entry box.

    On I/O failure an error message is shown in orange instead.
    """
    try:
        # Bug fix: 'with' guarantees the handle is closed even when write()
        # raises (the original leaked the file object on a write error).
        with open(txtFile.get(), "w") as fobj:
            fobj.write(textarea.get("@0,0", END))
        textarea["fg"] = "#000"
    except IOError:
        textarea.insert("@0,0", "Error: Can't write file\n")
        textarea["fg"] = "#ff722b"
def onclear():
    """Delete everything currently in the text area."""
    textarea.delete("@0,0", END)
btnList = Button(root, text = "List", command = onlist)
btnList.grid(row = 0, column = 0)
btnRead = Button(root, text = "Read", command = onread)
btnRead.grid(row = 0, column = 1)
btnWrite = Button(root, text = "Write", command = onwrite)
btnWrite.grid(row = 0, column = 2)
btnClear = Button(root, text = "Clear", command = onclear)
btnClear.grid(row = 0, column = 3)
txtFile = Entry(root, width = WIDTH)
txtFile.grid(row = 1, column = 0, columnspan = 4)
textarea = Text(root, width = WIDTH, height = HEIGHT)
textarea.grid(row = 2, column = 0, columnspan = 4)
root.mainloop()
| true |
74e37718207744607fba568efa2f4b513f30b206 | Python | steview-d/practicepython-org-exercises | /practice_python_org/16_pass_gen.py | UTF-8 | 3,559 | 3.71875 | 4 | [] | no_license | import random
pw_len, upper, lower, numbers, symbols = 8, 1, 1, 1, 1
stored_pw = []
pw_list_upper = "QAZXSWEDCVFRTGBNHYUJMKIOLP"
pw_list_lower = "polmkiujnbhytgvcfredxzswqa"
pw_list_numbers = "1234567890"
pw_list_symbols = '!"£$%^&*()_+][}{;@#:~?><,./\|'
def generate_password(source, pass_length):
    """Generate a random password.

    source is the sequence of characters to choose from;
    pass_length is the number of characters in the password.
    The result is also recorded in the module-level stored_pw history.
    """
    new_pw = ''.join(random.choice(source) for _ in range(pass_length))
    stored_pw.append(new_pw)
    return new_pw
def make_char_list(mcl_upper, mcl_lower, mcl_numbers, mcl_symbols):
    """Build the pool of characters passwords are drawn from.

    Each flag argument is truthy to include the corresponding character
    class; classes are concatenated in the fixed order: upper case,
    lower case, digits, symbols.
    """
    # Shortened per the original TODO: pair each flag with its character
    # class and keep only the enabled ones.
    classes = [
        (mcl_upper, pw_list_upper),
        (mcl_lower, pw_list_lower),
        (mcl_numbers, pw_list_numbers),
        (mcl_symbols, pw_list_symbols),
    ]
    return ''.join(chars for enabled, chars in classes if enabled)
def draw_screen():
    """Print the menu and the current generator settings.

    Reads the module-level settings (pw_len, upper, lower, numbers, symbols).
    """
    print("*** PASSWORD GENERATOR {} ***".format("v0.1"))
    print("-------------------------------\n")
    print("1. Generate password(s) with current settings\n"
          "2. Change settings\n"
          "3. Display stored passwords\n"
          "4. Exit program\n")
    # Print out current settings
    print("\nCurrent Settings\n"
          "----------------")
    print("Password length to generate: {}".format(pw_len))
    print("Use UPPER case characters: {}".format("Yes" if upper == 1 else "No"))
    print("Use LOWER case characters: {}".format("Yes" if lower == 1 else "No"))
    print("Use NUMBERS: {}".format("Yes" if numbers == 1 else "No"))
    print("Use SYMBOLS: {}".format("Yes" if symbols == 1 else "No"))
char_list = make_char_list(upper, lower, numbers, symbols)
while True:
draw_screen()
u_input = input("\nPlease choose an option from above ")
if u_input == "q" or u_input == "4":
break
if u_input == "1":
pw = (generate_password(char_list, pw_len))
print("Your new password is \n\n{}".format(pw))
input("\n(enter to continue)")
if u_input == "2":
pw_len = int(input("How many characters should the password contain?"))
_ = input("Use UPPER case characters? [y]es or [n]o? ")
if _ == "y" or _ == "yes":
upper = 1
else:
upper = 0
_ = input("Use LOWER case characters? [y]es or [n]o? ")
if _ == "y" or _ == "yes":
lower = 1
else:
lower = 0
_ = input("Use NUMBERS? [y]es or [n]o? ")
if _ == "y" or _ == "yes":
numbers = 1
else:
numbers = 0
_ = input("Use SYMBOLS? [y]es or [n]o? ")
if _ == "y" or _ == "yes":
symbols = 1
else:
symbols = 0
char_list = make_char_list(upper, lower, numbers, symbols)
input("\n(enter to continue)")
if u_input == "3":
print("The following passwords have been stored:\n")
for _ in stored_pw:
print(_)
input("\n(enter to continue)")
"""
Could go to town on this and really expand. Think Dashlane pw gen - simulate that, in Python
Features List
* Password Length
* Password Content Choice, so - upper / lower case, numbers, symbols
* Store previously generated passwords
* Use a dict to store password and site / program name
* Allow possibility of password containing multiple same chars
""" | true |
1b48850f668068a7c1174c04e1bbb57e7d4ec7f2 | Python | Ankit-Kumar-Saini/Applications-of-Data-Science | /Sentiment Analysis/app/app.py | UTF-8 | 3,441 | 3.328125 | 3 | [] | no_license | # import necessary modules
import re
import nltk
import time
import pickle
import sqlite3
import numpy as np
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from flask import Flask, render_template, request
# download stopwords from nltk
nltk.download('stopwords')
## Function to connect to sql database
def sql_init():
"""
This function creates a connection to the database and
then creates a table in the database
"""
# create connection to the database
conn = sqlite3.connect('reviews_database.db')
# create cursor
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS REVIEWS;")
# sql command
sql_cmd = """
CREATE TABLE REVIEWS (TimeStamps INTEGER PRIMARY KEY,
MovieNames VARCHAR(20),
Reviews VARCHAR(50),
Predictions VARCHAR(10));"""
cur.execute(sql_cmd)
conn.commit()
# close the connection
conn.close()
## Function to store reviews in sql database
def sql_store(time_stamp, movie_name, review, prediction):
conn = sqlite3.connect('reviews_database.db')
cur = conn.cursor()
cur.execute("INSERT INTO REVIEWS VALUES (?, ?, ?, ?)", (time_stamp, movie_name, review, prediction))
conn.commit()
# close the connection
conn.close()
# instantiate Flask object
app = Flask(__name__, static_folder = '',)
# Load trained model
model = pickle.load(open('model.pkl', 'rb'))
# Load vectorizer
vectorizer = pickle.load(open('vectorizer.pkl', 'rb'))
# call function to connect to sql_database
sql_init()
# Home page route
@app.route("/home")
@app.route("/")
def home():
return render_template('home.html')
# predict route
@app.route('/predict', methods = ['POST'])
def predict():
if request.method == 'POST':
movie_name = request.form['movie']
review = request.form['review']
time_stamp = int(time.time())
# call the function to clean the review
clean_review = clean_reviews(str(review))
# transform the review using vectorizer object
transformed_review = vectorizer.transform(np.array([clean_review]))
prediction = model.predict(transformed_review)
sentiment = ['Negative', 'Positive'][prediction[0]]
sql_store(time_stamp, movie_name, review, sentiment)
return render_template("pred.html", value = sentiment)
# Clean raw reviews
def clean_reviews(review):
"""
Clean and preprocess a review
1. Remove HTML tags
2. Use regex to remove all special characters (only keep letters)
3. Make strings to lower case and tokenize / word split reviews
4. Remove English stopwords
5. Rejoin to one string
Args:
review: raw text review
Returns:
review: clean text review
"""
# 1. Remove HTML tags
review = BeautifulSoup(review, features = "html.parser").text
# 2. Use regex to find emoticons
emotions = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', review)
# 3. Remove punctuation
review = re.sub("[^a-zA-Z]", " ", review)
# 4. Tokenize into words (all lower case)
review = review.lower().split()
# 5. Remove stopwords
eng_stopwords = set(stopwords.words("english"))
review = [w for w in review if not w in eng_stopwords]
# 6. Join the review to form one sentence
review = ' '.join(review + emotions)
return review | true |
b4de57ba88721f4f144d5ec0412d1891085e312e | Python | adi-dhal/vistaar_cvg | /prob_stat_1/ps_1_1.py | UTF-8 | 144 | 2.859375 | 3 | [] | no_license | import sys
import math
def inp(arg):
    """Print the factorials of the integer-valued strings in *arg*."""
    factorials = [math.factorial(int(token)) for token in arg]
    print(factorials)
    return
inp(sys.argv[1:])
| true |
6a3d9af07cc34c3e928dd11fa21c920868076fe3 | Python | zihao-fan/ensemble_learning | /src/data_helper.py | UTF-8 | 1,183 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
current_path = os.path.realpath(__file__)
root_path = '/'.join(current_path.split('/')[:-2])
data_path = os.path.join(root_path,
'data', 'ContentNewLinkAllSample.csv')
def train_test_split(data, ratio=0.2):
    """Randomly partition *data* into (train, test) with ~ratio of rows in test."""
    keep = np.random.rand(len(data)) < (1 - ratio)
    return data[keep], data[~keep]
def get_dataset():
    """Load the sample CSV, integer-encode the 'class' column, and return
    a random (train, test) split."""
    data = pd.read_csv(data_path)
    # Map the categorical labels to integer codes.
    data['class'] = data['class'].astype('category').cat.codes
    train, test = train_test_split(data)
    return train, test
def plot(bagging, adaboost, cls):
    """Plot Bagging vs AdaBoost scores against ensemble size for classifier *cls*."""
    # Ensemble sizes the scores were measured at.
    x = np.asarray([5, 10, 25])
    plt.plot(x, bagging, label='Bagging')
    plt.plot(x, adaboost, label='AdaBoost')
    plt.legend()
    plt.title(cls)
    plt.show()
if __name__ == '__main__':
bagging_tree_f1 = np.asarray([0.902, 0.913, 0.905])
adaboost_tree_f1 = np.asarray([0.906, 0.899, 0.902])
bagging_svm_f1 = np.asarray([0.930, 0.932, 0.928])
adaboost_svm_f1 = np.asarray([0.925, 0.913, 0.917])
plot(bagging_tree_f1, adaboost_tree_f1, 'Tree')
# plot(bagging_svm_f1, adaboost_svm_f1, 'SVM') | true |
bda43fecae815b41c782a506143e389e7199783a | Python | adrielgentil/practica-programacion | /adivina.py | UTF-8 | 8,926 | 3.734375 | 4 | [] | no_license | # Importamos libreria random
import random
# Generamos número aleatorio
n1 = random.randint(1, 30)
# Funcion para preguntar si quiere jugar o no
def pregunta():
    """Ask the player (in Spanish) whether they want to play.

    'si' starts the game, 'no' quits; any other answer re-prompts recursively.
    """
    sn = input()
    if sn.lower() == 'no':
        print('Oh, que pena, quería divertirme un poco. Será la próxima entonces. Chau!')
    elif sn.lower() == 'si':
        print('Genial! Voy a pensar un número del 1 al 30. Intentá adivinarlo.\nPero ojo, solo tenes 10 intentos\nIgual tranqui, te voy a ir ayudando.\nSuerte!')
        juego()
    else:
        # Invalid answer: complain and ask again (recursive re-prompt).
        print('Respuesta invalida, por favor solo responde Si o No.')
        pregunta()
def preg():
    """Ask whether to play again; loop until the answer is 'si' or 'no'."""
    sn = input()
    while sn.lower() != 'si' and sn.lower() != 'no':
        print('Respuesta invalida, por favor solo responde Si o No.')
        sn = input()
    if sn.lower() == 'si':
        print('Genial! Voy a pensar un número del 1 al 30. Intentá adivinarlo.\nPero ojo, solo tenes 10 intentos\nIgual tranqui, te voy a ir ayudando.\nSuerte!')
        juego()
    else:
        print('Oh, que pena, quería divertirme un poco. Será la próxima entonces. Chau!')
# Generamos una funcion del juego
def juego():
    """Play one round: the player has 10 attempts to guess n1 (1-30).

    Defect fixed: the original repeated the same attempt block ten times
    (copy-paste with only the counter changed); this single loop preserves
    every message verbatim.
    """
    for intento in range(10):
        num = int(input('Ingresá el número: '))
        # Out-of-range guesses are re-asked and do not consume an attempt.
        while num <= 0 or num > 30:
            print('Acordate que es un numero entre 1 y 30, asi que intentalo de nuevo. Tranqui, esto no cuenta como intento')
            num = int(input('Ingresá el número: '))
        if num == n1:
            if intento == 0:
                print('¡Que suerte! ¡Ganaste a la primera! ¿Queres jugar de nuevo?')
            else:
                print('¡Que bien! ¡Ganaste! ¿Queres jugar de nuevo?')
            preg()
            return
        restantes = 9 - intento
        if restantes == 0:
            # Tenth wrong guess: the game is lost.
            break
        if restantes == 1:
            aviso = 'solo te queda un intento, asi que piensalo bien: '
        else:
            aviso = 'te quedan {} intentos: '.format(restantes)
        if num > n1:
            print('Ups, te pasaste. Proba de nuevo, ' + aviso)
        else:
            print('Ups, te quedaste corto. Proba de nuevo, ' + aviso + '\n')
    print('Uhhh, perdiste, lo lamento, realmente queria que adivinaras, te di pistas y todo.'
          '\nPero bueno, que se le va a hacer. ¿querés intentar de nuevo?')
    preg()
# Introducción e inicio del juego
print('\nHola! ¿Cómo te llamas?')
nombre = input()
print('Bien ' + nombre.capitalize() + ', ¿querés jugar un juego?')
preg()
| true |
36ab1990f8f757f61413200b51d8e4d9e7de568f | Python | daniel-reich/ubiquitous-fiesta | /Mwh3zhKFu332qBhQa_18.py | UTF-8 | 54 | 2.703125 | 3 | [] | no_license |
def guess_sequence(n):
    """Return 30*n**2 + 60*n, written in factored form."""
    return 30 * n * (n + 2)
| true |
d5a4c342aa1b09d3cb66d00d5737b63c9fa15d6b | Python | raphael-group/chisel | /src/chisel/Plotter.py | UTF-8 | 25,707 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python2.7
import sys, os
import argparse
import random
import warnings
from itertools import cycle
from collections import defaultdict
import numpy as np
import scipy
import scipy.cluster
import scipy.cluster.hierarchy as hier
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
from Utils import *
from Clusterizer import kclustering
from matplotlib.colors import LinearSegmentedColormap
# Extract the numeric part of a chromosome name (e.g. 'chr10' -> 10).
orderchrs = (lambda x : int(''.join([l for l in x if l.isdigit()])))
# Genomic sort key for a (chrom, start, end) bin.
order = (lambda b : (orderchrs(b[0]), int(b[1]), int(b[2])))
def parse_args():
    """Parse, validate, and normalize command-line arguments.

    Returns a dict of all options; raises ValueError on invalid values and
    seeds numpy's RNG (with None when --seed is not given).
    """
    description = "Generate plots for the analysis of estimated RDRs and BAFs, inferred allele- and haplotype-specific copy numbers, and clones."
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("INPUT", type=str, help="Input file with combined RDR and BAF per bin and per cell")
    parser.add_argument("-m", "--clonemap", required=False, type=str, default=None, help="Clone map (default: not used, the cells will be clustered for plotting purposes)")
    parser.add_argument("-f", "--figformat", required=False, type=str, default='png', help="Format of output figures (default: png, the only other option is pdf)")
    parser.add_argument("-s", "--sample", required=False, type=int, default=20, help="Number of cells to sample (default: 20)")
    parser.add_argument("--excludenoisy", required=False, default=False, action='store_true', help="Exclude noisy cells from plots (default: False)")
    parser.add_argument("--gridsize", required=False, type=str, default='12,6', help="Grid dimenstions specified as comma-separated numbers (default: 12,6)")
    parser.add_argument("--plotsize", required=False, type=str, default='5,1.5', help="Plot dimenstions for RDR-BAF plots, specified as comma-separated numbers (default: 5,1.5)")
    parser.add_argument("--clussize", required=False, type=str, default='5,3', help="Grid dimenstions for clustered plots, specified as comma-separated numbers (default: 5,3)")
    parser.add_argument("--xmax", required=False, type=float, default=None, help="Maximum x-axis value (default: None)")
    parser.add_argument("--xmin", required=False, type=float, default=None, help="Minimum x-axis value (default: None)")
    parser.add_argument("--ymax", required=False, type=float, default=None, help="Maximum x-axis value (default: None)")
    parser.add_argument("--ymin", required=False, type=float, default=None, help="Minimum x-axis value (default: None)")
    parser.add_argument("--seed", required=False, type=int, default=None, help="Random seed for replication (default: none)")
    args = parser.parse_args()
    # Validate the inputs before doing any work.
    if not os.path.isfile(args.INPUT):
        raise ValueError('ERROR: input file does not exist!')
    if args.clonemap and not os.path.isfile(args.clonemap):
        raise ValueError('ERROR: the provided clone map does not exist!')
    if args.figformat not in ['pdf', 'png']:
        raise ValueError('ERROR: figure format must be either pdf or png!')
    if args.sample < 1:
        raise ValueError('ERROR: number of sampled cells must be positive!')
    if args.seed and args.seed < 0:
        raise ValueError("Random seed must be positive or zero!")
    else:
        # Seeding with None lets numpy pick a fresh random state.
        np.random.seed(args.seed)
    def get_size(s):
        # Parse a "W,H" string into a (float, float) tuple.
        p = s.split(',')
        if len(p) != 2:
            raise ValueError('ERROR: wrong format for figure sizes!')
        return tuple(map(float, p))
    return {
        'input' : args.INPUT,
        'clonemap' : args.clonemap,
        'format' : args.figformat,
        'sample' : args.sample,
        'nonoisy' : args.excludenoisy,
        'gridsize' : get_size(args.gridsize),
        'plotsize' : get_size(args.plotsize),
        'clussize' : get_size(args.clussize),
        'xmax' : args.xmax,
        'xmin' : args.xmin,
        'ymax' : args.ymax,
        'ymin' : args.ymin
    }
def main():
    """End-to-end plotting pipeline: read the input, obtain clones (from a
    map or by clustering), and emit every RDR/BAF/copy-number figure."""
    log('Parsing and checking arguments')
    args = parse_args()
    log('\n'.join(['Arguments:'] + ['\t{} : {}'.format(a, args[a]) for a in args]), level='INFO')
    log('Reading input')
    bins, pos, cells, iscorr = read_cells(args['input'])
    log('Number of cells: {}'.format(len(cells)), level='INFO')
    log('Number of bins: {}'.format(len(pos)), level='INFO')
    log('Setting style')
    set_style(args)
    if args['clonemap']:
        log('Reading clonemap')
        index, clones, selected = clonemap_to_index(args['clonemap'], cells)
        # Fall back to clustering when the map contains no identified clone.
        if all(selected[e] == 'None' for e in selected):
            log('Cell will be re-clustered as no clone has been previously identified', level='WARN')
            index, clones = clustering_tot(bins, pos, cells)
            selected = dict(clones)
    else:
        log('Clustering cells')
        index, clones = clustering_tot(bins, pos, cells)
        selected = dict(clones)
    if args['nonoisy']:
        log('Excluding noisy cells')
        bins, pos, cells, index, clones, selected = exclude_noisy(bins, pos, cells, index, clones, selected)
        log('Number of cells: {}'.format(len(cells)), level='INFO')
        log('Number of bins: {}'.format(len(pos)), level='INFO')
    # Sample cells for the per-cell plots, preserving their original order.
    chosen = random.sample(list(enumerate(cells)), args['sample'])
    chosen = [p[1] for p in sorted(chosen, key=(lambda x : x[0]))]
    log('Plotting RDR and mirrored BAF plots for {} random cells in rbplot_mirrored.{}'.format(args['sample'], args['format']))
    rbplot_mirrored(bins, chosen, args)
    log('Plotting clustered RDR plots for {} random cells in crdr.{}'.format(args['sample'], args['format']))
    crdr(bins, pos, chosen, args)
    log('Plotting clustered-mirrored BAF plots for {} random cells in cbaf.{}'.format(args['sample'], args['format']))
    cbaf(bins, pos, chosen, args)
    log('Plotting read-depth ratios in {}'.format('rdrs.' + args['format']))
    gridrdrs(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    log('Plotting B-allele frequencies in {}'.format('bafs.' + args['format']))
    gridbafs(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    log('Plotting total copy numbers in {}'.format('totalcn.' + args['format']))
    totalcns(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    # The CORR-CNS (clone-corrected) variants only exist for 13-column input.
    if iscorr:
        log('Plotting total copy numbers corrected by clones in {}'.format('totalcn-corrected.' + args['format']))
        totalcns(bins, pos, cells, index=index, clones=clones, selected=selected, args=args, out='totalcn-corrected.', val='CORR-CNS')
    log('Plotting LOH in {}'.format('loh.' + args['format']))
    loh(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    if iscorr:
        log('Plotting LOH corrected by clones in {}'.format('loh-corrected.' + args['format']))
        loh(bins, pos, cells, index=index, clones=clones, selected=selected, args=args, out='loh-corrected.', val='CORR-CNS')
    log('Plotting A-specific copy numbers in {}'.format('Aspecificcn.' + args['format']))
    acns(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    if iscorr:
        log('Plotting A-specific copy numbers corrected by clones in {}'.format('Aspecificcn-corrected.' + args['format']))
        acns(bins, pos, cells, index=index, clones=clones, selected=selected, args=args, out='Aspecificcn-corrected.', val='CORR-CNS')
    log('Plotting B-specific copy numbers in {}'.format('Bspecificcn.' + args['format']))
    bcns(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    if iscorr:
        log('Plotting B-specific copy numbers corrected by clones in {}'.format('Bspecificcn-corrected.' + args['format']))
        bcns(bins, pos, cells, index=index, clones=clones, selected=selected, args=args, out='Bspecificcn-corrected.', val='CORR-CNS')
    log('Plotting allele-specific copy numbers in {}'.format('allelecn.' + args['format']))
    states(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    if iscorr:
        log('Plotting allele-specific copy numbers corrected by clones in {}'.format('allelecn-corrected.' + args['format']))
        states(bins, pos, cells, index=index, clones=clones, selected=selected, args=args, out='allelecn-corrected.', val='CORR-CNS')
    log('Plotting haplotype-specific copy numbers in {}'.format('haplotypecn.' + args['format']))
    minor(bins, pos, cells, index=index, clones=clones, selected=selected, args=args)
    if iscorr:
        log('Plotting haplotype-specific copy numbers corrected by clones in {}'.format('haplotypecn-corrected.' + args['format']))
        minor(bins, pos, cells, index=index, clones=clones, selected=selected, args=args, out='haplotypecn-corrected.', val='CORR-CNS')
    log('KTHKBYE!')
def read_cells(f):
    """Parse the combined per-cell, per-bin input file.

    Returns (bins, pos, cells, iscorr): bins maps (chrom, start, end) ->
    cell -> record, pos is the genome-ordered bin list, cells is sorted,
    and iscorr tells whether a clone-corrected CNS column (13th) is present.
    """
    bins = defaultdict(lambda : dict())
    cells = set()
    # Sniff the column count from the first line to pick the format.
    with open(f, 'r') as i:
        p = i.readline().strip().split()
    if len(p) == 12:
        # Fields: chrom, start, end, cell, ..., RDR(6), ..., BAF(9), cluster(10), CNS 'A|B'(11).
        form = (lambda p : ((p[0], int(p[1]), int(p[2])), p[3], float(p[6]), float(p[9]), p[10], tuple(map(int, p[11].split('|')))))
        with open(f, 'r') as i:
            for l in i:
                if l[0] != '#' and len(l) > 1:
                    b, e, rdr, baf, c, cns = form(l.strip().split())
                    bins[b][e] = {'RDR' : rdr, 'BAF' : baf, 'Cluster' : c, 'CNS' : cns}
                    cells.add(e)
        pos = sorted(bins.keys(), key=order)
        # Record each bin's genome-wide ordinal position for plotting.
        for x, b in enumerate(pos):
            for e in cells:
                bins[b][e]['Genome'] = x
        return bins, pos, sorted(cells), False
    elif len(p) == 13:
        # Same format plus a clone-corrected copy-number column (CORR-CNS).
        form = (lambda p : ((p[0], int(p[1]), int(p[2])), p[3], float(p[6]), float(p[9]), p[10], tuple(map(int, p[11].split('|'))), tuple(map(int, p[12].split('|')))))
        with open(f, 'r') as i:
            for l in i:
                if l[0] != '#' and len(l) > 1:
                    b, e, rdr, baf, c, cns, corr = form(l.strip().split())
                    bins[b][e] = {'RDR' : rdr, 'BAF' : baf, 'Cluster' : c, 'CNS' : cns, 'CORR-CNS' : corr}
                    cells.add(e)
        pos = sorted(bins.keys(), key=order)
        for x, b in enumerate(pos):
            for e in cells:
                bins[b][e]['Genome'] = x
        return bins, pos, sorted(cells), True
    else:
        raise ValueError("Input format is wrong: 12 or 13 fields expected but {} were found".format(len(p)))
def set_style(args):
    """Configure global matplotlib/seaborn styling (args is currently unused)."""
    plt.style.use('ggplot')
    sns.set_style("whitegrid")
    #plt.rcParams["font.family"] = "Times New Roman"
    plt.rcParams["axes.grid"] = True
    plt.rcParams["axes.edgecolor"] = "k"
    plt.rcParams["axes.linewidth"] = 1.5
def clonemap_to_index(f, cells):
    """Read a clone-map file and return (ordered cells, cell->clone, cell->selected).

    Each non-comment, non-empty line is "CELL CLONE SELECTED"; the returned
    index lists *cells* sorted (stably) by clone id.
    """
    clonemap = {}
    selected = {}
    with open(f, 'r') as handle:
        for line in handle:
            # Skip comments and empty lines.
            if line[0] == '#' or len(line) <= 1:
                continue
            fields = line.strip().split()
            # Each cell may appear at most once in the map.
            assert fields[0] not in clonemap
            clonemap[fields[0]] = int(fields[1])
            selected[fields[0]] = fields[2]
    index = sorted(cells, key=(lambda e : clonemap[e]))
    return index, clonemap, selected
def clustering_tot(bins, pos, cells):
    """Hierarchically cluster cells by their haplotype-specific copy numbers.

    Returns (cells ordered by cluster id, cell -> cluster id).
    """
    # One flat profile per cell: both CNS alleles of every bin, in pos order.
    profiles = [[bins[b][e]['CNS'][allele] for b in pos for allele in (0, 1)] for e in cells]
    dendrogram = hier.linkage(profiles, method='average', metric='hamming', optimal_ordering=True)
    assignments = hier.fcluster(dendrogram, t=len(cells), criterion='maxclust')
    ordered = sorted(zip(assignments, cells), key=(lambda pair : pair[0]))
    return [cell for _, cell in ordered], {e : a for e, a in zip(cells, assignments)}
def exclude_noisy(_bins, _pos, _cells, _index, _clones, _selected):
    """Drop cells whose selected clone is 'None' (unassigned/noisy cells).

    Returns the filtered (bins, pos, cells, index, clones, selected).
    """
    # A cell is kept only when it was assigned to an actual clone.
    check = {e : _selected[e] != 'None' for e in _cells}
    cells = [e for e in _cells if check[e]]
    selected = {e : _selected[e] for e in _selected if check[e]}
    clones = {e : _clones[e] for e in _clones if check[e]}
    index = [e for e in _index if check[e]]
    bins = {b : {e : _bins[b][e] for e in _bins[b] if check[e]} for b in _bins}
    pos = sorted(bins.keys(), key=order)
    return bins, pos, cells, index, clones, selected
def rbplot_unphased(bins, chosen, args):
    """Scatter RDR vs raw (unphased) BAF, one row per chosen cell, colored
    by cluster; saved as rbplot_unphased.<format>."""
    form = (lambda d, e : {'RDR' : d['RDR'], 'BAF' : d['BAF'], 'Cluster' : d['Cluster'], 'Cell' : e})
    df = [form(bins[b][e], e) for b in bins for e in chosen]
    par= {}
    par['data'] = pd.DataFrame(df)
    par['x'] = 'RDR'
    par['y'] = 'BAF'
    par['hue'] = 'Cluster'
    par['row'] = 'Cell'
    par['fit_reg'] = False
    par['legend'] = False
    par['palette'] = 'tab20'
    par['size'] = args['plotsize'][0]
    par['aspect'] = args['plotsize'][1]
    # Suppress warnings raised while building the plot.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        g = sns.lmplot(**par)
    g.despine(top=False, bottom=False, left=False, right=False)
    # BAF is a frequency, so fix the y-range to [0, 1] (with small margins).
    g.set(ylim=(-0.01, 1.01))
    g.set(xlim=(args['xmin'], args['xmax']))
    plt.savefig('rbplot_unphased.{}'.format(args['format']), bbox_inches='tight')
    plt.close()
def rbplot_mirrored(bins, chosen, args):
    """Scatter RDR vs |0.5 - BAF| (mirrored BAF distance from balance), one
    row per chosen cell, colored by cluster; saved as rbplot_mirrored.<format>."""
    # 0.5 - min(BAF, 1-BAF) equals |0.5 - BAF|.
    form = (lambda d, e : {'RDR' : d['RDR'], '|0.5 - BAF|' : 0.5-min(d['BAF'], 1-d['BAF']), 'Cluster' : d['Cluster'], 'Cell' : e})
    df = [form(bins[b][e], e) for b in bins for e in chosen]
    par= {}
    par['data'] = pd.DataFrame(df)
    par['x'] = 'RDR'
    par['y'] = '|0.5 - BAF|'
    par['hue'] = 'Cluster'
    par['row'] = 'Cell'
    par['fit_reg'] = False
    par['legend'] = False
    par['palette'] = 'tab20'
    par['size'] = args['plotsize'][0]
    par['aspect'] = args['plotsize'][1]
    # Suppress warnings raised while building the plot.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        g = sns.lmplot(**par)
    g.despine(top=False, bottom=False, left=False, right=False)
    # |0.5 - BAF| lies in [0, 0.5] by construction.
    g.set(ylim=(-0.01, 0.51))
    g.set(xlim=(args['xmin'], args['xmax']))
    plt.savefig('rbplot_mirrored.{}'.format(args['format']), bbox_inches='tight')
    plt.close()
def crdr(bins, pos, chosen, args):
    """Plot RDR along the genome for each chosen cell, colored by cluster,
    with dashed separators at chromosome boundaries; saved as crdr.<format>."""
    form = (lambda d, e : {'Genome' : d['Genome'], 'RDR' : d['RDR'], 'Cluster' : d['Cluster'], 'Cell' : e})
    df = [form(bins[b][e], e) for b in bins for e in chosen]
    par= {}
    par['data'] = pd.DataFrame(df)
    par['x'] = 'Genome'
    par['y'] = 'RDR'
    par['hue'] = 'Cluster'
    par['row'] = 'Cell'
    par['fit_reg'] = False
    par['legend'] = False
    par['palette'] = 'tab20'
    par['size'] = args['clussize'][0]
    par['aspect'] = args['clussize'][1]
    # Suppress warnings raised while building the plot.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        g = sns.lmplot(**par)
    g.despine(top=False, bottom=False, left=False, right=False)
    for ax in g.axes:
        # Dashed vertical lines mark chromosome boundaries.
        for x, p in enumerate(pos):
            if x > 0 and pos[x-1][0] != pos[x][0]:
                ax[0].plot((x, x), (0, 2), '--b', linewidth=1.0)
        ax[0].margins(x=0, y=0)
    addchrplt(pos)
    g.set(xlim=(0, len(pos)))
    # Bug fix: the user y-range was previously passed to xlim, silently
    # overriding the genome-length x-range set just above.
    g.set(ylim=(args['ymin'], args['ymax']))
    plt.savefig('crdr.{}'.format(args['format']), bbox_inches='tight')
    plt.close()
def cbaf(bins, pos, chosen, args):
    """Plot mirrored BAF (|0.5 - BAF|) along the genome for each chosen
    cell, colored by cluster; saved as cbaf.<format>."""
    # 0.5 - min(BAF, 1-BAF) equals |0.5 - BAF|.
    form = (lambda d, e : {'Genome' : d['Genome'], '|0.5 - BAF|' : 0.5-min(d['BAF'], 1-d['BAF']), 'Cluster' : d['Cluster'], 'Cell' : e})
    df = [form(bins[b][e], e) for b in bins for e in chosen]
    par= {}
    par['data'] = pd.DataFrame(df)
    par['x'] = 'Genome'
    par['y'] = '|0.5 - BAF|'
    par['hue'] = 'Cluster'
    par['row'] = 'Cell'
    par['fit_reg'] = False
    par['legend'] = False
    par['palette'] = 'tab20'
    par['size'] = args['clussize'][0]
    par['aspect'] = args['clussize'][1]
    # Suppress warnings raised while building the plot.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        g = sns.lmplot(**par)
    g.despine(top=False, bottom=False, left=False, right=False)
    for ax in g.axes:
        # Dashed vertical lines mark chromosome boundaries.
        for x, p in enumerate(pos):
            if x > 0 and pos[x-1][0] != pos[x][0]:
                ax[0].plot((x, x), (0, 0.5), '--b', linewidth=1.0)
        ax[0].margins(x=0, y=0)
    addchrplt(pos)
    g.set(xlim=(0, len(pos)))
    # Bug fix: the user y-range was previously passed to xlim, silently
    # overriding the genome-length x-range set just above.
    g.set(ylim=(args['ymin'], args['ymax']))
    # Bug fix: the filename was 'cbaf.' with no extension ('{}' was missing).
    plt.savefig('cbaf.{}'.format(args['format']), bbox_inches='tight')
    plt.close()
def gridrdrs(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='rdrs.', val='RDR'):
    """Render a clustered heatmap of read-depth ratios (capped at 2), one row
    per cell in `index`, delegating the drawing to `draw`."""
    records = []
    mapc = {}
    for row, cell in enumerate(index):
        mapc[row] = (clones[cell], selected[cell])
        records.extend({'Cell' : row, 'Genome' : bins[b][cell]['Genome'], 'RDR' : min(2, bins[b][cell][val])} for b in pos)
    frame = pd.DataFrame(records)
    table = pd.pivot_table(frame, values='RDR', columns=['Genome'], index=['Cell'], aggfunc='first')
    draw(table, bins, pos, cells, index, mapc, palette='coolwarm', center=1, method='single', metric='hamming', title='Read-depth ratios', out=out, args=args)
def gridbafs(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='bafs.', val='BAF'):
    """Render a clustered heatmap of mirrored B-allele frequencies, one row per
    cell in `index`, delegating the drawing to `draw`."""
    records = []
    mapc = {}
    for row, cell in enumerate(index):
        mapc[row] = (clones[cell], selected[cell])
        # mirror the BAF around 0.5 so the plotted value is always <= 0.5
        records.extend({'Cell' : row, 'Genome' : bins[b][cell]['Genome'], 'Mirrored BAF' : min(bins[b][cell][val], 1 - bins[b][cell][val])} for b in pos)
    frame = pd.DataFrame(records)
    table = pd.pivot_table(frame, values='Mirrored BAF', columns=['Genome'], index=['Cell'], aggfunc='first')
    draw(table, bins, pos, cells, index, mapc, palette='YlGnBu_r', center=None, method='single', metric='hamming', title='Mirrored B-allele frequencies', out=out, args=args)
def totalcns(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='totalcn.', val='CNS'):
    """Render a clustered heatmap of total copy numbers (capped at 6), one row
    per cell in `index`, with a fixed color per copy-number level."""
    records = []
    mapc = {}
    for row, cell in enumerate(index):
        mapc[row] = (clones[cell], selected[cell])
        records.extend({'Cell' : row, 'Genome' : bins[b][cell]['Genome'], 'Total CN' : min(6, sum(bins[b][cell][val]))} for b in pos)
    frame = pd.DataFrame(records)
    table = pd.pivot_table(frame, values='Total CN', columns=['Genome'], index=['Cell'], aggfunc='first')
    # one fixed color per total copy number, keeping only the levels present
    level_colors = {0 : 'darkblue', 1 : 'lightblue', 2 : 'lightgray',
                    3 : 'lightgoldenrodyellow', 4 : 'navajowhite',
                    5 : 'red', 6 : 'orchid'}
    present = set(frame['Total CN'])
    colors = [level_colors[level] for level in xrange(7) if level in present]
    cmap = LinearSegmentedColormap.from_list('multi-level', colors, len(colors))
    draw(table, bins, pos, cells, index, mapc, palette=cmap, center=None, method='single', metric='hamming', title='Total copy numbers', out=out, args=args)
def loh(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='loh.', val='CNS'):
    """Render a clustered heatmap flagging loss of heterozygosity (1 when
    either allele has copy number 0, else 0) per bin and cell."""
    df = []
    mapc = {}
    for x, e in enumerate(index):
        df.extend([{'Cell' : x, 'Genome' : bins[b][e]['Genome'], 'LOH' : 1 if 0 in bins[b][e][val] else 0} for b in pos])
        mapc[x] = (clones[e], selected[e])
    df = pd.DataFrame(df)
    table = pd.pivot_table(df, values='LOH', columns=['Genome'], index=['Cell'], aggfunc='first')
    # two-level palette: LOH absent / present
    myColors = sns.cubehelix_palette(2, start=2, rot=0, dark=0, light=.95)
    cmap = LinearSegmentedColormap.from_list('Custom', myColors, len(myColors))
    # typo fix in the displayed title: "heterozigosity" -> "heterozygosity"
    title = 'Loss of heterozygosity (LOH)'
    draw(table, bins, pos, cells, index, mapc, palette=cmap, center=None, method='median', metric='cityblock', title=title, out=out, args=args)
def acns(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='Aspecificcn.', val='CNS'):
    """Render a clustered heatmap of the A-allele copy number (capped at 8),
    one row per cell in `index`."""
    records = []
    mapc = {}
    for row, cell in enumerate(index):
        mapc[row] = (clones[cell], selected[cell])
        records.extend({'Cell' : row, 'Genome' : bins[b][cell]['Genome'], 'A-specific CN' : min(8, bins[b][cell][val][0])} for b in pos)
    frame = pd.DataFrame(records)
    table = pd.pivot_table(frame, values='A-specific CN', columns=['Genome'], index=['Cell'], aggfunc='first')
    draw(table, bins, pos, cells, index, mapc, palette='coolwarm', center=2, method='single', metric='hamming', title='A-specific copy numbers', out=out, args=args)
def bcns(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='Bspecificcn.', val='CNS'):
    """Render a clustered heatmap of the B-allele copy number (capped at 8),
    one row per cell in `index`."""
    records = []
    mapc = {}
    for row, cell in enumerate(index):
        mapc[row] = (clones[cell], selected[cell])
        records.extend({'Cell' : row, 'Genome' : bins[b][cell]['Genome'], 'B-specific CN' : min(8, bins[b][cell][val][1])} for b in pos)
    frame = pd.DataFrame(records)
    table = pd.pivot_table(frame, values='B-specific CN', columns=['Genome'], index=['Cell'], aggfunc='first')
    draw(table, bins, pos, cells, index, mapc, palette='coolwarm', center=2, method='single', metric='hamming', title='B-specific copy numbers', out=out, args=args)
def states(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='allelecn.', val='CNS'):
    """Render a clustered heatmap of allele-specific copy-number states.

    States with total copy number > 6 are snapped to the closest allowed
    state (by L1 distance); each state that occurs gets a fixed color.
    """
    # all allowed (major, minor) states with total CN <= 6 and major >= minor
    avail = [(t - i, i) for t in xrange(7) for i in reversed(xrange(t+1)) if i <= t - i]
    order = (lambda p : (max(p), min(p)))
    # canonicalize a state: reorder if within bounds, else snap to nearest allowed
    convert = (lambda p : order(p) if sum(p) <= 6 else min(avail, key=(lambda x : abs(p[0] - x[0]) + abs(p[1] - x[1]))))
    df = []
    mapc = {}
    found = set()
    for x, e in enumerate(index):
        df.extend([{'Cell' : x, 'Genome' : bins[b][e]['Genome'], 'Value' : convert(bins[b][e][val])} for b in pos])
        mapc[x] = (clones[e], selected[e])
    df = pd.DataFrame(df)
    # keep only the states actually present, in canonical order
    found = [v for v in avail if v in set(df['Value'])]
    # index of each present state; used as the heatmap's integer value
    smap = {v : x for x, v in enumerate(found)}
    df['CN states'] = df.apply(lambda r : smap[r['Value']], axis=1)
    table = pd.pivot_table(df, values='CN states', columns=['Genome'], index=['Cell'], aggfunc='first')
    title = 'Copy-number states'
    #found = set(df['CN states'] for i, r in df.iterrows())
    # one fixed color per allele-specific state, grouped by total copy number
    palette = {}
    palette.update({(0, 0) : 'darkblue'})
    palette.update({(1, 0) : 'lightblue'})
    palette.update({(1, 1) : 'lightgray', (2, 0) : 'dimgray'})
    palette.update({(2, 1) : 'lightgoldenrodyellow', (3, 0) : 'gold'})
    palette.update({(2, 2) : 'navajowhite', (3, 1) : 'orange', (4, 0) : 'darkorange'})
    palette.update({(3, 2) : 'salmon', (4, 1) : 'red', (5, 0) : 'darkred'})
    palette.update({(3, 3) : 'plum', (4, 2) : 'orchid', (5, 1) : 'purple', (6, 0) : 'indigo'})
    colors = [palette[c] for c in found]
    cmap = LinearSegmentedColormap.from_list('multi-level', colors, len(colors))
    draw(table, bins, pos, cells, index, mapc, palette=cmap, center=None, method='single', metric='cityblock', title=title, out=out, args=args)
def minor(bins, pos, cells, index=None, clones=None, selected=None, args=None, out='haplotypecn.', val='CNS'):
    """Render a clustered heatmap of the minor allele per cell.

    The sign encodes which haplotype is the minor one (0 when balanced) and
    the magnitude grows as the minor copy number shrinks (capped at 2).
    """
    def signed_minor(s):
        magnitude = 3 - min(2, min(s))
        if s[0] == s[1]:
            sign = 0
        elif s[0] < s[1]:
            sign = -1
        else:
            sign = 1
        return magnitude * sign
    records = []
    mapc = {}
    for row, cell in enumerate(index):
        mapc[row] = (clones[cell], selected[cell])
        records.extend({'Cell' : row, 'Genome' : bins[b][cell]['Genome'], 'Minor allele' : signed_minor(bins[b][cell][val])} for b in pos)
    frame = pd.DataFrame(records)
    table = pd.pivot_table(frame, values='Minor allele', columns=['Genome'], index=['Cell'], aggfunc='first')
    draw(table, bins, pos, cells, index, mapc, palette='PiYG', center=0, method='single', metric='hamming', title='Minor alleles', out=out, args=args)
def draw(table, bins, pos, cells, index, clones, palette, center, method, metric, title, out, args):
    """Render the pivoted per-cell `table` as a seaborn clustermap.

    Adds a grey-scale chromosome color bar on the columns and one color per
    clone on the rows, then saves the figure to out + args['format'].
    NOTE(review): row/column clustering is disabled below, so `method` and
    `metric` appear unused -- confirm before removing them from the interface.
    """
    # alternate three grey shades to delimit chromosomes along the columns
    chr_palette = cycle(['#525252', '#969696', '#cccccc'])
    chr_colors = {c : next(chr_palette) for c in sorted(set(b[0] for b in bins), key=orderchrs)}
    # order-preserving de-duplication of clones, keeping first appearance
    seen = set()
    seen_add = seen.add
    ordclones = [clones[x] for x in table.index if not (clones[x][0] in seen or seen_add(clones[x][0]))]
    cell_palette = cycle(sns.color_palette("muted", len(set(ordclones))))
    # unselected clones (flag 'None') are drawn in light grey
    clone_colors = {i[0] : next(cell_palette) if i[1] != 'None' else '#f0f0f0' for i in ordclones}
    cell_colors = {x : clone_colors[clones[x][0]] for x in table.index}
    para = {}
    para['data'] = table
    para['cmap'] = palette
    # bug fix: was `if center:`, which silently dropped a requested center of 0
    # (e.g. the minor-allele plot passes center=0)
    if center is not None:
        para['center'] = center
    para['yticklabels'] = False
    para['row_cluster'] = False
    para['xticklabels'] = False
    para['col_cluster'] = False
    para['figsize'] = args['gridsize']
    para['rasterized'] = True
    para['col_colors'] = pd.DataFrame([{'index' : s, 'chromosomes' : chr_colors[pos[x][0]]} for x, s in enumerate(table.columns)]).set_index('index')
    para['row_colors'] = pd.DataFrame([{'index' : x, 'Clone' : cell_colors[x]} for x in table.index]).set_index('index')
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        g = sns.clustermap(**para)
    addchr(g, pos)
    g.fig.suptitle(title)
    plt.savefig(out + args['format'], bbox_inches='tight', dpi=600)
    plt.close()
def addchr(g, pos, color=None):
    """Label the x-axis of a clustermap with one chromosome name centered on
    each chromosome's span of bins.

    NOTE(review): the `color` parameter is currently unused.
    """
    # collect (start, end) bin-index spans, one per chromosome
    corners = []
    prev = 0
    for x, b in enumerate(pos):
        if x != 0 and pos[x-1][0] != pos[x][0]:
            corners.append((prev, x))
            prev = x
    corners.append((prev, x))
    ax = g.ax_heatmap
    ticks = []
    for o in corners:
        # place one tick at the midpoint of each chromosome span
        ax.set_xticks(np.append(ax.get_xticks(), int(float(o[1] + o[0] + 1) / 2.0)))
        ticks.append(pos[o[0]][0])
    ax.set_xticklabels(ticks, rotation=45, ha='center')
    ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
def addchrplt(pos):
    """Replace the current pyplot x-ticks with one chromosome name centered on
    each chromosome's span of bins."""
    spans = []
    start = 0
    label = pos[0][0]
    for idx in range(1, len(pos)):
        if pos[idx][0] != pos[idx - 1][0]:
            # a new chromosome starts here: close the previous span
            spans.append((start, idx, label))
            start = idx
            label = pos[idx][0]
    spans.append((start, len(pos) - 1, label))
    centers = [int(float(lo + hi + 1) / 2.0) for lo, hi, _ in spans]
    plt.xticks(centers, [name for _, _, name in spans], rotation=45, ha='center')
    plt.yticks(rotation=0)
if __name__ == '__main__':
main()
| true |
8c8e1318746ccbb75eaba152e8c06d1598c8b68b | Python | EaconTang/python-cook-notes | /book/machine_learning_in_action/decision_tree/trees.py | UTF-8 | 1,948 | 3.46875 | 3 | [] | no_license | # coding=utf8
from numpy import *
from math import log
import matplotlib
def calc_shannon_ent(data_set):
    """Compute the Shannon entropy of the class labels (last column of each
    row); higher entropy means the labels are more mixed."""
    total = len(data_set)
    label_counts = {}
    for row in data_set:
        label = row[-1]
        label_counts[label] = label_counts.get(label, 0) + 1
    entropy = 0.0
    for count in label_counts.values():
        prob = float(count) / total
        entropy -= prob * log(prob, 2)
    return entropy
def create_data_set():
    """Return the toy fish data set and its feature labels."""
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_names = ['no surfacing', 'flippers']
    return samples, feature_names
def split_dataset(dataset, axis, value):
    """Return the rows whose feature at `axis` equals `value`, with that
    feature column removed from each returned row."""
    return [row[:axis] + row[axis + 1:] for row in dataset if row[axis] == value]
def choose_best_feature_to_split(dataset):
    """Pick the feature index with the highest information gain, or -1 if no
    feature yields a strictly positive gain."""
    feature_count = len(dataset[0]) - 1
    base_entropy = calc_shannon_ent(dataset)
    best_gain = 0.0
    best_feature = -1
    for feature in range(feature_count):
        # weighted entropy after splitting on this feature
        values = set(row[feature] for row in dataset)
        split_entropy = 0.0
        for value in values:
            subset = split_dataset(dataset, feature, value)
            weight = len(subset) / float(len(dataset))
            split_entropy += weight * calc_shannon_ent(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:
            best_gain = gain
            best_feature = feature
    return best_feature
if __name__ == '__main__':
    data_set, labels = create_data_set()
    # Python 2 print statement: shows the index of the best feature to split on
    print choose_best_feature_to_split(data_set)
| true |
4f1edd0c5397940979f4d6660041ffdcf190fd95 | Python | lucianojunnior17/Python | /Curso_Guanabara/aula59.py | UTF-8 | 1,136 | 4.40625 | 4 | [
"MIT"
] | permissive | from time import sleep
# Interactive menu-driven calculator (Portuguese prompts): reads two numbers
# and loops until the user picks option 5.
print(' Olá programa feito para brinar om números ')
sleep(3)
n1 = int(input('Primeiro valor'))
n2 = int(input('Segundo valor'))
opção = 0
while opção != 5:
    print('''[1] SOMAR
    [2] MULTIPLICAR
    [3] MAIOR
    [4] NOVOS NÚMEROS
    [5] SAIR DO PROGRAMA ''')
    opção = int(input('Qual é a sua opção ? '))
    if opção == 1:
        # sum the two current numbers
        soma = n1+n2
        print('A soma de {} e {} é {}'.format(n1,n2,soma))
        sleep(3)
    elif opção == 2 :
        # multiply the two current numbers
        multi= n1*n2
        print('A Multipliacçao de {} e {} é {} '.format(n1,n2,multi))
        sleep(3)
    elif opção == 3:
        # show the larger of the two numbers
        # NOTE(review): local variable is misspelled ('maoir' instead of 'maior')
        if n1 > n2 :
            maoir = n1
        else:
            maoir = n2
        print('O maoir número entre {} e {} é {} '.format(n1,n2,maoir))
        sleep(3)
    elif opção == 4 :
        # replace the two working numbers
        print('Informe os novos números')
        n1 = int(input('Insira o primeiro valor'))
        n2 = int(input('Insira o segundo valor'))
    elif opção == 5 :
        print('Saindo do Programa')
        sleep(2)
    # NOTE(review): any other option silently re-shows the menu
print('Fim do Programa!!!')
| true |
5b0b283265523b4c25ba06b4e7aab3bc7c66cad3 | Python | Puepis/ProjectEuler | /PEuler11 (Reading Grid of Numbers).py | UTF-8 | 2,460 | 4.15625 | 4 | [] | no_license |
'''Description: PEuler 13
"Work out the first ten digits of the sum of the following
one-hundred 50-digit numbers." (numbers read from text file)
Date: Jan. 20, 2019
'''
from operator import mul
def main():
    """Read a grid of integers from grid.txt and print (Python 2 print
    statements) the greatest product of four adjacent numbers horizontally,
    vertically and diagonally."""
    # Open file
    gridFile = open("grid.txt", "r")
    # Initialize sum
    # NOTE(review): theSum is never used; likely leftover from the PEuler 13
    # description quoted in the module docstring
    theSum = 0
    numbersList = []
    # Convert grid to list of integers, one inner list per row
    for line in gridFile:
        line = line.split()
        for index in range(len(line)):
            line[index] = int(line[index])
        numbersList.append(line)
    #numbersList = reduceNumbers(numbersList)
    horiz = checkHorizProd(numbersList)
    vert = checkVertProd(numbersList)
    diag = checkDiagProd(numbersList)
    # Display greatest products
    print horiz
    print vert
    print diag
    # Close file
    gridFile.close()
def checkHorizProd(numList):
    """Return the greatest product of 4 horizontally adjacent numbers in the
    grid, or 0 when no row has 4 entries.

    Rewritten without `reduce`, which is a builtin only in Python 2; the
    explicit product keeps the function working under both Python 2 and 3.
    """
    product = 0
    for x in range(len(numList)):
        # iterate to the 4th-last column of this row
        for y in range(len(numList[x]) - 3):
            currentProd = numList[x][y] * numList[x][y+1] * numList[x][y+2] * numList[x][y+3]
            if currentProd > product:
                product = currentProd
    return product
def checkVertProd(numList):
    """Return the greatest product of 4 vertically adjacent numbers in the
    grid, or 0 when the grid has fewer than 4 rows."""
    best = 0
    # stop 3 rows early so the window of 4 stays inside the grid
    for row in range(len(numList) - 3):
        for col in range(len(numList[row])):
            prod = (numList[row][col] * numList[row + 1][col]
                    * numList[row + 2][col] * numList[row + 3][col])
            if prod > best:
                best = prod
    return best
def checkDiagProd(numList):
    """Return the greatest product of 4 diagonally adjacent numbers, scanning
    both the down-right and the down-left diagonals."""
    best = 0
    # diagonals running down-right
    for row in range(len(numList) - 3):
        for col in range(len(numList[row]) - 3):
            prod = (numList[row][col] * numList[row + 1][col + 1]
                    * numList[row + 2][col + 2] * numList[row + 3][col + 3])
            if prod > best:
                best = prod
    # diagonals running down-left
    # NOTE(review): the column bound uses len(numList), mirroring the original,
    # which implicitly assumes a square grid -- confirm if non-square input is possible
    for row in range(len(numList) - 3):
        for col in range(len(numList) - 1, 2, -1):
            prod = (numList[row][col] * numList[row + 1][col - 1]
                    * numList[row + 2][col - 2] * numList[row + 3][col - 3])
            if prod > best:
                best = prod
    return best
# kick off the search when the script runs (no __main__ guard)
main()
1d91138fde8cfc6f39230d36982d8fb830f15d46 | Python | Mechalabs/LocalHackDay-Dec1 | /Starting Page.py | UTF-8 | 1,681 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | import pygame
import time
pygame.init()
# window dimensions and the main drawing surface
WIDTH = 800
HEIGHT = 800
gameWindow = pygame.display.set_mode((WIDTH, HEIGHT))
# variables
WHITE = (255,255,255)
BLACK = ( 0, 0, 0)
outline = 0
pygame.font.init()
pygame.mixer.init()
font = pygame.font.SysFont("Comic Sans MS", 36)
# NOTE(review): hard-coded absolute Windows path; the "\U" escapes in this
# string are only valid in Python 2 (a SyntaxError in Python 3) -- use raw
# strings or relative paths
Narwhal = pygame.image.load("C:\Users\user\Desktop\Meccha Labs\Undertale Game Thing\Evil Narwhal.png")
##Gaster = pygame.image.load("C:\Users\user\Desktop\Meccha Labs\Undertale Game Thing\Gaster_Sprite2.png")
## SHOULD ADD GLITCHES ON PURPOSE
inPlay = True
# functions
def battlepage():
    """Draw the battle screen (enemy sprite plus Fight/Mercy button outlines),
    start the background music, then block for 5 seconds."""
    gameWindow.fill(BLACK)
    # Fight (left) and Mercy (right) button outlines
    pygame.draw.rect(gameWindow, WHITE, (50, 600, 300, 100), 1)
    pygame.draw.rect(gameWindow, WHITE, (450, 600, 300, 100), 1)
    gameWindow.blit(Narwhal, (155, 20))
    graphics1 = font.render("Fight-space", 1, WHITE)
    graphics2 = font.render("Mercy-enter", 1, WHITE)
    gameWindow.blit(graphics1, (150, 620))
    gameWindow.blit(graphics2, (550, 620))
    pygame.display.update()
    # NOTE(review): music is reloaded and restarted on every call; move the
    # load/play outside the per-frame function if continuous playback is wanted
    pygame.mixer.music.load("C:\Users\user\Desktop\Meccha Labs\Undertale Game Thing\Gasters_Theme.mid")
    pygame.mixer.music.set_volume(0.5)
    pygame.mixer.music.play(loops = -1)
    time.sleep(5)
##def health-point () - see Eric's code as referene
##
##def int ball_game_sim(health)
##    print "this is bgame sim, health passed in is ", health
##    return 10
# main loop
# NOTE(review): inPlay is never set to False and no pygame events are pumped,
# so this loop runs forever and the window will appear unresponsive
while inPlay:
    battlepage()
##    score-board(new health point)
##
##    if key== "space"
##        point = call ballon-game_sim()
##        recalculate health
##
##    if still have health points
##        call game2
##        recalculate health
##
##    if key == "enter"
##        point = call game2
| true |
b3ad09277c8c4fd9fb89cfbeb8ece5df04fdb55c | Python | cmutel/ecoinvent-row-report | /ecoinvent_row_report/filesystem.py | UTF-8 | 304 | 3.140625 | 3 | [] | no_license | import hashlib
def md5(filepath, blocksize=65536):
    """Generate the MD5 hex digest for the file at `filepath`.

    Reads in chunks of `blocksize` bytes so arbitrarily large files are
    hashed without loading them into memory. The file is opened in a `with`
    block so the handle is closed even if reading raises (the original
    version leaked the file object).
    """
    hasher = hashlib.md5()
    with open(filepath, 'rb') as fo:
        # iter() with a sentinel yields chunks until read() returns b''
        for buf in iter(lambda: fo.read(blocksize), b''):
            hasher.update(buf)
    return hasher.hexdigest()
| true |
2bf24e759522d5deb2fb8947884678010b68a755 | Python | 2torus/creme | /creme/metrics/confusion.py | UTF-8 | 4,562 | 3.5625 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import collections
import functools
import operator
__all__ = ['ConfusionMatrix', 'RollingConfusionMatrix']
class ConfusionMatrix(collections.defaultdict):
    """Confusion matrix indexed as ``cm[y_true][y_pred] -> count``.

    Unlike the rest of the classes in the `metrics` module, this one has no
    ``get`` method; inspect the counts by indexing.

    Attributes:
        classes (set): Every class seen so far, whether it appeared as a
            prediction or as a true value.

    Example:

        >>> cm = ConfusionMatrix()
        >>> cm = cm.update('cat', 'ant').update('ant', 'ant')
        >>> cm['cat']['ant']
        1

    """

    def __init__(self):
        super().__init__(collections.Counter)
        # every label ever observed on either side of a prediction
        self.classes_ = set()

    def update(self, y_true, y_pred):
        self[y_true][y_pred] += 1
        self.classes_ |= {y_true, y_pred}
        return self

    @property
    def classes(self):
        return self.classes_

    def __str__(self):
        # sort labels alphabetically so the output is reproducible
        labels = sorted(self.classes)

        # column width: widest label or widest count, plus 2 for padding
        label_width = max(len(str(c)) for c in labels)
        count_width = len(str(max(max(counter.values()) for counter in self.values())))
        width = max(label_width, count_width) + 2

        # one right-aligned cell per label, plus the leading row-label cell
        row_fmt = '{:>{width}}' * (len(labels) + 1)

        # header row followed by one row per true label
        rows = [row_fmt.format('', *map(str, labels), width=width)]
        rows.extend(
            row_fmt.format(str(y_true), *[self[y_true][y_pred] for y_pred in labels], width=width)
            for y_true in sorted(self)
        )
        return '\n'.join(rows)

    def __repr__(self):
        return str(self)
class RollingConfusionMatrix(ConfusionMatrix):
    """Confusion matrix restricted to the ``window_size`` most recent pairs.

    Unlike the rest of the classes in the `metrics` module, this one has no
    ``get`` method; inspect the counts by indexing.

    Parameters:
        window_size (int): Number of most recent ``(y_true, y_pred)``
            observations to keep.

    Attributes:
        classes (set): Every class currently present inside the window.

    """

    def __init__(self, window_size):
        super().__init__()
        # the deque discards the oldest pair automatically once full
        self.events = collections.deque(maxlen=window_size)

    @property
    def window_size(self):
        return self.events.maxlen

    def update(self, y_true, y_pred):
        # count the newest observation
        self[y_true][y_pred] += 1

        # window full: retire the oldest observation before the deque drops it
        if len(self.events) == self.events.maxlen:
            old_true, old_pred = self.events[0]
            self[old_true].subtract([old_pred])
            # drop counters that reached zero so stale classes disappear
            if not self[old_true][old_pred]:
                del self[old_true][old_pred]
            if not self[old_true]:
                del self[old_true]

        self.events.append((y_true, y_pred))
        return self

    @property
    def classes(self):
        # union of the true labels and every predicted label still counted
        present = set(self.keys())
        for y_true in self:
            present |= set(self[y_true].keys())
        return present
| true |
dc643664a440d7429b3deb060f611b8c6cfc90d2 | Python | neont21/do-it-python | /chapter02/ex06_price.py | UTF-8 | 361 | 3.625 | 4 | [] | no_license | def service_price():
    # ask which service tier applies and whether VAT should be added
    service = input('서비스 종류를 입력하세요, a/b/c: ')
    valueAdded = input('부가세를 포함합니까? y/n: ')
    # base prices per service tier, in units of 10,000 KRW ("만원")
    prices = { 'a': 23, 'b': 40, 'c': 67 }
    price = prices[service] # need error handling: any code other than a/b/c raises KeyError here
    if valueAdded == 'y':
        # add 10% value-added tax
        price *= 1.1
    print(str(round(price, 1))+'만원입니다')
# run once on import/execution (no __main__ guard)
service_price()
| true |
a015f0543b6b7f9facd33ee0981f6ca76d329534 | Python | cccristhian/django | /bolg/models.py | UTF-8 | 746 | 2.53125 | 3 | [] | no_license | from django.db import models
from django.utils import timezone
class Publicar(models.Model):
    """Blog post model ("Publicar" = post/publish)."""
    # NOTE(review): ForeignKey requires an explicit on_delete argument from
    # Django 2.0 onwards -- confirm this project's Django version
    autor =models.ForeignKey('auth.User')
    titulo=models.CharField(max_length=200)
    texto=models.TextField()
    # creation timestamp, defaults to "now" at save time
    fecha_crear=models.DateTimeField(
        default=timezone.now)
    # publication timestamp; stays empty until the post is published
    fecha_publica=models.DateTimeField(
        blank=True, null=True)
    def publicacion(self):
        # stamp the publication date and persist the row
        self.fecha_publica=timezone.now()
        self.save()
    def __str__(self):
        # main display field used when the model is listed/searched
        return self.titulo
    def publish(self):
        # NOTE(review): duplicate of publicacion(); consider keeping only one
        self.fecha_publica = timezone.now()
        self.save()
# Create your models here.
# `self` refers to this same table (model instance)
# def __str__(self): the main display field shown when a lookup/search is performed
| true |
4fd0c081415b1f1c9eb2392160e3660519186aca | Python | sunnyliang6/Infinite-Double-Panda | /main.py | UTF-8 | 36,751 | 2.96875 | 3 | [] | no_license | ####################################
# This game is based on the original Double Panda game:
# https://www.coolmathgames.com/0-double-panda
####################################
####################################
# Run this file to run the project
# This file contains the game loop
####################################
import random, os, sqlite3
import pygame as pg
from characters import *
from terrain import *
from settings import *
# used game framework template from: https://youtu.be/uWvb3QzA48c
class Game(object):
# initializes game window, etc.
    def __init__(self):
        """Initialize the pygame subsystems, the window and the game clock."""
        pg.init() # initialize pygame modules
        pg.mixer.init() # initialize mixer for music
        self.screen = pg.display.set_mode((screenWidth, screenHeight))
        pg.display.set_caption(title)
        self.clock = pg.time.Clock()
        # running stays True until the user quits the whole application
        self.running = True
# starts new game
    def new(self):
        """Start a new game session: show login/help screens, reset state,
        load sounds/images, create both pandas, and either restore the saved
        game for a known username or build the standard starting terrain."""
        self.playing = True
        self.showLoginScreen()
        if not self.playing:
            # the player quit from the login screen
            return
        self.showHelpScreen()
        # brand new game
        self.score = 0
        self.getHelp = False
        self.players = pg.sprite.Group()
        self.playerMaxX = 0
        # sounds
        # Music stream is "Happy Tune" by syncopika from:
        # https://opengameart.org/content/happy-tune
        pg.mixer.music.load(os.path.join(soundsFolder, 'happytune.ogg'))
        # "Platformer Jumping Sound" by dklon from: https://opengameart.org/content/platformer-jumping-sounds
        self.jumpSound = pg.mixer.Sound(os.path.join(soundsFolder, 'jump.wav'))
        # "8-Bit Retro SFX" by Christian DeTamble - http://therefactory.bplaced.net from: https://opengameart.org/content/8-bit-retro-sfx
        self.shootSound = pg.mixer.Sound(os.path.join(soundsFolder, 'shoot.ogg'))
        # "8-Bit Jump #1" by Jesús Lastra from: https://opengameart.org/content/8-bit-jump-1
        self.dieSound = pg.mixer.Sound(os.path.join(soundsFolder, 'die.wav'))
        # initialize the two players
        self.giantPanda = GiantPanda('Giant Panda', self)
        self.players.add(self.giantPanda)
        self.redPanda = RedPanda('Red Panda', self)
        self.players.add(self.redPanda)
        # default direction current player is giantPanda
        self.currPlayer = self.giantPanda
        self.otherPlayer = self.redPanda
        # does not change for either current character
        self.scrollX = 0
        # for switching between players
        self.isSwitching = False
        self.otherPlayerIsOnCurr = False
        # background image is from the original Double Panda game: https://www.coolmathgames.com/0-double-panda
        self.background = pg.image.load(os.path.join(imagesFolder, 'gamebackground.png')).convert()
        self.platforms = []
        self.floor = Floor(self)
        self.bamboos = []
        self.candies = []
        self.enemies = []
        if self.checkUsernameExists(self.username):
            # if user has stored data, get and read data into game
            userData = self.getUserData(self.username)
            if self.readUserData(userData):
                # if this is False, no game state data will be read and standard starting terrain will be generated in next lines
                return
        # generate standard starting terrain
        self.startingTerrain()
# generates starting terrain for every new game
    def startingTerrain(self):
        """Build the fixed opening layout: four platforms, one bamboo stalk,
        five candies and two enemies. Used for every brand-new game."""
        # platforms: Platform(game, level, x, width)
        plat1 = Platform(self, 1, 600, 300)
        plat2 = Platform(self, 2.5, 500, 425)
        plat3 = Platform(self, 3.5, 675, 350)
        plat4 = Platform(self, 1, 1150, 300)
        self.platforms.extend([plat1, plat2, plat3, plat4])
        # bamboos
        bamboo1 = Bamboo(self, 1100)
        self.bamboos.append(bamboo1)
        # candies, placed on top of their platforms
        candy1 = Candy(self, 650, plat1.rect.top, '')
        candy2 = Candy(self, 750, plat3.rect.top, '')
        candy3 = Candy(self, 800, plat3.rect.top, '')
        candy4 = Candy(self, 850, plat3.rect.top, '')
        candy5 = Candy(self, 1300, plat4.rect.top, '')
        self.candies.extend([candy1, candy2, candy3, candy4, candy5])
        # enemies: each enemy is tracked both globally and by its platform
        enemy1 = ArcherEnemy(self, plat4)
        self.enemies.append(enemy1)
        plat4.addEnemy(enemy1)
        enemy2 = BasicEnemy(self, plat2)
        self.enemies.append(enemy2)
        plat2.addEnemy(enemy2)
# game loop
    def run(self):
        """Main game loop: process events, update state and redraw every frame
        until `playing` becomes False, with looping background music."""
        pg.mixer.music.play(loops=-1)
        while self.playing:
            self.clock.tick(fps) # standardizes fps across machines
            self.events()
            self.update()
            self.draw()
        pg.mixer.music.fadeout(500)
# checks for events
    def events(self):
        """Handle the frame's input events: window close, help (H), score
        cheat (S), save-and-quit (Q), jump/climb (Up) and panda switch (Space).
        Key input is ignored while the camera is mid-switch."""
        for event in pg.event.get():
            # check for closing window
            if event.type == pg.QUIT:
                if self.playing:
                    self.playing = False
                self.running = False
            if event.type == pg.KEYDOWN:
                # check for jumping or climbing
                if self.isSwitching:
                    # ignore key presses during the switch transition
                    return
                if event.key == pg.K_h:
                    self.showHelpScreen()
                    return
                if event.key == pg.K_s:
                    # NOTE(review): debug/cheat key, grants 100000 points
                    self.score += 100000
                if event.key == pg.K_q:
                    # save current game state and quit game
                    self.saveData()
                    self.showQuitScreen()
                    self.playing = False
                    self.running = False
                if event.key == pg.K_UP:
                    # the red panda climbs at bamboo instead of jumping
                    if self.currPlayer.name == self.redPanda.name:
                        if not self.currPlayer.atBamboo():
                            self.currPlayer.jump()
                    else:
                        self.currPlayer.jump()
                # check for switching players
                if event.key == pg.K_SPACE:
                    self.currPlayer.stop()
                    if self.currPlayer.name == self.giantPanda.name:
                        self.currPlayer = self.redPanda
                        self.otherPlayer = self.giantPanda
                    else:
                        self.currPlayer = self.giantPanda
                        self.otherPlayer = self.redPanda
                    self.isSwitching = True
####################################
# Update helper methods
####################################
# following function derived from makePlayerVisibile() from https://www.cs.cmu.edu/~112/notes/notes-animations-part3.html
# scroll to make currPlayer etc. visible as needed
def makePlayerVisible(self):
# cannot scroll to the left of the starting position
centerX = screenWidth / 2
if (self.currPlayer.rect.centerx > centerX):
self.scrollX = self.currPlayer.rect.centerx - centerX
# manipulates self.scrollX to bring the new currPlayer to center of canvas
    def switchTransition(self):
        """Animate the camera toward the newly-selected panda, one scroll step
        per frame, and clear `isSwitching` once it is centered (or once the
        left boundary of the world is reached)."""
        leftEdgeToStartDist = screenWidth / 2
        # leftEdgeToStartDist is the distance from the left edge of the screen
        if (scrollSpeedWhenSwitching >= abs((self.currPlayer.rect.centerx - leftEdgeToStartDist) - self.scrollX)):
            # reached the new currPlayer: snap exactly onto it
            self.scrollX = self.currPlayer.rect.centerx - leftEdgeToStartDist
            self.isSwitching = False
        elif (0 > (self.currPlayer.rect.centerx - leftEdgeToStartDist) - self.scrollX):
            # cannot scroll past the left of the starting position
            if (self.scrollX <= scrollSpeedWhenSwitching):
                self.scrollX = 0
                self.isSwitching = False
            else:
                # new currPlayer is to the left of old currPlayer
                self.scrollX -= scrollSpeedWhenSwitching
        elif (0 < (self.currPlayer.rect.centerx - leftEdgeToStartDist) - self.scrollX):
            # new currPlayer is to the right of old currPlayer
            self.scrollX += scrollSpeedWhenSwitching
def updateEnemies(self):
for enemy in self.enemies:
enemy.update()
# updates attributes
    def update(self):
        """Per-frame state update: end the game when either panda is out of
        lives, run the camera-switch animation if active, otherwise update all
        sprites and extend the terrain ahead of the player."""
        if self.giantPanda.livesLeft < 1 or self.redPanda.livesLeft < 1:
            # game over
            self.saveOnlyNameScore()
            self.playing = False
        elif self.isSwitching:
            self.switchTransition()
        else:
            self.makePlayerVisible()
            self.players.update()
            self.updateEnemies()
            # generate more terrain, enemies, and candy as Player moves
            # (when the last platform is less than half a screen ahead)
            if (self.platforms[-1].rect.right - self.playerMaxX < screenWidth / 2): # 400
                self.generateTerrain()
####################################
# Terrain generation methods
####################################
# returns list of last 5 or less platforms
def getLastFewPlatforms(self):
if (len(self.platforms) < 5):
return self.platforms
else:
return self.platforms[-5::+1]
# returns the furthest rect.right from the given list of platforms
def getFurthestRight(self, platforms):
furthestRight = 0
for platform in platforms:
if platform.rect.right > furthestRight:
furthestRight = platform.rect.right
furthestBambooRight = self.bamboos[-1].x + bambooWidth / 2
if (furthestBambooRight > furthestRight):
furthestRight = furthestBambooRight
return furthestRight
# generates a group of platforms with enemies, candy, and a bamboo if necessary
    def generateTerrain(self):
        """Generate the next group of platforms (with candy and enemies), plus
        a bamboo stalk when the group starts too high to reach by jumping.

        Levels are vertical tiers; bottom and top tiers are chosen randomly
        (the top tier rises with the score), then one platform is created per
        tier, occasionally skipping a tier to force panda-stacking.
        """
        # inspired by https://youtu.be/iQXLQzOaIpE and the 15-112 Game AI TA
        # Mini-Lecture to use probabilities (random number)
        # from the video, I was also inspired to use a loop to generate 'terrain'
        # determine where the bottom platform will be
        r1 = random.randint(0, 100)
        if (r1 < 40):
            bottomLevel = 1
        elif (40 <= r1 < 60):
            bottomLevel = 2
        elif (60 <= r1 < 75):
            bottomLevel = 2.5
        elif (75 <= r1 < 88):
            bottomLevel = 3
        else:
            bottomLevel = 4
        # determine where the top platform will be
        if (self.score < 1500):
            # depends on the score (higher score -> higher top level)
            if (bottomLevel < 3):
                topLevel = 3
            else:
                topLevel = bottomLevel + 1
        else:
            r2 = random.randint(0, 100)
            if (r2 < 70):
                topLevel = 5
            else:
                if (bottomLevel < 4):
                    topLevel = 4
                else:
                    topLevel = 5
        level = bottomLevel
        levelCount = 1
        lastFewPlatforms = self.getLastFewPlatforms()
        furthestRight = self.getFurthestRight(lastFewPlatforms)
        # possible horizontal offsets of a new platform from the current edge
        x0SetOffChoices = [100, 150, 200]
        skippedLevel = False
        # generate bamboo if required (too high to reach by just jumping on e/o)
        if (bottomLevel > 2):
            generatedBamboo = False
            # probability of bamboo being on the left or right
            probBambooLR = random.choice([0, 1])
            if (probBambooLR == 0):
                # generate bamboo to the left of the group of platforms
                self.generateBamboo(furthestRight, 0)
                generatedBamboo = True
                x0SetOffChoices = [250]
        # generate platforms by looping from bottom to top levels
        while level <= topLevel:
            # further platforms up can be set off farther
            if (level > bottomLevel):
                x0SetOffChoices.extend([i for i in range(x0SetOffChoices[-1]+50, 401, 50)])
            # calculate x and width of new platform
            x = furthestRight + random.choice(x0SetOffChoices)
            width = random.randint(platMinLength, platMaxLength)
            # create new platform
            newPlatform = Platform(self, level, x, width)
            self.platforms.append(newPlatform)
            self.generateCandy(newPlatform, bottomLevel)
            self.generateEnemies(newPlatform, topLevel)
            # determine next platform to add
            # randomly skip a platform (requiring one player to jump on the other's head)
            r3 = random.randint(0, 100)
            if (bottomLevel != 2 and not skippedLevel):
                if (r3 < 20):
                    level += 1.5
                    skippedLevel = True
                elif (20 <= r3 < 40):
                    level += 2
                    skippedLevel = True
                else:
                    level += 1
            else:
                level += 1
            levelCount += 1
        # a high group always gets a bamboo: on the right if not already placed on the left
        if (bottomLevel > 2 and generatedBamboo == False):
            self.generateBamboo(0, levelCount - 1)
# generate candy on a given platform
    def generateCandy(self, platform, bottomLevel):
        """Scatter a row of candies along `platform`, turning at most one of
        them into fried rice (which only the red panda can eat); the fried
        rice probability rises with the platform's level."""
        # inspired by https://youtu.be/iQXLQzOaIpE and Game AI TA Mini-Lecture
        # to use probabilities (random number)
        # from the video, I was also inspired to use a loop to generate 'terrain'
        # every platform have candy
        # randomly choose where to start in the left half of the platform
        x = random.randint(platform.rect.left + candyWidth / 2, 
                           int(platform.rect.left + (platform.rect.right - platform.rect.left) / 2))
        # randomly choose where to stop in the right half of the platform
        x1 = random.randint(int(platform.rect.left + (platform.rect.right - platform.rect.left) / 2), 
                            platform.rect.right - candyWidth)
        # there should only be one on a platform at a time
        # only red panda can eat fried rice
        madeFriedRice = False
        # loop through the length of the platform
        while (x < x1):
            newCandy = Candy(self, x, platform.rect.top, '')
            # decide whether to make fried rice
            if (madeFriedRice == False):
                if (bottomLevel > 2):
                    # increase chance of fried rice if bottomLevel > 2
                    probFriedRice = random.randint(0, 100)
                    if (platform.level == 5):
                        newCandy.makeIntoFriedRice()
                        madeFriedRice = True
                    elif (platform.level == 4 and probFriedRice < 90):
                        newCandy.makeIntoFriedRice()
                        madeFriedRice = True
                    elif (platform.level == 3 and probFriedRice < 80):
                        newCandy.makeIntoFriedRice()
                        madeFriedRice = True
                    elif (platform.level == 2.5 and probFriedRice < 70):
                        newCandy.makeIntoFriedRice()
                        madeFriedRice = True
                else:
                    # low groups: flat 10% chance of fried rice
                    probFriedRice = random.randint(0, 100)
                    if (probFriedRice < 10):
                        newCandy.makeIntoFriedRice()
                        madeFriedRice = True
            self.candies.append(newCandy)
            x += candyWidth + 20
# generate enemies on a given platform
def generateEnemies(self, platform, topLevel):
# inspired by https://youtu.be/iQXLQzOaIpE and Game AI TA Mini-Lecture
# to use probabilities (random number)
# from the video, I was also inspired to use a loop to generate 'terrain'
# probability of this platform having enemy(ies)
probEnemy = random.randint(0, 100)
if (self.score < 5000 and probEnemy < 60):
# very low score (< 5k) has 40% chance of enemy
return
elif (5000 <= self.score < 10000 and probEnemy < 50):
# low score (< 10k) has 50% chance of enemy
return
elif (self.score >= 10000 and probEnemy < 40):
# high score (>= 10k) has 60% chance of enemy
return
# determine how many enemies on the platform
probEnemyCount = random.randint(0, 100)
platformLength = platform.rect.width
if (self.score < 5000):
# very low score (< 5k) will only have 1 enemy per platform
enemyCount = 1
elif (5000 <= self.score < 10000):
if (probEnemy < 15 and platformLength > 300):
# low score (< 10k) has <15% chance of having 2 enemies per platform
enemyCount = 2
else:
# low score (< 10k) has 85% chance of having 1 enemies per platform
enemyCount = 1
elif (self.score >= 10000):
if (probEnemyCount < 5 and platformLength > 400):
# high score (>= 10k) has <5% chance of having 3 enemies per platform
enemyCount = 3
elif (5 <= probEnemyCount < 20 and platformLength > 300):
# high score (>= 10k) has <15% chance of having 2 enemies per platform
enemyCount = 2
else:
# high score (>= 10k) has 80% chance of having 1 enemies per platform
enemyCount = 1
# loop through enemyCount to generate enemies on the platform
while (enemyCount > 0):
# determine what type of enemy
probType = random.randint(0, 10)
if (probType < 6):
newEnemy = BasicEnemy(self, platform)
else:
if (topLevel - platform.level < 1):
# archer enemies should only appear on the top level to be
# able to be killed
newEnemy = ArcherEnemy(self, platform)
else:
newEnemy = BasicEnemy(self, platform)
self.enemies.append(newEnemy)
platform.addEnemy(newEnemy)
enemyCount -= 1
# generate one bamboo next to a group of platforms that requires it
    def generateBamboo(self, furthestRight, levelCount):
        """Place one climbable bamboo next to a group of platforms.

        If furthestRight is non-zero the bamboo goes to the right of that x
        coordinate; otherwise, if levelCount is non-zero, it goes to the
        right of the last levelCount platforms, offset by their top level.

        NOTE(review): if both furthestRight and levelCount are 0,
        ``newBamboo`` is never assigned and the final append raises a
        NameError; likewise ``xSetOff`` is unbound when ``lastLevel`` matches
        none of the listed values. Confirm callers never hit those cases.
        """
        # Inspired by https://youtu.be/iQXLQzOaIpE and Game AI TA Mini-Lecture
        # to use probabilities (random number).
        if (furthestRight != 0):
            # this means that the bamboo is to the left of the platforms
            xSetOff = random.randint(130, 150)
            newBamboo = Bamboo(self, xSetOff + furthestRight)
        elif (levelCount != 0):
            # this means that the bamboo is to the right of the platforms
            # find furthest x1 among the last `levelCount` platforms
            furthestRight = 0
            furthestPlatform = self.platforms[-1]
            for i in range(-1, levelCount * -1 - 1, -1):
                right = self.platforms[i].rect.right
                if (right > furthestRight):
                    furthestRight = right
                    furthestPlatform = self.platforms[i]
            # find what the x setoff should be; higher platform levels get a
            # smaller gap so the bamboo stays reachable
            lastLevel = furthestPlatform.level
            if (lastLevel == 5 or lastLevel == 4.5):
                xSetOff = 100
            elif (lastLevel == 4 or lastLevel == 3.5):
                xSetOff = random.randint(100, 150)
            elif (lastLevel == 3):
                xSetOff = random.randint(150, 200)
            elif (lastLevel == 2.5):
                xSetOff = random.randint(200, 250)
            newBamboo = Bamboo(self, xSetOff + furthestRight)
        self.bamboos.append(newBamboo)
####################################
# Draw methods
####################################
    def drawPlatforms(self):
        """Render every platform in the current level."""
        for plat in self.platforms:
            plat.draw()
    def drawBamboos(self):
        """Render every bamboo stalk."""
        for bamboo in self.bamboos:
            bamboo.draw()
    def drawCandies(self):
        """Render every remaining candy pickup."""
        for candy in self.candies:
            candy.draw()
    def drawEnemies(self):
        """Render every live enemy."""
        for enemy in self.enemies:
            enemy.draw()
    def drawScore(self):
        """Draw the HUD: remaining lives for both pandas and the score."""
        self.drawText('Lives Left', 15, white, 8, screenHeight - 69, 'left')
        self.drawText(f'Giant Panda: {self.giantPanda.livesLeft}', 15, white, 8, screenHeight - 46, 'left')
        self.drawText(f'Red Panda: {self.redPanda.livesLeft}', 15, white, 8, screenHeight - 23, 'left')
        self.drawText(f'Score: {self.score}', 15, white, screenWidth - 6, screenHeight - 23, 'right')
# draw next screen
    def draw(self):
        """Redraw one full frame: background, world objects, HUD, players."""
        self.screen.blit(self.background, (0, 0))
        self.floor.draw()
        self.drawPlatforms()
        self.drawBamboos()
        self.drawCandies()
        self.drawEnemies()
        self.drawScore()
        # Draw the controlled panda last so it appears on top.
        self.otherPlayer.draw()
        self.currPlayer.draw()
        # after drawing everything, flip the display
        pg.display.flip()
####################################
# Data methods
####################################
# returns True if username exists
def checkUsernameExists(self, username):
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
try:
cursor.execute(f'SELECT username FROM userData')
result = cursor.fetchall()
allUsernames = []
for user in result:
allUsernames.append(user[0])
if username in allUsernames:
return True
else:
return False
except:
return False
# returns data of this user
def getUserData(self, username):
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
cursor.execute("SELECT * FROM userData WHERE username = :username", {'username': username})
return cursor.fetchall()[0]
# sets up game attributes using saved user data
def readUserData(self, userData):
gpLives = int(userData[2])
rpLives = int(userData[3])
if gpLives < 1 or rpLives < 1:
return False
self.score = int(userData[1])
self.giantPanda.livesLeft = gpLives
self.redPanda.livesLeft = rpLives
self.giantPanda.pos.x = float(userData[4])
self.giantPanda.pos.y = float(userData[5])
self.redPanda.pos.x = float(userData[6])
self.redPanda.pos.y = float(userData[7])
self.giantPanda.rect.centerx = self.giantPanda.pos.x
self.giantPanda.rect.bottom = self.giantPanda.pos.y
self.redPanda.rect.centerx = self.redPanda.pos.x
self.redPanda.rect.bottom = self.redPanda.pos.y
if userData[8] == self.giantPanda.name:
self.currPlayer = self.giantPanda
else:
self.currPlayer = self.redPanda
self.scrollX = int(userData[9])
platList = userData[10].split(', ')
for plat in platList[:-1]:
platValues = plat.split(' ')
level = float(platValues[0])
x = int(platValues[1])
width = int(platValues[2])
newPlat = Platform(self, level, x, width)
self.platforms.append(newPlat)
enemies = platValues[3]
for i in range(len(enemies)):
if enemies[i] == 'b':
newEnemy = BasicEnemy(self, newPlat)
elif enemies[i] == 'a':
newEnemy = ArcherEnemy(self, newPlat)
self.enemies.append(newEnemy)
newPlat.addEnemy(newEnemy)
bambooList = userData[11].split(', ')
for bamboo in bambooList[:-1]:
x = int(bamboo)
newBamboo = Bamboo(self, x)
self.bamboos.append(newBamboo)
candyList = userData[12].split(', ')
for candy in candyList[:-1]:
candyValues = candy.split(' ')
x = int(candyValues[0])
y = int(candyValues[1])
candyType = candyValues[2]
newCandy = Candy(self, x, y, candyType)
self.candies.append(newCandy)
return True
# returns list of scores from all users
def getScores(self):
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
cursor.execute(f'SELECT {score} FROM {userData}')
allScores = []
for user in cursor.fetchall():
allScores.append(user[0])
return allScores
# returns platform, enemy, bamboo, and candy data in string format
def getMyStringData(self):
platData = ''
for plat in self.platforms:
enemyData = ''
for enemy in plat.enemiesOn:
if enemy.name == 'Basic Enemy':
enemyData += 'b'
elif enemy.name == 'Archer Enemy':
enemyData += 'a'
platData += f'{plat.level} {plat.rect.x} {plat.rect.width} {enemyData}, '
bambooData = ''
for bamboo in self.bamboos:
bambooData += f'{bamboo.rect.centerx}, '
candyData = ''
for candy in self.candies:
candyData += f'{candy.rect.x} {candy.rect.bottom} {candy.candyType}, '
return platData, bambooData, candyData
# saves only username, score, and lives left
def saveOnlyNameScore(self):
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
if self.checkUsernameExists(self.username):
# user has already made a previous account
# compare highScore to current score
try:
cursor.execute('SELECT highScore FROM userData WHERE username = :username', {'username': self.username})
highScore = cursor.fetchall()[0][0]
if self.score > highScore:
highScore = self.score
except:
highScore = self.score
# data entry
cursor.execute('''UPDATE userData SET
score = :score,
gpLives = :gpLives, rpLives = :rpLives,
highScore = :highScore WHERE username = :username''',
{'score': self.score,
'gpLives': 0, 'rpLives': 0,
'highScore': highScore,
'username': self.username})
else:
# make new user account
highScore = self.score
# data entry
cursor.execute('''INSERT OR REPLACE INTO userData VALUES
(:username, :score,
:gpLives, :rpLives,
:gpPosX, :gpPosY,
:rpPosX, :rpPosY,
:currPlayer, :scrollX,
:platforms, :bamboos, :candies,
:highScore)''',
{'username': self.username, 'score': self.score,
'gpLives': 0, 'rpLives': 0,
'gpPosX': 0, 'gpPosY': 0,
'rpPosX': 0, 'rpPosY': 0,
'currPlayer': '', 'scrollX': 0,
'platforms': '', 'bamboos': '', 'candies': '',
'highScore': highScore})
conn.commit()
cursor.close()
conn.close()
# saves user and game state data
def saveData(self):
platData, bambooData, candyData = self.getMyStringData()
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
if self.checkUsernameExists(self.username):
# user has already made a previous account
# compare highScore to current score
try:
cursor.execute('SELECT highScore FROM userData WHERE username = :username', {'username': self.username})
highScore = cursor.fetchall()[0][0]
if self.score > highScore:
highScore = self.score
except:
highScore = self.score
# data entry
cursor.execute('''UPDATE userData SET
score = :score,
gpLives = :gpLives, rpLives = :rpLives,
gpPosX = :gpPosX, gpPosY = :gpPosY,
rpPosX = :rpPosX, rpPosY = :rpPosY,
currPlayer = :currPlayer, scrollX = :scrollX,
platforms = :platforms, bamboos = :bamboos, candies = :candies,
highScore = :highScore WHERE username = :username''',
{'score': self.score,
'gpLives': self.giantPanda.livesLeft, 'rpLives': self.redPanda.livesLeft,
'gpPosX': self.giantPanda.pos.x, 'gpPosY': self.giantPanda.pos.y,
'rpPosX': self.redPanda.pos.x, 'rpPosY': self.redPanda.pos.y,
'currPlayer': self.currPlayer.name, 'scrollX': self.scrollX,
'platforms': platData, 'bamboos': bambooData, 'candies': candyData,
'highScore': highScore,
'username': self.username})
else:
# make new user account
highScore = self.score
# data entry
cursor.execute('''INSERT OR REPLACE INTO userData VALUES
(:username, :score,
:gpLives, :rpLives,
:gpPosX, :gpPosY,
:rpPosX, :rpPosY,
:currPlayer, :scrollX,
:platforms, :bamboos, :candies,
:highScore)''',
{'username': self.username, 'score': self.score,
'gpLives': self.giantPanda.livesLeft, 'rpLives': self.redPanda.livesLeft,
'gpPosX': self.giantPanda.pos.x, 'gpPosY': self.giantPanda.pos.y,
'rpPosX': self.redPanda.pos.x, 'rpPosY': self.redPanda.pos.y,
'currPlayer': self.currPlayer.name, 'scrollX': self.scrollX,
'platforms': platData, 'bamboos': bambooData, 'candies': candyData,
'highScore': highScore})
conn.commit()
cursor.close()
conn.close()
# helper for mergeSort()
def merge(self, list1, list2):
result = []
i = j = 0
while i < len(list1) or j < len(list2):
if j == len(list2) or (i < len(list1) and list1[i][1] >= list2[j][1]):
result.append(list1[i])
i += 1
else:
result.append(list2[j])
j += 1
return result
# recursive merge sort (highest to lowest)
# from: https://www.cs.cmu.edu/~112/notes/notes-recursion-part1.html#mergesort
def mergeSort(self, L):
if len(L) < 2:
return L
else:
mid = len(L) // 2
front = L[:mid]
back = L[mid:]
return self.merge(self.mergeSort(front), self.mergeSort(back))
# returns top 5 usernames and scores
def getLeaderboard(self):
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
# get usernames and high scores of all users
cursor.execute('''SELECT username, highScore FROM userData''')
# mergesort from highest to lowest all of the high scores
result = cursor.fetchall()
result = self.mergeSort(result)
# if there are less than 5 entries, fill the rest with dashes if none ('-', '-')
i = len(result)
while i < 5:
result.append(('-', '-'))
i += 1
# if there are more than 5 entries, only show 5
if len(result) > 5:
result = result[:5]
return result
####################################
# Screen methods
####################################
# game splash/start/intro screen
def showIntroScreen(self):
# intro image contains images from the original Double Panda game:
# https://www.coolmathgames.com/0-double-panda
image = pg.image.load(os.path.join(imagesFolder, 'intro.png')).convert()
self.screen.blit(image, (0, 0))
pg.display.flip()
self.waitForKeyPress()
# login screen
def showLoginScreen(self):
# intro image contains images from the original Double Panda game:
# https://www.coolmathgames.com/0-double-panda
image = pg.image.load(os.path.join(imagesFolder, 'login.png')).convert()
self.screen.blit(image, (0, 0))
pg.display.flip()
username = self.waitForTextEntry(30, 390, 442)
self.username = username
conn = sqlite3.connect('data.db')
cursor = conn.cursor()
# create table
cursor.execute('''CREATE TABLE IF NOT EXISTS userData (
username TEXT, score INTEGER,
gpLives INTEGER, rpLives INTEGER,
gpPosX REAL, gpPosY REAL,
rpPosX REAL, rpPosY REAL,
currPlayer TEXT, scrollX INTEGER,
platforms TEXT, bamboos TEXT, candies TEXT,
highScore INTEGER)''')
conn.commit()
cursor.close()
conn.close()
# collects text entry and returns text after 'Enter' is pressed
def waitForTextEntry(self, size, x, y):
waiting = True
text = ''
image = pg.image.load(os.path.join(imagesFolder, 'login.png')).convert()
while waiting:
for event in pg.event.get():
if event.type == pg.KEYDOWN:
if event.unicode.isalnum():
text += event.unicode
elif event.key == pg.K_BACKSPACE:
text = text[:-1]
elif event.key == pg.K_RETURN:
waiting = False
return text
elif event.type == pg.QUIT:
waiting = False
self.running = False
self.playing = False
return ''
self.screen.blit(image, (0, 0))
self.drawText(text, size, white, x, y, 'left')
pg.display.flip()
# instructions screen
def showHelpScreen(self):
# instructions image contains images from the original Double Panda game:
# https://www.coolmathgames.com/0-double-panda
image = pg.image.load(os.path.join(imagesFolder, 'instructions.png')).convert()
self.screen.blit(image, (0, 0))
pg.display.flip()
self.waitForKeyPress()
# quit screen
def showQuitScreen(self):
# quit image contains images from the original Double Panda game:
# https://www.coolmathgames.com/0-double-panda
image = pg.image.load(os.path.join(imagesFolder, 'quit.png')).convert()
self.screen.blit(image, (0, 0))
self.drawText(f'{self.score}', 20, tan, 417, 308, 'left')
pg.display.flip()
self.waitForKeyPress()
# game over screen
def showGameOverScreen(self):
# game over image contains images from the original Double Panda game:
# https://www.coolmathgames.com/0-double-panda
if not self.running:
return
image = pg.image.load(os.path.join(imagesFolder, 'gameover.png')).convert()
self.screen.blit(image, (0, 0))
# draw current score
self.drawText(f'{self.score}', 20, tan, 410, 224, 'left')
# draw leaderboad scores
leaderboard = self.getLeaderboard()
for i in range(len(leaderboard)):
y = 380 + 20 * i
username = leaderboard[i][0]
score = leaderboard[i][1]
self.drawText(username, 15, tan, 395, y, 'right')
self.drawText(f'{score}', 15, tan, 431, y, 'left')
pg.display.flip()
self.waitForKeyPress()
# following function from: https://youtu.be/BKtiVKNsOYk
    def waitForKeyPress(self):
        """Block until the player presses any key or closes the window.

        Ticks the clock each pass so the wait loop is frame-rate limited
        instead of spinning at 100% CPU.
        """
        waiting = True
        while waiting:
            self.clock.tick(fps)
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    # Window closed: stop waiting and shut the game down.
                    waiting = False
                    self.running = False
                    self.playing = False
                if event.type == pg.KEYDOWN:
                    waiting = False
# following function from: https://youtu.be/BKtiVKNsOYk
def drawText(self, text, size, color, x, y, align):
font = pg.font.SysFont('helvetica', size)
surface = font.render(text, True, color)
rect = surface.get_rect()
if align == 'left':
rect.left = x
rect.y = y
elif align == 'right':
rect.right = x
rect.y = y
self.screen.blit(surface, rect)
# Entry point: show the splash screen, then loop game sessions until quit.
g = Game()
g.showIntroScreen()
while g.running:
    g.new()  # presumably resets the game state for a fresh run -- defined earlier in the file
    g.run()
    g.showGameOverScreen()
pg.quit()
d77fcf859522fc913cb14fc980d63cea6a059ed9 | Python | broodfish/cs-ioc5008-hw1 | /connect.py | UTF-8 | 577 | 2.578125 | 3 | [] | no_license | import pandas as pd
import os

ids = pandas.read_csv("./result/id.csv")
label = pandas.read_csv("./result/label.csv")
# Keep only the first 1040 rows (presumably the test-set size -- confirm).
label = label[0:1040]
# Map the model's numeric class ids to scene names.
labels = {
    0: 'bedroom', 1: 'coast', 2: 'forest', 3: 'highway', 4: 'insidecity',
    5: 'kitchen', 6: 'livingroom', 7: 'mountain', 8: 'office',
    9: 'opencountry', 10: 'street', 11: 'suburb', 12: 'tallbuilding',
}
# Vectorized lookup replaces the original row-by-row loop, which rewrote
# each row through label.iloc[i] (slow, chained-assignment style).
# Assumes the label column is numeric, as the original int() cast implied.
label = label.astype(int).replace(labels)
prediction = pandas.concat([ids, label], axis=1)
prediction.to_csv("./result/prediction2.csv", encoding="utf_8_sig", index=False)
| true |
0ad935c5977e65bcda56e79c7e5a618189a53b88 | Python | qccr-twl2123/python-algorithm | /base/numpy_test.py | UTF-8 | 1,334 | 3.140625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
dataset = [[1,0,1,0],[1,0,1,1],[1,1,1,0]]
print dataset
#将列表转换成多维数组
dataset = np.array(dataset)
print dataset
#numpy.sum 数组加法
# sum axis=none 全部相加 0 按列相加 1 按行相加
a = np.sum(dataset,0)+1
print a
sub_dataset =[[1,0,1,0],[1,0,1,1]]
sub_dataset = np.array(sub_dataset)
b = np.sum(sub_dataset) +2
print b
cond_prob_vect = np.log((np.sum(sub_dataset,0)+1.0) / np.sum(dataset)+2)
# print (np.sum(sub_dataset,0)+1.0) / (np.sum(dataset)+2);
# print (np.sum(sub_dataset,0)+1.0),(np.sum(dataset)+2)
# print np.log([7,7])
print cond_prob_vect
print "-----------^^^^^^^^-----------"
x = np.arange(72).reshape((24,3)) # 创建一个24行3列的新数组
train_set1, test_sets1, val_sets1 = np.split(x, 3) # 将数组平均分为3份
print train_set1
train_set2, test_sets2, val_sets2 = np.split(x, [int(0.6*x.shape[0]), int(0.9*x.shape[0])]) # 60%训练集,30%测试集,10%验证集
print ('record of each set - equal arrays: ')
print ('train_set1: %d, test_sets1: %d, val_sets1: %d'%(train_set1.shape[0], test_sets1.shape[0], val_sets1.shape[0]))
print (40*'-')
print ('record of each set - % arrays: ')
print ('train_set2: %d, test_sets2: %d, val_sets2: %d'%(train_set2.shape[0], test_sets2.shape[0], val_sets2.shape[0])) | true |
b7f85d0380f1a44628e9da19eef1d04fbaa8bb91 | Python | nigellak/python-basics | /lesson6.py | UTF-8 | 144 | 3.140625 | 3 | [] | no_license | scores=[45,57,89,56,70]
# Demonstrate basic list operations on the `scores` list defined above:
# indexing, appending, removing the head, and iterating.
print(scores[1])      # second element
scores += [81]        # same effect as scores.append(81)
print(scores)
del scores[0]         # same effect as scores.pop(0)
print(scores)
for value in scores:
    print(value)
42adf510d30a1d81385c41b8fd1558776f3bf07f | Python | kwj2104/ProjectClimbML | /climbing_dataset.py | UTF-8 | 2,249 | 2.5625 | 3 | [] | no_license | import numpy as np
import pickle
import torch
from torch.utils.data import Dataset
import sys
class ClimbingDataset(Dataset):
    """Frame-level climbing dataset backed by two pickled lists: one of
    per-frame NumPy arrays and one of integer class labels."""

    # Print everything when numpy arrays are dumped for debugging.
    np.set_printoptions(threshold=np.inf)

    # video level data structures (class-level defaults; instances overwrite
    # frame_list / frame_label_list in __init__)
    label_dict = {}
    video_list = []

    # frame level data structures
    frame_list = []
    frame_label_list = []

    def __init__(self, frame_dataset, label_dataset):
        """Load the pickled frame and label lists, e.g.
        "climb_frame_dataset.pkl" and "climb_label_dataset.pkl"."""
        with open(frame_dataset, 'rb') as pickle_file:
            self.frame_list = pickle.load(pickle_file)
        with open(label_dataset, 'rb') as pickle_file:
            self.frame_label_list = pickle.load(pickle_file)

    def __len__(self):
        return len(self.frame_label_list)

    def __getitem__(self, idx):
        # Bug fix: torch.tensor(..., dtype=torch.LongTensor) raises a
        # TypeError -- the dtype argument must be torch.long, not the
        # legacy tensor class.
        return (torch.from_numpy(self.frame_list[idx]),
                torch.tensor(self.frame_label_list[idx], dtype=torch.long))

    def get_weights(self):
        """Return one sampling weight per frame (inverse class frequency)
        for a WeightedRandomSampler, to counter class imbalance.

        NOTE(review): indexing class_weights by the raw label assumes labels
        are contiguous integers 0..n_classes-1 -- confirm upstream.
        """
        unique, counts = np.unique(self.frame_label_list, return_counts=True)
        num_samples = sum(counts)
        class_weights = [num_samples / counts[i] for i in range(len(counts))]
        weights = [class_weights[self.frame_label_list[i]]
                   for i in range(int(num_samples))]
        return weights
class ClimbingVideoDataset(ClimbingDataset):
    """Video-level variant of ClimbingDataset: each index holds a whole clip
    (a list of frames) and its stacked labels, not a single frame."""

    def __getitem__(self, idx):
        #print(self.frame_list[idx][0])
        #print(np.stack(self.frame_list[idx],axis=0))
        # Stack the clip's frames into one tensor.
        # NOTE(review): assumes frame_list[idx] is a sequence of
        # equally-shaped arrays -- confirm against the pickling code.
        data = torch.from_numpy(np.stack(self.frame_list[idx]))
        #print(data.size())
        #x, y, z = data.size()[0], data.size()[1], data.size()[2]
        #data = data.reshape(z, x, y)
        # print(data[0].numpy())
        # sys.exit()
        # Stack the clip's labels and drop up to two leading singleton dims.
        label = torch.tensor(np.stack(self.frame_label_list[idx], axis=0), dtype=torch.int32).squeeze(0).squeeze(0)
        return data, label
| true |
f0789f0414a5d3e9eec187482820b5e797aafa29 | Python | uborzz/ocr-search | /rady_stream.py | UTF-8 | 2,959 | 2.53125 | 3 | [] | no_license | from threading import Thread
import cv2
"""
rady
basado en webcamvideostream de pyimagesearch para raspi camera.
Camera props:
CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
CAP_PROP_POS_AVI_RATIO Relative position of the video file: 0 - start of the film, 1 - end of the film.
CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
CAP_PROP_FPS Frame rate.
CAP_PROP_FOURCC 4-character code of codec.
CAP_PROP_FRAME_COUNT Number of frames in the video file.
CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
CAP_PROP_MODE Backend-specific value indicating the current capture mode.
CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
CAP_PROP_CONTRAST Contrast of the image (only for cameras).
CAP_PROP_SATURATION Saturation of the image (only for cameras).
CAP_PROP_HUE Hue of the image (only for cameras).
CAP_PROP_GAIN Gain of the image (only for cameras).
CAP_PROP_EXPOSURE Exposure (only for cameras).
CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
CAP_PROP_WHITE_BALANCE Currently unsupported
CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
"""
class Stream:
    """Threaded OpenCV capture: a daemon thread keeps grabbing frames so
    read() always returns the most recent one without blocking."""

    def __init__(self, src=0, resolution=None, framerate=30):
        """Open the capture device *src*, optionally forcing a
        (width, height) resolution and a target framerate."""
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FPS, framerate)
        if resolution:
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
        (self.grabbed, self.frame) = self.stream.read()
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
        self.records = list()
    def start(self):
        # start the daemon thread that reads frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return
            # otherwise, read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
    def read(self):
        # return the frame most recently read (may repeat between updates)
        return self.frame
    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
    def menu_config(self):
        # show the camera's driver configuration/settings dialog
        self.stream.set(cv2.CAP_PROP_SETTINGS, 0)
| true |
d2d92542e8b2775686d07b0f47b7894396d5a93d | Python | qnddkrasniqi/prod-python-practice | /advanced-syntax/conditional_expressions.py | UTF-8 | 696 | 3.25 | 3 | [] | no_license | def number(a):
if a == 1:
return 'Yes'
else:
return 'No'
def number(a):
    """Return True exactly when a equals 1, else False."""
    return a == 1
def my_list(lst):
    # if/else version; immediately shadowed by the conditional-expression
    # rewrite that follows in this file.
    if len(lst) > 3:
        return 'Too long'
    else:
        return 'Okay'
def my_list(lst):
    """Flag lists with more than three elements as 'Too long'."""
    if len(lst) > 3:
        return 'Too long'
    return 'Okay'
def numrat(c):
    # if/else version; immediately shadowed by the conditional-expression
    # rewrite that follows. Note zero is classified as 'negative'.
    if c > 0:
        x = 'positive'
    else:
        x = 'negative'
    return x
def numrat(c):
    """Classify c: 'positive' when c > 0, otherwise 'negative' (zero included)."""
    return 'positive' if c > 0 else 'negative'
def is_python(s):
    """Report whether the string s is exactly 'Python' (case-sensitive)."""
    if s == 'Python':
        return 'It is Python'
    return 'It is not Python'
def is_dog(m):
    """Return 'Yes' when 'dog' is an element/member of m, else 'No'."""
    if 'dog' in m:
        return 'Yes'
    return 'No'
print(is_dog(['dog', 'ali', 4]))
print(is_dog(['ali', 4]))
| true |
ab2ace41de8b4cd35a43ef994d66e9051557d905 | Python | rj-ram/python-sample | /real_time_video.py | UTF-8 | 2,020 | 2.515625 | 3 | [] | no_license | from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np

# Paths to the OpenCV face-detector cascade XML and the trained emotion
# model (left empty in the original -- fill in before running).
detection_model_path = ''
emotion_model_path = ''

# Bug fix: 'face-detection' is not a valid Python identifier (it parses as
# a subtraction and raises SyntaxError); the detector is referenced later
# as face_detection.
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "suprised", "neutral"]

cv2.namedWindow('your_face')
camera = cv2.VideoCapture(0)
while True:
    frame = camera.read()[1]
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Bug fix: the detectMultiScale keyword is minSize, not minsize.
    faces = face_detection.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                            minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    # Bug fix: dtype "unint8" -> "uint8".
    canvas = np.zeros((250, 300, 3), dtype="uint8")
    frameClone = frame.copy()
    if len(faces) > 0:
        # Bug fix: the area key must be an argument of sorted(); the
        # original sorted without it and then built an unused lambda.
        # Keep the largest detected face.
        faces = sorted(faces, reverse=True,
                       key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
        (fX, fY, fW, fH) = faces
        # Extract the face ROI, scale to the 48x48 input the model expects.
        roi = gray[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        preds = emotion_classifier.predict(roi)[0]
        emotion_probability = np.max(preds)
        label = EMOTIONS[preds.argmax()]
        # Draw a per-emotion probability bar chart on the side canvas.
        for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
            text = "{}:{:.2f}%".format(emotion, prob * 100)
            w = int(prob * 300)
            cv2.rectangle(canvas, (7, (i * 35) + 5), (w, (i * 35) + 35), (0, 0, 255), -1)
            cv2.putText(canvas, text, (10, (i * 35) + 23), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (255, 255, 255), 2)
        # Annotate the detected face with its top emotion.
        cv2.putText(frameClone, label, (fX, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)
    cv2.imshow('your_face', frameClone)
    cv2.imshow("probablities", canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.release()
cv2.destroyAllWindows()
| true |
1d55abc151aeffcd6869c5711bdfbc887f6117c4 | Python | cmattey/leetcode_problems | /Python/lc_110_balanced_binary_tree.py | UTF-8 | 730 | 3.5 | 4 | [
"MIT"
] | permissive | # Time: O(n), where n is size(tree)
# Space: O(n)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """A tree is height-balanced iff get_height never signals -1."""
        return self.get_height(root) != -1

    def get_height(self, root):
        """Return the height of *root*, or -1 as soon as any subtree turns
        out to be unbalanced (the sentinel short-circuits the recursion)."""
        if not root:
            return 0
        lh = self.get_height(root.left)
        # Skip the right subtree entirely once the left is known bad.
        rh = -1 if lh == -1 else self.get_height(root.right)
        if lh == -1 or rh == -1 or abs(lh - rh) > 1:
            return -1
        return max(lh, rh) + 1
| true |
9c8e7ccabc6c763be9ab85e1f7a00685ae04ca9d | Python | Subham2901/Python_Tutorials | /Variables.py | UTF-8 | 2,083 | 4.4375 | 4 | [] | no_license | """Definiton: A variable is a storage location(identified) by a memory location/addrress)
paired with an associated symboloic name(an identifier),which contains some known or unkmnown quantity of information
refered to as value
i.e It's an named memory location which can be used to store information which can be later retrieved by using that name
As python is a dynamically typed language we don't need to declare the type of the vairalble
before using it.But we must assign a value to detect the type of the variable"""
""" Rules of naming a variable:-
1.It must start with a letter or an underscore
2.It can only consists of letters & numbers & underscores.
3.It is case Sensitive
<variable name> <assigment operatora(=)> <value>
"""
b = 10
print(b)
"""except for that we cannot use the reserve words for declaring variables
THE RESERVED WORDS ARE:
and """
myInt=10 # stores integer type data
print(myInt)
myFloat=10.5 #stores float type data
print(myFloat)
mycomplex=1j #stores complex type data
print(mycomplex)
myNum=10e2 #e symbolises exponent symbol
print(myNum)
myNum=10E2 # caps E also symbolises exponent symbol
print(myNum)
mytstr="Subham Singh "# we can store strings into a varialble using double quotes
myStr='loves python' # we can store strings using single quotes too
Mystr=mytstr+myStr # we can add or concatenate two strings just by adding a + sign between them
print(Mystr)
"""Now let us see an another problem"""
myFloat=myInt
print(myFloat)
myInt=myFloat
print(myInt)
"""We will notice in both the cases the output is 10,that means when converting from
integer to float the type has not changed but when we converted from float to int it got converted automatically or implictly
THUS TO OVERCOME THIS ISSUE WE WILL USE EXPLICIT TYPECASTING """
myFloat=float(myInt)# by using explicit typecasting we have changed the type succesfully
print(myFloat)
"""type is an inbuilt functon of python use to display the data type off the variables"""
print(type(myFloat))#this will print the type of the data which is stored in the variable myFloat
| true |
3e00cb4972af1b3654d8e27fa334ec7050a25edb | Python | Aasthaengg/IBMdataset | /Python_codes/p02580/s349625629.py | UTF-8 | 935 | 3.03125 | 3 | [] | no_license | from collections import defaultdict
def main():
    """AtCoder ABC176 E style: read H W M and M target coordinates from
    stdin; print the maximum number of targets one bomb can destroy
    (best row count + best column count, minus one unless some max-row /
    max-col intersection is free of targets).
    """
    _, _, m = map(int, input().split())
    row_dict = defaultdict(int)      # targets per row
    col_dict = defaultdict(int)      # targets per column
    row_col_dict = defaultdict(set)  # row -> set of occupied columns
    for _ in range(m):
        row, col = map(int, input().split())
        row_dict[row] += 1
        col_dict[col] += 1
        row_col_dict[row].add(col)
    max_row_val = max(row_dict.values())
    max_col_val = max(col_dict.values())
    # All rows / columns that achieve the maximum counts.
    max_rows = {k for k, v in row_dict.items() if v == max_row_val}
    max_cols = {k for k, v in col_dict.items() if v == max_col_val}
    ans = max_row_val + max_col_val - 1
    flg = False
    # When ans == m every target already lies on the chosen cross, so no
    # empty intersection can exist; otherwise search for one, which lets
    # both maxima be counted in full.
    if ans < m:
        for row in max_rows:
            for col in max_cols:
                if not col in row_col_dict[row]:
                    ans += 1
                    flg = True
                    break
            if flg:
                break
    print(ans)
if __name__ == '__main__':
main()
| true |
23bf1724fa328bfa54e1f42b74a4b5e2956e57cb | Python | TrinityChristiana/py-multi-inheritance | /uncle-jake/py-files/arrangements/types/valentines_day.py | UTF-8 | 699 | 2.734375 | 3 | [] | no_license | from arrangements import Arrangement
class ValentinesDay(Arrangement):
    """Valentine's Day flower-arrangement preset."""

    def __init__(self):
        super().__init__()
        self.stem_inch = 7             # stems cut to 7 inches
        self.refrigerated = True
        self.descriptor = "flamboyant"

    def enhance(self, *args):
        """Append each flower in *args* whose uses_pesticides flag is truthy.

        NOTE(review): the AttributeError raised for a non-qualifying flower
        is caught by this method's own except clause, so the loop stops at
        the first bad flower (later flowers are skipped) and a message is
        printed instead of the error propagating -- confirm intended.
        """
        try:
            for i in args:
                if i.uses_pesticides:
                    self.flowers.append(i)
                else:
                    raise AttributeError("Only roses, lillies, and alstroemeria cal be addded to a ValentinesDay arrangement")
        except AttributeError as taco:
            print(f"AttributeError: Only roses, lillies, and alstroemeria can be addded to a Valentines Day arrangement")
| true |
c6ffbf0009fbad72ac0f5aed6a8d2c8a5f75fd90 | Python | tarun571999/Pythonprograms | /class.py | UTF-8 | 408 | 3.828125 | 4 | [] | no_license | '''def largest1(a,b,c):
if(a>b and b>c):
print(a)
elif(b>c):
print(b)
else:
print(c)
a= int(input("enter a"))
b= int(input("ENTER b"))
c = int(input("enter c "))
largest1(a,b,c)
l=[1,2,3,4,5]
print(sum(l))
strr ='hello'
print(strr[::-1])'''
# Read n values from the user into a list.
n = int(input('enter no of elements\n'))
l = []
for i in range(n):
    # Bug fix: l[i] = ... raises IndexError on an empty list; grow it instead.
    l.append(input('enter the elements'))
print(l)
cc3abfdc1674f2cd63dfdc827b7e4b6054c1aa00 | Python | frankye1000/LeetCode | /python/Shortest Distance to a Character.py | UTF-8 | 189 | 2.953125 | 3 | [] | no_license | S = "loveleetcode"
C = 'e'
# Expected output: [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]
# Collect every position of C in S, then for each index of S print its
# distance to the nearest occurrence.
Cindex = [pos for pos, ch in enumerate(S) if ch == C]
print([min(abs(pos - p) for p in Cindex) for pos in range(len(S))])
| true |
3266eba50c39b5a521b71766b48b31659c9bdc26 | Python | jColeChanged/MIT | /Computer Science 6.01 SC/Unit 1/Exercises 2/2-3-3.py | UTF-8 | 341 | 2.640625 | 3 | [] | no_license | from lib601 import sm
class CountingStateMachine(sm.SM):
    # State machine whose state counts the steps taken; every step outputs
    # the pre-increment count (0, 1, 2, ...) regardless of the input.
    startState = 0
    def getNextValues(self, state, inp):
        # (next_state, output): advance the counter, emit the old value.
        return (state + 1, state)
class AlternateZero(CountingStateMachine):
    # Reuses the parent's step counter: outputs 0 on odd-numbered steps and
    # passes the input through on even-numbered steps (0, 2, 4, ...).
    def getNextValues(self, state, inp):
        state, output = CountingStateMachine.getNextValues(self, state, inp)
        return (state, 0 if output % 2 == 1 else inp)
0a51d22e2d833dacc3672c62d2e3fd59d175aff9 | Python | Pexeso/CWR-DataApi | /tests/parser/dictionary/encoder/record/test_instrumentation_detail.py | UTF-8 | 1,192 | 2.625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import InstrumentationDetailDictionaryEncoder
from cwr.work import InstrumentationDetailRecord
"""
InstrumentationDetailRecord to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestRecordingDetailRecordDictionaryEncoding(unittest.TestCase):
    """Round-trip check: an InstrumentationDetailRecord encoded to a dict
    must preserve every field.

    NOTE(review): the class name says 'RecordingDetail' but the code tests
    InstrumentationDetail encoding -- likely a copy-paste name.
    """

    def setUp(self):
        # Fresh encoder per test case.
        self._encoder = InstrumentationDetailDictionaryEncoder()

    def test_encoded(self):
        data = InstrumentationDetailRecord(record_type='IND',
                                           transaction_sequence_n=3,
                                           record_sequence_n=15,
                                           instrument_code='AHN',
                                           number_players=2)

        encoded = self._encoder.encode(data)

        self.assertEqual('IND', encoded['record_type'])
        self.assertEqual(3, encoded['transaction_sequence_n'])
        self.assertEqual(15, encoded['record_sequence_n'])
        self.assertEqual('AHN', encoded['instrument_code'])
        self.assertEqual(2, encoded['number_players'])
| true |
108858809506032b7c4f56b21213796daea65cd6 | Python | sunilkumarhr5593/test_git | /trial_1.py | UTF-8 | 37,654 | 2.75 | 3 | [] | no_license | import pandas
import itertools
from math import log
import math
import numpy as np
# =============================================================================
# to calculate the prob of trigram
# =============================================================================
# Build a trigram -> log2(probability) lookup table from 'count3l.txt',
# a whitespace-delimited file of raw English trigram counts.
df_trigram = pandas.read_csv('count3l.txt',index_col=None, delim_whitespace=True, names=('trigram', 'prob'))
#print(df_trigram)
new_sum_3 = df_trigram.sum(axis = 0, skipna = False) # sums all prob of trigram
total_prob_3 = (new_sum_3[0])  # grand total of all trigram counts
#print("The log value total occurence from the trigram data is: ",np.log2(total_prob_3)) # total prob of all bigram
#print()
avg_prob_3 = []
df1_3 = (df_trigram.iloc[:,1]) / (total_prob_3) # relative frequency of each trigram
# Log-probabilities: log2(count / total) per trigram (all values negative).
avg_prob_3 = np.log2(df1_3)
#print(avg_prob_3)
#print(total_prob)
trigram_prob = []
tprob= df_trigram.iloc[:,0]   # the trigram strings themselves
trigram_prob = tprob ##########
#print(bigram_prob)
max_prob = max(avg_prob_3)    # best (least negative) log-probability; only used in debug prints
#print("The log value of max prob is: ",max_prob)
# Lookup table used by hill_climb(): trigram string -> log2 probability.
dict_prob_3 = dict((trigram_prob[index], avg_prob_3[index]) for index in range(len(trigram_prob)))
import random
import string
from random import choices
from string import ascii_lowercase
from random import shuffle
import secrets
alphabets = 'abcdefghijklmnopqrstuvwxyz'  # plaintext reference alphabet, in order
# Ciphertext produced by a monoalphabetic substitution cipher (uppercase).
cipher = "SOWFBRKAWFCZFSBSCSBQITBKOWLBFXTBKOWLSOXSOXFZWWIBICFWUQLRXINOCIJLWJFQUNWXLFBSZXFBTXAANTQIFBFSFQUFCZFSBSCSBIMWHWLNKAXBISWGSTOXLXTSWLUQLXJBUUWLWISTBKOWLSWGSTOXLXTSWLBSJBUUWLFULQRTXWFXLTBKOWLBISOXSSOWTBKOWLXAKOXZWSBFIQSFBRKANSOWXAKOXZWSFOBUSWJBSBFTQRKAWSWANECRZAWJ"
alphabets_list = []
cipher_list = []
alphabets_list = alphabets      # 26 reference letters a..z, indexed in parallel with a key
cipher_list = cipher.lower()    # lowercased ciphertext; hill_climb() scores decryptions of it
random_list = []
#random_list = random.sample((alphabets_list[i]) for i in range(len(alphabets_list)))
print("===========================First iteration=============================")
def key(stringLength=26):
    """Draw a random key: *stringLength* distinct lowercase letters.

    With the default length of 26 this is a random permutation of the
    whole alphabet, suitable as a substitution-cipher key.
    """
    # sample() picks without replacement, so no letter ever repeats.
    picked = random.sample(string.ascii_lowercase, stringLength)
    return "".join(picked)
random_list = key()
key_1 = random_list            # start key: a random permutation of a..z (a str here)
print("The initial random key is",key_1)
print()
# =============================================================================
# for i in range(len(alphabets_list)):
#
#     random_list_1 = random.choice(random_list)
#     #random_list_1 = [i.split('\t')[0] for i in random_list]
# =============================================================================
#print("The initial key is",key_1)
test_list = []                 # later rebound to the tuple of candidate-key scores
def hill_climb(key_1):
    """Score one candidate substitution key against the ciphertext.

    key_1 : sequence of 26 distinct lowercase letters; ciphertext letter
            key_1[i] decrypts to plaintext letter alphabets_list[i].

    Returns the sum of log2 trigram probabilities of the decrypted text
    (higher / less negative means more English-like).

    Reads the module globals alphabets_list, cipher_list and dict_prob_3.
    Raises KeyError when the key does not cover every ciphertext letter
    or a decrypted trigram is missing from the table (the original code
    let .get() return None and crashed later with a TypeError instead).
    """
    # Ciphertext letter -> plaintext letter implied by this key.
    decrypt_map = dict(zip(key_1, alphabets_list))
    # Decrypt in one pass.  The original grew a numpy array with
    # np.append (O(n^2)) and then space-joined / stripped it to get
    # exactly this same string.
    decrypted = "".join(decrypt_map[c] for c in cipher_list)
    # Sum the log-probability of every overlapping trigram.
    n = 3
    prob_k1 = 0
    for i in range(len(decrypted) + 1 - n):
        prob_k1 = dict_prob_3[decrypted[i:i + n]] + prob_k1
    return prob_k1
# Greedy neighbour search: swap one fixed pair of positions in whichever of
# the two latest candidate keys scored better, printing the trigram
# log-probability of every candidate along the way.
# NOTE(review): hill_climb() is re-evaluated for the same key many times in
# this chain; caching each score in a variable would be far cheaper.
random_list = list(random_list)   # key() returned a str; mutate it as a char list
random_list[0], random_list[1] = random_list[1], random_list[0]
key_2 = random_list               # neighbour of key_1: positions 0/1 swapped
#print("The next key is ","".join(str(x) for x in key_2))
print("The prob using key 1 is {} and the key is {}".format(hill_climb(key_1) ,"".join(str(x) for x in key_1)))
print("The prob using key 2 is {} and the key is {}".format(hill_climb(key_2) ,"".join(str(x) for x in key_2)))
if(hill_climb(key_1) > hill_climb(key_2)):
    #print("true")
    key_1 = list(key_1)
    key_1[1], key_1[2] = key_1[2], key_1[1]
    key_3 = key_1                 # key_3 aliases the re-listed key_1
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[24], key_2[25] = key_2[25], key_2[24]
    key_3 = key_2
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
if(hill_climb(key_3) > hill_climb(key_2)):
    #print("true")
    key_3 = list(key_3)
    key_3[7], key_3[8] = key_3[8], key_3[7]
    key_4 = key_3
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[11], key_2[12] = key_2[12], key_2[11]
    key_4 = key_2
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
if(hill_climb(key_4) > hill_climb(key_3)):
    #print("true")
    key_4 = list(key_4)
    key_4[14], key_4[15] = key_4[15], key_4[14]
    key_5 = key_4
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
else:
    #print("false")
    key_3 = list(key_3)
    key_3[18], key_3[19] = key_3[19], key_3[18]
    key_5 = key_3
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
if(hill_climb(key_5) > hill_climb(key_4)):
    #print("true")
    key_5 = list(key_5)
    key_5[20], key_5[21] = key_5[21], key_5[20]
    key_6 = key_5
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
else:
    #print("false")
    key_4 = list(key_4)
    key_4[21], key_4[22] = key_4[22], key_4[21]
    key_6 = key_4
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
if(hill_climb(key_6) < hill_climb(key_5)):
    key_5 = list(key_5)
    key_5[22], key_5[23] = key_5[23], key_5[22]
    key_7 = key_5
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
    if(hill_climb(key_7) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[3], key_5[4] = key_5[4], key_5[3]
        key_8 = key_5
        print("The prob using key 8 is {} and the key is {}".format(hill_climb(key_8) ,"".join(str(x) for x in key_8)))
        hill_climb(key_8)
    # NOTE(review): key_8 is only bound when the branch above was taken; if
    # that test was false, the comparison below raises NameError.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[7], key_5[8] = key_5[8], key_5[7]
        key_9 = key_5
        print("The prob using key 9 is {} and the key is {}".format(hill_climb(key_9) ,"".join(str(x) for x in key_9)))
        hill_climb(key_9)
    # NOTE(review): same condition as the previous block (key_9 is never
    # compared), and the assignment below copies key_5[0]/key_5[25] into
    # slots 20/21 instead of swapping — the key stops being a permutation
    # (duplicate letters), which can make hill_climb fail on it.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[20], key_5[21] = key_5[0], key_5[25]
        key_10 = key_5
        print("The prob using key 10 is {} and the key is {}".format(hill_climb(key_10) ,"".join(str(x) for x in key_10)))
        hill_climb(key_10)
else:
    key_6 = list(key_6)
    key_6[22], key_6[23] = key_6[23], key_6[22]
    key_7 = key_6
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
# Score and stringify every candidate seen, then report the best one.
x1 = hill_climb(key_1)
x2 = hill_climb(key_2)
x3 = hill_climb(key_3)
x4 = hill_climb(key_4)
x5 = hill_climb(key_5)
x6 = hill_climb(key_6)
x7 = hill_climb(key_7)
y1 = "".join(str(x) for x in key_1)
y2 = "".join(str(x) for x in key_2)
y3 = "".join(str(x) for x in key_3)
y4 = "".join(str(x) for x in key_4)
y5 = "".join(str(x) for x in key_5)
y6 = "".join(str(x) for x in key_6)
y7 = "".join(str(x) for x in key_7)
test_list = x1,x2,x3,x4,x5,x6,x7
test_list_1 = y1,y2,y3,y4,y5,y6,y7
#print(test_list)
#print(test_list_1)
print()
dictionary1 = dict(zip(test_list_1, test_list))   # key string -> its score
#print(dictionary1)
print("The key with max prob is : {} and the value is : {} ".format(max(dictionary1, key=dictionary1.get),max([i for i in dictionary1.values()]) ))
#rint(max([i for i in dictionary1.values()]) )
# Create a dictionary from zip object
#dictOfWords = dict(dictionary1)
#print(dictOfWords)
y = max(dictionary1, key=dictionary1.get)   # best-scoring key string
z = max([i for i in dictionary1.values()])  # its trigram log-probability
print(z, y)
print()
print("===========================Second iteration=============================")
# Second random restart.  NOTE(review): everything below duplicates the first
# iteration verbatim (key(), hill_climb() and the swap chain are redefined
# identically); only the fresh random start key differs.
def key(stringLength=26):
    """Generate a random string of fixed length """
    letters = string.ascii_lowercase
    return ''.join(random.sample(letters, stringLength))
random_list = key()
key_1 = random_list            # fresh random start key for this restart
print("The initial random key is",key_1)
print()
# =============================================================================
# for i in range(len(alphabets_list)):
#
#     random_list_1 = random.choice(random_list)
#     #random_list_1 = [i.split('\t')[0] for i in random_list]
# =============================================================================
#print("The initial key is",key_1)
test_list = []
def hill_climb(key_1):
    """Score one candidate substitution key against the ciphertext.

    key_1 : sequence of 26 distinct lowercase letters; ciphertext letter
            key_1[i] decrypts to plaintext letter alphabets_list[i].

    Returns the sum of log2 trigram probabilities of the decrypted text
    (higher / less negative means more English-like).

    Reads the module globals alphabets_list, cipher_list and dict_prob_3.
    Raises KeyError when the key does not cover every ciphertext letter
    or a decrypted trigram is missing from the table (the original code
    let .get() return None and crashed later with a TypeError instead).
    """
    # Ciphertext letter -> plaintext letter implied by this key.
    decrypt_map = dict(zip(key_1, alphabets_list))
    # Decrypt in one pass.  The original grew a numpy array with
    # np.append (O(n^2)) and then space-joined / stripped it to get
    # exactly this same string.
    decrypted = "".join(decrypt_map[c] for c in cipher_list)
    # Sum the log-probability of every overlapping trigram.
    n = 3
    prob_k1 = 0
    for i in range(len(decrypted) + 1 - n):
        prob_k1 = dict_prob_3[decrypted[i:i + n]] + prob_k1
    return prob_k1
# Restart 2: same greedy swap chain as the first iteration (duplicated code).
random_list = list(random_list)
random_list[0], random_list[1] = random_list[1], random_list[0]
key_2 = random_list
#print("The next key is ","".join(str(x) for x in key_2))
print("The prob using key 1 is {} and the key is {}".format(hill_climb(key_1) ,"".join(str(x) for x in key_1)))
print("The prob using key 2 is {} and the key is {}".format(hill_climb(key_2) ,"".join(str(x) for x in key_2)))
if(hill_climb(key_1) > hill_climb(key_2)):
    #print("true")
    key_1 = list(key_1)
    key_1[1], key_1[2] = key_1[2], key_1[1]
    key_3 = key_1
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[24], key_2[25] = key_2[25], key_2[24]
    key_3 = key_2
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
if(hill_climb(key_3) > hill_climb(key_2)):
    #print("true")
    key_3 = list(key_3)
    key_3[7], key_3[8] = key_3[8], key_3[7]
    key_4 = key_3
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[11], key_2[12] = key_2[12], key_2[11]
    key_4 = key_2
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
if(hill_climb(key_4) > hill_climb(key_3)):
    #print("true")
    key_4 = list(key_4)
    key_4[14], key_4[15] = key_4[15], key_4[14]
    key_5 = key_4
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
else:
    #print("false")
    key_3 = list(key_3)
    key_3[18], key_3[19] = key_3[19], key_3[18]
    key_5 = key_3
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
if(hill_climb(key_5) > hill_climb(key_4)):
    #print("true")
    key_5 = list(key_5)
    key_5[20], key_5[21] = key_5[21], key_5[20]
    key_6 = key_5
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
else:
    #print("false")
    key_4 = list(key_4)
    key_4[21], key_4[22] = key_4[22], key_4[21]
    key_6 = key_4
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
if(hill_climb(key_6) < hill_climb(key_5)):
    key_5 = list(key_5)
    key_5[22], key_5[23] = key_5[23], key_5[22]
    key_7 = key_5
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
    if(hill_climb(key_7) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[3], key_5[4] = key_5[4], key_5[3]
        key_8 = key_5
        print("The prob using key 8 is {} and the key is {}".format(hill_climb(key_8) ,"".join(str(x) for x in key_8)))
        hill_climb(key_8)
    # NOTE(review): key_8 may be unbound here (NameError) if the branch
    # above did not run.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[7], key_5[8] = key_5[8], key_5[7]
        key_9 = key_5
        print("The prob using key 9 is {} and the key is {}".format(hill_climb(key_9) ,"".join(str(x) for x in key_9)))
        hill_climb(key_9)
    # NOTE(review): the assignment below overwrites slots 20/21 with
    # key_5[0]/key_5[25] instead of swapping — the key is no longer a
    # permutation and hill_climb may fail on it.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[20], key_5[21] = key_5[0], key_5[25]
        key_10 = key_5
        print("The prob using key 10 is {} and the key is {}".format(hill_climb(key_10) ,"".join(str(x) for x in key_10)))
        hill_climb(key_10)
else:
    key_6 = list(key_6)
    key_6[22], key_6[23] = key_6[23], key_6[22]
    key_7 = key_6
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
# Report the best candidate of this restart.
x1 = hill_climb(key_1)
x2 = hill_climb(key_2)
x3 = hill_climb(key_3)
x4 = hill_climb(key_4)
x5 = hill_climb(key_5)
x6 = hill_climb(key_6)
x7 = hill_climb(key_7)
y1 = "".join(str(x) for x in key_1)
y2 = "".join(str(x) for x in key_2)
y3 = "".join(str(x) for x in key_3)
y4 = "".join(str(x) for x in key_4)
y5 = "".join(str(x) for x in key_5)
y6 = "".join(str(x) for x in key_6)
y7 = "".join(str(x) for x in key_7)
test_list = x1,x2,x3,x4,x5,x6,x7
test_list_1 = y1,y2,y3,y4,y5,y6,y7
#print(test_list)
print()
dictionary1 = dict(zip(test_list_1, test_list))   # key string -> its score
#print(dictionary1)
print("The key with max prob is : {} and the value is : {} ".format(max(dictionary1, key=dictionary1.get),max([i for i in dictionary1.values()]) ))
#rint(max([i for i in dictionary1.values()]) )
# Create a dictionary from zip object
#dictOfWords = dict(dictionary1)
#print(dictOfWords)
print()
y = max(dictionary1, key=dictionary1.get)   # best-scoring key string
z = max([i for i in dictionary1.values()])  # its trigram log-probability
print(z, y)
print("===========================Third iteration=============================")
# Third random restart.  NOTE(review): duplicates the first iteration
# verbatim; only the fresh random start key differs.
def key(stringLength=26):
    """Generate a random string of fixed length """
    letters = string.ascii_lowercase
    return ''.join(random.sample(letters, stringLength))
random_list = key()
key_1 = random_list            # fresh random start key for this restart
print("The initial random key is",key_1)
print()
# =============================================================================
# for i in range(len(alphabets_list)):
#
#     random_list_1 = random.choice(random_list)
#     #random_list_1 = [i.split('\t')[0] for i in random_list]
# =============================================================================
#print("The initial key is",key_1)
test_list = []
def hill_climb(key_1):
    """Score one candidate substitution key against the ciphertext.

    key_1 : sequence of 26 distinct lowercase letters; ciphertext letter
            key_1[i] decrypts to plaintext letter alphabets_list[i].

    Returns the sum of log2 trigram probabilities of the decrypted text
    (higher / less negative means more English-like).

    Reads the module globals alphabets_list, cipher_list and dict_prob_3.
    Raises KeyError when the key does not cover every ciphertext letter
    or a decrypted trigram is missing from the table (the original code
    let .get() return None and crashed later with a TypeError instead).
    """
    # Ciphertext letter -> plaintext letter implied by this key.
    decrypt_map = dict(zip(key_1, alphabets_list))
    # Decrypt in one pass.  The original grew a numpy array with
    # np.append (O(n^2)) and then space-joined / stripped it to get
    # exactly this same string.
    decrypted = "".join(decrypt_map[c] for c in cipher_list)
    # Sum the log-probability of every overlapping trigram.
    n = 3
    prob_k1 = 0
    for i in range(len(decrypted) + 1 - n):
        prob_k1 = dict_prob_3[decrypted[i:i + n]] + prob_k1
    return prob_k1
# Restart 3: same greedy swap chain as the first iteration (duplicated code).
random_list = list(random_list)
random_list[0], random_list[1] = random_list[1], random_list[0]
key_2 = random_list
#print("The next key is ","".join(str(x) for x in key_2))
print("The prob using key 1 is {} and the key is {}".format(hill_climb(key_1) ,"".join(str(x) for x in key_1)))
print("The prob using key 2 is {} and the key is {}".format(hill_climb(key_2) ,"".join(str(x) for x in key_2)))
if(hill_climb(key_1) > hill_climb(key_2)):
    #print("true")
    key_1 = list(key_1)
    key_1[1], key_1[2] = key_1[2], key_1[1]
    key_3 = key_1
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[24], key_2[25] = key_2[25], key_2[24]
    key_3 = key_2
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
if(hill_climb(key_3) > hill_climb(key_2)):
    #print("true")
    key_3 = list(key_3)
    key_3[7], key_3[8] = key_3[8], key_3[7]
    key_4 = key_3
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[11], key_2[12] = key_2[12], key_2[11]
    key_4 = key_2
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
if(hill_climb(key_4) > hill_climb(key_3)):
    #print("true")
    key_4 = list(key_4)
    key_4[14], key_4[15] = key_4[15], key_4[14]
    key_5 = key_4
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
else:
    #print("false")
    key_3 = list(key_3)
    key_3[18], key_3[19] = key_3[19], key_3[18]
    key_5 = key_3
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
if(hill_climb(key_5) > hill_climb(key_4)):
    #print("true")
    key_5 = list(key_5)
    key_5[20], key_5[21] = key_5[21], key_5[20]
    key_6 = key_5
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
else:
    #print("false")
    key_4 = list(key_4)
    key_4[21], key_4[22] = key_4[22], key_4[21]
    key_6 = key_4
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
if(hill_climb(key_6) < hill_climb(key_5)):
    key_5 = list(key_5)
    key_5[22], key_5[23] = key_5[23], key_5[22]
    key_7 = key_5
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
    if(hill_climb(key_7) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[3], key_5[4] = key_5[4], key_5[3]
        key_8 = key_5
        print("The prob using key 8 is {} and the key is {}".format(hill_climb(key_8) ,"".join(str(x) for x in key_8)))
        hill_climb(key_8)
    # NOTE(review): key_8 may be unbound here (NameError) if the branch
    # above did not run.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[7], key_5[8] = key_5[8], key_5[7]
        key_9 = key_5
        print("The prob using key 9 is {} and the key is {}".format(hill_climb(key_9) ,"".join(str(x) for x in key_9)))
        hill_climb(key_9)
    # NOTE(review): the assignment below overwrites slots 20/21 with
    # key_5[0]/key_5[25] instead of swapping — the key is no longer a
    # permutation and hill_climb may fail on it.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[20], key_5[21] = key_5[0], key_5[25]
        key_10 = key_5
        print("The prob using key 10 is {} and the key is {}".format(hill_climb(key_10) ,"".join(str(x) for x in key_10)))
        hill_climb(key_10)
else:
    key_6 = list(key_6)
    key_6[22], key_6[23] = key_6[23], key_6[22]
    key_7 = key_6
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
# Report the best candidate of this restart.
x1 = hill_climb(key_1)
x2 = hill_climb(key_2)
x3 = hill_climb(key_3)
x4 = hill_climb(key_4)
x5 = hill_climb(key_5)
x6 = hill_climb(key_6)
x7 = hill_climb(key_7)
y1 = "".join(str(x) for x in key_1)
y2 = "".join(str(x) for x in key_2)
y3 = "".join(str(x) for x in key_3)
y4 = "".join(str(x) for x in key_4)
y5 = "".join(str(x) for x in key_5)
y6 = "".join(str(x) for x in key_6)
y7 = "".join(str(x) for x in key_7)
test_list = x1,x2,x3,x4,x5,x6,x7
test_list_1 = y1,y2,y3,y4,y5,y6,y7
#print(test_list)
print()
dictionary1 = dict(zip(test_list_1, test_list))   # key string -> its score
#print(dictionary1)
print("The key with max prob is : {} and the value is : {} ".format(max(dictionary1, key=dictionary1.get),max([i for i in dictionary1.values()]) ))
#rint(max([i for i in dictionary1.values()]) )
# Create a dictionary from zip object
#dictOfWords = dict(dictionary1)
#print(dictOfWords)
print()
print("===========================Fourth iteration=============================")
# Fourth random restart.  NOTE(review): duplicates the first iteration
# verbatim; only the fresh random start key differs.
def key(stringLength=26):
    """Generate a random string of fixed length """
    letters = string.ascii_lowercase
    return ''.join(random.sample(letters, stringLength))
random_list = key()
key_1 = random_list            # fresh random start key for this restart
print("The initial random key is",key_1)
print()
# =============================================================================
# for i in range(len(alphabets_list)):
#
#     random_list_1 = random.choice(random_list)
#     #random_list_1 = [i.split('\t')[0] for i in random_list]
# =============================================================================
#print("The initial key is",key_1)
test_list = []
def hill_climb(key_1):
    """Score one candidate substitution key against the ciphertext.

    key_1 : sequence of 26 distinct lowercase letters; ciphertext letter
            key_1[i] decrypts to plaintext letter alphabets_list[i].

    Returns the sum of log2 trigram probabilities of the decrypted text
    (higher / less negative means more English-like).

    Reads the module globals alphabets_list, cipher_list and dict_prob_3.
    Raises KeyError when the key does not cover every ciphertext letter
    or a decrypted trigram is missing from the table (the original code
    let .get() return None and crashed later with a TypeError instead).
    """
    # Ciphertext letter -> plaintext letter implied by this key.
    decrypt_map = dict(zip(key_1, alphabets_list))
    # Decrypt in one pass.  The original grew a numpy array with
    # np.append (O(n^2)) and then space-joined / stripped it to get
    # exactly this same string.
    decrypted = "".join(decrypt_map[c] for c in cipher_list)
    # Sum the log-probability of every overlapping trigram.
    n = 3
    prob_k1 = 0
    for i in range(len(decrypted) + 1 - n):
        prob_k1 = dict_prob_3[decrypted[i:i + n]] + prob_k1
    return prob_k1
# Restart 4: same greedy swap chain as the first iteration (duplicated code).
random_list = list(random_list)
random_list[0], random_list[1] = random_list[1], random_list[0]
key_2 = random_list
#print("The next key is ","".join(str(x) for x in key_2))
print("The prob using key 1 is {} and the key is {}".format(hill_climb(key_1) ,"".join(str(x) for x in key_1)))
print("The prob using key 2 is {} and the key is {}".format(hill_climb(key_2) ,"".join(str(x) for x in key_2)))
if(hill_climb(key_1) > hill_climb(key_2)):
    #print("true")
    key_1 = list(key_1)
    key_1[1], key_1[2] = key_1[2], key_1[1]
    key_3 = key_1
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[24], key_2[25] = key_2[25], key_2[24]
    key_3 = key_2
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
if(hill_climb(key_3) > hill_climb(key_2)):
    #print("true")
    key_3 = list(key_3)
    key_3[7], key_3[8] = key_3[8], key_3[7]
    key_4 = key_3
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[11], key_2[12] = key_2[12], key_2[11]
    key_4 = key_2
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
if(hill_climb(key_4) > hill_climb(key_3)):
    #print("true")
    key_4 = list(key_4)
    key_4[14], key_4[15] = key_4[15], key_4[14]
    key_5 = key_4
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
else:
    #print("false")
    key_3 = list(key_3)
    key_3[18], key_3[19] = key_3[19], key_3[18]
    key_5 = key_3
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
if(hill_climb(key_5) > hill_climb(key_4)):
    #print("true")
    key_5 = list(key_5)
    key_5[20], key_5[21] = key_5[21], key_5[20]
    key_6 = key_5
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
else:
    #print("false")
    key_4 = list(key_4)
    key_4[21], key_4[22] = key_4[22], key_4[21]
    key_6 = key_4
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
if(hill_climb(key_6) < hill_climb(key_5)):
    key_5 = list(key_5)
    key_5[22], key_5[23] = key_5[23], key_5[22]
    key_7 = key_5
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
    if(hill_climb(key_7) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[3], key_5[4] = key_5[4], key_5[3]
        key_8 = key_5
        print("The prob using key 8 is {} and the key is {}".format(hill_climb(key_8) ,"".join(str(x) for x in key_8)))
        hill_climb(key_8)
    # NOTE(review): key_8 may be unbound here (NameError) if the branch
    # above did not run.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[7], key_5[8] = key_5[8], key_5[7]
        key_9 = key_5
        print("The prob using key 9 is {} and the key is {}".format(hill_climb(key_9) ,"".join(str(x) for x in key_9)))
        hill_climb(key_9)
    # NOTE(review): the assignment below overwrites slots 20/21 with
    # key_5[0]/key_5[25] instead of swapping — the key is no longer a
    # permutation and hill_climb may fail on it.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[20], key_5[21] = key_5[0], key_5[25]
        key_10 = key_5
        print("The prob using key 10 is {} and the key is {}".format(hill_climb(key_10) ,"".join(str(x) for x in key_10)))
        hill_climb(key_10)
else:
    key_6 = list(key_6)
    key_6[22], key_6[23] = key_6[23], key_6[22]
    key_7 = key_6
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
# Report the best candidate of this restart.
x1 = hill_climb(key_1)
x2 = hill_climb(key_2)
x3 = hill_climb(key_3)
x4 = hill_climb(key_4)
x5 = hill_climb(key_5)
x6 = hill_climb(key_6)
x7 = hill_climb(key_7)
y1 = "".join(str(x) for x in key_1)
y2 = "".join(str(x) for x in key_2)
y3 = "".join(str(x) for x in key_3)
y4 = "".join(str(x) for x in key_4)
y5 = "".join(str(x) for x in key_5)
y6 = "".join(str(x) for x in key_6)
y7 = "".join(str(x) for x in key_7)
test_list = x1,x2,x3,x4,x5,x6,x7
test_list_1 = y1,y2,y3,y4,y5,y6,y7
#print(test_list)
print()
dictionary1 = dict(zip(test_list_1, test_list))   # key string -> its score
#print(dictionary1)
print("The key with max prob is : {} and the value is : {} ".format(max(dictionary1, key=dictionary1.get),max([i for i in dictionary1.values()]) ))
#rint(max([i for i in dictionary1.values()]) )
# Create a dictionary from zip object
#dictOfWords = dict(dictionary1)
#print(dictOfWords)
print()
print("===========================Fifth iteration=============================")
# Fifth random restart.  NOTE(review): duplicates the first iteration
# verbatim; only the fresh random start key differs.
def key(stringLength=26):
    """Generate a random string of fixed length """
    letters = string.ascii_lowercase
    return ''.join(random.sample(letters, stringLength))
random_list = key()
key_1 = random_list            # fresh random start key for this restart
print("The initial random key is",key_1)
print()
# =============================================================================
# for i in range(len(alphabets_list)):
#
#     random_list_1 = random.choice(random_list)
#     #random_list_1 = [i.split('\t')[0] for i in random_list]
# =============================================================================
#print("The initial key is",key_1)
test_list = []
def hill_climb(key_1):
    """Score one candidate substitution key against the ciphertext.

    key_1 : sequence of 26 distinct lowercase letters; ciphertext letter
            key_1[i] decrypts to plaintext letter alphabets_list[i].

    Returns the sum of log2 trigram probabilities of the decrypted text
    (higher / less negative means more English-like).

    Reads the module globals alphabets_list, cipher_list and dict_prob_3.
    Raises KeyError when the key does not cover every ciphertext letter
    or a decrypted trigram is missing from the table (the original code
    let .get() return None and crashed later with a TypeError instead).
    """
    # Ciphertext letter -> plaintext letter implied by this key.
    decrypt_map = dict(zip(key_1, alphabets_list))
    # Decrypt in one pass.  The original grew a numpy array with
    # np.append (O(n^2)) and then space-joined / stripped it to get
    # exactly this same string.
    decrypted = "".join(decrypt_map[c] for c in cipher_list)
    # Sum the log-probability of every overlapping trigram.
    n = 3
    prob_k1 = 0
    for i in range(len(decrypted) + 1 - n):
        prob_k1 = dict_prob_3[decrypted[i:i + n]] + prob_k1
    return prob_k1
# Restart 5: same greedy swap chain as the first iteration (duplicated code).
random_list = list(random_list)
random_list[0], random_list[1] = random_list[1], random_list[0]
key_2 = random_list
#print("The next key is ","".join(str(x) for x in key_2))
print("The prob using key 1 is {} and the key is {}".format(hill_climb(key_1) ,"".join(str(x) for x in key_1)))
print("The prob using key 2 is {} and the key is {}".format(hill_climb(key_2) ,"".join(str(x) for x in key_2)))
if(hill_climb(key_1) > hill_climb(key_2)):
    #print("true")
    key_1 = list(key_1)
    key_1[1], key_1[2] = key_1[2], key_1[1]
    key_3 = key_1
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[24], key_2[25] = key_2[25], key_2[24]
    key_3 = key_2
    print("The prob using key 3 is {} and the key is {}".format(hill_climb(key_3) ,"".join(str(x) for x in key_3)))
    hill_climb(key_3)
if(hill_climb(key_3) > hill_climb(key_2)):
    #print("true")
    key_3 = list(key_3)
    key_3[7], key_3[8] = key_3[8], key_3[7]
    key_4 = key_3
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
else:
    #print("false")
    key_2 = list(key_2)
    key_2[11], key_2[12] = key_2[12], key_2[11]
    key_4 = key_2
    print("The prob using key 4 is {} and the key is {}".format(hill_climb(key_4) ,"".join(str(x) for x in key_4)))
    hill_climb(key_4)
if(hill_climb(key_4) > hill_climb(key_3)):
    #print("true")
    key_4 = list(key_4)
    key_4[14], key_4[15] = key_4[15], key_4[14]
    key_5 = key_4
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
else:
    #print("false")
    key_3 = list(key_3)
    key_3[18], key_3[19] = key_3[19], key_3[18]
    key_5 = key_3
    print("The prob using key 5 is {} and the key is {}".format(hill_climb(key_5) ,"".join(str(x) for x in key_5)))
    hill_climb(key_5)
if(hill_climb(key_5) > hill_climb(key_4)):
    #print("true")
    key_5 = list(key_5)
    key_5[20], key_5[21] = key_5[21], key_5[20]
    key_6 = key_5
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
else:
    #print("false")
    key_4 = list(key_4)
    key_4[21], key_4[22] = key_4[22], key_4[21]
    key_6 = key_4
    print("The prob using key 6 is {} and the key is {}".format(hill_climb(key_6) ,"".join(str(x) for x in key_6)))
    hill_climb(key_6)
if(hill_climb(key_6) < hill_climb(key_5)):
    key_5 = list(key_5)
    key_5[22], key_5[23] = key_5[23], key_5[22]
    key_7 = key_5
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
    if(hill_climb(key_7) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[3], key_5[4] = key_5[4], key_5[3]
        key_8 = key_5
        print("The prob using key 8 is {} and the key is {}".format(hill_climb(key_8) ,"".join(str(x) for x in key_8)))
        hill_climb(key_8)
    # NOTE(review): key_8 may be unbound here (NameError) if the branch
    # above did not run.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[7], key_5[8] = key_5[8], key_5[7]
        key_9 = key_5
        print("The prob using key 9 is {} and the key is {}".format(hill_climb(key_9) ,"".join(str(x) for x in key_9)))
        hill_climb(key_9)
    # NOTE(review): the assignment below overwrites slots 20/21 with
    # key_5[0]/key_5[25] instead of swapping — the key is no longer a
    # permutation and hill_climb may fail on it.
    if(hill_climb(key_8) < hill_climb(key_5)):
        key_5 = list(key_5)
        key_5[20], key_5[21] = key_5[0], key_5[25]
        key_10 = key_5
        print("The prob using key 10 is {} and the key is {}".format(hill_climb(key_10) ,"".join(str(x) for x in key_10)))
        hill_climb(key_10)
else:
    key_6 = list(key_6)
    key_6[22], key_6[23] = key_6[23], key_6[22]
    key_7 = key_6
    print("The prob using key 7 is {} and the key is {}".format(hill_climb(key_7) ,"".join(str(x) for x in key_7)))
    hill_climb(key_7)
x1 = hill_climb(key_1)
x2 = hill_climb(key_2)
x3 = hill_climb(key_3)
x4 = hill_climb(key_4)
x5 = hill_climb(key_5)
x6 = hill_climb(key_6)
x7 = hill_climb(key_7)
y1 = "".join(str(x) for x in key_1)
y2 = "".join(str(x) for x in key_2)
y3 = "".join(str(x) for x in key_3)
y4 = "".join(str(x) for x in key_4)
y5 = "".join(str(x) for x in key_5)
y6 = "".join(str(x) for x in key_6)
y7 = "".join(str(x) for x in key_7)
test_list = x1,x2,x3,x4,x5,x6,x7
test_list_1 = y1,y2,y3,y4,y5,y6,y7
#print(test_list)
print()
dictionary1 = dict(zip(test_list_1, test_list))
#print(dictionary1)
print("The key with max prob is : {} and the value is : {} ".format(max(dictionary1, key=dictionary1.get),max([i for i in dictionary1.values()]) ))
#rint(max([i for i in dictionary1.values()]) )
# Create a dictionary from zip object
#dictOfWords = dict(dictionary1)
#print(dictOfWords)
print()
| true |
4f9485605b3a65731cc1f675d6558427b02655b0 | Python | ShenQianli/FlowerClassification2018 | /src/datagen_show.py | UTF-8 | 1,463 | 2.734375 | 3 | [] | no_license | from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
import numpy as np
import matplotlib.pyplot as plt
# Augmentation pipeline: only geometric augmentations are enabled; all of the
# normalisation/whitening options are switched off.
datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=30,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.2, # Randomly zoom image
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
        data_format='channels_last')
# Load one sample image and wrap it in a batch of size 1 for the generator.
img = image.load_img("../train/daisy/0.jpg", target_size=(200, 200))
x = np.array([np.array(img)])
# NOTE(review): fit() only computes statistics for the featurewise options,
# which are all disabled above, so this call is presumably a no-op -- confirm.
datagen.fit(x)
i = 0
buf = [np.array(img)]  # buf[0] is the untouched image; augmented samples follow
for batch in datagen.flow(x,batch_size=1):
    # plt.imshow(batch[0].astype(np.uint8))
    # plt.show()
    buf.append(batch[0].astype(np.uint8))
    i = i + 1
    if(i > 3):
        break  # flow() yields forever, so stop after 4 augmented samples
# Show the raw image next to the first three augmented samples in one row.
fig,ax=plt.subplots(1,4)
for i in range(1):
    for j in range (4):
        ax[j].imshow(buf[j])
        if(i == 0 and j == 0):
            ax[j].set_title('raw data')
        else:
            ax[j].set_title('sample ' + str(j))
plt.tight_layout()
plt.show()
| true |
a16605c48c01a071e37407ba529d04379b37dac1 | Python | valerija-h/DDQN-Assignment | /Code/pixel_ram.py | UTF-8 | 11,351 | 2.84375 | 3 | [] | no_license | import tensorflow.compat.v1 as tf
import os
import matplotlib.pyplot as plt
import gym
import numpy as np
from collections import deque
from IPython.display import clear_output
import random
import pickle
import time
# please note the code in the agent class was adapted from tutorial material
# please note the prioritized replay was adapted from class material
"""
LOADING AND OBSERVING THE ENVIRONMENT
"""
# load the environment (that uses pixel images)
# "NoFrameskip" variant: frame skipping is handled manually in the loop below.
env_name_pixel = "SeaquestNoFrameskip-v4"
env = gym.make(env_name_pixel)
"""
PREPROCESSING THE OBSERVATIONS
"""
def prep_obs(obs):
img = obs[1:192:2, ::2]
img = img.mean(axis=2).astype(np.uint8) # convert to grayscale (values between 0 and 255)
return img.reshape(96, 80, 1)
"""
PRIORITIZED REPLAY
"""
class PrioritizedReplayBuffer():
def __init__(self, maxlen):
self.buffer = deque(maxlen=maxlen)
self.priorities = deque(maxlen=maxlen)
# A new experience is given the maximum priority
def add(self, experience):
self.buffer.append(experience)
self.priorities.append(max(self.priorities, default=1.0))
def get_probabilities(self, priority_scale):
scaled_priorities = np.array(self.priorities) ** priority_scale
sample_probabilities = scaled_priorities / sum(scaled_priorities)
return sample_probabilities
def get_importance(self, probabilities):
importance = 1 / (len(self.buffer) * probabilities)
importance_normalized = importance / max(importance)
return importance_normalized
def sample(self, batch_size, priority_scale=1.0):
sample_size = min(len(self.buffer), batch_size)
sample_probs = self.get_probabilities(priority_scale)
sample_indices = random.choices(range(len(self.buffer)), k=sample_size, weights=sample_probs)
samples = np.array(self.buffer)[sample_indices]
importance = self.get_importance(sample_probs[sample_indices])
return map(list, zip(*samples)), importance, sample_indices
def set_priorities(self, indices, errors, offset=0.001):
for i, e in zip(indices, errors):
self.priorities[i] = abs(e) + offset
"""
CREATING THE AGENT
"""
class QLearningAgent():
def __init__(self, env):
self.action_size = env.action_space.n
self.learning_rate = 0.00025 # higher for experience replay
self.discount_rate = 0.95
self.checkpoint_path = "seaquest_both.ckpt" # where to save model checkpoints
self.min_epsilon = 0.1 # make sure it will never go below 0.1
self.epsilon = self.max_epsilon = 1.0
self.final_exploration_frame = 100000
self.loss_val = np.infty # initialize loss_val
self.error_val = np.infty
self.replay_buffer = PrioritizedReplayBuffer(maxlen=100000) # exerience buffe
self.tau = 0.001
tf.reset_default_graph()
tf.disable_eager_execution()
# observation variable - takes shape 96 by 80
self.X_state_pixel = tf.placeholder(tf.float32, shape=[None, 96, 80, 1])
self.X_state_ram = tf.placeholder(tf.float32, shape=[None, 128])
# create two deep neural network - one for main model one for target model
self.main_q_values, self.main_vars = self.create_model(self.X_state_pixel, self.X_state_ram, name="main") # main learns from target then target gets updated to main
self.target_q_values, self.target_vars = self.create_model(self.X_state_pixel, self.X_state_ram, name="target") # we will use the main network to update this one
# update the target network to have same weights of the main network
# update the target network to have the same weights as the main network
self.copy_ops_hard = [targ_var.assign(self.main_vars[targ_name]) for targ_name, targ_var in self.target_vars.items()]
self.copy_ops_soft = [targ_var.assign(targ_var * (1. - self.tau) + self.main_vars[targ_name] * self.tau) for targ_name, targ_var in self.target_vars.items()]
self.copy_online_to_target = tf.group(*self.copy_ops_hard) # group to apply the operations list
# we create the model for training
with tf.variable_scope("train"):
# variables for actions (X_action) and target values (y)
self.X_action = tf.placeholder(tf.int32, shape=[None])
self.y = tf.placeholder(tf.float32, shape=[None])
self.importance = tf.placeholder(tf.float32, shape=[None])
self.q_value = tf.reduce_sum(self.main_q_values * tf.one_hot(self.X_action, self.action_size), axis=1)
# used to make the target of q table close to real value
self.error = self.y - self.q_value
self.loss = tf.reduce_mean(tf.multiply(tf.square(self.error), self.importance))
# global step to remember the number of times the optimizer was used
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
# to take the optimizer and tell it to minimize the loss, the function will also add +1 to global_step at each iteration
self.training_op = self.optimizer.minimize(self.loss, global_step=self.global_step)
# saving the session - if u close the notebook it will load back the previous model
self.saver = tf.train.Saver()
self.sess = tf.Session()
if os.path.isfile(self.checkpoint_path + ".index"):
self.saver.restore(self.sess, self.checkpoint_path)
else:
self.sess.run(tf.global_variables_initializer())
self.sess.run(self.copy_online_to_target)
"""
CREATING THE MIXED NETWORK
"""
def create_model(self, X_state_pixel, X_state_ram, name):
prev_layer = X_state_pixel / 255.0 # scale pixel intensities to the [0, 1.0] range.
ram_layer = X_state_ram / 255.0 # scale pixel intensities to the [0, 1.0] range.
initializer = tf.variance_scaling_initializer()
with tf.variable_scope(name) as scope:
prev_layer = tf.layers.conv2d(prev_layer, filters=32, kernel_size=8, strides=4, padding="SAME",
activation=tf.nn.relu, kernel_initializer=initializer)
prev_layer = tf.layers.conv2d(prev_layer, filters=64, kernel_size=4, strides=2, padding="SAME",
activation=tf.nn.relu, kernel_initializer=initializer)
prev_layer = tf.layers.conv2d(prev_layer, filters=64, kernel_size=3, strides=1, padding="SAME",
activation=tf.nn.relu, kernel_initializer=initializer)
flatten = tf.reshape(prev_layer, shape=[-1, 64 * 12 * 10])
final_cnn = tf.layers.dense(flatten, 512, activation=tf.nn.relu, kernel_initializer=initializer)
ram_layer = tf.layers.dense(ram_layer, 128, activation=tf.nn.relu, kernel_initializer=initializer)
ram_layer = tf.layers.dense(ram_layer, 128, activation=tf.nn.relu, kernel_initializer=initializer)
concat = tf.concat([final_cnn, ram_layer], 1)
output = tf.layers.dense(concat, self.action_size, kernel_initializer=initializer)
# create a dictionary of trainable vars by their name
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)
trainable_vars_by_name = {var.name[len(scope.name):]: var for var in trainable_vars}
return output, trainable_vars_by_name
""" ------- CHOOSING AN ACTION -------"""
def get_action(self, state_pixel, state_ram):
q_values = self.main_q_values.eval(feed_dict={self.X_state_pixel: [state_pixel], self.X_state_ram: [state_ram]})
self.epsilon = max(self.min_epsilon, self.max_epsilon - ((self.max_epsilon - self.min_epsilon)/self.final_exploration_frame)*self.global_step.eval()) # slowly decrease epsilon
if np.random.rand() < self.epsilon:
return np.random.randint(self.action_size) # choose random action
else:
return np.argmax(q_values) # optimal action
""" ------- TRAINING -------"""
def train(self, experience, batch_size=32, priority_scale=0.0):
self.replay_buffer.add(experience) # add experience to buffer
# extract an experience batch from the buffer
(pixel_state, ram_state, action, pixel_next_state, ram_next_state, reward, done), importance, indices = self.replay_buffer.sample(batch_size, priority_scale=priority_scale)
# compute q values of next state
next_q_values = self.target_q_values.eval(feed_dict={self.X_state_pixel: np.array(pixel_next_state),
self.X_state_ram: np.array(ram_next_state)})
next_q_values[done] = np.zeros([self.action_size]) # set to 0 if done = true
# compute target values
y_val = reward + self.discount_rate * np.max(next_q_values)
# train the main network
importance = (importance**(1-self.epsilon)).reshape((importance.shape[0],))
feed = {self.X_state_pixel: np.array(pixel_state), self.X_state_ram: np.array(ram_state),
self.X_action: np.array(action), self.y: y_val, self.importance: importance}
_, self.loss_val, self.error_val = self.sess.run([self.training_op, self.loss, self.error], feed_dict=feed)
self.replay_buffer.set_priorities(indices, self.error_val)
agent = QLearningAgent(env)
episodes = 500  # number of episodes
list_rewards = []  # per-episode total reward, pickled to disk after training
total_reward = 0  # reward per episode
copy_steps = 10000  # update target network (from main network) every n steps
save_steps = 10000  # save model every n steps
frame_skip_rate = 4  # choose an action / train only every 4th frame
# Main training loop: one episode per iteration, acting/training every
# frame_skip_rate-th frame and repeating the last action in between.
with agent.sess:
    for e in range(episodes):
        pixel_state = prep_obs(env.reset())
        ram_state = env.unwrapped._get_ram()
        done = False
        i = 1  # iterator to keep track of steps per episode - for frame skipping
        total_reward = 0
        action = 0
        while not done:
            step = agent.global_step.eval()
            if i % frame_skip_rate == 0:
                action = agent.get_action(pixel_state, ram_state)
            pixel_next_state, reward, done, info = env.step(action)
            pixel_next_state = prep_obs(pixel_next_state)
            ram_next_state = env.unwrapped._get_ram()
            reward = np.sign(reward)  # clip rewards to {-1, 0, 1}
            if i % frame_skip_rate == 0:
                agent.train((pixel_state, ram_state, action, pixel_next_state, ram_next_state, reward, done), priority_scale=0.8)
            pixel_state = pixel_next_state
            ram_state = ram_next_state
            total_reward += reward
            # regularly update target DQN - every copy_steps optimizer steps
            if step % copy_steps == 0:
                agent.copy_online_to_target.run()
            # save model regularly - every save_steps optimizer steps
            if step % save_steps == 0:
                agent.saver.save(agent.sess, agent.checkpoint_path)
            i += 1
        print("\r\tEpisode: {}/{},\tStep: {}\tTotal Reward: {}".format(e + 1, episodes, step, total_reward))
        list_rewards.append(total_reward)
# Bug fix: the original passed an anonymous open(...) handle to pickle.dump and
# never closed it; a context manager guarantees the file is flushed and closed.
with open("results/pixel_ram_seaquest_test.p", "wb") as rewards_file:
    pickle.dump(list_rewards, rewards_file)
plt.plot(list_rewards)
plt.show()
b8f2cfe33fecf9d2494dbeb13c9f3b64647fe49a | Python | Fyssion/FyssionMediaServer | /server/utils/flags.py | UTF-8 | 3,617 | 2.875 | 3 | [] | no_license | """
This source code was responsibly sourced from Rapptz/discord.py
Original: https://github.com/Rapptz/discord.py/blob/a8f44174bafed3989ec2959a62b89006f4a9e9a1/discord/flags.py
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
class flag_value:
    """Descriptor exposing a single bit of an owner's ``value`` field.

    The decorated function is called once with ``None`` and must return the
    bit mask for this flag. Reading the attribute on an instance tests that
    bit via ``_has_flag``; assigning sets/clears it via ``_set_flag``.
    """

    def __init__(self, func):
        # The wrapped function is only a mask factory; evaluate it once and
        # carry over its documentation.
        self.flag = func(None)
        self.__doc__ = func.__doc__

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself so it can be
        # introspected (e.g. by fill_with_flags).
        return self if instance is None else instance._has_flag(self.flag)

    def __set__(self, instance, value):
        instance._set_flag(self.flag, value)

    def __repr__(self):
        return "<flag_value flag={.flag!r}>".format(self)
class alias_flag_value(flag_value):
    # Marker subclass: behaves exactly like flag_value, but BaseFlags.__iter__
    # skips instances of this class so each underlying flag is reported once.
    pass
def fill_with_flags(*, inverted=False):
    """Class-decorator factory for BaseFlags subclasses.

    The decorator records every flag_value attribute of the class in
    ``VALID_FLAGS`` and sets ``DEFAULT_VALUE``: 0 normally, or all defined
    bits set when ``inverted`` is True.
    """
    def decorator(cls):
        valid = {}
        for attr_name, attr in cls.__dict__.items():
            if isinstance(attr, flag_value):
                valid[attr_name] = attr.flag
        cls.VALID_FLAGS = valid
        if not inverted:
            cls.DEFAULT_VALUE = 0
        else:
            # A value with every bit up to the widest flag set.
            max_bits = max(cls.VALID_FLAGS.values()).bit_length()
            cls.DEFAULT_VALUE = -1 + (2 ** max_bits)
        return cls
    return decorator
# n.b. flags must inherit from this and use the decorator above
class BaseFlags:
    """Base class for bit-flag containers.

    Subclasses are expected to be decorated with ``fill_with_flags`` so that
    ``VALID_FLAGS`` (name -> bit mask) and ``DEFAULT_VALUE`` exist on the
    class. All state lives in the single integer ``value``.
    """
    __slots__ = ("value",)

    def __init__(self, **kwargs):
        self.value = self.DEFAULT_VALUE
        for key, value in kwargs.items():
            if key not in self.VALID_FLAGS:
                # Bug fix: the original used f"{key:r}", where "r" is an
                # (invalid) format spec rather than the !r conversion, so this
                # line raised ValueError instead of the intended TypeError.
                raise TypeError(f"{key!r} is not a valid flag name.")
            setattr(self, key, value)

    @classmethod
    def _from_value(cls, value):
        """Alternate constructor: wrap a raw integer without validation."""
        self = cls.__new__(cls)
        self.value = value
        return self

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.value == other.value

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.value)

    def __repr__(self):
        return f"<{self.__class__.__name__} value={self.value}>"

    def __iter__(self):
        # Yield (flag_name, is_set) for every flag_value on the class,
        # skipping aliases so each underlying bit is reported only once.
        for name, value in self.__class__.__dict__.items():
            if isinstance(value, alias_flag_value):
                continue
            if isinstance(value, flag_value):
                yield (name, self._has_flag(value.flag))

    def _has_flag(self, o):
        # True only when every bit of mask `o` is set.
        return (self.value & o) == o

    def _set_flag(self, o, toggle):
        if toggle is True:
            self.value |= o
        elif toggle is False:
            self.value &= ~o
        else:
            raise TypeError(f"Value to set for {self.__class__.__name__} must be a bool.")
| true |
36cfc2d2f193c93ecd32eda7ea95598fb966d870 | Python | krishnakaushik25/Multi-Class-Text-Classification-BERT | /Modular_code/src/ML_Pipeline/utils.py | UTF-8 | 2,494 | 3.09375 | 3 | [] | no_license | import pandas as pd
import tensorflow as tf
from datasets import list_datasets, load_dataset
# check the gpu settings
def check_gpu_info():
    """Print the TensorFlow version, whether a GPU is visible, and its name."""
    print("Tensorflow version : ", tf.__version__)
    # Bug fix: the original printed bool(tf.test.is_gpu_available) -- the
    # truthiness of the *function object* itself, which is always True.
    # Query the visible physical GPU devices instead.
    print("GPU available : ", len(tf.config.list_physical_devices('GPU')) > 0)
    print("GPU name : ", tf.test.gpu_device_name())
# import and the display the dataset
def load_and_display_dataset_details():
    """Load the AG News dataset and print its structure plus a few samples.

    Dumps dataset-level metadata, the first two training records, the
    text/label pair of record 0, and the text/label pair at three fixed
    training indices. Always returns None.
    """
    ag_news_dataset = load_dataset('ag_news')

    # Dataset-level metadata.
    print("\n", ag_news_dataset)
    print("Dataset Items: \n", ag_news_dataset.items())
    print("\nDataset type: \n", type(ag_news_dataset))
    print("\nShape of dataset: \n", ag_news_dataset.shape)
    print("\nNo of rows: \n", ag_news_dataset.num_rows)
    print("\nNo of columns: \n", ag_news_dataset.num_columns)
    print("\nColumn Names: \n", ag_news_dataset.column_names)
    print("\n", ag_news_dataset.data)

    # First two full training records, then record 0's text and label.
    train_split = ag_news_dataset['train']
    print(train_split[0])
    print(train_split[1])
    print(train_split['text'][0])
    print(train_split['label'][0])

    # One sample from a few widely spaced training positions.
    for sample_index in (35000, 60000, 100000):
        print()
        print(train_split['text'][sample_index])
        print(train_split['label'][sample_index])
    return None
# convert the data to dataframes
def load_and_convert_data_to_df():
    """Load the AG News train/test splits and convert them to DataFrames.

    Prints shapes, first records, features and head/tail previews along the
    way. Returns (train_df, test_df, class_label_names).
    """
    train_split = load_dataset('ag_news', split='train')
    test_split = load_dataset('ag_news', split='test')

    print("Train Dataset : ", train_split.shape)
    print("Test Dataset : ", test_split.shape)
    print(train_split[0])
    print(test_split[0])
    print("\nTrain Dataset Features: \n", train_split.features)
    print("\nTest Dataset Features: \n", test_split.features)

    train_df = pd.DataFrame(data=train_split)
    test_df = pd.DataFrame(data=test_split)
    # Human-readable names for the four integer labels.
    class_label_names = ['World', 'Sports', 'Business', 'Sci/Tech']

    print("First 10 rows of Train data : \n", train_df.head(10))
    print("Last 10 rows of Train data : \n", train_df.tail(10))
    print("First 10 rows of Test data : \n", test_df.head(10))
    print("Last 10 rows of Test data : \n", test_df.tail(10))
    print("Class Label Names: \n", class_label_names)
    return train_df, test_df, class_label_names
| true |
7938714a865e1115a45fa79042ff23317c0c2165 | Python | JaMesLiMers/Image_Retrieval_Framework_FYP | /Models/Word2Vec/source/w2v_tfidf.py | UTF-8 | 4,070 | 3.109375 | 3 | [] | no_license | import numpy as np
from numpy.core.fromnumeric import size
from tqdm import tqdm
class W2V_TFIDF:
    """Sentence embeddings built from TF-IDF-weighted word2vec vectors.

    A document (or query) vector is the sum of its words' word2vec vectors,
    each weighted by the word's TF-IDF score.
    """
    def __init__(self, corpora, tfidf_model, tfidf_M, w2v_model, corpora_vocab):
        """Store the corpora and the fitted TF-IDF / word2vec models.

        Args:
            corpora: list of documents, each a whitespace-separated string,
                e.g. ["There is a cat", "There is a dog", "There is a wolf"]
            tfidf_model: sklearn.feature_extraction.text.TfidfVectorizer that
                has already had fit_transform(corpora) called.
            tfidf_M: dense TF-IDF matrix for `corpora`
                (n_documents x n_vocabulary), columns aligned with
                `corpora_vocab`.
            w2v_model: gensim.models.KeyedVectors instance.
            corpora_vocab: full vocabulary of `corpora`.
        """
        self.corpora = corpora
        self.tfidf_model = tfidf_model
        self.tfidf_M = tfidf_M
        self.corpora_vocab = corpora_vocab
        self.w2v_model = w2v_model
        self.w2v_vocab = list(self.w2v_model.vocab)

    def corpora2vec(self):  # NOTE: very slow for large corpora
        """Convert every document in `corpora` to a document vector.

        Side effect: saves the resulting matrix to 'corpora_vec_M.npy'.

        Returns:
            doc_vec_M: np.array of shape (n_documents, vector_size).
        """
        doc_vec_M = np.zeros((len(self.corpora), self.w2v_model.vector_size))  # n_documents x vector_size
        corpora_vocab_index = {}
        for i, word in enumerate(self.corpora_vocab):
            corpora_vocab_index[word] = i
        for i, sample in enumerate(tqdm(self.corpora)):
            for word in sample.split(' '):
                if word not in self.w2v_vocab:  # word has no embedding
                    continue
                word_index = corpora_vocab_index[word]
                doc_vec_M[i] += self.tfidf_M[i][word_index] * self.w2v_model.get_vector(word)
        np.save('corpora_vec_M.npy', doc_vec_M)
        return doc_vec_M

    def token2vec(self, queryTokens):
        """Convert a tokenised query string to a single query vector.

        Args:
            queryTokens: whitespace-separated tokens queried together.

        Returns:
            token_vec: np.array of shape (1, vector_size).
        """
        tfidf_vec = self.tfidf_model.transform([queryTokens]).toarray().squeeze()
        corpora_vocab_index = {}
        for i, word in enumerate(self.corpora_vocab):
            corpora_vocab_index[word] = i
        token_vec = np.zeros((1, self.w2v_model.vector_size))  # 1 x vector_size
        for word in queryTokens.split(' '):
            if word not in self.w2v_vocab:  # word has no embedding
                continue
            # NOTE(review): a word known to w2v but absent from corpora_vocab
            # would raise KeyError here -- confirm intended behaviour.
            word_index = corpora_vocab_index[word]
            token_vec[0] += tfidf_vec[word_index] * self.w2v_model.get_vector(word)
        # Bug fix: removed leftover debug prints; one of them indexed
        # tfidf_vec[word_index + 1], which raised IndexError whenever the last
        # vocabulary word appeared in the query.
        return token_vec
# if __name__ == '__main__':
# from sklearn.feature_extraction.text import TfidfVectorizer
# import gensim
# corpora = ['两只 老虎 爱 跳舞',
# '小兔子 乖乖 拔 萝卜',
# '我 和 小鸭子 学 走路',
# '童年 是 最美 的 礼物']
# corpora_w2v = []
# for sentence in corpora:
# corpora_w2v.append(sentence.split(' '))
# print(corpora_w2v)
# vectorizer = TfidfVectorizer(token_pattern=r'(?u)\b\w+\b')
# tfidf_model = vectorizer.fit_transform(corpora)
# w2v_model = gensim.models.Word2Vec(corpora_w2v, size=300, min_count=1).wv
# print('w2v_vocab:', list(w2v_model.vocab))
# corpora_vocab = vectorizer.get_feature_names()
# print('corpora_vocab:', corpora_vocab)
# w2v_tfidf = W2V_TFIDF(corpora, tfidf_model, w2v_model, corpora_vocab)
# result = w2v_tfidf.corpora2vec()
# print(result) | true |
bed317dcb9681806f5549988014b9bc5d3276fbd | Python | vladworldss/billing | /src/db/logic.py | UTF-8 | 3,324 | 2.71875 | 3 | [] | no_license | import logging
from decimal import Decimal
from sqlalchemy.orm import Session
from db.models import Wallet, Transaction
from db.constants import WalletStatuses, TransactionStatuses, Currency
logger = logging.getLogger('billing.' + __name__)
class WalletStore:
    """Query/command helpers for Wallet rows. All methods return plain dicts."""

    @staticmethod
    def get_wallet(db_session: Session, wallet_id: int):
        """Return the wallet with `wallet_id` as a dict; raise if absent."""
        record = db_session.query(Wallet).filter_by(wallet_id=wallet_id).first()
        if not record:
            raise Exception('Wallet does not found')
        return record.as_dict()

    @staticmethod
    def create_wallet(db_session: Session, handshake_id: str, amount: Decimal):
        """Create and persist an active USD wallet, returning it as a dict."""
        new_wallet = Wallet(
            amount=amount,
            status=WalletStatuses.ACTIVE.value,
            currency=Currency.USD.value,
            handshake_id=handshake_id
        )
        db_session.add(new_wallet)
        db_session.commit()
        db_session.refresh(new_wallet)
        created_id = new_wallet.wallet_id if new_wallet else None
        logger.debug(
            'Wallet by handshake_id "{}": id={} has been created'.format(handshake_id, created_id)
        )
        return new_wallet.as_dict()

    @staticmethod
    def get_wallet_by_handshake(db_session: Session, handshake_id: str):
        """Return the wallet created for `handshake_id` as a dict; raise if absent."""
        record = db_session.query(Wallet).filter(Wallet.handshake_id == handshake_id).first()
        if not record:
            raise Exception(f'Wallet by handshake_id={handshake_id} not found')
        logger.debug(
            'Found wallet by handshake_id "{}": id={}'.format(handshake_id, record.wallet_id if record else None)
        )
        return record.as_dict()

    @staticmethod
    def get_wallet_by_id(db_session: Session, wallet_id: int):
        """Return the wallet with `wallet_id` as a dict; raise if absent."""
        record = db_session.query(Wallet).filter(Wallet.wallet_id == wallet_id).first()
        if not record:
            raise Exception(f'Wallet by id={wallet_id} not found')
        logger.debug(
            'Found wallet by id={}'.format(record.wallet_id if record else None)
        )
        return record.as_dict()
class TransactionStore:
    """Helpers for money transfers between wallets."""

    @staticmethod
    def create_transaction(
            db_session: Session,
            handshake_id: str,
            source_wallet_id: int,
            dest_wallet_id: int,
            summ: Decimal
    ) -> dict:
        """Transfer `summ` from source to destination and record a Transaction.

        The transaction is marked FAILED (and no balances change) when the
        source wallet has insufficient funds; otherwise PROCESSED. Returns
        the persisted transaction as a dict. Raises when either wallet id
        does not exist.
        """
        wallets = db_session.query(Wallet).filter(Wallet.wallet_id.in_([source_wallet_id, dest_wallet_id])).all()
        w_dict = {w.wallet_id: w for w in wallets}
        # Bug fix: the original only checked that the query returned *some*
        # wallets, so a missing source or destination wallet surfaced later as
        # a KeyError instead of a clear error.
        if source_wallet_id not in w_dict or dest_wallet_id not in w_dict:
            raise Exception('Unknown wallets')
        source_wallet, dest_wallet = w_dict[source_wallet_id], w_dict[dest_wallet_id]
        trans = Transaction(
            handshake_id=handshake_id,
            source_wallet_id=source_wallet_id,
            destination_wallet_id=dest_wallet_id,
            trans_sum=summ
        )
        new_amount = Decimal(source_wallet.amount) - summ
        if new_amount < 0:
            trans.status = TransactionStatuses.FAILED.value
            trans.info = {'msg': f'wallet {source_wallet_id} does not have enough money'}
        else:
            trans.status = TransactionStatuses.PROCESSED.value
            trans.info = {'msg': 'transaction was successful'}
            source_wallet.amount = new_amount
            # NOTE(review): the source amount is coerced to Decimal above but
            # the destination is incremented in-place -- confirm both columns
            # use the same numeric type.
            dest_wallet.amount += summ
        db_session.add(trans)
        db_session.commit()
        db_session.refresh(trans)
        return trans.as_dict()
| true |
07bce61e75315c4c38bf2d8f5651ac5793e9f2c0 | Python | GongMeiting2020/IBI1_2019-20 | /Practical5/variables.py | UTF-8 | 630 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 00:26:39 2020
@author: gongmeiting
"""
# Part 1: for a three-digit number, writing it twice in a row multiplies it by
# 1001 = 7 * 11 * 13, so the six-digit number is always divisible by 7 (and by
# 11 and 13), and dividing by all three factors returns the original number.
num = 457
repeated = num * 1000 + num
print(repeated % 7 == 0)
step1 = repeated / 7
step2 = step1 / 11
step3 = step2 / 13
print(num == step3)
print(num > step3)
print(num < step3)

# Part 2: the same check, driven by user input (fresh names to avoid reusing
# the variables above).
text = input("a three-digit number:")
value = int(text)
repeated_input = value * 1000 + value
if repeated_input % 7 == 0:
    quotient = ((repeated_input / 7) / 11) / 13
    if value > quotient:
        print("f>j")
    elif value < quotient:
        print("f<j")
    else:
        print("f=j")
else:
    print("g cannot devided by 7")

# Part 3: exclusive-or written out manually matches the != operator.
x_flag = True
y_flag = False
manual_xor = (x_flag and not y_flag) or (y_flag and not x_flag)
print(manual_xor)
builtin_xor = x_flag != y_flag
print(manual_xor == builtin_xor)
| true |
c04d74d94a3a40f90dba8a4a86ddd2dfc288655c | Python | explore-ITP/explore-itp.github.io | /code/notebook-3/vis1_dropdown.py | UTF-8 | 8,302 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 10:43:16 2020
@author: larabreitkreutz
"""
import plotly.graph_objects as go
import pandas as pd
import plotly.io as pio
pio.renderers.default = "browser"
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
# Read in data from main downsampled CSV
# Bug fix: the original line was `filename = #download and add ...`, i.e. an
# assignment with no right-hand side, which is a SyntaxError and prevented the
# whole script from running. Point it at the expected CSV; download
# "main_downsamp.csv" and place it next to this script.
filename = "main_downsamp.csv"
df = pd.read_csv(filename, encoding='utf-8')
# Convert dataframe index to a column
df.reset_index(inplace=True)
df = df.rename(columns = {'index':'Time'})
# Creates reference bounds for latitudes and longitudes
BBox = ((df.Long.min(),df.Long.max(),
         df.Lat.min(),df.Lat.max()))
# Creates list of colors, chosen to match color scheme throughout project
colors = ['#7dcbb8', '#9786a0', '#79e788', '#dd7798', '#75ccea', '#3d8198']
# FIGURE 1: EARLY DEPLOYMENTS
# Creates data subsets for early deployments (ITP machines 1, 6 and 8)
is_1 = df[df.ITP == 1]
is_2 = df[df.ITP == 6]
is_3 = df[df.ITP == 8]
# Index Count for rotating through colors
ind=1
# Defines figure traces for each ITP machine (named traces with hover text)
trace1 = go.Scattergeo(
    mode= "markers",
    name= "ITP 1",
    lat= is_1['Lat'],
    lon= is_1['Long'],
    marker_size=7,
    marker_color=colors[ind],
    opacity=.8,
    text=is_1['Time']
    )
ind+=1
trace2 = go.Scattergeo(
    mode= "markers",
    name= "ITP 6",
    lat= is_2['Lat'],
    lon= is_2['Long'],
    marker_size=7,
    marker_color=colors[ind],
    opacity=.8,
    text=is_2['Time']
    )
ind+=1
trace3 = go.Scattergeo(
    mode= "markers",
    name= "ITP 8",
    lat= is_3['Lat'],
    lon= is_3['Long'],
    marker_size=7,
    marker_color=colors[ind],
    opacity=.8,
    text=is_3['Time']
    )
# Creates figure
fig1 = go.Figure(data=[trace1,trace2,trace3])
# Adds title and geo elements
fig1.update_layout(
    geo=dict(
        landcolor= "rgb(212, 212, 212)",
        showcountries= True,
        countrycolor= "rgb(245, 245, 245)",
        ),
    title_text= "Subset of Drift Tracks deployed between 2004-2006",
    title_x=0.5,
    width= 800,
    height= 700)
# Changes projection and colors (orthographic globe zoomed to the data extent)
fig1.update_geos(projection_type="orthographic",
    fitbounds="locations",
    showcoastlines=True, coastlinecolor="White",
    showland=True, landcolor="#576b6c",
    showocean=True, oceancolor="#383d3d",
    showlakes=True, lakecolor="#122525",
    showrivers=True, rivercolor="#122525",
    lataxis_showgrid=True, lonaxis_showgrid=True)
# FIGURE 2: MIDDLE DEPLOYMENTS
# Creates data subsets for middle deployments (ITP machines 41, 48 and 49)
is_41 = df[df.ITP == 41]
is_48 = df[df.ITP == 48]
is_49 = df[df.ITP == 49]
# Colour index starts at 2 so this figure uses a different slice of the palette.
ind=2
trace41 = go.Scattergeo(
    mode= "markers",
    name= "",
    lat= is_41['Lat'],
    lon= is_41['Long'],
    marker_size=7,
    marker_color=colors[ind],
    )
ind+=1
trace48 = go.Scattergeo(
    mode= "markers",
    name= "",
    lat= is_48['Lat'],
    lon= is_48['Long'],
    marker_size=7,
    marker_color=colors[ind],
    )
ind+=1
trace49 = go.Scattergeo(
    mode= "markers",
    name= "",
    lat= is_49['Lat'],
    lon= is_49['Long'],
    marker_size=7,
    marker_color=colors[ind],
    )
fig2 = go.Figure(data=[trace41,trace48,trace49])
fig2.update_layout(
    geo=dict(
        landcolor= "rgb(212, 212, 212)",
        showcountries= True,
        countrycolor= "rgb(245, 245, 245)",
        ),
    title_text= "Subset of Drift Tracks deployed between 2011-2014",
    title_x=0.5,
    width= 800,
    height= 700)
fig2.update_geos(projection_type="orthographic",
    fitbounds="locations",
    showcoastlines=True, coastlinecolor="White",
    showland=True, landcolor="#576b6c",
    showocean=True, oceancolor="#383d3d",
    showlakes=True, lakecolor="#122525",
    showrivers=True, rivercolor="#122525",
    lataxis_showgrid=True, lonaxis_showgrid=True)
# FIGURE 3: LATE DEPLOYMENTS
# Creates data subsets for late deployments (ITP machines 86, 91 and 92)
is_86 = df[df.ITP == 86]
is_91 = df[df.ITP == 91]
is_92 = df[df.ITP == 92]
# Colour index starts at 3 so this figure uses yet another palette slice.
ind=3
trace86 = go.Scattergeo(
    mode= "markers",
    name= "",
    lat= is_86['Lat'],
    lon= is_86['Long'],
    marker_size=7,
    marker_color=colors[ind],
    )
ind+=1
trace91 = go.Scattergeo(
    mode= "markers",
    name= "",
    lat= is_91['Lat'],
    lon= is_91['Long'],
    marker_size=7,
    marker_color=colors[ind],
    )
ind+=1
trace92 = go.Scattergeo(
    mode= "markers",
    name= "",
    lat= is_92['Lat'],
    lon= is_92['Long'],
    marker_size=7,
    marker_color=colors[ind],
    )
fig3 = go.Figure(data=[trace86,trace91,trace92])
fig3.update_layout(
    geo=dict(
        landcolor= "rgb(212, 212, 212)",
        showcountries= True,
        countrycolor= "rgb(245, 245, 245)",
        ),
    title_text= "Subset of Drift Tracks deployed between 2014-2016",
    title_x=0.5,
    width= 800,
    height= 700)
fig3.update_geos(projection_type="orthographic",
    fitbounds="locations",
    showcoastlines=True, coastlinecolor="White",
    showland=True, landcolor="#576b6c",
    showocean=True, oceancolor="#383d3d",
    showlakes=True, lakecolor="#122525",
    showrivers=True, rivercolor="#122525",
    lataxis_showgrid=True, lonaxis_showgrid=True)
# DROPDOWN MENU added to fig1 (can be modified for each fig)
#From here, make one figure at a time, using the data subsets below. Use '#' to comment out all but one subset at a time.
# DEFAULT is list_of_machines1 (must match the machine ids of the active figure)
list_of_machines1 = [1,6,8]
#list_of_machines2 = [41,48,49]
#list_of_machines3 = [86,91,92]
def getDataByButton(filter_machine):
    """Return restyle args (data dict + title dict) for one ITP machine.

    `filter_machine` is the machine id matched against the global df's ITP
    column.
    """
    # Bug fix: removed `global metric` -- no `metric` variable exists anywhere
    # in this script, so the declaration was dead and misleading. `global df`
    # is also unnecessary, since df is only read here.
    filtered = df[df.ITP == filter_machine]
    # return arg list to set x, y and chart title
    return [ {'Lat':[filtered['Lat']], 'Long':[filtered['Long']], 'Time':[filtered['Time']], 'ITP':filter_machine},
            {'Title':filter_machine} ]
buttons = []
#ADD AN "ALL" Button
buttons.append(dict(method='restyle',
                    label='All Machines',
                    visible=True))
# Creates button for each machine
for n in range(len(list_of_machines1)):
    buttons.append(dict(method='restyle',
                        label='ITP Machine' + str(list_of_machines1[n]),
                        visible=True,
                        # Bug fix: the original passed the loop index n (0..2)
                        # to getDataByButton, filtering df.ITP against 0/1/2
                        # instead of the actual machine ids shown in the label.
                        args=[getDataByButton(list_of_machines1[n])]
                        )
                   )
# Wrap the buttons in a single dropdown menu definition.
updatemenu = []
your_menu = dict()
updatemenu.append(your_menu)
updatemenu[0]['buttons'] = buttons
updatemenu[0]['direction'] = 'down'
updatemenu[0]['showactive'] = True
# Adds dropdown to fig1, fig2, or fig3. (DEFAULT is fig1)
fig1.update_layout(showlegend=False, updatemenus=updatemenu)
# NOTE: this second update_layout call replaces the updatemenus assigned
# just above with the visibility-toggle buttons defined below.
fig1.update_layout(
    updatemenus=[go.layout.Updatemenu(
        #active=0,
        buttons=[
            dict(
                method="restyle",
                # the index of True aligns with the indices of plot traces
                args=[{'visible': [True, True, True]},
                      {'title': 'All'}]),
            # BUG FIX: list_of_machines1 holds ints, so the original
            # 'ITP ' + list_of_machines1[i] raised TypeError; str() is
            # required before concatenation.
            dict(
                method='restyle',
                args=[{'visible': [True, False, False]},
                      {'title': 'ITP ' + str(list_of_machines1[0])}]),
            dict(
                method='restyle',
                args=[{'visible': [False, True, False]},
                      {'title': 'ITP ' + str(list_of_machines1[1])}]),
            dict(
                method='restyle',
                args=[{'visible': [False, False, True]},
                      {'title': 'ITP ' + str(list_of_machines1[2])}]),
        ],
    )],
)
fig1.show()
| true |
8d81ed08fecf0110b882e230061a4d518ac38184 | Python | redhat-raptor/pi-camera | /receiver/receiver.py | UTF-8 | 1,136 | 2.609375 | 3 | [] | no_license | import socket
import os
from datetime import datetime
import logging
# Configure the root logger once for the whole process: timestamped,
# level-tagged messages at DEBUG verbosity.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.DEBUG,
    datefmt='%Y-%m-%d %H:%M:%S')
def open_connection():
    """Wait for one TCP client on port 9000 and return the connected socket.

    A fresh listening socket is created per call and closed as soon as a
    client is accepted; only the per-client connection is returned.
    """
    logging.info('Starting receiver')
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow immediate rebinding of the port across successive calls.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(("0.0.0.0", 9000))
    listener.listen(1)
    conn, _address = listener.accept()
    listener.close()
    return conn
def file_transfer(client):
    """Receive one file from *client* and save it as a timestamped .jpg.

    Bytes are streamed into a '.part' file and renamed only after the
    sender closes the connection, so partially received files are never
    left with the final name.
    """
    logging.info('Starting transfer')
    target = 'picture-received-{}.jpg'.format(
        datetime.now().strftime('%Y%m%d-%H%M%S'))
    partial = f'{target}.part'
    with open(partial, 'wb') as sink:
        # recv() yields b'' once the peer closes; iter()'s sentinel form
        # turns that into a clean end-of-stream.
        for chunk in iter(lambda: client.recv(1024), b''):
            sink.write(chunk)
    os.rename(partial, target)
    logging.info('wrote {} bytes'.format(os.stat(target).st_size))
def main():
    """Serve forever: accept one client at a time and receive one file each."""
    while True:
        client = open_connection()
        try:
            file_transfer(client)
        finally:
            # Close the per-client socket even if the transfer raises, so a
            # misbehaving sender cannot leak file descriptors.
            client.close()
if __name__ == "__main__":
    main()
| true |
108b63f69a955f3f4d652c555aa65a9c3556c41c | Python | suixin233/OJ | /input_output.py | UTF-8 | 719 | 3.171875 | 3 | [] | no_license |
def in_put():
    """Read one line from stdin and return its space-separated tokens.

    Returns:
        list[str]: the tokens produced by splitting the line on ' '.

    NOTE(review): this module rebinds the name in_put several times below;
    only the definition bound last is the one callable at runtime.
    """
    # The original copied the split tokens into a second list (num3) that
    # was never used; return the split result directly instead.
    return input().split(' ')
def out_put(x):
    """Join the items of *x* into one space-separated string."""
    pieces = [str(item) for item in x]
    return " ".join(pieces)
def in_put():
    """Read a single line from stdin and return it as an int.

    NOTE(review): relies on the module-level `import sys` that appears
    later in this file; it has already executed by the time this runs.
    """
    raw = sys.stdin.readline()
    return int(raw)
import sys
def in_put():
    """Read all of stdin: line one is a count n, line two holds the ints.

    Returns:
        tuple[int, list[int]]: (n, the ints parsed from the second line).
    """
    raw = sys.stdin.readlines()
    count = int(raw[0].strip('\n'))
    values = [int(token) for token in raw[1].strip('\n').split(' ')]
    return count, values
def in_put():
    """Read a count n from stdin, then n integers, one per line.

    Returns:
        tuple[int, list[int]]: (n, the n integers in input order).
    """
    raw = sys.stdin.readlines()
    count = int(raw[0].strip('\n'))
    values = [int(raw[i + 1].strip('\n')) for i in range(count)]
    return count, values
if __name__ == '__main__':
    # Calls whichever in_put() was bound last (count + one int per line)
    # and prints a blank line; the parsed values themselves are unused.
    [n, line] = in_put()
    print()
265d1dc3109dcbc8b447a1e2087333a81c6285b1 | Python | aludvik/sawtooth-core | /validator/sawtooth_validator/database/lmdb_database.py | UTF-8 | 3,913 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from threading import RLock
import os
import pickle
try:
import cPickle as pickle
except ImportError:
pass
import lmdb
from sawtooth_validator.database import database
class LMDBDatabase(database.Database):
    """Thread-safe Database implementation persisted with LMDB.

    Every operation is serialized through a single reentrant lock, and
    stored values are pickled on write and unpickled on read.

    Attributes:
        _lock (threading.RLock): reentrant lock guarding all LMDB access.
        _lmdb (lmdb.Environment): the underlying LMDB environment.
    """
    def __init__(self, filename, flag):
        """Open (or create) the LMDB file at *filename*.

        Args:
            filename (str): The filename of the database file.
            flag (str): open mode, following the anydbm.open() convention:
                'c' creates the file if it is missing, 'n' always starts
                from an empty file, anything else opens an existing file.
        """
        super(LMDBDatabase, self).__init__()
        self._lock = RLock()
        must_create = flag == 'c'
        if flag == 'n':
            # 'n' means "new database": discard any previous contents.
            if os.path.isfile(filename):
                os.remove(filename)
            must_create = True
        self._lmdb = lmdb.Environment(path=filename,
                                      map_size=1024**4,
                                      writemap=True,
                                      subdir=False,
                                      create=must_create,
                                      lock=False)

    def __len__(self):
        """Return the number of entries currently stored."""
        with self._lock, self._lmdb.begin() as txn:
            return txn.stat()['entries']

    def __contains__(self, key):
        """Return True if *key* is present in the database."""
        with self._lock, self._lmdb.begin() as txn:
            return txn.get(key) is not None

    def get(self, key):
        """Return the unpickled value stored under *key*, or None if absent.

        Args:
            key (str): The key to retrieve.
        """
        with self._lock, self._lmdb.begin() as txn:
            raw = txn.get(key)
            if raw is not None:
                return pickle.loads(raw)

    def set(self, key, value):
        """Pickle *value* and store it under *key*, replacing any old value.

        Args:
            key (str): The key to set.
            value: The value to associate with the key.
        """
        blob = pickle.dumps(value)
        with self._lock, self._lmdb.begin(write=True, buffers=True) as txn:
            txn.put(key, blob, overwrite=True)

    def delete(self, key):
        """Remove *key* and its value from the database.

        Args:
            key (str): The key to remove.
        """
        with self._lock, self._lmdb.begin(write=True, buffers=True) as txn:
            txn.delete(key)

    def sync(self):
        """Flush pending writes to disk."""
        with self._lock:
            self._lmdb.sync()

    def close(self):
        """Close the underlying LMDB environment."""
        with self._lock:
            self._lmdb.close()

    def keys(self):
        """Return a list of every key currently in the database."""
        with self._lock, self._lmdb.begin() as txn:
            return [db_key for db_key, _value in txn.cursor()]
| true |
7f5363f80116b9e90cedcb189bd9d2f42f978baa | Python | AtsukoFukunaga/us_states_game | /main.py | UTF-8 | 1,058 | 3.46875 | 3 | [] | no_license | import turtle
import pandas as pd
screen = turtle.Screen()
screen.title('U.S. States Game')
screen.bgpic('blank_states_img.gif')
screen.setup(width=800, height=500)
data = pd.read_csv('50_states.csv')
all_states = data.state.to_list()
player = turtle.Turtle()
player.hideturtle()
player.penup()
guessed_states = []
while len(guessed_states) < 50:
    answer = screen.textinput(title=f'{len(guessed_states)}/50 states correct',
                              prompt='What is another state\'s name?')
    # BUG FIX: textinput() returns None when the dialog is cancelled or
    # closed; the original crashed on None.title(). Treat cancelling the
    # dialog like typing "Exit".
    if answer is None or answer.title() == 'Exit':
        # Save the states not yet guessed so the player can study them.
        states_to_learn = [state for state in all_states if state not in guessed_states]
        pd.DataFrame(states_to_learn).to_csv('states_to_learn.csv')
        break
    answer_state = answer.title()
    if answer_state in all_states and answer_state not in guessed_states:
        guessed_states.append(answer_state)
        state_data = data[data.state == answer_state]
        # .iloc[0]: calling int() on a one-element Series is deprecated and
        # removed in modern pandas; extract the scalar explicitly.
        player.goto(int(state_data.x.iloc[0]), int(state_data.y.iloc[0]))
        player.write(answer_state, align='center', font=('Arial', 12, 'normal'))
screen.exitonclick()
| true |