blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5a55ebfcb0486172c7df66cc89c4277592e4d2f6 | Python | liuxiao214/Leetcode_Solutions_Python | /Excel_Sheet_Column_Number.py | UTF-8 | 545 | 3.484375 | 3 | [] | no_license | class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
sum=0
i=len(s)-1
while(i>=0):
sum=sum+(ord(s[i])-64)*(26**(len(s)-i-1))
i=i-1
return sum
class Solution1(object):
    def titleToNumber(self, s):
        """Convert an Excel column title to its 1-based column number.

        :type s: str
        :rtype: int
        """
        # Fix: `reduce` is no longer a builtin in Python 3 -- it moved to
        # functools, so the original line raised NameError there.
        from functools import reduce
        # ord(c) - 64 maps 'A'..'Z' to 1..26; fold as a base-26 numeral.
        return reduce(lambda x, y: x * 26 + y, [ord(c) - 64 for c in s])
s = Solution()
# Fix: the original used Python 2 print statements (`print ord('A')-ord('B')`),
# which are a SyntaxError under Python 3; print is a function now.
print(ord('A') - ord('B'))
print(s.titleToNumber("AB"))
| true |
d3a9d7a8f5141b053f570d2f32d503cd1a3128cd | Python | wpy-111/python | /Spider/day01/08_group_exercise.py | UTF-8 | 419 | 3.125 | 3 | [] | no_license | c = """<div class="animal">
<p class="name">
<a title="Tiger"></a>
</p>
<p class="content">
Two tigers two tigers run fast
</p>
</div>
<div class="animal">
<p class="name">
<a title="Rabbit"></a>
</p>
<p class="content">
Small white rabbit white and white
</p>
</div>
"""
import re
# Extract every value of the `title` attribute from the HTML snippet in `c`.
# re.S (DOTALL) lets '.' match newlines inside the markup.
title_pattern = re.compile(r'<a title=(.*?)></a>', re.S)
matches = title_pattern.findall(c)
print(matches)
| true |
b3875d7b624f9e610750ecbcc35864fd77e150f6 | Python | mada949/dfs-lineup-generator | /NbaConverter.py | UTF-8 | 3,104 | 2.609375 | 3 | [] | no_license | import csv
import sys
import datetime
import re
import os.path
# Merge whichever projection exports exist (Daily Fantasy Cruncher and/or
# Daily Fantasy Nerd) into one normalized players.csv for the season given in
# sys.argv[1] and the date/slate given in sys.argv[2].
#
# Improvements over the original: the two near-identical read loops are
# deduplicated into one helper, and ~60 lines of dead commented-out
# per-player stat aggregation code were removed.
def _copy_player_rows(source_path, writer, header_cell, exclude, columns):
    """Append selected columns of every usable row in `source_path`.

    header_cell -- value of column 0 on the header row (that row is skipped)
    exclude     -- predicate marking rows (injured/out players) to drop
    columns     -- indices to copy into the output, in output order
    """
    with open(source_path, 'rt') as source:
        for row in csv.reader(source):
            if row[0] == header_cell or exclude(row):
                continue
            writer.writerow([row[i] for i in columns])

with open('./nba/inputs/{}/{}/players.csv'.format(sys.argv[1], sys.argv[2]), 'w+') as out_file:
    writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(["Player Name", "Pos", "Salary", "Team", "Proj FP", "Actual FP"])
    cruncher_path = './nba/inputs/{}/{}/players_cruncher.csv'.format(sys.argv[1], sys.argv[2])
    if os.path.exists(cruncher_path):
        # Column 26 flags whether the player is active ('Y').
        _copy_player_rows(cruncher_path, writer, 'Player',
                          lambda row: row[26] != 'Y', [0, 1, 6, 2, 20, 28])
    dfn_path = './nba/inputs/{}/{}/players_dfn.csv'.format(sys.argv[1], sys.argv[2])
    if os.path.exists(dfn_path):
        # Column 2 == 'O' marks players ruled out.
        _copy_player_rows(dfn_path, writer, 'Player Name',
                          lambda row: row[2] == 'O', [0, 3, 4, 5, 25, 28])
52f127ca6272a585d7239b280c50e6c8630251d3 | Python | miladnavi/few-shot-learning | /few_shot_generator.py | UTF-8 | 7,033 | 2.640625 | 3 | [] | no_license | import argparse, os
import os
import tarfile
import glob
import shutil
import random
import torch
import torchvision.datasets
import os
def few_shot_dataset_mnist(number_of_sample):
    """Build a few-shot MNIST split under ./Few_Shot_Dataset/MNIST.

    Unpacks ./Dataset/MNIST.tar.gz and creates:
      - train/<class>/ holding `number_of_sample` images drawn at random
        from each class of the unpacked training set
      - test/<class>/ holding every test image
    then deletes the unpacked training/testing folders.
    """
    source_path_unzip = './Dataset/MNIST.tar.gz'
    destination_path = './Few_Shot_Dataset'
    real_dir_name = '/mnist_png'
    custom_dir_name = '/MNIST'
    source_path = './Few_Shot_Dataset/MNIST'
    # Unpack the data-set archive. The context manager closes the tar file
    # (the original left the handle open).
    with tarfile.open(source_path_unzip) as tf:
        tf.extractall(destination_path)
    os.rename(destination_path + real_dir_name, destination_path + custom_dir_name)
    # Build the few-shot train/test layout.
    training_path = source_path + '/training'
    testing_path = source_path + '/testing'
    list_of_classes_dir = glob.glob(training_path + '/*', recursive=True)
    list_of_classes_dir_test = glob.glob(testing_path + '/*', recursive=True)
    os.mkdir(source_path + '/train')
    os.mkdir(source_path + '/test')
    for class_dir in list_of_classes_dir:
        class_dir_name = os.path.basename(class_dir)
        list_of_instances = glob.glob(class_dir + '/*', recursive=True)
        os.mkdir(source_path + '/train/' + class_dir_name)
        # NOTE: random.choice can pick the same file twice, so a class may end
        # up with fewer than `number_of_sample` distinct images (this matches
        # the original behavior).
        for _ in range(number_of_sample):
            file_path = random.choice(list_of_instances)
            file_name = os.path.basename(file_path)
            # shutil.copy instead of os.system('cp ...'): portable, safe with
            # spaces in paths, and raises on failure instead of failing silently.
            shutil.copy(file_path, source_path + '/train/' + class_dir_name + '/' + file_name)
    for class_dir in list_of_classes_dir_test:
        class_dir_name_test = os.path.basename(class_dir)
        list_of_instances_test = glob.glob(class_dir + '/*', recursive=True)
        try:
            os.mkdir(source_path + '/test/' + class_dir_name_test)
        except FileExistsError:
            # Narrowed from a bare `except:` so real errors still propagate.
            print("Folder " + class_dir_name_test + " exist!")
        for element in list_of_instances_test:
            shutil.copy(element, source_path + '/test/' + class_dir_name_test + '/' + os.path.basename(element))
    # Drop the full unpacked data set; only train/ and test/ remain.
    shutil.rmtree(training_path)
    shutil.rmtree(testing_path)
def few_shot_dataset_fashion_mnist(number_of_sample):
    """Build a few-shot FashionMNIST split under ./Few_Shot_Dataset/FashionMNIST.

    Downloads the dataset via torchvision, saves `number_of_sample` training
    images per class into train/<label>/ and every test image into
    test/<label>/ as PNG files.
    """
    train_dataset = torchvision.datasets.FashionMNIST(
        root='./Dataset', train=True, download=True)
    test_dataset = torchvision.datasets.FashionMNIST(
        root='./Dataset', train=False, download=True)
    def _ensure_dir(path):
        # Narrowed from the original bare `except:` blocks, which also hid
        # real errors (permissions, bad paths); an existing folder is fine.
        try:
            os.mkdir(path)
        except FileExistsError:
            print("Dir exists")
    _ensure_dir('./Few_Shot_Dataset')
    _ensure_dir('./Few_Shot_Dataset/FashionMNIST')
    _ensure_dir('./Few_Shot_Dataset/FashionMNIST/train')
    _ensure_dir('./Few_Shot_Dataset/FashionMNIST/test')
    classes = train_dataset.classes
    # Per-class counter of how many training images have been saved so far.
    classes_dic = {}
    for i, (el) in enumerate(classes):
        classes_dic.update({el: 0})
        _ensure_dir('./Few_Shot_Dataset/FashionMNIST/train/' + str(i))
        _ensure_dir('./Few_Shot_Dataset/FashionMNIST/test/' + str(i))
    for i, (image, label) in enumerate(train_dataset):
        # Stop once every class has exactly number_of_sample images.
        if set(classes_dic.values()) == set([number_of_sample]):
            break
        if classes_dic[classes[label]] < number_of_sample:
            classes_dic[classes[label]] += 1
            # Class names may contain '/', which would break the path.
            image.save('./Few_Shot_Dataset/FashionMNIST/train/' + str(label) + '/' + classes[label].replace('/', '-') + '-' + str(i) +'.png')
    for i, (image, label) in enumerate(test_dataset):
        image.save('./Few_Shot_Dataset/FashionMNIST/test/' + str(label) + '/' + classes[label].replace('/', '-')+ '-' + str(i) +'.png')
def few_shot_dataset_cifar(number_of_sample):
    """Build a few-shot CIFAR-10 split under ./Few_Shot_Dataset/CIFAR.

    Downloads the dataset via torchvision, saves `number_of_sample` training
    images per class into train/<label>/ and every test image into
    test/<label>/ as PNG files.
    """
    train_dataset = torchvision.datasets.CIFAR10(
        root='./Dataset', train=True, download=True)
    test_dataset = torchvision.datasets.CIFAR10(
        root='./Dataset', train=False, download=True)
    def _ensure_dir(path):
        # Narrowed from the original bare `except:` blocks, which also hid
        # real errors (permissions, bad paths); an existing folder is fine.
        try:
            os.mkdir(path)
        except FileExistsError:
            print("Dir exists")
    _ensure_dir('./Few_Shot_Dataset')
    _ensure_dir('./Few_Shot_Dataset/CIFAR')
    _ensure_dir('./Few_Shot_Dataset/CIFAR/train')
    _ensure_dir('./Few_Shot_Dataset/CIFAR/test')
    classes = train_dataset.classes
    # Per-class counter of how many training images have been saved so far.
    classes_dic = {}
    for i, (el) in enumerate(classes):
        classes_dic.update({el: 0})
        _ensure_dir('./Few_Shot_Dataset/CIFAR/train/' + str(i))
        _ensure_dir('./Few_Shot_Dataset/CIFAR/test/' + str(i))
    for i, (image, label) in enumerate(train_dataset):
        # Stop once every class has exactly number_of_sample images.
        if set(classes_dic.values()) == set([number_of_sample]):
            break
        if classes_dic[classes[label]] < number_of_sample:
            classes_dic[classes[label]] += 1
            image.save('./Few_Shot_Dataset/CIFAR/train/' + str(label) + '/' + classes[label] + '-' + str(i) +'.png')
    for i, (image, label) in enumerate(test_dataset):
        image.save('./Few_Shot_Dataset/CIFAR/test/' + str(label) + '/' + classes[label] + '-' + str(i) +'.png')
"""parsing and configuration"""
def parse_args():
    """Build the CLI parser, parse argv, and validate the result via check_args."""
    arg_parser = argparse.ArgumentParser(description="Generate Few Shot Dataset")
    arg_parser.add_argument(
        '--dataset',
        type=str,
        default='mnist',
        choices=['mnist', 'fashion_mnist', 'cifar'],
        help='Generate Few Shot for Dataset',
    )
    arg_parser.add_argument(
        '--number_of_sample',
        type=int,
        default=10,
        choices=[1, 5, 10, 20, 30],
        help='The number of the samples of each class',
    )
    parsed = arg_parser.parse_args()
    return check_args(parsed)
"""checking arguments"""
def check_args(args):
    """Sanity-check parsed CLI arguments.

    Prints a warning for invalid values but (matching the original behavior)
    still returns `args` unchanged.
    """
    # --number_of_sample
    try:
        assert args.number_of_sample >= 1
    except AssertionError:
        # Narrowed from a bare `except:`, which would also have hidden
        # AttributeError and every other unrelated failure.
        print('number of samples must be larger than or equal to one')
    # --dataset  (the original comment said --batch_size, a copy-paste slip)
    try:
        assert args.dataset in ['mnist', 'fashion_mnist', 'cifar']
    except AssertionError:
        print('Dataset should be one of the following: [mnist, fashion_mnist, cifar]')
    return args
"""main"""
def main():
    """Entry point: parse CLI options and dispatch to the chosen dataset builder."""
    args = parse_args()
    if args is None:
        exit()
    # Table-driven dispatch instead of an if/elif chain.
    builders = {
        'mnist': few_shot_dataset_mnist,
        'fashion_mnist': few_shot_dataset_fashion_mnist,
        'cifar': few_shot_dataset_cifar,
    }
    builder = builders.get(args.dataset)
    if builder is None:
        raise Exception("[!] There is no option for " + args.dataset)
    builder(args.number_of_sample)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| true |
cf267edfa60b9a9000e42155dcc8219c0d85b66f | Python | ansd15000/baekjoon | /step/level 7 (String)/5622_다이얼.py | UTF-8 | 379 | 3.390625 | 3 | [] | no_license | import sys
# Each entry is the first letter of a phone-dial letter group
# ('A'-'C' -> 2, 'D'-'F' -> 3, ... 'W'-'Z' -> 9); '[' is the ASCII
# character right after 'Z', closing the last group.
ascdial = ['A', 'D', 'G', 'J', 'M', 'P', 'T', 'W', '[']
a = sys.stdin.readline().rstrip()
result = 0
for i in a:
    # Find the group index j of letter i via the half-open range
    # [ascdial[j], ascdial[j+1]).
    for j in range(len(ascdial)):
        if i >= ascdial[j] and i < ascdial[j+1]:
            result += j
    result += 3  # letters start at dial 2 and dialing digit d takes d+1 seconds, so group j costs j+3
print(result)
| true |
6a24810ad7790195b21da9c9dd81114fa5328913 | Python | UWNETLAB/Nate | /nate/svonet/svo_degree_over_time.py | UTF-8 | 10,339 | 2.59375 | 3 | [
"MIT"
] | permissive | from nate.svonet.graph_svo import generate_ticks, find_max_burst
import networkx as nx
import stop_words as sw
import copy
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import MaxNLocator
import numpy as np
from multiprocessing import Process, Queue
from os import cpu_count
def get_degree_for_slice(
        q: Queue,
        G,
        edge_burst_dict,
        time_slice_start,
        time_slice_end,
        minimum_burst_level,
        stops,
        overlap_threshold,
        return_edge_overlaps,
        list_top,
        time_label):
    """Worker: compute the top-degree SVO nodes for one time slice.

    Copies the node-only graph G, adds an edge between any two tuples that
    (a) burst above `minimum_burst_level` inside [time_slice_start,
    time_slice_end] and (b) share a non-stopword end term, then pushes the
    `list_top` highest-degree nodes -- and, if requested, their average edge
    overlap -- onto queue `q`. Intended to run in a multiprocessing.Process.
    """
    # Work on a private copy so the shared template graph stays edge-free.
    graphCopy = copy.deepcopy(G)
    for key in edge_burst_dict:
        burst_level = find_max_burst(edge_burst_dict[key], time_slice_start, time_slice_end)
        if burst_level > minimum_burst_level:
            for node in graphCopy.nodes():
                # Compare the end terms (indices 0 and -1, presumably the
                # subject and object of the SVO tuple) of the bursting tuple
                # against those of every node, ignoring stopwords.
                for j in [0, -1]:
                    for k in [0, -1]:
                        if key[j] == node[k] and key[j] not in stops:
                            overlap = len(set(key).intersection(set(node)))
                            if overlap >= overlap_threshold:
                                graphCopy.add_edge(key, node, overlap=overlap)
    # A bursting tuple always matches itself; drop the resulting self-loops.
    graphCopy.remove_edges_from(nx.selfloop_edges(graphCopy))
    degree_list = list(graphCopy.degree)
    degree_list.sort(key=lambda x: x[1], reverse=True)
    degree_list = degree_list[0:list_top]
    overlap_list = []
    if return_edge_overlaps:
        # Average the 'overlap' attribute over each top node's incident edges.
        for entry in degree_list[0:list_top]:
            overlap_sum = []
            for edge in graphCopy.edges(entry[0]):
                overlap_sum.append(graphCopy.edges[edge]['overlap'])
            if len(overlap_sum) > 0:
                avg = round(sum(overlap_sum) / len(overlap_sum), 2)
            else:
                avg = 0
            overlap_list.append((entry[0], avg))
    # time_slice_end is included so the parent can re-sort the (unordered)
    # queue results chronologically.
    if return_edge_overlaps:
        q.put((time_label, time_slice_end, degree_list, overlap_list))
    else:
        q.put((time_label, time_slice_end, degree_list))
class SVODegreeOverTimeMixin():
    """Mixin adding degree-over-time analysis and plotting for SVO burst data.

    Expects the host class to populate offset_dict (token -> offsets) and
    edge_burst_dict (SVO tuple -> burst intervals).
    """
    def __init__(self):
        # Bare annotations: these only declare the attributes the mixin
        # expects; no values are assigned here.
        self.offset_dict:dict
        self.edge_burst_dict:dict
        self.s: int
        self.gamma: int
        self.from_svo: bool
        self.lookup: dict
    def top_svo_degree(
            self,
            number_of_slices: int = 8,
            list_top: int = 10,
            minimum_burst_level: int = 0,
            return_edge_overlaps: bool = True,
            overlap_threshold: int = 1):
        """Rank SVO tuples by co-mention degree within each time slice.

        Splits the observed offsets into `number_of_slices` windows, then for
        each window builds (in a parallel worker process) a graph linking
        tuples that burst above `minimum_burst_level` and share at least
        `overlap_threshold` terms, keeping the `list_top` highest-degree
        nodes.

        Args:
            number_of_slices: number of time windows to evaluate.
            list_top: top-degree nodes kept per window (None keeps all).
            minimum_burst_level: minimum burst intensity for a tuple to
                count as present in a window.
            return_edge_overlaps: also return each top node's average
                edge term-overlap.
            overlap_threshold: shared-term count (1 or 2) required to
                draw an edge.

        Raises:
            Exception: if overlap_threshold is not 1 or 2.

        Returns:
            dict of time label -> top degree list, plus the overlap dict
            when return_edge_overlaps is True.
        """
        if overlap_threshold > 2 or overlap_threshold < 1:
            raise Exception("Overlap Filter must be 1 or 2.")
        stops = sw.get_stop_words("english")
        # Create list of time slices:
        offset_set = set()
        for key in self.offset_dict:
            for offset in self.offset_dict[key]:
                offset_set.add(offset)
        time_slices, time_labels = generate_ticks(offset_set, number_of_ticks=(number_of_slices))
        # Create network consisting of all Subjects and Objects:
        G = nx.Graph()
        for entry in self.edge_burst_dict:
            G.add_node(entry)
        if list_top == None:
            list_top = len(self.edge_burst_dict)
        # Fan out one worker process per time slice; results come back
        # through the shared queue in arbitrary completion order.
        q = Queue()
        processes = []
        for i in range(1, len(time_slices)):
            time_slice_start = time_slices[i-1]
            time_slice_end = time_slices[i]
            time_label = time_labels[i]
            t = Process(
                target = get_degree_for_slice,
                args= (
                    q,
                    G,
                    self.edge_burst_dict,
                    time_slice_start,
                    time_slice_end,
                    minimum_burst_level,
                    stops,
                    overlap_threshold,
                    return_edge_overlaps,
                    list_top,
                    time_label
                )
            )
            processes.append(t)
            t.start()
        result_list = []
        for i in range(1, len(time_slices)):
            result_list.append(q.get())
        top_degree_by_slice = {}
        edge_overlap = {}
        # Re-sort chronologically by each result's slice end time.
        result_list = sorted(result_list, key = lambda x: x[1])
        for result in result_list:
            time_label = result[0]
            degree_list = result[2]
            top_degree_by_slice[time_label] = degree_list
            if return_edge_overlaps:
                edge_overlap[time_label] = result[3]
        if return_edge_overlaps:
            return top_degree_by_slice, edge_overlap
        else:
            return top_degree_by_slice
    def specific_svo_degree(self,
                            tokens: list,
                            number_of_slices: int = 15,
                            minimum_burst_level: int = 0,
                            overlap_threshold: int = 1):
        """Track the degree of specific SVO tuples across time slices.

        Runs top_svo_degree over the full node list and filters each
        slice's ranking down to the requested `tokens`.

        Args:
            tokens: SVO tuple(s) of interest (a single tuple is wrapped
                in a list automatically).
            number_of_slices: number of time windows to evaluate.
            minimum_burst_level: burst threshold passed through.
            overlap_threshold: edge threshold passed through.

        Returns:
            dict of time label -> [(token, degree), ...] for the tokens.
        """
        if isinstance(tokens, list) == False:
            tokens = [tokens]
        full_lists = self.top_svo_degree(number_of_slices=number_of_slices,
                                         list_top=None,
                                         minimum_burst_level=minimum_burst_level,
                                         return_edge_overlaps=False,
                                         overlap_threshold=overlap_threshold,
                                         )
        token_rank_dict = {}
        for day in full_lists:
            v = [item for item in full_lists[day] if item[0] in tokens]
            token_rank_dict[day] = v
        return token_rank_dict
    def plot_top_svo_degree(
            self,
            number_of_slices: int = 8,
            list_top: int = 10,
            minimum_burst_level: int = 0,
            overlap_threshold: int = 1,
            filename: str = False,):
        """Draw one horizontal bar chart of top-degree nodes per time slice.

        Args:
            number_of_slices: number of time windows to evaluate.
            list_top: bars (nodes) shown per chart.
            minimum_burst_level: burst threshold passed through.
            overlap_threshold: edge threshold passed through.
            filename: if truthy, charts are saved as <filename><i>.pdf
                instead of being shown interactively.
        """
        data = self.top_svo_degree(
            number_of_slices = number_of_slices,
            list_top = list_top,
            minimum_burst_level = minimum_burst_level,
            return_edge_overlaps = False,
            overlap_threshold=overlap_threshold,)
        date_names = []
        time_slices = []
        for k, v in data.items():
            date_names.append(k)
            time_slices.append(v)
        for i in range(1, len(date_names)):
            x = np.arange(list_top)
            values = []
            names = []
            for top_degrees in time_slices[i]:
                values.append(top_degrees[1])
                names.append(top_degrees[0])
            # Reverse so the highest-degree node ends up at the top of barh.
            values.reverse()
            names.reverse()
            fig, ax = plt.subplots()
            fig.set_figwidth(6)
            fig.set_figheight(10)
            fig.suptitle('{} to {}'.format(date_names[i-1], date_names[i]), fontsize=12, ha="center")
            ax.xaxis.set_major_locator(MaxNLocator(integer=True))
            plt.barh(x, values, color='#32363A')
            plt.yticks(x, names)
            if filename:
                plt.savefig(str(filename) + str(i) + ".pdf")
            else:
                plt.show()
    def plot_specific_svo_degree(self,
                                 tokens: list,
                                 number_of_slices: int = 15,
                                 minimum_burst_level: int = 0,
                                 overlap_threshold: int = 1,
                                 plot_type="line",
                                 filename: str = False,):
        """Plot each requested token's degree over time as a line or bar chart.

        Args:
            tokens: SVO tuple(s) of interest.
            number_of_slices: number of time windows to evaluate.
            minimum_burst_level: burst threshold passed through.
            overlap_threshold: edge threshold passed through.
            plot_type: "line" or "bar".
            filename: if truthy, charts are saved as <filename><token>.pdf
                instead of being shown interactively.

        Raises:
            Exception: if plot_type is neither "line" nor "bar".
        """
        if isinstance(tokens, list) == False:
            tokens = [tokens]
        if plot_type != "line" and plot_type != "bar":
            raise Exception("`plot_type` must be one of 'line' or 'bar'")
        data = self.specific_svo_degree(tokens=tokens,
                                        number_of_slices=number_of_slices,
                                        minimum_burst_level=minimum_burst_level,
                                        overlap_threshold=overlap_threshold,
                                        )
        # Invert date -> token rankings into token -> [(date, degree), ...].
        inverted_dict = {}
        for token in tokens:
            full_list = []
            for date, degree_list in data.items():
                degree = [item[1] for item in degree_list if item[0] == token]
                full_list.append((date, degree[0]))
            inverted_dict[token] = full_list
        x = np.arange(number_of_slices)
        for k, v in inverted_dict.items():
            values = [item[1] for item in v]
            # Break "date, time" labels onto two lines for readability.
            dates = [item[0].replace(", ", "\n") for item in v]
            fig, ax = plt.subplots()
            fig.set_figwidth(10)
            fig.set_figheight(6)
            fig.suptitle("'{}'".format(k), fontsize=12, ha="center")
            ax.yaxis.set_major_locator(MaxNLocator(integer=True))
            if plot_type == "bar":
                plt.bar(x, values, color='#32363A')
            elif plot_type == "line":
                plt.plot(x, values, color='#32363A')
            plt.xticks(x, dates)
            if filename:
                plt.savefig(str(filename) + str(k) + ".pdf")
            else:
                plt.show()
8fe0e1dcb5109242bf10fd2644bc37cbfa1242f7 | Python | duckythescientist/fixedint | /fixedint/tests/test_promotions.py | UTF-8 | 783 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python3
from nose.tools import assert_raises
from fixedint import *
def test_ordered_promotion():
    """Result type of mixed fixedint/int arithmetic depends on operand order."""
    spam = uint16(42)
    # fixedint on the left: the fixed-width type wins, result stays uint16.
    eggs = spam + 1
    assert isinstance(eggs, uint16)
    assert eggs == 43
    # plain int on the left: the result is a plain int, not uint16.
    eggs = 1 + spam
    assert not isinstance(eggs, uint16)
    assert eggs == 43
def test_soft_cast():
    """Floats with integral values are softly absorbed into the fixedint type."""
    spam = uint16(42)
    eggs = spam + 1
    assert isinstance(eggs, uint16)
    assert eggs == 43
    # Adding 1.0 (an integral-valued float) still yields a uint16...
    eggs = spam + 1.0
    assert isinstance(eggs, uint16)
    assert eggs == 43
    # ...but a fractional float forces a plain float result.
    eggs = spam + 1.5
    assert isinstance(eggs, float)
    assert eggs == 43.5
def test_hard_cast():
    """Operations that cannot keep an exact integer result raise TypeError."""
    spam = uint16(42)
    assert spam // 5 == 8
    assert spam // 5.0 == 8
    # Floor division by a fractional float and bitwise AND with a float are
    # rejected outright instead of silently converting.
    assert_raises(TypeError, lambda: spam // 1.5)
    assert_raises(TypeError, lambda: spam & 1.5)
| true |
0dd7f0dfa59923044083f3111660240b63d7b0e3 | Python | Mjheverett/python_dictionaries | /Medium_Exercises/1letter_summary.py | UTF-8 | 227 | 3.90625 | 4 | [] | no_license | input_string = input("Please enter a word: ")
from collections import Counter

# Count the occurrences of each character in the user's input.
# Counter preserves first-seen insertion order, so the printed dict matches
# what the original manual increment loop produced.
string_dictionary = dict(Counter(input_string))
print(string_dictionary)
9f0a9becd0ff4db1ddea72ceb6250323d26d6615 | Python | baitik07/project1 | /Dm1.py | UTF-8 | 3,242 | 4.1875 | 4 | [] | no_license | # if 12**3 > 13*7:
# print("12**3 bigger")
# elif 12**3 < 13*7:
# print("13*7 bigger")
# else:
# print("Equal")
# if 4**5 > 512+512:
# print("4**5 bigger")
# elif 4**5 < 512+512:
# print("512+512 bigger")
# else:
# print("Equal")
# a = 17925
# print("a = 17925")
# if a < 34**2:
# print("34**2 igger")
# elif a == 34**2:
# print("Equal")
# else:
# print("a is bigger")
# #
# if a < 26*3:
# print("26*3 igger")
# elif a == 26*3:
# print("Equal")
# else:
# print("a is bigger")
# #
# if a < 17*33:
# print("17*33 igger")
# elif a == 17*33:
# print("Equal")
# else:
# print("a is bigger")
# #
# if a < 4394*4:
# print("4394*4 igger")
# elif a == 4394*4:
# print("Equal")
# else:
# print("a is bigger")
# num = int(input("first number for sum: "))
# num1 = int(input("second number for sum: "))
# num2 = int(input("any number for multiplication of result: "))
#
# print("The result by our formula: ")
# result = (((num + num1)*num2)**2) - 193432
# print(result)
# a = int(input("A number: "))
# b = int(input("B number: "))
# c = int(input("C number: "))
#
# if b < a < c:
# print("True number!!!")
# else:
# print("Your number is not correct!!!\nTry again)")
# a = 7
# b = 3
# c = 4.8
# print("Your number: a=7 , b=3 , c=4.8")
# result = (a % b) * c
# print(result)
# a = int(input("Any number: "))
# b = int(input("Any number: "))
# if a == b:
# print("True number: they are equal!")
# else:
# print("False: number is not true!")
# a = int(input("Any number: "))
# b = int(input("Any number: "))
# if a != b:
# print("True number")
# else:
# print("False: number is not true!")
##### Можно предположить, что правильный ответ -3,
# но в этом случае умножив -3 * 2 мы получим -6.
# Чтобы получить исходное -7 нужно к результату прибавить число -1,
# но остаток не может быть отрицательным по определению(r>=0).
##### По этому в данном случае остаток равен 1 и частное равно -4.
# ^^^
# a = -21
# b = 10
# result = a // b
# print(result)
# first_year = int(input('Type your birth year: '))
# print("Now it is 2020 year")
# this_year = 2020
# print("After 2 years you will be: ")
# result = (2020-first_year)+2
# print(result)
# print("2 years ago you was: ")
# result1 = (2020-first_year)-2
# print(result1)
# print("Let's try to know a persent of live of a century which we path!")
# age = int(input("Your age: "))
# century = 100
# result = round((age/century)*100)
# print("The percent of a century which you path:", result,'%')
# print("Your numbers: 25, 75, 10, 95\nTrying to find the arithmetic nubmer)")
# result = (25+75+10+95)/4
# print(result, ": arithmetic mean")
# a = float(input("Write any number wth float: "))
# b = float(input("Write any number wth float: "))
# c = float(input("Write any number wth float: "))
# result = (a+b)*c
# print(result)
# a = 256
# b = 10
# c = round(32/2)
# d = 5
# e = 23
# print(a,';',b,';',c,';',d,';',e)
#
# result = (a-e**(b/d))%c
# print(round(result))
| true |
3563bfc95963c58b5d9c9d479e7ebdea4940ade4 | Python | Surbeivol/daily-coding-problems | /problems/number_possible_binary_topologies.py | UTF-8 | 504 | 3.875 | 4 | [
"MIT"
] | permissive | """
Write a function that takes in a non-negative integer n and that returns the number of possible Binary Tree configuration, irrespective of node values. For instance, there exist only two Binary Tree topologies when n is equal to 2: a root node with a left node, and a root node with a right node. Note than wen n is equal to 0, there is one topology that can be created: the None node.
"""
def num_binary_tree_topologies(numb):
    """Return the number of distinct binary-tree shapes with `numb` nodes.

    This is the `numb`-th Catalan number, computed by dynamic programming:
    a tree of n nodes is a root plus a left subtree of k nodes and a right
    subtree of n-1-k nodes, so C(n) = sum_k C(k) * C(n-1-k) with C(0) = 1
    (the empty/None tree counts as one topology, as the problem requires).
    """
    counts = [1] + [0] * numb
    for n in range(1, numb + 1):
        counts[n] = sum(counts[left] * counts[n - 1 - left] for left in range(n))
    return counts[numb]
# test
assert num_binary_tree_topologies(3) == 5  # three nodes -> 5 shapes
print('OK')
| true |
d20f2d0266696f8c9d5fd6730e467e7666d1954a | Python | fregataa/Algorithm-Python | /Programmers/Traffic.py | UTF-8 | 849 | 2.671875 | 3 | [] | no_license | def solution(lines):
    # Find the maximum number of requests whose processing intervals overlap
    # any single 1-second (1000 ms) window.
    answer = 0
    jobs = []      # [start_ms, end_ms] per request (inclusive, milliseconds)
    timeline = []  # candidate 1s windows anchored at each request's end time
    for line in lines:
        tmp = line.split()
        s = tmp[1].split(':')
        hour, minute, sec = map(float, s)
        t = float(tmp[2].strip('s'))  # processing duration, e.g. "2.0s" -> 2.0
        # Log lines give the *completion* time; start = end - duration + 1 ms.
        end = (hour*3600 + minute*60 + sec)*1000
        start = end - t*1000 + 1
        jobs.append([start, end])
        # Only windows that end or begin at a request boundary can be maximal,
        # so two candidate windows are generated per request.
        d1 = {'left': end-999, 'right': end, 'count': 0}
        d2 = {'left': end, 'right': end+999, 'count': 0}
        timeline.append(d1)
        timeline.append(d2)
    for t in timeline:
        for i in range(len(jobs)):
            l, r = jobs[i]
            if r < t['left'] or l > t['right']:
                continue  # request finished before / started after this window
            t['count'] += 1
        answer = max(answer, t['count'])
    return answer
# Quick manual check: both requests overlap the same 1-second window.
l = [
    '2016-09-15 01:00:04.002 2.0s',
    '2016-09-15 01:00:07.000 2s'
]
print(solution(l))
5e712fe79c5f5285f961c0dded4095a91ea8a61a | Python | withjeffrey/PythonLearning | /6Class.py | UTF-8 | 4,677 | 3.8125 | 4 | [] | no_license |
# coding: utf-8
# In[3]:
# 6-1: defining a class and creating an instance
class MyClass:
    "MyClass help."
myclass = MyClass()
print(myclass.__doc__)  # print the class docstring
help(myclass)  # show the interactive help text for the instance
# In[4]:
# 6-2: defining and using methods of a class
class SmplClass:
    def info(self):
        print('my class')
    def mycacl(self,x,y):
        # Return the sum of the two arguments.
        return x + y
sc = SmplClass()
sc.info()
print(sc.mycacl(3,4))
# In[6]:
# 6-3: __init__() initializes instance data when the class is instantiated
class DemoInit:
    def __init__(self,x,y=0):
        self.x = x
        self.y = y
    def mycacl(self):
        # Sum of the two values stored on the instance.
        return self.x + self.y
dia = DemoInit(3)  # y falls back to its default of 0
print(dia.mycacl())
dib = DemoInit(3,7)
print(dib.mycacl())
# In[1]:
# Example 6-4: calling the class's own methods and a global function from a class
def coord_chng(x,y):
    # Normalize a displacement to non-negative components.
    return (abs(x),abs(y))
class Ant:
    def __init__(self,x=0,y=0,):
        self.x = x
        self.y = y
        self.disp_point()
    def move(self,x,y):
        # Take the absolute displacement, apply it, and report the new spot.
        x,y = coord_chng(x,y)
        self.edit_point(x,y)
        self.disp_point()
    def edit_point(self,x,y):
        self.x += x
        self.y += y
    def disp_point(self):
        print("current position: (%d,%d)" % (self.x,self.y))
ant_a = Ant()
ant_a.move(2,4)
ant_a.move(-9,6)  # the negative step is flipped to +9 by coord_chng
# In[2]:
# Example 6-5: defining and using class attributes vs. instance attributes
# To keep an attribute or method from being accessed or changed outside the
# class, a double-underscore name can be used.
# Changing the class attribute (through any object) changes the value seen
# by every instance.
class Demo_Property:
    class_name = "Demo_Property"  # class attribute (shared by all instances)
    def __init__(self,x=0):
        self.x = x  # instance attribute (per object)
    def class_info(self):  # method: print both attribute values
        print("类变量值:",Demo_Property.class_name)
        print("实例变量值:",self.x)
    def chng(self,x):  # method: change the instance attribute
        self.x = x
    def chng_cn(self,name):  # method: change the shared class attribute
        Demo_Property.class_name = name
dpa = Demo_Property()  # instantiate the class twice
dpb = Demo_Property()
print("初始化两个实例")
dpa.class_info()
dpb.class_info()
print("修改实例属性")
print("修改dpa实例属性")
dpa.chng(3)
dpa.class_info()
dpb.class_info()
print("修改dpb实例属性")
dpb.chng(10)
dpa.class_info()
dpb.class_info()
print("修改类属性")
print("修改dpa类属性")
dpa.chng_cn('dpa')
dpa.class_info()
dpb.class_info()
print('修改dpb类属性')
dpb.chng_cn('dpb')
dpa.class_info()
dpb.class_info()
# In[3]:
# Example 6-6: static methods and class methods (the other two method kinds)
class DemoMthd:
    @staticmethod
    def static_mthd():
        # No implicit first argument: bound to neither class nor instance.
        print("调用了静态方法!")
    @classmethod
    def class_mthd(cls):
        # Receives the class itself as `cls`.
        print("调用了类方法!")
# Both kinds can be called on the class...
DemoMthd.static_mthd()
DemoMthd.class_mthd()
# ...or on an instance.
dm = DemoMthd()
dm.static_mthd()
dm.class_mthd()
# In[4]:
# Example 6-7: class inheritance
class Ant:
    def __init__(self,x=0,y=0,color='black'):
        self.x = x
        self.y = y
        self.color = color
    def crawl(self,x,y):
        self.x = x
        self.y = y
        print('crawl...')
        self.info()
    def info(self):
        print('当前位置:(%d,%d)' % (self.x,self.y))
    def attack(self):
        print('用嘴咬!')
class FlyAnt(Ant):
    def attack(self):  # attack() is overridden here
        print('用尾针!')
    def fly(self,x,y):
        print('fly...')
        self.x = x
        self.y = y
        self.info()
flyant = FlyAnt(color='red')  # inherited __init__ with a keyword argument
flyant.crawl(3,5)             # inherited method
flyant.fly(10,14)             # subclass-only method
flyant.attack()               # the override wins over the base method
# In[9]:
# Example 6-8: multiple inheritance (method resolution follows base-class order)
class PrntA:
    namea = 'PrntA'
    def set_value(self,a):
        self.a = a
    def set_namea(self,namea):
        PrntA.namea = namea
    def info(self):
        print('PrntA:%s,%s' % (PrntA.namea,self.a))
class PrntB:
    nameb = 'PrntB'
    def set_nameb(self,nameb):
        # BUG FIX: this used to assign PrntA.nameb, so PrntB.nameb (the value
        # info() prints) never changed. Assign to PrntB where it belongs.
        PrntB.nameb = nameb
    def info(self):
        print('PrntB:%s' % (PrntB.nameb,))
class Sub(PrntA,PrntB):
    # PrntA is listed first, so Sub.info() resolves to PrntA.info().
    pass
class Sub2(PrntB,PrntA):
    # PrntB is listed first, so Sub2.info() resolves to PrntB.info().
    pass
class Sub3(PrntA,PrntB):
    def info(self):  # overridden: call both parent versions explicitly
        PrntA.info(self)
        PrntB.info(self)
print('使用第一个子类:')
sub = Sub()
sub.set_value('aaaa')
sub.info()
sub.set_nameb('BBBB')
sub.info()
print('使用第二个子类:')
sub2 = Sub2()
sub2.set_value('aaaa')
sub2.info()
sub2.set_nameb('BBBB')
sub2.info()
print('使用第三个子类:')
sub3 = Sub3()
sub3.set_value('aaaa')
sub3.info()
sub3.set_nameb('BBBB')
sub3.info()
| true |
e350d0b1b9345d1fa2dce58cf482c0c0ca2ae83c | Python | michielborghuis/IPASSBackUp | /ipass6/MainGUI.py | UTF-8 | 1,275 | 2.96875 | 3 | [] | no_license | from tkinter import *
from ipass6.GUI import GUI
class MainGUI:
def __init__(self):
self.root = Tk()
self.label1 = Label(self.root, text='Advanced SIR model for disease spread.',
font=('Calibri', 20)).grid(row=0, columnspan=2)
self.label2 = Label(self.root, text='Would you like to enter the population manually or to choose a country and'
' a year corresponding to a population?').grid(row=1, columnspan=2)
self.buttonManually = Button(self.root, text='Manually', command=self.open_manually).grid(row=2, column=0)
self.buttonAutomatic = Button(self.root, text='Automatic', command=self.open_automatic).grid(row=2, column=1)
def open_automatic(self):
"""Opens the second window where you can choose the population by selecting a country and year."""
automatic = GUI(0)
automatic.run()
def open_manually(self):
"""Opens the second window where you have to choose the population manually."""
manually = GUI(1)
manually.run()
def run(self):
"""Starts the main window."""
self.root.geometry('610x400')
self.root.iconbitmap('COVID-19_icon.ico')
self.root.mainloop()
| true |
76d2a62df381064442b924e2541fd3847cf6a75d | Python | sreekanesh/mycaptain | /positive_int.py | UTF-8 | 421 | 3.40625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 21:22:45 2021
@author: sreekanesh
"""
# Read n, then collect n integers from the user into lst.
lst=[]
n=int(input('enter the number of elements :'))
for i in range(0,n):
    elements=(int(input()))
    lst.append(elements)
def positive_num():
    """Remove all negative numbers from the global list `lst`, then print it.

    BUG FIX: the original iterated `lst` while calling lst.remove() on it,
    which skips the element right after each removal, so consecutive
    negatives survived (e.g. [-1, -2, 3] kept -2). Rebuilding via slice
    assignment keeps the same list object and removes every negative.
    """
    lst[:] = [item for item in lst if item >= 0]
    print(lst)
positive_num()
fe14d54a668940e5b01c3cdec5b54643af0af6f8 | Python | yukselh20/Python | /btkEgitim/tekrar-.py | UTF-8 | 896 | 3.71875 | 4 | [] | no_license | sampleString = """ Phyton's name does not come from a
'snake'""" # 3 tırnak işareti ile yazılan stringler aynı şekilde bastırılır.
print(sampleString)
name = "Atilla"
surname = "İlhan"
# f-string formatting: plain interpolation, then minimum width, then precision.
formattedMassage = f"{name} [{surname}] is poet"
formattedMassage1 = f"{name:10} [{surname:10}] is poet"  # pad each name to width 10
formattedMassage2 = f"{name:.2} [{surname:.2}] is poet"  # keep only the first 2 chars
print(formattedMassage)
print(formattedMassage1)
print(formattedMassage2)
print("******************************************************************************")
example = "this is an example"
# Alignment specs: *> right-aligns, *< left-aligns, *^ centers -- all padded with '*'.
print(f"{example:*>50}")
print(f"{example:*<50}")
print(f"{example:*^50}")
print(f"{example:*^50}")
print("******************************************************************************")
userString = input("Enter your string: ")
# Rotate: move the last 3 characters in front of the first 3.
userString = userString[-3: ] + userString[0:3]
# takes the last 3 characters
print(userString.upper())
| true |
d2d44650350ad2e9d684f09637afd5bd2d4f110e | Python | kundan4U/ML-With-Python- | /ML With python/practice/p2.py | UTF-8 | 112 | 3.296875 | 3 | [] | no_license | #Areacal
# Compute the area of a rectangle from user-supplied dimensions.
# NOTE(review): eval() on raw input executes arbitrary Python expressions --
# unsafe on untrusted input; float(input(...)) would be the safe replacement.
n1=eval(input("plese enter length"))
n2=eval(input("plese enter weight"))
a=n1*n2
print("Area is :",a)
ff286c17f17e15b8fe8ff17145b65decaadeeab5 | Python | kasem777/Python-codeacademy | /Loops/over9000.py | UTF-8 | 895 | 4.71875 | 5 | [] | no_license | # Over 9000
# Create a function named over_nine_thousand() that takes a list of numbers
# named lst as a parameter.
# The function should sum the elements of the list until the sum is greater
# than 9000. When this happens, the function should return the sum.
# If the sum of all of the elements is never greater than 9000,
# the function should return total sum of all the elements.
# If the list is empty, the function should return 0.
# For example, if lst was [8000, 900, 120, 5000],
# then the function should return 9020.
# Write your function here
def over_nine_thousand(lst):
    """Sum lst's elements in order, stopping once the running total exceeds 9000.

    Returns the total at that point; if the sum never exceeds 9000 the sum of
    all elements is returned, and an empty list yields 0.

    Cleanups over the original: the `break` after `return` was unreachable,
    the explicit empty-list branch was redundant (the loop body never runs),
    and the index-based loop is replaced by direct iteration.
    """
    total = 0
    for value in lst:
        total += value
        if total > 9000:
            break  # stop summing as soon as we pass 9000
    return total

# Uncomment the line below when your function is done
print(over_nine_thousand([8000, 900, 120, 5000]))
| true |
f8220dd8c996fceb1278ff9a4158c04c8d514301 | Python | alagoutte/pyaoscx | /pyaoscx/exceptions/parameter_error.py | UTF-8 | 752 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | # (C) Copyright 2019-2021 Hewlett Packard Enterprise Development LP.
# Apache License 2.0
from pyaoscx.exceptions.verification_error import VerificationError
class ParameterError(VerificationError):
    """
    Exception raised when a function or method receives invalid parameters,
    mainly from user code calling the API with wrong arguments.
    """
    def __init__(self, *args):
        if args:
            # First positional argument names the module/parameter at fault;
            # the remaining args go to VerificationError (which presumably
            # stores them as self.message -- TODO confirm against the base).
            super().__init__(*args[1:])
            self.module = args[0]
        else:
            # NOTE(review): self.module is never set on this path; __str__
            # relies on self.message being falsy so it never reads it.
            self.message = None
    def __str__(self):
        if self.message:
            return "Parameter Error: {0} detail: {1}".format(
                self.module, self.message)
        else:
            return "Parameter Error"
| true |
7e919fa2805499e74972fe9fc1d10eacc1079d2f | Python | AsPhilosopher/tensorflow | /ml/haar-adaboost.py | UTF-8 | 1,829 | 3.140625 | 3 | [] | no_license | # haar 特征 = 像素经过运算得到的结果(具体值 向量 矩阵 多维)
# How do features distinguish the target? e.g. via threshold decisions.
# Where does the decision threshold come from? Machine learning.
# Pipeline: 1) features 2) decision rule 3) learn the decision rule.
# Haar uses a family of templates that slide and scale over the image,
# which is computationally heavy.
# Example for a 1080x720 image with 10x10 templates:
#   ops = 14 templates * 20 scales * (1080/2 * 720/2 positions) * (~100 ops each)
#       = 5-10 billion operations per frame
# Real-time at 15 fps => on the order of 100 billion ops/s, which is why the
# integral image trick is used to accelerate Haar feature computation.
# Haar features + AdaBoost => the classic face detector.
# AdaBoost keeps boosting (re-weighting) the misclassified samples.
# Training stops when either 1) the iteration count is reached or
# 2) the error probability p drops below a target.
# Detection uses a cascade of strong classifiers (typically 15-20); a window
# is accepted only if it passes every one (haar > T1 and haar > T2 ...).
# Strong classifiers make the decision; each is built from weak classifiers
# (themselves composed of several node comparisons).
# AdaBoost training sketch:
#   1. initialize a uniform weight distribution over the samples
#      (e.g. apple apple apple banana -> 0.1 0.1 0.1 0.1)
#   2. sweep decision thresholds and keep the one with minimal error
#      probability (minP = t); compute the weak classifier weight G1(x);
#      update the sample weight distribution; repeat until a termination
#      condition is met.
import cv2
# Steps: 1) load cascade XMLs 2) load the jpg 3) convert to gray
# 4) detect faces/eyes 5) draw rectangles.
face_xml = cv2.CascadeClassifier('../xml/haarcascade_frontalface_default.xml')
eye_xml = cv2.CascadeClassifier('../xml/haarcascade_eye.xml')
# img = cv2.imread('../img/face.jpg')
img = cv2.imread('../img/beautiful_girl.jpg')
cv2.imshow('src', img)
# Haar detection works on the grayscale image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# 1.3 = scaleFactor (image pyramid step); 5 = minNeighbors (detections
# required to keep a candidate face).
faces = face_xml.detectMultiScale(gray, 1.3, 5)
print('face=', len(faces))
# Outline each face, then search for eyes inside the face region only.
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_face = gray[y:y + h, x: x + w]
    roi_color = img[y:y + h, x:x + w]
    eyes = eye_xml.detectMultiScale(roi_face)
    print('eye=', len(eyes))
    for (e_x, e_y, e_w, e_h) in eyes:
        cv2.rectangle(roi_color, (e_x, e_y), (e_x + e_w, e_y + e_h), (255, 0, 0), 2)
cv2.imshow('dst', img)
cv2.waitKey(0)
| true |
73c4f0038829d3ff58766faef6f48a80f4a63da5 | Python | WuDiDaBinGe/BiNTM | /utils/contrastive_loss.py | UTF-8 | 14,510 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2021/7/8 上午9:49
# @Author : WuDiDaBinGe
# @FileName: contrastive_loss.py
# @Software: PyCharm
import torch
import torch.nn as nn
import math
import numpy as np
class InstanceLoss(nn.Module):
    """NT-Xent instance-level contrastive loss: for each sample, its two
    augmented views form the positive pair; every other sample in the
    doubled batch is a negative."""

    def __init__(self, batch_size, temperature, device):
        super(InstanceLoss, self).__init__()
        self.batch_size = batch_size
        self.temperature = temperature
        self.device = device
        self.mask = self.mask_correlated_samples(batch_size)
        self.criterion = nn.CrossEntropyLoss(reduction="sum")

    def mask_correlated_samples(self, batch_size):
        """Boolean (2B, 2B) mask that is True everywhere except on the
        diagonal and on each sample's positive-pair positions."""
        total = 2 * batch_size
        keep = torch.ones((total, total), dtype=torch.bool)
        keep.fill_diagonal_(False)
        pair = torch.arange(batch_size)
        keep[pair, pair + batch_size] = False
        keep[pair + batch_size, pair] = False
        return keep

    def forward(self, z_i, z_j):
        total = 2 * self.batch_size
        reps = torch.cat([z_i, z_j], dim=0)
        # Pairwise dot-product similarities, temperature-scaled: (2B, 2B).
        sim = reps @ reps.t() / self.temperature

        # The two off-diagonals hold each view's similarity to its partner.
        positives = torch.cat(
            [torch.diag(sim, self.batch_size), torch.diag(sim, -self.batch_size)]
        ).reshape(total, 1)
        negatives = sim[self.mask].reshape(total, -1)

        # The positive sits in column 0, so the target class is always 0.
        logits = torch.cat([positives, negatives], dim=1)
        target = torch.zeros(total, dtype=torch.long, device=positives.device)
        return self.criterion(logits, target) / total
class ClusterLoss(nn.Module):
    """Cluster-level contrastive loss plus an entropy term that penalizes
    degenerate (collapsed) cluster-usage distributions."""

    def __init__(self, class_num, device, temperature):
        super(ClusterLoss, self).__init__()
        self.class_num = class_num
        self.temperature = temperature
        self.device = device
        self.mask = self.mask_correlated_clusters(class_num)
        self.criterion = nn.CrossEntropyLoss(reduction="sum")
        self.similarity_f = nn.CosineSimilarity(dim=2)

    def mask_correlated_clusters(self, class_num):
        """Boolean (2K, 2K) mask that is True everywhere except on the
        diagonal and on each cluster's cross-view partner positions."""
        total = 2 * class_num
        keep = torch.ones((total, total), dtype=torch.bool)
        keep.fill_diagonal_(False)
        pair = torch.arange(class_num)
        keep[pair, pair + class_num] = False
        keep[pair + class_num, pair] = False
        return keep

    def _cluster_entropy(self, assignments):
        # Negative entropy of the mean cluster-usage distribution,
        # offset by log K so perfectly uniform usage contributes 0.
        usage = assignments.sum(0).view(-1)
        usage = usage / usage.sum()
        return math.log(usage.size(0)) + (usage * torch.log(usage)).sum()

    def forward(self, c_i, c_j):
        ne_loss = self._cluster_entropy(c_i) + self._cluster_entropy(c_j)

        total = 2 * self.class_num
        # Each cluster's assignment column becomes a representation vector.
        clusters = torch.cat([c_i.t(), c_j.t()], dim=0)
        sim = self.similarity_f(clusters.unsqueeze(1), clusters.unsqueeze(0)) / self.temperature

        positives = torch.cat(
            [torch.diag(sim, self.class_num), torch.diag(sim, -self.class_num)]
        ).reshape(total, 1)
        negatives = sim[self.mask].reshape(total, -1)

        logits = torch.cat([positives, negatives], dim=1)
        target = torch.zeros(total, dtype=torch.long, device=positives.device)
        loss = self.criterion(logits, target) / total
        return loss + ne_loss
class Conditional_Contrastive_loss(torch.nn.Module):
    """Conditional contrastive (2C-style) loss: pulls each embedding toward
    its class proxy and, optionally, toward same-class embeddings in the
    batch; all other pairs act as the normalizing denominator."""

    def __init__(self, device, batch_size, pos_collected_numerator):
        super(Conditional_Contrastive_loss, self).__init__()
        self.device = device
        self.batch_size = batch_size
        # When True, same-class pair similarities are added to the numerator.
        self.pos_collected_numerator = pos_collected_numerator
        self.calculate_similarity_matrix = self._calculate_similarity_matrix()
        self.cosine_similarity = torch.nn.CosineSimilarity(dim=-1)

    def _calculate_similarity_matrix(self):
        return self._cosine_simililarity_matrix

    def remove_diag(self, M):
        """Drop the diagonal of a square matrix, returning shape (h, h-1)."""
        h, w = M.shape
        assert h == w, "h and w should be same"
        off_diag = ~torch.eye(h, dtype=torch.bool)
        off_diag = off_diag.to(self.device)
        return M[off_diag].view(h, -1)

    def _cosine_simililarity_matrix(self, x, y):
        return self.cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))

    def forward(self, inst_embed, proxy, negative_mask, labels, temperature, margin=0):
        pair_sim = self.calculate_similarity_matrix(inst_embed, inst_embed)
        instance_zone = torch.exp((self.remove_diag(pair_sim) - margin) / temperature)
        # Similarity of each instance to its class-proxy embedding.
        inst2proxy_positive = torch.exp((self.cosine_similarity(inst_embed, proxy) - margin) / temperature)

        if self.pos_collected_numerator:
            # negative_mask[labels] marks, per row, the samples sharing that
            # row's class; the diagonal (self-pairs) is removed to match
            # instance_zone's shape.
            same_class = self.remove_diag(negative_mask[labels])
            numerator = inst2proxy_positive + (instance_zone * same_class).sum(dim=1)
        else:
            numerator = inst2proxy_positive

        denomerator = torch.cat([inst2proxy_positive.unsqueeze(1), instance_zone], dim=1).sum(dim=1)
        return -torch.log(numerator / denomerator).mean()
class SupConLoss(nn.Module):
    """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
    It also supports the unsupervised contrastive loss in SimCLR"""
    def __init__(self, temperature=0.07, contrast_mode='all',
                 base_temperature=0.07):
        super(SupConLoss, self).__init__()
        self.temperature = temperature
        self.contrast_mode = contrast_mode
        self.base_temperature = base_temperature

    def forward(self, features, labels=None, mask=None):
        """Compute loss for model. If both `labels` and `mask` are None,
        it degenerates to SimCLR unsupervised loss:
        https://arxiv.org/pdf/2002.05709.pdf
        Args:
            features: hidden vector of shape [bsz, n_views, ...].
            labels: ground truth of shape [bsz].
            mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
                has the same class as sample i. Can be asymmetric.
        Returns:
            A loss scalar.
        """
        device = (torch.device('cuda')
                  if features.is_cuda
                  else torch.device('cpu'))

        if len(features.shape) < 3:
            raise ValueError('`features` needs to be [bsz, n_views, ...],'
                             'at least 3 dimensions are required')
        if len(features.shape) > 3:
            # NOTE(review): this collapses everything after the batch axis
            # into ONE dimension, yielding a 2-D tensor even though the code
            # above demands >= 3 dims; the reference implementation keeps
            # [bsz, n_views, -1]. Confirm this deviation is intentional.
            features = features.view(features.shape[0], -1)

        batch_size = features.shape[0]
        if labels is not None and mask is not None:
            raise ValueError('Cannot define both `labels` and `mask`')
        elif labels is None and mask is None:
            # Unsupervised (SimCLR) case: each sample is its own class.
            mask = torch.eye(batch_size, dtype=torch.float32).to(device)
        elif labels is not None:
            labels = labels.contiguous().view(-1, 1)
            if labels.shape[0] != batch_size:
                raise ValueError('Num of labels does not match num of features')
            # Supervised case: positives are all samples sharing a label.
            mask = torch.eq(labels, labels.T).float().to(device)
        else:
            mask = mask.float().to(device)

        contrast_count = features.shape[1]
        # contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
        # NOTE(review): the reference implementation unbinds the views and
        # concatenates them (line above); here the raw `features` tensor is
        # used directly, so the matmul below operates on a 3-D tensor when
        # the input is [bsz, n_views, ...]. Verify against callers.
        contrast_feature = features
        if self.contrast_mode == 'one':
            anchor_feature = features[:, 0]
            anchor_count = 1
        elif self.contrast_mode == 'all':
            anchor_feature = contrast_feature
            anchor_count = contrast_count
        else:
            raise ValueError('Unknown mode: {}'.format(self.contrast_mode))

        # compute logits
        anchor_dot_contrast = torch.div(
            torch.matmul(anchor_feature, contrast_feature.T),
            self.temperature)
        # for numerical stability: subtract the row-max before exponentiating
        logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max.detach()
        # tile mask across the anchor/contrast views
        mask = mask.repeat(anchor_count, contrast_count)
        # mask-out self-contrast cases (each anchor vs itself)
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
            0
        )
        mask = mask * logits_mask
        # compute log_prob
        exp_logits = torch.exp(logits) * logits_mask
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # compute mean of log-likelihood over positive pairs
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
        # loss
        loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.view(anchor_count, batch_size).mean()
        return loss
def make_mask(labels, n_cls, mask_negatives, device):
    """Build an (n_cls, n_samples) long tensor from integer class labels.

    mask_negatives=True  -> start from zeros and write 1 where sample i
                            belongs to class c (a one-hot-per-class mask);
    mask_negatives=False -> start from ones and write 0 there (the inverse).

    Note: `device` is accepted for interface compatibility but the result is
    returned on the CPU, exactly as before.
    """
    labels_np = labels.detach().cpu().numpy()
    n_samples = labels_np.shape[0]
    if mask_negatives:
        mask_multi, target = np.zeros([n_cls, n_samples]), 1.0
    else:
        mask_multi, target = np.ones([n_cls, n_samples]), 0.0
    for c in range(n_cls):
        mask_multi[c, np.where(labels_np == c)] = target
    return torch.tensor(mask_multi).type(torch.long)
class ConditionalContrastiveLoss(torch.nn.Module):
    """Conditional contrastive (2C) loss keyed by integer class labels:
    the numerator collects the proxy similarity plus same-class pair
    similarities; the denominator is the proxy term plus every off-diagonal
    pair similarity."""

    def __init__(self, num_classes, temperature, master_rank, DDP):
        super(ConditionalContrastiveLoss, self).__init__()
        self.num_classes = num_classes
        self.temperature = temperature
        self.master_rank = master_rank
        self.DDP = DDP
        self.calculate_similarity_matrix = self._calculate_similarity_matrix()
        self.cosine_similarity = torch.nn.CosineSimilarity(dim=-1)

    def _make_neg_removal_mask(self, labels):
        """(num_classes, n_samples) long tensor: 1 where the sample has that class."""
        labels_np = labels.detach().cpu().numpy()
        n_samples = labels_np.shape[0]
        one_hot = np.zeros([self.num_classes, n_samples])
        for c in range(self.num_classes):
            one_hot[c, np.where(labels_np == c)] = 1.0
        return torch.tensor(one_hot).type(torch.long).to(self.master_rank)

    def _calculate_similarity_matrix(self):
        return self._cosine_simililarity_matrix

    def _remove_diag(self, M):
        """Drop the diagonal of a square matrix, returning shape (h, h-1)."""
        h, w = M.shape
        assert h == w, "h and w should be same"
        off_diag = torch.from_numpy(np.ones((h, w)) - np.eye(h))
        off_diag = off_diag.type(torch.bool).to(self.master_rank)
        return M[off_diag].view(h, -1)

    def _cosine_simililarity_matrix(self, x, y):
        return self.cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))

    def forward(self, embed, proxy, label, **_):
        # NOTE: the original's DDP all-gather of embed/proxy/label was
        # commented out; this implementation operates on the local batch only.
        pair_sim = self.calculate_similarity_matrix(embed, embed)
        pair_sim = torch.exp(self._remove_diag(pair_sim) / self.temperature)

        pos_mask = self._remove_diag(self._make_neg_removal_mask(label)[label])
        emb2proxy = torch.exp(self.cosine_similarity(embed, proxy) / self.temperature)

        numerator = emb2proxy + (pos_mask * pair_sim).sum(dim=1)
        denomerator = torch.cat([emb2proxy.unsqueeze(1), pair_sim], dim=1).sum(dim=1)
        return -torch.log(numerator / denomerator).mean()
class MiConditionalContrastiveLoss(torch.nn.Module):
    """Conditional contrastive loss over mutual-information embeddings.

    NOTE(review): apart from the `mi_` parameter names in forward(), this
    class is an exact duplicate of ConditionalContrastiveLoss above;
    consider deriving one from the other to avoid the copy-paste.
    """
    def __init__(self, num_classes, temperature, master_rank, DDP):
        super(MiConditionalContrastiveLoss, self).__init__()
        self.num_classes = num_classes
        self.temperature = temperature
        self.master_rank = master_rank  # device the masks are created on
        self.DDP = DDP  # stored but unused: the DDP gather below is commented out
        self.calculate_similarity_matrix = self._calculate_similarity_matrix()
        self.cosine_similarity = torch.nn.CosineSimilarity(dim=-1)

    def _make_neg_removal_mask(self, labels):
        # (num_classes, n_samples) long tensor: 1 where the sample has that class.
        labels = labels.detach().cpu().numpy()
        n_samples = labels.shape[0]
        mask_multi, target = np.zeros([self.num_classes, n_samples]), 1.0
        for c in range(self.num_classes):
            c_indices = np.where(labels == c)
            mask_multi[c, c_indices] = target
        return torch.tensor(mask_multi).type(torch.long).to(self.master_rank)

    def _calculate_similarity_matrix(self):
        return self._cosine_simililarity_matrix

    def _remove_diag(self, M):
        # Drop the diagonal of a square matrix, returning shape (h, h-1).
        h, w = M.shape
        assert h == w, "h and w should be same"
        mask = np.ones((h, w)) - np.eye(h)
        mask = torch.from_numpy(mask)
        mask = (mask).type(torch.bool).to(self.master_rank)
        return M[mask].view(h, -1)

    def _cosine_simililarity_matrix(self, x, y):
        v = self.cosine_similarity(x.unsqueeze(1), y.unsqueeze(0))
        return v

    def forward(self, mi_embed, mi_proxy, label, **_):
        # DDP all-gather intentionally disabled in the original:
        # if self.DDP:
        #     mi_embed = torch.cat(misc.GatherLayer.apply(mi_embed), dim=0)
        #     mi_proxy = torch.cat(misc.GatherLayer.apply(mi_proxy), dim=0)
        #     label = torch.cat(misc.GatherLayer.apply(label), dim=0)
        sim_matrix = self.calculate_similarity_matrix(mi_embed, mi_embed)
        sim_matrix = torch.exp(self._remove_diag(sim_matrix) / self.temperature)
        # Same-class (positive) pair indicator per row, diagonal removed.
        neg_removal_mask = self._remove_diag(self._make_neg_removal_mask(label)[label])
        sim_pos_only = neg_removal_mask * sim_matrix

        emb2proxy = torch.exp(self.cosine_similarity(mi_embed, mi_proxy) / self.temperature)

        numerator = emb2proxy + sim_pos_only.sum(dim=1)
        denomerator = torch.cat([torch.unsqueeze(emb2proxy, dim=1), sim_matrix], dim=1).sum(dim=1)
        return -torch.log(numerator / denomerator).mean()
if __name__ == '__main__':
    # Smoke test: random embeddings/proxies with 5 classes.
    # NOTE(review): hard-codes a CUDA device; this fails on CPU-only machines.
    device = torch.device('cuda')
    a = torch.rand((64, 20))
    b = torch.rand((64, 20))
    labels = torch.randint(low=0, high=5, size=(64,))
    negetive_mask = make_mask(labels, 5, True, device)
    criten_instance = Conditional_Contrastive_loss(device, 64, pos_collected_numerator=True)
    print(criten_instance(a, b, negetive_mask, labels, 0.1))
| true |
0f80504c48250957f48023b48d5b25d9f968ed2d | Python | thrashlover/stepik | /python_part_2.3.1.py | UTF-8 | 611 | 2.84375 | 3 | [] | no_license | import math
from selenium import webdriver
link = "http://suninjuly.github.io/redirect_accept.html"
# Open the task page and click the submit button, which opens a new browser tab.
browser = webdriver.Chrome()
browser.get(link)
browser.find_element_by_css_selector('.btn-primary').click()
# Switch WebDriver focus to the newly opened tab (handle index 1).
new_window = browser.window_handles[1]
# first_window = browser.window_handles[0]
browser.switch_to.window(new_window)
def calc(x):
    """Return str(log(|12 * sin(x)|)) for the integer encoded in x."""
    number = int(x)
    result = math.log(abs(12 * math.sin(number)))
    return str(result)
# Read the number shown on the page, compute the answer, and submit the form.
x_element = browser.find_element_by_css_selector('#input_value')
x = x_element.text
y = calc(x)
browser.find_element_by_id('answer').send_keys(y)
browser.find_element_by_css_selector('.btn-primary').click()
| true |
c190adee5c1c731fa8e83ab44ad2bc591999722f | Python | Hithru/hacktoberfest2k | /scripts/tan-theressa-2.py | UTF-8 | 120 | 2.75 | 3 | [] | no_license | def hello_world():
""" function to print "Hello World"
"""
hello_text = "Hello World"
print (hello_text) | true |
9f7d5f2ecdec7f68bf7a5ba309d50b8062e03e11 | Python | letmecode1/python | /data_types/string.py | UTF-8 | 800 | 3.703125 | 4 | [] | no_license | single_qoutes = 'This is John'
double_qoutes = "This is Max"
print(single_qoutes)
print(double_qoutes)
triple_qouted = '''
this is a triple
qouted string
'''
print(triple_qouted)
password = "pass" + "word"
print(password)
ha = "HA" * 5
print(ha)
string = "What does the fox say?"
print(string.find("say")) # true, so prints 18
print("javi".find('i')) # true, so it prints 3
print("javi".find('b')) # false, so it prints -1
print("javi".find('vi')) # true, so it prints 2
ssh = 'PoRTstr22isTCp'
print(ssh)
print(ssh.lower())
print(ssh.upper())
# If we need to use qoutes or special characters in a string we can do that by using the '\' character
print("John\tCena")
print("Stone\nCold")
print("'Single' in a double")
print('"Double" in a single')
print("\"Double\" in a Double")
| true |
256fe91760390aa4cab1225de8361543d29552c4 | Python | HenryDaiCode/PEulermusings | /Problem 012.py | UTF-8 | 522 | 3.53125 | 4 | [] | no_license | from math import sqrt
from math import ceil
def tri(n):
    """Return the n-th triangular number, n * (n + 1) / 2."""
    return n * (n + 1) // 2
mostdivisors = 0
i = 1
# Project Euler 12: find the first triangular number with more than 500
# divisors. The loop exits right after the triangle that achieves this,
# so tri(i - 1) below is that number.
while mostdivisors <= 500:
    halfdivisors = 0
    trinum = tri(i)
    # Count divisors strictly below sqrt(trinum); each such divisor j pairs
    # with trinum // j above sqrt, hence the doubling below.
    for j in range(1, ceil(sqrt(trinum))):
        if trinum % j == 0:
            halfdivisors += 1
    divisors = 2 * halfdivisors
    # A perfect square contributes its square root as one extra, unpaired divisor.
    # NOTE(review): relies on float sqrt being exact for these magnitudes.
    if sqrt(trinum).is_integer():
        divisors += 1
    if divisors > mostdivisors:
        mostdivisors = divisors
    i += 1
#i - 1 to counterract last i += 1
print(tri(i - 1)) | true |
734d79159cdfc164ed4b79b22578fcf4d23eed22 | Python | daniel-hasan/ml-metodo-hierarquico | /gera_experimentos.py | UTF-8 | 2,666 | 2.53125 | 3 | [] | no_license | from base_am.resultado import Fold
from base_am.avaliacao import Experimento
from competicao_am.metodo_competicao import MetodoHierarquico, MetodoTradicional
from competicao_am.avaliacao_competicao import OtimizacaoObjetivoSVMCompeticao
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
def gera_experimento(df_amostra, scikit_method, classe_objetivo, n_trials):
    """Run a hierarchical-method experiment over k-fold splits of df_amostra.

    Args:
        df_amostra: sample DataFrame with an "original_genre" class column.
        scikit_method: the scikit-learn estimator to wrap.
        classe_objetivo: objective class used for hyperparameter optimization.
        n_trials: number of optimization trials.

    Returns:
        The Experimento object with its results already computed.
    """
    arr_folds = Fold.gerar_k_folds(df_amostra, val_k=5, col_classe="original_genre",
                                   num_repeticoes=1, num_folds_validacao=4,num_repeticoes_validacao=1)
    print("gera_experimento")
    # arr_to_predict, arr_predictions = ml_method.eval(arr_folds[0].df_treino, arr_folds[0].df_data_to_predict, "original_genre")
    # scikit_method = LinearSVC(random_state=2)
    ml_method = MetodoHierarquico(scikit_method, "genre")
    ClasseObjetivo = classe_objetivo
    # A small number of trials keeps this fast, but some problems need tens,
    # hundreds or even thousands of trials to find a good configuration --
    # it depends on the problem, the number of parameters, and how much each
    # parameter affects the result.
    experimento = Experimento(arr_folds, ml_method=ml_method,
                              ClasseObjetivoOtimizacao=ClasseObjetivo,
                              num_trials=n_trials)  # start with ~3 trials, then increase: 10, 100, ...
    experimento.calcula_resultados()
    #salva (save)
    return experimento
def gera_experimento_metodo_tradicional(df_amostra, scikit_method, classe_objetivo, n_trials):
    """Run a traditional (flat) method experiment over k-fold splits.

    Same pipeline as gera_experimento, but wraps the estimator in
    MetodoTradicional targeting "original_genre" directly.
    NOTE(review): near-duplicate of gera_experimento -- consider extracting
    the shared logic.
    """
    arr_folds = Fold.gerar_k_folds(df_amostra, val_k=5, col_classe="original_genre",
                                   num_repeticoes=1, num_folds_validacao=4,num_repeticoes_validacao=1)
    print("gera_experimento")
    # arr_to_predict, arr_predictions = ml_method.eval(arr_folds[0].df_treino, arr_folds[0].df_data_to_predict, "original_genre")
    # scikit_method = LinearSVC(random_state=2)
    ml_method = MetodoTradicional(scikit_method, "original_genre")
    ClasseObjetivo = classe_objetivo
    # A small number of trials keeps this fast, but some problems need tens,
    # hundreds or even thousands of trials to find a good configuration.
    experimento = Experimento(arr_folds, ml_method=ml_method,
                              ClasseObjetivoOtimizacao=ClasseObjetivo,
                              num_trials=n_trials)  # start with ~3 trials, then increase: 10, 100, ...
    experimento.calcula_resultados()
    #salva (save)
return experimento | true |
7c393b8e0046e489ee0d8e09778d9c759be28a71 | Python | nathanhilton/PythonPals | /PythonPals/Button.py | UTF-8 | 4,658 | 3.265625 | 3 | [] | no_license | import pygame
from pygame.locals import *
class button():
    """Clickable rectangular UI button with an optional text label."""

    def __init__(self, color, x, y, width, height, text=''):
        self.color = color
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.text = text

    def modify(self, x, y, width, height):
        """Reposition and resize the button."""
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def draw(self, screen, fontSize, text_color, center=False, outline=None):
        """Draw the button (plus optional outline and label) and refresh the display."""
        if outline:
            # BUG FIX: the original called round(a, b) with two coordinates,
            # which misuses round's ndigits argument and builds a 2-tuple
            # where pygame.draw.rect needs a 4-tuple (x, y, w, h).
            pygame.draw.rect(screen, outline,
                             (round(self.x - 2), round(self.y - 2),
                              round(self.width + 4), round(self.height + 4)), 0)
        pygame.draw.rect(screen, self.color,
                         (round(self.x), round(self.y), round(self.width), round(self.height)), 0)
        if self.text != '':
            font = pygame.font.Font('JandaManateeSolid.ttf', fontSize)
            text = font.render(self.text, 1, text_color)
            if center:
                screen.blit(text, (round(self.x + (self.width / 2 - text.get_width() / 2)),
                                   round(self.y + (self.height / 2 - text.get_height() / 2))))
            else:
                screen.blit(text, (round(self.x + self.width), round(self.y)))
        pygame.display.update()

    def isOver(self, pos):
        """Return True if pos == (x, y) lies strictly inside the button rect."""
        return (self.x < pos[0] < self.x + self.width
                and self.y < pos[1] < self.y + self.height)
class text():
    """Multi-line text element: renders each string in `text` (a list of
    lines) stacked vertically with a fixed pixel spacing."""

    LINE_SPACING = 37  # vertical pixels between consecutive rendered lines

    def __init__(self, color, x, y, width, height, textSize, text):
        self.color = color
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.text = text
        self.textSize = textSize

    def modify(self, x, y, width, height):
        """Reposition and resize the element."""
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def draw(self, screen, fontSize, center):
        """Render each line of self.text; center horizontally/vertically when
        `center` is True, otherwise anchor at (x + width, y)."""
        # (removed the original's unused local `size = len(self.text)`)
        if len(self.text) != 0:
            font = pygame.font.Font("JandaManateeSolid.ttf", fontSize)
            offset = 0
            if center:
                for part in self.text:
                    rendered = font.render(part, True, self.color)
                    screen.blit(rendered, (round(self.x + (self.width / 2 - rendered.get_width() / 2)),
                                           round(self.y + (self.height / 2 - rendered.get_height() / 2) + offset)))
                    offset += self.LINE_SPACING
            else:
                for part in self.text:
                    rendered = font.render(part, True, self.color)
                    screen.blit(rendered, (round(self.x + self.width), round(self.y + offset)))
                    offset += self.LINE_SPACING
class healthBar():
    """Health bar widget: white border, black background, and a colored fill
    proportional to `health` (interpreted on a 0-100 scale)."""

    def __init__(self, x, y, width, height, health, orientation):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.health = health
        self.orientation = orientation

    def modify(self, x, y, width, height):
        """Reposition and resize the bar."""
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def set_health(self, newHealth):
        """Update the displayed health value."""
        self.health = newHealth

    def draw(self, screen):
        """Draw border, background and fill, then refresh the display."""
        pygame.draw.rect(screen, (255, 255, 255), (self.x - 2, self.y - 2, self.width + 4, self.height + 4))
        pygame.draw.rect(screen, (0, 0, 0), (self.x, self.y, self.width, self.height))
        if self.health != 0:
            fill_width = self.width - (self.width * ((100 - self.health) / 100))
            # Green when healthy, red when critical, yellow in between.
            if self.health >= 55:
                fill_color = (0, 128, 0)
            elif self.health < 25:
                fill_color = (255, 0, 0)
            else:
                fill_color = (255, 255, 0)
            pygame.draw.rect(screen, fill_color, (self.x, self.y, fill_width, self.height))
        pygame.display.update()
| true |
b84bd4fd10135938531e50604ca50b74ab45397e | Python | awani216/FlightFuel | /codes/test.py | UTF-8 | 1,324 | 2.78125 | 3 | [] | no_license | import numpy as np
from numba import jit
import matplotlib.pyplot as plt
import time
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from six.moves import cPickle as pickle
import random
with open(r"../random_forest.pickle", 'rb') as fl:
data = pickle.load(fl)
train_dataset = data['train_dataset']
train_labels = data['train_labels']
test_dataset = data['test_dataset']
attributes = data['attributes']
train_labels = np.array(train_labels, dtype=int)
ran = np.random.permutation(len(train_dataset))
train_dataset = train_dataset[ran]
train_labels = train_labels[ran]
@jit
def func():
clf = KNeighborsClassifier()
ftime = time.time()
clf.fit(train_dataset[:50000], train_labels[:50000])
ftime = time.time()
x_test = train_dataset[-1000:]
y_test = train_labels[-1000:]
res = clf.predict(x_test)
ct = 0
for i in range(len(res)):
if abs(res[i] - y_test[i]) <= 200:
ct += 1
print("Time Taken ", time.time()-ftime)
print("Accuracy in step " + str(100*(ct/len(res))))
func()
| true |
e9a6fb887debf8a9a2053beb72c1f8b146231037 | Python | cl19951225/syntheticdatagen | /evaluations/disc_and_preds/metrics/predictive_metrics3.py | UTF-8 | 10,101 | 2.65625 | 3 | [] | no_license |
# Necessary Packages
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Layer, Conv1D, GRU, Flatten, Dense, Input, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.losses import BinaryCrossentropy, MeanAbsoluteError
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.utils import shuffle
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import MeanSquaredError, Mean
from tensorflow.keras.callbacks import Callback, EarlyStopping
import sys
import nbeats_model
import predictor as conv_pred
class PrintLossPerNthEpoch(Callback):
    """Keras callback that prints the train (and, when available,
    validation) loss every `print_period`-th epoch, tagged with `label`."""

    def __init__(self, label, print_period, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.label = label
        self.print_period = print_period

    def on_epoch_end(self, epoch, logs=None):
        # Only print on the last epoch of each period (epochs are 0-based).
        if epoch % self.print_period != (self.print_period - 1):
            return
        logs = logs or {}
        loss = np.round(logs['loss'], 3)
        try:
            # BUG FIX: the original used a bare `except:` that swallowed
            # every exception; only a missing 'val_loss' key is expected.
            val_loss = np.round(logs['val_loss'], 3)
        except KeyError:
            # No validation split was configured for this fit() call.
            print( f"{self.label} Avg. train loss for epoch {epoch+1}: {loss} " )
        else:
            print( f"{self.label} Avg. train / val loss for epoch {epoch+1}: {loss} / {val_loss} " )
class Predictor():
    """Post-hoc GRU predictor: maps a (seq_len-1, dim-1) feature sequence to
    a one-step-ahead scalar prediction per timestep, trained with MAE."""

    def __init__(self, seq_len, dim, hidden_dim, **kwargs):
        # NOTE(review): this class does not inherit from a Keras class, so
        # forwarding **kwargs to object.__init__ will raise if any are given.
        super(Predictor, self).__init__(**kwargs)
        self.seq_len = seq_len
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.predictor = self.build_predictor()
        self.loss_func = MeanAbsoluteError()
        self.optimizer = tf.keras.optimizers.Adam()
        self.predictor.compile(loss = self.loss_func, optimizer = self.optimizer)

    def build_predictor(self,):
        """Build the GRU -> per-timestep Dense(1) Keras model."""
        input_ = Input(shape=(self.seq_len-1, self.dim-1), name='X_real_input')
        x = GRU(units = self.hidden_dim, return_sequences=True, activation = 'tanh', name='p_gru')(input_)
        x = TimeDistributed(layer = Dense(units = 1) )(x)
        # Drop the trailing singleton channel so outputs align with (N, T) targets.
        output = tf.squeeze(x)
        model = Model(input_, output, name = 'predictor')
        return model

    def fit(self, X, y, epochs, verbose = 0, validation_split = None, shuffle = True, print_period=50):
        """Train with early stopping; prints the loss every `print_period` epochs."""
        loss_to_monitor = 'loss' if validation_split is None else 'val_loss'
        early_stop_callback = EarlyStopping(monitor=loss_to_monitor, min_delta = 1e-4, patience=50)
        print_callback = PrintLossPerNthEpoch(label = 'Pred', print_period = print_period)
        Y_hat = self.predictor.fit(X, y,
                                   epochs = epochs,
                                   verbose = verbose,
                                   validation_split = validation_split,
                                   shuffle = shuffle,
                                   callbacks = [print_callback, early_stop_callback]
                                   )
        return Y_hat

    def __call__(self, X):
        return self.predict(X)

    def predict(self, X):
        """Run the underlying Keras model on X."""
        Y_hat = self.predictor(X)
        return Y_hat

    def summary(self):
        self.predictor.summary()
class ConvPredictor:
    """Post-hoc causal-Conv1D predictor: maps a (seq_len, feat_dim) feature
    sequence to one scalar prediction per timestep, trained with MAE."""

    def __init__(self, seq_len, feat_dim, hidden_layer_sizes ):
        self.seq_len = seq_len
        self.hidden_layer_sizes = hidden_layer_sizes
        self.feat_dim = feat_dim
        self.predictor = self.build_predictor()
        self.loss_func = MeanAbsoluteError()
        self.optimizer = tf.keras.optimizers.Adam()
        self.predictor.compile(loss = self.loss_func, optimizer = self.optimizer)

    def build_predictor(self):
        """Stack causal Conv1D layers, then per-timestep Dense heads."""
        input_ = Input(shape=(self.seq_len, self.feat_dim), name='input')
        x = input_
        for i, num_filters in enumerate(self.hidden_layer_sizes):
            # NOTE(review): num_filters from hidden_layer_sizes is ignored;
            # every layer uses feat_dim // 2 filters instead -- confirm
            # whether the commented line below was meant to be restored.
            x = Conv1D(
                # filters = num_filters,
                filters = self.feat_dim//2,
                kernel_size=3,
                activation='tanh',
                padding='causal',
                name=f'conv_{i}')(x)
        x = TimeDistributed(layer = Dense(units = self.feat_dim//2, activation='relu') )(x)
        x = TimeDistributed(layer = Dense(units = 1) )(x)
        # Flatten (N, T, 1) -> (N, T) so outputs align with the targets.
        output = Flatten(name='flatten')(x)
        model = Model(input_, output, name = 'predictor')
        return model

    def fit(self, X, y, epochs, verbose = 0, validation_split = None, shuffle = True, print_period=50):
        """Train with early stopping; prints the loss every `print_period` epochs."""
        loss_to_monitor = 'loss' if validation_split is None else 'val_loss'
        early_stop_callback = EarlyStopping(monitor=loss_to_monitor, min_delta = 1e-4, patience=50)
        print_callback = PrintLossPerNthEpoch(label ='Pred', print_period = print_period)
        Y_hat = self.predictor.fit(X, y,
                                   epochs = epochs,
                                   verbose = verbose,
                                   validation_split = validation_split,
                                   shuffle = shuffle,
                                   callbacks = [print_callback, early_stop_callback]
                                   )
        return Y_hat

    def __call__(self, X):
        return self.predict(X)

    def predict(self, X):
        """Run the underlying Keras model on X."""
        Y_hat = self.predictor(X)
        return Y_hat

    def summary(self):
        self.predictor.summary()
#####################################################################################################
#####################################################################################################
def predictive_score_metrics (orig_data, generated_data, epochs = 2500, predictor = 'conv', print_epochs = 50):
    """Post-hoc predictive score: train a predictor on the synthetic data and
    evaluate it on the real data (train-on-synthetic, test-on-real).

    Args:
      - orig_data: original data, shape (no, seq_len, dim)
      - generated_data: generated synthetic data, same shape
      - epochs: maximum training epochs (early stopping may end sooner)
      - predictor: 'conv' (default), 'rnn', or 'nbeats'
      - print_epochs: print the training loss every this many epochs

    Returns:
      - predictive score: mean absolute error on the real (test) data
    """
    # Module-level value read wherever print_period is referenced below.
    global print_period
    print_period = print_epochs

    # Basic Parameters
    no, seq_len, dim = orig_data.shape

    # --------------------------------------------------------------------------
    # nbeats: forecast the last fcst_len steps of the target (last) feature,
    # using the earlier steps as backcast and the other features as exogenous.
    if predictor == 'nbeats':
        fcst_len = 5
        X_train, E_train = generated_data[:, :-fcst_len, -1:], generated_data[:, :-fcst_len, :-1]
        Y_train = generated_data[:, -fcst_len:, -1]
        X_test, E_test = orig_data[:, :-fcst_len, -1:], orig_data[:, :-fcst_len, :-1]
        Y_test = orig_data[:, -fcst_len:, -1]

        N, T, D = X_train.shape
        # BUG FIX: NBeatsNet is only reachable through the imported module
        # (`import nbeats_model`); the bare name NBeatsNet raised NameError.
        predictor = nbeats_model.NBeatsNet(
            input_dim = D,
            exo_dim = E_train.shape[2],
            backcast_length = X_train.shape[1],
            forecast_length = Y_train.shape[1],
            nb_blocks_per_stack=2,
            stack_types=(nbeats_model.NBeatsNet.GENERIC_BLOCK,
                         nbeats_model.NBeatsNet.GENERIC_BLOCK),
            thetas_dim=(16, 16),
            hidden_layer_units = 64,
        )
        predictor.compile(loss='mse', optimizer='adam')
        early_stop_callback = EarlyStopping(monitor='val_loss', min_delta = 1e-5, patience=30)
        # BUG FIX: PrintLossPerNthEpoch requires a label argument; calling it
        # without one raised TypeError. 'Pred' matches the other call sites.
        print_callback = PrintLossPerNthEpoch(label = 'Pred', print_period = print_period)
        predictor.fit(
            [X_train, E_train], Y_train, epochs = epochs, verbose = 0, validation_split= 0.1,
            callbacks = [early_stop_callback, print_callback]
        )
        y_test_hat = predictor.predict([X_test, E_test])
        y_test_hat = np.squeeze(y_test_hat)
        # Flatten forecasts/targets to column vectors for the MAE below.
        y_test_hat = y_test_hat.reshape((-1, 1))
        Y_test = Y_test.reshape((-1, 1))
    else:
        no, seq_len, dim = orig_data.shape
        # train on generated (synthetic) data, test on real data:
        # inputs are all-but-last features/steps, target is the next-step last feature.
        X_train, Y_train = generated_data[:, :-1, :-1], generated_data[:, 1:, -1]
        X_test, Y_test = orig_data[:, :-1, :-1], orig_data[:, 1:, -1]

        # shuffle both X and Y in unison
        X_train, Y_train = shuffle (X_train, Y_train)

        if predictor == 'rnn':
            ## Build a post-hoc RNN predictor network
            hidden_dim = int(dim/2)
            batch_size = 128
            predictor = Predictor(seq_len = seq_len,
                                  dim = dim,
                                  hidden_dim = int(dim/2)
                                  )
        elif predictor == 'conv':
            predictor = ConvPredictor(
                seq_len = seq_len-1,
                feat_dim = dim-1,
                hidden_layer_sizes = [50, 100],
            )

        predictor.fit(
            X_train, Y_train,
            epochs = epochs,
            shuffle = True,
            verbose=0,
            validation_split=0.1,
            print_period = print_period
        )
        y_test_hat = predictor.predict(X_test)

    predictive_score = mean_absolute_error(Y_test, y_test_hat )
    return predictive_score
if __name__== '__main__':
    # Smoke test: build an N-BEATS predictor on random data and run a forward pass.
    N, T, D = 100, 24, 5
    data= np.random.randn(N, T, D)
    pred_dims = 1
    fcst_len = 5
    # Target series is the last feature; the rest are exogenous inputs.
    X, E = data[:, :-fcst_len, -1:], data[:, :-fcst_len, :-1]
    Y = data[:, -fcst_len:, -1]
    print('orig shapes', X.shape, E.shape, Y.shape)
    N_x, T_x, D_x = X.shape
    # BUG FIX: NBeatsNet is only reachable through the imported module
    # (`import nbeats_model` at the top of the file); the bare name raised
    # NameError.
    predictor = nbeats_model.NBeatsNet(
        input_dim = X.shape[2],
        exo_dim = E.shape[2],
        backcast_length = X.shape[1],
        forecast_length = Y.shape[1],
    )
    predictor.summary()
    pred = predictor.predict([X, E])
    print('pred shape: ', pred.shape)
| true |
02fe542f602dd29b0760173bb41126cae9289d73 | Python | kjng/python-playground | /python-crash-course/Alien_Invasion/ship.py | UTF-8 | 1,161 | 3.703125 | 4 | [] | no_license | import pygame
class Ship():
    """The player's ship sprite.

    Starts bottom-center and moves horizontally according to the
    moving_left/moving_right flags; wraps around when it leaves either
    screen edge entirely.
    """

    def __init__(self, settings, screen):
        """Load the ship image and park it bottom-center on *screen*."""
        self.screen = screen
        self.settings = settings
        # Sprite image plus its rect, and the screen rect for placement.
        self.image = pygame.image.load('ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Spawn position: horizontally centered, flush with the bottom.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Movement flags, toggled by the event loop.
        self.moving_right = False
        self.moving_left = False
        # Track the x position as a float so fractional speeds accumulate.
        self.center = float(self.rect.centerx)

    def update(self):
        """Advance the ship horizontally, wrapping across the screen edges."""
        speed = self.settings.ship_speed_factor
        if self.moving_right:
            self.center += speed
            # Fully past the right edge -> reappear on the left.
            if self.rect.right > self.screen_rect.right + self.rect.width:
                self.center = 0
        if self.moving_left:
            self.center -= speed
            # Fully past the left edge -> reappear on the right.
            if self.rect.left < 0 - self.rect.width:
                self.center = self.settings.screen_width
        self.rect.centerx = self.center

    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)
0803a758f415ff5dd5afa4ac9195b80b42cfb35e | Python | akantuni/Codeforces | /1547C/Pair Programming.py | UTF-8 | 1,637 | 2.5625 | 3 | [] | no_license | t = int(input())
for j in range(t):
input()
k, n, m = list(map(int, input().split()))
a = list(map(int, input().split()))
b = list(map(int, input().split()))
file_len = k
check = True
actions = []
for i in range(len(a)):
if a[i] > file_len:
if len(b) > 0:
for e in range(len(b)):
if b[e] > file_len:
l = e
break
else:
if b[e] == 0:
file_len += 1
actions.append(0)
else:
actions.append(b[e])
l = e + 1
b = b[l:]
if a[0] > file_len:
check = False
break
else:
actions.append(a[i])
else:
check = False
break
else:
if a[i] == 0:
file_len += 1
actions.append(0)
else:
actions.append(a[i])
if check == False:
print(-1)
continue
else:
if len(b) > 0:
for i in range(len(b)):
if b[i] > file_len:
check = False
else:
if b[i] == 0:
file_len += 1
actions.append(0)
else:
actions.append(b[i])
if check == False:
print(-1)
else:
print(" ".join(list(map(str, actions))))
| true |
2cfcddee6b0c147d36669c7361bf92a00434688b | Python | CooperStansbury/owl_tools | /aporia/scripts_and_data/get_data.py | UTF-8 | 4,591 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
from __future__ import print_function # for 2.7 users
import pandas as pd
import argparse
import os
import owlready2 as ow
import subprocess
# function to traverse up the is_a tree
def get_tree(node, path):
    """Climb the is_a hierarchy starting at *node*.

    Appends every newly visited class to *path* (mutated in place and
    also returned).  At each step only the FIRST not-yet-visited parent
    is followed; the walk stops at the BFO root ('BFO_0000001') or when
    every parent has already been visited.
    """
    current = node
    while True:
        if current not in path:
            path.append(current)
        # Known top of the tree: nothing further to climb.
        if current.name == 'BFO_0000001':
            return path
        # Follow only the first unvisited parent -- this mirrors the
        # original recursive version, which broke out after one branch.
        nxt = next((p for p in current.is_a if p not in path), None)
        if nxt is None:
            return path
        path.append(nxt)
        current = nxt
def owl_to_csv(input_file):
    """Convert a local .owl ontology into edge/node CSV files.

    Pipeline: merge the ontology with the external `robot` CLI, load the
    merged file with owlready2, then build two pandas DataFrames --
    edges (SubClassOf links) and nodes (label, parent PURL, depth-based
    weight, parent type) -- drop entities missing from either frame,
    and write edges_<base>.csv / nodes_<base>.csv.

    Requires the `robot` executable on PATH and write access to the
    current directory.
    """
    # dataframe for edges
    edge_frame = pd.DataFrame()
    # dataframe for nodes
    node_frame = pd.DataFrame()
    # file variables
    source_file = os.path.basename(input_file)
    base = os.path.splitext(source_file)[0]
    file_out = 'merged_' + str(base) + '.owl'
    # merge the ontology via robot (external Java CLI; assumed on PATH)
    print('Merging: ' + str(source_file))
    robotMerge = 'robot merge --input ' + source_file + ' --output ' + file_out
    process = subprocess.Popen(robotMerge.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    print('File merged successfully.')
    # make full path for owlready2 object
    full_path='file://' + file_out
    # get owlready2 ontology object from input file
    print('Loading ontology file: ' + str(file_out))
    onto = ow.get_ontology(full_path).load()
    print('File loaded successfully...')
    # increment dataframe
    count = 1
    # loop to make edges dataset
    print('Building datasets...')
    for cl in onto.classes():
        if not str(cl.label).__contains__('obsolete'):
            edge_frame.loc[count, 'id'] = str(cl)
            # bare except: classes without an is_a parent get 'NULL'
            try:
                edge_frame.loc[count, 'to'] = str(cl.is_a[0])
            except:
                edge_frame.loc[count, 'to'] = 'NULL'
            edge_frame.loc[count, 'type'] = 'SubClassOf'
            ## right now using default weight 1
            ## will experiment with other weights
            edge_frame.loc[count, 'weight'] = 1
            # build node frame
            node_frame.loc[count, 'id'] = str(cl)
            # need to handle entities without labels
            try:
                node_frame.loc[count, 'label'] = str(cl.label[0])
            except:
                node_frame.loc[count, 'label'] = str(cl)
            node_frame.loc[count, 'parent.purl'] = str(cl.iri).rsplit('/', 1)[1].rsplit('_',1)[0].rsplit('#',1)[0]
            # distance from the top
            # need empty list to store path for each class
            class_path = []
            # NOTE(review): under Python 2 (the ambiguous `python`
            # shebang) 1/len(...) is INTEGER division and yields 0 for
            # any path longer than 1 -- confirm the intended interpreter.
            node_frame.loc[count, 'entity.weight'] = 1/len(get_tree(cl, class_path))
            try:
                node_frame.loc[count, 'entity.type'] = str(class_path[-2])
            except:
                node_frame.loc[count, 'entity.type'] = 'NULL'
            # increment dataframe
            count += 1
        else:
            print('DROPPING obsolete class: ' + str(cl))
    # drop all elements not in common (edge targets without a node row)
    for item in list(set(edge_frame.to) - set(node_frame.id)):
        print('DROPPING entity mismatch: ' + str(item))
        edge_frame = edge_frame[edge_frame.to != item]
        edge_frame = edge_frame[edge_frame.id != item]
        node_frame = node_frame[node_frame.id != item]
    # drop all elements not in common (node rows without an edge row)
    for item in list(set(node_frame.id) - set(edge_frame.id)):
        print('DROPPING entity mismatch: ' + str(item))
        edge_frame = edge_frame[edge_frame.to != item]
        edge_frame = edge_frame[edge_frame.id != item]
        node_frame = node_frame[node_frame.id != item]
    print('Number of edges: ' + str(len(edge_frame)))
    print('Number of nodes: ' + str(len(node_frame)))
    # print(edge_frame.head(10))
    # print(node_frame.head(10))
    # output each csv to file
    print('Creating csv files...')
    edge_frame.to_csv('edges_' + str(base) + ".csv", index=False)
    node_frame.to_csv('nodes_' + str(base) + ".csv", index=False)
    # clean up dir
    # NOTE(review): shells out to `rm` (POSIX-only); os.remove(file_out)
    # would be portable.
    print('Removing intermediate file: ' + str(file_out))
    rmTemp = 'rm ' + str(file_out)
    process = subprocess.Popen(rmTemp.split(), stdout=subprocess.PIPE)
    output, error = process.communicate()
    print('Done!')
if __name__ == "__main__":
    # parse command-line args
    parser = argparse.ArgumentParser(description='Extract terms from local .OWL file')
    parser.add_argument("input_file", help="File to query")
    args = parser.parse_args()
    # run puppy, run
    owl_to_csv(args.input_file)
| true |
6f371030ac5369d6e9893046c1e469dff6eaa9bc | Python | MinMinOvO/leetcode-code-share | /0133 Clone Graph/19-08-16T16-10.py | UTF-8 | 947 | 3.390625 | 3 | [] | no_license | """
# Definition for a Node.
class Node:
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
"""
from collections import deque
from copy import copy
class Solution:
    def cloneGraph(self, node: 'Node') -> 'Node':
        """Return a deep copy of the connected, undirected graph
        reachable from *node*, using a BFS over the original nodes.

        `clones` maps each original node to its copy; a node is copied
        (and enqueued) exactly once, the first time it is seen.
        """
        if node is None:
            return None
        clones = {node: Node(val=copy(node.val), neighbors=[])}
        pending = deque([node])
        while pending:
            current = pending.popleft()
            clone = clones[current]
            for nb in current.neighbors:
                if nb not in clones:
                    # First visit: create the copy and schedule its edges.
                    clones[nb] = Node(val=copy(nb.val), neighbors=[])
                    pending.append(nb)
                clone.neighbors.append(clones[nb])
        return clones[node]
| true |
de65bb35d2b303aa00dc93dbb1e01d489c638c0d | Python | xuejiekun/cv-demo | /demo_split.py | UTF-8 | 2,369 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from sky.cv import *
def demo_split(filename):
    """Interactive OpenCV demo: threshold *filename*, find contours, and
    highlight the contour selected with the 'cnt' trackbar.  Press 'q'
    to quit."""
    def create_trackbar(winname):
        # Sliders: binarization threshold, Gaussian blur on/off, blur
        # kernel size, and which contour index to inspect.
        cv2.createTrackbar('pos', winname, 130, 255, lambda x: x)
        cv2.createTrackbar('gauss', winname, 1, 1, lambda x: x)
        cv2.createTrackbar('ksize', winname, 3, 31, lambda x: x)
        cv2.createTrackbar('cnt', winname, 0, 140, lambda x: x)
    def get_trackbar_pos(winname):
        # Read the current slider values back each frame.
        pos = cv2.getTrackbarPos('pos', winname)
        gauss = cv2.getTrackbarPos('gauss', winname)
        ksize = cv2.getTrackbarPos('ksize', winname)
        index = cv2.getTrackbarPos('cnt', winname)
        return pos, gauss, ksize, index
    def is_parent(hi):
        # hierarchy row layout: [next, previous, first_child, parent].
        # "parent" here = no siblings, no parent, but has a child.
        # NOTE: `index` is read from the enclosing while-loop scope.
        return hi[0][index][0] == -1 and \
               hi[0][index][1] == -1 and \
               hi[0][index][3] == -1 and \
               hi[0][index][2] != -1
    img = CVTest(filename)
    img.resize(None, 0.12, 0.12)
    mainwin = 'thr'
    cv2.namedWindow(mainwin)
    create_trackbar(mainwin)
    parent_index = None
    while True:
        # binarize (optionally after a Gaussian blur)
        pos, gauss, ksize, index = get_trackbar_pos(mainwin)
        gray = img.gray
        if gauss:
            gray = cv2.GaussianBlur(img.gray, (ksize, ksize), 0)
        ret, thr = cv2.threshold(gray, pos, 255, cv2.THRESH_BINARY)
        # show the binary image
        cv2.imshow(mainwin, thr)
        # contours (RETR_TREE keeps the full nesting hierarchy)
        what, cnts, hi = cv2.findContours(thr.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # work on a copy of the original; pad the bounding rectangle
        im = img.im.copy()
        out_side = 10
        # draw either all contours or just the selected one
        if index >= len(cnts):
            cv2.drawContours(im, cnts, -1, (0, 0, 255), 2)
        else:
            cv2.drawContours(im, cnts, index, (0, 0, 255), 2)
            # bounding rectangle around the selected contour
            x, y, w, h = cv2.boundingRect(cnts[index])
            im = cv2.rectangle(im, (x-out_side, y-out_side), (x+w+out_side, y+h+out_side), (0, 255, 0), 2)
            if is_parent(hi):
                parent_index = index
                print('边缘')
            else:
                # a direct child of the last seen parent contour
                if parent_index is not None and hi[0][index][3] == parent_index:
                    print('数字')
            print(hi[0][index])
        # print(len(cnts))
        cv2.imshow('im', im)
        if cv2.waitKey(100) == ord('q'):
            break
if __name__ == '__main__':
    demo_split('src/20180805160122.jpg')
| true |
c14e1bf0c6bbb2a377249803c4e3a0cdd03d4b0c | Python | urnotyuhanliu/ormuco | /questionB.py | UTF-8 | 853 | 3.28125 | 3 | [] | no_license | def compareVersion(version1, version2):
versions1 = [int(v) for v in version1.split(".")]
versions2 = [int(v) for v in version2.split(".")]
for i in range(max(len(versions1),len(versions2))):
v1 = versions1[i] if i < len(versions1) else 0
v2 = versions2[i] if i < len(versions2) else 0
if v1 > v2:
return 1
elif v1 < v2:
return -1
return 0
# Demo: exercise the three outcomes (equal, greater, smaller).
version1, version2 = "1.1", '1.1'
result1 = compareVersion(version1,version2)
version3, version4 = "1.2.1", '1.1'
result2 = compareVersion(version3,version4)
version5, version6 = "0.1.5.4", '0.1.6.7.8'
result3 = compareVersion(version5,version6)
print(version1, "vs", version2,"=", result1,'\n',
      version3, "vs", version4,"=", result2,'\n',
      version5, "vs", version6,"=", result3)
| true |
7e87977cf5d9cc81edc522b3e42d4da283e32eb8 | Python | deerajnagothu/pyaes | /RSA only/single_server_fog_rsa.py | UTF-8 | 1,474 | 3.0625 | 3 | [
"MIT"
] | permissive | import socket # Import socket module
import rsa
# Python 2 TCP server: hand the RSA public key to a client that greets
# with "Ahoy!", then receive an RSA-encrypted file in 16-byte chunks and
# write the decrypted plaintext to server_receive_text.txt.
(server_public, server_private) = rsa.newkeys(128)
# NOTE(review): subscripting the key object (server_public['n']) depends
# on the installed `rsa` version supporting item access -- confirm.
print(type(server_public['n']))
str_pub = str(server_public)
port = 60000 # Reserve a port for your service.
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
s.bind((host, port)) # Bind to the port
s.listen(5) # Now wait for client connection.
print 'Server listening....'
flag = 0
while True:
    conn, addr = s.accept() # Establish connection with client.
    print 'Got connection from', addr
    hello = conn.recv(1024)
    print('Server received', repr(hello))
    if hello == "Ahoy!":
        # Send the stringified public key so the client can encrypt.
        dic = {'key':str_pub}
        conn.send(dic['key'])
    with open('server_receive_text.txt', 'wb') as f:
        print "Server is ready to receive !"
        while True:
            # NOTE(review): recv(16) assumes each 16-byte read is exactly
            # one RSA ciphertext block; TCP gives no such framing
            # guarantee, so short/merged reads would break decryption.
            data = conn.recv(16)
            # print('data=', data)
            if not data:
                # `flag` stays 0 only if no chunk was ever decrypted.
                if flag == 0:
                    print("File receiving failed")
                else:
                    print("Successfully got the file !")
                break
            msg = rsa.decrypt(data,server_private)
            f.write(msg.decode('utf8'))
            flag = 1
    # f is already closed by the with-block; this close is redundant.
    f.close()
    conn.close()
    print("Connection Closed !")
    print("Now waiting for new connection again !")
| true |
fbc8f599c3a4737ee528cc19a10b4955dc1a2903 | Python | fikrihasani/NLP_Tweet | /preprocessing.py | UTF-8 | 1,572 | 3.109375 | 3 | [] | no_license | # imports
import re
from string import punctuation
# class
class Preprocessing():
    """Tweet pre-processing pipeline.

    Cleans raw tweets (hyperlinks, @-mentions, punctuation) and derives
    a class label per row from the 'Keluhan'/'Respon' annotation columns.
    Results accumulate on the instance across calls.
    """

    def __init__(self):
        self.tweets_processed = []   # cleaned tweet strings
        self.tweets_splitted = []    # cleaned tweets as token lists
        self.kelas = []              # one class label per tweet

    def normalization(self, sentence):
        """Placeholder normalisation step; currently the identity."""
        return sentence

    def Remove_Punctuation(self, sentence):
        """Drop every character found in string.punctuation."""
        return ''.join(filter(lambda ch: ch not in punctuation, sentence))

    def Remove_Tweet_Mention(self, sentence):
        """Drop whitespace-separated tokens that start with '@'."""
        kept = [token for token in sentence.split() if not token.startswith('@')]
        return ' '.join(kept)

    def Remove_Hyperlink(self, text):
        """Replace every URL-looking span with a single space."""
        text = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', " ", text)
        return text

    def Process_Tweet(self, tweets):
        """Clean each tweet (links, then mentions, then punctuation) and
        store both the cleaned string and its token list."""
        for raw in tweets:
            cleaned = self.Remove_Punctuation(
                self.Remove_Tweet_Mention(self.Remove_Hyperlink(raw)))
            self.tweets_splitted.append(cleaned.split())
            self.tweets_processed.append(cleaned)

    def Process_Class(self, data_tweet):
        """Label each row of *data_tweet*: 'Keluhan' (complaint) takes
        priority over 'Respon' (response); anything else is 'Netral'."""
        for row in data_tweet.index:
            if data_tweet['Keluhan'][row] == 'Ya':
                label = 'Keluhan'
            elif data_tweet['Respon'][row] == 'Ya':
                label = 'Respon'
            else:
                label = 'Netral'
            self.kelas.append(label)
611e5f0782cfa2893a39ca3a778e1d4b92ad291b | Python | tklutey/ffldraft | /src/draft/state.py | UTF-8 | 745 | 2.78125 | 3 | [] | no_license | # import pandas as pd
#
# import configurations
# from src.DraftState import DraftState
#
#
# def get_state(freeagents):
# num_competitors = 2
#
# num_rounds = 16
# turns = []
# # generate turns by snake order
# for i in range(num_rounds):
# turns += reversed(range(num_competitors)) if i % 2 else range(num_competitors)
# state = pd.read_csv(configurations.ROOT_DIR + '/data/draft.csv', index_col=0)
#
# for row in range(num_competitors):
# team = state.iloc[row]
# roster = team.tolist()
# for player in roster:
# freeagents.remove(player)
# print(row)
#
#
# # state = DraftState(rosters, turns, freeagents)
#
# if __name__ == '__main__':
# get_state()
#
| true |
2d02b017161ed2b10c48b89660190341f6875b30 | Python | alltheplaces/alltheplaces | /locations/spiders/vivacom_bg.py | UTF-8 | 1,846 | 2.546875 | 3 | [
"CC0-1.0",
"MIT"
] | permissive | import re
from scrapy import Spider
from locations.items import Feature
class VivacomBGSpider(Spider):
    """Scrapy spider for Vivacom (Bulgarian telecom) store locations.

    Fetches the store list as JSON and yields one Feature per own-brand
    store, normalising the Bulgarian opening-hours text into
    OSM-style "Day[-Day] HH:MM-HH:MM" rules.
    """
    name = "vivacom_bg"
    item_attributes = {
        "brand": "Vivacom",
        "brand_wikidata": "Q7937522",
        "country": "BG",
    }
    start_urls = ["https://www.vivacom.bg/bg/stores/xhr?method=getJSON"]
    def parse(self, response):
        for store in response.json():
            # Skip partner (reseller) outlets, identified by their image path.
            if "partners" in store["store_img"]:
                continue
            item = Feature()
            item["ref"] = store["store_id"]
            item["lat"], item["lon"] = store["latlng"].split(",")
            item["name"] = store["store_name"]
            item["phone"] = store["store_phone"]
            # Translate Bulgarian day abbreviations to two-letter English
            # codes so the regex below can match them with \w{2}.  The
            # final replace appears to normalise a non-breaking space --
            # TODO confirm the first argument really is U+00A0.
            opening_hours = (
                store["store_time"]
                .strip()
                .replace("почивен ден", "off")
                .replace("пон.", "Mo")
                .replace("пт.", "Fr")
                .replace("пет.", "Fr")
                .replace("съб.", "Sa")
                .replace("съб", "Sa")
                .replace("нед.", "Su")
                .replace("нд.", "Su")
                .replace(" ", " ")
            )
            oh = []
            # Groups: start day, optional end day, start time, (separator),
            # end time, (separator); times may use '.' or ':'.
            for rule in re.findall(
                r"(\w{2})\s?-?\s?(\w{2})?:?\s?(\d{2}(\.|:)\d{2})\s?-\s?(\d{2}(\.|:)\d{2})",
                opening_hours,
            ):
                start_day, end_day, start_time, _, end_time, _ = rule
                start_time = start_time.replace(".", ":")
                end_time = end_time.replace(".", ":")
                if end_day:
                    oh.append(f"{start_day}-{end_day} {start_time}-{end_time}")
                else:
                    oh.append(f"{start_day} {start_time}-{end_time}")
            item["opening_hours"] = "; ".join(oh)
            yield item
| true |
11db461001ab3b78d937796d4709d9593e7c5bd1 | Python | giangnguyen2412/coding-interview | /dynamic_programming/min_partition.py | UTF-8 | 634 | 3.046875 | 3 | [] | no_license | def findMinRec(arr, arr_len, calculated_sum, total_sum):
if (arr_len == 0):
return abs((total_sum - calculated_sum) - calculated_sum)
dct = min(findMinRec(arr, arr_len - 1, calculated_sum + arr[arr_len - 1], total_sum),
findMinRec(arr, arr_len - 1, calculated_sum, total_sum))
return dct
def minPartition(arr):
    """Return the minimum possible |sum(S1) - sum(S2)| over all ways of
    splitting *arr* into two subsets S1 and S2.

    Replaces the exponential findMinRec recursion with a subset-sum
    sweep over reachable sums (O(n * number_of_sums)); returns the same
    values, and additionally handles the empty list (returns 0).
    """
    total_sum = sum(arr)
    # All subset sums reachable with the elements seen so far.
    reachable = {0}
    for value in arr:
        reachable |= {s + value for s in reachable}
    # A subset summing to s leaves total_sum - s on the other side.
    return min(abs(total_sum - 2 * s) for s in reachable)
#arr = [5,1,4,3,4,1,5,6,7,8,2,5,2,6,1,7,8,77,44,88,44,44,11,4,1,5,6,7,8,2,5,2,6,1,7,8,77,44,88,44,44,11]
#print(sum(arr))
arr = [1,6,11,5]
print(minPartition(arr))
| true |
2c0f4eedee63f596d02abc0c2ccbb747d270597c | Python | samuelpeet/conehead | /temp2.py | UTF-8 | 1,237 | 3.046875 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from numba import cuda
@cuda.jit(device=True)
def mandel(x, y, max_iters):
    """
    Given the real and imaginary parts of a complex number,
    determine if it is a candidate for membership in the Mandelbrot
    set given a fixed number of iterations.

    Returns the iteration at which |z| escaped 2 (|z|^2 >= 4), or 255
    if it never escaped within max_iters iterations.
    """
    i = 0
    c = complex(x, y)
    z = 0.0j
    for i in range(max_iters):
        z = z * z + c
        # Compare |z|^2 against 4 to avoid a square root.
        if (z.real * z.real + z.imag * z.imag) >= 4:
            return i
    return 255
@cuda.jit
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
    """CUDA kernel: one thread per pixel, writing the Mandelbrot escape
    value for that pixel's point in the complex plane into *image*."""
    height = image.shape[0]
    width = image.shape[1]
    pixel_size_x = (max_x - min_x) / width
    pixel_size_y = (max_y - min_y) / height
    x, y = cuda.grid(2)
    # Bounds guard: the launch grid is rounded up past the image edges.
    if x < width and y < height:
        real = min_x + x * pixel_size_x
        imag = min_y + y * pixel_size_y
        color = mandel(real, imag, iters)
        image[y, x] = color

# Host side: allocate the output image and launch enough 32x32 blocks
# to cover every pixel (hence the +1 round-up on each axis).
width = 15000
height = 10000
image = np.zeros((height, width), dtype=np.uint8)
pixels = width * height
nthreads = 32
nblocksy = (height // nthreads) + 1
nblocksx = (width // nthreads) + 1
create_fractal[(nblocksx, nblocksy), (nthreads, nthreads)](
    -2.0, 1.0, -1.0, 1.0, image, 20
)
| true |
550cfd9baa2f54636a2d44ad1f080af8c62cb2b7 | Python | shreyassk18/MyPyCharmProject | /Basic Programs/Fibonacci.py | UTF-8 | 271 | 4.375 | 4 | [] | no_license | #A series of numbers in which each number is the sum of the two preceding numbers
f1=0
f2=1
f = int(input("Enter a range\n"))
print("The fibonacci series for %d is:"%(f))
print(f1)
print(f2)
for i in range(1, f+1):
fib = f1+f2
print(fib)
f1=f2
f2=fib
| true |
7a514008f4c84094234b3a6ac97a0e90898f71f2 | Python | shankar7791/MI-10-DevOps | /Personel/Siddhesh/Practice/18feb/ShortHandIfElse.py | UTF-8 | 76 | 3.71875 | 4 | [] | no_license |
a = 85
b = 25
print("A") if a > b else print("=") if a == b else print("B") | true |
3afd5d56151d9181d8b2af7c58ec8e89c9fdf170 | Python | asinsinwal/Supervised-Learning | /supervised_sentiment.py | UTF-8 | 10,640 | 2.921875 | 3 | [] | no_license | import sys
import collections
import sklearn.naive_bayes
import sklearn.linear_model
import nltk
import random
random.seed(0)
from gensim.models.doc2vec import LabeledSentence, Doc2Vec
from collections import Counter
from sklearn.naive_bayes import BernoulliNB, GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
#nltk.download("stopwords") # Download the stop words from nltk
# User input path to the train-pos.txt, train-neg.txt, test-pos.txt, and test-neg.txt datasets
if len(sys.argv) != 3:
print "python sentiment.py <path_to_data> <0|1>"
print "0 = NLP, 1 = Doc2Vec"
exit(1)
path_to_data = sys.argv[1]
method = int(sys.argv[2])
def main():
    """Load the datasets, build feature vectors with the method chosen
    on the command line (0 = binary NLP features, 1 = Doc2Vec), fit the
    two classifiers, and print their evaluation.

    NOTE(review): if `method` is neither 0 nor 1 the *_vec/model names
    are never bound and the calls below raise NameError -- the argv
    check at module level only validates the argument count.
    """
    train_pos, train_neg, test_pos, test_neg = load_data(path_to_data)

    if method == 0:
        train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec = feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg)
        nb_model, lr_model = build_models_NLP(train_pos_vec, train_neg_vec)
    if method == 1:
        train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec = feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg)
        nb_model, lr_model = build_models_DOC(train_pos_vec, train_neg_vec)
    print "Naive Bayes"
    print "-----------"
    evaluate_model(nb_model, test_pos_vec, test_neg_vec, True)
    print ""
    print "Logistic Regression"
    print "-------------------"
    evaluate_model(lr_model, test_pos_vec, test_neg_vec, True)
def _read_word_lists(path):
    """Read one document per line from *path*: lowercase, whitespace-split,
    keeping only tokens of length >= 3 (an empty line yields an empty list)."""
    docs = []
    with open(path, "r") as f:
        for line in f:
            docs.append([w.lower() for w in line.strip().split() if len(w) >= 3])
    return docs

def load_data(path_to_dir):
    """
    Loads the train and test set into four different lists.

    *path_to_dir* must end with a path separator; the files
    train-pos.txt, train-neg.txt, test-pos.txt and test-neg.txt are
    expected inside it.  Returns (train_pos, train_neg, test_pos,
    test_neg), each a list of token lists, one per line.
    """
    # The four files share identical per-line parsing; the previous
    # copy-pasted loops are folded into _read_word_lists.
    train_pos = _read_word_lists(path_to_dir + "train-pos.txt")
    train_neg = _read_word_lists(path_to_dir + "train-neg.txt")
    test_pos = _read_word_lists(path_to_dir + "test-pos.txt")
    test_neg = _read_word_lists(path_to_dir + "test-neg.txt")
    return train_pos, train_neg, test_pos, test_neg
def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):
"""
Returns the feature vectors for all text in the train and test datasets.
"""
# English stopwords from nltk
stopwords = set(nltk.corpus.stopwords.words('english'))
# Determine a list of words that will be used as features.
# This list should have the following properties:
# (1) Contains no stop words
# (2) Is in at least 1% of the positive texts or 1% of the negative texts
# (3) Is in at least twice as many postive texts as negative texts, or vice-versa.
# YOUR CODE HERE
print "Entered into NLP featuring"
stopwords = list(stopwords)
features = list()
positive_diction = dict()
negative_diction = dict()
#Counter is useful to get the count of redundant words, and keep a single entry for every word into the list/dict
positive_diction = Counter(getAllWordList(train_pos))
negative_diction = Counter(getAllWordList(train_neg))
#Trial ccode to check values
'''
for keys,values in positive_diction.items():
print(keys)
print(values)
print "Length : %d" %len(positive_diction)
'''
#List of words into features from positive dictionary: fullfillying all properties
for key, value in positive_diction.iteritems():
if(key not in features and (value >= len(train_pos)/100 or negative_diction.get(key) > len(train_neg)/100) and value >= 2*negative_diction.get(key) and key not in stopwords):
features.append(key)
#List of words from negative dictionary: fullfillying all properties
for key, value in negative_diction.iteritems():
if(key not in features and (value >= len(train_neg)/100 or positive_diction.get(key) > len(train_pos)/100) and value >= 2*positive_diction.get(key) and key not in stopwords):
features.append(key)
#print "Features" + str(len(features)) #To check length of the features list
# Using the above words as features, construct binary vectors for each text in the training and test set.
# These should be python lists containing 0 and 1 integers.
# YOUR CODE HERE
train_pos_vec = map(lambda x: map(lambda y: 1 if y in x else 0, features), train_pos)
train_neg_vec = map(lambda x: map(lambda y: 1 if y in x else 0, features), train_neg)
test_pos_vec = map(lambda x: map(lambda y: 1 if y in x else 0, features), test_pos)
test_neg_vec = map(lambda x: map(lambda y: 1 if y in x else 0, features), test_neg)
print "Featuring successfully executed."
# Return the four feature vectors
return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec
#Function to get all words, as many as times they occur
def getAllWordList(training_data):
    """Flatten *training_data* into one list, keeping each word at most
    once per document, in first-seen order -- so Counter() over the
    result yields per-word document frequencies."""
    collected = []
    for document in training_data:
        seen_in_doc = set()
        for token in document:
            if token not in seen_in_doc:
                seen_in_doc.add(token)
                collected.append(token)
    return collected
def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):
    """
    Returns the feature vectors for all text in the train and test datasets.

    Trains a gensim Doc2Vec model over all four datasets (tagged
    'train_pos<i>', 'train_neg<i>', etc.) and returns the learned
    document vectors in the original order of each dataset.
    """
    # Doc2Vec requires LabeledSentence objects as input.
    # Turn the datasets from lists of words to lists of LabeledSentence objects.
    print "Entered into Doc2Vec Featuring"
    labeled_train_pos = labeledDataSetToList(train_pos, 'train_pos')
    labeled_train_neg = labeledDataSetToList(train_neg, 'train_neg')
    labeled_test_pos = labeledDataSetToList(test_pos, 'test_pos')
    labeled_test_neg = labeledDataSetToList(test_neg, 'test_neg')
    # Initialize model (100-dim vectors, window 10, negative sampling)
    model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)
    sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg
    model.build_vocab(sentences)
    # Train the model for 5 epochs, reshuffling the corpus each pass.
    # This may take a bit to run
    for i in range(5):
        print "Training iteration %d" % (i)
        random.shuffle(sentences)
        model.train(sentences)
    # Use the docvecs function to extract the feature vectors for the
    # training and test data, looked up by the tags assigned above.
    train_pos_vec = list()
    train_neg_vec = list()
    test_pos_vec = list()
    test_neg_vec = list()
    for i in range(len(train_pos)):
        train_pos_vec.append(model.docvecs['train_pos'+str(i)])
    for i in range(len(train_neg)):
        train_neg_vec.append(model.docvecs['train_neg'+str(i)])
    for i in range(len(test_pos)):
        test_pos_vec.append(model.docvecs['test_pos'+str(i)])
    for i in range(len(test_neg)):
        test_neg_vec.append(model.docvecs['test_neg'+str(i)])
    print "Featuring completed successfully."
    # Return the four feature vectors
    return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec
#Funtion to get list from labeled data set
def labeledDataSetToList(obj_list, type_label):
    """Wrap each word list in a gensim LabeledSentence tagged
    '<type_label><index>' (e.g. 'train_pos0', 'train_pos1', ...)."""
    return [LabeledSentence(words, [type_label + str(idx)])
            for idx, words in enumerate(obj_list)]
def build_models_NLP(train_pos_vec, train_neg_vec):
    """
    Returns a BernoulliNB and LogisticRegression model fit to the
    binary NLP feature vectors (positives first, then negatives).
    """
    # Labels aligned with the concatenation order of A below.
    Y = ["pos"]*len(train_pos_vec) + ["neg"]*len(train_neg_vec)
    # Use sklearn's BernoulliNB and LogisticRegression functions to fit two models to the training data.
    # For BernoulliNB, use alpha=1.0 and binarize=None
    # For LogisticRegression, pass no parameters
    #Total positive and negative trained words (0 or 1)
    A = train_pos_vec + train_neg_vec
    # binarize=None because the vectors are already 0/1.
    nb_model = BernoulliNB(alpha = 1.0, binarize = None).fit(A, Y)
    lr_model = LogisticRegression().fit(A, Y)
    print "Models build successfully."
    return nb_model, lr_model
def build_models_DOC(train_pos_vec, train_neg_vec):
    """
    Returns a GaussianNB and LogisticRegression model fit to the
    continuous Doc2Vec vectors (positives first, then negatives).
    """
    # Labels aligned with the concatenation order of B below.
    Y = ["pos"]*len(train_pos_vec) + ["neg"]*len(train_neg_vec)
    # Use sklearn's GaussianNB and LogisticRegression functions to fit two models to the training data.
    # For LogisticRegression, pass no parameters
    #Total positive and negative trained words (0 or 1)
    B = train_pos_vec + train_neg_vec
    # GaussianNB (not Bernoulli) because Doc2Vec features are real-valued.
    nb_model = GaussianNB().fit(B, Y)
    lr_model = LogisticRegression().fit(B, Y)
    print "Models build successfully."
    return nb_model, lr_model
def evaluate_model(model, test_pos_vec, test_neg_vec, print_confusion=False):
    """
    Prints the confusion matrix (optionally) and accuracy of the model.

    Ground-truth labels are implied by the argument order: every vector
    in test_pos_vec is "pos" and every vector in test_neg_vec is "neg".
    """
    # Use the predict function and calculate the true/false positives and true/false negative.
    test_data = ["pos"]*len(test_pos_vec) + ["neg"]*len(test_neg_vec)
    #print "Length of Test Data: " + str(len(test_data)) #Get the length of data
    prediction = model.predict(test_pos_vec + test_neg_vec)
    true_pos = 0
    false_pos = 0
    true_neg = 0
    false_neg = 0
    match = 0
    #Loop to compute true/false positives and true/false negatives
    for i in range(len(test_data)):
        if test_data[i] == prediction[i]:
            match = match + 1
            if test_data[i]=='pos': #Match for all true positives
                true_pos = true_pos + 1
            else: #Match for all true negatives
                true_neg = true_neg + 1
        else:
            if test_data[i] == 'pos': #Match for all false negatives
                false_neg = false_neg + 1
            else: #Match for all false positives
                false_pos = false_pos + 1
    #calculating accuracy: correct predictions over total predictions
    #(the outer float() is redundant but harmless)
    accuracy = float((float)(match)/(float)(len(test_data)))
    if print_confusion:
        print "predicted:\tpos\tneg"
        print "actual:"
        print "pos\t\t%d\t%d" % (true_pos, false_neg)
        print "neg\t\t%d\t%d" % (false_pos, true_neg)
    print "accuracy: %f" % (accuracy)
if __name__ == "__main__":
main()
| true |
b8131bdf74ea77af232bcb83fc4e8688d778dc33 | Python | yiyuli/CSAir | /graph/vertex.py | UTF-8 | 1,646 | 3.734375 | 4 | [] | no_license | class Vertex(object):
"""Vertex object.
Vertex object that stores name, population, country, region, code, continent, timezone, coordinates info and edges starting from it.
It also includes a function that stores an edge which starts from it.
"""
def __init__(self, metro):
"""Constructor of Vertex object.
Args:
metro: JSON object that stores Vertex info.
"""
self.edit(metro)
self.edges = dict()
def __lt__(self, other):
"""Compare function of Vertex object.
"""
return self.code < other.code
def add_edge(self, edge, destination):
"""Store an edge to the edge dictionart of the current Vertex object.
Args:
edge: Edge to be added.
destination: Vertex that is at the destination side of the edge.
"""
self.edges[destination] = edge
def remove_edge(self, destination):
"""Remove an edge from the edge dictionart of the current Vertex object.
Args:
destination: Vertex that is at the destination side of the edge.
"""
if destination in self.edges:
del self.edges[destination]
def edit(self, metro):
"""Edit the information of the Vertex object.
"""
self.name = metro['name']
self.population = metro['population']
self.country = metro['country']
self.region = metro['region']
self.code = metro['code']
self.continent = metro['continent']
self.timezone = metro['timezone']
self.coordinates = str(metro['coordinates'])
| true |
505f7c93854e1a2bc1332f7b35d4d00c333ff291 | Python | oozd/network | /termProjectPart2/s.py | UTF-8 | 1,028 | 3 | 3 | [] | no_license | import socket
import hashlib
import sys
import os
import time
from random import randint
import subprocess
TCP_IP = "10.10.1.2" # brokers IP
TCP_PORT = 8080 # brokers port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # creates tcp socket
s.connect((TCP_IP, TCP_PORT)) # connect it to broker ip and port
def main():
file = open("input.txt", "r") # open the given file
fileString = file.read() # write the file to a string
fileSize = sys.getsizeof(fileString) # 5 000 037 bytes
packetSize = 500 # 500 bytes of packets
idx = 0 # use while sending file packet by packet
currentSize = fileSize
while(currentSize > packetSize): # send file
currentSize = currentSize - packetSize
s.send(fileString[idx*500 : idx*500 + 500]) # Send Packets 500 bytes each
idx = idx + 1
s.send("") # indicates finish packet
s.close()
print hashlib.md5(fileString).hexdigest() # used to check if the file transferred correctly.
# I compared it with the destination file
if __name__ == '__main__':
main()
| true |
8a0fc421a547b9964ba013c6c69d9e98851c3a01 | Python | Aasthaengg/IBMdataset | /Python_codes/p02633/s014509336.py | UTF-8 | 113 | 3.1875 | 3 | [] | no_license | cnt = 0
pos = 360
muki = int(input())
while pos != 0:
pos += muki
pos %= 360
cnt += 1
print(cnt)
| true |
24a0c38daa037b6a49fffd97a1fc4e1fa91eb4ad | Python | WeronikaKomissarova/zot_lab3 | /viewing.py | UTF-8 | 1,595 | 2.96875 | 3 | [] | no_license |
import func as lf
import gggg as lab
from tkinter import *
root=Tk()
root.title('ЦОС')
def clicked():
function = getattr(lf, selected.get())
x,y=function()
lab.dft(y)
def clicked1():
function = getattr(lf, selected.get())
x,y=function()
lab.fft(y)
selected=StringVar()
selected.set('heaviside')
rad1=Radiobutton(root,text='Синус',value='sin',variable=selected).grid(column=0,row=0)
rad2=Radiobutton(root,text='Хивисайд',value='heaviside',variable=selected).grid(column=1,row=0)
rad3=Radiobutton(root,text='Дирак',value='dirac',variable=selected).grid(column=2,row=0)
rad4=Radiobutton(root,text='Случайный сигнал',value='rand_signal',variable=selected).grid(column=0,row=2)
rad5=Radiobutton(root,text='Прямоугольный импульс',value='rectangular',variable=selected).grid(column=1,row=2)
rad6=Radiobutton(root,text='Случайный цифровой',value='random_signal',variable=selected).grid(column=2,row=2)
rad7=Radiobutton(root,text='Шум',value='noise',variable=selected).grid(column=0,row=4)
rad8=Radiobutton(root,text='Радиоимпульс',value='radio_impulse',variable=selected).grid(column=1,row=4)
rad9=Radiobutton(root,text='Синус убывающий \n по экспоненте',value='exp',variable=selected).grid(column=2,row=4)
rad10=Radiobutton(root,text='sin(x)/x',value='sinc',variable=selected).grid(column=0,row=6)
but=Button(root,text='Draw DFT',command=clicked ).grid(column=0,row=8)
but=Button(root,text='Draw FFT',command=clicked1 ).grid(column=1,row=8)
root.mainloop() | true |
75fee1afff4fe2a1087f327d39afc797470a8353 | Python | wfgiles/P3FE | /UM Reboot Python3/Week 1/IDLE run examples.py | UTF-8 | 282 | 3.21875 | 3 | [] | no_license | ##x = {'Christopher Brooks': 'brooksch@umich.edu', 'Bill Gates': 'billg@microsoft.com'}
##x['Christopher Brooks'] #Retrieve a calue by using the index operatorx = {'Christopher Brooks': 'brooksch@umich.edu', 'Bill Gates': 'billg@microsoft.com'}
####x['Christopher Brooks'] #Retrieve a calue by using the index operator
for name, email in x.items():
print(name)
print(email)
| true |
6e2e81b605682c15a6372a0f4c78ceb4cd372560 | Python | jfunky/rwet | /budget2018/budget3.py | UTF-8 | 1,904 | 3.3125 | 3 | [] | no_license | #by jasmine
#april 2017
#rwet hw 8
import pyPdf
import markov
#learned about comparing dictionaries from:
#https://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python
def dict_compare(d1, d2):
    """Return the set of keys that d1 and d2 share with identical values."""
    shared_keys = set(d1) & set(d2)
    return set(key for key in shared_keys if d1[key] == d2[key])
#learned about pyPdf from:
#https://stackoverflow.com/questions/25665/python-module-for-converting-pdf-to-text?noredirect=1&lq=1
# NOTE: this is Python 2 code (print statements below); the opened files are
# never explicitly closed.
pdf = pyPdf.PdfFileReader(open("2018_blueprint.pdf", "rb"))
budgetWords = list()
budgetLines = list()
parkLines = list()
allWords = list()
#split budget lines & create word list
# Periods are turned into newlines so each "line" is roughly one sentence.
for page in pdf.pages:
    string = page.extractText()
    # line = string.split(".")
    line = string.replace(".","\n")
    # print line
    if len(line) > 0:
        budgetLines.append(line)
    for word in line.split():
        if len(word) > 0:
            budgetWords.append(word)
            allWords.append(word)
# read in text
park_str = open('SequoiaNatPark.txt').read()
#split park lines
for line in park_str.split("."):
    # line1 = line.split(" ")
    line1 = line.replace(".","\n")
    parkLines.append(line1)
    for word in line1.split():
        if len(word) > 0:
            allWords.append(word)
#markov stuff
#return generative text based off of the 2018 budget
model_budget = markov.build_model(budgetWords, 3)
generated_budget = markov.generate(model_budget, 2)
#return generative text based off of the parks dept manual
model_park = markov.build_model(park_str.split(), 3)
generated_park = markov.generate(model_park, 2)
# combine based on what these texts have in common
# dict_compare returns the n-gram keys both models share with equal values.
combined = dict_compare(model_budget, model_park)
for element in combined:
    print ' '.join(element)
print ' '.join(generated_budget)
print ' '.join(generated_park)
| true |
d729b62878f16cdef6c60138580e82a81f6de916 | Python | marconeuckensap/colab_marcoNlaetitia | /exersice_sys.py | UTF-8 | 1,112 | 4.0625 | 4 | [] | no_license | #!/usr/bin/python3
# @ Add necessary import statements.
import sys
import random
secret = random.randint(1, 10) # from the module 'random'
guessed = set()  # numbers tried so far (only valid, in-range guesses are recorded)
def guess_number():
    """Ask for one guess; exit the script on success or on an out-of-range guess."""
    # NOTE(review): the next line binds the builtin `input` and is immediately
    # overwritten below — leftover exercise scaffolding (see the '@' marker).
    num = input # @ replace None, you need to ask the user
    try: # You can ignore this for now, we'll come back to it.
        num = int(input("\n what number are you guessing? "))
    except ValueError:
        # Non-numeric input: silently skip this round.
        return
    if num == secret:
        print('You did it!!! \ntimes of guesses:', len(guessed) + 1, 'times!')
        sys.exit(0)
    # @ end script with success
    elif num < 1:
        sys.exit('you guessed to low! ')
    # @ end script with error message (and remove placeholder 'pass')
    elif num > 10:
        sys.exit('you guessed to high! ')
    # @ end script with error message (and remove placeholder 'pass')
    elif num in guessed:
        print('you already guessed number', num, "before! stop using the same numbers!!!", file=sys.stderr)
    # @ print error message, don't end script
    else:
        guessed.add(num)
print('Guess a number from 1 to 10')
while True:
    # Loop forever; guess_number() terminates the process via sys.exit().
    guess_number()
e1780d56a1d36d38a330317684409c15c249285e | Python | psy1088/Algorithm | /practice/Search/search1.py | UTF-8 | 2,296 | 4.25 | 4 | [] | no_license | # p367 정렬된 배열에서 특정 수의 개수 구하기
n, x = 7, 2 # n = number of elements in the sequence, x = the value whose count we want
data = [1, 1, 2, 2, 2, 2, 3]  # sorted input array
# def binary_search(arr, target, start, end):
# # 이진탐색으로 target과 같은 값을 갖는 원소 찾고, 그것을 기준으로 앞뒤로 while문 돌리면서 하나씩 검사
# # 리스트 안에 target의 개수가 적다면 효율적일듯
# cnt, mid = 0, 0
# while start <= end:
# mid = (start + end) // 2
# if arr[mid] == x:
# cnt += 1
# break
# elif arr[mid] > x:
# end = mid - 1
# else:
# start = mid + 1
#
# if cnt > 0:
# for i in [-1, 1]:
# index = mid
# while True:
# index += i
# if arr[index] == x:
# cnt += 1
# else:
# break
# return cnt
# else:
# return -1
# 2번 풀이
def binary_search(arr, target):
    """Return how many times `target` occurs in the sorted list `arr`, or -1 if absent.

    Fix: the original searched for the module-level global `x` instead of the
    `target` parameter, so the argument was silently ignored.
    """
    len_arr = len(arr)
    first_index = first(arr, target, 0, len_arr - 1)
    if first_index is None:
        return -1
    last_index = last(arr, target, 0, len_arr - 1)
    return last_index - first_index + 1
def first(arr, target, start, end):
    """Return the index of the first occurrence of `target` in sorted
    arr[start..end], or None if it is not present.

    Fix: removed leftover debug prints of the search bounds and result.
    """
    while start <= end:
        mid = (start + end) // 2
        # First occurrence: target found here and the element before it
        # (if any) is strictly smaller.
        if (mid == 0 or arr[mid - 1] < target) and arr[mid] == target:
            return mid
        elif arr[mid] >= target:
            end = mid - 1
        else:
            start = mid + 1
def last(arr, target, start, end):
    """Return the index of the last occurrence of `target` in sorted
    arr[start..end], or None if it is not present.

    Fix: the original tested `mid == end - 1`, which missed a target sitting
    at the final index (e.g. last([1, 2, 3, 3], 3, 0, 3) returned 2 instead
    of 3). The correct boundary check is against the last index of the array.
    Also removed a leftover debug print.
    """
    while start <= end:
        mid = (start + end) // 2
        # Last occurrence: target found here and the element after it
        # (if any) is strictly larger.
        if (mid == len(arr) - 1 or target < arr[mid + 1]) and arr[mid] == target:
            return mid
        elif arr[mid] > target:
            end = mid - 1
        else:
            start = mid + 1
# Driver: print how many times x occurs in the sorted `data` list.
print(binary_search(data, x))
| true |
feb6e6a50473ceab58bc779ea6a4443a777c2d57 | Python | 1012560716/jiqixuexi | /matplotlib 学习/matplotlib 3D图.py | UTF-8 | 958 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
2018-04-01
Draw a 3D surface plot.
'''
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
# Create a figure
fig = plt.figure()
# Attach a 3D axes to it
ax = Axes3D(fig)
# Sample values with a step of 0.25
x = np.arange(-4,4,0.25)
y = np.arange(-4,4,0.25)
# Build the X, Y coordinate grid
X,Y = np.meshgrid(x,y)
# Define Z: sqrt gives the radial distance from the origin
R = np.sqrt(X**2+Y**2)
# Take the sine of every element of the matrix
Z = np.sin(R)
# rstride/cstride set the facet size, cmap sets the colour map
ax.plot_surface(X,Y,Z,rstride=1,cstride=1,cmap=plt.get_cmap("rainbow"))
# Projection: zdir projects onto the z axis, offset is the distance from the surface, cmap the colours
ax.contourf(X,Y,Z,zdir="z",offset=-2,cmap="rainbow")
# Limit the z axis
ax.set_zlim(-2,2)
plt.show()
# The displayed figure cannot be rotated here; paste the code into IPython
# and run it with shift+enter —
# then the 3D plot becomes rotatable.
015e86127070b6bcc330ac563e35808345a23574 | Python | SoporteFoji/catastro | /foji_project/foji/api/administrador.py | UTF-8 | 3,371 | 2.546875 | 3 | [] | no_license | from rest_framework.decorators import api_view
from rest_framework.response import Response
from ..serializers.administrator import AdministratorSerializer
from ..models.administrator import Administrator
from ..models.personal_information import PersonalInformation
from ..models.user import User
@api_view(['GET'])
def apiOverview(request):
    """Return a map of the administrator API endpoints (a simple index view)."""
    api_urls = {
        'Detail View': '/administrador/<str:pk>/',
        'Create': '/administrador-create/',
        'Update': '/administrador-update/<str:pk>/',
        'Delete': '/administrador-delete/<str:pk>/',
    }
    return Response(api_urls)
@api_view(['GET'])
def administradorDetail(request, pk):
    """Return the serialized Administrator with primary key `pk`.

    Raises Administrator.DoesNotExist (500) when the id is unknown.
    """
    # NOTE(review): leftover debug prints.
    print("entra aqui")
    print(pk)
    administrador = Administrator.objects.get(id=pk)
    serializer = AdministratorSerializer(administrador, many=False)
    return Response(serializer.data)
@api_view(['POST'])
def administradorCreate(request):
    """Create an Administrator linking existing PersonalInformation and User rows by id.

    Expects request.data = {'personal_information': {'id': ...}, 'user': {'id': ...}}.
    """
    # NOTE(review): leftover debug prints.
    print(request.data['personal_information']['id'])
    print(request.data['user']['id'])
    admin = Administrator(personal_information_id=request.data['personal_information']['id'],user_id=request.data['user']['id'])
    admin.save()
    # NOTE(review): returning the model instance directly (unserialized) —
    # DRF normally requires serializer data here; verify this renders.
    return Response(admin)
@api_view(['POST'])
def administradorCrear(request):
    """Create-or-update an Administrator plus its PersonalInformation and (superuser) User.

    Matches PersonalInformation by email and User by username; always grants
    staff/superuser flags and resets the password via updatepass().
    """
    # NOTE(review): leftover debug prints.
    print(request.data)
    print(request.data['personal_information']['email'])
    print(request.data['user']['username'])
    pi, created1 = PersonalInformation.objects.update_or_create(email=request.data['personal_information']['email'],
                                                                defaults=request.data['personal_information'])
    us, created2 = User.objects.update_or_create(username=request.data['user']['username'],defaults=request.data['user'])
    us.is_staff=True
    us.is_active=True
    us.is_superuser=True
    us.save()
    print(pi.id)
    # Hash the raw password (update_or_create above stored it unhashed).
    updatepass(request.data['user']['password'], us.id)
    # NOTE(review): update_or_create never raises DoesNotExist, so the except
    # branch below looks unreachable; confirm the intended lookup semantics.
    try:
        admin = Administrator.objects.update_or_create(personal_information=pi,user=us)
        #obj = Person.objects.get(first_name='John', last_name='Lennon')
    except Administrator.DoesNotExist:
        admin = Administrator(personal_information=pi,user=us)
        admin.save()
    print(admin)
    #admin.save()
    return Response('exito')
@api_view(['PUT'])
def administradorUpdate(request, pk):
    """Update Administrator `pk` from request.data and reset the linked user's password.

    Expects 'userid' and 'password' keys in the payload alongside the
    serializer fields.
    """
    #print(pk)
    # NOTE(review): this local import shadows the module-level
    # ..models.user.User for the rest of this function (intentional? verify).
    from django.contrib.auth.models import User
    u = User.objects.get(id=request.data['userid'])
    print(u.id)
    #print(personalinformation)
    # NOTE(review): printing a raw password to the log — security concern.
    print(request.data['password'])
    u.set_password(request.data['password'])
    u.save()
    administrador = Administrator.objects.get(id=pk)
    #print(personalinformation)
    print(administrador.user.id)
    serializer = AdministratorSerializer(instance=administrador, data=request.data)
    #print(serializer)
    #print(serializer.is_valid())
    if serializer.is_valid():
        print('es validoooo')
        serializer.save()
    # NOTE(review): on invalid data this still returns serializer.data
    # instead of the validation errors.
    return Response(serializer.data)
def updatepass(passw, pk):
    """Set (hash) the password of the django auth User with primary key `pk`."""
    from django.contrib.auth.models import User
    u = User.objects.get(id=pk)
    u.set_password(passw)
    u.save()
'''
def updatepass(passw, pk):
from django.contrib.auth.models import User
u = User.objects.get(id=pk)
u.set_password(passw)
u.save()
'''
@api_view(['DELETE'])
def administradorDelete(request, pk):
    """Delete the Administrator with primary key `pk` and confirm in Spanish."""
    administrador = Administrator.objects.get(id=pk)
    administrador.delete()
    return Response('Administrador borrado exitosamente')
de4a53b55ccd4a62656d06b1880037d8377e8331 | Python | shovonploan/python_short_rpojects | /Connect_Four/script.py | UTF-8 | 5,931 | 3.265625 | 3 | [] | no_license | import numpy as np
import pygame
import sys
import math
board = np.zeros((6, 7))
turn = 0
game = 0
pl1p = 0
pl2p = 0
tie = 0
end = False
win = None
class Error (Exception):
    """Base class for the game's input-validation exceptions."""
    pass
class InputError(Error):
    """Raised when a chosen column number is outside the accepted range."""
    def __init__(self, message):
        self.message = message
class SlotError(Error):
    """Raised when the chosen column is already full."""
    def __init__(self, message):
        self.message = message
def er1(choose, num):
    """Return True when `choose` equals one of the integers 1..num."""
    # range.__contains__ compares by equality, matching the original
    # element-by-element loop (so 3.0 counts as 3).
    return choose in range(1, num + 1)
def process(num, board,col):
    """Validate the clicked column and return a 0-based column index.

    num: highest menu number (7 columns -> num becomes 8 after the +1 below);
    col: the column derived from the mouse click. Returns choose - 1, so
    col 1..6 map to indices 0..5 and col 7 is remapped to 0 then returned
    as -1 (the last column via negative indexing).
    """
    num += 1
    choose = 0
    # NOTE(review): `col` never changes inside this loop, so an invalid or
    # full column makes the loop spin forever printing the same message.
    while True:
        try:
            choose = col
            if not(er1(choose, num)):
                raise InputError('Invalid.\nPlease Enter a correct number.')
            elif (choose == 8):
                raise InputError('Invalid.\nPlease Enter a correct number.')
            elif (choose == 7):
                choose = 0
            # Top cell non-empty means the column is full.
            elif not(board[0,choose] == np.zeros((1))):
                raise SlotError('Column is not empty')
        except InputError as e:
            print(e.message)
        except SlotError as e:
            print(e.message)
        except ValueError:
            print('Please Enter a correct NUMBER!')
        else:
            break
    return choose-1
def score_board(board):
    """Show the final board and running score, then ask whether to keep playing.

    Sets the global `end` flag when the player answers 2 (No).
    Fix: the original raised/caught `Error.InputError`, but InputError is a
    top-level class (not an attribute of Error), so answering with an
    out-of-range number crashed with AttributeError instead of re-prompting.
    """
    display_board(board)
    global pl1p, pl2p, end, game, tie
    print(
        f'Total Number of game:{game}\nPlayer 1 won:{pl1p}\nPlayer 2 won:{pl2p}\n Numer of tie:{tie}')
    choose = 0
    print('Wanna play more?\n1=Yes\t2=No')
    while True:
        try:
            choose = int(input())
            if not(choose == 1 or choose == 2):
                raise InputError(
                    'Invalid.\nPlease Enter a correct number.')
        except InputError as e:
            print(e.message)
        except ValueError:
            print('Please Enter a correct NUMBER!')
        else:
            break
    if choose == 2:
        end = True
def display_board(b):
    """Print the numpy board to the console."""
    print(b)
def drop_peace(selection, board, turn):
    """Drop the current player's piece into column `selection`.

    `turn` is 0-based; the marker written into the board is turn + 1.
    The piece lands in the lowest empty cell of the column.
    """
    marker = turn + 1
    landing_row = 0
    # Scan top-to-bottom, remembering the last empty cell before the first
    # occupied one (exactly the original np.allclose-based scan).
    for depth in range(6):
        if board[depth][selection] != 0:
            break
        landing_row = depth
    board[landing_row, selection] = marker
def handle_turrn(board,col):
    """Handle one move: validate the clicked column, toggle the global turn, drop a piece.

    Note the global `turn` is flipped *before* drop_peace, so player 1's
    pieces are stored as 2 and player 2's as 1 (drop_peace writes turn + 1);
    win_or_tie()'s win codes rely on this mapping.
    """
    global turn
    if turn == 0:
        print("Player 1 Make Your Selection (1-7)")
        selection = process(7, board,col)
        turn = 1
        drop_peace(selection, board, turn)
    else:
        print("Player 2 Make Your Selection (1-7)")
        selection = process(7, board,col)
        turn = 0
        drop_peace(selection, board, turn)
def win_or_tie(board):
    """Return True when four equal pieces are connected; set the global `win`.

    win = 1 when four 2-pieces connect, win = 2 when four 1-pieces connect
    (consistent with handle_turrn's turn/marker mapping).
    NOTE(review): despite the name, no tie/draw (full board) detection is
    visible here, and only the bottom-left-to-top-right diagonal is scanned —
    the other diagonal direction is never checked.
    """
    global win
    # Horizontal runs, scanned from the bottom row up, in both directions
    # (the second inner loop re-checks the same windows in reverse).
    for r in range(5,-1,-1):
        for c in range(4):
            if board[r][c] == 2 and board[r][c+1] == 2 and board[r][c+2] == 2 and board[r][c+3] == 2:
                win = 1
                return True
            if board[r][c] == 1 and board[r][c+1] == 1 and board[r][c+2] == 1 and board[r][c+3] == 1:
                win = 2
                return True
        for c in range(6,2,-1):
            if board[r][c] == 2 and board[r][c-1] == 2 and board[r][c-2] == 2 and board[r][c-3] == 2:
                win = 1
                return True
            if board[r][c] == 1 and board[r][c-1] == 1 and board[r][c-2] == 1 and board[r][c-3] == 1:
                win = 2
                return True
    # Vertical runs, again scanned in both directions per column.
    for c in range(7):
        for r in range(3):
            if board[r][c] == 2 and board[r+1][c] == 2 and board[r+2][c] == 2 and board[r+3][c] == 2:
                win = 1
                return True
            if board[r][c] == 1 and board[r+1][c] == 1 and board[r+2][c] == 1 and board[r+3][c]== 1:
                win = 2
                return True
        for r in range(5,2,-1):
            if board[r][c] == 2 and board[r-1][c] == 2 and board[r-2][c] == 2 and board[r-3][c] == 2:
                win = 1
                return True
            if board[r][c] == 1 and board[r-1][c] == 1 and board[r-2][c] == 1 and board[r-3][c] == 1:
                win = 2
                return True
    # Diagonal runs going up-right from rows 3..5.
    for r in range(3,6):
        for c in range(0,4):
            if board[r][c]==2 and board[r-1][c+1]==2 and board[r-2][c+2] == 2 and board[r-3][c+3]==2:
                win = 1
                return True
            if board[r][c]==1 and board[r-1][c+1]==1 and board[r-2][c+2] == 1 and board[r-3][c+3]==1:
                win = 2
                return True
    return False
def draw_board(board):
    """Render the board onto the global pygame `screen` and refresh the display.

    First pass draws the blue frame with empty (black) slots; second pass
    overlays the pieces. Note the piece pass flips rows (height - ...), so
    board row 5 (bottom of the array) is drawn at the bottom of the window.
    """
    for c in range(7):
        for r in range(6):
            pygame.draw.rect(screen,(0,0,255), (c*SQUARESIZE, r*SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))
            pygame.draw.circle(screen, (0,0,0), (int(c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)
    for c in range(7):
        for r in range(6):
            if board[r][c] == 1:
                pygame.draw.circle(screen, (255,0,0), (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
            elif board[r][c] == 2:
                pygame.draw.circle(screen, (255,255,0), (int(c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)
    pygame.display.update()
# Window geometry: 7 columns x (6 rows + 1 header row) of SQUARESIZE pixels.
pygame.init()
SQUARESIZE = 100
width = 7 * SQUARESIZE
height = (6+1) * SQUARESIZE
size = (width, height)
RADIUS = int(SQUARESIZE/2 - 5)
screen = pygame.display.set_mode(size)
draw_board(board)
pygame.display.update()
# NOTE(review): "monspace" looks like a typo for "monospace"; SysFont falls
# back to a default font, and `myfont` is never used below.
myfont = pygame.font.SysFont("monspace", 75)
def play_game():
    """Run one game: poll pygame events, translate clicks into moves, show the score.

    Fixes: the quit check used `pygame.Quit` (AttributeError at runtime —
    the constant is pygame.QUIT), and `sys.exit` was referenced but never
    called, so closing the window did nothing.
    """
    global win, board
    win = None
    game_over = False
    while not game_over:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Map the click's x position to a board column.
                posx = event.pos[0]
                col = int(math.floor(posx/SQUARESIZE))
                handle_turrn(board,col)
                if win_or_tie(board):
                    game_over = True
    score_board(board)
if __name__ == "__main__":
    # Draw the initial empty board, then enter the game loop.
    draw_board(board)
    # Fix: the original was the bare attribute `pygame.display.update`
    # (never called); now the display is actually refreshed.
    pygame.display.update()
    play_game()
| true |
416c2798e96987b753db7b7e3ce3d84769364136 | Python | ShimizuKo/AtCoder | /ABC/130-139/134/E.py | UTF-8 | 295 | 2.921875 | 3 | [] | no_license | import bisect
from collections import deque
N = int(input())
A = []
for _ in range(N):
    a = int(input())
    A.append(a)
# Greedy cover with binary search (patience-sorting style): `b` holds, for
# each open subsequence, its current front value. A new value either starts
# a new subsequence (appendleft when it is <= the minimum) or replaces the
# largest front strictly below it. The answer is the number of subsequences.
# Presumably this is AtCoder ABC134 E — confirm against the problem statement.
b = deque([])
for a in A:
    if len(b) == 0:
        b.append(a)
    else:
        if a <= b[0]:
            b.appendleft(a)
        else:
            # bisect works on the deque via __getitem__ (O(n) indexing).
            b[bisect.bisect_left(b, a) - 1] = a
print(len(b))
8021e61783f8891282f3ba721aeab4447302a3f2 | Python | cs-fullstack-fall-2018/python-review-exercise-3-myiahm | /Ex5.py | UTF-8 | 277 | 3.328125 | 3 | [
"Apache-2.0"
] | permissive | def forLoopFunction():
smallArray=[]
while True:
userInput=input("whatever?: or 'q' to quit ")
if userInput== "q":
for a in smallArray:
print(a)
break
else:smallArray.append(userInput)
forLoopFunction() | true |
35b964b344aa429424887722e09a4993b4758095 | Python | AmartyaSingh/Saluseon | /demo.py | UTF-8 | 522 | 2.84375 | 3 | [] | no_license | # importing basic modules
import requests
import json
# api-endpoint
URL = "<<server url>>"  # placeholder; replace with the real endpoint before use
# generating data here for demo purposes
feat = list(range(24))
s_id = 1
#creating a json object
PARAMS = json.dumps({s_id:{'feat':feat}})
# defining a params dict for the parameters to be sent to the API
# NOTE(review): the JSON payload is smuggled inside a 'data' *header* rather
# than sent as the request body — presumably `data=PARAMS` (or `json=`) was
# intended; confirm the server's contract.
header_list = {'content-type':'application/json', 'data':PARAMS}
# sending get request and saving the response as response object
r = requests.post(url = URL, headers = header_list)
#response code
print(r)
| true |
886dad240fee282eb7bc931193550daf5fe64978 | Python | wizardcapone/Basic-IT-Center-Python | /homework3/240.py | UTF-8 | 364 | 3.078125 | 3 | [] | no_license | def input_num(message):
try:
i = float(input(message))
return i
except:
print("mutqagreq miayn tiv")
# Endless loop: read 4 numbers per round and count multiples of 7.
# Prompts/messages are Armenian transliterations ("enter positive number i",
# "count of elements that are multiples of 7").
while True:
    my_arr = []
    for i in range(1,5):
        n = input_num('mutqagreq drakan tiv-' + str(i) + '\n')
        # NOTE(review): input_num returns None on invalid input, which makes
        # the `% 7` below raise TypeError.
        my_arr.append(n)
    count = 0
    for j in range(len(my_arr)):
        if my_arr[j] % 7 == 0:
            count += 1
    print('7in bazmapatik tarreri qanak@ - ',count)
| true |
9a068223f60317f8caec9172d40cb67aedbb9378 | Python | zjb617/Python | /project/date_visualization/the_csv_file_format/sitka_highs_lows.py | UTF-8 | 1,301 | 3.5 | 4 | [] | no_license | import csv
from datetime import datetime
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# Enable CJK text rendering
plt.rcParams['font.sans-serif'] = ['SimHei'] # render CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
filename = 'D:/Code/Python/project/date_visualization/the_csv_file_format/data/sitka_weather_2018_simple.csv'
with open(filename) as f:
    reader = csv.reader(f)
    header_row = next(reader)
    # Extract the date, daily high and daily low from the file.
    # Columns: 2 = date, 5 = high, 6 = low (per the 2018 simple CSV layout).
    dates, highs, lows = [],[], []
    for row in reader:
        current_date = datetime.strptime(row[2], '%Y-%m-%d')
        high = int(row[5])
        low = int(row[6])
        dates.append(current_date)
        highs.append(high)
        lows.append(low)
# Plot the highs and lows.
fig, ax = plt.subplots()
ax.plot(dates, highs, c='red', alpha=0.5)
ax.plot(dates, lows, c='blue', alpha=0.5)
ax.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)# alpha is the opacity: 0 transparent, 1 opaque
# Format the plot. (Title/labels are Chinese: "2018 daily high temperatures",
# "temperature (F)".)
ax.set_title("2018年每日最高温度", fontsize=24)
ax.set_xlabel('', fontsize=16)
fig.autofmt_xdate()
ax.set_ylabel('温度(F)', fontsize=16)
ax.tick_params(axis='both', which='major', labelsize=16)
plt.show()
01f78ee173501fe323e969b49c36009d76d69db6 | Python | tourist-C/packnet-sfm | /scripts/train_sfm_utils.py | UTF-8 | 3,594 | 2.5625 | 3 | [
"MIT"
] | permissive | # Copyright 2020 Toyota Research Institute. All rights reserved.
import torch
from monodepth.models import monodepth_beta, load_net_from_checkpoint
from monodepth.functional.image import scale_image
import os
def load_dispnet_with_args(args):
    """
    Load a pretrained depth (disparity) network from args.pretrained_model.

    Returns (disp_net, checkpoint_args): the CUDA-resident network and the
    argparse Namespace stored inside the checkpoint. Asserts that the
    checkpoint carries the args needed to rebuild the model.
    NOTE(review): requires a CUDA device (unconditional .cuda()).
    """
    checkpoint = torch.load(args.pretrained_model)
    # check for relevant args
    assert 'args' in checkpoint, 'Cannot find args in checkpoint.'
    checkpoint_args = checkpoint['args']
    for arg in ['disp_model', 'dropout', 'input_height', 'input_width']:
        assert arg in checkpoint_args, 'Could not find argument {}'.format(arg)
    disp_net = monodepth_beta(checkpoint_args.disp_model,
                              dropout=checkpoint_args.dropout)
    disp_net = load_net_from_checkpoint(disp_net, args.pretrained_model, starts_with='disp_network')
    disp_net = disp_net.cuda() # move to GPU
    print('Loaded disp net of type {}'.format(checkpoint_args.disp_model))
    return disp_net, checkpoint_args
def compute_depth_errors(args, gt, pred, use_gt_scale=True, crop=True):
    """
    Computes depth errors given ground-truth and predicted depths.

    gt is a (batch, 1, H, W) tensor (unpacked below); pred is resized to the
    ground-truth resolution before comparison. Returns a 7-element tensor of
    batch-averaged metrics: [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3].
    use_gt_scale: If True, median ground-truth scaling is used
    crop: If True, apply a crop in the image before evaluating
    NOTE(review): abs_diff is accumulated but never returned; the crop path
    builds its mask with .cuda(), so this function assumes GPU tensors.
    """
    abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0, 0, 0, 0, 0, 0
    rmse, rmse_log = 0, 0
    batch_size, _, gt_height, gt_width = gt.shape
    pred = scale_image(pred, gt_height, gt_width, mode='bilinear', align_corners=True)
    for current_gt, current_pred in zip(gt, pred):
        gt_channels, gt_height, gt_width = current_gt.shape
        current_gt = torch.squeeze(current_gt)
        current_pred = torch.squeeze(current_pred)
        # Mask within min and max depth
        valid = (current_gt > args.min_depth) & (current_gt < args.max_depth)
        if crop:
            # crop used by Garg ECCV16 to reproduce Eigen NIPS14 results
            # construct a mask of False values, with the same size as target
            # and then set to True values inside the crop
            crop_mask = torch.zeros(current_gt.shape).byte().cuda()
            y1, y2 = int(0.40810811 * gt_height), int(0.99189189 * gt_height)
            x1, x2 = int(0.03594771 * gt_width), int(0.96405229 * gt_width)
            crop_mask[y1:y2, x1:x2] = 1
            valid = valid & crop_mask
        valid_gt = current_gt[valid]
        valid_pred = current_pred[valid]
        if use_gt_scale:
            # Median ground-truth scaling
            valid_pred = valid_pred * torch.median(valid_gt) / torch.median(valid_pred)
        valid_pred = valid_pred.clamp(args.min_depth, args.max_depth)
        # Calculates threshold values (delta < 1.25^k accuracy metrics)
        thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))
        a1 += (thresh < 1.25).float().mean()
        a2 += (thresh < 1.25**2).float().mean()
        a3 += (thresh < 1.25**3).float().mean()
        # Calculates absolute relative error
        abs_diff += torch.mean(torch.abs(valid_gt - valid_pred))
        abs_rel += torch.mean(torch.abs(valid_gt - valid_pred) / valid_gt)
        # Calculates square relative error
        sq_rel += torch.mean(((valid_gt - valid_pred)**2) / valid_gt)
        # Calculates root mean square error and its log
        rmse += torch.sqrt(torch.mean((valid_gt - valid_pred)**2))
        r_log = (torch.log(valid_gt) - torch.log(valid_pred))**2
        rmse_log += torch.sqrt(torch.mean(r_log))
    return torch.tensor([metric / batch_size for metric in [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]])
| true |
04c4f2e7cb521b2801762d382ae95d1f49b86dbd | Python | emilybache/SupermarketReceipt-Refactoring-Kata | /python_pytest/src/texttest_fixture.py | UTF-8 | 2,012 | 2.90625 | 3 | [
"MIT"
] | permissive | """
Start texttest from a command prompt in the same folder as this file with this command:
texttest -a sr -d .
"""
import sys,csv
from pathlib import Path
from model_objects import Product, SpecialOfferType, ProductUnit
from receipt_printer import ReceiptPrinter
from shopping_cart import ShoppingCart
from teller import Teller
from tests.fake_catalog import FakeCatalog
def read_catalog(catalog_file):
    """Build a FakeCatalog from a CSV with columns name, unit, price.

    Returns an empty catalog when the file does not exist.
    """
    catalog = FakeCatalog()
    if catalog_file.exists():
        with open(catalog_file, "r") as csv_file:
            for record in csv.DictReader(csv_file):
                product = Product(record['name'], ProductUnit[record['unit']])
                catalog.add_product(product, float(record['price']))
    return catalog
def read_offers(offers_file, teller):
    """Register every special offer (name, offer, argument) from the CSV with the teller.

    Does nothing when the file does not exist.
    """
    if not offers_file.exists():
        return
    with open(offers_file, "r") as csv_file:
        for record in csv.DictReader(csv_file):
            product = teller.product_with_name(record['name'])
            teller.add_special_offer(
                SpecialOfferType[record['offer']], product, float(record['argument'])
            )
def read_basket(cart_file, catalog):
    """Build a ShoppingCart from a CSV of (name, quantity) rows.

    Products are resolved through catalog.products; an empty cart is
    returned when the file does not exist.
    """
    cart = ShoppingCart()
    if cart_file.exists():
        with open(cart_file, "r") as csv_file:
            for record in csv.DictReader(csv_file):
                cart.add_item_quantity(
                    catalog.products[record['name']], float(record['quantity'])
                )
    return cart
def main(args):
    """Run one checkout: load catalog/offers/cart CSVs from the CWD and print the receipt.

    `args` is currently unused.
    """
    catalog = read_catalog(Path("catalog.csv"))
    teller = Teller(catalog)
    read_offers(Path("offers.csv"), teller)
    basket = read_basket(Path("cart.csv"), catalog)
    receipt = teller.checks_out_articles_from(basket)
    print(ReceiptPrinter().print_receipt(receipt))
if __name__ == "__main__":
    main(sys.argv[1:])  # command-line arguments are forwarded but ignored by main()
64382a24f46492219661a89235fdcb8f9e1af0a5 | Python | Shikhar21121999/ptython_files | /top_view_btree.py | UTF-8 | 1,740 | 3.953125 | 4 | [] | no_license | class BTnode:
    def __init__(self, data):
        # Binary-tree node: payload plus left/right child links (set by callers).
        self.data = data
        self.left = None
        self.right = None
def store_top_view(root, recd, curr_dis, level):
    """Record, for every horizontal distance, the node closest to the tree head.

    root: current node; curr_dis: horizontal distance from the head
    (left child = -1, right child = +1); level: depth below the head.
    recd maps curr_dis -> [node data, level]. On equal levels the earlier
    (pre-order) visit wins, exactly as in the original update rule.
    """
    if root is None:
        return
    previous = recd.get(curr_dis)
    if previous is None or level < previous[1]:
        recd[curr_dis] = [root.data, level]
    # Recurse left (distance - 1) then right (distance + 1), one level deeper.
    store_top_view(root.left, recd, curr_dis - 1, level + 1)
    store_top_view(root.right, recd, curr_dis + 1, level + 1)
def print_top_view(recd):
    """Return the top view (left-to-right node data) of the tree rooted at `recd`.

    Note: despite its name, the parameter is the tree root — see the caller
    in __main__, which passes `root`. Fixes two bugs in the original:
    it ignored/shadowed the parameter and read a module-level `root`
    global, and it iterated `sorted(recd)` (int keys) then indexed each
    key with [0], raising TypeError.
    """
    root = recd
    distances = dict()
    # Collect {horizontal distance: [data, level]} via DFS.
    store_top_view(root, distances, 0, 0)
    # Ascending distance gives left-to-right order; keep node data only.
    return [distances[d][0] for d in sorted(distances)]
# main function
if __name__ == "__main__":
    # create a Btree
    #        1
    #      2   3
    #     4 5
    root = BTnode(1)
    root.left = BTnode(2)
    root.right = BTnode(3)
    root.left.left = BTnode(4)
    root.left.right = BTnode(5)
    # NOTE(review): print_top_view returns the answer but the result is
    # discarded here — presumably it was meant to be printed.
    print_top_view(root)
    # ans = levelOrder(root)
    # print(ans)
ac4d2e144305046cdfb45818456315f7905270c2 | Python | dongsik93/HomeStudy | /Question/sw_expert/D3/3975.py | UTF-8 | 280 | 3.75 | 4 | [] | no_license | T = int(input())
from fractions import Fraction  # local import: exact rational comparison

# For each of the T test cases (T is read above), compare Alice's score a/b
# with Bob's score c/d and record the winner.
# Fix: the original compared float divisions (a/b < c/d), which can misjudge
# equal ratios due to rounding; Fraction compares the ratios exactly.
res = []
for _ in range(T):
    a, b, c, d = map(int, input().split())
    alice, bob = Fraction(a, b), Fraction(c, d)
    if alice < bob:
        res.append("BOB")
    elif alice > bob:
        res.append("ALICE")
    else:
        res.append("DRAW")
for tc in range(T):
    print("#{} {}".format(tc + 1, res[tc]))
8bd64e7dc68a6938e7b36d7647a0e08dd5892b9c | Python | mrahjoo/Solar-for-Industry-Process-Heat | /heat_load_calculations/EPA_hourly_emissions.py | UTF-8 | 28,567 | 2.59375 | 3 | [] | no_license |
import requests
import pandas as pd
import numpy as np
from zipfile import ZipFile
from io import BytesIO
import urllib
from bs4 import BeautifulSoup
import dask.dataframe as dd
import datetime as dt
import scipy.cluster as spc
import matplotlib.pyplot as plt
from pandas.tseries.holiday import USFederalHolidayCalendar
import re
class EPA_AMD:
    def __init__(self):
        # Base FTP URL for EPA hourly emissions data and the month suffixes
        # used to build per-state monthly archive names.
        self.ftp_url = "ftp://newftp.epa.gov/DMDnLoad/emissions/hourly/monthly/"
        self.months = ['01','02','03','04','05','06','07','08','09','10','11',
                       '12']
        # Info from EPA on relevant facilities from
        self.am_facs = pd.read_csv(
            '../calculation_data/EPA_airmarkets_facilities.csv'
            )
        # Column headers in the CSV carry stray whitespace; strip it.
        self.am_facs.columns = [x.strip() for x in self.am_facs.columns]
        self.am_facs.rename(columns={'Facility ID (ORISPL)': 'ORISPL_CODE',
                                     'Unit ID': 'UNITID'},
                            inplace=True)
        # Fill in missing max heat rate data
        # (backfill within the (ORISPL_CODE, UNITID) index ordering).
        self.am_facs.set_index(['ORISPL_CODE', 'UNITID'], inplace=True)
        self.am_facs.update(
            self.am_facs['Max Hourly HI Rate (MMBtu/hr)'].fillna(method='bfill')
            )
        self.am_facs.reset_index(inplace=True)
        def unit_str_cleaner(am_facs):
            """
            Removes start date from unit type.

            Unit Type values may end in a parenthesized start date, e.g.
            "Boiler (1990)"; split it off into a new 'Unit Start Date' column.
            """
            def search_starts(unit_type):
                # NOTE(review): bare except swallows any failure (including
                # non-string values) and leaves unit_start as NaN.
                try:
                    unit_start = re.search('(\(.+)', unit_type).groups()[0]
                    unit_type = unit_type.split(' '+unit_start)[0]
                except:
                    unit_start=np.nan
                return np.array([unit_type, unit_start])
            units_corrected = pd.DataFrame(
                np.vstack([x for x in am_facs['Unit Type'].apply(
                    lambda x: search_starts(x)
                    )]), columns=['Unit Type', 'Unit Start Date']
                )
            am_facs['Unit Start Date'] = np.nan
            am_facs.update(units_corrected)
            return am_facs
        # Remove starting dates from Unit type
        self.am_facs = unit_str_cleaner(self.am_facs)
        # Build list of state abbreviations
        # (scraped from 50states.com; odd <td> cells hold the abbreviations).
        ssoup = BeautifulSoup(
            requests.get("https://www.50states.com/abbreviations.htm").content,
            'lxml'
            )
        self.states = \
            [ssoup.find_all('td')[x].string.lower() for x in range(1,101,2)]
        fac_states = [x.lower() for x in self.am_facs.State.unique()]
        # Keep only states that actually appear in the facilities data.
        self.states = list(set.intersection(set(self.states), set(fac_states)))
        #Downloaded data saved as parquet files
        self.amd_files = \
            ['../calculation_data/'+f for f in ['epa_amd_2012-2014',
                                                'epa_amd_2015-2017',
                                                'epa_amd_2018-2019']]
        # #(partitioned by ORISPL_CODE)
        # self.amd_files = '../calculation_data/epa_amd_data'
    def dl_data(self, years=None, file_name=None, output=None):
        """
        Download and format hourly load data for specified range of years.

        years: iterable of years to fetch; file_name: output file name under
        ../calculation_data/; output: 'parquet' or 'csv' (anything else skips
        saving). One zip per (year, state, month) is fetched from the EPA FTP
        site, filtered to known facilities, and appended into one DataFrame.
        """
        all_the_data = pd.DataFrame()
        for y in years:
            for state in self.states:
                for month in self.months:
                    #source_address is a 2-tuple (host, port) for the socket to bind to as its source address before connecting
                    y_ftp_url = self.ftp_url+'{!s}/{!s}{!s}{!s}.zip'.format(
                        str(y),str(y),state,month
                        )
                    print(y_ftp_url)
                    try:
                        response = urllib.request.urlopen(y_ftp_url)
                    except urllib.error.URLError as e:
                        # Missing archive for this state/month: log and move on.
                        print(y, state, e)
                        continue
                    # ftp_file = response.read()
                    zfile = ZipFile(BytesIO(response.read()))
                    hourly_data = pd.read_csv(zfile.open(zfile.namelist()[0]),
                                              low_memory=False)
                    # Keep only units belonging to the Air Markets facilities.
                    hourly_data = hourly_data[
                        hourly_data['ORISPL_CODE'].isin(
                            self.am_facs['ORISPL_CODE']
                            )
                        ]
                    # Column names differ across vintages; drop rows lacking
                    # heat input under either naming.
                    if 'HEAT_INPUT' in hourly_data.columns:
                        hourly_data.dropna(subset=['HEAT_INPUT'],
                                           inplace=True)
                    if 'HEAT_INPUT (mmBtu)' in hourly_data.columns:
                        hourly_data.dropna(subset=['HEAT_INPUT (mmBtu)'],
                                           inplace=True)
                    usecols=['STATE','FACILITY_NAME','ORISPL_CODE','UNITID',
                             'OP_DATE','OP_HOUR','OP_TIME','GLOAD (MW)',
                             'SLOAD (1000lb/hr)', 'GLOAD', 'SLOAD',
                             'HEAT_INPUT','HEAT_INPUT (mmBtu)','FAC_ID',
                             'UNIT_ID','SLOAD (1000 lbs)']
                    # Drop every column not in the keep-list above.
                    drop_cols = set.difference(set(hourly_data.columns),usecols)
                    hourly_data.drop(drop_cols, axis=1, inplace=True)
                    all_the_data = all_the_data.append(
                        hourly_data, ignore_index=True
                        )
                    print(all_the_data.columns)
        if output=='parquet':
            all_the_data.to_parquet('../calculation_data/'+file_name,
                                    engine='pyarrow', compression='gzip')
        if output=='csv':
            all_the_data.to_csv('../calculation_data/'+file_name,
                                compression='gzip')
        print('ftp download complete')
    def format_amd(self):
        """
        Read AMD parquet files and format date and time, and add
        facility information.

        Returns a pandas DataFrame (dask is used for the heavy per-row work
        and then computed). Adds unit metadata, NAICS codes via the
        ORIS-GHGRP crosswalk, timestamp/calendar columns, the hourly heat
        input as a fraction of unit capacity, and a final unit-type label.
        """
        def describe_date(amd_data):
            """
            Add columns for weekday, month, and holiday. Based on existing
            timestamp column in amd_data dataframe.
            """
            holidays = USFederalHolidayCalendar().holidays()
            # Dask and pandas frames need different apply signatures (meta=).
            if type(amd_data) == dd.DataFrame:
                amd_data = amd_data.assign(month=amd_data.timestamp.apply(
                    lambda x: x.month, meta=('month', 'int')
                    ))
                amd_data = amd_data.assign(holiday=amd_data.timestamp.apply(
                    lambda x: x.date() in holidays, meta=('holiday', 'bool')
                    ))
            if type(amd_data) == pd.DataFrame:
                amd_data['month'] = amd_data.timestamp.apply(
                    lambda x: x.month
                    )
                amd_data['holiday'] = amd_data.timestamp.apply(
                    lambda x: x.date() in holidays
                    )
            return amd_data
        #Read parquet files into dask dataframe
        # Unable to use partitioned parquet because index is read
        # as an object, which precludes merging operations.
        # amd_dd = dd.read_parquet(amd_files, engine='pyarrow')
        #
        # # method for renaming index of amd_dd
        # def p_rename(df, name):
        #     df.index.name = name
        #     return df
        #
        # amd_dd.map_partitions(p_rename, 'ORISPL_CODE')
        amd_dd = dd.from_pandas(
            pd.concat(
                [pd.read_parquet(f, engine='pyarrow') for f in self.amd_files],
                axis=0, ignore_index=True
                ).set_index('ORISPL_CODE'), npartitions=311, sort=True,
            name='amd'
            )
        # Merge in info on unit types
        am_facs_dd_merge = self.am_facs.drop_duplicates(
            ['ORISPL_CODE', 'UNITID']
            )[
                ['ORISPL_CODE','UNITID', 'Unit Type', 'Fuel Type (Primary)',
                 'Fuel Type (Secondary)', 'Max Hourly HI Rate (MMBtu/hr)',
                 'CHP_COGEN']
                ].set_index(['ORISPL_CODE'])
        amd_dd = amd_dd.merge(
            am_facs_dd_merge, how='left', on=['ORISPL_CODE', 'UNITID']
            )
        def format_amd_dt(dt_row):
            # Combine OP_DATE ('%m-%d-%Y') and OP_HOUR into one datetime.
            date = dt.datetime.strptime(dt_row['OP_DATE'], '%m-%d-%Y').date()
            time = dt.datetime.strptime(str(dt_row['OP_HOUR']), '%H').time()
            tstamp = dt.datetime.combine(date, time)
            return tstamp
        # Normalize column names that vary across reporting vintages.
        amd_dd = amd_dd.rename(
            columns={'GLOAD':'GLOAD_MW', 'GLOAD (MW)': 'GLOAD_MW',
                     'SLOAD': 'SLOAD_1000lb_hr',
                     'SLOAD (1000lb/hr)': 'SLOAD_1000lb_hr',
                     'HEAT_INPUT': 'HEAT_INPUT_MMBtu',
                     'HEAT_INPUT (mmBtu)': 'HEAT_INPUT_MMBtu'}
            )
        # Match ORISPL to its NAICS using GHGRP data.
        xwalk_df = pd.read_excel(
            "https://www.epa.gov/sites/production/files/2015-10/" +\
            "oris-ghgrp_crosswalk_public_ry14_final.xls", skiprows=3
            )
        xwalk_df = xwalk_df[['GHGRP Facility ID', 'FACILITY NAME', 'ORIS CODE']]
        xwalk_df.replace({'No Match':np.nan}, inplace=True)
        xwalk_df['ORIS CODE'] = xwalk_df['ORIS CODE'].astype('float32')
        naics_facs = pd.merge(
            self.am_facs, xwalk_df, left_on='ORISPL_CODE',
            right_on='ORIS CODE', how='left'
            )
        # Manual matching for facs missing ORIS. Dictionary of ORIS: GHGRP FAC
        missing_oris = pd.read_csv('../calculation_data/ORIS_GHGRP_manual.csv')
        naics_facs.set_index('ORISPL_CODE', inplace=True)
        naics_facs.update(missing_oris.set_index('ORISPL_CODE'))
        naics_facs.reset_index(inplace=True)
        # Import ghgrp facilities and their NAICS Codes
        ghgrp_facs = pd.read_parquet(
            '../results/ghgrp_energy_20190801-2337.parquet', engine='pyarrow'
            )[['FACILITY_ID', 'PRIMARY_NAICS_CODE']].drop_duplicates()
        naics_facs = pd.merge(
            naics_facs, ghgrp_facs, left_on='GHGRP Facility ID',
            right_on='FACILITY_ID', how='left'
            )
        # Apply the manual overrides again after the NAICS merge.
        naics_facs.set_index('ORISPL_CODE', inplace=True)
        naics_facs.update(missing_oris.set_index('ORISPL_CODE'))
        naics_facs.reset_index(inplace=True)
        amd_dd = amd_dd.merge(
            naics_facs.drop_duplicates(
                subset=['ORISPL_CODE', 'UNITID']
                ).set_index('ORISPL_CODE')[['UNITID', 'PRIMARY_NAICS_CODE']],
            on=['ORISPL_CODE', 'UNITID']
            )
        amd_dd = amd_dd.assign(
            timestamp=amd_dd.apply(lambda x: format_amd_dt(x), axis=1,
                                   meta=('timestamp', 'datetime64[ns]'))
            )
        amd_dd = amd_dd.assign(OP_DATE=amd_dd.timestamp.apply(
            lambda x: x.date(), meta=('OP_DATE', 'datetime64[ns]')
            ))
        amd_dd = amd_dd.astype(
            {'OP_HOUR': 'float32', 'OP_TIME':'float32','GLOAD_MW': 'float32',
             'SLOAD_1000lb_hr':'float32', 'HEAT_INPUT_MMBtu': 'float32',
             'FAC_ID': 'float32', 'UNIT_ID': 'float32'}
            )
        amd_dd = describe_date(amd_dd)
        # Hourly heat input as a fraction of the unit's rated capacity.
        amd_dd = amd_dd.assign(
            heat_input_fraction=\
                amd_dd['HEAT_INPUT_MMBtu']/amd_dd['Max Hourly HI Rate (MMBtu/hr)']
            )
        amd_dd = amd_dd.assign(year=amd_dd.OP_DATE.apply(lambda x: x.year))
        # Treat zero loads/heat input as missing.
        amd_dd = amd_dd.replace(
            {'GLOAD_MW':0, 'SLOAD_1000lb_hr':0, 'HEAT_INPUT_MMBtu':0,
             'heat_input_fraction':0}, np.nan
            )
        # Do those dask tasks
        amd_dd = amd_dd.compute()
        # Calcualte hourly load as a fraction of daily heat input
        # amd_dd.set_index(['UNITID', 'OP_DATE', 'timestamp'], append=True,
        #                  inplace=True)
        #
        # amd_dd['HI_daily_fraction'] = \
        #     amd_dd[['HEAT_INPUT_MMBtu']].sort_index().divide(
        #         amd_dd[['HEAT_INPUT_MMBtu']].resample(
        #             'D', level='timestamp'
        #             ).sum(), level=2
        #         )
        #
        # amd_dd.reset_index(['ORISPL_CODE', 'UNIT_ID', 'OP_DATE'],inplace=True)
        amd_dd.reset_index(inplace=True)
        amd_dd.set_index('timestamp', inplace=True)
        amd_dd['dayofweek'] = amd_dd.index.dayofweek
        def fix_dayweek(dayofweek):
            # Map pandas dayofweek ints (Mon=0) to weekday/saturday/sunday.
            if dayofweek <5:
                dayofweek = 'weekday'
            elif dayofweek == 5:
                dayofweek = 'saturday'
            else:
                dayofweek = 'sunday'
            return dayofweek
        amd_dd['dayofweek'] = amd_dd.dayofweek.apply(lambda x: fix_dayweek(x))
        amd_dd.reset_index(inplace=True)
        # Final unit type: CHP/cogen flag first, then Process Heater override.
        amd_dd['final_unit_type'] = amd_dd.CHP_COGEN.map(
            {False: 'conv_boiler', True: 'chp_cogen'}
            )
        amd_dd['final_unit_type'].update(
            amd_dd['Unit Type'].map({'Process Heater': 'process_heater'})
            )
        return amd_dd
    def calc_load_factor(self, amd_dd):
        """
        Calculate average monthly load factor
        (MMBtu/(peak MMBtu)*operating hours and monthly peak load by NAICS.

        Parameters
        ----------
        amd_dd : pandas.DataFrame
            Hourly AMD unit records; must carry ORISPL_CODE,
            PRIMARY_NAICS_CODE, Unit Type, UNIT_ID, year, month and
            HEAT_INPUT_MMBtu columns.

        Returns
        -------
        pandas.Series
            Load factor indexed by (PRIMARY_NAICS_CODE, month).
        """
        # Drop facilities with odd data {10298: hourly load > capacity,
        # 54207: hourly load > capacity,55470:hourly load > capacity,
        # 10867:hourly load > capacity, 10474:hourly load > capacity,
        # 880074: hourly load == capacity, 880101: hourly load == capacity}.
        #
        # drop_facs = [10298, 54207, 55470, 10687, 10474, 880074, 88101]
        #
        # amd_filtered = amd_dd[amd_dd.ORISPL_CODE not in drop_facs]
        # Monthly total heat input per unit.
        avg_load = amd_dd.groupby(
            ['ORISPL_CODE','PRIMARY_NAICS_CODE', 'Unit Type','UNIT_ID', 'year',
             'month']
        ).HEAT_INPUT_MMBtu.sum()
        # Monthly peak hourly heat input per unit.
        peak_load = amd_dd.groupby(
            ['ORISPL_CODE','PRIMARY_NAICS_CODE', 'Unit Type', 'UNIT_ID','year',
             'month']
        ).HEAT_INPUT_MMBtu.max()
        # Create annual counts of hours
        # Using total hours/year results in load factors > 1 in some instances.
        hour_count = pd.DataFrame.from_records(
            np.array([np.tile(np.sort(amd_dd.year.unique()), 12),
                      np.repeat(range(1,13), len(amd_dd.year.unique()))]).T,
            columns=['year', 'month']
        )
        hour_count.sort_values(by=['year', 'month'], inplace=True)
        hour_count['hours'] = np.nan
        hour_count.set_index(['year', 'month'], inplace=True)
        # Fill in the exact number of hours in each calendar month/year
        # (handles leap years via the date_range).
        for y in hour_count.index.get_level_values('year').unique():
            hours = pd.date_range(start=str(y)+'-01-01', end=str(y+1)+'-01-01',
                                  freq='H')[0:-1]
            hours = hours.groupby(hours.month)
            hours = pd.DataFrame(
                [(k, len(hours[k])) for k in hours.keys()],
                columns=['month', 'hours']
            )
            hours['year'] = y
            hours.set_index(['year', 'month'], inplace=True)
            hour_count.hours.update(hours.hours)
        # Scale peak hourly load to a monthly "peak energy" (peak rate * hours).
        peak_monthly = pd.merge(
            peak_load.reset_index(), hour_count.reset_index(),
            on=['year', 'month']
        )
        peak_monthly.HEAT_INPUT_MMBtu.update(
            peak_monthly.HEAT_INPUT_MMBtu.multiply(
                peak_monthly.hours
            )
        )
        # Sum by NAICS to get industry average peak load by month
        peak_monthly = peak_monthly.groupby(
            ['PRIMARY_NAICS_CODE', 'month']
        ).HEAT_INPUT_MMBtu.sum()
        # Load factor = observed energy / (peak rate * hours), by NAICS+month.
        load_factor = avg_load.sum(
            level=['PRIMARY_NAICS_CODE', 'month']
        ).divide(peak_monthly)
        # Express peak load
        # load_factor = avg_load.divide(
        #     peak_load.multiply(hour_count.hours, level='year')
        # )
        return load_factor
    @staticmethod
    def calc_load_shape_revised(amd_dd):
        """
        Revised method for estimating daytype load shapes by industry.
        Also returns coefficient of variation.

        Each hourly observation is normalized by its unit's monthly peak,
        then averaged by (NAICS, month, daytype, hour). Returns a DataFrame
        with 'mean', 'high' and 'low' columns, where high/low are
        mean*(1 +/- coefficient of variation).
        """
        # Monthly peak hourly heat input per unit.
        peak_load = amd_dd.groupby(
            ['ORISPL_CODE','PRIMARY_NAICS_CODE','Unit Type','UNIT_ID',
             'year','month']
        ).HEAT_INPUT_MMBtu.max()
        # Attach each observation's monthly peak (suffix _peak) to the
        # observation itself (suffix _obs).
        hourly_load_shape = pd.merge(
            amd_dd.set_index(['ORISPL_CODE','PRIMARY_NAICS_CODE','Unit Type',
                              'UNIT_ID','year','month']), peak_load,
            left_index=True, right_index=True, how='left',
            suffixes=('_obs', '_peak')
        )
        # Normalize in place: obs column becomes obs/peak.
        hourly_load_shape.HEAT_INPUT_MMBtu_obs.update(
            hourly_load_shape.HEAT_INPUT_MMBtu_obs.divide(
                hourly_load_shape.HEAT_INPUT_MMBtu_peak
            )
        )
        hourly_load_shape.reset_index(inplace=True)
        # Drop entries with NaN NAICS codes
        hourly_load_shape = hourly_load_shape.dropna(
            subset=['PRIMARY_NAICS_CODE']
        )
        std_dev_hourly_load = hourly_load_shape.groupby(
            ['PRIMARY_NAICS_CODE', 'month', 'dayofweek', 'OP_HOUR']
        ).HEAT_INPUT_MMBtu_obs.std()
        hourly_load_shape = hourly_load_shape.groupby(
            ['PRIMARY_NAICS_CODE', 'month', 'dayofweek', 'OP_HOUR']
        ).HEAT_INPUT_MMBtu_obs.mean()
        # Calculate the coefficient of variation (defined as the ratio of
        # standard devation to mean)
        coeff_var = std_dev_hourly_load.divide(hourly_load_shape)
        hourly_load_shape = pd.concat(
            [hourly_load_shape, hourly_load_shape.add(
                hourly_load_shape.multiply(coeff_var)
            ), hourly_load_shape.subtract(
                hourly_load_shape.multiply(coeff_var)
            )],
            axis=1)
        hourly_load_shape.columns = ['mean', 'high', 'low']
        return hourly_load_shape
# def calc_rep_loadshapes(self, amd_dd, by='qpc_naics'):
# """
# Calculate representative hourly loadshapes by facility and unit type.
# Represents hourly mean load ...
# """
#
# # Drop facilities with odd data {10298: hourly load > capacity,
# # 54207: hourly load > capacity,55470:hourly load > capacity,
# # 10867:hourly load > capacity, 10474:hourly load > capacity,
# # 880074: hourly load == capacity, 880101: hourly load == capacity}.
#
# drop_facs = [10298, 54207, 55470, 10687, 10474, 880074, 88101]
#
# amd_filtered = amd_dd[amd_dd.ORISPL_CODE not in drop_facs]
#
# if by == 'naics':
#
# load_summary = amd_filtered.groupby(
# ['PRIMARY_NAICS_CODE', 'Unit Type', 'month','holiday','dayofweek',
# 'OP_HOUR']
# ).agg({'GLOAD_MW': 'mean', 'SLOAD_1000lb_hr': 'mean',
# 'HEAT_INPUT_MMBtu': 'mean'})
#
# elif by == 'qpc_naics':
#
# load_summary = amd_filtered.groupby(
# ['qpc_naics', 'final_unit_type','holiday','dayofweek',
# 'OP_HOUR']
# ).agg({'GLOAD_MW': 'mean', 'SLOAD_1000lb_hr': 'mean',
# 'HEAT_INPUT_MMBtu': 'mean'})
#
# # Make aggregate load curve
# agg_curve = amd_filtered.groupby(
# ['final_unit_type','holiday','dayofweek', 'OP_HOUR'],
# as_index=False
# ).agg({'GLOAD_MW': 'mean', 'SLOAD_1000lb_hr': 'mean',
# 'HEAT_INPUT_MMBtu': 'mean'})
#
# agg_curve['qpc_naics'] = '31-33'
#
# load_summary = load_summary.append(
# agg_curve.set_index(
# ['qpc_naics','final_unit_type','holiday','dayofweek',
# 'OP_HOUR']
# )
# )
#
# else:
#
# load_summary = amd_filtered.groupby(
# ['ORISPL_CODE', 'UNITID','month','holiday','dayofweek','OP_HOUR']
# ).agg(
# {'GLOAD_MW': 'mean', 'SLOAD_1000lb_hr': 'mean',
# 'HEAT_INPUT_MMBtu': 'mean', 'heat_input_fraction':'mean'}
# )
#
# for col in ['GLOAD_MW','SLOAD_1000lb_hr', 'HEAT_INPUT_MMBtu']:
#
# new_name = col.split('_')[0]+'_hourly_fraction_year'
#
# load_summary[new_name] = \
# load_summary[col].divide(
# load_summary[col].sum(level=[0,1,2,3,4])
# )
#
# return load_summary
    @staticmethod
    def make_load_shape_plots(load_summary, naics, unit_type, load_type):
        """
        Save a faceted (by month) load-shape plot for one NAICS/unit type.

        Each facet shows the selected load_type column over OP_HOUR, with one
        line per (holiday, dayofweek) combination. The figure is written to
        '../Results analysis/' and the matplotlib figure is closed afterwards.
        """
        # Slice out this industry / unit type from the multi-indexed summary.
        plot_data = load_summary.xs(
            [naics, unit_type], level=['qpc_naics', 'final_unit_type']
        )[[load_type]].reset_index()
        # Combine holiday and dayofweek into a single hue key.
        plot_data['holiday-weekday'] = plot_data[['holiday', 'dayofweek']].apply(
            lambda x: tuple(x), axis=1
        )
        grid = sns.FacetGrid(
            plot_data[[load_type, 'OP_HOUR', 'month', 'holiday-weekday']],
            col='month', hue='holiday-weekday', col_wrap=3, height=1.75,
            aspect=1.5, despine=True
        )
        grid = (grid.map(plt.plot, 'OP_HOUR', load_type)
                .add_legend())
        grid.set_axis_labels('Hour', 'Daily Fraction')
        grid.set(ylim=(0,0.075))
        plt.subplots_adjust(top=0.9)
        grid.fig.suptitle(
            load_type.split('_')[0]+': '+str(int(naics))+', '+unit_type
        )
        grid.savefig(
            '../Results analysis/load_shape_revised_steam'+str(int(naics))+unit_type+'.png'
        )
        plt.close()
# for naics in load_summary.index.get_level_values('qpc_naics').unique():
#
# for unit in load_summary.xs(naics, level='qpc_naics').index.get_level_values('final_unit_type').unique():
#
# make_load_shape_plots(load_summary, naics, unit, 'SLOAD_hourly_fraction')
# # Summarize spread of data
# fac_summary_unit = amd_data.groupby(
# ['PRIMARY_NAICS_CODE', 'ORISPL_CODE', 'Unit Type', 'year']
# ).HEAT_INPUT_MMBtu.sum()
#
# fac_summary = amd_data.groupby(
# ['PRIMARY_NAICS_CODE', 'ORISPL_CODE', 'year']
# ).HEAT_INPUT_MMBtu.sum()
#
# # ID NAICS and unit types with more than one facility
# mult_fac_units = amd_data.groupby(
# ['PRIMARY_NAICS_CODE', 'Unit Type', 'year']
# ).ORISPL_CODE.apply(lambda x: np.size(x.unique()))
#
# # ID NAICS with more than facility
# mult_facs = mult_fac_units.sum(level=[0,2])
#
#
# def make_boxplot(df, figname):
#
# fig, ax = plt.subplots(figsize=(12, 8))
#
# sns.boxplot(y='HEAT_INPUT_MMBtu', x='PRIMARY_NAICS_CODE', hue='year',
# orient='v', data=df.reset_index(),
# fliersize=1.25)
#
# plt.savefig(figname+'.png')
#
# plt.close()
# make_boxplot(pd.merge(
# fac_summary_unit, mult_fac_units[mult_fac_units>1],
# on=['PRIMARY_NAICS_CODE', 'Unit Type', 'year'], how='inner'
# ), 'mult_units')
# make_boxplot(pd.merge(
# fac_summary, mult_facs[mult_facs>1], on=['PRIMARY_NAICS_CODE', 'year'],
# how='inner'
# ), 'mult_facs')
# fac_count = amd_data.groupby(
# ['PRIMARY_NAICS_CODE', 'year'],
# as_index=False).apply(lambda x: np.size(x.unique()))
# fac_count.rename(columns={0:'count'}, inplace=True)
# plot_data = fac_count[fac_count.year==2012].sort_values(by='count',
# ascending=False
# ).reset_index(drop=True)
# plot_data['PRIMARY_NAICS_CODE'] = plot_data.PRIMARY_NAICS_CODE.astype(str)
# fig, ax = plt.subplots(figsize=(12, 8))
# sns.barplot(x='PRIMARY_NAICS_CODE', y='count', data=plot_data, color='grey')
# plt.xticks(rotation=90)
# plt.savefig('amd_fac_count_2012.png', bbox_inches='tight', frameon=False)
#
# unit_count = amd_data.groupby(
# ['Unit Type', 'year'],
# as_index=False)['UNITID'].apply(lambda x: np.size(x.unique())).reset_index()
# unit_count.rename(columns={0:'count'}, inplace=True)
# plot_data = unit_count[unit_count.year==2012].sort_values(by='count',
# ascending=False
# ).reset_index(drop=True)
# fig, ax = plt.subplots(figsize=(8, 12))
# sns.barplot(y='Unit Type', x='count', data=plot_data, color='grey')
# plt.savefig('amd_unit_count_2012.png', bbox_inches='tight', frameon=False)
#
#
# @staticmethod
# def run_cluster_analysis(amd_dd, kn=range(1,30)):
# """
# Run to identify day types by unit
# """
#
# # pivot data so hours, weekday/weekend, holiday, and month, are columns
# # and date is row.
#
#
# for g in amd_dd.groupby(['ORISPL_CODE', 'UNIT_ID']).groups:
#
# data = amd_dd.groupby(
# ['ORISPL_CODE', 'UNIT_ID']
# ).get_group(g).join(
# amd_dd.groupby(
# ['ORISPL_CODE', 'UNIT_ID']
# ).get_group(g).apply()
# )
#
# data['TS_DATE'] = data.timestamp.apply(
# lambda x: x.date()
# )
#
# data = data.pivot(
# index='TS_DATE', columns='OP_HOUR',
# values='SLOAD (1000lb/hr)'
# )
#
# data = describe_date(data)
#
# def id_clusters(data):
# """
# K-means clustering hourly load by day.
# kn is the number of clusters to calculate, represented as a range
# """
#
# # Whiten observations (normalize by dividing each column by its standard
# # deviation across all obervations to give unit variance.
# # See scipy.cluster.vq.whiten documentation).
# # Need to whitend based on large differences in mean and variance
# # across energy use by NAICS codes.
# data_whitened = spc.vq.whiten(data)
#
# # Run K-means clustering for the number of clusters specified in K
# KM_load = [spc.vq.kmeans(data_whitened, k, iter=25) for k in kn]
#
# KM_results_dict = {}
#
# KM_results_dict['data_white'] = data_whitened
#
# KM_results_dict['KM_results'] = KM_load
#
# KM_results_dict['centroids'] = [cent for (cent, var) in KM_load]
#
# # Calculate average within-cluster sum of squares
# KM_results_dict['avgWithinSS'] = [var for (cent, var) in KM_load]
#
# # Plot elbow curve to examine within-cluster sum of squares
# # Displays curve and asks for input on number of clusters to use
# fig = plt.figure()
#
# ax = fig.add_subplot(111)
#
# ax.plot(kn, KM_results_dict['avgWithinSS'], 'b*-')
#
# #ax.plot(K[kIdx], avgWithinSS[kIdx], marker='o', markersize=12,
# # markeredgewidth=2, markeredgecolor='r', markerfacecolor='None')
# plt.grid(True)
#
# plt.xlabel('Number of clusters')
#
# plt.ylabel('Average within-cluster sum of squares')
#
# plt.title('Elbow for KMeans clustering')
#
# plt.show(block=False)
#
# # User input for selecting number of clusters.
# # plt.show(block=False) or plt.pause(0.1)
# chosen_k = input("Input selected number of clusters: ")
#
# return chosen_k,
#
# def format_cluster_results(
# KM_results_dict, cla_input, ctyfips, naics_agg, n
# ):
# """
# Format cluster analysis results for n=k clusters, adding cluster ids
# and socio-economic data by county.
# """
#
# # Calcualte cluster ids and distance (distortion) between the observation
# # and its nearest code for a chosen number of clusters.
# cluster_id, distance = spc.vq.vq(
# KM_results['data_white'],
# KM_results['centroids'][chosen_k - 1]
# )
#
# cols = ['cluster']
#
# for col in data.columns:
# cols.append(col)
#
# # Combine cluster ids and energy data
# cluster_id.resize((cluster_id.shape[0], 1))
#
# # Name columns based on selected N-digit NAICS codes
#
# id_load_clusters = \
# pd.DataFrame(
# np.hstack((cluster_id, data)),
# columns=cols
# )
#
# id_load_clusters.set_index(ctyfips[naics_agg], inplace=True)
#
# id_energy.loc[:, 'TotalEnergy'] = id_energy[cols[1:]].sum(axis=1)
| true |
824a93c173659c0042f780313f4cb153f19439a0 | Python | Gistbatch/Reinforcement | /src/cartpole/cartpole_rbf.py | UTF-8 | 4,307 | 2.75 | 3 | [] | no_license | import gym
from gym import wrappers
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import uuid
from sklearn.kernel_approximation import RBFSampler
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
class Regressor:
    """Linear model trained online by stochastic gradient descent."""

    def __init__(self, dims, learning_rate=0.1):
        # Scaled Gaussian init keeps initial predictions close to zero.
        self.W = np.random.randn(dims) / np.sqrt(dims)
        self.learning_rate = learning_rate

    def partial_fit(self, X, Y):
        """Take one gradient step moving predictions for X toward targets Y."""
        residual = Y - X.dot(self.W)
        self.W = self.W + self.learning_rate * residual.dot(X)

    def predict(self, features):
        """Return the linear prediction features . W."""
        return features.dot(self.W)
class FeatureTransformer:
    """
    Maps raw 4-component observations to RBF features.

    A StandardScaler and a union of four RBFSamplers (gammas 0.5/1/2/4) are
    fit on synthetic samples drawn uniformly from hand-picked ranges.
    NOTE(review): the four sampled components look like CartPole's
    (cart position, cart velocity, pole angle, pole velocity) ranges --
    confirm against the environment's observation space.
    """

    def __init__(self, env, n_components=1000, n_samples=20000):
        # env is accepted but not used here.
        scaler = StandardScaler()
        # Uniform synthetic samples for each observation component.
        sample_pos = 5 * np.random.random(n_samples) - 2.5
        sample_vel = 4 * np.random.random(n_samples) - 2
        sample_ppos = np.random.random(n_samples) - 0.5
        sample_pvel = 7 * np.random.random(n_samples) - 3.5
        samples = np.stack((sample_pos, sample_vel, sample_ppos, sample_pvel),
                           axis=-1)
        scaler.fit(samples)
        # Four RBF banks at different length scales, concatenated.
        featureunion = FeatureUnion([('rbf1', RBFSampler(0.5, n_components)),
                                     ('rbf2', RBFSampler(1, n_components)),
                                     ('rbf3', RBFSampler(2, n_components)),
                                     ('rbf4', RBFSampler(4, n_components))])
        # Fit the samplers and record the output feature dimensionality.
        features = featureunion.fit_transform(scaler.transform(samples))
        self.dimensions = features.shape[1]
        self.scaler = scaler
        self.featurizer = featureunion

    def transform(self, state):
        """Scale then featurize a batch of observations."""
        return self.featurizer.transform(self.scaler.transform(state))
class Model:
    """Per-action linear Q-value approximators over RBF features."""

    def __init__(self, env, feature_transformer):
        """Create one Regressor per discrete action of `env`."""
        self.env = env
        self.feature_transformer = feature_transformer
        self.models = []
        # NOTE: the original bound feature_transformer.transform([env.reset()])
        # to an unused local; removed as dead code (episodes reset the env
        # themselves before use).
        for _ in range(env.action_space.n):
            model = Regressor(feature_transformer.dimensions)
            self.models.append(model)

    def predict(self, state):
        """Return Q-value estimates for every action for a single state."""
        features = self.feature_transformer.transform(np.atleast_2d(state))
        predictions = [model.predict(features) for model in self.models]
        return np.stack(predictions).T

    def update(self, state, action, G):
        """One SGD step moving Q(state, action) toward the return target G."""
        features = self.feature_transformer.transform(np.atleast_2d(state))
        self.models[action].partial_fit(features, [G])

    def action(self, state, epsilon):
        """Epsilon-greedy action selection."""
        if np.random.random() < epsilon:
            return self.env.action_space.sample()
        return np.argmax(self.predict(state))
def plot_running_avg(rewards):
    """Plot the trailing mean of the last (up to) 101 rewards at each step."""
    window_means = np.array(
        [rewards[max(0, t - 100):(t + 1)].mean() for t in range(len(rewards))]
    )
    plt.plot(window_means)
    plt.title("Running Average")
    plt.show()
def play_episode(model, epsilon, gamma=0.99):
    """
    Run one episode, updating the model online with TD(0) targets.

    Returns the total environment reward actually collected. Note the -300
    early-failure penalty below is applied AFTER the reward has been added
    to the running total, so it only shapes the learning target G, not the
    returned score.
    """
    observation_t = model.env.reset()
    done = False
    steps = 0
    rewards = 0
    while not done:
        steps += 1
        action_t = model.action(observation_t, epsilon)
        observation_t1, reward, done, _ = model.env.step(action_t)
        rewards += reward
        # Penalize failing before the 200-step cap (learning signal only).
        if done and steps < 200:
            reward = -300
        # Safety cap on episode length.
        if steps > 10000:
            break
        # TD(0) target: r + gamma * max_a Q(s', a).
        G = reward + gamma * model.predict(observation_t1).max()
        model.update(observation_t, action_t, G)
        observation_t = observation_t1
    return rewards
def cart_pole():
    """
    Train the RBF Q-learner on CartPole-v0 for 100 episodes with decaying
    epsilon, then plot the per-episode rewards and their running average.
    """
    env = gym.make('CartPole-v0')
    transformer = FeatureTransformer(env)
    model = Model(env, transformer)
    # NOTE(review): the Monitor wraps a rebound local `env` AFTER the model
    # captured the unwrapped env, so episodes run on model.env and the
    # monitor wrapper is never stepped -- confirm whether recording was
    # intended here.
    env = wrappers.Monitor(env, 'output/' + str(uuid.uuid4()))
    iterations = 100
    rewards = np.empty(iterations)
    for index in range(iterations):
        # Exponentially decaying exploration rate.
        epsilon = 0.1 * (0.97**index)
        current_reward = play_episode(model, epsilon)
        rewards[index] = current_reward
        if (index + 1) % 10 == 0:
            print(
                f'episode: {index} reward {current_reward} epsilon: {epsilon}')
    print(f'avg reward for last 100 episodes: {rewards[-100:].mean()}')
    print(f'total steps {-rewards.sum()}')
    plt.plot(rewards)
    plt.title('Rewards')
    plt.show()
    plot_running_avg(rewards)
if __name__ == "__main__":
cart_pole() | true |
def breakWords(header):
    """
    Split a header string into lowercase words.

    Words are delimited by '_', ' ', '.' and camelCase boundaries (a split
    happens just before each uppercase letter). Trailing digits and dots are
    stripped from each fragment, and empty fragments are dropped.
    """
    words = []
    length = len(header)
    start = 0
    for pos in range(length):
        fragment = ''
        ch = header[pos]
        if (ch == '_' or ch == ' ' or ch == '.') and pos >= start:
            # Explicit separator: emit everything since the last boundary.
            fragment = header[start:pos]
            start = pos + 1
        elif pos == length - 1 and pos >= start:
            # End of string: flush the remaining tail.
            fragment = header[start:]
        elif ch.isupper() and pos >= start:
            # camelCase boundary: emit the fragment, keep the capital.
            fragment = header[start:pos]
            start = pos
        fragment = fragment.rstrip('1234567890.')
        if fragment:
            words.append(fragment.lower())
    return words
| true |
0df0fdc30068d276b75a0e9530e808a641eca8aa | Python | anntheknee/LeetCode | /Questions/Dynamic_Programming/Unique_Paths.py | UTF-8 | 839 | 3.296875 | 3 | [] | no_license | # Link: https://leetcode.com/problems/unique-paths/submissions/
# Level: Medium
# Runtime: 32 ms, faster than 73.04% of Python3 online submissions for Unique Paths.
# Memory Usage: 12.9 MB, less than 100.00% of Python3 online submissions for Unique Paths.
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count lattice paths from (1, 1) to (m, n) moving only right/down."""
        # Out-of-grid start yields zero paths (mirrors the recursive base case).
        if m < 1 or n < 1:
            return 0
        # Rolling 1-D DP: row[j] = number of paths reaching column j of the
        # current row. The first row/column always have exactly one path.
        row = [1] * n
        for _ in range(1, m):
            for j in range(1, n):
                row[j] += row[j - 1]
        return row[-1]
| true |
d132323480598566c1704e30ae63916230f4178c | Python | opensafely/T1DM_covid_research | /analysis/match.py | UTF-8 | 14,640 | 2.9375 | 3 | [
"MIT"
] | permissive | import os
import copy
import random
from datetime import datetime
import pandas as pd
NOT_PREVIOUSLY_MATCHED = -9
def import_csvs(
    case_csv,
    match_csv,
    match_variables,
    date_exclusion_variables,
    index_date_variable,
    output_path,
    replace_match_index_date_with_case=None,
):
    """
    Imports the two csvs specified under case_csv and match_csv.
    Also sets the correct data types for the matching variables.

    Side effect: match_variables is mutated in place -- each "month_only"
    entry VAR is replaced by a derived "VAR_m" category entry (callers
    deep-copy the dict before passing it in).

    Returns (cases, matches) as DataFrames indexed by patient_id.
    """
    cases = pd.read_csv(
        os.path.join(output_path, f"{case_csv}.csv"),
        index_col="patient_id",
    )
    matches = pd.read_csv(
        os.path.join(output_path, f"{match_csv}.csv"),
        index_col="patient_id",
    )
    ## Set data types for matching variables
    month_only = []
    for var, match_type in match_variables.items():
        if match_type == "category":
            cases[var] = cases[var].astype("category")
            matches[var] = matches[var].astype("category")
        ## Extract month from month_only variables
        elif match_type == "month_only":
            month_only.append(var)
            # Characters 5..6 of an ISO date string are the month.
            cases[f"{var}_m"] = cases[var].str.slice(start=5, stop=7).astype("category")
            matches[f"{var}_m"] = (
                matches[var].str.slice(start=5, stop=7).astype("category")
            )
    # Swap each month_only variable for its derived month-category column
    # (done after the loop to avoid mutating the dict mid-iteration).
    for var in month_only:
        del match_variables[var]
        match_variables[f"{var}_m"] = "category"
    ## Format exclusion variables as dates
    if date_exclusion_variables is not None:
        for var in date_exclusion_variables:
            cases[var] = pd.to_datetime(cases[var])
            matches[var] = pd.to_datetime(matches[var])
    ## Format index date as date
    cases[index_date_variable] = pd.to_datetime(cases[index_date_variable])
    # When the match index date will be copied from the case later, the
    # matches' own index date column is not parsed here.
    if replace_match_index_date_with_case is None:
        matches[index_date_variable] = pd.to_datetime(matches[index_date_variable])
    return cases, matches
def add_variables(cases, matches, indicator_variable_name="case"):
    """
    Tag both tables for matching (mutates and returns both frames).

    Cases get set_id equal to their own patient id; matches get the
    NOT_PREVIOUSLY_MATCHED sentinel plus a 'randomise' sort key drawn with a
    fixed seed so repeat runs on the same input pick the same matches. The
    indicator column (default "case") is 1 for cases and 0 for matches.
    """
    cases["set_id"] = cases.index
    cases[indicator_variable_name] = 1

    matches["set_id"] = NOT_PREVIOUSLY_MATCHED
    matches[indicator_variable_name] = 0
    # Seeded so the same random tie-break order is reproduced between runs.
    random.seed(999)
    matches["randomise"] = [random.random() for _ in range(len(matches))]
    return cases, matches
def get_bool_index(match_type, value, match_var, matches):
    """
    Build a boolean Series over `matches` flagging rows whose `match_var`
    value is compatible with the case's `value` under the matching rule.

    "category" demands exact equality; an integer rule accepts values whose
    absolute difference from the case value is at most that integer.
    """
    if match_type == "category":
        return matches[match_var] == value
    if isinstance(match_type, int):
        return (matches[match_var] - value).abs() <= match_type
    raise Exception(f"Matching type '{match_type}' not yet implemented")
def pre_calculate_indices(cases, matches, match_variables):
    """
    Pre-compute, for every match variable and every distinct case value of
    it, the boolean Series of compatible match rows. The per-case matching
    loop then only combines cached Series instead of recomputing them.

    Returns {match_var: {case_value: boolean Series over matches}}.
    """
    return {
        match_var: {
            value: get_bool_index(match_type, value, match_var, matches)
            for value in cases[match_var].unique()
        }
        for match_var, match_type in match_variables.items()
    }
def get_eligible_matches(case_row, matches, match_variables, indices):
    """
    Combine the pre-computed per-variable boolean Series for this case into
    one eligibility mask over `matches`, excluding anyone already allocated
    to a matched set.
    """
    eligible = pd.Series(True, index=matches.index)
    for match_var in match_variables:
        eligible &= indices[match_var][case_row[match_var]]
    # Anyone whose set_id has been reassigned is already used.
    return eligible & (matches["set_id"] == NOT_PREVIOUSLY_MATCHED)
def date_exclusions(df1, date_exclusion_variables, index_date):
    """
    Flag rows excluded by date rules relative to the index date.

    date_exclusion_variables maps a date column name to "before" (exclude
    rows whose event is on or before the index date) or "after" (exclude
    rows whose event is after it). index_date can be a single value, or a
    pandas Series whose index matches df1. Returns a boolean Series that is
    True where the row should be excluded.

    Raises Exception for any rule value other than "before"/"after".
    """
    exclusions = pd.Series(data=False, index=df1.index)
    for exclusion_var, before_after in date_exclusion_variables.items():
        if before_after == "before":
            variable_bool = df1[exclusion_var] <= index_date
        elif before_after == "after":
            variable_bool = df1[exclusion_var] > index_date
        else:
            # BUGFIX: the original message interpolated the column name, not
            # the invalid rule value, obscuring what was actually wrong.
            raise Exception(
                f"Date exclusion type '{before_after}' invalid "
                f"for variable '{exclusion_var}'"
            )
        exclusions = exclusions | variable_bool
    return exclusions
def greedily_pick_matches(
    matches_per_case,
    matched_rows,
    case_row,
    closest_match_variables=None,
):
    """
    Greedy selection of up to `matches_per_case` matches.

    Eligible matches are ordered by closeness to the case on each
    closest-match variable (absolute difference), with the pre-assigned
    random key as the final tie-breaker, and the index of the first
    `matches_per_case` rows is returned. With no closest-match variables the
    pick is purely random (seeded upstream).
    """
    sort_columns = []
    if closest_match_variables is not None:
        for var in closest_match_variables:
            delta_col = f"{var}_delta"
            matched_rows[delta_col] = (matched_rows[var] - case_row[var]).abs()
            sort_columns.append(delta_col)
    sort_columns.append("randomise")
    ordered = matched_rows.sort_values(sort_columns)
    return ordered.head(matches_per_case).index
def get_date_offset(offset_str):
    """
    Parse the replace_match_index_date_with_case string into an offset.

    Strings look like "<length>_<unit>[_<direction>]", e.g. "1_year_earlier"
    or "3_months_later"; only length and unit are read here. "no_offset"
    yields None. Raises Exception for an unrecognised unit.
    """
    if offset_str == "no_offset":
        return None
    parts = offset_str.split("_")
    length = int(parts[0])
    unit = parts[1]
    # Map singular/plural unit spellings onto pd.DateOffset keyword names.
    unit_kwargs = {
        "year": "years", "years": "years",
        "month": "months", "months": "months",
        "day": "days", "days": "days",
    }
    if unit not in unit_kwargs:
        raise Exception(f"Date offset '{unit}' not implemented")
    return pd.DateOffset(**{unit_kwargs[unit]: length})
def match(
    case_csv,
    match_csv,
    matches_per_case,
    match_variables,
    index_date_variable,
    closest_match_variables=None,
    date_exclusion_variables=None,
    replace_match_index_date_with_case=None,
    indicator_variable_name="case",
    output_suffix="",
    output_path="output",
):
    """
    Wrapper function that calls functions to:
    - import data
    - find eligible matches
    - pick the correct number of randomly allocated matches
    - make exclusions that are based on index date
    - (this is not currently possible in a study definition, and will only ever be possible
    during matching for studies where the match index date comes from the case)
    - set the set_id as that of the case_id (this excludes them from being matched later)
    - set the index date of the match as that of the case (where desired)
    - save the results as a csv

    Side effects: writes matching_report{suffix}.txt and three CSVs
    (matched_cases, matched_matches, matched_combined) under output_path,
    and prints the report lines as it goes.
    """
    report_path = os.path.join(
        output_path,
        f"matching_report{output_suffix}.txt",
    )

    def matching_report(text_to_write, erase=False):
        # Append the given lines to the report file and echo them to stdout;
        # erase=True starts a fresh report.
        if erase and os.path.isfile(report_path):
            os.remove(report_path)
        with open(report_path, "a") as txt:
            for line in text_to_write:
                txt.writelines(f"{line}\n")
                print(line)
            txt.writelines("\n")
            print("\n")

    matching_report(
        [f"Matching started at: {datetime.now()}"],
        erase=True,
    )
    ## Deep copy match_variables
    # (import_csvs mutates the dict for month_only variables; the copy keeps
    # the caller's dict intact.)
    match_variables = copy.deepcopy(match_variables)

    ## Import_data
    cases, matches = import_csvs(
        case_csv,
        match_csv,
        match_variables,
        date_exclusion_variables,
        index_date_variable,
        output_path,
        replace_match_index_date_with_case,
    )
    matching_report(
        [
            "CSV import:",
            f"Completed {datetime.now()}",
            f"Cases {len(cases)}",
            f"Matches {len(matches)}",
        ],
    )
    ## Drop cases from match population
    ## WARNING - this will cause issues in dummy data where population
    ## sizes are the same, as the indices will be identical.
    matches = matches.drop(cases.index, errors="ignore")
    matching_report(
        [
            "Dropping cases from matches:",
            f"Completed {datetime.now()}",
            f"Cases {len(cases)}",
            f"Matches {len(matches)}",
        ]
    )
    ## Add set_id and randomise variables
    cases, matches = add_variables(cases, matches, indicator_variable_name)

    # Cache the per-variable eligibility masks up front so the per-case loop
    # only combines boolean Series.
    indices = pre_calculate_indices(cases, matches, match_variables)
    matching_report([f"Completed pre-calculating indices at {datetime.now()}"])

    if replace_match_index_date_with_case is not None:
        offset_str = replace_match_index_date_with_case
        date_offset = get_date_offset(replace_match_index_date_with_case)
        # Case-level date exclusions are only possible when the match index
        # date will come from the case (see docstring).
        if date_exclusion_variables is not None:
            case_exclusions = date_exclusions(
                cases, date_exclusion_variables, cases[index_date_variable]
            )
            cases = cases.loc[~case_exclusions]
            matching_report(
                [
                    "Date exclusions for cases:",
                    f"Completed {datetime.now()}",
                    f"Cases {len(cases)}",
                    f"Matches {len(matches)}",
                ]
            )
    ## Sort cases by index date
    cases = cases.sort_values(index_date_variable)

    for case_id, case_row in cases.iterrows():
        ## Get eligible matches
        eligible_matches = get_eligible_matches(
            case_row, matches, match_variables, indices
        )
        matched_rows = matches.loc[eligible_matches]
        ## Determine match index date
        if replace_match_index_date_with_case is None:
            index_date = matched_rows[index_date_variable]
        else:
            # Derive the match index date from the case's, applying the
            # requested offset direction.
            if offset_str == "no_offset":
                index_date = case_row[index_date_variable]
            elif offset_str.split("_")[2] == "earlier":
                index_date = case_row[index_date_variable] - date_offset
            elif offset_str.split("_")[2] == "later":
                index_date = case_row[index_date_variable] + date_offset
            else:
                raise Exception(f"Date offset type '{offset_str}' not recognised")
        ## Index date based match exclusions (faster to do this after get_eligible_matches)
        if date_exclusion_variables is not None:
            exclusions = date_exclusions(
                matched_rows, date_exclusion_variables, index_date
            )
            matched_rows = matched_rows.loc[~exclusions]
        ## Pick random matches
        matched_rows = greedily_pick_matches(
            matches_per_case,
            matched_rows,
            case_row,
            closest_match_variables,
        )
        ## Report number of matches for each case
        num_matches = len(matched_rows)
        cases.loc[case_id, "match_counts"] = num_matches
        ## Label matches with case ID if there are enough
        # (a partially-matched case keeps none, leaving its candidates free
        # for later cases)
        if num_matches == matches_per_case:
            matches.loc[matched_rows, "set_id"] = case_id
            ## Set index_date of the match where needed
            if replace_match_index_date_with_case is not None:
                matches.loc[matched_rows, index_date_variable] = index_date

    ## Drop unmatched cases/matches
    matched_cases = cases.loc[cases["match_counts"] == matches_per_case]
    matched_matches = matches.loc[matches["set_id"] != NOT_PREVIOUSLY_MATCHED]

    ## Describe population differences
    scalar_comparisons = compare_populations(
        matched_cases, matched_matches, closest_match_variables
    )
    matching_report(
        [
            "After matching:",
            f"Completed {datetime.now()}",
            f"Cases {len(matched_cases)}",
            f"Matches {len(matched_matches)}\n",
            "Number of available matches per case:",
            cases["match_counts"].value_counts().to_string(),
        ]
        + scalar_comparisons
    )
    ## Write to csvs
    matched_cases.to_csv(os.path.join(output_path, f"matched_cases{output_suffix}.csv"))
    matched_matches.to_csv(
        os.path.join(output_path, f"matched_matches{output_suffix}.csv")
    )
    appended = matched_cases.append(matched_matches)
    appended.to_csv(os.path.join(output_path, f"matched_combined{output_suffix}.csv"))
def compare_populations(matched_cases, matched_matches, closest_match_variables):
    """
    Produce report lines contrasting the matched case and match populations
    on each closest-match variable, so their similarity can be checked.

    Returns a flat list of strings (five per variable: a header, "Cases:",
    the cases' describe() output, "Matches:", the matches' describe()
    output), or an empty list when no closest-match variables were given.
    """
    if closest_match_variables is None:
        return []
    report_lines = []
    for var in closest_match_variables:
        report_lines += [
            f"\n{var} comparison:",
            "Cases:",
            matched_cases[var].describe().to_string(),
            "Matches:",
            matched_matches[var].describe().to_string(),
        ]
    return report_lines
| true |
621459d7c795ed9e2ceb8603eae83f7e8bac439e | Python | Morgan-Griffiths/RouteMuse | /Local/test/test_math.py | UTF-8 | 1,764 | 2.609375 | 3 | [] | no_license | import numpy as np
import sys
import os
from collections import namedtuple,deque
import time
import pickle
from plots.plot import plot
from gym import Gym
from config import Config
sys.path.append('/Users/morgan/Code/RouteMuse/test')
# sys.path.append('/home/kenpachi/Code/RouteMuse/test')
print('path',os.getcwd())
from test_data import build_data
"""
Generate training data and train on it
TODO
Generate a unique hash for each route
"""
def main():
    """Build the test fixtures and run the math-baseline evaluation."""
    # Instantiate objects
    config = Config('math')
    fields = build_data()
    gym_math = Gym(fields,config)
    # train on data
    test_math(gym_math,config)
def test_math(gym_math,config):
    """
    For plotting - plot the math mean error versions along with the agent mean

    Runs 10 episodes of config.tmax steps each against the math gym, tracking
    per-episode reward sums in a 100-long window plus their running mean and
    std. The collected math_means/math_stds/math_loss lists are built but not
    returned or plotted here.
    """
    # Create two instances of gyms
    # tic = time.time()
    # Collections
    math_means = []
    math_stds = []
    math_window = deque(maxlen=100)
    for e in range(1,11):
        math_rewards = []
        math_state = gym_math.reset()
        math_loss = [gym_math.loss]
        for t in range(config.tmax):
            # Compare with math
            # NOTE(review): the three *_route(0) results below are unused --
            # presumably kept for their side effects or for debugging;
            # confirm before removing.
            mean_hist_route = gym_math.historical_grade_route(0)
            mean_grade_route = gym_math.mean_grade_route(0)
            mean_loc_route = gym_math.mean_location_route(0)
            math_route = gym_math.probabilistic_route(math_state)
            math_next_state, math_reward = gym_math.step(math_route)
            math_rewards.append(math_reward)
            math_loss.append(gym_math.loss)
            math_state = math_next_state
        math_window.append(np.sum(math_rewards))
        math_means.append(np.mean(math_window))
        math_stds.append(np.std(math_window))
    # Compare network vs math
if __name__ == '__main__':
main() | true |
f01a53bb16cce6d33c54dd7d8c0747e0dbaa1d85 | Python | mtn/advent18 | /day12/part1.py | UTF-8 | 1,186 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# Advent of Code 2018 day 12 part 1: cellular-automaton plant growth.
# Parse the initial pot row and the growth rules, simulate 20 generations,
# then sum the (offset-corrected) indices of pots containing a plant.
with open("input.txt") as f:
    inp = f.read().strip()

lines = inp.split("\n")
# Pad both ends with empty pots; encode '#' (plant) as 1, '.' as 0.
initial_state = "..." + lines[0].split()[2] + "..."
initial_state = list(map(lambda x: 1 if x == "#" else 0, initial_state))

# Only track the growths, because we'll start assuming no growth
grows = set()
for rule in lines[2:]:
    pattern, _, outcome = rule.split()
    if outcome == "#":
        grows.add(pattern)

# Pre-encode each growth pattern as a 0/1 list for direct slice comparison.
as_lists = {}
for rule in grows:
    as_lists[rule] = [1 if e == "#" else 0 for e in rule]


def state_as_str(state):
    # Debug helper: render a 0/1 state back to '#'/'.' notation.
    return "".join(["#" if i == 1 else "." for i in state])


state = initial_state
num_plants = len(state)
# front_ind tracks how many padding pots precede original pot 0.
front_ind = 3
for generation in range(20):
    new_state = [0] * len(state)
    # Each pot's next value depends on the 5-pot window centred on it.
    for i in range(2, len(state) - 2):
        for rule in grows:
            if as_lists[rule] == state[i - 2 : i + 3]:
                new_state[i] = 1
                break
    # Re-pad whichever end a plant has reached, keeping the offset in sync.
    if new_state[0:3] != [0, 0, 0]:
        front_ind += 3
        new_state = [0, 0, 0] + new_state
    if new_state[-3:] != [0, 0, 0]:
        new_state = new_state + [0, 0, 0]
    state = new_state

# Answer: sum of original pot numbers (index minus padding) holding plants.
print(sum([i - front_ind for i, plant in enumerate(state) if plant == 1]))
| true |
180fb10d71bb5f050a7a846e243de90a458ddd5f | Python | cww97/visual-language-grasping | /envs/data.py | UTF-8 | 2,821 | 2.890625 | 3 | [
"BSD-2-Clause"
] | permissive | import os
import re
import yaml
from torchtext import data
from collections import namedtuple
Instruction = namedtuple('Instruction', ('tensor', 'length'))
class Data(object):
class DataSet(data.TabularDataset):
@staticmethod
def sort_key(ex):
return len(ex.text)
def __init__(self, text_field: data.Field, filename):
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \\( ", string)
string = re.sub(r"\)", " \\) ", string)
string = re.sub(r"\?", " \\? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().split()
text_field.tokenize = clean_str
fields = [('text', text_field)]
super().__init__(filename, format='tsv', fields=fields)
def __init__(self, filename=os.path.join(os.path.dirname(__file__), 'sample.tsv')):
self.text_field = data.Field(lower=True)
self.dataset = Data.DataSet(self.text_field, filename)
self.text_field.build_vocab(self.dataset)
self.padding_idx = self.text_field.vocab.stoi[self.text_field.pad_token]
self.seq_len = 10
def get_tensor(self, x: str):
x = self.text_field.preprocess(x)
length = len(x)
if length < self.seq_len:
x += [self.text_field.pad_token] * (self.seq_len - length)
ret = self.text_field.numericalize([x]).t()
return Instruction(ret, length)
def generate(inf, ouf):
template = 'pick up the {color} {name}.\n'
colors = {'blue', 'green', 'brown', 'orange', 'yellow', 'gray', 'red', 'purple', 'cyan', 'pink'}
names = set()
with open(inf, 'r') as f:
blocks = yaml.safe_load(f)
for name_list in blocks['names'].values():
for name in name_list:
names.add(name)
results = [template.format(color=color, name=name) for color in colors for name in names]
import random
random.shuffle(results)
with open(ouf, 'w') as f:
f.writelines(results)
if __name__ == '__main__':
import os
# run this file to generate the sample
inf = os.path.join(os.path.dirname(__file__), 'objects/blocks/blocks.yml')
ouf = os.path.join(os.path.dirname(__file__), 'sample.tsv')
generate(inf, ouf)
test_data = Data()
print(len(test_data.text_field.vocab))
print(test_data.get_tenser('pick up the red cube'))
| true |
63c2486e62de07eb175fe6055391bd23df975fed | Python | MorrellLAB/Deleterious_GP | /Analysis_Scripts/Data_Handling/Remove_Indels.py | UTF-8 | 460 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
"""Super simple script to filter indels/length polymorphisms from a VCF."""
import sys
with open(sys.argv[1], 'r') as f:
for line in f:
if line.startswith('#'):
print line.strip()
else:
tmp = line.strip().split('\t')
ref = tmp[3]
alt = tmp[4]
if len(ref) != 1 or len(alt) != 1:
continue
else:
print line.strip()
| true |
9dbba6b7a6a9fde5e64ac37b37945dc3b120a552 | Python | crusaderkarthik/HackerRank-Python-Practice | /ifelse.py | UTF-8 | 307 | 3.953125 | 4 | [] | no_license | ##** TYPE 1 **##
n = int(input())
if (n % 2 == 1):
print("Weird")
elif n in range(2,6):
print("Not Weird")
elif n in range(6,21):
print("Weird")
elif n>20:
print("Not Weird")
##** TYPE 2 **##
n=int(input())
print("Weird" if n % 2 == 1 or n in range(6,21) else "Not Weird")
| true |
a12af2c480688ed1ea4c59c989d31cc65b816f97 | Python | newtonis/22.01-Circuit-Theory | /TP2/graficos/ejercicio1/bode_inv.py | UTF-8 | 5,365 | 2.546875 | 3 | [] | no_license | from read_spice import *
import numpy as np
from scipy import signal
from math import *
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from read_xls import *
from mpldatacursor import datacursor
a0 = 1e5
fp = 12
wp = fp * 2 * pi
k = 1e3
def dibujar_bode(r1,r2,r3,r4,log_range, excel_filename, spice_filename ,output_filename):
fig, ax1 = plt.subplots()
(s,e) = log_range
f_all = 10.0 ** np.arange(s, e, 0.01)
w_all = [i * (2 * pi) for i in f_all]
print("r1 = ", r1)
print("r2 = ", r2)
print("r3 = ", r3)
g_ideal = -r2 / r1
q = r1 * r2 + r2 * r3 + r1 * r3
G_ac = -a0 * r2 * r3 / (q + a0 * r1 * r3)
fp_p = fp * (1 + r1 * r3 * a0 / q)
wp_p = fp_p * 2 * pi
s1 = signal.lti([G_ac], [1/wp_p ,1])
w, mag, phase = signal.bode(s1, w_all)
f = [i / 2 / pi for i in w]
data_excel = read_excel_data(excel_filename)
### Amplitud
ax1.semilogx(f, mag, "blue", linewidth=2)
#print (data_excel["freq"])
#print (data_excel["Gain"])
ax1.semilogx(data_excel["freq"], data_excel["ratio"], "green", linewidth=2)
data_spice = read_file_spice("input/Ej1_Spice/"+spice_filename)
ax1.semilogx(data_spice["f"],data_spice["abs"],"red",linewidth=2)
plt.xlabel("Frecuencia (Hz)")
plt.ylabel("Amplitud (dB)")
blue_patch = mpatches.Patch(color='blue', label='Teorico')
green_patch = mpatches.Patch(color='green', label='Practica')
red_patch = mpatches.Patch(color='red',label='Simulacion')
plt.legend(handles=[ green_patch, blue_patch , red_patch])
ax1.set_axisbelow(True)
ax1.minorticks_on()
ax1.grid(which='major', linestyle='-', linewidth=0.3, color='black')
ax1.grid(which='minor', linestyle=':', linewidth=0.1, color='black')
datacursor(display='multiple', tolerance=10, formatter="Freq: {x:.3e} Hz \nAmp:{y:.1f} Db".format, draggable=True)
plt.show()
input("Press Enter ")
fig.savefig("output/amp/"+output_filename)
plt.cla()
plt.close()
fig, ax1 = plt.subplots()
### fase
ax1.semilogx(f, phase, "blue", linewidth=2)
ax1.semilogx(data_excel["freq"],data_excel["phase"],"green",linewidth=2)
ax1.semilogx(data_spice["f"],data_spice["pha"],"red",linewidth=2)
plt.xlabel("Frecuencia (Hz)")
plt.ylabel("Fase (grados)")
blue_patch = mpatches.Patch(color='blue', label='Teorico')
green_patch = mpatches.Patch(color='green', label='Practica')
red_patch = mpatches.Patch(color='red', label='Simulacion')
plt.legend(handles=[green_patch, blue_patch, red_patch])
ax1.set_axisbelow(True)
ax1.minorticks_on()
ax1.grid(which='major', linestyle='-', linewidth=0.3, color='black')
ax1.grid(which='minor', linestyle=':', linewidth=0.1, color='black')
datacursor(display='multiple', tolerance=10, formatter="Freq: {x:.3e} Hz \nFase:{y:.1f} grados".format, draggable=True)
plt.show()
input("Press Enter ")
fig.savefig("output/pha/"+output_filename)
plt.cla()
plt.close()
### impedancia de entrada
fig, ax1 = plt.subplots()
#### Teorico #####
wp_pp = (G_ac / a0 + 1) / (1 / wp_p + G_ac / (a0 * wp))
k = r1 / (G_ac / a0 + 1)
s1 = signal.lti([k / wp_p, k], [1 / wp_pp, 1])
w, H = signal.freqresp(s1, w_all)
f = [i / 2 / pi for i in w]
# axes.figure()
ax1.semilogx(f, abs(H), 'blue', linewidth=2)
#### Practico #####
#print (data_excel)
vin = data_excel["amp usada"]
vd = data_excel["vd"]
zin = [vin[i]*r1/(vin[i]-vd[i]) for i in range(len(vin)) ]
ax1.semilogx(data_excel["freq"],zin,'green',linewidth=2)
#### Simulado ####
data_spice = read_file_spice("input/Ej1_SpiceImp/"+spice_filename)
zin = [ 10**(data_spice["abs"][i]/20) for i in range(len(data_spice["abs"]))]
ax1.semilogx(data_spice["f"], zin, "red", linewidth=2)
plt.xlabel("Frecuencia (Hz)")
plt.ylabel("Impedancia (ohms)")
blue_patch = mpatches.Patch(color='blue', label='Teorico')
green_patch = mpatches.Patch(color='green', label='Practica')
red_patch = mpatches.Patch(color='red', label='Simulacion')
plt.legend(handles=[green_patch, blue_patch, red_patch])
ax1.grid(which='major', linestyle='-', linewidth=0.3, color='black')
ax1.grid(which='minor', linestyle=':', linewidth=0.1, color='black')
datacursor(display='multiple', tolerance=10, formatter="Freq: {x:.3e} Hz \nAmp:{y:.1f} Db".format, draggable=True)
plt.show()
input("Press Enter ")
fig.savefig("output/imp/" + output_filename)
plt.cla()
plt.close()
dibujar_bode(r1=1.2*k,r2=12*k,r3=1.2*k,r4=4.99*k, # caso 10
excel_filename="input/Ej1_Bodes/Inversor_G10_OK.xlsx",
spice_filename="Inversor_G10_OK.txt",
output_filename="Inversor_G10.png",
log_range=(2,7))
dibujar_bode(r1=1.2*k,r2=1.2*k,r3=1.2*k,r4=4.99*k, # caso 10
excel_filename="input/Ej1_Bodes/Inversor_G1_OK.xlsx",
spice_filename="Inversor_G1_OK.txt",
output_filename="Inversor_G1.png",
log_range=(3,7))
dibujar_bode(r1=12*k,r2=1.2*k,r3=12*k,r4=49.9*k, # caso 10
excel_filename="input/Ej1_Bodes/Inversor_G0.1_OK.xlsx",
spice_filename="Inversor_G0.1_OK.txt",
output_filename="Inversor_G0.1.png",
log_range=(4,7))
#plt.show() | true |
741b5c21e2624377684596c6649382334098286f | Python | drlongle/leetcode | /algorithms/problem_1329/leetcode3.py | UTF-8 | 1,046 | 3.75 | 4 | [] | no_license | class Solution:
def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:
n, m = len(mat), len(mat[0])
def sort_diagonal(i, j):
"""
Sort the current diagonal
"""
diagonal = []
# store the current diagonal
# in the list
while i < n and j < m:
diagonal.append(mat[i][j])
i += 1
j += 1
# sort the diagonal values
diagonal.sort()
# push the sorted values
# back into the matrix
while i > 0 and j > 0:
j -= 1
i -= 1
mat[i][j] = diagonal.pop()
# sort all diagonals
# in the lower left corner
for i in range(n):
sort_diagonal(i, 0)
# sort all diagonals
# in the upper right corner
for j in range(m):
sort_diagonal(0, j)
return mat
| true |
2429b8d9c1e5d2819bfd4d9f0114c8c315897d1d | Python | rishalab/COSPEX | /Sample Code/Calc_profit.py | UTF-8 | 3,227 | 4.03125 | 4 | [
"MIT"
] | permissive | #Calculate the maximum profit that can be earned by a merchant such that weight limit is not exceeded.
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
"""
Function description is as follows-
:param profit: Take a list of profits
:param weight: Take a list of weight if bags corresponding to the profits
:param max_weight: Maximum weight that could be carried
:return: Maximum expected gain
>>> calc_profit([1, 2, 3], [3, 4, 5], 15)
6
>>> calc_profit([10, 9 , 8], [3 ,4 , 5], 25)
27
"""
if len(profit) != len(weight):
raise ValueError("The length of profit and weight must be same.")
if max_weight <= 0:
raise ValueError("max_weight must greater than zero.")
if any(p < 0 for p in profit):
raise ValueError("Profit can not be negative.")
if any(w < 0 for w in weight):
raise ValueError("Weight can not be negative.")
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
profit_by_weight = [p / w for p, w in zip(profit, weight)]
# Creating a copy of the list and sorting profit/weight in ascending order
sorted_profit_by_weight = sorted(profit_by_weight)
# declaring useful variables
length = len(sorted_profit_by_weight)
limit = 0
gain = 0
i = 0
test = []
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
"""
Calculate the index of the biggest_profit_by_weight in profit_by_weight list.
This will give the index of the first encountered element which is same as of
biggest_profit_by_weight. There may be one or more values same as that of
biggest_profit_by_weight but index always encounter the very first element
only. To curb this alter the values in profit_by_weight once they are used
here it is done to -1 because neither profit nor weight can be in negative.
"""
index = profit_by_weight.index(biggest_profit_by_weight)
profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
profit = [int(x) for x in "5 8 7 1 12 3 4".split()]
weight = [int(x) for x in "2 7 1 6 4 2 5".split()]
max_weight = 100
# Function Call
calc_profit(profit, weight, max_weight)
# source: https://github.com/TheAlgorithms/Python
| true |
2e4690ba25ded75d90bbb847e59f46c77dfb7dcd | Python | nickagliano/ai-projects | /Project2/Perceptron.py | UTF-8 | 3,382 | 3.609375 | 4 | [] | no_license | import numpy as np # numpy is for vectors
import random
import math
# for plotting the data
import matplotlib.pyplot as plt
# IMPORTANT NOTE:
# The data set lists all male and then all female data points. Think about which
# data points you should use for training and which for testing --
# i.e. algorithm will fail if trained on one type of patters and tested on another
class Perceptron(object):
def __init__(self, no_of_inputs, iterations=5000, learning_rate=2):
# number of patterns? (2d array?)
# desired output array? (is this supervised or unsupervised?)
self.iterations = iterations # number of iterations
self.learning_rate = learning_rate # AKA 'alpha', 'learning constant'
# initalize weights to random between (-.5, .5)
#random.uniform(-0.5, 0.5)
self.weights = [random.uniform(-0.5,0.5),random.uniform(-0.5,0.5),random.uniform(-0.5,0.5)]
print(self.weights)
#self.weights = np.zeros(no_of_inputs + 1) # array of weights
# unipolar hard activation function, called by train_hard function
def predict_hard(self, inputs):
summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
if summation > 0:
activation = 1
else: # since if condition is > 0, else means <= 0
activation = 0
return activation
# set error thresholds:
# E < 10^-5 for Group A,
# E < 10^2 for Group B,
# E < 1.45 * 10^3 for Group C
def train_hard(self, training_inputs, labels, stopping_criterion): # add parameter for '% of data used for training
count = 0
for _ in range(self.iterations):
error = 0
for inputs, label in zip(training_inputs, labels):
prediction = self.predict_hard(inputs)
update = self.learning_rate * (label - prediction)
self.weights[1:] += self.learning_rate * (label - prediction) * inputs # error = (label - prediction)
self.weights[0] += self.learning_rate * (label - prediction)
error += int(update != 0.0)
#print(error)
if error < stopping_criterion:
print('Error: ' + str(error))
print('Iterations: ' + str(count))
return None
count += 1
# unipolar soft activation function
def predict_soft(self, inputs):
k = 3 # gain value
summation = np.dot(inputs, self.weights[1:]) + self.weights[0] # deos this need to be tweaked for soft activation function?
activation = (1 / (1 + np.exp(k * -summation)))
if (activation > .8):
activation = 1
elif activation < .2:
activation = 0
# print(activation)
return activation
def train_soft(self, training_inputs, labels, stopping_criterion):
count = 0
for _ in range(self.iterations):
error = 0
for inputs, label in zip(training_inputs, labels):
prediction = self.predict_soft(inputs)
update = self.learning_rate * (label - float(prediction))
self.weights[1:] += self.learning_rate * (label - prediction) * inputs # error = (label - prediction)
self.weights[0] += self.learning_rate * (label - prediction)
error += float(update != 0.0)
if error < stopping_criterion:
print('Error: ' + str(error))
print('Iterations: ' + str(count))
return None
count += 1
print(str(self.weights) + ' , error: ' + str(error) + ' , iterations: ' + str(count))
# to easily present the findings!
def print_results(self):
print('weight of x: ' + str(self.weights[1]))
print('weight of y: ' + str(self.weights[2]))
print('weight of bias: ' + str(self.weights[0]))
| true |
2bcbe0becc4d80fd963a4e6ff1d143c25caa81a2 | Python | SindhuMuthiah/100daysofcode | /acc3.py | UTF-8 | 197 | 3.3125 | 3 | [] | no_license | '''se=set()'''
arr=[]
n=int(input())
for i in range(n):
num=input()
arr.append(num)
'''for j in range(n):
se.add(arr[j])'''
se=set(arr)
print(se)
k=len(se)
print(k)
| true |
2d5aa7684345d228003b45981ad6f2438097c1ee | Python | Riduidel/codingame | /src/main/2 - medium/mayan numbers.py | UTF-8 | 2,414 | 3.375 | 3 | [] | no_license | import sys
import math
from functools import reduce
def to_mayan_number(number, NUMBERS):
if number<20:
return NUMBERS[number]
else:
remainder = number%20
text = to_mayan_number(int(number/20), NUMBERS)
return line_to_string(text, NUMBERS[remainder])
def to_arabian_number(number, NUMBERS):
returned = 0
power = 1
for i, n in enumerate(reversed(number)):
index = NUMBERS.index(n)
# print("number\n%s is at index %s"%(n, index), file=sys.stderr)
if i==0:
returned = index
else:
power = power*20
returned += index*power
return returned
def print_mayan_number(number):
for i, n in enumerate(number):
print("\n=== %d\n%s"%(i, n), file=sys.stderr)
def line_to_string(a, b):
return "%s\n%s"%(a, b)
def parse_mayan_numbers(l, h, lines):
returned=[]
for i in range(int(len(lines[0])/l)):
# print("i is %d"%i, file=sys.stderr)
number = []
for j in range(h):
start=i*l
end=(i+1)*l
number.append(lines[j][start:end])
returned.append(reduce(line_to_string, number))
return returned
def parse_mayan_value(l, h, lines):
returned=[]
number = ""
for i in range(int(len(lines)/h)):
number = []
for j in range(h):
number.append(lines[i*h+j])
returned.append(reduce(line_to_string, number))
return returned
l, h = [int(i) for i in input().split()]
print("Mayan numbers uses L=%d, H=%d"%(l,h), file=sys.stderr)
# Here we read mayan numbers
lines = []
for i in range(h):
lines.append(input())
NUMBERS = parse_mayan_numbers(l, h, lines)
# print_mayan_number(NUMBERS)
# Now we read the numbers on which we want to do operation
s1 = int(input())
lines = []
for i in range(s1):
lines.append(input())
n1mayan = parse_mayan_value(l, h, lines)
n1arabian = to_arabian_number(n1mayan, NUMBERS)
print("Number 1 is", file=sys.stderr)
print_mayan_number(n1mayan)
# print("In arabian, it means %d"%n1arabian, file=sys.stderr)
s2 = int(input())
lines = []
for i in range(s2):
lines.append(input())
n2mayan = parse_mayan_value(l, h, lines)
n2arabian = to_arabian_number(n2mayan, NUMBERS)
print("Number 2 is", file=sys.stderr)
print_mayan_number(n2mayan)
# print("In arabian, it means %d"%n2arabian, file=sys.stderr)
operation = input()
full_op = "%d %s %d"%(n1arabian, operation, n2arabian)
print("Operation to perform is %s"%full_op, file=sys.stderr)
result = int(eval(full_op))
print("%s = %d"%(full_op, result), file=sys.stderr)
print(to_mayan_number(result, NUMBERS))
| true |
b9f011ba69674e7203466da29cde0b908ee07010 | Python | rmazzine/Twitter_Sentiment_Analysis_Rotten_Apple | /preprocessing/GatherMostFrequentWords.py | UTF-8 | 1,604 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 30 16:31:53 2019
@author: mazzi
"""
# This algo will receive several tweets and store in a dataframe
import tweepy
import operator
import pandas as pd
consumer_key = 'YOUR_KEY_HERE'
consumer_key_secret = 'YOUR_KEY_HERE'
access_token = 'YOUR_TOKEN_HERE'
access_token_secret = 'YOUR_TOKEN_HERE'
auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
dict_word_count = {}
class MyStreamListener(tweepy.StreamListener):
def on_status(self, status):
if status!="":
words_list = status.text.split()
for word in words_list:
if word in dict_word_count:
dict_word_count[word]+=1
else:
dict_word_count[word]=1
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
myStream.filter(track="a,o,e,vai,foi,sou,é,como,não,sim,você,vc,eu,como,eh,coisa,bom,ruim,viu,passou,caiu,andou,pq,porque,nós,nos,comprou,para,pra,fica",languages=['pt'])
sorted_x = sorted(dict_word_count.items(), key=operator.itemgetter(1))
list_500 = sorted_x[-501:]
# Delete first as it is RT
list_500 = list_500[:500]
df_500 = pd.DataFrame(list_500)
del df_500[1]
# This output contains the 500 most frequent words on twitter at the moment
# I let this code run for 30 minutes
df_500.to_excel('500_most_frequent_words_pt.xls',index=False,encoding='utf-32')
| true |
906a2ad3da7416959df74944c9c3f5cdf67c311d | Python | baidoosik/ProblemSolving | /BOJ/problem_9012.py | UTF-8 | 373 | 3.28125 | 3 | [] | no_license | n = int(input())
problems = [input() for i in range(n)]
for p in problems:
criteria = 0
for c in p:
if c == '(':
criteria += 1
else:
criteria -= 1
if criteria < 0:
print('NO')
break
if criteria == 0:
print('YES')
elif criteria < 0:
pass
else:
print('NO')
| true |
a61c5548221c2cabf47b06e09b4310f25b7f566b | Python | SerioSticks/AprendiendoPython | /Multiplo.py | UTF-8 | 955 | 4.09375 | 4 | [] | no_license | #Creador Jorge Alberto Flores Sánchez
#Matricula: 1622167 Grupo: 22
#Fecha de Creación : 18/09/2019
#Se captura un numero y se almacena una vez convertido a int
numero=int(input("Dame un numero entero:"))
#Se almacenan en valores de tipo booleano los residuos de la operacion.
#Esto quiere decir que si el residuo es 0 entonces es múltiplo.
esMultiplicado3=((numero%3)==0)
esMultiplicado5=((numero%5)==0)
esMultiplicado7=((numero%7)==0)
#Usar el operador and resuelve por cierto si todas las condiciones,
#son ciertas.En el caso de or se da por cierta al menos si una lo es,
#Los parentecis dividen las condiciones esto quiere decir que la
#comparación con and es una y con or es independiente de esta.
if((esMultiplicado3 and esMultiplicado5) or esMultiplicado7):
print("Correcto.")
else:
print("Incorrecto.")
#Esta condicion evalua solo si es multiplo de 3 y 5 o 7 , en cualquiera
# de estos casos sera verdadera , de lo contrario sera falsa. | true |
35107e11991bdd1f26b686e262a2926492efb690 | Python | punithanae/Python-Basic | /Rocket.py | UTF-8 | 1,285 | 3.609375 | 4 | [] | no_license | print("Welcome to Rocket development system")
print("please enter the details")
print("enter the height of the rocket body ")
a=float(input())
print("enter the weigth of the rocket body")
b=int(input())
print("enter the diameter of the rocket body")
c=float(input())
print("enter the berght of rocket body")
d=int(input())
print("enter the nose of the rocket height")
f=float(input())
print("enter the nose of the rocket breght")
g=float(input())
print("enter the wing 3 or 4")
h=int(input())
print("enter the type of engine")
print("1.A grade engine - 101s,max weight 30kg ")
print("2.B grade engine - 961s,max weight 20kg ")
print("3.C grade engine - 98s,max weight 15kg ")
j=int(input())
if(a+f <=2 and b<=30 and j==1):
for i in range (0,100):
print('|',end='')
print("Rocket max altitude 1.5km")
elif(a+f <=1.5 and b<=20 and j==2):
for i in range (0,100):
print('|',end='')
print("Rocket max altitude 1.6km")
elif(a+f <=1 and b<=15 and j==2):
for i in range (0,100):
print('|',end='')
print("Rocket max altitude 1.0km")
else:
print("Something wents wrong in rocket weight or height")
z=a+f
if (z== 2 and b== 20 and j== 1 and h==4):
print ("Rocket is stable ")
else:
print("Rocket is unstable")
| true |
479397f00122d6f5b38df326095064e13a195a47 | Python | mike-briggs/QualityAssurance | /src/backend.py | UTF-8 | 6,964 | 3.09375 | 3 | [
"MIT"
] | permissive | # backend.py
# handles all (merged) transactions once a day
import sys
import os
import glob
# Inputs:
# - Transaction summary file (merged from several)
# - Previous instance of Master Account List
# AAAAAAA MMMMM NNNN
# acctNum money name
# Outputs:
# - New instance of Master Account List
# AAAAAAA MMMMM NNNN
# acctNum money name
# - New Valid Accunts List for next set of frontends
# AAAAAAA
# acctNum
# Backend
# Read in previous Master Account list and store in local data structure
# Format coming in: AAAAAAA MMMMM NNNN
# acctnum money name
# Read in transaction summary file and store in local data structure
# Go through each transaction
# Ensure transaction is valid
# Perform transaction by updating master account List
# Create new valid accounts list from Master
# Output valid accounts and master accounts
# Used to help store the master accounts list
class Account:
def __init__(self, accountNumber, balance, name):
self.accountNumber = accountNumber
self.balance = balance
self.name = name
def toString(self):
return self.accountNumber+" "+str(self.balance)+" "+self.name
# Handles all merged transactions
def mergeFiles(day):
temp = ""
temp = day
location_in = "./D%s/**/*.out.txt" % (day)
location_out = "./D%s/day_merged_out.txt" % (day)
print(location_in)
input_files = glob.glob(location_in)
with open(location_out, "wb") as outf:
for f in input_files:
with open(f, "rb") as inf:
outf.write(inf.read())
# returns a list of account objects
def parseMasterAccounts(filepath):
masterAccountList = []
with open(filepath) as f:
masterAccountList = f.readlines()
length = len(masterAccountList)
# stores each line (account number) in list
for i in range(length):
line = masterAccountList[i].split(" ")
masterAccountList[i] = Account(
line[0].strip(), line[1].strip(), line[2].strip())
# return list of valid accounts
return masterAccountList
# returns a list of transactions
def parseTransactions(filepath):
transactionList = []
with open(filepath) as f:
transactionList = f.readlines()
# return list of transactions
return transactionList
# Deposit Money into an account
def deposit(accountList, inputAccountNumber, inputAmount):
for j in range(len(accountList)):
if accountList[j].accountNumber == inputAccountNumber: # if found,
accountList[j].balance = int(
accountList[j].balance) + int(inputAmount)
return True
# Withdraw money from an account
def withdraw(accountList, inputAccountNumber, inputAmount):
for j in range(len(accountList)):
#assert accountList[j].accountNumber == inputAccountNumber, "Account number is valid"
if accountList[j].accountNumber == inputAccountNumber:
#assert int(accountList[j].balance) >= int(inputAmount)
if int(accountList[j].balance) >= int(inputAmount):
accountList[j].balance = int(
accountList[j].balance) - int(inputAmount)
return [int(accountList[j].accountNumber), int(accountList[j].balance)]
# Transfer money from on account to another
def transfer(accountList, toAccountNumber, inputAmount, fromAccountNumber):
toAccountBalance = 0
fromAccountBalance = 0
for j in range(len(accountList)):
if accountList[j].accountNumber == toAccountNumber:
toAccountBalance = accountList[j].balance
toIndex = j
if accountList[j].accountNumber == fromAccountNumber:
fromAccountBalance = accountList[j].balance
fromIndex = j
if fromAccountBalance >= int(inputAmount):
accountList[toIndex].balance = int(
accountList[toIndex].balance) + int(inputAmount)
accountList[fromIndex].balance = int(
accountList[fromIndex].balance) - int(inputAmount)
# Create a new account
def createacct(accountList, inputAccountNumber, accountName):
accountList.append(Account(inputAccountNumber, 0, accountName))
return accountList
# Delete an account
def deleteacct(accountList, inputAccountNumber, accountName):
for j in range(len(accountList)):
if accountList[j].accountNumber == inputAccountNumber:
accountList.remove(accountList[j])
return True
# MAIN
day = sys.argv[1]
prevDay = int(day) - 1
prevDay = str(prevDay)
print(prevDay)
mergeFiles(day) #uncomment when testing
if(int(day) == 1):
inMasterAccountListPath = "master_accounts.txt"
else:
inMasterAccountListPath = "./D%s/master_accounts_out.txt"%(prevDay) # of previous day
inTransactionListPath = "./D%s/day_merged_out.txt" %(day)# merge.txt for program, mergeT1.txt for T1, mergeT2.txt for T2 and so on...
outMasterAccountListPath = "./D%s/master_accounts_out.txt"%(day) # master_accounts_out.txt
outValidAccountListPath = "./valid_accounts.txt" # valid_accounts_out.txt
MasterAccountList = parseMasterAccounts(inMasterAccountListPath)
TransactionList = parseTransactions(inTransactionListPath)
# Put account numbers into dictionary, key=index of obj
allAccountNums = {}
for i in range(len(MasterAccountList)):
allAccountNums[i] = MasterAccountList[i]
# TRANSACTIONS
# Iterate through all Transactions
for i in range(1, len(TransactionList)):
# Split each transaction into its arguments
current = TransactionList[i].split()
# 000 1111111 222 3333333 4444
# TYP accntTo amt accntFr name
if current[0] == "DEP": # Deposit
deposit(MasterAccountList, current[1], current[2])
elif current[0] == "WDR": # Withdraw
withdraw(MasterAccountList, current[1], current[2])
elif current[0] == "XFR": # Transfer
transfer(MasterAccountList, current[1], current[2], current[3])
elif current[0] == "NEW": # Create account
createacct(MasterAccountList, current[1], current[4])
elif current[0] == "DEL": # Delete account
deleteacct(MasterAccountList, current[1], current[4])
elif current[0] == "EOS": # End of session
print("EOS")
# OUTPUTS
# Clear previous data in output files, will be overwritten anyways
open(outMasterAccountListPath, 'w').close()
open(outValidAccountListPath, 'w').close()
# For each account
for i in range(len(MasterAccountList)):
# Write to Master Account List
with open(outMasterAccountListPath, 'a') as wf:
wf.write(MasterAccountList[i].toString()) # Write to file
if(i != len(MasterAccountList) - 1):
wf.write("\n")
# Write to Valid ACcount List
with open(outValidAccountListPath, 'a') as wf:
wf.write(MasterAccountList[i].accountNumber) # Write to file
if(i != len(MasterAccountList) - 1):
wf.write("\n")
| true |
8d9d6ca64023beff353274e849637aa63053b153 | Python | aparna501/python101 | /Milestone_Project_1.py | UTF-8 | 1,826 | 3.953125 | 4 | [] | no_license | #Milestone_Project_1
class Milestone_1:
def __init__(self,w,h,n,str,sent_1,sent_2):
self.w=w
self.h=h
self.n=n
self.str=str
self.sent_1=sent_1
self.sent_2=sent_2
# Reverse string
def reverse(self):
rev=self.str[::-1]
print("the reverse of the string is:",rev)
#Palindrome or not
def palindrome(self):
rev=self.str[::-1]
if self.str==rev:
print("the string is palindrome")
else:
print("the string is not palindrome")
#BMI Calculator
def bmi_calculator(self):
h=self.h
w=self.w
print(self.w/self.h**2)
#Factorial
def factorial(self):
factorial=1
if self.n<0:
print("factorial number doesnot exists")
elif self.n==0:
print("the factorial of 0 is 1")
else:
for i in range(factorial,self.n+1):
factorial=factorial*i
print("factorial number of",self.n,"is:",factorial)
#Vowels
def vowel(self):
vowel=['a','e','i','o','u','A','E','I','O','U']
count=0
for i in self.str:
if i in vowel:
count+=1
print("no.of vowels:",count)
#Punctuations
def punctuations(self):
punctuations='''!()-[]{};:'"\,<>./?@#$%^&*_~'''
no_punctuations=""
for i in self.sent_1:
if i not in punctuations:
no_punctuations=no_punctuations+i
print(no_punctuations)
# Print * Triangle
def pattern(self):
for i in range(0,self.n):
for j in range(0,i+1):
print("*",end="")
print("\n")
#Sorting ann Splitting
def sort(self):
word=self.sent_2
sent_2=word.split()
print(sent_2)
sent_2.sort()
print(sent_2)
obj_milestone1=Milestone_1(45,5.3,7,'madam',"hel@#$%lo w<>~!orld","welcome to world of python")
obj_milestone1.reverse()
obj_milestone1.palindrome()
obj_milestone1.bmi_calculator()
obj_milestone1.factorial()
obj_milestone1.vowel()
obj_milestone1.punctuations()
obj_milestone1.pattern()
obj_milestone1.sort()
| true |
c1db88f46c7a773abab7274761f920fb485750bd | Python | HarishGajjar/Python-Projects-for-beginners | /object oriented programming[python]/oops-6.py | UTF-8 | 716 | 3.234375 | 3 | [] | no_license | """
Created on Sat Mar 28 2020
Topic: Inheritance
@author: HarishGajjar
Credit:- Telusko
original Source :- https://youtu.be/qiSCMNBIP2g
"""
class A:
    """First feature provider in the inheritance demo."""

    def feature2(self):
        """Announce feature 2."""
        print("Feature2 is working...")

    def feature1(self):
        """Announce feature 1."""
        print("Feature1 is working...")
class B:
    """Second feature provider in the inheritance demo."""

    def feature4(self):
        """Announce feature 4."""
        print("Feature4 is working...")

    def feature3(self):
        """Announce feature 3."""
        print("Feature3 is working...")
class C(A, B):
    """Multiple inheritance: combines A and B and adds two more features."""

    def feature6(self):
        """Announce feature 6."""
        print("Feature6 is working...")

    def feature5(self):
        """Announce feature 5."""
        print("Feature5 is working...")
# Instantiate the most derived class and exercise the full inherited API.
combined = C()
for feature in (combined.feature1, combined.feature2, combined.feature3,
                combined.feature4, combined.feature5, combined.feature6):
    feature()
980bb1dba49d430daaa2ca330057d89955f33372 | Python | anantgupta04/Coding-Challenges | /steps.py | UTF-8 | 1,114 | 4.03125 | 4 | [] | no_license | '''
This problem was recently asked by LinkedIn:
You are given a positive integer N which represents the number of steps in a staircase. You can either climb 1 or 2 steps at a time. Write a function that returns the number of unique ways to climb the stairs.
Bonus: solution in O(n) time?
'''
def staircase_recursion(n):
    """Count the distinct ways to climb *n* steps taking 1 or 2 at a time.

    Recursive formulation with memoization: without a cache the plain
    recursion is O(2**n), which made staircase_recursion(35) take seconds.
    """
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def ways(steps):
        # Base cases: one way to climb 0 or 1 steps.
        if steps <= 1:
            return 1
        return ways(steps - 1) + ways(steps - 2)

    return ways(n)
def staircase_nonrecursion(n):
    """Count the distinct ways to climb *n* steps taking 1 or 2 at a time.

    Bottom-up dynamic programming in O(n) time and O(1) extra space.
    Fixes an IndexError the original raised for n == 0 (it wrote ans[1]
    into a one-element list).
    """
    # prev/curr hold the counts for steps i-2 and i-1.
    prev, curr = 1, 1
    for _ in range(2, n + 1):
        prev, curr = curr, prev + curr
    return curr
if __name__ == '__main__':
    # Smoke-test both implementations; the n=35 calls exist to compare
    # the running time of the two approaches.
    rec_4 = staircase_recursion(4)
    rec_5 = staircase_recursion(5)
    rec_35 = staircase_recursion(35)
    iter_5 = staircase_nonrecursion(5)
    iter_35 = staircase_nonrecursion(35)
    print('recursion 4= ', rec_4)            # expected 5
    print('recursion 5 = ', rec_5)           # expected 8
    print('recursion 35 = ', rec_35)
    print('Non recursion 5 = ', iter_5)
    print('Non recursion = ', iter_35)
55b78eb29320a330154426e0900d3463180a352e | Python | i-pi/i-pi | /tools/py/a2b.py | UTF-8 | 2,851 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
""" a2b.py
Reads positions of a system in format 'a' and returns a
file written in format 'b'.
Assumes the input file is in a format 'a'.
Syntax:
a2b.py filename format_a format_b
"""
import sys
import re
from ipi.utils.io import read_file, print_file, read_file_raw
from ipi.engine.properties import Trajectories as Traj
from ipi.utils.messages import verbosity
# Pattern matching a "cell{<unit>}" tag in a frame comment line.
cell_unit_re = re.compile(r"cell\{([A-Za-z_]*)\}")  # cell unit pattern
# Known trajectory names (positions, velocities, ...) as declared by i-PI.
traj_dict = Traj().traj_dict  # trajectory dictionary
# One "name{<unit>}" pattern per known trajectory name.
traj_re = [
    re.compile("%s%s" % (key, r"\{[A-Za-z_]*\}")) for key in list(traj_dict.keys())
]  # trajectory patterns
# Keep i-PI quiet while converting.
verbosity.level = "low"
def get_cell_units(comment, mode):
    """Return the cell unit named in *comment*, or the format default."""
    match = cell_unit_re.search(comment)
    if match is not None:
        return match.group(1)
    # No explicit unit tag: PDB files are in Angstrom, everything else atomic units.
    return "angstrom" if mode == "pdb" else "atomic_unit"
def get_key_dim_units(comment, mode):
    """Extract the trajectory key, dimension and units from *comment*.

    Returns a (key, dimension, units) tuple.  ``key`` is ``None`` when the
    comment does not name a known trajectory; the original left ``key``
    unbound in that case (and always in pdb mode), raising
    UnboundLocalError at the return statement.

    Raises:
        ValueError: for pdb input whose dimension is not a length.
    """
    key = None  # bug fix: was never assigned unless a trajectory tag matched
    # pdb files are always atomic positions in Angstrom.
    if mode == "pdb":
        auto_units = "angstrom"
        auto_dimension = "length"
    # Otherwise look for a "name{unit}" tag in the comment line.
    else:
        auto_units = "atomic_unit"
        auto_dimension = "undefined"
        is_comment_useful = []
        if comment != "":
            is_comment_useful = [
                _f for _f in [pat.search(comment.strip()) for pat in traj_re] if _f
            ]
        if len(is_comment_useful) > 0:
            # "name{unit}" -> ["name", "unit"] after stripping the closing brace.
            traj = is_comment_useful[0].group()[:-1].split("{")
            key, auto_dimension, auto_units = (
                traj[0],
                traj_dict[traj[0]]["dimension"],
                traj[1],
            )
    if mode == "pdb" and auto_dimension != "length":
        raise ValueError(
            "PDB Standard is only designed for atomic positions with units in Angstroms"
        )
    return key, auto_dimension, auto_units
def main(filename, imode, omode):
    """Convert the trajectory in *filename* from format *imode* to *omode*.

    Prints each converted frame to stdout and exits with status 0 once the
    input is exhausted.
    """
    # Peek at the first frame to learn the units/dimension metadata.
    with open(filename, "r") as ipos:
        ret = read_file_raw(imode, ipos)
    comment = ret["comment"]
    cell_units = get_cell_units(comment, imode)
    key, dim, dim_units = get_key_dim_units(comment, imode)

    ifr = 0
    # Re-open and stream every frame through the converter.  Using a context
    # manager also closes this handle (the original leaked it).
    with open(filename, "r") as ipos:
        while True:
            try:
                ret = read_file(imode, ipos)
                pos = ret["atoms"]
                cell = ret["cell"]
            except EOFError:  # finished reading files
                sys.exit(0)
            print_file(
                omode,
                pos,
                cell,
                filedesc=sys.stdout,
                title="",
                key=key,
                dimension=dim,
                units=dim_units,
                cell_units=cell_units,
            )
            ifr += 1
# Command-line entry point: a2b.py <filename> <input-format> <output-format>
if __name__ == "__main__":
    main(*sys.argv[1:])
| true |
6a77d10f434ef38c34ad1926e86387f0894ae70e | Python | aps-7bm/PyMotorTable | /PyMotorTable2/PyMotorTableCalcs.py | UTF-8 | 6,229 | 3 | 3 | [] | no_license | '''Underlying calculations for PyMotorTable.
Alan Kastengren, XSD
Started June 15, 2013
Change Log
November 18, 2014: Make initial points temporary, rather than confirmed, so they don't have to be erased.
'''
#imports
import numpy as np
import math
#Lists to save points.
temp_points = [0,1] #Provisional: all points in revised grid
confirmed_points = [] #Points in current grid
delete_points = [] #Confirmed points that will be deleted
# Tolerance within which two positions are considered the same point.
_rounding = 0.001
# When True, point lists are reported in descending order.
order_reversed = False
# When True, a new grid replaces any confirmed points it overlaps.
overwrite_overlaps = True
current_points = []
# Parameters of every confirmed grid (parallel lists keyed by field name).
history_dict = {'Start':[],'End':[],'Spacing':[],'Num Pts':[]}
# Parameters of the grid currently being previewed.
temp_dict = {'Start':None,'End':None,'Spacing':None,'Num Pts':None}
#
def fset_rounding(new_rounding):
    '''Sets the parameter that controls how close points must be to be
    considered equal.
    '''
    # Bug fix: without the global declaration the assignment bound a
    # function-local name and the module-level _rounding never changed.
    global _rounding
    _rounding = new_rounding
def fclear_temp():
    '''Discard all provisional points and any pending deletions.'''
    del delete_points[:]
    del temp_points[:]
def fclear_all():
    '''Clears all temp, delete, and confirmed points, as well as record
    of previously confirmed points.
    '''
    del delete_points[:]
    del temp_points[:]
    del confirmed_points[:]
    for field in ('Start', 'End', 'Spacing', 'Num Pts'):
        history_dict[field] = []
def fcompute_spacing(start, end, num_points):
    '''Compute the spacing between points given start, end, # of points.'''
    if num_points >= 2:
        return math.fabs(start - end) / (num_points - 1)
    # Fewer than two points: spacing is meaningless, report zero.
    return 0
def fcompute_num_points(start, end, spacing):
    '''Compute the number of points given start, end, and spacing.'''
    # Pad the span by the rounding tolerance so an endpoint that lands
    # within _rounding of the final stride is still counted.
    padded_span = math.fabs(start - end) + _rounding
    return int(math.floor(padded_span / spacing) + 1)
def fcompute_temp_grid(start,end,spacing=None,num_points=None):
    '''Function to compute the temporary points.
    Written to accept either a fixed spacing or a fixed number of points.
    Fills the module-level temp_points list and records the grid parameters
    in temp_dict as a side effect.
    '''
    #Save the start,end,spacing,and num_pts temporarily so we can save later
    temp_dict['Start'] = start
    temp_dict['End'] = end
    #If we have a valid spacing; note that if so, we ignore num_points.
    if spacing:
        #Compute the number of points we will get.  Need to add one for endpts.
        num_strides = fcompute_num_points(start,end,spacing) - 1
        #Use numpy.linspace to compute temp_points list.  Keep track of signs
        # NOTE(review): when end < start the grid is built upward from `end`,
        # so the last point may fall short of `start` -- confirm intended.
        if (end - start) > 0:
            temp_points[:] = list(np.linspace(start,start+num_strides*spacing,num_strides+1))
        else:
            temp_points[:] = list(np.linspace(end,end+num_strides*spacing,num_strides+1))
    #If no valid spacing, use number of points.
    elif num_points is not None:
        temp_points[:] = list(np.linspace(start,end,num_points))
    else:
        print "Need either a valid number of points or spacing."
        return None
    # Record the resulting grid parameters for the history log.
    temp_dict['Num Pts'] = len(temp_points)
    if len(temp_points) > 1:
        temp_dict['Spacing'] = temp_points[1] - temp_points[0]
    else:
        temp_dict['Spacing'] = 0
def fmerge_temp_confirmed():
    '''Function to compute full array of temporary points.
    If there are already confirmed points, figure out whether any points are in
    common between confirmed and temp.  Also figure out if any confirmed points
    will be deleted.
    If there are no temp points and the confirmed points are to be overwritten,
    the confirmed points are simply deleted.
    '''
    #First case: if there are no confirmed points, do nothing.
    if len(confirmed_points) == 0:
        return
    #Clear the delete_points list
    delete_points[:] = []
    #Sort both lists
    confirmed_points.sort()
    temp_points.sort()
    #If we are overwriting overlap regions, find confirmed points between
    #start and end of temp_points and add to delete_points
    if overwrite_overlaps:
        for item in confirmed_points:
            #What if there are no temp points: useful for deleting part of grid
            # Use the requested Start/End from temp_dict (handles either order).
            if not len(temp_points):
                if temp_dict['Start'] < temp_dict['End'] and temp_dict['Start']-item < _rounding and temp_dict['End']-item > -_rounding:
                    delete_points.append(item)
                elif temp_dict['End']-item < _rounding and temp_dict['Start']-item > -_rounding:
                    delete_points.append(item)
            #Now, handle the more typical case where there actually are temp points
            elif temp_points[0]-item < _rounding and temp_points[-1]-item > -_rounding:
                delete_points.append(item)
    #If we aren't overwriting overlaps, find coincident points and remove
    #from the temp array
    else:
        # NOTE(review): temp_points is deleted from while being enumerated;
        # the index shift can skip the element following a deletion -- confirm
        # whether coincident points can ever be adjacent within _rounding.
        for item in confirmed_points:
            for (i,temp) in enumerate(temp_points):
                if math.fabs(temp-item) < _rounding:
                    del temp_points[i]
                #If we are larger than item, might as well break loop, since lists are sorted
                elif temp_points[i] > item:
                    break
def fdelete_points():
    '''Delete the points in delete_points from confirmed_points.'''
    for doomed in delete_points:
        try:
            # remove() drops the first equal entry, matching the original
            # linear scan + del.
            confirmed_points.remove(doomed)
        except ValueError:
            print("Unmatched delete_point entry. Something didn't work.")
            print(doomed)
    delete_points[:] = []
def fconfirm_temp(order_reversed=False):
    '''Promote the temp grid: temp points become confirmed points.
    Also saves parameters of this grid in the history dictionary.
    '''
    if delete_points:
        fdelete_points()
    confirmed_points.extend(temp_points)
    confirmed_points.sort(reverse=order_reversed)
    fclear_temp()
    # Archive this grid's parameters, then reset the temp record.
    for field in ('Start', 'End', 'Spacing', 'Num Pts'):
        history_dict[field].append(temp_dict[field])
        temp_dict[field] = None
def getConfirmedPoints():
    # Confirmed grid as a sorted numpy array (descending if order_reversed).
    return np.array(sorted(confirmed_points,reverse=order_reversed))
def getTempPoints():
    # Provisional grid as a sorted numpy array.
    return np.array(sorted(temp_points,reverse=order_reversed))
def getDeletePoints():
    # Points queued for deletion, as a sorted numpy array.
    return np.array(sorted(delete_points,reverse=order_reversed))
def getCurrentPoints():
    # Snapshot of current_points, as a sorted numpy array.
    return np.array(sorted(current_points,reverse=order_reversed))
| true |
39d9b75e095fc26cbd21d37e7e64ea3ee775f36f | Python | davidchen/pathfinder | /utils/a_star.py | UTF-8 | 17,635 | 2.796875 | 3 | [] | no_license | from datetime import datetime
from . import helper_defs
from . import the_david_brian_heap
from copy import copy, deepcopy
from . import colors
import pygame
def weighted_a_star(start_node, goal_node, grid, heuristic, weight):
    """Weighted A* search from start_node to goal_node on grid.

    Fringe priority is g + weight * h, so weight 0 degenerates to
    uniform-cost search and weight 1 to plain A*.  Prints runtime/statistics
    and returns the list of path cells (start and goal excluded, in
    start-to-goal order), or None when the goal is unreachable.
    """
    helper_defs.reset_cells_in_grid(grid)
    helper_defs.set_cell_values(grid, goal_node, heuristic)
    helper_defs.set_cell_neighbors(grid)
    runtime_start = datetime.now()
    memory_start = helper_defs.memory()
    start_node.g_value = 0
    start_node.parent = None
    fringe = the_david_brian_heap.DavidsAndBriansHeapForCellPriorityWithAReallyLongName()
    fringe.insert(start_node, start_node.g_value + start_node.h_value * weight)
    start_node.in_fringe = True
    path_solution = []
    while not fringe.is_empty():
        current_cell = fringe.pop()
        if current_cell is goal_node: # goal found
            # Walk the parent chain back to the start to rebuild the path.
            runtime = datetime.now() - runtime_start
            memory_used = helper_defs.memory() - memory_start
            previous = goal_node.parent
            while previous.parent:
                path_solution.append(previous)
                previous = previous.parent
            print('RUNTIME: {} milliseconds'.format(runtime.microseconds / 1000))
            print('LENGTH OF PATH: {}'.format(sum([cell.g_value for cell in path_solution])))
            print('NODES EXPANDED: {}'.format(sum([cell.expanded_already for row in grid for cell in row])))
            print('MEMORY USED: {} bytes ({} MB)'.format(memory_used, memory_used / 1000000))
            return path_solution[::-1]
        current_cell.expanded_already = True
        # Relax every unexpanded neighbor; re-insert with updated priority.
        for neighbor in current_cell.neighbors:
            if not neighbor.expanded_already:
                if not neighbor.in_fringe:
                    neighbor.g_value = 99999999
                    neighbor.parent = None
                if current_cell.g_value + helper_defs.calc_g_value(current_cell, neighbor) < neighbor.g_value:
                    neighbor.g_value = current_cell.g_value + helper_defs.calc_g_value(current_cell, neighbor)
                    neighbor.parent = current_cell
                    if neighbor.in_fringe:
                        fringe.remove(neighbor)
                        neighbor.in_fringe = False
                    fringe.insert(neighbor, neighbor.g_value + neighbor.h_value * weight)
                    neighbor.in_fringe = True
    print('No Path to Goal Found.')
    return
def uniform_cost_search(start_node, goal_node, grid):
    """Dijkstra/UCS: weighted A* with the heuristic contribution zeroed."""
    return weighted_a_star(start_node, goal_node, grid, heuristic=None, weight=0)
def a_star(start_node, goal_node, grid, heuristic):
    """Plain A* search (fringe priority f = g + h).

    The original body was a line-for-line copy of weighted_a_star with the
    weight fixed to 1 (plus a commented-out visualization snippet), so it is
    now expressed as that special case to remove the duplication.
    """
    return weighted_a_star(start_node, goal_node, grid, heuristic, 1)
return
def sequential_a_star(start_node, goal_node, grid, all_heuristics, weight1, weight2):
    """Sequential heuristic search: five searches (one per heuristic in
    all_heuristics) run in lockstep, each with its own fringe and grid copy.

    NOTE(review): copy(grid) is a *shallow* copy, so the five grids share the
    same row lists and cells; set_cell_values then overwrites the shared
    h-values on every iteration.  deepcopy is imported but unused -- confirm
    whether deep copies were intended here.
    NOTE(review): the goal-found branches report NODES EXPANDED from
    list_of_grids[i]['grid'] even in the anchor (i==0) path -- confirm.
    """
    helper_defs.reset_cells_in_grid(grid)
    helper_defs.set_cell_neighbors(grid)
    runtime_start = datetime.now()
    memory_start = helper_defs.memory()
    list_of_fringes = []
    list_of_grids = []
    for i in range(0, 5):
        list_of_fringes.append(the_david_brian_heap.DavidsAndBriansHeapForCellPriorityWithAReallyLongName())
        # list_of_grids.append({'start': copy(start_node), 'goal': copy(goal_node), 'grid': copy(grid)})
        list_of_grids.append({'grid': copy(grid)})
        list_of_grids[i]['start'] = list_of_grids[i]['grid'][start_node.row][start_node.col]
        list_of_grids[i]['goal'] = list_of_grids[i]['grid'][goal_node.row][goal_node.col]
        helper_defs.set_cell_values(list_of_grids[i]['grid'], goal_node, all_heuristics[i])
    # Seed every fringe with its own start cell.
    for i in range(0, 5):
        current_start = list_of_grids[i]['start']
        current_goal = list_of_grids[i]['goal']
        current_start.g_value = 0.0
        current_goal.g_value = 999999999.0
        list_of_fringes[i].insert(current_start, key(current_start, current_goal, weight1, all_heuristics[i]))
        current_start.in_fringe = True
    while not list_of_fringes[0].is_empty():
        # Expand an inadmissible search while its best priority stays within
        # weight2 of the anchor search's best priority; otherwise expand the anchor.
        for i in range(1, 5):
            current_goal = list_of_grids[i]['goal']
            if not list_of_fringes[i].is_empty() and list_of_fringes[i].peek_min_priority() <= (weight2 * list_of_fringes[0].peek_min_priority()):
                if current_goal.g_value <= list_of_fringes[i].peek_min_priority():
                    if current_goal.g_value < 999999999.0: # goal found
                        runtime = datetime.now() - runtime_start
                        memory_used = helper_defs.memory() - memory_start
                        path_solution = []
                        previous = current_goal.parent
                        while previous.parent:
                            path_solution.append(previous)
                            previous = previous.parent
                        print('RUNTIME: {} milliseconds'.format(runtime.microseconds / 1000))
                        print('LENGTH OF PATH: {}'.format(sum([cell.g_value for cell in path_solution])))
                        print('NODES EXPANDED: {}'.format(sum([cell.expanded_already for row in list_of_grids[i]['grid'] for cell in row])))
                        print('MEMORY USED: {} bytes ({} MB)'.format(memory_used, memory_used / 1000000))
                        return path_solution[::-1]
                else:
                    cell_from_heap = list_of_fringes[i].pop()
                    for neighbor in cell_from_heap.neighbors:
                        if not neighbor.expanded_already:
                            if not neighbor.in_fringe:
                                neighbor.g_value = 999999999.0
                                neighbor.parent = None
                            if neighbor.g_value > cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor):
                                neighbor.g_value = cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor)
                                neighbor.parent = cell_from_heap
                                if neighbor.in_fringe:
                                    list_of_fringes[i].remove(neighbor)
                                    neighbor.in_fringe = False
                                list_of_fringes[i].insert(neighbor, key(neighbor, list_of_grids[i]['goal'], weight1, all_heuristics[i]))
                                neighbor.in_fringe = True
                    cell_from_heap.expanded_already = True
            else:
                if list_of_grids[0]['goal'].g_value <= list_of_fringes[0].peek_min_priority():
                    if list_of_grids[0]['goal'].g_value < 999999999.0: # goal found
                        runtime = datetime.now() - runtime_start
                        memory_used = helper_defs.memory() - memory_start
                        path_solution = []
                        previous = list_of_grids[0]['goal'].parent
                        while previous.parent:
                            path_solution.append(previous)
                            previous = previous.parent
                        print('RUNTIME: {} milliseconds'.format(runtime.microseconds / 1000))
                        print('LENGTH OF PATH: {}'.format(sum([cell.g_value for cell in path_solution])))
                        print('NODES EXPANDED: {}'.format(sum([cell.expanded_already for row in list_of_grids[i]['grid'] for cell in row])))
                        print('MEMORY USED: {} bytes ({} MB)'.format(memory_used, memory_used / 1000000))
                        return path_solution[::-1]
                else:
                    cell_from_heap = list_of_fringes[0].pop()
                    for neighbor in cell_from_heap.neighbors:
                        if not neighbor.expanded_already:
                            if not neighbor.in_fringe:
                                neighbor.g_value = 999999999.0
                                neighbor.parent = None
                            if neighbor.g_value > cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor):
                                neighbor.g_value = cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor)
                                neighbor.parent = cell_from_heap
                                if neighbor.in_fringe:
                                    list_of_fringes[0].remove(neighbor)
                                    neighbor.in_fringe = False
                                list_of_fringes[0].insert(neighbor, key(neighbor, list_of_grids[0]['goal'], weight1, all_heuristics[0]))
                                neighbor.in_fringe = True
                    cell_from_heap.expanded_already = True
    print('No Path to Goal Found.')
    return
def integrated_a_star(start_node, goal_node, grid, all_heuristics, weight1, weight2):
    """Integrated multi-heuristic A*: a single grid shared by five fringes,
    one per heuristic; expanding a cell removes it from every fringe.

    NOTE(review): inside the anchor (else) branch the fringe-admission test
    still reads loop variable ``i`` from the preceding for loop (stale value
    4) -- confirm whether ``all_heuristics[z]`` was intended.
    """
    helper_defs.reset_cells_in_grid(grid)
    helper_defs.set_cell_neighbors(grid)
    runtime_start = datetime.now()
    memory_start = helper_defs.memory()
    list_of_fringes = []
    start_node.g_value = 0
    goal_node.g_value = 999999999.0
    # g-values of cells that have been fully expanded ("closed").
    v_vals = {}
    for i in range(0, 5):
        list_of_fringes.append(the_david_brian_heap.DavidsAndBriansHeapForCellPriorityWithAReallyLongName())
        list_of_fringes[i].insert(start_node, key(start_node, goal_node, weight1, all_heuristics[i]))
        start_node.in_fringe = True
    while not list_of_fringes[0].is_empty():
        for i in range(1, 5):
            if not list_of_fringes[i].is_empty() and list_of_fringes[i].peek_min_priority() <= (weight2 * list_of_fringes[0].peek_min_priority()):
                if goal_node.g_value <= list_of_fringes[i].peek_min_priority():
                    if goal_node.g_value < 999999999.0:
                        # Goal reached within this inadmissible fringe.
                        runtime = datetime.now() - runtime_start
                        memory_used = helper_defs.memory() - memory_start
                        path_solution = []
                        previous = goal_node.parent
                        while previous.parent:
                            path_solution.append(previous)
                            previous = previous.parent
                        print('RUNTIME: {} milliseconds'.format(runtime.microseconds / 1000))
                        print('LENGTH OF PATH: {}'.format(sum([cell.g_value for cell in path_solution])))
                        print('NODES EXPANDED: {}'.format(sum([cell.expanded_already for row in grid for cell in row])))
                        print('MEMORY USED: {} bytes ({} MB)'.format(memory_used, memory_used / 1000000))
                        return path_solution[::-1]
                else:
                    cell_from_heap = list_of_fringes[i].pop()
                    # Expanded cells leave every fringe at once.
                    for p in range(0,5):
                        list_of_fringes[p].remove(cell_from_heap)
                    v_vals[cell_from_heap] = cell_from_heap.g_value
                    for neighbor in cell_from_heap.neighbors:
                        if not neighbor.expanded_already:
                            if not neighbor.in_fringe:
                                neighbor.g_value = 999999999.0
                                neighbor.parent = None
                            if neighbor.g_value > cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor):
                                neighbor.g_value = cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor)
                                neighbor.parent = cell_from_heap
                                if neighbor.in_fringe:
                                    list_of_fringes[0].remove(neighbor)
                                    neighbor.in_fringe = False
                                list_of_fringes[0].insert(neighbor, key(neighbor, goal_node, weight1, all_heuristics[0]))
                                neighbor.in_fringe = True
                                for z in range(1,5):
                                    if key(neighbor, goal_node, weight1, all_heuristics[i]) <= weight2*key(neighbor, goal_node, weight1, all_heuristics[0]):
                                        list_of_fringes[z].insert(neighbor, key(neighbor, goal_node, weight1, all_heuristics[0]))
                                        neighbor.in_fringe = True
                    cell_from_heap.expanded_already = True
            else:
                if goal_node.g_value <= list_of_fringes[0].peek_min_priority():
                    if goal_node.g_value < 999999999.0:
                        # Goal reached via the anchor (admissible) fringe.
                        runtime = datetime.now() - runtime_start
                        memory_used = helper_defs.memory() - memory_start
                        path_solution = []
                        previous = goal_node.parent
                        while previous.parent:
                            path_solution.append(previous)
                            previous = previous.parent
                        print('RUNTIME: {} milliseconds'.format(runtime.microseconds / 1000))
                        print('LENGTH OF PATH: {}'.format(sum([cell.g_value for cell in path_solution])))
                        print('NODES EXPANDED: {}'.format(sum([cell.expanded_already for row in grid for cell in row])))
                        print('MEMORY USED: {} bytes ({} MB)'.format(memory_used, memory_used / 1000000))
                        return path_solution[::-1]
                else:
                    cell_from_heap = list_of_fringes[0].pop()
                    for p in range(0,5):
                        list_of_fringes[p].remove(cell_from_heap)
                    v_vals[cell_from_heap] = cell_from_heap.g_value
                    for neighbor in cell_from_heap.neighbors:
                        if not neighbor.expanded_already:
                            if not neighbor.in_fringe:
                                neighbor.g_value = 999999999.0
                                neighbor.parent = None
                            if neighbor.g_value > cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor):
                                neighbor.g_value = cell_from_heap.g_value + helper_defs.calc_g_value(cell_from_heap, neighbor)
                                neighbor.parent = cell_from_heap
                                if neighbor.in_fringe:
                                    list_of_fringes[0].remove(neighbor)
                                    neighbor.in_fringe = False
                                list_of_fringes[0].insert(neighbor, key(neighbor, goal_node, weight1, all_heuristics[0]))
                                neighbor.in_fringe = True
                                for z in range(1,5):
                                    if key(neighbor, goal_node, weight1, all_heuristics[i]) <= weight2*key(neighbor, goal_node, weight1, all_heuristics[0]):
                                        list_of_fringes[z].insert(neighbor, key(neighbor, goal_node, weight1, all_heuristics[0]))
                                        neighbor.in_fringe = True
                    cell_from_heap.expanded_already = True
    print('No Path to Goal Found.')
    return
def key(node, current_goal, weight1, current_heuristic):
    """Priority of *node*: path cost so far plus the weighted heuristic estimate."""
    h_estimate = current_heuristic(node, current_goal)
    return node.g_value + weight1 * h_estimate
| true |
4ae42680df2a9ead51b442ddee08c1ff21d7586f | Python | jspw/Basic_Python | /basic/print emoji.py | UTF-8 | 909 | 3.09375 | 3 | [
"Unlicense"
] | permissive | #website : https://unicode.org/emoji/charts/full-emoji-list.html
# Codepoint tip: in the chart URLs, replace '+' with '000' to get the \U escape.
# The distinct smileys first...
for face in ("\U0001F600", "\U0001F603", "\U0001F604",
             "\U0001F601", "\U0001F606", "\U0001F605"):
    print(face)
# ...then the "tears of joy" emoji, exactly as many times as before.
for _ in range(32):
    print("\U0001F602")
print("At the End of the Day : \U0001F595 \b Fuck You !")
a13807886e430bc21bc7e078e0e485a2d88edbf7 | Python | CPJKU/score_following_game | /score_following_game/utils.py | UTF-8 | 3,319 | 2.65625 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
import os
import shutil
import soundfile as sf
def write_video(images, fn_output='output.mp4', frame_rate=20, overwrite=False):
    """Takes a list of images and interprets them as frames for a video.

    Source: http://tsaith.github.io/combine-images-into-a-video-with-python-3-and-opencv-3.html
    """
    height, width, _ = images[0].shape

    # Optionally delete a stale output file before writing.
    if overwrite and os.path.exists(fn_output):
        os.remove(fn_output)

    writer = cv2.VideoWriter(fn_output, cv2.VideoWriter_fourcc(*'mp4v'),
                             frame_rate, (width, height))
    for frame_src in images:
        # Every frame is resized to the first frame's geometry before writing.
        writer.write(cv2.resize(frame_src, (width, height)))

    writer.release()  # finalize the container
    return fn_output
def mux_video_audio(path_video, path_audio, path_output='output_audio.mp4'):
    """Use FFMPEG to mux video with audio recording."""
    from subprocess import check_call
    cmd = ["ffmpeg", "-y", "-i", path_video, "-i", path_audio,
           "-shortest", path_output]
    check_call(cmd)
def render_video(observation_images, pool, fps=20, mux_audio=True, real_perf=False, video_path='videos'):
    """Render observation frames to an .mp4, optionally muxed with audio.

    NOTE(review): real_perf is compared against the string 'wav' although its
    default is the boolean False -- presumably callers pass 'wav' or 'midi';
    confirm against call sites.
    """
    if not os.path.isdir(video_path):
        os.mkdir(video_path)

    if mux_audio:
        fn_audio = 'tmp.wav'
        if real_perf == 'wav':
            # copy over wav file
            shutil.copy(pool.curr_song.path_perf, fn_audio)
        else:
            # get synthesized MIDI as WAV
            perf_audio, fs = pool.get_current_perf_audio_file()
            sf.write(fn_audio, perf_audio, fs)

        # frame rate video is now based on the piano roll's frame rate
        path_video = write_video(observation_images, fn_output=pool.get_current_song_name() + '.mp4',
                                 frame_rate=fps, overwrite=True)

        # mux video and audio with ffmpeg
        mux_video_audio(path_video, fn_audio, path_output=os.path.join(video_path,
                                                                       pool.get_current_song_name() + '_audio.mp4'))

        # clean up the temporary wav and the silent video
        os.remove(fn_audio)
        os.remove(path_video)
    else:
        write_video(observation_images, frame_rate=1, overwrite=True)
def get_opencv_bar(value, bar_heigth=500, min_value=0, max_value=11, color=(255, 255, 0), title=None):
    """Draw a vertical gauge (100 px wide BGR image) showing *value*.

    NOTE(review): min_value is used as the lower clamp of the *pixel*
    coordinate, not of the value itself -- confirm this is intended.
    """
    # Map value onto a y pixel (top 20 px reserved for the title).
    value_coord = bar_heigth - int(float(bar_heigth - 20) * value / max_value) + 20
    value_coord = np.clip(value_coord, min_value, bar_heigth - 1)

    bar_img_bgr = np.zeros((bar_heigth, 100, 3), np.uint8)
    cv2.line(bar_img_bgr, (0, value_coord), (bar_img_bgr.shape[1] - 1, value_coord), color, 5)

    # write current speed to observation image
    font_face = cv2.FONT_HERSHEY_SIMPLEX
    text = "%.2f" % value
    text_size = cv2.getTextSize(text, fontFace=font_face, fontScale=0.6, thickness=1)[0]
    text_org = (100 - text_size[0], value_coord - 6)
    cv2.putText(bar_img_bgr, text, text_org, fontFace=font_face, fontScale=0.6, color=color, thickness=1)

    if title is not None:
        # Center the title horizontally in the 100 px wide bar.
        text_size = cv2.getTextSize(title, fontFace=font_face, fontScale=0.6, thickness=1)[0]
        text_org = (100 // 2 - text_size[0] // 2, 20)
        cv2.putText(bar_img_bgr, title, text_org, fontFace=font_face, fontScale=0.6, color=color, thickness=1)

    return bar_img_bgr
| true |
8d85459c5c4ef509d4fd3ff1458bd39290960f03 | Python | abhishekjais-124/Contest-Reminder-python-project | /reminder_project/main.py | UTF-8 | 4,942 | 2.6875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
import datetime
import re
from twilio.rest import Client
import random
from datetime import timedelta
# Twilio credentials -- intentionally blank in the repo; fill in before running.
sid= "" #not written due to privacy
token = ""#due to privacy
client = Client(sid,token)
# Containers filled later by the CodeChef scrape.
Name = []
p_title = []
level = ['school','easy']
# Parse leetcode.txt: take the first non-empty tab field of each long line
# and slugify it (lowercase, hyphen-joined) into Full.
f1 = open("leetcode.txt","r")
i = 0
Full = []
for line in f1:
    if len(line) >= 10:
        x = line.split('\t')
        for i in x:
            if not (i.isspace() or i == ''):
                i = ('-').join(i.strip().lower().split(" "))
                if len(i)> 0:
                    Full.append(i)
                break
#print(Full)
# Pick one random LeetCode problem: y1 = display title, y3 = problem URL.
leet_prob = "https://leetcode.com/problems/"
n2 = len(Full)
choose2 = random.randint(0,n2-1)
y1 = " ".join(Full[choose2].split('-')).capitalize()
y3 = leet_prob + Full[choose2]
#print(y1,y3)
# Scrape CodeChef problem names/codes for each difficulty level.
for i in level:
    URL2 = "https://www.codechef.com/problems/" + i
    parsed_html = requests.get(URL2)
    soup2 = BeautifulSoup(parsed_html.content,"html.parser")
    data2 = str(soup2.findAll("tr", class_ = "problemrow"))
    Name += re.findall(r'<b>(.*?)</b>',data2)
    p_title += re.findall(r'/status/(.*?)"',data2)
    #print(data2)
#print(p_title)
total = len(Name)
#print(len(Name),len(p_title))
choose = random.randint(0,total-2)
#for i in range(total):
#    print(Name[i],p_title[i])
# Pick one random CodeChef problem: x1 = "Name (CODE)", x3 = problem URL.
x1 = Name[choose] + " (" + p_title[choose] + ")"
x3 = "https://www.codechef.com/problems/" + p_title[choose]
#print(x1,x3)
# Month names for parsing clist dates; judge keywords to keep / reject.
Month = ['January','February','March','April','May','June','July',"August",'September','October','November','December']
Mainloc = ['spoj','code','hacker','google','facebook','codingame']
Neg = ['developer','fullstack','hackathon','frontend','backend','machine','deep','devops']
URL = "https://clist.by/"
def fun(l):
    """Return True if *l* (a contest host string) mentions any tracked judge."""
    return any(keyword in l for keyword in Mainloc)
# Fetch the clist.by front page and pull the contest fields out of the
# embedded JavaScript data with regexes.
parsed_html = requests.get(URL)
soup = BeautifulSoup(parsed_html.content,"html.parser")
data = str(soup.findAll("a", class_ = "data-ace"))
#print(data)
contest_title = re.findall(r'title"(.*?)",',data)
location = re.findall(r'location"(.*?)",',data)
start = re.findall(r'start"(.*?)",',data)
end = re.findall(r'end"(.*?)",',data)
url_list = re.findall(r'url: (.*?)",',data)
#print(contest_title)
#print(location)
#print(start)
#print(end)
#print(url_list)
# A collects one [title, start, end, duration, url] record per kept contest.
A = []
events = len(start)
# Parse each contest's start/end strings, shift to IST (+5:30), and keep the
# ones that start within a day, last at most 10 days, and are hosted on a
# tracked judge (rejecting hackathon/job-style HackerEarth events).
for i in range(events):
    s = start[i].strip().split(" ")
    s_month =Month.index(s[0][2:]) + 1
    s_date = int(s[1][:-1])
    s_year = int(s[2])
    s_time = s[3].strip().split(':')
    #print(s_month,s_date,s_year,s_time)
    e = end[i].strip().split(" ")
    e_month = Month.index(e[0][2:]) + 1
    e_date = int(e[1][:-1])
    e_year = int(e[2])
    e_time = e[3].strip().split(':')
    #print(e_month, e_date, e_year, e_time)
    a = datetime.datetime(s_year,s_month,s_date,int(s_time[0]) , int(s_time[1]),int(s_time[2]))
    b = datetime.datetime(e_year, e_month, e_date, int(e_time[0]), int(e_time[1]), int(e_time[2]))
    # Convert from UTC to IST.
    a += timedelta(hours= 5,minutes= 30)
    b += timedelta(hours=5, minutes=30)
    c = b - a
    d = a - datetime.datetime.today()
    #print(a,b,c.days)
    loc = location[i][2:]
    zz = contest_title[i][2:].lower()
    if c.days <= 10 and fun(loc) and d.days < 1:
        if 'hacker' in loc:
            # Skip HackerEarth events whose title contains a job/hackathon keyword.
            ff = 0
            for m in Neg:
                if zz.find(m) != -1:
                    ff = 1
                    break
            if ff: continue
        temp = []
        t1 = loc.split('.')[0].capitalize() + ": " + contest_title[i][2:]
        #print(t1)
        print(a,b,loc)
        t2 = a.strftime("%d %b %Y %I:%M %p")
        t3 = b.strftime("%d %b %Y %I:%M %p")
        if c.days == 0:
            tt =divmod(c.seconds,3600)
            t4 = str(tt[0]) + " hours " + str(tt[1]//60) + " minutes"
        else:
            tt = divmod(c.seconds,60)
            t4 = str(c.days) + " days"
        temp.append(t1);temp.append(t2);temp.append(t3);temp.append(t4);temp.append(url_list[i])
        A.append(temp.copy())
#print(A)
# Recipient phone numbers (without the +91 country code prefix).
Numbers = ["7615815667"]
msg = "Contest Reminder" + '\n\n'
c = 0
n = len(A)
print("Working...")
# Batch contests five per WhatsApp message; the final message also carries a
# randomly chosen "problem of the day" (CodeChef or LeetCode).
for i in A:
    msg += '*' + str(c+1) +'*'+ ". " +'*'+ i[0] + '*'+ '\n' + 'Start: ' + i[1] + '\n' + 'End: ' + i[2] + '\n' + 'Duration: ' + i[3] + '\n' + 'link: ' + i[4] + '\n\n'
    c += 1
    print(c)
    if c % 5 == 0 or c == n:
        if c == n:
            ch3 = random.randint(1,100)
            if ch3 %2 ==0:
                msg += '_Problem of the day_' + '\n\n' + '*' +x1 +'*\n' + x3 + '\n\n' + "By AJ"
            else:
                msg += '_Problem of the day_' + '\n\n' + '*' +y1 +'*\n' + y3 + '\n\n' + "By AJ"
        for j in Numbers:
            # Send via the Twilio WhatsApp sandbox number.
            message = client.messages.create(body = msg, from_ = "whatsapp:+14155238886",to = "whatsapp:+91" + j)
        msg = ""
| true |
c449a168bc35a5dc2c4bbd69080ec793614a77e6 | Python | miguelfscpaulino/AtariGo | /submission/go.py | UTF-8 | 26,521 | 3.796875 | 4 | [] | no_license | import sys
import copy
class State():
    """One board position of an Atari Go game: the matrix, whose turn it is,
    the occupied squares, each player's stone groups and their liberties."""
    # class state has the following atributes
    def __init__(self, mat, player, filled, dim, groups1, groups2, zeros1, zeros2, terminalflag=False, drawflag= False):
        self.mat = mat # game matrix
        self.player = player # current player
        self.filled = filled # list of positions occupied in the board in a certain state
        self.dim = dim # dimension of the board(9x9 -> dim = 9)
        self.drawflag = drawflag # Flag that is 1 if state is a draw, 0 else
        self.terminalflag = terminalflag # Flag that is 1 if state is a terminal, 0 else
        self.groups1 = groups1 # List of lists of player 1 in which is list is a group(string) and has the positions of said group
        self.groups2 = groups2 # List of lists of player 2 in which is list is a group(string) and has the positions of said group
        self.zeros1 = zeros1 # List of lists of player 1 in which is list is the liberties of a group in groups1 list and has the positions of said liberties
        self.zeros2 = zeros2 # List of lists of player 2 in which is list is the liberties of a group in groups2 list and has the positions of said liberties
    def surronding_zeros(self, pos, dim, filled):
        #Function that checks if a certain position has zeros(liberties) around itself and returns a list with thoose positions.
        #If there aren't any zeros, returns an empty list.
        # NOTE(review): the early-exit comparisons assume `filled` is sorted
        # ascending by item[1] (the board index) -- confirm callers keep it sorted.
        zeros = []
        if (pos - dim) >= 0: #Checks if the position above exists and is a zero
            for item in filled:
                if item[1] == (pos-dim):
                    break
                elif item[1] > (pos-dim):
                    zeros.append(pos-dim)
                    break
        if (pos + dim) < dim*dim: #Checks if the position bellow exists and is a zero
            for item in filled:
                if item[1] == (pos+dim):
                    break
                elif item[1] > (pos+dim) or item == filled[-1]:
                    zeros.append(pos+dim)
                    break
        if (pos % dim) != 0: #Checks if the position to the left exists and is a zero
            for item in filled:
                if item[1] == pos-1:
                    break
                elif item[1] > pos-1:
                    zeros.append(pos-1)
                    break
        if ((pos+1) % dim) != 0: #Checks if the position to the right exists and is a zero
            for item in filled:
                if item[1] == (pos+1):
                    break
                elif item[1] > (pos+1) or item == filled[-1]:
                    zeros.append(pos+1)
                    break
        return zeros
    # Group of self explanatory functions(getters and setters)
    def getMat(self):
        return self.mat
    def getPlayer(self):
        return self.player
    def getFilled(self):
        return self.filled
    def getDim(self):
        return self.dim
    def getDrawFlag(self):
        return self.drawflag
    def getTerminalFlag(self):
        return self.terminalflag
    def getGroups1(self):
        return self.groups1
    def getGroups2(self):
        return self.groups2
    def getZeros1(self):
        return self.zeros1
    def getZeros2(self):
        return self.zeros2
    def setDrawFlag(self, flag):
        self.drawflag = flag
    def setTerminalFlag(self, flag):
        self.terminalflag = flag
    def setMat(self, m):
        self.mat = m
    def setPlayer(self, p):
        self.player = p
    def setFilled(self, f):
        self.filled = f
    def addFilled(self, f):
        # Append one occupied position to the filled list.
        self.filled.append(f)
    def removeFilled(self, f):
        # Remove one occupied position from the filled list.
        self.filled.remove(f)
    def setDim(self, d):
        self.dim = d
class Game():
"""docstring for class Game"""
def getState(self): #Returns the current state
return self.state
def to_move(self, s):
#returns the player to move next, given the state "s"
return s.getPlayer()
def terminal_test(self, s):
#checks if state "s" is terminal
zeros1 = s.getZeros1()
zeros2 = s.getZeros2()
for zeros in [zeros1, zeros2]: #If a certain group has no liberties, the state is terminal.
for i in zeros:
if len(i) == 0:
s.setTerminalFlag(True)
s.setDrawFlag(False)
return True
if not self.actions(s): #Sets draw flag if it is a draw
s.setTerminalFlag(False)
s.setDrawFlag(True)
return False
def utility(self, s, p):
#returns payoff of state "s" if terminal or evaluation with respect to player
if s.getDrawFlag(): #If it's a draw, the utility is 0.
return 0
if s.getTerminalFlag(): # If the state is terminal , returns + or - 1 depending if the original player given by the original state won or not, respecitvely.
if s.getPlayer() == p:
return -1
else:
return 1
if p ==1: # Else, it's considered that the state that reduces the largest number of liberties of an opposing group is the better one.
zeros= s.getZeros2()
else:
zeros= s.getZeros1()
liberties = []
for i in zeros:
liberties.append(len(i))
dim = s.getDim()
return 1 - min(liberties)/(dim*dim);
def actions(self, s):
#returns list of valid moves at state "s"
dim = s.getDim()
player = s.getPlayer()
if player == 1: #Choses what lists to evaluate depending on whose turn it is.
zerosCont = s.getZeros2()
zerosOwn = s.getZeros1()
else:
zerosCont = s.getZeros1()
zerosOwn = s.getZeros2()
mat = s.getMat()
filled = s.getFilled()
dim = s.getDim()
aux = [(player, i+1, k+1) for i in range(dim) for k in range(dim) if mat[i][k] == 0] #initialy all zeros are possible moves in the board
rmv = []
for mov in aux: # Cicle to remove suicidal moves, which envolves verifying if the move is a suicide to the
continue_flag = False # specific piece, to the group it's going to belong to if played or if it's not a suicide because it captures
ind = coord2ind(mov[2]-1, mov[1]-1, dim) # an oponent group.
if not s.surronding_zeros(ind, dim, filled):
for i in zerosCont:
if len(i) == 1 and i[0] == ind:
continue_flag = True
break
if continue_flag:
continue
for i in zerosOwn:
if ind in i:
if len(i) != 1:
continue_flag = True
break
if continue_flag:
continue
else:
rmv.append(mov)
for k in rmv:
aux.remove(k)
return aux
def result(self, s, a):
#returns the sucessor game state after playing move "a" at state "s"
if a[0] == 1: #if player is player 1..
groups = copy.deepcopy(s.getGroups1()) #..player groups is groups2,..
zeros = copy.deepcopy(s.getZeros1()) #..list of surounding zeros is palyer's 1 zeros
zerosCont = copy.deepcopy(s.getZeros2()) #..and list of contrarie surounding zeros is palyer's 2 zeros
player = 2
else: #same but for player 2
groups = copy.deepcopy(s.getGroups2())
zeros = copy.deepcopy(s.getZeros2())
zerosCont = copy.deepcopy(s.getZeros1())
player = 1
mat = s.getMat()
filled = s.getFilled()
dim = s.getDim()
ind = coord2ind(a[2]-1, a[1]-1, dim) #gets index of the new piece
mat = mat[:(a[1]-1)] + [mat[a[1]-1][:(a[2]-1)] + [a[0]] + mat[a[1]-1][a[2]:]] + mat[a[1]:] #remakes the board with the new piece
filled = filled + [(a[0], ind)] #adds the piece's index to the "filled" list
filled.sort(key=lambda x:x[1]) #sort "filled" list by the indexes
UpGroup=[] #group to which the top piece (of the piece that was played) is associated
DownGroup=[] # " " " " down piece is associated
LeftGroup=[] # " " " " left piece is associated
RightGroup=[] # " " " " right piece is associated
joinedUp=False # flag if a connection to the upper piece was made
joinedDown=False # " " " " " " down piece was made
joinedLeft=False # " " " " " " left piece was made
joinedRight = False # " " " " " " right piece was made
if (ind - dim) >= 0 and mat[a[1]-2][a[2]-1] == a[0]: #if the position above the new piece exists and has a piece of the same colour..
for k in groups: #..finds the group of that piece..
if (ind - dim) in k:
k.append(ind) #..connects the new piece with that group.
joinedUp=True
break
if (ind + dim) < dim*dim and mat[a[1]][a[2]-1] == a[0]: #if the position bellow the new piece exists and has a piece of the same colour..
if joinedUp: #..if a connection with the above piece was made..
cntup_flag = True
cntup = 0
cntdown_flag = True
cntdown = 0
DownGroup = []
UpGroup = []
for k in groups: #..search the group list..
if ind in k:
cntup_flag = False
UpGroup=k #..finds the above piece's group..
if (ind+dim) in k:
cntdown_flag = False
DownGroup=k #..finds the bellow piece's group..
if UpGroup and DownGroup:
break
if cntup_flag:
cntup += 1
if cntdown_flag:
cntdown += 1
if UpGroup is not DownGroup: #..and if the groups are not the same..
for k in DownGroup:
UpGroup.append(k) #..connects the pieces of the bellow piece's group to the group of the above piece.
groups.remove(DownGroup)
for k in zeros[cntdown]:
zeros[cntup].append(k) #adds the bellow group's liberties to the above group's
zeros.remove(zeros[cntdown]) #removes the liberties list of the down group
else: #if a connection was not made with the above ..
for k in groups: #..search the group list..
if (ind+dim) in k:
k.append(ind) #..and connects to the bellow piece's group
joinedDown = True
break
if (ind%dim) != 0 and mat[a[1]-1][a[2]-2] == a[0]: #if the position to the left of the new piece exists and has a piece of the same colour..
if joinedUp: #..if a connection with the above piece was made..
cntup_flag = True
cntup = 0
cntleft_flag = True
cntleft = 0
LeftGroup = []
UpGroup = []
for k in groups: #..search the group list..
if ind in k:
cntup_flag = False
UpGroup=k #..finds the above piece's group..
if (ind-1) in k:
cntleft_flag = False
LeftGroup=k #..finds the left piece's group..
if UpGroup and LeftGroup:
break
if cntup_flag:
cntup += 1
if cntleft_flag:
cntleft += 1
if UpGroup is not LeftGroup: #..and if the groups are not the same..
for k in LeftGroup:
UpGroup.append(k) #..connects the pieces of the left piece's group to the group of the above piece.
groups.remove(LeftGroup)
for k in zeros[cntleft]:
zeros[cntup].append(k) #adds the left group's liberties to the above group's
zeros.remove(zeros[cntleft]) #removes the liberties list of the left group
elif joinedDown: #..if a connection with the above piece was not made but instead made with the bellow piece..
cntdown_flag = True
cntdown = 0
cntleft_flag = True
cntleft = 0
DownGroup = []
LeftGroup = []
for k in groups: #..search the group list..
if ind in k:
cntdown_flag = False
DownGroup=k #..finds the bellow piece's group..
if (ind-1) in k:
cntleft_flag = False
LeftGroup=k #..finds the left piece's group..
if DownGroup and LeftGroup:
break
if cntdown_flag:
cntdown += 1
if cntleft_flag:
cntleft += 1
if DownGroup is not LeftGroup: #..and if the groups are not the same..
for k in LeftGroup:
DownGroup.append(k) #..connects the pieces of the left piece's group to the group of the bellow piece.
groups.remove(LeftGroup)
for k in zeros[cntleft]:
zeros[cntdown].append(k) #adds the left group's liberties to the bellow group's
zeros.remove(zeros[cntleft]) #removes the liberties list of the left group
else: #if a connection was not made with the above and bellow pieces..
for k in groups: #..search the group list..
if (ind-1) in k:
k.append(ind) #..and connects to the right piece's group
joinedLeft = True
break
if (ind+1)%dim != 0 and mat[a[1]-1][a[2]] == a[0]: #if the position on the right of the new piece exists and has a piece of the same colour..
if joinedUp: #..if a connection with the above piece was made..
cntup_flag = True
cntup = 0
cntright_flag = True
cntright = 0
UpGroup = []
RightGroup = []
for k in groups: #..search the group list..
if ind in k:
cntup_flag = False
UpGroup=k #..finds the above piece's group..
if (ind+1) in k:
cntright_flag = False
RightGroup=k #..finds the right piece's group..
if UpGroup and RightGroup:
break
if cntup_flag:
cntup += 1
if cntright_flag:
cntright += 1
if UpGroup is not RightGroup: #..and if the groups are not the same..
for k in RightGroup:
UpGroup.append(k) ##..connects the pieces of the right piece's group to the group of the above piece.
groups.remove(RightGroup)
for k in zeros[cntright]:
zeros[cntup].append(k) #adds the right group's liberties to the above group's
zeros.remove(zeros[cntright]) #removes the liberties list of the right group
elif joinedDown: #..if a connection with the above piece was not made but instead made with the bellow piece..
cntdown_flag = True
cntdown = 0
cntright_flag = True
cntright = 0
DownGroup = []
RightGroup = []
for k in groups: #..search the group list..
if ind in k:
cntdown_flag = False
DownGroup=k #..finds the bellow piece's group..
if (ind+1) in k:
cntright_flag = False
RightGroup=k #..finds the right piece's group..
if DownGroup and RightGroup:
break
if cntdown_flag:
cntdown += 1
if cntright_flag:
cntright += 1
if DownGroup is not RightGroup: #..and if the groups are not the same..
for k in RightGroup:
DownGroup.append(k) #..connects the pieces of the right piece's group to the group of the bellow piece.
groups.remove(RightGroup)
for k in zeros[cntright]:
zeros[cntdown].append(k) #adds the right group's liberties to the bellow group's
zeros.remove(zeros[cntright]) #removes the liberties list of the right group
elif joinedLeft: #..if a connection was not made with the above and bellow pieces but was made with the left piece..
cntleft_flag = True
cntleft = 0
cntright_flag = True
cntright = 0
LeftGroup = []
RightGroup = []
for k in groups: #..search the group list..
if ind in k:
cntleft_flag = False
LeftGroup=k #..finds the left piece's group..
if (ind+1) in k:
cntright_flag = False
RightGroup=k #..finds the right piece's group..
if LeftGroup and RightGroup:
break
if cntleft_flag:
cntleft += 1
if cntright_flag:
cntright += 1
if LeftGroup is not RightGroup: #..and if the groups are not the same..
for k in RightGroup:
LeftGroup.append(k) #..connects the pieces of the right piece's group to the group of the left piece..
groups.remove(RightGroup)
for k in zeros[cntright]:
zeros[cntleft].append(k) #adds the right group's liberties to the left group's
zeros.remove(zeros[cntright]) #removes the liberties list of the right group
else: #if a connection was nor made with any other pieces..
for k in groups: #..search the group list..
if (ind+1) in k:
k.append(ind) #.. and connects with the right piece's group
joinedRight = True
break
if not joinedUp and not joinedDown and not joinedLeft and not joinedRight: #if the new piece has not been connected with any other piece..
groups.append([ind]) #.. creates a new group with the new piece..
zeros.append(s.surronding_zeros(ind, dim, filled)) #..and creates a new list of surrounding zeros associated with that group
for i in range(len(zeros)):
if ind in zeros[i]:
for k in s.surronding_zeros(ind, dim, filled):
zeros[i].append(k) #adds the surrouding zeros of the new piece to the group of liberties where the new piece has benn added
zeros[i] = list(set(zeros[i]))
if ind in zeros[i]:
zeros[i].remove(ind) #removes the new piece board's index from all the liberties lists where "ind" was a liberty of the player's groups
for i in range(len(zerosCont)): ##removes the new piece board's index from all the liberties lists where "ind" was a liberty of the next player's groups
if ind in zerosCont[i]:
zerosCont[i].remove(ind)
if a[0] == 1:
groups1 = groups
zeros1 = zeros
groups2 = s.getGroups2()
zeros2 = zerosCont
else:
groups1 = s.getGroups1()
zeros1 = zerosCont
groups2 = groups
zeros2 = zeros
return State(mat, player, filled, dim, groups1, groups2, zeros1, zeros2)
def load_board(self, s):
#loads board from file stream "s". returns corresponding state
l = s.readline().rstrip('\n').split(' ')
player = int(l[1])
dim = int(l[0])
# Reads file and creates list of lists of integers with the board
l = [line.rstrip('\n') for line in s.readlines()]
mat = [list(map(int, list(i))) for i in l]
# List of tuples with filled positions of the board
aux = [(mat[x][y], coord2ind(y, x, dim)) for x in range(dim) for y in range(dim) if mat[x][y] != 0]
groups1=[]
zeros1=[]
groups2=[]
zeros2=[]
for i in aux: # Fills the two lists of existing groups and corresponding liberties
if i[0] == 1:
groups = groups1
else:
groups = groups2
joined=False
coord = ind2coord(i[1],dim)
if (i[1] - dim) >= 0 and mat[coord[0]-1][coord[1]] == i[0]: #Connects to any same color piece above it, if it exists.
for k in groups:
if (i[1] - dim) in k:
k.append(i[1])
joined=True
break
if (i[1]%dim) != 0 and mat[coord[0]][coord[1]-1] == i[0]: #Connects to any same color piece to the left, if it exists.
if joined:
UpGroup=[]
LeftGroup=[]
for k in groups:
if (i[1]) in k:
UpGroup=k
if (i[1]-1) in k:
LeftGroup=k
if UpGroup and LeftGroup:
break
if UpGroup is not LeftGroup:
for k in LeftGroup:
UpGroup.append(k)
groups.remove(LeftGroup)
else:
for k in groups:
if (i[1]-1) in k:
k.append(i[1])
joined = True
break
if not joined:
groups.append([i[1]])
for (groups, zeros) in [(groups1, zeros1),(groups2, zeros2)]: #Defning the liberties of each group by verifying the zeros next to it.
cnt=0
for k in groups:
zeros.append([])
for i in k:
coord = ind2coord(i,dim)
if (i - dim) >= 0 and mat[coord[0]-1][coord[1]]==0 and (i-dim) not in zeros[cnt]:
zeros[cnt].append(i-dim)
if (i + dim) < dim*dim and mat[coord[0]+1][coord[1]]==0 and (i+dim) not in zeros[cnt]:
zeros[cnt].append(i+dim)
if (i%dim) != 0 and mat[coord[0]][coord[1]-1]==0 and (i-1) not in zeros[cnt]:
zeros[cnt].append(i-1)
if ((i+1)%dim) != 0 and mat[coord[0]][coord[1]+1]==0 and (i+1) not in zeros[cnt]:
zeros[cnt].append(i+1)
cnt += 1
auxState= State(mat, player, aux, dim, groups1, groups2, zeros1, zeros2)
self.terminal_test(auxState)
self.state = State(mat, player, aux, dim, groups1, groups2, zeros1, zeros2, auxState.getTerminalFlag(), auxState.getDrawFlag())
return self.state
def coord2ind(x, y, s):
    """Flatten 2-D board coordinates (column *x*, row *y*) into a single
    index on an *s* x *s* board."""
    return s * y + x
def ind2coord(i, s):
    """Convert flat index *i* on an *s* x *s* board into (row, column).

    Uses divmod (integer floor division) instead of the original
    int(i / s): true division goes through a float, which is both slower
    and can round incorrectly for very large indices.
    """
    return divmod(i, s)
| true |
def findGaps(P, K):
    """Split the board positions 1..K around the picked spots *P*.

    *P* must be sorted and duplicate-free.  Returns (head, gaps, tail):
    head is the count of free spots before P[0], gaps the counts of free
    spots strictly between consecutive picks (zero-length runs omitted),
    and tail the count of free spots after P[-1].
    """
    head = P[0] - 1 if P[0] > 1 else 0
    tail = K - P[-1] if P[-1] < K else 0
    gaps = [right - left - 1 for left, right in zip(P, P[1:]) if right - left > 1]
    return head, gaps, tail
def findBestSingleScore(head, gaps, tail):
    """Best number of winning spots reachable with a single extra pick.

    A single pick can claim an entire head or tail run, or one whole
    interior gap, so the answer is the largest of those runs.

    Fixed: the original appended *head* and *tail* to the caller's *gaps*
    list before taking the max, mutating the argument as a side effect;
    this version builds a fresh list instead.
    """
    return max(gaps + [head, tail])
def findBestDoubleScore(head, gaps, tail):
    """Best number of winning spots reachable with two extra picks.

    Head and tail runs can each be claimed fully by one pick; a pick in
    the middle of an interior gap claims ceil(gap / 2) spots.  Greedily
    replaces the weaker of the two current choices with the best
    remaining half-gap, twice.
    """
    pair = sorted((head, tail))
    halves = sorted((g + 1) // 2 for g in gaps)
    if halves:
        pair[0] = max(pair[0], halves.pop())
        pair.sort()
    if halves:
        pair[0] = max(pair[0], halves[-1])
    return pair[0] + pair[1]
def solve():
    """Read one test case from stdin and return the best expected win fraction.

    Input: a line "N K" followed by a line of N picked positions.
    Compares the best single-pick and best two-pick coverage, each
    normalised by the board size K.
    """
    N, K = [int(x) for x in input().split()]  # N itself is unused: only the distinct picks matter
    P = sorted(list({int(x) for x in input().split()}))  # deduplicate via a set, then sort
    head, gaps, tail = findGaps(P, K)
    if not head and not gaps and not tail:
        return 0.0  # every position is already picked: no free spot can win
    best_single_score = findBestSingleScore(head, list(gaps), tail) / K  # pass copies: the helpers may mutate
    best_double_score = findBestDoubleScore(head, list(gaps), tail) / K
    return max(best_single_score, best_double_score)
if __name__ == "__main__":
    # Code-jam style driver: first line is the number of test cases,
    # each case is answered on its own "Case #i: ..." line.
    T = int(input())
    c = 1
    while T:
        print(f"Case #{c}: {solve()}")
        c += 1
        T -= 1
| true |
04d988ec2b48c578e1742645c2518032c0ce21fd | Python | nitzanadut/Exercises | /Python/7 Ejected/ejected.py | UTF-8 | 1,123 | 3.984375 | 4 | [] | no_license | import re
import math
# Regex to check if a command is legal
# PEP 8: use def instead of assigning a lambda to a name — the function
# gets a real __name__ and a docstring, with the same call interface.
def validate_command(command):
    """Return a match object when *command* is 'UP|DOWN|LEFT|RIGHT <digits>', else None."""
    return re.match(r'^(UP|DOWN|LEFT|RIGHT)\s\d+$', command)
def main():
    """Interactively read movement commands and print the straight-line
    distance from the origin after applying them all.

    Commands are 'UP/DOWN/LEFT/RIGHT <n>'; input stops at '0'.
    """
    print("Hey! Expecting input of format: (UP/DOWN/LEFT/RIGHT NUMBER). 0 to stop inserting commands")
    commands = []
    command = ''
    # Input from user: collect (direction, amount) pairs until '0' is entered.
    while command != '0':
        command = input('$ ')
        if not validate_command(command) and command != '0':
            print('Invalid Command')
            continue
        elif validate_command(command):
            commands.append((command.split()[0], int(command.split()[1])))
    delta_x = 0
    delta_y = 0
    # Accumulate the net displacement along each axis.
    for cmd in commands:
        if cmd[0] == 'UP': delta_y += cmd[1]
        elif cmd[0] == 'DOWN': delta_y -= cmd[1]
        elif cmd[0] == 'LEFT': delta_x -= cmd[1]
        elif cmd[0] == 'RIGHT': delta_x += cmd[1]
    # Euclidean distance (Pythagoras), rounded to the nearest integer.
    print('The final distance from (0,0) is', round(math.sqrt(delta_x**2 + delta_y**2)))

if __name__ == '__main__':
    main()
ef848aecf359cbd5d403e5b9dcf4b4949da7f770 | Python | jpmendel/branch-prediction-visualizer | /src/util/util.py | UTF-8 | 263 | 2.96875 | 3 | [
"MIT"
class Util(object):
    """Bit-manipulation helpers for fixed-width integer arithmetic."""

    @staticmethod
    def logical_right_shift(val, n):
        """Shift *val* right by *n* bits, treating it as an unsigned
        32-bit value (zero-fill, unlike Python's arithmetic >>)."""
        return (val & 0xFFFFFFFF) >> n

    @staticmethod
    def sign_extend(value, bits):
        """Interpret the low *bits* bits of *value* as a two's-complement
        signed number and return it as a Python int."""
        sign = 1 << (bits - 1)
        return (value & (sign - 1)) - (value & sign)
88c1b8caa31723047966b939d00e8c77b58a05a4 | Python | kravitejar/PYTHON-MACHINE-LEARNING | /classesandobjects.py | UTF-8 | 1,496 | 4.4375 | 4 | [] | no_license | ##An object is a single software unit that combines data and method.
##Data in an object are known as attributes.
##Procedures/Functions in an object are known as methods.
##
##
##class Car:
## pass
##
##ford=Car() #ford is the object or instance of class Car
##honda=Car()
##audi=Car()
##
###can add attributes(speed) on the go for empty classes(Car)
##ford.speed=200
##honda.speed=220
##audi.speed=250
##
##ford.color='red'
##honda.color='blue'
##audi.color='black'
##
##print(ford.speed)
##print(ford.color)
##
##ford.speed=300
##ford.color='blue'
##
##print(ford.speed)
##print(ford.color)
class Rectangle:
    """Deliberately empty class: the demo below attaches height/width
    attributes to individual instances on the fly."""
    pass
# Demo: attributes can be attached to instances after creation, and each
# instance keeps its own values.
rect1=Rectangle()
rect2=Rectangle()
rect1.height=20
rect2.height=30
rect1.width=40
rect2.width=10
# Print each rectangle's area (height * width).
print(rect1.height*rect1.width)
print(rect2.height*rect2.width)
##
##class Car:
## def __init__(self,speed,color):
##
## #init behaves as a constructor.
## #First method called whenever an instance is created
## #default values for attributes allowed eg. self.speed=100
##        #if multiple __init__ methods are defined, Python keeps only the last one
## print('the __init__ is called')
## print(speed,color)
## self.speed=speed
## self.color=color
##
##
##ford=Car(200,'red')
###ford is the object or instance of class Car
##honda=Car(250,'blue')
##audi=Car(300,'black')
##
##print(ford.speed)
##print(ford.color)
| true |
061eabfd0bf87dfd9c7970faf0c00d455b1057d9 | Python | patmoore/anki-conjugate-spanish | /tests/TestEstar.py | UTF-8 | 5,041 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import unittest
from conjugate_spanish import Tense, Person
from conjugate_spanish.espanol_dictionary import Espanol_Dictionary, Verb_Dictionary
Espanol_Dictionary.load()  # populate the verb dictionary once, before any test runs
class TestEstar(unittest.TestCase):
    """Conjugation tests for the irregular verb "estar" across all tenses."""

    def __check__(self, tense, expected):
        """Conjugate "estar" in *tense* for every person and compare.

        *expected* maps each Person to its conjugation string.  The loop
        variable is named ``conjugation``: the original reused ``expected``
        for it, shadowing the parameter of the same name.
        """
        estar = Verb_Dictionary.get("estar")
        for person, conjugation in expected.items():
            self.assertEqual(conjugation, estar.conjugate(tense, person, returnAsString=True))

    def test_estar_present_tense(self):
        """Present indicative."""
        expected = { Person.first_person_singular: "estoy",
                    Person.second_person_singular:"estás",
                    Person.third_person_singular:"está",
                    Person.first_person_plural:"estamos",
                    Person.second_person_plural:"estáis",
                    Person.third_person_plural:"están"}
        self.__check__(Tense.present_tense, expected)

    def test_estar_incomplete_past_tense(self):
        """Imperfect (incomplete past) indicative."""
        expected = { Person.first_person_singular: "estaba",
                    Person.second_person_singular:"estabas",
                    Person.third_person_singular:"estaba",
                    Person.first_person_plural:"estábamos",
                    Person.second_person_plural:"estabais",
                    Person.third_person_plural:"estaban"}
        self.__check__(Tense.incomplete_past_tense, expected)

    def test_estar_past_tense(self):
        """Preterite (simple past) indicative."""
        expected = { Person.first_person_singular: "estuve",
                    Person.second_person_singular:"estuviste",
                    Person.third_person_singular:"estuvo",
                    Person.first_person_plural:"estuvimos",
                    Person.second_person_plural:"estuvisteis",
                    Person.third_person_plural:"estuvieron"}
        self.__check__(Tense.past_tense, expected)

    def test_estar_future_tense(self):
        """Future indicative."""
        expected = { Person.first_person_singular: "estaré",
                    Person.second_person_singular:"estarás",
                    Person.third_person_singular:"estará",
                    Person.first_person_plural:"estaremos",
                    Person.second_person_plural:"estaréis",
                    Person.third_person_plural:"estarán"}
        self.__check__(Tense.future_tense, expected)

    def test_estar_conditional_tense(self):
        """Conditional."""
        expected = { Person.first_person_singular: "estaría",
                    Person.second_person_singular:"estarías",
                    Person.third_person_singular:"estaría",
                    Person.first_person_plural:"estaríamos",
                    Person.second_person_plural:"estaríais",
                    Person.third_person_plural:"estarían"}
        self.__check__(Tense.conditional_tense, expected)

    def test_estar_present_subjective_tense(self):
        """Present subjunctive."""
        expected = { Person.first_person_singular: "esté",
                    Person.second_person_singular:"estés",
                    Person.third_person_singular:"esté",
                    Person.first_person_plural:"estemos",
                    Person.second_person_plural:"estéis",
                    Person.third_person_plural:"estén"}
        self.__check__(Tense.present_subjective_tense, expected)

    def test_estar_past_subjective_tense(self):
        """Past (imperfect) subjunctive."""
        expected = { Person.first_person_singular: "estuviera",
                    Person.second_person_singular:"estuvieras",
                    Person.third_person_singular:"estuviera",
                    Person.first_person_plural:"estuviéramos",
                    Person.second_person_plural:"estuvierais",
                    Person.third_person_plural:"estuvieran"}
        self.__check__(Tense.past_subjective_tense, expected)

    def test_estar_imperative_positive_tense(self):
        """Affirmative imperative (no first-person-singular form)."""
        expected = { Person.first_person_singular: None,
                    Person.second_person_singular:"está",
                    Person.third_person_singular:"esté",
                    Person.first_person_plural:"estemos",
                    Person.second_person_plural:"estad",
                    Person.third_person_plural:"estén"}
        self.__check__(Tense.imperative_positive, expected)

    def test_estar_imperative_negative_tense(self):
        """Negative imperative (no first-person-singular form)."""
        expected = { Person.first_person_singular: None,
                    Person.second_person_singular:"estés",
                    Person.third_person_singular:"esté",
                    Person.first_person_plural:"estemos",
                    Person.second_person_plural:"estéis",
                    Person.third_person_plural:"estén"}
        self.__check__(Tense.imperative_negative, expected)

    def test_estar_tense(self):
        """Non-finite forms: gerund, adjective, past participle."""
        estar = Verb_Dictionary.get("estar")
        self.assertEqual("estando", estar.conjugate(Tense.gerund, returnAsString=True))
        self.assertEqual("estado", estar.conjugate(Tense.adjective, returnAsString=True))
        self.assertEqual("estado", estar.conjugate(Tense.past_participle, returnAsString=True))
cc83c2f62da5ebf4bb2c9c36774f8f9d5ca45be7 | Python | Lord-Fifth/Competitive-Coding | /Atoms & Molecules/TCS/Xplore/Python/Prime.py | UTF-8 | 866 | 3.609375 | 4 | [] | no_license | """
Write a Python code to count how many prime integers are there in a given list of integers.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the 'checkCoPrimeExistance' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY numbers as parameter.
#
def countPrimeNumbers(numbers):
    """Return how many entries of *numbers* are prime.

    Fixes two defects in the original:
    - negative numbers were counted as prime (range(2, i) is empty for
      i < 0 and only 0 and 1 were excluded); any value below 2 is now
      rejected outright;
    - trial division ran up to i; it now stops at isqrt(i), turning the
      per-number cost from O(i) into O(sqrt(i)).
    """
    count = 0
    for i in numbers:
        if i < 2:
            continue  # 0, 1 and negatives are never prime
        for j in range(2, math.isqrt(i) + 1):
            if i % j == 0:
                break  # found a divisor: composite
        else:
            count += 1  # no divisor up to sqrt(i): prime
    return count
if __name__ == '__main__':
    # Read the number of values, then one integer per line, and print
    # how many of them are prime.
    numbers=[]
    count=int(input())
    for i in range(count):
        numbers.append(int(input()))
    print(countPrimeNumbers(numbers))
"""
Input
4
3
5
7
11
Output
4
""" | true |