blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9e36ec7110913fbb912262af24488ad4250f4c2c | Python | codenomad101/stock_app | /stock_app.py | UTF-8 | 380 | 3.3125 | 3 | [] | no_license | import yfinance as yf
import streamlit as st
import pandas as pd
st.write(""" # Simple Stock App
Shown below are the stocks of dogecoin and closing value
""")
# Yahoo! Finance ticker: Dogecoin priced in Indian rupees.
tickerSymbol = 'DOGE-INR'
tickerData = yf.Ticker(tickerSymbol)
# Daily bars from 2020-01-01 through 2021-05-15.
tickerDf =tickerData.history(period ='1d',start='2020-1-1' , end='2021-5-15')
# Plot the daily closing price.
st.line_chart(tickerDf.Close)
st.line_chart(tickerDf.Volume) | true |
df1c46ea93f3a229f9c6d2a6b17f75b82b8508df | Python | mjdawson89/AgileUnoModule7 | /test.py | UTF-8 | 581 | 3.25 | 3 | [
"MIT"
] | permissive | """
Matthew Dawson
11/22/20
AgileUnoModule7
"""
# 1
# import my_module and pprint
import my_module
import pprint
# 2
# use the greeting method from my_module to print out your name
print(my_module.greeting('Matt'))
# 3
# use the letter_text module to print out a string
# (keyword arguments fill the letter template's placeholders)
print(my_module.letter_text(name="Matt",amount="$100",denomination="USD"))
# 4
# use the my_module.my_json_data and print it out
print(my_module.my_json_data)
# 5
# import the my_json_data as my_data and print out the my_json_data using pprint
# (pprint renders nested structures with one key per line)
from my_module import my_json_data as my_data
pprint.pprint(my_data)
| true |
512298269a2fb000f7e84a74d618aa615549d0da | Python | suboice114/FirstPythonDemo | /SomeExample/example16.py | UTF-8 | 323 | 3.75 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# @Time : 2019/9/8 13:25
# @Author : su
# @File : example16.py
"""
题目:有一分数序列:2/1,3/2,5/3,8/5,13/8,21/13...求出这个数列的前20项之和。
"""
# Sum the first 20 terms of the series 2/1, 3/2, 5/3, 8/5, 13/8, ...
# where each term is the ratio of consecutive Fibonacci-style numbers.
a = 2.0
b = 1.0
s = 0.0
term = 0
while term < 20:
    s += a / b
    # Advance the pair: new numerator = a + b, new denominator = old a.
    a, b = a + b, a
    term += 1
print(s) | true |
b2756162b865a221dd2284c8b24f85e340d75b68 | Python | Mushrif/UNH | /HW4_4 | UTF-8 | 1,027 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python3
import pickle
import shelve
##### GETTING INPUT AND STORE IT IN VARIABLES #######
u_name = input("Hi user, Could you write down your name : ")
u_age = input("your age: ")
u_country = input("Country of Origin: ")
# Single-element lists; passed to (but ignored by) the helpers below.
name = [u_name]
age = [u_age]
country = [u_country]
# NOTE(review): the "age:" key carries a stray colon -- looks like a typo,
# but it is preserved verbatim in the pickled file.
dic_pic= {"name":u_name,"age:":u_age,"Country":u_country}
##### STORING IN PICKELS ######
def pic(name, age, country):
    """Pickle the module-level ``dic_pic`` dictionary into pFile.txt.

    NOTE(review): the parameters are accepted but never used; the payload
    always comes from the global ``dic_pic``.
    """
    with open('pFile.txt', 'wb') as p_file:
        pickle.dump(dic_pic, p_file)
##### STORING IN SHELVE #######
def shv(name, age, country):
    """Store the user details in the shelve database sfile.dat.

    NOTE(review): the parameters are ignored; the values come from the
    module-level ``u_name`` / ``u_age`` / ``u_country`` strings.
    """
    store = shelve.open('sfile.dat')
    store["name"] = [u_name]
    store["age"] = [u_age]
    store["country"] = [u_country]
    store.sync()
    store.close()
##### PASSING Para ######
pic(name,age,country)
shv(name,age,country)
print ("--------------------")
# reading back: load the pickle file and reopen the shelve database
s = shelve.open ('sfile.dat')
p = open('pFile.txt' , 'rb')
pread = pickle.load(p)
p.close()
print (pread)
print ("--------------------")
# Echo every key/value pair stored in the shelve database.
for s_word in s.keys():
    print(s_word, s[s_word])
| true |
69cb086aeea3fc58ce973efaac38d3a6c6222e42 | Python | nima-hakimi/TDT4113 | /Project2/historian_player.py | UTF-8 | 1,588 | 3.390625 | 3 | [] | no_license | """
Historian player.
"""
import random
from player import Player
from action import Action
class HistorianPlayer(Player):
    """Rock-paper-scissors agent that predicts from the opponent's history.

    It takes the opponent's last ``remember`` moves, finds every earlier
    occurrence of that same sequence in the full history, tallies which move
    most often followed it, and plays the counter to that predicted move.
    """
    def __init__(self, name, remember):
        super().__init__(name)
        # Every opponent move seen so far, oldest first (string values).
        self.history = []
        # Length of the recent-move pattern used for matching.
        self.remember = remember
        # Maps each move to the move that beats it.
        self.is_beaten_by = {
            "rock": "paper",
            "paper": "scissor",
            "scissor": "rock"
        }
    def select_action(self):
        """Return the Action countering the opponent's most likely next move."""
        # Tally of which move followed each earlier occurrence of the pattern.
        most_common_moves = {
            "rock": 0,
            "paper": 0,
            "scissor": 0
        }
        n_last_sequence = self.history[-self.remember:]
        # Returns a random value if the history is empty
        if len(n_last_sequence) == 0:
            random_value = random.randint(0, 2)
            # NOTE(review): self.action_map is not defined in this class --
            # presumably inherited from Player; confirm.
            value = self.action_map[random_value]
            return Action(value)
        # Slide a window over the history looking for earlier pattern matches.
        for i in range(len(self.history)):
            temp_list = self.history[i:i + len(n_last_sequence)]
            if temp_list == n_last_sequence:
                # A match ending at (or past) the tail has no follow-up move;
                # this also stops at the current pattern itself.
                if i + len(n_last_sequence) >= len(self.history):
                    break
                next_move = self.history[i + len(n_last_sequence)]
                most_common_moves[next_move] += 1
        # Pick the move with the highest tally; dict insertion order breaks
        # ties (rock, then paper, then scissor).
        most_common_move = ""
        number_of_common_moves = -1
        for key in most_common_moves:
            if most_common_moves[key] > number_of_common_moves:
                number_of_common_moves = most_common_moves[key]
                most_common_move = key
        return Action(self.is_beaten_by[most_common_move])
    def receive_result(self, action):
        """Record the opponent's latest move in the history."""
        self.history.append(action.value)
| true |
1128445ab7bc4b541f8aaf1e2fcc6901106d7d70 | Python | five-hundred-eleven/DataStructures | /DoublyLinkedListClasses.py | UTF-8 | 4,938 | 3.6875 | 4 | [
"MIT"
] | permissive | class ListNode:
def __init__(self, obj, next_node=None, prev_node=None):
assert isinstance(next_node, ListNode) or next_node is None
assert isinstance(prev_node, ListNode) or prev_node is None
self.__val = obj
self.__next = next_node
self.__prev = prev_node
    @property
    def val(self):
        """Payload object stored in this node."""
        return self.__val
    @val.setter
    def val(self, obj):
        self.__val = obj
    @property
    def next(self):
        """Following node in the chain (None at the tail)."""
        return self.__next
    @next.setter
    def next(self, next_node):
        # Only another ListNode or None may be linked in.
        assert isinstance(next_node, ListNode) or next_node is None
        self.__next = next_node
    @property
    def prev(self):
        """Preceding node in the chain (None at the head)."""
        return self.__prev
    @prev.setter
    def prev(self, prev_node):
        # Only another ListNode or None may be linked in.
        assert isinstance(prev_node, ListNode) or prev_node is None
        self.__prev = prev_node
class DoubleLinkedList:
    """Doubly linked list supporting Python-list-style integer indexing.

    Positive indices walk forward from the head; negative indices walk
    backward from the tail.  Elements live in ListNode objects chained via
    their next/prev links.
    """
    def __init__(self, *args):
        """Create an empty list, or seed it from one optional iterable."""
        assert len(args) <= 1
        if args:
            iterable = args[0]
        else:
            iterable = []
        # Empty list: both ends are None.
        self.__head = None
        self.__tail = None
        for obj in iterable:
            self.append(obj)
    def isEmpty(self):
        """Return True when the list holds no elements."""
        return self.__head is None
    def append(self, obj):
        """Add *obj* at the tail."""
        if self.__head is None:
            # First element: head and tail are the same node.
            self.__head = self.__tail = ListNode(obj)
        else:
            self.__tail.next = ListNode(obj, prev_node=self.__tail)
            self.__tail = self.__tail.next
    def insert(self, index, obj):
        """Insert *obj* before position *index* (list.insert semantics).

        Out-of-range positive indices append; out-of-range negative indices
        prepend, mirroring the built-in list behaviour.
        """
        if self.__head is None:
            self.__head = self.__tail = ListNode(obj)
        elif index == 0:
            # Prepend in front of the current head.
            self.__head.prev = ListNode(obj, next_node=self.__head)
            self.__head = self.__head.prev
        else:
            if index >= 0:
                # Walk forward from the head to the target node.
                node = self.__head
                for _ in range(index):
                    if node is None:
                        break
                    node = node.next
                if node is None:
                    # Walked off the end: append at the tail.
                    self.__tail.next = ListNode(obj, prev_node=self.__tail)
                    self.__tail = self.__tail.next
                    return
            else:
                # Walk backward from the tail for a negative index.
                node = self.__tail
                for _ in range(-index - 1):
                    if node is None:
                        break
                    node = node.prev
                if node is None:
                    # Walked off the front: prepend at the head.
                    self.__head.prev = ListNode(obj, next_node=self.__head)
                    self.__head = self.__head.prev
                    return
            if node.prev is None: # changing head
                self.__head.prev = ListNode(obj, next_node=self.__head)
                self.__head = self.__head.prev
            else:
                # Splice the new node between node.prev and node.
                node.prev.next = ListNode(obj, next_node=node, prev_node=node.prev)
                node.prev = node.prev.next
    def pop(self, *args):
        """Remove and return the item at the optional index (default: last)."""
        assert len(args) <= 1
        if self.__head is None:
            raise IndexError("Cannot pop from empty list")
        if args:
            index = args[0]
            node = self.__get_node(index)
        else:
            node = self.__tail
        obj = node.val
        if node is self.__head and node is self.__tail:
            # Sole element: the list becomes empty.
            self.__head = self.__tail = None
        elif node is self.__head:
            self.__head.next.prev = None
            self.__head = self.__head.next
        elif node is self.__tail:
            self.__tail.prev.next = None
            self.__tail = self.__tail.prev
        else:
            # Unlink an interior node by bridging its neighbours.
            node.prev.next, node.next.prev = node.next, node.prev
        return obj
    def __getitem__(self, index):
        return self.__get_node(index).val
    def __setitem__(self, index, obj):
        self.__get_node(index).val = obj
    def __delitem__(self, index):
        self.pop(index)
    def __repr__(self):
        return "DoubleLinkedList([" + ", ".join([str(x) for x in self]) + "])"
    def __iter__(self):
        # NOTE(review): iteration state lives on the list itself, so nested
        # or concurrent iterations would interfere with each other.
        self.__curr = self.__head
        return self
    def __next__(self):
        if self.__curr is None:
            raise StopIteration()
        obj = self.__curr.val
        self.__curr = self.__curr.next
        return obj
    def __len__(self):
        # O(n): walk the chain and count the nodes.
        counter = 0
        node = self.__head
        while node is not None:
            node = node.next
            counter += 1
        return counter
    def __get_node(self, index):
        """Return the ListNode at *index*; raise IndexError when out of range."""
        assert type(index) == int
        if self.__head is None:
            raise IndexError("Cannot access item in empty list")
        if index >= 0:
            node = self.__head
            for _ in range(index):
                if node.next is None:
                    raise IndexError("index out of range")
                node = node.next
        else:
            node = self.__tail
            for _ in range(-index - 1):
                if node.prev is None:
                    raise IndexError("index out of range")
                node = node.prev
        return node
| true |
f541823bd1058a430a3f17b4dcc411ae1f136ef3 | Python | flippedZH/Carplate-Recognition | /Find_province.py | UTF-8 | 2,949 | 2.8125 | 3 | [] | no_license |
import numpy as np
import cv2
import cv2 as cv
import os
def load_data(filename_1):
    """Load the province character images listed by *filename_1*.

    Returns (dataArr, labels): a (3216, 400) matrix of flattened 20x20
    grayscale glyphs plus the integer folder index of each sample.
    NOTE(review): paths are hard-coded to one machine -- confirm layout.
    """
    filepath2 = "C:\\Users\\zh\\Desktop\\data_province\\txt\\"
    pathlist=os.listdir(filepath2)
    path_name=[]
    for i in pathlist:
        path_name.append(i.split(".")[0])
    with open(filename_1, 'r') as fr_1:
        temp_address = [row.strip() for row in fr_1.readlines()]
    middle_route=path_name
    sample_number = 0 # running count of samples loaded so far
    dataArr = np.zeros((3216, 400))
    label_list = []
    for i in range(len(temp_address)):
        # Each .txt file lists the image file names of one province folder.
        with open(r'C:\Users\zh\Desktop\data_province\txt\\' + temp_address[i]+'.txt', 'r') as fr_2:
            temp_address_2 = [row_1.strip() for row_1 in fr_2.readlines()]
        for j in range(len(temp_address_2)):
            sample_number += 1
            path_temp="C:/Users\zh\Desktop\data_province\data/" +str(middle_route[i]) + '/' + str(temp_address_2[j])
            temp_img = cv2.imread(path_temp)
            temp_img=cv2.cvtColor(temp_img,cv2.COLOR_BGR2GRAY)
            print(sample_number)
            #temp_img=cv.cvtColor(temp_img,cv2.COLOR_BAYER_GR2GRAY)
            # Normalise every glyph to 20x20 and flatten to one 400-long row.
            temp_img = cv2.resize(temp_img, (20, 20), interpolation=cv2.INTER_LINEAR)
            temp_img = temp_img.reshape(1, 400)
            # Fill row (sample_number - 1) of the preallocated matrix.
            dataArr[sample_number - 1, :] = temp_img # dataArr = np.zeros((13156, 400))
            # label_list = []
        # Every image in folder i shares label i, so extend once per folder.
        label_list.extend([i] * len(temp_address_2))
    return dataArr, np.array(label_list) # data matrix plus matching labels
def SVM_rocognition_character(img): # img: one grayscale character glyph
    """Classify a province glyph with the pre-trained SVM; return its class index."""
    character_Arr = np.zeros((1,400))
    # Resize to the 20x20 training resolution (linear interpolation).
    img = cv2.resize(img, (20, 20), interpolation=cv2.INTER_LINEAR)
    new_character_ = img.reshape((1,400))[0]
    character_Arr[0,:] = new_character_
    from sklearn.externals import joblib
    clf = joblib.load("based_SVM_province_train_model.m")
    predict_result = clf.predict(character_Arr)
    #print(predict_result.tolist()) # predicted class indices as a list
    # Province abbreviations in training-folder order.
    # NOTE(review): '山西' is two characters while every other entry is one --
    # confirm against the training labels.
    ProvinceList=['川','鄂', '赣', '甘', '贵', '桂', '黑', '沪', '冀', '津', '京', '吉', '辽', '鲁', '蒙', '闽', '宁', '青', '琼', '山西', '苏', '晋',
    '皖', '湘', '新', '豫', '渝', '粤', '云', '藏', '浙']
    #for k in range(len(predict_result.tolist())):
    # print('%c'%ProvinceList[predict_result.tolist()[k]]) # result
    return predict_result.tolist()[0]
if __name__=="__main__":
src=cv2.imread("CutChar_Img/1.jpg")
gray = cv2.cvtColor(src,cv2.COLOR_BGR2GRAY)
SVM_rocognition_character(gray) | true |
67baade9b270bb474d82bc8a28a14a3bf2b60356 | Python | VictoriaLasso/correlation_viewer | /vcorr/generate_sample_data_set.py | UTF-8 | 515 | 2.953125 | 3 | [
"MIT"
] | permissive | __author__ = 'Diego'
import numpy as np
import pandas as pd
def generate_sample_data(n_vars, n_subjs):
    """Build a DataFrame of uniform-random values in [0, 1).

    :param n_vars: number of columns, named "var_0" ... "var_{n_vars-1}".
    :param n_subjs: number of rows, indexed "subj_0" ... "subj_{n_subjs-1}".
    :return: (n_subjs x n_vars) pandas DataFrame.
    """
    # range (not the Python-2-only xrange) keeps this working on Python 3
    # while behaving identically on Python 2.
    subjects = ["subj_%d" % i for i in range(n_subjs)]
    df = pd.DataFrame(index=subjects)
    for var in range(n_vars):
        df["var_%d" % var] = np.random.random(len(subjects))
    return df
if __name__ == "__main__":
import os
file_name = os.path.join(os.path.dirname(__file__),"..","test_data.csv")
test_df = generate_sample_data(20,40)
test_df.to_csv(file_name)
| true |
63659538d129da9ae075271eda249ff6c053af61 | Python | STEllAR-GROUP/phylanx | /tests/regressions/python/794_3d_array.py | UTF-8 | 913 | 2.578125 | 3 | [
"BSL-1.0"
] | permissive | # Copyright (c) 2019 R. Tohid
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from phylanx import Phylanx
import numpy as np
# Variants compiled through the @Phylanx decorator; the asserts at the end of
# this file check each one against its plain-NumPy counterpart below.
@Phylanx
def one_d():
    return np.array([1, 2])
@Phylanx
def two_d():
    return np.array([[1, 2], [3, 4]])
@Phylanx
def three_d():
    return np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
@Phylanx
def one_d_float():
    return np.array([1, 2], dtype='float')
def np_one_d():
    """Plain-NumPy reference: 1-D int array [1, 2]."""
    return np.arange(1, 3)
def np_two_d():
    """Plain-NumPy reference: 2x2 int array [[1, 2], [3, 4]]."""
    return np.arange(1, 5).reshape(2, 2)
def np_three_d():
    """Plain-NumPy reference: 2x2x2 int array of 1..8."""
    return np.arange(1, 9).reshape(2, 2, 2)
def np_one_d_float():
    """Plain-NumPy reference: 1-D float array [1.0, 2.0]."""
    return np.array([1.0, 2.0])
# Each Phylanx-compiled function must agree elementwise with its NumPy twin.
assert (one_d() == np_one_d()).all()
assert (two_d() == np_two_d()).all()
assert (three_d() == np_three_d()).all()
assert (one_d_float() == np_one_d_float()).all()
| true |
6acbc655b0ca87eac03617b286854c22c65d6745 | Python | bmatilla/ai2es_xai_course | /ai2es_xai_course/utils/occlusion.py | UTF-8 | 10,863 | 2.828125 | 3 | [
"MIT"
] | permissive | """Helper methods for occlusion."""
import numpy
from ai2es_xai_course.utils import utils
from ai2es_xai_course.utils import cnn
DEFAULT_LINE_WIDTH = 2.
def _get_grid_points(x_min, x_spacing, num_columns, y_min, y_spacing, num_rows):
"""Returns grid points in regular x-y grid.
M = number of rows in grid
N = number of columns in grid
:param x_min: Minimum x-coordinate over all grid points.
:param x_spacing: Spacing between adjacent grid points in x-direction.
:param num_columns: N in the above definition.
:param y_min: Minimum y-coordinate over all grid points.
:param y_spacing: Spacing between adjacent grid points in y-direction.
:param num_rows: M in the above definition.
:return: x_coords: length-N numpy array with x-coordinates at grid points.
:return: y_coords: length-M numpy array with y-coordinates at grid points.
"""
# TODO(thunderhoser): Put this in utils.py.
x_max = x_min + (num_columns - 1) * x_spacing
y_max = y_min + (num_rows - 1) * y_spacing
x_coords = numpy.linspace(x_min, x_max, num=num_columns)
y_coords = numpy.linspace(y_min, y_max, num=num_rows)
return x_coords, y_coords
def get_occlusion_maps(
        model_object, predictor_matrix, half_window_size_px=1, fill_value=0.):
    """Computes one occlusion map per example for the positive class.

    E = number of examples; M, N = grid rows/columns; C = channels.

    :param model_object: Trained model (instance of `keras.models.Model` or
        `keras.models.Sequential`).
    :param predictor_matrix: E-by-M-by-N-by-C numpy array of predictors.
    :param half_window_size_px: Half-size P of the occlusion window (pixels);
        the full window is (2P + 1) rows by (2P + 1) columns.
    :param fill_value: Value written into every channel inside the window, to
        simulate missing data.
    :return: occlusion_prob_matrix: E-by-M-by-N numpy array of predicted
        probabilities after occlusion.
    :return: original_probs: length-E numpy array of predicted probabilities
        before occlusion.
    """
    half_window_size_px = int(numpy.round(half_window_size_px))
    assert half_window_size_px >= 0
    assert not numpy.any(numpy.isnan(predictor_matrix))
    assert len(predictor_matrix.shape) == 4
    num_rows = predictor_matrix.shape[1]
    num_columns = predictor_matrix.shape[2]
    occlusion_prob_matrix = numpy.full(predictor_matrix.shape[:-1], numpy.nan)
    for row in range(num_rows):
        print('Occluding windows centered on row {0:d}...'.format(row + 1))
        for column in range(num_columns):
            # Clip the occlusion window to the grid edges.
            top = max(row - half_window_size_px, 0)
            bottom = min(row + half_window_size_px + 1, num_rows)
            left = max(column - half_window_size_px, 0)
            right = min(column + half_window_size_px + 1, num_columns)
            # Copy the predictors and blank out the window in every channel.
            occluded_matrix = predictor_matrix + 0.
            occluded_matrix[:, top:bottom, left:right, :] = fill_value
            occlusion_prob_matrix[:, row, column] = cnn.apply_model(
                model_object=model_object,
                predictor_matrix=occluded_matrix, verbose=False
            )
    original_probs = cnn.apply_model(
        model_object=model_object,
        predictor_matrix=predictor_matrix, verbose=False
    )
    return occlusion_prob_matrix, original_probs
def smooth_occlusion_maps(occlusion_prob_matrix, smoothing_radius_grid_cells):
    """Smooths each example's occlusion map with a Gaussian filter (in place).

    :param occlusion_prob_matrix: See output doc for `get_occlusion_maps`.
    :param smoothing_radius_grid_cells: e-folding radius (number of grid cells).
    :return: occlusion_prob_matrix: Smoothed version of the input.
    """
    for example_index in range(occlusion_prob_matrix.shape[0]):
        occlusion_prob_matrix[example_index, ...] = utils.apply_gaussian_filter(
            input_matrix=occlusion_prob_matrix[example_index, ...],
            e_folding_radius_grid_cells=smoothing_radius_grid_cells
        )
    return occlusion_prob_matrix
def normalize_occlusion_maps(occlusion_prob_matrix, original_probs):
    """Scales occlusion maps to normalized probability *decreases*.

    A value of 1 means the probability dropped all the way to zero; 0 means
    no change; -1 means it doubled; etc.  Examples whose original probability
    is zero are mapped to 0 everywhere.

    :param occlusion_prob_matrix: See output doc for `get_occlusion_maps`.
    :param original_probs: Same.
    :return: normalized_occlusion_matrix: numpy array with the same shape as
        `occlusion_prob_matrix`.
    """
    assert not numpy.any(numpy.isnan(occlusion_prob_matrix))
    assert len(occlusion_prob_matrix.shape) == 3
    assert numpy.all(occlusion_prob_matrix >= 0.)
    assert numpy.all(occlusion_prob_matrix <= 1.)
    num_examples = occlusion_prob_matrix.shape[0]
    assert not numpy.any(numpy.isnan(original_probs))
    assert len(original_probs) == num_examples
    # Zero original probabilities become NaN so the division marks, rather
    # than crashes on, the undefined cases; NaNs are zeroed at the end.
    denominators = original_probs + 0.
    denominators[denominators == 0] = numpy.nan
    denominators = denominators.reshape(num_examples, 1, 1)
    normalized_occlusion_matrix = (
        (denominators - occlusion_prob_matrix) / denominators
    )
    normalized_occlusion_matrix[numpy.isnan(normalized_occlusion_matrix)] = 0.
    return normalized_occlusion_matrix
def plot_normalized_occlusion_map(
        normalized_occlusion_matrix_2d, axes_object, colour_map_object,
        max_contour_value, contour_interval, line_width=DEFAULT_LINE_WIDTH):
    """Plots 2-D normalized occlusion map with line contours.

    Positive values (probability decreases) are drawn with solid contours;
    negative values (increases) with dashed contours at the same magnitudes.

    :param normalized_occlusion_matrix_2d: See output doc for
        `normalize_occlusion_maps`.
    :param axes_object: See input doc for `plot_occlusion_map`.
    :param colour_map_object: Same.
    :param max_contour_value: Same.
    :param contour_interval: Same.
    :param line_width: Same.
    """
    # Check input args; floor the contour settings at tiny positive values.
    assert max_contour_value >= 0.
    max_contour_value = max([max_contour_value, 1e-6])
    assert contour_interval >= 0.
    contour_interval = max([contour_interval, 1e-7])
    assert contour_interval < max_contour_value
    assert not numpy.any(numpy.isnan(normalized_occlusion_matrix_2d))
    assert len(normalized_occlusion_matrix_2d.shape) == 2
    assert numpy.all(normalized_occlusion_matrix_2d <= 1.)
    half_num_contours = int(numpy.round(
        1 + max_contour_value / contour_interval
    ))
    # Find grid coordinates.  Cell centres are expressed in axes-relative
    # units (0-1), hence the transform=axes_object.transAxes below.
    num_grid_rows = normalized_occlusion_matrix_2d.shape[0]
    num_grid_columns = normalized_occlusion_matrix_2d.shape[1]
    x_coord_spacing = num_grid_columns ** -1
    y_coord_spacing = num_grid_rows ** -1
    x_coords, y_coords = _get_grid_points(
        x_min=x_coord_spacing / 2, y_min=y_coord_spacing / 2,
        x_spacing=x_coord_spacing, y_spacing=y_coord_spacing,
        num_rows=num_grid_rows, num_columns=num_grid_columns
    )
    x_coord_matrix, y_coord_matrix = numpy.meshgrid(x_coords, y_coords)
    # Plot positive contours (solid lines).
    positive_contour_values = numpy.linspace(
        0., max_contour_value, num=half_num_contours
    )
    axes_object.contour(
        x_coord_matrix, y_coord_matrix, normalized_occlusion_matrix_2d,
        positive_contour_values, cmap=colour_map_object,
        vmin=numpy.min(positive_contour_values),
        vmax=numpy.max(positive_contour_values),
        linewidths=line_width, linestyles='solid', zorder=1e6,
        transform=axes_object.transAxes
    )
    # Plot negative contours (dashed): contour the negated field at the same
    # positive levels, skipping the zero contour.
    negative_contour_values = positive_contour_values[1:]
    axes_object.contour(
        x_coord_matrix, y_coord_matrix, -normalized_occlusion_matrix_2d,
        negative_contour_values, cmap=colour_map_object,
        vmin=numpy.min(negative_contour_values),
        vmax=numpy.max(negative_contour_values),
        linewidths=line_width, linestyles='dashed', zorder=1e6,
        transform=axes_object.transAxes
    )
def plot_occlusion_map(
        occlusion_prob_matrix_2d, axes_object, colour_map_object,
        min_contour_value, max_contour_value, contour_interval,
        line_width=DEFAULT_LINE_WIDTH):
    """Plots a 2-D occlusion map as solid line contours on the given axes.

    M = number of rows in grid; N = number of columns in grid.

    :param occlusion_prob_matrix_2d: M-by-N numpy array of probabilities
        after occlusion.
    :param axes_object: Target axes (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    :param colour_map_object: Colour scheme (instance of
        `matplotlib.pyplot.cm` or similar).
    :param min_contour_value: Minimum contour value.
    :param max_contour_value: Max contour value.
    :param contour_interval: Interval between successive contours.
    :param line_width: Line width for contours.
    """
    # Validate inputs; floor the contour settings at tiny positive values.
    assert min_contour_value >= 0.
    assert max_contour_value >= 0.
    min_contour_value = max(min_contour_value, 1e-6)
    max_contour_value = max(max_contour_value, 1e-5)
    assert contour_interval >= 0.
    contour_interval = max(contour_interval, 1e-7)
    assert not numpy.any(numpy.isnan(occlusion_prob_matrix_2d))
    assert len(occlusion_prob_matrix_2d.shape) == 2
    assert numpy.all(occlusion_prob_matrix_2d >= 0.)
    assert numpy.all(occlusion_prob_matrix_2d <= 1.)
    assert contour_interval < max_contour_value
    contour_count = int(numpy.round(
        1 + (max_contour_value - min_contour_value) / contour_interval
    ))
    # Cell-centre coordinates in axes-relative units (0-1), matching the
    # transform=axes_object.transAxes below.
    grid_rows, grid_columns = occlusion_prob_matrix_2d.shape
    dx = grid_columns ** -1
    dy = grid_rows ** -1
    x_coords, y_coords = _get_grid_points(
        x_min=dx / 2, y_min=dy / 2,
        x_spacing=dx, y_spacing=dy,
        num_rows=grid_rows, num_columns=grid_columns
    )
    x_matrix, y_matrix = numpy.meshgrid(x_coords, y_coords)
    contour_levels = numpy.linspace(
        min_contour_value, max_contour_value, num=contour_count
    )
    axes_object.contour(
        x_matrix, y_matrix, occlusion_prob_matrix_2d,
        contour_levels, cmap=colour_map_object,
        vmin=min_contour_value, vmax=max_contour_value,
        linewidths=line_width, linestyles='solid', zorder=1e6,
        transform=axes_object.transAxes
    )
| true |
f4963b4683ecbc9459790db48a0fc2887a908bee | Python | ZhaoOfficial/Introduction-to-algorithm | /Part 1/Chapter 5/Acceptance_Rejection.py | UTF-8 | 624 | 3.25 | 3 | [] | no_license | """
Using acceptance rejection to implement
Normal distribution
"""
import numpy as np
import matplotlib.pyplot as plt
# Acceptance-rejection sampling: draw a half-normal via an exponential
# proposal, then attach a random sign to get a full normal sample.
n = 10000
# g(x) = e^{-x}
# Y ~ Expo(1), generated by inverse transform of Uniform(0, 1)
U1 = np.random.random_sample((n, ))
Y = -np.log(U1)
# c = \sup\frac{f(x)}{g(x)}
# NOTE(review): c is computed for reference but never used below.
c = np.sqrt(2 * np.e / np.pi)
# \frac{f(x)}{cg(x)} = e^{-0.5(x - 1)^2}
U2 = np.random.random_sample((n, ))
# Keep only the proposals that pass the acceptance test.
X = Y[U2 <= np.exp(-0.5 * (Y - 1) ** 2)]
# Random sign flip extends the half-normal to the whole real line.
U3 = np.random.random_sample((len(X), ))
sign = np.where(U3 > 0.5, 1, -1)
X = sign * X
plt.figure(figsize = (10, 8))
plt.xlim((-4, 4))
plt.hist(X, bins = 100, color = 'r', alpha = 0.4, edgecolor = 'y', label = 'sample')
plt.show() | true |
b1f5a27f82c25a1ad2d9292f65eddf9f9945950b | Python | ynivin/vcd-tools | /Types/Module.py | UTF-8 | 1,434 | 3.15625 | 3 | [] | no_license | class Module(object):
    def __init__(self, name):
        """Create a module called *name* with no parent, children or wires."""
        self.name = name
        # Parent Module in the hierarchy; None for the root.
        self.parent = None
        # Child Modules keyed by their name.
        self.submodules = {}
        # Wires keyed by their name.
        self.wires = {}
    def add_submodule(self, new_submodule):
        """Attach one child module, setting its parent to self."""
        new_submodule.set_parent(self)
        self.submodules[new_submodule.get_name()] = new_submodule
def add_submodules(self, new_submodules):
for module in new_submodules:
module.set_parent(self)
self.submodules[module.get_name()] = module
    def add_wire(self, new_wire):
        """Register a single wire under its own name."""
        self.wires[new_wire.get_name()] = new_wire
    def add_wires(self, new_wires):
        """Register several wires at once."""
        self.wires.update({wire.get_name(): wire for wire in new_wires})
    def get_name(self):
        """Return this module's short name."""
        return self.name
    def get_fullname(self):
        """Return the /-separated path from the root down to this module."""
        parent_fullname = self.parent.get_fullname() if self.parent else ""
        return "%s/%s" % (parent_fullname, self.get_name())
def tree(self, depth = 1, include_wires = True):
temp_tree = "%s%s%s\n"%(" "*4*(depth-1), "|===", self.get_name())
if include_wires:
for wire in self.wires.values():
temp_tree += "%s%s%s\n"%(" "*4*(depth), "|---", wire.get_name())
for submodule in self.submodules.values():
temp_tree += submodule.tree(depth+1)
return temp_tree
    def get_parent(self):
        """Return the parent module (None for the root)."""
        return self.parent
    def set_parent(self, parent):
        """Record *parent* as this module's owner."""
        self.parent = parent
| true |
963b7ad60e7b51e9a0c790faa076d508ce1c654f | Python | IntroCept/cricos_scrape | /cricos_scrape/items.py | UTF-8 | 2,763 | 2.578125 | 3 | [] | no_license | # Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
import re
from scrapy.item import Item, Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Compose, Identity
import phonenumbers
class InstitutionItem(Item):
    """Scraped CRICOS education-provider record."""
    type = Field()
    code = Field()
    provider_id = Field()
    name = Field()
    tradingName = Field()
    website = Field()
    postal_address = Field()
class ContactItem(Item):
    """Contact person attached to an institution."""
    type = Field()
    institution = Field()
    name = Field()
    title = Field()
    phone = Field()
    fax = Field()
    email = Field()
class CourseItem(Item):
    """Course offered by an institution."""
    type = Field()
    institution = Field()
    code = Field()
    name = Field()
    duration = Field()
    level = Field()
class CourseCampusItem(Item):
    """Link row tying a course to a campus where it is delivered."""
    type = Field()
    course = Field()
    campus = Field()
class CampusItem(Item):
    """Physical campus location of an institution."""
    type = Field()
    institution = Field()
    name = Field()
    address_lines = Field()
    suburb = Field()
    postcode = Field()
    phone = Field()
    fax = Field()
def trim(lines):
    """Strip leading/trailing whitespace from every string in *lines*."""
    return [entry.strip() for entry in lines]
def trimjoin(lines):
    """Strip each line and concatenate them; an empty result becomes None."""
    joined = "".join(piece.strip() for piece in lines)
    return joined or None
def sanitize_address(lines):
    """Drop every character that is not valid in an address line."""
    disallowed = re.compile("[^0-9A-Za-z \./\-,\(\)'&]")
    return [disallowed.sub("", line) for line in lines]
def parse_address(lines):
    """Split an address block into lines, suburb, state and postcode.

    Consumes (pops) the last two entries of *lines*: the final line must be
    "<state> <postcode>", the line before it the suburb; whatever remains
    becomes the address lines.
    """
    last_line = lines.pop()
    state, postcode = re.match("^\s*(.*?)\s*(\d+)\s*$", last_line).groups()
    suburb_line = lines.pop().strip()
    return {
        "address_lines": lines,
        "suburb": suburb_line,
        "state": state.strip(),
        "postcode": postcode.strip(),
    }
# TODO: it would be good to log the numbers that fail so we can tweak our
# formatting logic.
# Another TODO: maybe we shouldn't be doing this here. I ran into a number that
# was lacking an area code and was thus not valid. If we were validating and
# formatting numbers at a later stage, we could use additional context such as
# the postal state to infer missing data.
def format_phone(num):
    """Parse *num* as an Australian phone number; return national format or None."""
    try:
        num = phonenumbers.parse(num, "AU")
        if not phonenumbers.is_valid_number(num):
            return None
        return phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.NATIONAL)
    except:
        # Bare except: any parse failure is treated as "no valid number".
        # NOTE(review): narrowing this to phonenumbers.NumberParseException
        # would avoid masking unrelated bugs (see the TODO block above).
        return None
def Phone():
    """Output processor: join/trim extracted pieces, then format as an AU phone."""
    return Compose(trimjoin, format_phone)
class JoiningLoader(ItemLoader):
    """ItemLoader whose default output processor trims and joins extracted strings."""
    default_output_processor = Compose(trimjoin)
class ContactLoader(JoiningLoader):
    """Loader for ContactItem records."""
    default_item_class = ContactItem
    # phone_out = Phone()
    # fax_out = Phone()
    # mobile_out = Phone()
class InstitutionLoader(JoiningLoader):
    """Loader for InstitutionItem; parses the postal address into components."""
    default_item_class = InstitutionItem
    postal_address_out = Compose(trim, sanitize_address, parse_address)
| true |
679c8a32122add2c612f398fe2a47bc5e876ccbe | Python | happyhk/MATH_A | /BP_NN.py | UTF-8 | 2,877 | 2.5625 | 3 | [] | no_license | # 引入相关库
# Import libraries
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import preprocessing
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Read the training and test data
train_data = np.array(pd.read_csv("./train_data/traindatasets.csv"))
test_data = np.array(pd.read_csv("./last_train_data/test_112501.csv"))
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Feature columns X (7 columns = 7 predictor variables)
train_feature = np.array(train_data[:, [0, 1, 2, 3, 4, 5, 6]])
# Target column Y
train_label = np.array(train_data[:, [7]])
# Test-set feature columns
test_xs = np.array(test_data[:, [0, 1, 2, 3, 4, 5, 6]])
print(test_data.shape)
print(train_feature.shape)
print(train_label.shape)
# Build the network
# Placeholders for x and y
model_input = tf.placeholder(tf.float32, [None, 7])  # 7 features per sample
y = tf.placeholder(tf.float32, [None, 1])  # a single target variable
# train_feature = preprocessing.scale(train_feature)  # optional normalisation
# test_xs = preprocessing.scale(test_x)  # test set would need the same scaling
print(test_xs.shape)
# Hidden layer
# Weight matrix: 7 inputs x 20 hidden neurons
Weights_L1 = tf.Variable(tf.random_normal([7, 20]))
# Bias matrix
biases_L1 = tf.Variable(tf.zeros([1, 20]))
Wx_plus_b_L1 = tf.matmul(model_input, Weights_L1) + biases_L1
# tanh activation
L1 = tf.nn.tanh(Wx_plus_b_L1)
# Output layer
Weights_L2 = tf.Variable(tf.random_normal([20, 1]))
# NOTE(review): this bias shape [1, 20] broadcasts the [N, 1] matmul result
# to [N, 20]; a one-unit output layer should use tf.zeros([1, 1]).
biases_L2 = tf.Variable(tf.zeros([1, 20]))
model_output = tf.matmul(L1, Weights_L2) + biases_L2
# Loss: mean squared error
loss = tf.reduce_mean(tf.square(y - model_output))
# Optimizer: momentum (SGD etc. would also work)
train_step = tf.train.MomentumOptimizer(0.05, 0.05).minimize(loss)
# NOTE(review): 'session' is never defined in this script, so this branch
# can never run.
if 'session' in locals() and session is not None:
    print('Close interactive session')
    session.close()
with tf.Session() as sess:
    # Initialise the variables
    sess.run(tf.global_variables_initializer())
    # writer=tf.summary.FileWriter("gra",graph=tf.get_default_graph())
    print(sess.run(loss, feed_dict={model_input: train_feature, y: train_label}))
    for i in range(200):
        sess.run(train_step, feed_dict={model_input: train_feature, y: train_label})
        print("epoch:", i)
        # print(sess.run(L1,feed_dict={x: train_feature, y: train_label}))
        print(sess.run(loss, feed_dict={model_input: train_feature, y: train_label}))
    prd = sess.run(model_output, feed_dict={model_input: test_xs})  # predictions for the test set
    print(prd)
    print(type(prd))
    tf.saved_model.simple_save(sess, "./model/", inputs={"myInput": model_input}, outputs={"myOutput": model_output})
| true |
47d9c3575a71bd2cc0b5a49e4bd42d4f88888ca4 | Python | qq453388937/data_mining_faith | /machine_learining/first.py | UTF-8 | 6,250 | 3.5625 | 4 | [
"MIT"
] | permissive | # -*- coding:utf-8 -*-
# Scikit-learn 实现数据集的特征工程
# 机器学习的算法和原理
# 应用Scikit-learn==0.18 实现机器学习算法的应用,结合场景解决实际问题
# 人工智能 > 机器学习(典型问题:垃圾邮件分类) > 深度学习(图像识别)
# 机器学习的定义: 数据, 自动分析获得规律, 对未知数据进行预测
# 意义: 提高生产效率,量化投资,智能客服
# TensorFlow
# 掌握算法的应用场景, 从某个业务领域切入问题
# 特征工程
# : 专业背景知识和技巧处理数据, 使得特征能在机器学习算法上的发挥更好的作用的过程
""" 数据和特征决定了机器学习的上限,而模型和算法只是逼近这个上限而已"""
# 需要筛选处理一些合适的特征
# 数据集的构成 : 特征值(事物的特点) + 目标值(预测的结果) (重要!!!)
# 特征工程包含3个内容: 1.特征抽取 2.特征预处理 3. 特征降维
# 1.特征抽取: 将任意数据( 如文本或图像,类别 ) 转换为可用于机器学习的数字特征!!!!! (重要!!!) 字典,文本,图像
import sklearn, jieba
# 特征抽取模块
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def countvec():
    """Bag-of-words (count) feature extraction from raw text."""
    # Instantiate the counter (CountVectorizer has no sparse=False option).
    count = CountVectorizer()
    # Pass a list of documents; Chinese text is only split on
    # whitespace/punctuation, so it needs pre-tokenisation.
    data = count.fit_transform(['Life is is short, i like python 大大', 'life is too long, i dislike python'])
    # Vocabulary list: duplicates counted once, single letters filtered out.
    print(count.get_feature_names())
    print(data.toarray())
from sklearn.feature_extraction import DictVectorizer
def dictvec():
    """Feature extraction from dicts: one-hot encodes the categorical fields."""
    """ 目的对特征当中有类别的信息做处理,处理: one-hot编码->最好选择"""
    # Instantiate (sparse=False returns a dense array instead of a matrix).
    dict_vec = DictVectorizer(sparse=False)
    # Feature dicts for 3 samples.
    dict_data = [{'city': '北京', 'temperature': 100},
                 {'city': '上海', 'temperature': 60},
                 {'city': '深圳', 'temperature': 30}]
    # Call fit_transform; the default (sparse) return saves memory.
    data = dict_vec.fit_transform(dict_data)
    print(dict_vec.get_feature_names()) # 'city=上海', 'city=北京', 'city=深圳', 'temperature']
    print(data)
    """ one-hot 编码 """
    # print(data.toarray()) -- or just pass sparse=False as above
    # ['city=上海','city=北京','city=深圳','temperature']
def fenci():
    """Tokenise Chinese sentences with jieba, then extract count features."""
    def cut_word(s1, s2, s3):
        # jieba.cut returns a generator of tokens per sentence.
        c1 = jieba.cut(s1)
        c2 = jieba.cut(s2)
        c3 = jieba.cut(s3)
        print(c3) # <generator object Tokenizer.cut at 0x10d1cb8e0>
        # Materialise each generator and join tokens with spaces so
        # CountVectorizer can split on whitespace.
        ct1 = " ".join(list(c1))
        ct2 = " ".join(list(c2))
        ct3 = " ".join(list(c3))
        print(ct3)
        return ct1, ct2, ct3
    s1 = "今天很残酷,明天更残酷,后天很美好,但绝对大部分是死在明天晚上,所以每个人不要放弃今天。"
    s2 = "我们看到的从很远星系来的光是在几百万年之前发出的,这样当我们看到宇宙时,我们是在看它的过去。"
    s3 = "如果只用一种方式了解某样事物,你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。"
    ret1, ret2, ret3 = cut_word(s1, s2, s3)
    # Instantiate with stop words excluded from the vocabulary.
    count = CountVectorizer(stop_words=["不会", "不要", "绝对"])
    # Extract count features from the tokenised sentences.
    data = count.fit_transform([ret1, ret2, ret3])
    print(count.get_feature_names())
    print(data.toarray())
""" stop_words 停止词,这些词不能反映文章主题,词语性质比较中性,因为,所以 """
from sklearn.feature_extraction.text import TfidfVectorizer
def TfIdfvector():
    """
    TF-IDF demo: scores how important a word is to one document within a
    corpus, down-weighting words that appear across many documents.
    tf (term frequency): occurrences / total words, e.g. 5/100 = 0.05
    idf (inverse document frequency): lg(10000000/10000) = 3, i.e. lg(ten million / ten thousand) = 3
    tfidf = 3 * 0.05 => 0.15
    """
    def cut_word(s1, s2, s3):
        c1 = jieba.cut(s1)
        c2 = jieba.cut(s2)
        c3 = jieba.cut(s3)
        print(c1) # <generator object Tokenizer.cut at 0x10e934bf8> (a generator)
        # Materialize the generators and join with spaces so the vectorizer
        # can split the segmented words on whitespace
        ct1 = " ".join(list(c1))
        ct2 = " ".join(list(c2))
        ct3 = " ".join(list(c3))
        print(ct1) # 如果 只用 一种 方式 了解 某样 事物 , 你 就 不会 真正 了解 它 。 了解 事物 真正 含义 的 秘密 取决于 如何 将 其 与 我们 所 了解 的 事物 相 联系 。
        return ct1, ct2, ct3
    s1 = "今天很残酷,明天更残酷,后天很美好,但绝对大部分是死在明天晚上,所以每个人不要放弃今天。"
    s2 = "我们看到的从很远星系来的光是在几百万年之前发出的,这样当我们看到宇宙时,我们是在看它的过去。"
    s3 = "如果只用一种方式了解某样事物,你就不会真正了解它。了解事物真正含义的秘密取决于如何将其与我们所了解的事物相联系。"
    ret1, ret2, ret3 = cut_word(s1, s2, s3)
    print(ret1, ret2, ret3)
    # Instantiate; TF-IDF ranks each document's words by importance — useful
    # for picking the top-N terms per document
    tfidf = TfidfVectorizer(stop_words=["不会", "不要", "绝对"]) # rank words by per-document importance to find the top N
    """ 分类机器算法前期处理方式 """
    # Extract TF-IDF features from the segmented text
    data = tfidf.fit_transform([ret1, ret2, ret3])
    print(tfidf.get_feature_names())
    print(data.toarray())
if __name__ == '__main__':
    # Toggle the individual demos on/off here:
    # dictvec()
    # countvec()
    # fenci()
    TfIdfvector()
| true |
17eb3d38df8e5b4f328ce26b5978acdc6000259e | Python | liuyuzhou/databasesourcecode | /chapter3/delete_exp.py | UTF-8 | 1,539 | 3.578125 | 4 | [] | no_license | import pymysql
# Open the database connection (host, user, password, schema, port)
db = pymysql.connect("localhost", "root", "root", "data_school", 3306)
# Obtain a cursor for issuing SQL statements
cursor = db.cursor()
def query_mysql(s_num):
    """
    Look up and print the student record(s) matching a student number.
    :param s_num: student number to search for
    :return: None
    """
    # Parameterized query: the driver escapes the value, preventing SQL injection
    sql = "SELECT * FROM python_class WHERE number=%s"
    try:
        # Execute the SQL statement
        cursor.execute(sql, (s_num,))
        # Fetch every matching row
        results = cursor.fetchall()
        if results is None or len(results) == 0:
            print(f'没有找到学号为{s_num}的信息。')
        for row in results:
            num = row[1]
            name = row[2]
            # Print the record details
            print(f"学号为{s_num}的详细信息为:number={num},name={name}")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        print("Error: unable to fetch data")
def delete_mysql(num):
    """
    Delete the row with the given student number and commit the change.
    :param num: student number of the row to delete
    :return: None
    """
    # Parameterized statement instead of str.format interpolation
    sql = "DELETE FROM python_class WHERE number=%s"
    try:
        cursor.execute(sql, (num,))
        # Commit the deletion
        db.commit()
    except Exception:
        # Roll back on failure so the connection stays in a clean state
        db.rollback()
if __name__ == "__main__":
    # Show the record, delete it, then query again to confirm it is gone.
    number = 1005
    print('------删除之前--------')
    query_mysql(number)
    delete_mysql(number)
    print('------删除之后--------')
    query_mysql(number)
    # Close the connection
    db.close()
| true |
b9707f23bff9f2cbce25e045ca6d2a640e07bbba | Python | preacher6/Rel-IMP | /pygame_mantenimiento.py | UTF-8 | 18,095 | 2.515625 | 3 | [] | no_license | import pygame
import sys
import os
import matplotlib.pyplot as plt
from properties import *
from pygame.locals import *
import pygame_gui
# RGB color palette used across the editor UI
WHITE = (255, 255, 255)
GRAY = (112, 128, 144)
SEMIWHITE = (245, 245, 245)
SEMIWHITE2 = (240, 240, 240)
SEMIWHITE3 = (220, 220, 220)
LIGHTGRAY = (192, 192, 192)
class PGManten:
    """
    Main pygame window for the REL-Imp editor.

    Owns the display surface, fonts and logo plus the Property workspace, and
    runs the event/render loop in execute_pygame(). `Property`, `Conexion` and
    `K_ESCAPE` come from the star imports of `properties` / `pygame.locals`
    — TODO confirm.
    """
    def __init__(self, window_size=(1000, 650)):
        self.initialize_pygame()
        self.clock = pygame.time.Clock()
        self.font = pygame.font.SysFont('Arial', 40)
        self.font_2 = pygame.font.SysFont('Arial', 20)
        self.font_3 = pygame.font.SysFont('Arial', 16)
        self.WINDOW_SIZE = window_size # Main window size
        self.screen_form = pygame.display.set_mode(self.WINDOW_SIZE)
        self.logo = pygame.image.load(os.path.join('pics', 'logo.png')) # Load the UTP logo
        #self.logo = pygame.transform.scale(self.logo, (118, 76))
        self.property_class = Property(workspace_size=(800, 460)) # Properties/workspace instance
        self.error = False # Set when an exception occurs
    @staticmethod
    def initialize_pygame():
        # One-time pygame setup: init, center the window, set the caption.
        pygame.init()
        os.environ['SDL_VIDEO_CENTERED'] = '1' # Center the window
        pygame.display.set_caption('REL-Imp')
    def execute_pygame(self):
        """Run the main event/render loop until the window is closed.

        Handles left/right mouse clicks (selection, double-click to open a
        module, tab management, connection drawing/deletion, element rotation),
        the Escape key, the double-click timer, and redraws the whole UI each
        frame.
        """
        manager = pygame_gui.UIManager((800, 600))
        hello_button = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((350, 275), (100, 50)),
                                                    text='Say Hello',
                                                    manager=manager)
        position_mouse = (0, 0) # Last clicked mouse position
        grid = True # Grid enabled
        close = False
        timer = 0 # Needed for double-click detection
        dt = 0 # Timer increment per frame
        while not close:
            time_delta = self.clock.tick(60)/1000.0
            keys = pygame.key.get_pressed() # Snapshot of pressed keys
            for event in pygame.event.get():
                if self.property_class.draw: # Route events to the name text box
                    self.property_class.check_text(event)
                if event.type == pygame.QUIT:
                    close = True
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    position_mouse = pygame.mouse.get_pos()
                    #print('pos', position_mouse)
                    if pygame.mouse.get_pressed()[0]: # Left button
                        if timer == 0 and self.property_class.modulos:
                            timer = 0.001
                        elif timer < 0.3 and not self.property_class.elem_type and self.property_class.modulos: # Double click opens a module / must stay blocked while the properties panel is running
                            timer = 0
                            self.property_class.name_element.active = True # Activate the name field of the properties panel
                            self.property_class.element_property(position_mouse, 1) # Activate the element's property view
                            if self.property_class.elem_selected:
                                self.property_class.name_element.buffer = \
                                    [char for char in self.property_class.elem_selected]
                                # Put the selected element's text into the name buffer
                            for container in self.property_class.elementos['containers']: # Text-entry actions
                                if container.selected:
                                    for caja in container.cajas:
                                        if caja.tag == self.property_class.elem_selected:
                                            self.property_class.box_field1.buffer = [char for char in str(caja.alpha)] # load the selected element's values into the edit buffers
                                            self.property_class.box_field2.buffer = [char for char in str(caja.betha)]
                                    for knn_ind in container.knn:
                                        for col in knn_ind.cols:
                                            for caja in col:
                                                if caja.tag == self.property_class.elem_selected:
                                                    self.property_class.box_field1.buffer = \
                                                        [char for char in str(caja.alpha)]
                                                    self.property_class.box_field2.buffer = \
                                                        [char for char in str(caja.betha)]
                                    for stand_ind in container.stand:
                                        if stand_ind.tag == self.property_class.elem_selected:
                                            self.property_class.box_field1.buffer = [char for char in str(stand_ind.alpha)]
                                            self.property_class.box_field2.buffer = [char for char in str(stand_ind.betha)]
                                    for kdn in container.kdn:
                                        if kdn.tag == self.property_class.elem_selected:
                                            self.property_class.box_field1.buffer = [char for char in str(kdn.alpha)]
                                            self.property_class.box_field2.buffer = [char for char in str(kdn.betha)]
                        self.property_class.list_box_systems.consult_position(position_mouse)
                        if self.property_class.add_rect.collidepoint(position_mouse) \
                                and self.property_class.cont < 14: # Add a tab while fewer than 14 exist
                            self.property_class.add_container()
                        if self.property_class.min_rect.collidepoint(position_mouse):
                            self.property_class.delete_container(position_mouse) # Check whether some tab is being closed
                        self.property_class.select_container(position_mouse, self.screen_form) # Select a tab
                        if not self.property_class.show_msg:
                            self.property_class.check_actions(position_mouse) # check toolbar actions
                        if not self.property_class.show_msg2:
                            self.property_class.check_actions(position_mouse) # check toolbar actions
                        self.property_class.close_elements(position_mouse) # Close elements
                        self.property_class.add_red_elements(position_mouse)
                        for container in self.property_class.elementos['containers']:
                            container.list_box.scroll.action_bar(position_mouse)
                        self.property_class.list_box_systems.scroll.action_bar(position_mouse)
                        if self.property_class.show_msg: # If the message window is showing
                            if self.property_class.mensaje.accept.recta.collidepoint(position_mouse):
                                self.property_class.show_msg = False
                        if self.property_class.show_msg2: # If the second message window is showing
                            if self.property_class.mensaje2.accept.recta.collidepoint(position_mouse):
                                self.property_class.actions = [0]*9
                                self.property_class.show_msg2 = False
                        if self.property_class.line_delete:
                            for container in self.property_class.elementos['containers']:
                                if container.selected:
                                    for conexion in container.conections:
                                        if conexion.elem1 == self.property_class.line_element.elem1 and conexion.elem2\
                                                == self.property_class.line_element.elem2:
                                            container.conections.remove(conexion)
                                            for nodo in container.nodos:
                                                nodo.connected = False
                                            for key in container.keys: # Walk the connected system nodes
                                                # Find the deleted connection's endpoints and detach them from their node
                                                if conexion.elem1 in container.nodos_sistema[key] and \
                                                        conexion.elem2 in container.nodos_sistema[key]:
                                                    container.nodos_sistema[key].remove(conexion.elem1)
                                                    container.nodos_sistema[key].remove(conexion.elem2)
                                            self.property_class.line_element = None
                                            self.property_class.line_delete = False
                                            break
                        if self.property_class.connecting: # While the drawing line is active, points can be added to the connection
                            self.property_class.duple_conection.append([self.property_class.init_pos, self.property_class.end_line]) # All points of one connection
                            self.property_class.points_conection.extend(self.property_class.build_rect_points(self.property_class.duple_conection[-1]))
                            if self.property_class.end_line != self.property_class.duple_conection[0][0]:
                                self.property_class.init_pos = self.property_class.end_line
                                for container in self.property_class.elementos['containers']:
                                    if container.selected:
                                        for nodo in container.nodos:
                                            if nodo.rect.collidepoint(self.property_class.end_line):
                                                self.property_class.hold_line = False # Stop drawing the line
                                                self.property_class.line_able = False # Stop enabling the line
                                                self.property_class.connecting = False # Stop connecting
                                                self.property_class.elem2 = nodo # Final element of the connection
                                                conexion = Conexion(self.property_class.duple_conection,
                                                                    self.property_class.elem1,
                                                                    self.property_class.elem2,
                                                                    self.property_class.points_conection)
                                                container.conections.add(conexion)
                                                self.property_class.duple_conection = []
                                                self.property_class.points_conection = []
                                                # Determine which system node the object's physical nodes belong to
                                                container.check_node(self.property_class.elem1, self.property_class.elem2)
                            else:
                                self.property_class.duple_conection.pop()
                        if self.property_class.actions[3]: # Delete element
                            self.property_class.delete_element(position_mouse)
                        if self.property_class.actions[5]: # Import a module
                            if self.property_class.list_box_modules.accept2.recta.collidepoint(position_mouse):
                                self.property_class.draw_module = True
                                self.property_class.elem_modulo = self.property_class.list_box_modules.list_items[self.property_class.list_box_modules.conten_actual-1]
                        """if self.property_class.rect_up.collidepoint(position_mouse):
                            self.property_class.scroll()"""
                        if self.property_class.line_able: # Allow drawing a line
                            self.property_class.hold_line = True
                        if self.property_class.drawing: # Place an element
                            self.property_class.put_element()
                        if self.property_class.moving and self.property_class.move_inside: # Re-position an element
                            self.property_class.repos_element()
                        if self.property_class.check.recta.collidepoint(position_mouse): # Rotate boxes
                            for container in self.property_class.elementos['containers']:
                                if container.selected:
                                    for caja in container.cajas:
                                        if caja.tag == self.property_class.elem_selected:
                                            conectado = True
                                            for nodo in caja.nodos:
                                                if nodo.connected:
                                                    conectado = False
                                            if conectado:
                                                # Only a fully disconnected box may be rotated
                                                caja.orientation = not caja.orientation
                                                self.property_class.check.push = caja.orientation
                                                for nodo in caja.nodos:
                                                    container.nodos.remove(nodo)
                                                caja.calc_nodes()
                                                for nodo in caja.nodos:
                                                    container.nodos.add(nodo)
                                            else:
                                                self.property_class.show_msg = True
                                                self.property_class.mensaje.text = 'El elemento debe estar desconectado'
                    elif pygame.mouse.get_pressed()[2] and self.property_class.modulos: # Right button
                        self.property_class.element_property(position_mouse)
                elif keys[K_ESCAPE]: # Actions when the Escape key is pressed
                    position_mouse = self.property_class.cancel()
                    self.property_class.close_elements((0, 0), force=True)
                    self.property_class.box_field1.active = False
                    self.property_class.box_field2.active = False
                manager.process_events(event)
                if event.type == pygame.USEREVENT:
                    if event.user_type == pygame_gui.UI_BUTTON_PRESSED:
                        if event.ui_element == hello_button:
                            print('Hello World!')
            manager.update(time_delta)
            if timer != 0: # Advance the double-click timer
                timer += dt
            if timer >= 0.5: # Reset the timer
                timer = 0
            # --- Render pass: clear, draw containers, overlays and text ---
            abs_position = pygame.mouse.get_pos()
            self.screen_form.fill(SEMIWHITE2)
            self.property_class.draw_containers(self.screen_form)
            self.property_class.draw_on_screen(self.screen_form, abs_position, position_mouse)
            self.property_class.exec_actions(self.screen_form, abs_position, position_mouse) # Execute actions: move, delete, ...
            if self.property_class.actions[6] or self.property_class.elem_proper or self.property_class.config_bit \
                    or self.property_class.time_play or self.property_class.time_plot: # Draw tab-name text
                self.property_class.draw_text(self.screen_form)
                self.property_class.draw = True
            if self.property_class.hold_line: # Live line being drawn
                self.property_class.draw_line(self.screen_form)
            if self.property_class.element_moved != None: # Move elements
                self.property_class.move_element(self.screen_form, abs_position)
                self.property_class.moving = True
            if self.property_class.show_msg:
                self.property_class.mensaje.draw(self.screen_form)
                if self.property_class.mensaje.accept.recta.collidepoint(abs_position):
                    self.property_class.mensaje.accept.over = True
                else:
                    self.property_class.mensaje.accept.over = False
            if self.property_class.show_msg2:
                self.property_class.mensaje2.draw(self.screen_form)
                if self.property_class.mensaje2.accept.recta.collidepoint(abs_position):
                    self.property_class.mensaje2.accept.over = True
                else:
                    self.property_class.mensaje2.accept.over = False
            self.screen_form.blit(self.logo, (20, 10))
            self.screen_form.blit(self.font.render('REL-Imp', True, (0, 0, 0)), (550, 20))
            self.screen_form.blit(self.font_2.render('Plataforma para la identificación de componentes críticos', True, (0, 0, 0)), (550, 65))
            #self.screen_form.blit(self.font_2.render('medida de la importancia', True, (0, 0, 0)), (30, 95))
            self.screen_form.blit(self.font_3.render('Editando sistema:', True, (0, 0, 0)), (760, 570))
            self.screen_form.blit(self.font_3.render(self.property_class.cont_selected.name, True, (0, 0, 0)), (870, 570))
            self.screen_form.blit(self.font_2.render('Sistemas', True, (0, 0, 0)), (50, 345))
            self.clock.tick(60)
            dt = self.clock.tick(30) / 1000 # Timer delta
            #manager.draw_ui(self.screen_form)
            pygame.display.update()
| true |
cf0c370137ea8321598c4b84473f1171ba6d263d | Python | fxy1018/Leetcode | /80_Remove_Duplicates_from_Sorted_Array_II.py | UTF-8 | 698 | 3.375 | 3 | [] | no_license | '''
Created on Feb 1, 2017
@author: fanxueyi
'''
class Solution(object):
    def removeDuplicates(self, nums):
        """
        Remove duplicates in-place from a sorted list so that each value
        appears at most twice, and return the new length.

        :type nums: List[int]
        :rtype: int
        """
        # Write pointer k: x may be kept when fewer than two elements are
        # written, or when it differs from the element two slots back
        # (which guarantees at most two copies of any value).
        k = 0
        for x in nums:
            if k < 2 or nums[k - 2] != x:
                nums[k] = x
                k += 1
        # The original physically removed extras via list.remove (O(n^2));
        # truncate once at the end so len(nums) still matches the result.
        del nums[k:]
        return k
# Quick manual check of the de-duplication routine.
solver = Solution()
print(solver.removeDuplicates([1, 2, 2, 2, 2, 3]))
| true |
aeb283bf4986660960dd86b551549b0c23a859e4 | Python | soumyax1das/soumyax1das | /exceptional.py | UTF-8 | 340 | 3.25 | 3 | [] | no_license | import sys
def convert(i):
    """Convert *i* to int, printing either the value or a failure message.

    :param i: value to convert (str, number, None, ...)
    :return: None
    """
    try:
        x = int(i)
        print('Value is -', x)
    except (ValueError, TypeError) as e:
        # int() can only raise ValueError/TypeError here; the original also
        # listed IndentationError/SyntaxError/NameError, which can never occur.
        print('Conversion failed')
        print(str(e))
if __name__ == '__main__':
    # Exercise both the success and the failure paths.
    for sample in (3, 'a', 6):
        convert(sample)
| true |
6852871e280da8b09e5d3f24edc04c4a0a2a901c | Python | CindyLiu617/parkingPrediction | /appserver/externalsort.py | UTF-8 | 3,755 | 2.734375 | 3 | [] | no_license | import csv
import heapq
import os
import utils
# Number of records written to each temporary chunk file before it is sorted.
LINES_PER_FILE = 50000
# Timestamp layout of each line (keeps the trailing newline from readline()).
DATE_FORMAT = '%Y-%m-%d %H:%M:%S\n'
def external_sort(local_file):
    """External merge sort of the entry/exit timestamps in a parking log.

    Splits the CSV at *local_file* into sorted chunk files of LINES_PER_FILE
    records each, then k-way merges them (via a heap) into 'entry_time.txt'
    and 'exit_time.txt', deleting the chunk files afterwards.

    NOTE(review): this is Python-2-only code — `sorted(cmp=...)` and feeding a
    binary-mode file to csv.reader are both invalid on Python 3. Confirm the
    target interpreter before reuse.
    """
    def record_constructor(uploaded):
        # Split the input into per-chunk sorted files of entry and exit times.
        def save_sorted(time_str_list, file_name):
            # Sort one chunk in memory and persist it.
            sorted_time_str = sort_string_list(time_str_list)
            with open(file_name, 'w') as f:
                f.writelines(sorted_time_str)
            return file_name
        with open(uploaded, 'rb') as f:
            csv_reader = csv.reader(f, delimiter='\t')
            entry_times = []
            exit_times = []
            file_index = 0
            entry_files = []
            exit_files = []
            for i, line in enumerate(csv_reader):
                if i > 0:  # skip the header row
                    # Each row holds "entry,exit" in its first field
                    splited = line[0].split(',')
                    entry_time = '%s\n' % splited[0]
                    exit_time = '%s\n' % splited[1]
                    entry_times.append(entry_time)
                    exit_times.append(exit_time)
                    if i % LINES_PER_FILE == 0:
                        # Chunk is full: sort and flush it to disk
                        try:
                            entry_files.append(save_sorted(entry_times, 'entryTime%d.txt' % file_index))
                            exit_files.append(save_sorted(exit_times, 'exitTime%d.txt' % file_index))
                            file_index += 1
                        except IOError as e:
                            print('Operation failed: %s' % e.strerror)
                        entry_times = []
                        exit_times = []
            # Flush the final, partially filled chunk (if any)
            if len(entry_times) != 0:
                entry_files.append(save_sorted(entry_times, 'entryTime%d.txt' % file_index))
                exit_files.append(save_sorted(exit_times, 'exitTime%d.txt' % file_index))
        return entry_files, exit_files
    def sort_string_list(time_string_list):
        # Sort timestamp strings chronologically; the parse cache avoids
        # re-parsing the same string in repeated comparisons.
        cache = {}
        def compare(left, right):
            left_time = utils.str_to_datetime(left, DATE_FORMAT, cache=cache)
            right_time = utils.str_to_datetime(right, DATE_FORMAT, cache=cache)
            if left_time > right_time:
                return 1
            elif left_time < right_time:
                return -1
            else:
                return 0
        # Python-2 cmp-style sort (would need functools.cmp_to_key on Python 3)
        sorted_list = sorted(time_string_list, cmp=compare)
        return sorted_list
    def merge_k_sorted_files(file_list, merged_file_name):
        # Classic k-way merge: keep one (timestamp, line, reader) tuple per
        # file on a min-heap and repeatedly pop the smallest.
        with open(merged_file_name, 'w') as merged_file:
            file_readers = []
            try:
                for file_name in file_list:
                    f = open(file_name, 'r')
                    file_readers.append(f)
                heap = []
                for reader in file_readers:
                    line_read = reader.readline()
                    heap.append((utils.str_to_datetime(line_read, DATE_FORMAT), line_read, reader))
                heapq.heapify(heap)
                while heap:
                    pop = heapq.heappop(heap)
                    merged_file.write(pop[1])
                    # Refill from the file that produced the popped line
                    line_read = pop[2].readline()
                    if line_read.strip() != '':
                        heapq.heappush(heap, (utils.str_to_datetime(line_read, DATE_FORMAT), line_read, pop[2]))
                merged_file.close()
                for reader in file_readers:
                    reader.close()
                # Temporary chunk files are no longer needed
                for file_name in file_list:
                    os.remove(os.getcwd() + '/' + file_name)
                return merged_file_name
            except IOError as e:
                # NOTE(review): on failure this returns None and may leak open
                # readers — confirm whether callers handle that.
                print('Operation failed: %s' % e.strerror)
    entry_file_list, exit_file_list = record_constructor(local_file)
    entries = merge_k_sorted_files(entry_file_list, 'entry_time.txt')
    exits = merge_k_sorted_files(exit_file_list, 'exit_time.txt')
    return entries, exits
| true |
# Goldbach-style decomposition (BOJ 6588): build the primes below one million
# by trial division, then for each even number read from stdin (0 terminates)
# print one decomposition into two odd primes.
primes = []
for candidate in range(2, 1000000):
    root = int(candidate ** 0.5)
    for divisor in range(2, root + 1):
        if not candidate % divisor:
            break
    else:
        primes.append(candidate)
prime_set = set(primes)  # same contents as the list, O(1) membership tests
while True:
    target = int(input())
    if target == 0:
        break
    bound = round(target / 4 + 0.1)
    for j in range(1, bound + 1):
        small = 2 * j + 1
        if small in prime_set and (target - small) in prime_set:
            print("{} = {} + {}".format(target, small, target - small))
            break
# Read a word, strip spaces, and report whether its length is a prime number.
a = input()
a = a.replace(" ", "")
e = len(a)
for i in range(2, e):
    if e % i == 0:
        print("no")
        break
else:
    # for/else runs only when no divisor was found. The original attached the
    # else to the if, printing "yes" once per non-divisor instead of once.
    print("yes")
| true |
"""Examples for lesson 10 (Curso em Video): greet the user by name."""
nome = input('Qual o seu nome? ').strip()
if nome.upper() != 'GUSTAVO':
    print('Que nome normal você tem.')
else:
    print('Seu nome é muito lindo.')
print('Bom dia, {}'.format(nome))
| true |
8461429339f312e26ac716d8be99084897a72a21 | Python | katsu-tamashiro/hangman | /chpater10-hangman.py | UTF-8 | 1,534 | 3.453125 | 3 | [] | no_license | import random
def hangman():
    """Play one console round of hangman (UI text is in Japanese).

    A secret word is chosen at random; every wrong guess reveals one more
    line of the gallows. The round ends with a win once all letters are
    found, or a loss when the drawing is complete.
    """
    answer = ["cat","dog","gollira"]
    # Pick the secret word at random
    a=random.randint(0,2)
    word = answer[a]
    # print(word)
    wrong = 0
    # Gallows drawing, revealed one extra line per wrong guess
    stages =["",
             "______________ ",
             "|              ",
             "|            | ",
             "|            0 ",
             "|           /|/ ",
             "|           / / ",
             "|              "
             ]
    # retters: letters still to find; a found letter is replaced by "$" so
    # duplicate letters are revealed one occurrence per guess
    retters = list(word)
    board = ["_"] * len(word)
    win = False
    print("ハングマンへようこそ!")
    while wrong < len(stages) -1 :
        print("\n")
        msg = "1文字を予想してね"
        char = input(msg)
        if char in retters:
            cind = retters.index(char)
            board[cind] = char
            retters[cind] = "$"
            #print(retters)
            #print(board)
        else:
            wrong += 1
        print("".join(board))
        e = wrong + 1
        print("\n".join(stages[0:e]))
        # print(e,len(stages))
        if e == len(stages):
            # Every stage is visible: the player has lost; reveal the word
            print("あなたの負け")
            print("正解は{}です".format(word))
        if "_" not in board:
            print("あなたの勝ち")
            print(" ".join(board))
            # NOTE(review): `win` is set but never read after the loop — confirm
            win = True
            break
        #print(board)
    # print("\n".join(stages[0:6]))
# Start a round immediately when the script runs.
hangman()
| true |
# Read a 3x3 grid of salaries and report the highest and the lowest.
salarios = [[float(input("Digite seu salário: ")) for _ in range(3)] for _ in range(3)]
# Seed BOTH extremes from the first entry: the original started `maior` at 0,
# which would be wrong if every salary were negative.
maior = menor = salarios[0][0]
for linha in salarios:
    for salario in linha:
        if salario > maior:
            maior = salario
        elif salario < menor:
            menor = salario
print("Maior salário: ", maior)
print("Menor salário: ", menor)
| true |
1a547028e6a2ef51eb1baf153aba4dfb8dce0922 | Python | LarisaOvchinnikova/python_codewars | /Find the index of the first occurrence of an item in a list (with a twist).py | UTF-8 | 109 | 2.734375 | 3 | [] | no_license | # https://www.codewars.com/kata/585ba66ce08bae791b00011b
def index_finder(lst, x):
    """Return the index of the first occurrence of *x* in *lst*, skipping index 0.

    Raises ValueError (like list.index) when x does not appear after position 0.
    """
    for pos in range(1, len(lst)):
        if lst[pos] == x:
            return pos
    raise ValueError("{!r} is not in list".format(x))
return lst.index(x, 1) | true |
class vector:
    """A list-like container with explicit size/capacity bookkeeping.

    Supports an optional "unbounded" mode in which out-of-range accesses grow
    the backing storage, with the new capacity chosen by ``capFunc``.
    """
    def __init__(self, inlist = [], defsize = 0, defval = None, isUnbounded = False, capFunc = lambda cap:cap+10, truesize = None):
        """Use keyword arguments.
        Don't enter a truesize value - this is used by vector when performing deepcopy.\n
        inlist: list with which to be initialized\n
        defsize: default size; if less than len(inlist), will be ignored\n
        defval: value with which to populate empty indices\n
        isUnbounded: enter true if you wish vector to expand for out-of-bounds list access\n
        capFunc: function for calculating excess capacity when vector expands; takes new size as input and returns new capacity"""
        # inlist is copied immediately, so the shared mutable default is harmless.
        self.arr = inlist.copy()
        self.arr += [defval] * (defsize - len(inlist))
        self.size = self.cap = len(self.arr)
        self.defval = defval
        self.isUnb = isUnbounded
        self.capFunc = capFunc
        if truesize != None:
            self.size = truesize
    def __getitem__(self, key):
        """Return self[key]; slice indices clamp to the current size."""
        if isinstance(key, slice):
            indices = range(*key.indices(self.size))
            return [self.arr[i] for i in indices]
        if key < 0:
            raise IndexError("key less than 0")
        if key >= self.size:
            if self.isUnb and key >= self.cap:
                self._copy(key)  # grow storage (also extends size to key+1)
            else:
                raise IndexError("key greater than size")
        return self.arr[key]
    def __setitem__(self, key, value):
        """Set self[key] = value, growing the vector when key >= size."""
        if key < 0:
            raise IndexError("key less than 0")
        if key >= self.cap:
            # Fixed: the original called self.__copy(key); name mangling turned
            # that into _vector__copy, which does not exist (AttributeError).
            self._copy(key)
        self.arr[key] = value
        if key >= self.size:
            self.size = key+1
    def insert(self, key, value):
        """Insert value before index key, shifting later items right."""
        if key < 0 or key > self.size:
            raise IndexError("key out of bounds")
        if key == self.size:
            # Fixed: the original fell through after append(), shifting and
            # bumping size again, which duplicated the element.
            self.append(value)
            return
        for i in range(self.size-1, key-1, -1):
            self.__setitem__(i+1, self.arr[i])
        # The shift loop above already grew size by one; the original's extra
        # `self.size += 1` over-counted the length.
        self.__setitem__(key, value)
    def append(self, value):
        """Add value at the end of the vector."""
        self.__setitem__(self.size, value)
    def __delitem__(self, key):
        """Delete self[key], shifting later items left."""
        if key >= self.size or key < 0:
            raise IndexError("key out of bounds")
        for i in range(key, self.size-1):
            self.arr[i] = self.arr[i+1]
        self.arr[self.size-1] = self.defval
        self.size -= 1
    def remove(self, key):
        """Delete self[key] (same as `del v[key]`).

        Fixed to delegate to __delitem__: the original duplicated the shift
        loop but forgot to clear the vacated last slot back to defval.
        """
        self.__delitem__(key)
    def __add__(self, other):
        """Return a new vector holding self's items followed by other's."""
        ans = []
        ans += self.arr[0:self.size]
        ans += other[0:len(other)]
        return vector(inlist=ans)
    def __iadd__(self, other):
        """Extend in place with the items of `other`.

        Fixed: the original indexed with `i + self.size` while __setitem__ was
        already growing self.size, scattering items and inflating the length.
        """
        base = self.size
        for i in range(0, len(other)):
            self.__setitem__(base + i, other[i])
        return self
    def __mul__(self, n):
        """Return a new vector with the contents repeated n times."""
        return vector(inlist=self.arr[0:self.size] * n, defval=self.defval)
    def __imul__(self, n):
        # NOTE: returns a new vector (rebinds on `*=`) rather than mutating.
        return self.__mul__(n)
    def __eq__(self, other):
        """Element-wise equality against any sized, indexable sequence."""
        if self.size != len(other):
            return False
        for i in range(0, self.size):
            if self.arr[i] != other[i]:
                return False
        return True
    def _copy(self, index):
        """Grow the backing storage so that `index` becomes addressable."""
        temp = self.arr
        self.arr = [self.defval] * (self.capFunc(index+1))
        for i in range(0, self.size):
            self.arr[i] = temp[i]
        self.size = index+1
        self.cap = self.capFunc(index+1)
    def __len__(self):
        return self.size
    def __str__(self):
        return "["+", ".join(map(str, self.arr[0:self.size]))+"]"
    def __contains__(self, item):
        return item in self.arr[0:self.size]
    def count(self, item):
        """Return how many live elements equal item."""
        count = 0
        for i in range(0, self.size):
            if self.arr[i] == item:
                count+=1
        return count
    def index(self, item, beg, end):
        """Return the first index of item in [beg, end), or None if absent."""
        for i in range(max(0, beg), min(self.size, end)):
            if self.arr[i] == item:
                return i
        return None
    def __copy__(self):
        # NOTE(review): the copy drops isUnbounded/capFunc settings — confirm.
        return vector(inlist=self.arr[0:self.size], defval=self.defval)
    def copy(self):
        """Return a shallow copy of this vector."""
        return self.__copy__()
    def __deepcopy__(self, memodict={}):
        # NOTE(review): assumes every element implements __deepcopy__ directly;
        # plain ints/strs would raise AttributeError — confirm intended use.
        return vector(inlist=[x.__deepcopy__(memodict) for x in self.arr], defval=self.defval, truesize=self.size)
    def __iter__(self):
        return vector.iterator(self).__iter__()
    def __reversed__(self):
        return vector.iteratorReversed(self, self.size).__iter__()
    class iterator:
        """Forward iterator over the live elements (indices 0..size-1)."""
        def __init__(self, vector):
            self.vect = vector
            self.curr = 0
        def __iter__(self):
            return self
        def __next__(self):
            if self.curr >= self.vect.size:
                raise StopIteration
            temp = self.vect[self.curr]
            self.curr+=1
            return temp
    class iteratorReversed:
        """Reverse iterator; fixed to include index 0 (the original stopped at 1)."""
        def __init__(self, vector, size):
            self.vect = vector
            self.curr = size-1
        def __iter__(self):
            return self
        def __next__(self):
            if self.curr < 0:
                raise StopIteration
            temp = self.vect[self.curr]
            self.curr-=1
            return temp
if __name__ == "__main__":
    # Ad-hoc smoke test of the vector class.
    b = [0,1,2,3,4,5]
    print(b[0:6:2])
    b = vector(inlist=b)
    print(b[1:4])
    print(b[0:8])
    print(b[10:0:-1])
    if 10 in b:
        print("yes")
    else:
        print("no")
    print(b)
    c = b.copy()
    print(c)
    d = vector()
    d[9] = 12
    # NOTE(review): "<-" is not a Python operator; the next line evaluates
    # d[9] < -12 and discards the result — presumably a typo for an
    # assignment (d[9] = -12). Confirm the intent.
    d[9] <- 12
49ab864f30b6a8c0fae94b003faeb4cfaf79aaf8 | Python | scimaksim/Python | /Nikiforov_2.py | UTF-8 | 2,311 | 4.625 | 5 | [] | no_license | # -----------------------------------------------------------------------------
# Name: Grades
# Purpose: Grade calculator - assignment #2
#
# Author: Maksim Nikiforov
# Date: 10/09/2016
# -----------------------------------------------------------------------------
"""
Computes the letter grade earned in a fictional course.
This program prompts the user to enter grades earned on different
components of a course. The program will keep prompting for grades
until the user presses 'Enter' without typing any grade.
"""
# Collect grades until the user submits a blank entry (at least one required).
all_grades = []
dropped_grade = None  # lowest grade, set only when one is actually dropped
while True:
    grade = input("Please enter a grade: ")
    if grade:
        value = float(grade)
        if 0 <= value <= 100:
            all_grades.append(value)
        # Out-of-range grades are silently discarded, matching the original
        # (whose `0 > x > 100` branch could never be true anyway).
    elif all_grades:
        break  # blank entry with at least one grade recorded: stop prompting
    else:
        print("Error: Please enter at least 1 grade!")
# With four or more grades, drop the lowest before averaging.
if len(all_grades) >= 4:
    dropped_grade = min(all_grades)
    all_grades.remove(dropped_grade)
course_average = round(sum(all_grades) / len(all_grades), 1)
# Map the average to a letter grade (the original's `>= 80 <= 89.9` chains
# were misleading: the second comparison was always true).
if course_average >= 90:
    letter_grade = 'A'
elif course_average >= 80:
    letter_grade = 'B'
elif course_average >= 70:
    letter_grade = 'C'
elif course_average >= 60:
    letter_grade = 'D'
else:
    letter_grade = 'F'
# Test against None, not truthiness: a dropped grade of 0.0 must still print.
if dropped_grade is not None:
    print("The lowest grade dropped:", dropped_grade)
print("Course average:", course_average)  # grade statistics
print("Letter grade:", letter_grade)
print("Based on the following grades:")  # list every counted grade
for grade in all_grades:
    print(grade)
| true |
b3941bf4a711e0a240504133fa5146014968b801 | Python | BrushkouMatvey/SatelliteImageSegmentation | /Dataset/CustomGenerator.py | UTF-8 | 513 | 2.625 | 3 | [] | no_license | from abc import ABC, abstractmethod
from keras.utils import Sequence
import numpy as np
class CustomGenerator(ABC, Sequence):
    """Abstract Keras Sequence serving batches from image/label filename lists."""
    def __init__(self, image_filenames, labels_filenames, batch_size):
        # Parallel lists of input-image and label filenames, plus batch size.
        self.image_filenames = image_filenames
        self.labels_filenames = labels_filenames
        self.batch_size = batch_size
    def __len__(self):
        # Number of batches per epoch (ceiling division). Uses the builtin
        # int(): the `.astype(np.int)` the original relied on broke when the
        # np.int alias was removed in NumPy 1.24.
        return int(np.ceil(len(self.image_filenames) / float(self.batch_size)))
    @abstractmethod
    def __getitem__(self, idx):
        """Return the batch at index *idx*; implemented by subclasses."""
        pass
ded78c84432cc6f871c3bbda8a5f479e47e66f81 | Python | jixiexiaojie/algorithm015 | /Week_01/移动零.py | UTF-8 | 439 | 3.03125 | 3 | [] | no_license | class Solution:
def moveZeroes(self,nums):
"frist: time:O(n),step:O(n)"
j=0
for i in range(len(nums)):
if nums[i]!=0:
nums[i],nums[j]=nums[j],nums[i]
j+=1
def moveZeroes(self,nums):
"second: time:O(n),step:O(n)"
j=0
for i in range(len(nums)):
if nums[i]!=0:
nums[j]=nums[i]
if i!=j:
nums[i]=0
j+=1
if __name__=="__main__":
nums=[0,1,0,3,12]
s=Solution()
value=s.moveZeroes(nums)
print(value)
| true |
3853c0f189169fa3db2c2d6b62d3a26a205aabdc | Python | rupak-118/concept-to-clinic | /prediction/src/algorithms/segment/trained_model.py | UTF-8 | 1,847 | 3.4375 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # -*- coding: utf-8 -*-
"""
algorithms.segment.trained_model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An API for a trained segmentation model to predict nodule boundaries and
descriptive statistics.
"""
from src.preprocess.load_dicom import load_dicom
def predict(dicom_path, centroids):
    """Predict nodule boundaries for each centroid of a DICOM series.

    Loads and pre-processes the DICOM data, produces the (placeholder) path
    to the serialized binary masks, and computes one volume per centroid.

    Args:
        dicom_path (str): a path to a DICOM directory
        centroids (list[dict]): a list of centroids of the form::

             {'x': int,
              'y': int,
              'z': int}

    Returns:
        dict: dictionary with the serialized-mask path and per-centroid
        volumes, of the form::

             {'binary_mask_path': str,
              'volumes': list[float]}
    """
    load_dicom(dicom_path)  # pre-process the series; result unused for now
    segment_path = 'path/to/segmentation'
    return {
        'binary_mask_path': segment_path,
        'volumes': calculate_volume(segment_path, centroids),
    }
def calculate_volume(segment_path, centroids):
    """Calculate tumor volume per centroid from the serialized pixel masks.

    Placeholder implementation: yields the constant 0.5 for every centroid.

    Args:
        segment_path (str): path to the serialized binary mask per centroid
        centroids (list[dict]): centroids with 'x', 'y', 'z' int keys

    Returns:
        list[float]: one volume per centroid
    """
    return [0.5] * len(centroids)
| true |
def chunk(arr, size):
    """Yield consecutive slices of *arr* with *size* elements (last may be shorter)."""
    for start in range(0, len(arr), size):
        yield arr[start:start + size]
if __name__ == "__main__":
    # Advent of Code 2019, day 8: decode a 25x6 layered "Space Image".
    width = 25
    height = 6
    per_layer = width * height
    with open("inputs/day_8.txt") as f:
        data = f.read()
    # NOTE(review): `digits` is never used; also, a trailing newline in the
    # input file would make int() below raise — presumably the file has none.
    digits = list()
    layers = list(chunk(list(map(lambda x: int(x), data)), per_layer))
    # Part 1: find the layer with the fewest zeros...
    min_zeros = per_layer
    min_layer = None
    for layer in layers:
        zero_count = len([x for x in layer if x == 0])
        if min_zeros > zero_count:
            min_layer = layer
            min_zeros = zero_count
    # ...and report (#ones * #twos) for that layer.
    ones_count = len([x for x in min_layer if x == 1])
    twos_count = len([x for x in min_layer if x == 2])
    print(ones_count * twos_count)
    # Part 2: flatten the layers; 2 is transparent, so the first non-2 pixel
    # from the top wins.
    result = []
    for i in range(per_layer):
        pixel = 2
        for layer in layers:
            if layer[i] != 2:
                pixel = layer[i]
                break
        result.append(pixel)
    # Render 25-wide rows: solid block for lit pixels, light shade otherwise.
    for chnk in chunk(result, width):
        for j in range(width):
            print("█" if chnk[j] == 1 else "░", end="")
        print("")
| true |
fe45139d708c2bd05fa957c7a4a8bca0ddfbe4ae | Python | SobertKaos/lifecal | /lifecal.py | UTF-8 | 2,197 | 3.609375 | 4 | [] | no_license | import datetime
import math
import numpy as np
import matplotlib.pyplot as plt
class LifeCal(object):
    """Life calendar: a (life_expectancy, 52) grid with one cell per week,
    where lived weeks are marked with 1."""
    def __init__(self, birth=datetime.datetime(1980, 1, 1), life_expectancy=80):
        """
        :param birth: datetime of birth
        :param life_expectancy: expected lifetime in whole years
        """
        self.birth = birth
        self.life_expectancy = life_expectancy
        self.weeks_alive = self.get_weeks_alive()
        self.expected_weeks_alive = self.get_expected_weeks_alive()
        self.cells = self.spawn_cells()
    def get_birth(self):
        """ Returns birth """
        return self.birth
    def get_life_expectancy(self):
        """ Returns life expectancy """
        return self.life_expectancy
    def get_weeks_alive(self):
        """ Returns number of entire weeks since birth """
        birth = self.get_birth()
        date = datetime.datetime.today()
        return math.floor((date - birth).days / 7)
    def get_expected_weeks_alive(self):
        """ Returns expected amount of weeks a person is alive """
        birth = self.get_birth()
        life_expectancy = self.get_life_expectancy()
        # datetime.replace()'s first positional argument is the year
        death = birth.replace(birth.year + life_expectancy)
        return math.floor((death - birth).days / 7)
    def spawn_cells(self):
        """ Return an (expectancy, 52) grid marking weeks already lived with 1. """
        expectancy = self.get_life_expectancy()
        weeks_alive = self.get_weeks_alive()
        cells = np.zeros((expectancy, 52))
        # Cap the fill at the grid size: someone who has outlived the stated
        # expectancy must not index past the last row (the original year/week
        # walk raised IndexError in that case).
        for i in range(min(weeks_alive, expectancy * 52)):
            cells[i // 52][i % 52] = 1  # row = year index, column = week index
        return cells
    def get_cells(self):
        """ Returns cells """
        return self.cells
    def show(self):
        """ Plots life calendar """
        plt.matshow(self.get_cells(), origin='lower')
        plt.show()
        return None
    def bokeh_show(self):
        """ !WIP! Plots life calendar with Bokeh """
        from bokeh.charts import HeatMap, show
        p = HeatMap(self.get_cells(), title="Life Calendar")
        show(p)
    def update(self):
        """ Updates cell states and redraws plot """
        pass
# Demo entry point: build a default calendar (birth 1980-01-01) and plot it.
if __name__ == "__main__":
    lif = LifeCal()
    lif.show()
| true |
dd7d641bdacb4efd7df8a290722bcdd9f8d3f1e0 | Python | mrigya-pycode/cat-dog-game-using-OOPs | /catdog game by oops.py | UTF-8 | 734 | 3.515625 | 4 | [] | no_license | import random
class cats_dogs:
    """Bulls-and-cows style scorer for a 4-digit guessing game.

    NOTE(review): ``for_count`` and ``for_exit`` read the *module-level*
    ``ran_num`` and ``user_ip`` globals defined in the driver script below —
    ``self.ran_num`` is stored but never used. Confirm before reuse.
    """
    def __init__(self, ran_num):
        # Secret number as a string of digits.
        self.ran_num = ran_num
    def for_count(self,cat,dog):
        # Compare digit-by-digit: a positional match is a "cat", otherwise a "dog".
        # NOTE(review): raises IndexError if the global user_ip is shorter
        # than ran_num; counters are local, so callers never see the totals.
        for i in range(len(ran_num)):
            if ran_num[i] == user_ip[i]:
                cat += 1
            else:
                dog += 1
        print("{}:cat,{}:dog".format(cat, dog))
    def for_exit(self):
        # Terminate the whole program when the user types "exit".
        if user_ip == "exit":
            exit()
# Game driver: pick a 4-digit secret, then loop reading guesses until the
# player either wins (4 cats) or types "exit".
game=True
ran_num = str(random.randint(1000, 9999))
cd=cats_dogs(ran_num)
# Bug fix: the loop was `while True`, so setting game=False never ended it.
while game:
    cat=0
    dog=0
    user_ip = str(input("enter the 4 digit number:"))
    cd.for_exit()
    cd.for_count(cat,dog)
    # Bug fix: for_count receives `cat` by value and cannot update it, so the
    # win test below could never fire; recompute the positional match count here.
    cat = sum(1 for secret, guess in zip(ran_num, user_ip) if secret == guess)
    if len(user_ip)>4:
        print("enter valid input please")
    elif cat==4:
        game=False
950dc3f1a22226be8a97ae47b5295ff875ec21de | Python | kanderson102/apptracker | /app.py | UTF-8 | 1,595 | 2.609375 | 3 | [
"MIT"
] | permissive | from flask import Flask, render_template
import pandas as pd
import altair as alt
from vega_datasets import data
from datetime import datetime
import json
app = Flask(__name__)

# Load the app-usage activity export. The Date and Time columns are merged
# into a single Date_Time datetime column by parse_dates.
data_path = 'data/AUM_V4_Activity_2018-06-21_17-16-27.csv'
df = pd.read_csv(data_path, parse_dates=[['Date', 'Time']])
df = df[:-3]  # drop the last 3 rows — presumably a footer in the export; verify
df = df.set_index('Date_Time')
df['Duration'] = pd.to_timedelta(df.Duration)
df.index = pd.to_datetime(df.index)
# tinder = df[df['App name'] == 'Tinder']
# tinder = tinder.resample('H')['Duration'].sum().reset_index()
# tinder.Duration = pd.to_datetime(tinder['Date_Time'].dt.date.astype(str) + ' ' + tinder['Duration'].dt.to_pytimedelta().astype(str))
# print(tinder.head())
# Aggregate total usage duration per hour across all apps, then re-encode the
# duration as a datetime on the same day (so Altair can plot hours/minutes).
df = df.resample('H')['Duration'].sum().reset_index()
print(df.head())
df.Duration = pd.to_datetime(df['Date_Time'].dt.date.astype(str) + ' ' + df['Duration'].dt.to_pytimedelta().astype(str))
print(df.head())
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serialises ``datetime`` objects as ISO-8601 strings."""

    def default(self, o):
        # Datetimes are not JSON-serialisable by default; emit ISO format.
        if isinstance(o, datetime):
            return o.isoformat()
        # Anything else defers to the base class (which raises TypeError).
        return super().default(o)
@app.route("/", methods=["GET"])
def main():
    """Serve the landing page."""
    return render_template('index.html')
@app.route("/data/bar")
def chart():
    """Return the hourly usage bar chart as Vega-Lite JSON.

    Bug fix: the original charted ``tinder``, whose construction is commented
    out at module level, so every request raised NameError. Chart the
    module-level aggregated ``df`` instead.
    """
    # obj = json.dumps(dates, cls=DateTimeEncoder)
    chart = alt.Chart(df).mark_bar().encode(
        # x='day(Date):O',
        x='Date_Time:T',
        y='hoursminutes(Duration)'
    )
    return chart.to_json() # dates.to_html()
if __name__ == "__main__":
    # NOTE(review): debug=True is for development only — do not ship as-is.
    app.run(debug=True, threaded=True, port=8080)
| true |
c0e5c56f6cf704a78e497f77d1999eaed3cdbe90 | Python | Barret-ma/leetcode | /560. Subarray Sum Equals K.py | UTF-8 | 882 | 3.90625 | 4 | [] | no_license | # Given an array of integers and an integer k, you need to
# find the total number of continuous subarrays whose sum equals to k.
# Example 1:
# Input:nums = [1,1,1], k = 2
# Output: 2
# Note:
# The length of the array is in range [1, 20,000].
# The range of numbers in the array is [-1000, 1000] and the
# range of the integer k is [-1e7, 1e7].
import collections
class Solution(object):
    def subarraySum(self, nums, k):
        """Count contiguous subarrays of ``nums`` whose elements sum to ``k``.

        Prefix-sum + hash-map technique: subarray (i, j] sums to k exactly
        when prefix[j] - prefix[i] == k, so for each running prefix we add
        the number of earlier prefixes equal to ``prefix - k``. O(n) time.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        if not nums:
            return 0
        ans = 0
        # Renamed from `sum`, which shadowed the builtin.
        prefix = 0
        counts = collections.defaultdict(lambda: 0)
        counts[0] = 1  # the empty prefix, so subarrays starting at index 0 count
        for num in nums:
            prefix += num
            # Fixed: removed stray debug `print(sum - k)` left in the loop.
            ans += counts[prefix - k]
            counts[prefix] += 1
        return ans
# Smoke test: [1,1] appears twice in [1,1,1], so the expected output is 2.
s = Solution()
print(s.subarraySum([1, 1, 1], 2))
a38d0b877c93b8fc5c1d0fbdc47128eac2986abf | Python | netzsooc/ExploreLyrics | /canciones.py | UTF-8 | 848 | 2.84375 | 3 | [] | no_license | #%%
import os
import requests
import pandas as pd
# Load the Billboard lyrics dataset and keep only songs that have lyrics.
data = pd.read_csv("https://raw.githubusercontent.com/walkerkq/musiclyrics/master/billboard_lyrics_1964-2015.csv", encoding="cp1252")
con_letra = data[data["Lyrics"].notnull()]
con_letra["Length"] = con_letra.Lyrics.apply(len)
# Compute statistics and drop short outliers: keep songs whose lyric length
# is greater than (mean - 1 standard deviation).
longitud = con_letra.Length
num_dev = 1
mean = longitud.mean()
std = longitud.std()
theta = int(mean - (num_dev * std))
con_letra = con_letra[con_letra["Length"] > theta]
#%%
def tokenize(cadena):
    """Split *cadena* into word and punctuation tokens.

    Whitespace-separated tokens are further split so that each maximal run
    of alphabetic characters and each individual non-alphabetic character
    becomes its own token.

    Bug fix: the original only flushed the accumulator when a non-alphabetic
    character followed it, so a trailing alphabetic run was silently dropped
    (e.g. tokenize("hola") returned []).
    """
    tokens = cadena.split()
    retoks = []
    for token in tokens:
        retok = ""
        for char in token:
            if char.isalpha():
                retok += char
            else:
                if retok:
                    retoks.append(retok)
                    retok = ""
                retoks.append(char)
        # Flush the trailing alphabetic run (previously lost).
        if retok:
            retoks.append(retok)
    return retoks
22e9f05bcf84b1140a015082cd24f39a7aec9b5a | Python | Svastikkka/DS-AND-ALGO | /Linked List/Palindrome LinkedList.py | UTF-8 | 757 | 3.515625 | 4 | [] | no_license | class Node:
def __init__(self,data):
self.data=data
self.next=None
def LinkedList(arr):
head=None
tail=None
if len(arr)<1:
return
else:
for i in arr:
if i ==-1:
break
NewNode = Node(i)
if head == None:
head=NewNode
tail=NewNode
else:
tail.next=NewNode
tail=NewNode
return head
def printLL(head):
    """Print 'true' if the list reads the same forwards and backwards, else 'false'.

    Walks the list once collecting values, then compares the sequence with
    its reverse. An empty list counts as a palindrome.
    """
    values = []
    node = head
    while node is not None:
        values.append(node.data)
        node = node.next
    print('true' if values == values[::-1] else 'false')
# Driver: read T test cases; each line of ints (terminated by -1) is built
# into a linked list and checked for being a palindrome.
t=int(input())
for i in range(t):
    arr=list(map(int,input().split()))
    printLL(LinkedList(arr))
b6dd1ea11e8a3939d1f54cd340b67d2c93e158ba | Python | calixt88/Shuriken-Soldier | /main.py | UTF-8 | 1,020 | 3.046875 | 3 | [] | no_license | import pygame
import os
import time
from StartScreen import *
from Constants import *
from pygame import mixer
#Window Initialize
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# NOTE(review): "Shurkien" in the title looks like a typo for "Shuriken".
pygame.display.set_caption("Shurkien Soldier")
# Load and set the window icon.
shurikenIcon = pygame.image.load(os.path.join('Assets','Images', 'maroonShuriken.png'))
pygame.display.set_icon(shurikenIcon)
#Updates the screen
def update():
    """Flush the back buffer to the display."""
    pygame.display.update()
#draws the game screen
def drawGameScreen():
    """Clear the window to white and push the frame to the screen."""
    WIN.fill((WHITE))
    update()
#Runs the code for the gameLoop
def gameLoop():
    """Run the main loop: cap at FPS, exit on window close, redraw each frame."""
    clock = pygame.time.Clock()
    run = True
    while run:
        clock.tick(FPS)  # limit frame rate
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        drawGameScreen()
#Main function.
def main():
    """Entry point: show the start screen, then shut pygame down cleanly."""
    startScreen()
    #gameLoop()
    # Bug fix: `pygame.QUIT()` tried to call the QUIT *event constant* (an
    # int), which raises TypeError; pygame.quit() is the shutdown function.
    pygame.quit()
#Keeps from running the main method after importing
if __name__ == "__main__":
    main()
6caa1f159e5ddcbe1713990f8ef40767663f37f2 | Python | Boombarm/onlinejudge | /Python/src/uri_beecrowd/STRING/P3313_Wordplay.py | UTF-8 | 774 | 3.296875 | 3 | [] | no_license | # author : Teerapat Phokhonwong
# Problem: 3313 - Wordplay
# Link: https://www.beecrowd.com.br/judge/en/problems/view/3313
# Answer: Accepted
# Submission: 12/12/22, 11:22:12 AM
# Runtime: 0.006s
# Note: สลับตำแหน่งท้ายไปที่ตำแหน่งแรก แล้วหาว่าคำเรียงลำดับ ลำดับไหนเล็กสุด และใหญ่สุด
counter = 0
# For each input word (until a lone '*'), generate all cyclic rotations and
# print the lexicographically smallest and largest ones.
while True:
    arr = set()
    word = input()
    if word == '*':
        break
    n = len(word)
    idx = 0
    while idx < n:
        # Rotate right by one: move the last character to the front.
        new_word = f"{word[n-1]}{word[0:n-1]}"
        arr.add(new_word)
        word = new_word
        idx += 1
    arr = sorted(arr)
    counter += 1
    # Bug fix: use arr[-1] instead of arr[n-1] — a word with repeated
    # rotations (e.g. "abab") yields fewer than n distinct strings, and
    # arr[n-1] raised IndexError.
    print(f"Caso {counter}: {arr[0]} {arr[-1]}")
36c3a1719bffdb9529cbcd21645ea93f28e20a99 | Python | wesbdss/AIDA-fluxo | /fluxo.py | UTF-8 | 2,314 | 3.109375 | 3 | [
"MIT"
] | permissive | import yaml
import random
import logging
#
# A estrutura
#
# init: LABEL
# label:
# response: "RESPOSTA"
# output:
# label1: "LABEL"
# label2: "LABEL"
class Fluxo:
    """Conversation-flow state machine loaded from a YAML file.

    Expected structure::

        init: LABEL
        label:
          response: "RESPONSE"     # string or list of strings
          output:
            intent1: "LABEL"
            intent2: "LABEL"
    """
    def __init__(self,diretory="fluxo.yaml"):
        # Path of the YAML flow definition (loaded lazily in main()).
        self.diretory = diretory
        logging.debug("{} - {}".format(self.__class__,"Fluxo Carregado {}".format(diretory)))
    def main(self):
        """Load the flow file, move to the initial state and print its response."""
        with open(self.diretory,'r') as f:
            self.states = yaml.load(f.read(),Loader=yaml.FullLoader)
        self.state = self.states['init']
        self.checkState()
        print(self.responseState())
    def restartState(self):
        """Reset the machine back to the initial state."""
        self.state = self.states['init']
        logging.debug("{} - {}".format(self.__class__,"Fluxo Resetado"))
    def responseState(self):
        """Return the current state's response (random pick when it is a list).

        NOTE(review): when the state has no 'response' key this only prints a
        warning and then raises KeyError on access — confirm intended behavior.
        """
        if "response" not in self.states[self.state].keys():
            print(self.__class__,"Não Há resposta disponível")
        response = ''
        if type(self.states[self.state]['response']) == list:
            response = self.states[self.state]['response']
            response = response[random.randrange(0,len(self.states[self.state]['response']))]
        else:
            response = self.states[self.state]['response']
        return response
    def nextState(self,intent):
        """Follow the transition for ``intent`` (falling back to "empty").

        Exits the process when the current state has no outputs (flow end)
        or when no matching transition exists.
        """
        logging.debug("{} - {}".format(self.__class__,"Proximo Estado"))
        if "output" not in self.states[self.state].keys():
            logging.debug("{} - {}".format(self.__class__,"Encerrar Fluxo"))
            exit(0)
        if intent in self.states[self.state]['output'].keys():
            self.state = self.states[self.state]['output'][intent]
            self.checkState()
        else:
            intent="empty"
            if intent in self.states[self.state]['output'].keys():
                self.state = self.states[self.state]['output'][intent]
                self.checkState()
            else:
                # Bug fix: was `plogging.debug(...)`, a NameError on this branch.
                logging.debug("{} - {}".format(self.__class__,"Não Há um Proximo estado"))
                exit(1)
    def checkState(self):
        """Return True if the current state label exists; otherwise exit(1)."""
        if self.state in self.states.keys():
            return True
        else:
            logging.debug("{} - {}".format(self.__class__,"Label não existente ou estado inválido"))
            exit(1)
# Bug fix: was `if if __name__ == "__main__":` — a syntax error that made the
# whole module unimportable.
if __name__ == "__main__":
    a = Fluxo()
    a.main()
| true |
3e3bb30f0734029ac7dad9da8c4228961a3ba7ab | Python | LucasBarbosaRocha/URI | /Strings/1276.py | UTF-8 | 769 | 3.140625 | 3 | [] | no_license | while True:
    # For each input line: sort its characters and compress them into
    # "first:last" intervals of consecutive/equal characters.
    try:
        entrada = input("")
        if (entrada == ""):
            print()
        else:
            entrada = entrada.replace(" ","")
            # "$" sentinel forces the last interval to be flushed in the loop.
            ordenada = ''.join(sorted(entrada)) + "$"
            intervalos = []
            first = 1
            intervalo = ""
            for i in range(len(ordenada)):
                if (first == 1):
                    # Open the first interval at the first character.
                    intervalo = ordenada[i]
                    first = 2
                else:
                    # Close the interval when the character changes AND is not
                    # the immediate successor of the previous one.
                    if (ordenada[i-1] != ordenada[i] and ord(ordenada[i-1]) - ord(ordenada[i]) != -1):
                        intervalo = intervalo + ":" + ordenada[i-1]
                        intervalos.append(intervalo)
                        intervalo = ordenada[i]
            # Print intervals comma-separated on a single line.
            if (len(intervalos) > 1):
                for i in range(len(intervalos)):
                    if(i < len(intervalos) - 1):
                        print("%s, " %intervalos[i], end='')
                    else:
                        print("%s" %intervalos[i])
            else:
                print(intervalos[0])
    except :
        # NOTE(review): bare except is used to stop at EOF, but it also
        # swallows any real error — consider `except EOFError`.
        break
84114432fa9cd05c1a89e577cd6c0089ee404040 | Python | complicat9716/EmbroideryFractal | /Old_PythonCodes/CircleJEF.py | UTF-8 | 3,741 | 2.796875 | 3 | [] | no_license | from math import *
# Going through all four sides of the square
# Clockwise, starting at the bottom left
# Sides are 1 cm long, in ten stiches 1mm each
# 246 is the unsigned 8-bit version of -10
##################################################################################################
# Starting stitch
stitches = [128, 2,
            0, 0,
            206, 206,]  # 206 = unsigned-8-bit encoding of -50 (0.1 mm units)
# control the step size (radians per stitch)
stepsize = 0.1
# starting point
t = 0
# end point
end = 2*pi
# scale factor (circle radius, in 0.1 mm units)
scale = 10
while t < end:
    ################################
    # x_axis: signed displacement must fit in -127..127
    x_axis = int(scale*cos(t))
    # Positive limit
    if x_axis > 127:
        print("X positive limit exceed!!")
    if x_axis < 0:
        # Bug fix: the old code converted first and then tested the *unsigned*
        # byte with `< 129`, which spuriously warned for every legitimate
        # non-negative value. Check the signed limit before converting.
        if x_axis < -127:
            print("X negative limit exceed!!")
        # Two's-complement style unsigned 8-bit encoding of negatives.
        x_axis = 256 + x_axis
    ################################
    # y_axis: same encoding and limit checks as x
    y_axis = int(scale*sin(t))
    if y_axis > 127:
        print("Y positive limit exceed!!")
    if y_axis < 0:
        if y_axis < -127:
            print("Y negative limit exceed!!")
        y_axis = 256 + y_axis
    ################################
    # stitch
    stitches += [x_axis, y_axis,]
    t = t + stepsize
stitches += [128, 16] # "Last stitch" command code
##################################################################################################
# jef headers
# All multi-byte fields below are little-endian 32-bit values.
jefBytes = [124, 0, 0, 0, # The byte offset of the first stitch
            10, 0, 0, 0, # Unknown number
            ord("2"), ord("0"), ord("2"), ord("0"), # YYYY
            ord("0"), ord("3"), ord("1"), ord("1"), # MMDD
            ord("1"), ord("2"), ord("3"), ord("0"), # HHMM
            ord("0"), ord("0"), 99, 0, # SS00
            1, 0, 0, 0, # Number of physical threads (1)
            (len(stitches)//2) & 0xff, (len(stitches)//2) >> 8 & 0xff, 0, 0, # Number of stitches
            3, 0, 0, 0, # Sewing machine hoop
            50, 0, 0, 0, # Left boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Top boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Right boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Bottom boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Left boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Top boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Right boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Bottom boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Left boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Top boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Right boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Bottom boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Left boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Top boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Right boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Bottom boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Left boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Top boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Right boundary distance from center (in 0.1 mm)
            50, 0, 0, 0, # Bottom boundary distance from center (in 0.1 mm)
            32, 0, 0, 0, # Thread color (orange)
            13, 0, 0, 0, # Unknown number
            ] + stitches
# write jef file
jefBytes = bytes(jefBytes)
with open("Circle_test.jef", "wb") as f:
    f.write(jefBytes)
| true |
a9f7678e283752d4c592ad1c9d5b1fc1b4fc9dd9 | Python | Lemmah/tensorflow | /exampleOne.py | UTF-8 | 1,780 | 3.765625 | 4 | [] | no_license | # Getting started with tensorflow
# Done by James Lemayian
import tensorflow as tf
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # also float32 implicitly
# This does not print the actual value of the nodes because the nodes are not evaluated
print(node1,node2)
# Running the nodes in a session actually evaluates them
sess = tf.Session()
print(sess.run([node1,node2]))
# Adding two nodes using the tf add operation
node3 = tf.add(node1, node2)
print("node3: ", node3)
print("sess.run(node3): ", sess.run(node3))
# Parametizing graphs to accept external inputs
# a placeholder is a promise to provide a value later
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adderNode = a + b # + sign provides a shortcut for tf.add(a,b)
print(sess.run(adderNode, {a:3, b:4.5}))
print(sess.run(adderNode, {a:[1,3], b: [4,4.5]}))
# complicate stuff, maybe add and tripple
addAndTripple = adderNode * 3.
print(sess.run(addAndTripple, {a: 3., b: 4.5}))
# How to take arbitrary value using a Variables
# Variables allow us to add trainable parameters to a graph
W = tf.Variable([.3], tf.float32)
b = tf.Variable([-.3], tf.float32)
x = tf.placeholder(tf.float32)
linearModel = W * x + b
# Initializing the global variables
sess.run(tf.global_variables_initializer())
# evaluate linear model for several values of x as follows
print(sess.run(linearModel, {x:[1,2,3,4]}))
# evaluating model on training data
y = tf.placeholder(tf.float32)
squaredDeltas = tf.square(linearModel - y)
loss = tf.reduce_sum(squaredDeltas)
print(sess.run(loss, {x:[1,2,3,4], y: [0,-1,-2,-3]}))
# Improving it manually by fixing the values of x and b
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x:[1,2,3,4], y: [0,-1,-2,-3]}))
| true |
6a9e1a7b2ada9470950b52b04cdd3858e664207c | Python | Zhifu-Xiao/Log-Analysis-with-Hadoop-and-Hive | /Mapper2.py | UTF-8 | 476 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import string
# Hadoop streaming mapper: for each Apache-style log line from July, emit the
# numeric response-byte count (0 for other months or non-numeric fields).
# NOTE(review): this is Python 2 only — `0L` literals and module-level
# string.maketrans/translate(table, deletechars) do not exist in Python 3.
all = string.maketrans('','')  # identity table; shadows the builtin all()
nodigs = all.translate(all, string.digits)  # every non-digit character
for line in sys.stdin:
    response_bytes = 0L
    line = line.strip()
    split = line.split(" ")
    month = split[2]  # assumes the month is the third space-separated field — verify log format
    if (month != 'Jul'):
        response_bytes = 0L
    else:
        # Last field is the response size; strip any non-digit characters.
        response_bytes = split[len(split) - 1].translate(all, nodigs)
        if not response_bytes:
            response_bytes = 0L
    response_bytes = int(response_bytes)
    print(response_bytes)
| true |
403efe59b1a9636228f3596d906a7adc91326b9d | Python | DiegoSantosWS/estudos-python | /estudo-6/lista-01.py | UTF-8 | 1,619 | 4.5625 | 5 | [] | no_license | # Criando uma lista com 3 inteiros
lista_numeros = [25, 78, 55]
# Os elemetos da lista iniciam com zero
# 0=25, 1=78, 2=55
# Na linha abaixo será impresso 78
print(lista_numeros[1])
# Alterando o segundo elemento da lista de 78 para 30
lista_numeros[1] = 30
# Na linha abaixo será impresso 30
print(lista_numeros[1])
# Acessando os elementos para calcular a média
notas = [7.5, 5.6, 9.5, 10.0]
media = (notas[0] + notas[1] + notas[2] + notas[3]) / 4
print(media)
# Alterando itens da lista pelo seu índice, outro exemplo:
bancos = ["Banco do Brasil", "CEF", "Banestes"]
print(bancos) # Resultado: ['Banco do Brasil', 'CEF', 'Banestes’]
bancos[1] = "Itaú"
print(bancos) # Resultado: ['Banco do Brasil', 'Itaú', 'Banestes’]
#Assim como no fatiamento de string, o último elemento de uma lista pode ser acessado pelo índice -1.
bancos = ["Banco do Brasil", "CEF", "Banestes"]
bancos[-1] = "Itaú"
print(bancos) # Resultado: ['Banco do Brasil', 'CEF', ’Itaú’]
#Para incrementar o valor de uma lista podemos usar o operador de adição.
numeros = [1, 2, 3, 10, 12]
numeros = numeros + [8, 7, 15]
print(numeros) #Resultado:[1, 2, 3, 10, 12, 8, 7, 15]
# Mas também podemos fazer assim, usando o operador de atribuição com adição “+=”:
numeros = [1, 2, 3, 10, 12]
numeros += [8, 7, 15]
print(numeros) # Resultado [1, 2, 3, 10, 12, 8, 7, 15]
bancos = ["Banco do Brasil", "CEF", "Banestes", "Itaú", "Sicoob", "Bradesco"]
bancos += ["Real", "Safra", "Santander"]
# Resultado: ['Banco do Brasil', 'CEF', 'Banestes', 'Itaú’,
# 'Sicoob', 'Bradesco', 'Real', 'Safra', 'Santander'] | true |
c5e97cecf8dd31fd517c5467e25a36a2d281b7d4 | Python | spdut/deep_qa | /deep_qa/data/data_generator.py | UTF-8 | 10,627 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | from typing import List
import logging
import random
from copy import deepcopy
from ..common.params import Params
from ..common.util import group_by_count
from . import IndexedDataset
from .instances import IndexedInstance
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class DataGenerator:
    """
    A ``DataGenerator`` takes an :class:`~.dataset.IndexedDataset` and converts it into a
    generator, yielding batches suitable for training. You might want to do this instead of just
    creating one large set of numpy arrays for a few reasons:

    #. Creating large arrays for your whole data could take a whole lot of memory, maybe more than
       is available on your machine.
    #. Creating one large array means padding all of your instances to the same length. This
       typically means you waste a whole lot of computation on padding tokens. Using a
       ``DataGenerator`` instead allows you to only pad each `batch` to the same length, instead of
       all of your instances across your whole dataset. We've typically seen a 4-5x speed up just
       from doing this (partially because Keras is pretty bad at doing variable-length computation;
       the speed-up isn't quite as large with plain tensorflow, I think).
    #. If we're varying the padding lengths in each batch, we can also vary the batch size, to
       optimize GPU memory usage. This means we'll use smaller batch sizes for big instances, and
       larger batch sizes for small instances. We've seen speedups up to 10-12x (on top of the
       4-5x speed up above) from doing this.

    Parameters
    ----------
    text_trainer: TextTrainer
        We need access to the ``TextTrainer`` object so we can call some methods on it, such as
        :func:`~deep_qa.training.TextTrainer.get_instance_sorting_keys`.
    dynamic_padding: bool, optional (default=False)
        If ``True``, we will set padding lengths based on the data `per batch`, instead of on the
        whole dataset. This only works if your model is structured to allow variable-length
        sequences (typically using ``None`` for specific dimensions when you build your model), and
        if you don't set padding values in
        :func:`~deep_qa.training.TextTrainer._set_padding_lengths`. This flag specifically is read
        in :func:`~deep_qa.training.TextTrainer._set_padding_lengths` to know if we should set
        certain padding values or not. It's handled correctly for ``num_sentence_words`` and
        ``num_word_characters`` in :class:`~deep_qa.training.TextTrainer`, but you need to be sure
        to implement it correctly in subclasses for this to work.
    padding_noise: double, optional (default=.1)
        When sorting by padding length, we add a bit of noise to the lengths, so that the sorting
        isn't deterministic. This parameter determines how much noise we add, as a percentage of
        the actual padding value for each instance.
    sort_every_epoch: bool, optional (default=True)
        If ``True``, we will re-sort the data after every epoch, then re-group the instances into
        batches. If ``padding_noise`` is zero, this does nothing, but if it's non-zero, this will
        give you a slightly different ordering, so you don't have exactly the same batches at every
        epoch. If you're doing adaptive batch sizes, this will lead to re-computing the adaptive
        batches each epoch, which could give a different number of batches for the whole dataset,
        which means each "epoch" might no longer correspond to `exactly` one pass over the data.
        This is probably a pretty minor issue, though.
    adaptive_batch_sizes: bool, optional (default=False)
        Only relevant if ``dynamic_padding`` is ``True``. If ``adaptive_batch_sizes`` is ``True``,
        we will vary the batch size to try to optimize GPU memory usage. Because padding lengths
        are done dynamically, we can have larger batches when padding lengths are smaller,
        maximizing our usage of the GPU. In order for this to work, you need to do two things: (1)
        override :func:`~TextTrainer._get_padding_memory_scaling` to give a big-O bound on memory
        usage given padding lengths, and (2) tune the `adaptive_memory_usage_constant` parameter
        for your particular model and GPU. See the documentation for
        :func:`~TextTrainer._get_padding_memory_scaling` for more information.
    adaptive_memory_usage_constant: int, optional (default=None)
        Only relevant if ``adaptive_batch_sizes`` is ``True``. This is a manually-tuned parameter,
        specific to a particular model architecture and amount of GPU memory (e.g., if you change
        the number of hidden layers in your model, this number will need to change). See
        :func:`~TextTrainer._get_padding_memory_scaling` for more detail. The recommended way to
        tune this parameter is to (1) use a fixed batch size, with ``biggest_batch_first`` set to
        ``True``, and find out the maximum batch size you can handle on your biggest instances
        without running out of memory. Then (2) turn on ``adaptive_batch_sizes``, and set this
        parameter so that you get the right batch size for your biggest instances. If you set the
        log level to ``DEBUG`` in ``scripts/run_model.py``, you can see the batch sizes that are
        computed.
    maximum_batch_size: int, optional (default=1000000)
        If we're using adaptive batch sizes, you can use this to be sure you do not create batches
        larger than this, even if you have enough memory to handle it on your GPU. You might
        choose to do this to keep smaller batches because you like the noisier gradient estimates
        that come from smaller batches, for instance.
    biggest_batch_first: bool, optional (default=False)
        This is largely for testing, to see how large of a batch you can safely use with your GPU.
        It's only meaningful if you're using dynamic padding - this will let you try out the
        largest batch that you have in the data `first`, so that if you're going to run out of
        memory, you know it early, instead of waiting through the whole batch to find out at the
        end that you're going to crash.
    """
    def __init__(self, text_trainer, params: Params):
        self.text_trainer = text_trainer
        self.dynamic_padding = params.pop('dynamic_padding', False)
        self.padding_noise = params.pop('padding_noise', 0.2)
        self.sort_every_epoch = params.pop('sort_every_epoch', True)
        self.adaptive_batch_sizes = params.pop('adaptive_batch_sizes', False)
        # Fixed: default was `False`; the docstring (and downstream arithmetic)
        # expect an int or None.
        self.adaptive_memory_usage_constant = params.pop('adaptive_memory_usage_constant', None)
        self.maximum_batch_size = params.pop('maximum_batch_size', 1000000)
        self.biggest_batch_first = params.pop('biggest_batch_first', False)

        #: This field can be read after calling ``create_generator`` to get the number of steps you
        #: should take per epoch in ``model.fit_generator`` or ``model.evaluate_generator`` for
        #: this data.
        self.last_num_batches = None

    def create_generator(self, dataset: IndexedDataset):
        """
        Main external API call: converts an ``IndexedDataset`` into a data generator suitable for
        use with Keras' ``fit_generator`` and related methods.
        """
        grouped_instances = self.__create_batches(dataset)
        self.last_num_batches = len(grouped_instances)
        def generator():
            while True:
                if self.sort_every_epoch:
                    # Re-sort/re-group on a fresh (unpadded) copy each epoch.
                    unpadded_dataset = deepcopy(dataset)
                    groups = self.__create_batches(unpadded_dataset)
                else:
                    groups = grouped_instances
                for group in groups:
                    batch = IndexedDataset(group)
                    batch.pad_instances(self.text_trainer.get_padding_lengths(), verbose=False)
                    yield batch.as_training_data()
        return generator()

    def __create_batches(self, dataset: IndexedDataset) -> List[List[IndexedInstance]]:
        """Sort (optionally), group into batches, and shuffle the batches."""
        if self.dynamic_padding:
            dataset.sort_by_padding(self.text_trainer.get_instance_sorting_keys(), self.padding_noise)
        instances = dataset.instances
        if self.adaptive_batch_sizes:
            grouped_instances = self.__adaptive_grouping(instances)
        else:
            grouped_instances = group_by_count(instances, self.text_trainer.batch_size, None)
            # group_by_count pads the final group with None; strip those out.
            grouped_instances[-1] = [instance for instance in grouped_instances[-1] if instance is not None]
        if self.biggest_batch_first:
            # We'll actually pop the last _two_ batches, because the last one might not
            # be full.
            last_batch = grouped_instances.pop()
            penultimate_batch = grouped_instances.pop()
            random.shuffle(grouped_instances)
            grouped_instances.insert(0, penultimate_batch)
            grouped_instances.insert(0, last_batch)
        else:
            random.shuffle(grouped_instances)
        return grouped_instances

    def __adaptive_grouping(self, instances: List[IndexedInstance]):
        """Greedily grow each batch until the estimated memory bound is hit."""
        batches = []
        current_batch = []
        current_lengths = {}
        logger.debug("Creating adaptive groups")  # fixed "adatpive" typo
        for instance in instances:
            current_batch.append(instance)
            instance_lengths = instance.get_padding_lengths()
            # Track the per-key maximum padding length over the current batch.
            for key in instance_lengths:
                current_lengths[key] = max(instance_lengths[key], current_lengths.get(key, -1))
            big_o_memory_constant = self.text_trainer.get_padding_memory_scaling(current_lengths)
            if (len(current_batch) * big_o_memory_constant > self.adaptive_memory_usage_constant
                        or len(current_batch) > self.maximum_batch_size):
                # The last instance pushed us over the limit; start a new batch with it.
                current_batch.pop()
                if logger.getEffectiveLevel() <= logging.DEBUG:
                    padding_lengths = IndexedDataset(current_batch).padding_lengths()
                    logger.debug("Batch size: %d; padding: %s", len(current_batch), padding_lengths)
                batches.append(current_batch)
                current_batch = [instance]
                current_lengths = instance_lengths
        if logger.getEffectiveLevel() <= logging.DEBUG:
            padding_lengths = IndexedDataset(current_batch).padding_lengths()
            logger.debug("Batch size: %d; padding: %s", len(current_batch), padding_lengths)
        batches.append(current_batch)
        return batches
| true |
392b4b90d83cd9ee4b1204757761efbaa54e04db | Python | oeuftete/advent-of-code | /adventofcode/year2020/day16/solution.py | UTF-8 | 5,658 | 2.765625 | 3 | [] | no_license | import logging
from dataclasses import dataclass, field
from typing import Tuple
from aocd.models import Puzzle
logging.basicConfig(level=logging.INFO)
@dataclass
class Ticket:
    """A single ticket: the ordered list of integer field values."""
    ticket_values: list = field(default_factory=list)
@dataclass
class TicketValidator:
    """Validates tickets against per-field boundary rules.

    ``boundary_rules`` maps a field name to a list of half-open
    ``(low, high)`` ranges; a value satisfies a field if it falls in any
    of that field's ranges.
    """
    boundary_rules: dict = field(default_factory=dict)

    def is_value_valid_for_rule(self, v, field_name):
        """Return True if ``v`` lies inside any boundary of ``field_name``."""
        matched = False
        # Deliberately no early exit: every boundary is logged, as before.
        for bounds in self.boundary_rules[field_name]:
            logging.debug("... boundary=%s", bounds)
            if v in range(bounds[0], bounds[1]):
                logging.debug("... value=%s VALID", v)
                matched = True
        return matched

    def is_valid(self, ticket) -> Tuple[bool, list]:
        """Check every value on ``ticket``; return (all_valid, invalid_values)."""
        logging.debug("Checking %s for validity...", ticket)
        bad_values = []
        for v in ticket.ticket_values:
            logging.debug("... value=%s", v)
            for rule_field_name in self.boundary_rules:
                logging.debug("... rule=%s", rule_field_name)
                if self.is_value_valid_for_rule(v, rule_field_name):
                    break
            else:
                # No rule accepted this value.
                logging.debug("... value=%s INVALID", v)
                bad_values.append(v)
        return (not bad_values, bad_values)
@dataclass
class TicketNotebook:
    """Parses the AoC 2020 day-16 input and identifies which ticket position
    corresponds to which named field.
    """
    # Raw input lines: rules, "your ticket", "nearby tickets", separated by blanks.
    notes: list = field(default_factory=list)
    # Parsed nearby Tickets.
    nearby: list = field(default_factory=list)
    validator: TicketValidator = field(default_factory=TicketValidator)
    # field name -> position index on a ticket (filled by _build_field_map).
    field_map: dict = field(init=False, default_factory=dict)
    def __post_init__(self):
        # `mode` advances on each blank line: 0=rules, 1=my ticket, 2=nearby.
        mode = 0
        rules = {}
        for line in self.notes:
            if not line:
                mode += 1
                continue
            if mode == 0:
                # e.g. "row: 1-3 or 5-7" -> [(1, 4), (5, 8)] (half-open ranges).
                ticket_field, ranges = line.split(": ")
                boundaries = []
                for raw_range in ranges.split(" or "):
                    incl_start, incl_end = raw_range.split("-")
                    boundaries.append((int(incl_start), int(incl_end) + 1))
                rules[ticket_field] = boundaries
            if mode == 1:
                if line.startswith("your ticket"):
                    continue
                self.my_ticket = Ticket([int(n) for n in line.split(",")])
            if mode == 2:
                if line.startswith("nearby"):
                    continue
                self.nearby.append(Ticket([int(n) for n in line.split(",")]))
        self.validator = TicketValidator(rules)
        self._build_field_map()
    def _build_field_map(self):
        """Constraint elimination: for each position, keep only field names
        consistent with every valid nearby ticket, then repeatedly lock in
        positions that have a single remaining candidate."""
        ticket_fields = list(self.validator.boundary_rules.keys())
        # possible_fields[i] = field names still possible for position i.
        possible_fields = [ticket_fields[:] for _ in range(len(ticket_fields))]
        for valid_ticket in self.valid_nearby:
            for ti, value in enumerate(valid_ticket.ticket_values):
                for ticket_field in ticket_fields:
                    logging.debug(
                        "Checking if field [%s] can be valid for value [%s]...",
                        ticket_field,
                        value,
                    )
                    if not self.validator.is_value_valid_for_rule(value, ticket_field):
                        logging.debug("... field [%s] was INVALID ...", ticket_field)
                        logging.debug(
                            "... current possible fields: %s", possible_fields[ti]
                        )
                        if ticket_field in possible_fields[ti]:
                            possible_fields[ti].remove(ticket_field)
                            logging.debug("... removed [%s]", ticket_field)
        logging.debug("All possible fields: %s", possible_fields)
        # Iteratively eliminate: any position with exactly one candidate pins
        # that field, which can then be removed from every other position.
        while any(len(pf) > 1 for pf in possible_fields):
            # First, find any fields that are the only possibility
            identified_fields = []
            for pf in filter(lambda pf: len(pf) == 1, possible_fields):
                identified_fields.append(pf[0])
            # Then, remove those from any others
            for pfi, pf in enumerate(possible_fields):
                if len(pf) == 1:
                    continue
                for idf in identified_fields:
                    if idf in pf:
                        pf.remove(idf)
                        possible_fields[pfi] = pf
        logging.debug("All possible fields after elimination: %s", possible_fields)
        for i, field_names in enumerate(possible_fields):
            assert len(field_names) == 1
            self.field_map[field_names[0]] = i
    def validate_nearby(self, n) -> bool:
        """Return True if the n-th nearby ticket passes validation."""
        return self.validator.is_valid(self.nearby[n])[0]
    @property
    def valid_nearby(self):
        """All nearby tickets that pass validation."""
        return list(filter(lambda nt: self.validator.is_valid(nt)[0], self.nearby))
    @property
    def nearby_error_rate(self) -> int:
        """Part 1 answer: sum of all invalid values over all nearby tickets."""
        rate = 0
        for nt in self.nearby:
            rate += sum(self.validator.is_valid(nt)[1])
        return rate
    def field_no(self, field_name) -> int:
        """Return the ticket position index for ``field_name``."""
        return self.field_map[field_name]
    @property
    def departure_product(self) -> int:
        """Part 2 answer: product of my ticket's 'departure*' field values."""
        product = 1
        for field_name, field_value in self.field_map.items():
            if field_name.startswith("departure"):
                product *= self.my_ticket.ticket_values[field_value]
        return product
if __name__ == "__main__":
    # Fetch the puzzle input via aocd, solve both parts, and submit answers.
    puzzle = Puzzle(year=2020, day=16)
    notes = puzzle.input_data.strip().splitlines()
    ticket_notebook = TicketNotebook(notes)
    puzzle.answer_a = ticket_notebook.nearby_error_rate
    puzzle.answer_b = ticket_notebook.departure_product
5749862cbbecd9d122b587ec0e532df1bd0d664f | Python | noreallyimfine/Rubiks-Cube | /test_cube_mechanics.py | UTF-8 | 32,053 | 2.65625 | 3 | [
"MIT"
] | permissive | import unittest
from cube import RubiksCube
class CubeTurnTests(unittest.TestCase):
def setUp(self):
self.cube = RubiksCube()
def test_L_prime(self):
# Corners
top_back_left = self.cube.top_layer['back_left'].sides.copy()
top_front_left = self.cube.top_layer['front_left'].sides.copy()
bot_front_left = self.cube.bot_layer['front_left'].sides.copy()
bot_back_left = self.cube.bot_layer['back_left'].sides.copy()
# Edges
mid_back_left = self.cube.mid_layer['back_left'].sides.copy()
top_left_middle = self.cube.top_layer['left_middle'].sides.copy()
mid_front_left = self.cube.mid_layer['front_left'].sides.copy()
bot_left_middle = self.cube.bot_layer['left_middle'].sides.copy()
self.cube._L_prime()
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], top_front_left['top'])
self.assertEqual(self.cube.top_layer['back_left'].sides['top'], top_front_left['front'])
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], top_front_left['left'])
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], bot_front_left['left'])
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], bot_front_left['bottom'])
self.assertEqual(self.cube.top_layer['front_left'].sides['top'], bot_front_left['front'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], bot_back_left['left'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], bot_back_left['bottom'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['bottom'], bot_back_left['back'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], top_back_left['left'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], top_back_left['top'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['bottom'], top_back_left['back'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['left'], top_left_middle['left'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['back'], top_left_middle['top'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['left'], mid_front_left['left'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['top'], mid_front_left['front'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['left'], bot_left_middle['left'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['front'], bot_left_middle['bottom'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], mid_back_left['left'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['bottom'], mid_back_left['back'])
def test_L(self):
# Corners
top_back_left = self.cube.top_layer['back_left'].sides.copy()
top_front_left = self.cube.top_layer['front_left'].sides.copy()
bot_front_left = self.cube.bot_layer['front_left'].sides.copy()
bot_back_left = self.cube.bot_layer['back_left'].sides.copy()
# Edges
mid_back_left = self.cube.mid_layer['back_left'].sides.copy()
top_left_middle = self.cube.top_layer['left_middle'].sides.copy()
mid_front_left = self.cube.mid_layer['front_left'].sides.copy()
bot_left_middle = self.cube.bot_layer['left_middle'].sides.copy()
self.cube._L()
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], bot_back_left['bottom'])
self.assertEqual(self.cube.top_layer['back_left'].sides['top'], bot_back_left['back'])
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], bot_back_left['left'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], bot_front_left['left'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['bottom'], bot_front_left['front'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], bot_front_left['bottom'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], top_front_left['left'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], top_front_left['top'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['bottom'], top_front_left['front'])
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], top_back_left['left'])
self.assertEqual(self.cube.top_layer['front_left'].sides['top'], top_back_left['back'])
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], top_back_left['top'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], mid_front_left['left'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['bottom'], mid_front_left['front'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['left'], top_left_middle['left'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['front'], top_left_middle['top'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['left'], mid_back_left['left'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['top'], mid_back_left['back'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['left'], bot_left_middle['left'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['back'], bot_left_middle['bottom'])
def test_R(self):
# Corners
top_back_right = self.cube.top_layer['back_right'].sides.copy()
top_front_right = self.cube.top_layer['front_right'].sides.copy()
bot_front_right = self.cube.bot_layer['front_right'].sides.copy()
bot_back_right = self.cube.bot_layer['back_right'].sides.copy()
# Edges
mid_back_right = self.cube.mid_layer['back_right'].sides.copy()
top_right_middle = self.cube.top_layer['right_middle'].sides.copy()
mid_front_right = self.cube.mid_layer['front_right'].sides.copy()
bot_right_middle = self.cube.bot_layer['right_middle'].sides.copy()
self.cube._R()
self.assertEqual(self.cube.top_layer['back_right'].sides['top'], top_front_right['front'])
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], top_front_right['top'])
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], top_front_right['right'])
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], bot_front_right['bottom'])
self.assertEqual(self.cube.top_layer['front_right'].sides['top'], bot_front_right['front'])
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], bot_front_right['right'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['bottom'], bot_back_right['back'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], bot_back_right['bottom'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], bot_back_right['right'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], top_back_right['top'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['bottom'], top_back_right['back'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], top_back_right['right'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['top'], mid_front_right['front'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['right'], mid_front_right['right'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['front'], bot_right_middle['bottom'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['right'], bot_right_middle['right'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['bottom'], mid_back_right['back'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], mid_back_right['right'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['back'], top_right_middle['top'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['right'], top_right_middle['right'])
def test_R_prime(self):
# Corners
top_back_right = self.cube.top_layer['back_right'].sides.copy()
top_front_right = self.cube.top_layer['front_right'].sides.copy()
bot_front_right = self.cube.bot_layer['front_right'].sides.copy()
bot_back_right = self.cube.bot_layer['back_right'].sides.copy()
# Edges
mid_back_right = self.cube.mid_layer['back_right'].sides.copy()
top_right_middle = self.cube.top_layer['right_middle'].sides.copy()
mid_front_right = self.cube.mid_layer['front_right'].sides.copy()
bot_right_middle = self.cube.bot_layer['right_middle'].sides.copy()
self.cube._R_prime()
self.assertEqual(self.cube.top_layer['back_right'].sides['top'], bot_back_right['back'])
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], bot_back_right['bottom'])
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], bot_back_right['right'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['bottom'], bot_front_right['front'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], bot_front_right['bottom'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], bot_front_right['right'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['bottom'], top_front_right['front'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], top_front_right['top'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], top_front_right['right'])
self.assertEqual(self.cube.top_layer['front_right'].sides['top'], top_back_right['back'])
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], top_back_right['top'])
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], top_back_right['right'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['top'], mid_back_right['back'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['right'], mid_back_right['right'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['back'], bot_right_middle['bottom'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['right'], bot_right_middle['right'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['bottom'], mid_front_right['front'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], mid_front_right['right'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['front'], top_right_middle['top'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['right'], top_right_middle['right'])
def test_F(self):
top_front_right = self.cube.top_layer['front_right'].sides.copy()
top_front_left = self.cube.top_layer['front_left'].sides.copy()
bot_front_left = self.cube.bot_layer['front_left'].sides.copy()
bot_front_right = self.cube.bot_layer['front_right'].sides.copy()
top_front_middle = self.cube.top_layer['front_middle'].sides.copy()
mid_front_left = self.cube.mid_layer['front_left'].sides.copy()
bot_front_middle = self.cube.bot_layer['front_middle'].sides.copy()
mid_front_right = self.cube.mid_layer['front_right'].sides.copy()
self.cube._F()
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], top_front_left['top'])
self.assertEqual(self.cube.top_layer['front_right'].sides['top'], top_front_left['left'])
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], top_front_left['front'])
self.assertEqual(self.cube.top_layer['front_left'].sides['top'], bot_front_left['left'])
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], bot_front_left['bottom'])
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], bot_front_left['front'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], bot_front_right['bottom'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['bottom'], bot_front_right['right'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], bot_front_right['front'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['bottom'], top_front_right['right'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], top_front_right['top'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], top_front_right['front'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['top'], mid_front_left['left'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['front'], mid_front_left['front'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['left'], bot_front_middle['bottom'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['front'], bot_front_middle['front'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['bottom'], mid_front_right['right'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], mid_front_right['front'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['right'], top_front_middle['top'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['front'], top_front_middle['front'])
def test_F_prime(self):
top_front_right = self.cube.top_layer['front_right'].sides.copy()
top_front_left = self.cube.top_layer['front_left'].sides.copy()
bot_front_left = self.cube.bot_layer['front_left'].sides.copy()
bot_front_right = self.cube.bot_layer['front_right'].sides.copy()
top_front_middle = self.cube.top_layer['front_middle'].sides.copy()
mid_front_left = self.cube.mid_layer['front_left'].sides.copy()
bot_front_middle = self.cube.bot_layer['front_middle'].sides.copy()
mid_front_right = self.cube.mid_layer['front_right'].sides.copy()
self.cube._F_prime()
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], top_front_right['top'])
self.assertEqual(self.cube.top_layer['front_left'].sides['top'], top_front_right['right'])
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], top_front_right['front'])
self.assertEqual(self.cube.top_layer['front_right'].sides['top'], bot_front_right['right'])
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], bot_front_right['bottom'])
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], bot_front_right['front'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['bottom'], bot_front_left['left'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], bot_front_left['bottom'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], bot_front_left['front'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['bottom'], top_front_left['left'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], top_front_left['top'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], top_front_left['front'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['top'], mid_front_right['right'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['front'], mid_front_right['front'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['right'], bot_front_middle['bottom'])
self.assertEqual(self.cube.mid_layer['front_right'].sides['front'], bot_front_middle['front'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['bottom'], mid_front_left['left'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], mid_front_left['front'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['left'], top_front_middle['top'])
self.assertEqual(self.cube.mid_layer['front_left'].sides['front'], top_front_middle['front'])
def test_B(self):
top_back_right = self.cube.top_layer['back_right'].sides.copy()
bot_back_right = self.cube.bot_layer['back_right'].sides.copy()
bot_back_left = self.cube.bot_layer['back_left'].sides.copy()
top_back_left = self.cube.top_layer['back_left'].sides.copy()
top_back_middle = self.cube.top_layer['back_middle'].sides.copy()
mid_back_right = self.cube.mid_layer['back_right'].sides.copy()
bot_back_middle = self.cube.bot_layer['back_middle'].sides.copy()
mid_back_left = self.cube.mid_layer['back_left'].sides.copy()
self.cube._B()
self.assertEqual(self.cube.top_layer['back_right'].sides['top'], bot_back_right['right'])
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], bot_back_right['bottom'])
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], bot_back_right['back'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], bot_back_left['bottom'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['bottom'], bot_back_left['left'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], bot_back_left['back'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['bottom'], top_back_left['left'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], top_back_left['top'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], top_back_left['back'])
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], top_back_right['top'])
self.assertEqual(self.cube.top_layer['back_left'].sides['top'], top_back_right['right'])
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], top_back_right['back'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['top'], mid_back_right['right'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['back'], mid_back_right['back'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['right'], bot_back_middle['bottom'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['back'], bot_back_middle['back'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['bottom'], mid_back_left['left'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], mid_back_left['back'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['left'], top_back_middle['top'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['back'], top_back_middle['back'])
def test_B_prime(self):
top_back_right = self.cube.top_layer['back_right'].sides.copy()
bot_back_right = self.cube.bot_layer['back_right'].sides.copy()
bot_back_left = self.cube.bot_layer['back_left'].sides.copy()
top_back_left = self.cube.top_layer['back_left'].sides.copy()
top_back_middle = self.cube.top_layer['back_middle'].sides.copy()
mid_back_right = self.cube.mid_layer['back_right'].sides.copy()
bot_back_middle = self.cube.bot_layer['back_middle'].sides.copy()
mid_back_left = self.cube.mid_layer['back_left'].sides.copy()
self.cube._B_prime()
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], top_back_left['top'])
self.assertEqual(self.cube.top_layer['back_right'].sides['top'], top_back_left['left'])
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], top_back_left['back'])
self.assertEqual(self.cube.top_layer['back_left'].sides['top'], bot_back_left['left'])
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], bot_back_left['bottom'])
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], bot_back_left['back'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], bot_back_right['bottom'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['bottom'], bot_back_right['right'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], bot_back_right['back'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['bottom'], top_back_right['right'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], top_back_right['top'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], top_back_right['back'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['top'], mid_back_left['left'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['back'], mid_back_left['back'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['left'], bot_back_middle['bottom'])
self.assertEqual(self.cube.mid_layer['back_left'].sides['back'], bot_back_middle['back'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['bottom'], mid_back_right['right'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], mid_back_right['back'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['right'], top_back_middle['top'])
self.assertEqual(self.cube.mid_layer['back_right'].sides['back'], top_back_middle['back'])
def test_U(self):
top_front_right = self.cube.top_layer['front_right'].sides.copy()
top_back_right = self.cube.top_layer['back_right'].sides.copy()
top_back_left = self.cube.top_layer['back_left'].sides.copy()
top_front_left = self.cube.top_layer['front_left'].sides.copy()
top_front_middle = self.cube.top_layer['front_middle'].sides.copy()
top_right_middle = self.cube.top_layer['right_middle'].sides.copy()
top_back_middle = self.cube.top_layer['back_middle'].sides.copy()
top_left_middle = self.cube.top_layer['left_middle'].sides.copy()
self.cube._U()
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], top_back_right['right'])
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], top_back_right['back'])
self.assertEqual(self.cube.top_layer['front_right'].sides['top'], top_back_right['top'])
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], top_back_left['back'])
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], top_back_left['left'])
self.assertEqual(self.cube.top_layer['back_right'].sides['top'], top_back_left['top'])
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], top_front_left['left'])
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], top_front_left['front'])
self.assertEqual(self.cube.top_layer['back_left'].sides['top'], top_front_left['top'])
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], top_front_right['front'])
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], top_front_right['right'])
self.assertEqual(self.cube.top_layer['front_left'].sides['top'], top_front_right['top'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['front'], top_right_middle['right'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['top'], top_right_middle['top'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['right'], top_back_middle['back'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['top'], top_back_middle['top'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['back'], top_left_middle['left'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['top'], top_left_middle['top'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['left'], top_front_middle['front'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['top'], top_front_middle['top'])
def test_U_prime(self):
top_front_right = self.cube.top_layer['front_right'].sides.copy()
top_back_right = self.cube.top_layer['back_right'].sides.copy()
top_back_left = self.cube.top_layer['back_left'].sides.copy()
top_front_left = self.cube.top_layer['front_left'].sides.copy()
top_front_middle = self.cube.top_layer['front_middle'].sides.copy()
top_right_middle = self.cube.top_layer['right_middle'].sides.copy()
top_back_middle = self.cube.top_layer['back_middle'].sides.copy()
top_left_middle = self.cube.top_layer['left_middle'].sides.copy()
self.cube._U_prime()
self.assertEqual(self.cube.top_layer['front_right'].sides['right'], top_front_left['front'])
self.assertEqual(self.cube.top_layer['front_right'].sides['front'], top_front_left['left'])
self.assertEqual(self.cube.top_layer['front_right'].sides['top'], top_front_left['top'])
self.assertEqual(self.cube.top_layer['front_left'].sides['front'], top_back_left['left'])
self.assertEqual(self.cube.top_layer['front_left'].sides['left'], top_back_left['back'])
self.assertEqual(self.cube.top_layer['front_left'].sides['top'], top_back_left['top'])
self.assertEqual(self.cube.top_layer['back_left'].sides['left'], top_back_right['back'])
self.assertEqual(self.cube.top_layer['back_left'].sides['back'], top_back_right['right'])
self.assertEqual(self.cube.top_layer['back_left'].sides['top'], top_back_right['top'])
self.assertEqual(self.cube.top_layer['back_right'].sides['back'], top_front_right['right'])
self.assertEqual(self.cube.top_layer['back_right'].sides['right'], top_front_right['front'])
self.assertEqual(self.cube.top_layer['back_right'].sides['top'], top_front_right['top'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['front'], top_left_middle['left'])
self.assertEqual(self.cube.top_layer['front_middle'].sides['top'], top_left_middle['top'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['left'], top_back_middle['back'])
self.assertEqual(self.cube.top_layer['left_middle'].sides['top'], top_back_middle['top'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['back'], top_right_middle['right'])
self.assertEqual(self.cube.top_layer['back_middle'].sides['top'], top_right_middle['top'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['right'], top_front_middle['front'])
self.assertEqual(self.cube.top_layer['right_middle'].sides['top'], top_front_middle['top'])
def test_D(self):
bot_front_right = self.cube.bot_layer['front_right'].sides.copy()
bot_front_left = self.cube.bot_layer['front_left'].sides.copy()
bot_back_left = self.cube.bot_layer['back_left'].sides.copy()
bot_back_right = self.cube.bot_layer['back_right'].sides.copy()
bot_front_middle = self.cube.bot_layer['front_middle'].sides.copy()
bot_left_middle = self.cube.bot_layer['left_middle'].sides.copy()
bot_back_middle = self.cube.bot_layer['back_middle'].sides.copy()
bot_right_middle = self.cube.bot_layer['right_middle'].sides.copy()
self.cube._D()
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], bot_front_left['front'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], bot_front_left['left'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['bottom'], bot_front_left['bottom'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], bot_back_left['left'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], bot_back_left['back'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['bottom'], bot_back_left['bottom'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], bot_back_right['back'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], bot_back_right['right'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['bottom'], bot_back_right['bottom'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], bot_front_right['right'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], bot_front_right['front'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['bottom'], bot_front_right['bottom'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], bot_left_middle['left'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['bottom'], bot_left_middle['bottom'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], bot_back_middle['back'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['bottom'], bot_back_middle['bottom'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], bot_right_middle['right'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['bottom'], bot_right_middle['bottom'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], bot_front_middle['front'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['bottom'], bot_front_middle['bottom'])
def test_D_prime(self):
bot_front_right = self.cube.bot_layer['front_right'].sides.copy()
bot_front_left = self.cube.bot_layer['front_left'].sides.copy()
bot_back_left = self.cube.bot_layer['back_left'].sides.copy()
bot_back_right = self.cube.bot_layer['back_right'].sides.copy()
bot_front_middle = self.cube.bot_layer['front_middle'].sides.copy()
bot_left_middle = self.cube.bot_layer['left_middle'].sides.copy()
bot_back_middle = self.cube.bot_layer['back_middle'].sides.copy()
bot_right_middle = self.cube.bot_layer['right_middle'].sides.copy()
self.cube._D_prime()
self.assertEqual(self.cube.bot_layer['front_right'].sides['front'], bot_back_right['right'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['right'], bot_back_right['back'])
self.assertEqual(self.cube.bot_layer['front_right'].sides['bottom'], bot_back_right['bottom'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['right'], bot_back_left['back'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['back'], bot_back_left['left'])
self.assertEqual(self.cube.bot_layer['back_right'].sides['bottom'], bot_back_left['bottom'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['back'], bot_front_left['left'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['left'], bot_front_left['front'])
self.assertEqual(self.cube.bot_layer['back_left'].sides['bottom'], bot_front_left['bottom'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['left'], bot_front_right['front'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['front'], bot_front_right['right'])
self.assertEqual(self.cube.bot_layer['front_left'].sides['bottom'], bot_front_right['bottom'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['front'], bot_right_middle['right'])
self.assertEqual(self.cube.bot_layer['front_middle'].sides['bottom'], bot_right_middle['bottom'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['right'], bot_back_middle['back'])
self.assertEqual(self.cube.bot_layer['right_middle'].sides['bottom'], bot_back_middle['bottom'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['back'], bot_left_middle['left'])
self.assertEqual(self.cube.bot_layer['back_middle'].sides['bottom'], bot_left_middle['bottom'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['left'], bot_front_middle['front'])
self.assertEqual(self.cube.bot_layer['left_middle'].sides['bottom'], bot_front_middle['bottom'])
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
2b5a3a95890db38dd2662278d72dfc1accd729ee | Python | sanchitkalra/Classes | /conditions_and_loops/odd_and_even_sum.py | UTF-8 | 303 | 3.359375 | 3 | [] | no_license | num = int(input())
count = 0
n_str = str(num)
odd_sum = 0
even_sum = 0
for i in n_str:
count+=1
i = 0
while i < count:
y = int(n_str[i])
if y%2 == 0:
even_sum += int(n_str[i])
else:
odd_sum += int(n_str[i])
i += 1
print(even_sum, " ", odd_sum) | true |
f17e02e0c0d3988e05c6ed351726fb3e260a862b | Python | shaswata56/RsaCtfTool | /_primefac/_factor_algo/_mpqs.py | UTF-8 | 10,407 | 3.140625 | 3 | [
"Beerware"
] | permissive | from __future__ import division
# Multiple Polynomial Quadratic Sieve
# Most of this function is copied verbatim from
# https://codegolf.stackexchange.com/questions/8629/9088#9088
def mpqs(n):
    """
    Factor n using the Multiple Polynomial Quadratic Sieve and return one
    factor of n (which may be n itself when n is prime).

    When the bound proves insufficiently large, we throw out all our work and
    start over.
    TODO: When this happens, get more data, but don't trash what we already
    have.
    TODO: Rewrite to get a few more relations before proceeding to the
    linear algebra.
    TODO: When we need to increase the bound, what is the optimal increment?
    """
    from _primefac._arith import ispower, isqrt, ilog, gcd, mod_sqrt, legendre
    from _primefac._arith import modinv
    from _primefac._prime import isprime, nextprime
    from _primefac._util import listprod, mpz
    from six.moves import xrange
    from math import log
    # Special cases: this function poorly handles primes and perfect powers:
    m = ispower(n)
    if m:
        return m
    if isprime(n):
        return n
    root_2n = isqrt(2 * n)
    bound = ilog(n ** 6, 10) ** 2  # formula chosen by experiment
    # Outer retry loop: on IndexError (see below) the whole sieve is rebuilt
    # from scratch with a 20% larger factor-base bound.
    while True:
        try:
            prime, mod_root, log_p, num_prime = [], [], [], 0
            # find a number of small primes for which n is a quadratic residue
            p = 2
            while p < bound or num_prime < 3:
                leg = legendre(n % p, p)
                if leg == 1:
                    prime += [p]
                    # the rhs was [int(mod_sqrt(n, p))].
                    # If we get errors, put it back.
                    mod_root += [mod_sqrt(n, p)]
                    log_p += [log(p, 10)]
                    num_prime += 1
                elif leg == 0:
                    # legendre == 0 means p divides n: p is a factor, done.
                    return p
                p = nextprime(p)
            x_max = len(prime) * 60  # size of the sieve
            # maximum value on the sieved range
            m_val = (x_max * root_2n) >> 1
            """
            fudging the threshold down a bit makes it easier to find powers of
            primes as factors as well as partial-partial relationships, but it
            also makes the smoothness check slower. there's a happy medium
            somewhere, depending on how efficient the smoothness check is
            """
            thresh = log(m_val, 10) * 0.735
            # skip small primes. they contribute very little to the log sum
            # and add a lot of unnecessary entries to the table instead, fudge
            # the threshold down a bit, assuming ~1/4 of them pass
            min_prime = mpz(thresh * 3)
            fudge = sum(log_p[i] for i, p in enumerate(prime) if p < min_prime)
            fudge = fudge // 4
            thresh -= fudge
            smooth, used_prime, partial = [], set(), {}
            num_smooth, num_used_prime, num_partial = 0, 0, 0
            num_poly, root_A = 0, isqrt(root_2n // x_max)
            # Keep generating polynomials until we have more smooth relations
            # than distinct primes used (guarantees a linear dependency).
            while num_smooth <= num_used_prime:
                # find an integer value A such that:
                # A is =~ sqrt(2*n) // x_max
                # A is a perfect square
                # sqrt(A) is prime, and n is a quadratic residue mod sqrt(A)
                while True:
                    root_A = nextprime(root_A)
                    leg = legendre(n, root_A)
                    if leg == 1:
                        break
                    elif leg == 0:
                        return root_A
                A = root_A ** 2
                # solve for an adequate B. B*B is a quadratic residue mod n,
                # such that B*B-A*C = n. this is unsolvable if n is not a
                # quadratic residue mod sqrt(A)
                b = mod_sqrt(n, root_A)
                B = (b + (n - b * b) * modinv(b + b, root_A)) % A
                C = (B * B - n) // A  # B*B-A*C = n <=> C = (B*B-n)//A
                num_poly += 1
                # sieve for prime factors
                sums, i = [0.0] * (2 * x_max), 0
                for p in prime:
                    if p < min_prime:
                        i += 1
                        continue
                    logp = log_p[i]
                    g = gcd(A, p)
                    if g == p:
                        continue
                    inv_A = modinv(A // g, p // g) * g
                    # modular root of the quadratic
                    a, b, k = (
                        mpz(((mod_root[i] - B) * inv_A) % p),
                        mpz(((p - mod_root[i] - B) * inv_A) % p),
                        0,
                    )
                    # Accumulate log(p) at every sieve position hit by either
                    # root, on both the positive and negative halves.
                    while k < x_max:
                        if k + a < x_max:
                            sums[k + a] += logp
                        if k + b < x_max:
                            sums[k + b] += logp
                        if k:
                            sums[k - a + x_max] += logp
                            sums[k - b + x_max] += logp
                        k += p
                    i += 1
                # check for smooths
                i = 0
                for v in sums:
                    if v > thresh:
                        x, vec, sqr = x_max - i if i > x_max else i, set(), []
                        # because B*B-n = A*C
                        # (A*x+B)^2 - n = A*A*x*x+2*A*B*x + B*B - n
                        #               = A*(A*x*x+2*B*x+C)
                        # gives the congruency
                        # (A*x+B)^2 = A*(A*x*x+2*B*x+C) (mod n)
                        # because A is chosen to be square, it doesn't
                        # need to be sieved
                        sieve_val = (A * x + 2 * B) * x + C
                        if sieve_val < 0:
                            vec, sieve_val = {-1}, -sieve_val
                        for p in prime:
                            while sieve_val % p == 0:
                                if p in vec:
                                    """
                                    track perfect sqr facs to avoid sqrting
                                    something huge at the end
                                    """
                                    sqr += [p]
                                vec ^= {p}
                                sieve_val = mpz(sieve_val // p)
                        if sieve_val == 1:  # smooth
                            smooth += [(vec, (sqr, (A * x + B), root_A))]
                            used_prime |= vec
                        elif sieve_val in partial:
                            """
                            combine two partials to make a (xor) smooth that
                            is, every prime factor with an odd power is in our
                            factor base
                            """
                            pair_vec, pair_vals = partial[sieve_val]
                            sqr += list(vec & pair_vec) + [sieve_val]
                            vec ^= pair_vec
                            smooth += [
                                (
                                    vec,
                                    (
                                        sqr + pair_vals[0],
                                        (A * x + B) * pair_vals[1],
                                        root_A * pair_vals[2],
                                    ),
                                )
                            ]
                            used_prime |= vec
                            num_partial += 1
                        else:
                            # save partial for later pairing
                            partial[sieve_val] = (vec, (sqr, A * x + B, root_A))
                    i += 1
                num_smooth, num_used_prime = len(smooth), len(used_prime)
            used_prime = sorted(list(used_prime))
            # set up bit fields for gaussian elimination
            masks, mask, bitfields = [], 1, [0] * num_used_prime
            for vec, _ in smooth:
                masks += [mask]
                i = 0
                for p in used_prime:
                    if p in vec:
                        bitfields[i] |= mask
                    i += 1
                mask <<= 1
            # row echelon form
            offset = 0
            null_cols = []
            for col in xrange(num_smooth):
                pivot = (
                    bitfields[col - offset] & masks[col] == 0
                )  # This occasionally throws IndexErrors.
                # TODO: figure out why it throws errors and fix it.
                for row in xrange(col + 1 - offset, num_used_prime):
                    if bitfields[row] & masks[col]:
                        if pivot:
                            bitfields[col - offset], bitfields[row], pivot = (
                                bitfields[row],
                                bitfields[col - offset],
                                False,
                            )
                        else:
                            bitfields[row] ^= bitfields[col - offset]
                if pivot:
                    null_cols += [col]
                    offset += 1
            # reduced row echelon form
            for row in xrange(num_used_prime):
                mask = bitfields[row] & -bitfields[row]  # lowest set bit
                for up_row in xrange(row):
                    if bitfields[up_row] & mask:
                        bitfields[up_row] ^= bitfields[row]
            # check for non-trivial congruencies
            # TODO: if none exist, check combinations of null space columns...
            # if _still_ none exist, sieve more values
            for col in null_cols:
                all_vec, (lh, rh, rA) = smooth[col]
                lhs = lh  # sieved values (left hand side)
                rhs = [rh]  # sieved values - n (right hand side)
                rAs = [rA]  # root_As (cofactor of lhs)
                i = 0
                for field in bitfields:
                    if field & masks[col]:
                        vec, (lh, rh, rA) = smooth[i]
                        lhs += list(all_vec & vec) + lh
                        all_vec ^= vec
                        rhs += [rh]
                        rAs += [rA]
                    i += 1
                factor = gcd(listprod(rAs) * listprod(lhs) - listprod(rhs), n)
                if 1 < factor < n:
                    return factor
        except IndexError:
            pass
        # No luck with this bound (or the elimination hit an IndexError):
        # grow the bound and retry from scratch.
        bound *= 1.2
# __all__ must list the *names* of the public API as strings; listing the
# function object itself breaks `from module import *`.
__all__ = ["mpqs"]
| true |
a35a96e242a5011d9a7eaa6db0cfc7c8070bb47b | Python | Hugo-cruz/birdie-ps-webcrawler | /main.py | UTF-8 | 588 | 2.625 | 3 | [] | no_license | from selenium import webdriver
import json
import crawler_functions as crawler
import utils as utils
refrigerator_page = 'https://www.lowes.com/c/Refrigerators-Appliances'

if __name__ == "__main__":
    # Discover every refrigerator sub-category page first.
    subcategory_pages = crawler.get_subcategory_pages(refrigerator_page)
    print(subcategory_pages)
    # Accumulate the product entries found on each sub-category page.
    products = []
    for page in subcategory_pages:
        utils.merge_lists(products, crawler.get_products_from_subcategory_page(page))
    # Scrape the detail record for every discovered product.
    product_list = [crawler.get_product_info(item) for item in products]
| true |
91b0b64da1bc54e2cceb4af15772bc7d14a58ac8 | Python | bendell02/nowCoder | /03_kaoyan/016_n_factorial.py | UTF-8 | 111 | 3.234375 | 3 | [] | no_license | import math
# Read integers from stdin until input runs out, printing N! for each.
# NOTE: this is Python 2 code (print statement; input() evaluates the line).
while True:
    try:
        N = input()
        print math.factorial(N)
    except:
        # Any failure (EOF, non-integer line) ends the loop. The bare
        # except is kept as-is to preserve behaviour, though it also
        # swallows KeyboardInterrupt.
        break
| true |
c3c72c9029be0e30d1b3a130e788aea9420be69b | Python | curieshicy/My_Utilities_Code | /Grokking_the_Coding_Interviews/p77_frequency_sort.py | UTF-8 | 1,031 | 3.796875 | 4 | [] | no_license | import heapq
from collections import OrderedDict, defaultdict
def sort_character_by_frequency(str):
od = OrderedDict()
for ch in str:
if ch in od:
od[ch] += 1
else:
od[ch] = 1
ans = ''
freq_ch = [(freq, ch) for ch, freq in od.items()]
freq_ch.sort(key = lambda t: t[0], reverse = True)
for freq, ch in freq_ch:
ans += ch * freq
return ans
def sort_character_by_frequency(str):
d = defaultdict(int)
for ch in str:
d[ch] += 1
max_heap = []
for ch, freq in d.items():
heapq.heappush(max_heap, (-freq, ch))
ans = ''
while max_heap:
freq, ch = heapq.heappop(max_heap)
ans += ch * (-freq)
return ans
def main():
    """Demonstrate the (heap-based) frequency sort on two sample strings."""
    for sample in ("Programming", "abcbab"):
        print("String after sorting characters by frequency: " +
              sort_character_by_frequency(sample))


main()
| true |
e34d3c227237ea271c03e7d44a98beeda05bf954 | Python | pbp1992/wear-detection-using-conv_nets | /core/model.py | UTF-8 | 2,208 | 2.78125 | 3 | [] | no_license | import torch
import torch.nn as nn
def Conv2d(in_filters, out_filters, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)):
    """3x3 'same' convolution by default."""
    return nn.Conv2d(in_filters, out_filters, kernel_size=kernel_size, stride=stride, padding=padding)


def Max2d(kernel_size=(2, 2), stride=2):
    """2x2 max-pooling that halves the spatial resolution."""
    return nn.MaxPool2d(kernel_size=kernel_size, stride=stride)


def Upsample(scale=2):
    """Nearest-neighbour upsampling by *scale*."""
    return nn.Upsample(scale_factor=scale, mode='nearest')


def UpConv2d(in_filters, out_filters, kernel_size=(3, 3), stride=(2, 2)):
    """Upsample the image by a scale of 2 (transposed convolution).

    Currently unused by Model below, which upsamples with nn.Upsample.
    """
    return nn.ConvTranspose2d(in_filters, out_filters, kernel_size=kernel_size, stride=stride, padding=(1, 1),
                              output_padding=(1, 1))


class Model(nn.Module):
    """Encoder/decoder segmentation network.

    ``features`` downsamples the 3-channel input by 16x and upsamples back
    to the original resolution with 16 channels; ``forward`` concatenates
    that output with the raw input (16 + 3 = 19 channels) before
    ``further_processing`` reduces it to a single-channel map.

    Bug fix: the original used nn.BatchNorm3d after every 2-D convolution.
    BatchNorm3d expects 5-D (N, C, D, H, W) input, while Conv2d produces
    4-D (N, C, H, W) tensors, so every forward pass raised a shape error.
    All normalisation layers are now nn.BatchNorm2d.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.features = nn.Sequential(
            Conv2d(3, 16),
            nn.BatchNorm2d(16),
            Max2d(),  # down by 2
            Conv2d(16, 32),
            nn.BatchNorm2d(32),
            Max2d(),  # down by 4
            Conv2d(32, 64),
            nn.BatchNorm2d(64),
            Max2d(),  # down by 8
            Conv2d(64, 64),
            nn.BatchNorm2d(64),
            Max2d(),  # down by 16
            nn.ReLU(),
            Conv2d(64, 64),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            Upsample(),  # up to /8
            Conv2d(64, 64),
            nn.BatchNorm2d(64),
            Upsample(),  # up to /4
            Conv2d(64, 32),
            nn.BatchNorm2d(32),
            Upsample(),  # up to /2
            Conv2d(32, 16),
            nn.BatchNorm2d(16),
            Upsample(),  # back to input resolution
        )
        self.further_processing = nn.Sequential(
            # input is assumed to be 19 channels: 16 from `features`
            # concatenated with the 3 raw input channels
            Conv2d(19, 32),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            Conv2d(32, 32),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            Conv2d(32, 1),
        )

    def forward(self, in_):
        """Return the single-channel wear map for a (N, 3, H, W) batch.

        H and W must be divisible by 16 for the four pool/upsample pairs to
        round-trip. NOTE: .squeeze() drops *all* size-1 dims, so a batch of
        one collapses to (H, W) — kept as-is to preserve behaviour.
        """
        out_ = self.features(in_)
        out_ = torch.cat((out_, in_), 1)
        out_ = self.further_processing(out_)
        return out_.squeeze()
| true |
e3c4ef8519ae1a57dbf4947024ac4fd72b582790 | Python | gebn/wood | /wood/invalidate.py | UTF-8 | 1,362 | 3.140625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from typing import Iterator
import abc
from wood.comparison import Comparison
from wood.entities import Entity
class Invalidator(metaclass=abc.ABCMeta):
    """
    Interface for objects that know how to invalidate the changes described
    by a comparison (e.g. purging the affected paths from a cache or CDN).
    """

    def invalidate(self, comparison: Comparison[Entity, Entity]) -> None:
        """
        Invalidate as necessitated by a comparison.

        This base implementation always raises; concrete subclasses (such
        as PrefixInvalidator below) must override it.

        :param comparison: The comparison whose changes to invalidate.
        :raises NotImplementedError: Always, in this base class.
        """
        raise NotImplementedError()
class PrefixInvalidator(Invalidator, metaclass=abc.ABCMeta):
    """
    An Invalidator for backends that can purge by regex-like prefix
    (a path optionally suffixed with an asterisk wildcard). Grouping paths
    under shared prefixes makes invalidation more efficient and cheaper.
    """

    def invalidate(self, comparison: Comparison[Entity, Entity]) -> None:
        prefixes = comparison.invalidations()
        self._invalidate_prefixes(prefixes)

    @abc.abstractmethod
    def _invalidate_prefixes(self, prefixes: Iterator[str]) -> None:
        """
        Purge a collection of prefixes.

        :param prefixes: Relative-path-like prefixes; each may end with an
                         asterisk, which is interpreted as a wildcard.
        """
        raise NotImplementedError()
| true |
daa5ac601b64dc62f8f339151adf17452c74f8d6 | Python | metanoia1989/PythonStudy | /DesignPattern/09_责任链模式/bad_call.py | UTF-8 | 927 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
糟糕的调用演示
"""
def function_1(in_string):
    """Announce this handler ran, then return the input without any '1's."""
    print("function_1")
    kept = [ch for ch in in_string if ch != '1']
    return "".join(kept)
def function_2(in_string):
    """Announce this handler ran, then return the input without any '2's."""
    print("function_2")
    kept = [ch for ch in in_string if ch != '2']
    return "".join(kept)
def function_3(in_string):
    """Announce this handler ran, then return the input without any '3's."""
    print("function_3")
    kept = [ch for ch in in_string if ch != '3']
    return "".join(kept)
def function_4(in_string):
    """Announce this handler ran, then return the input without any '4's."""
    print("function_4")
    kept = [ch for ch in in_string if ch != '4']
    return "".join(kept)
def main_function(input_string):
if '1' in input_string:
input_string = function_1(input_string)
if '2' in input_string:
input_string = function_2(input_string)
if '3' in input_string:
input_string = function_3(input_string)
if '4' in input_string:
input_string = function_4(input_string)
print(input_string)
if __name__ == "__main__":
main_function("1221345439") | true |
b54a799cfd40c627f4442726cafa15a96e35c528 | Python | RahulMarathe94/Fall-2018-ECE-478-578-Robotics1-TurtleBot_Project2 | /Project Files/Robot Theatre/newton.py | UTF-8 | 820 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
'''
says a line if the line exists for the robot.
Then publishes increment to indicate its done.
'''
import rospy
import pygame
import os
import time
from std_msgs.msg import Int32
def lineCallback(data):
    """Play the .wav clip for the requested line number, if one exists for
    this robot, then publish the line number on /increment to signal done
    (see module docstring)."""
    line = data.data
    # Clips live under lines/act1/<line>.wav on the robot's filesystem.
    audio_file = "/home/turtle1/catkin_ws/src/project2_play/scripts/lines/act1/"+ str(line) +".wav"
    if(os.path.isfile(audio_file)):
        pygame.mixer.init(44100, -16, 2, 2048)
        pygame.mixer.init()
        audio_play = pygame.mixer.Sound(audio_file)
        free_channel = pygame.mixer.find_channel()  # NOTE(review): unused — presumably leftover
        playing = audio_play.play()
        # Poll in 50 ms steps until playback finishes...
        while playing.get_busy():
            pygame.time.wait(50)
        # ...then pause briefly before acknowledging.
        time.sleep(2.5)
    # Acknowledge regardless of whether a clip existed for this line.
    increment.publish(line)
    return
# ROS wiring: node "Newton" listens for line numbers on /lines and reports
# completion of each one on /increment via lineCallback.
rospy.init_node("Newton")
increment = rospy.Publisher('/increment', Int32, queue_size=1)
rospy.Subscriber("/lines",Int32,lineCallback)
# Block forever, servicing subscriber callbacks.
rospy.spin()
| true |
8ddafa4ab2aa831bfec272f9c5b853a0c1bb5e6b | Python | RalphMul/DataScientistCourseDS | /zelfstudie/Exercise_List4.py | UTF-8 | 1,569 | 4.3125 | 4 | [] | no_license | """
Author: Ralph Mul
File name: Exercise_List4
Info: This exercise was created based on the "Python Data Structure Exercises for Beginners" assignment, as stated at
https://pynative.com/python-data-structure-exercise-for-beginners/
Date: 17-08-2020
Version 0.1

Assignment:
Given a list, iterate over it, count the occurrences of each element,
and create a dictionary to show the count of each element

Expected output:
Original list [11, 45, 8, 11, 23, 45, 23, 45, 89]
Printing count of each item {11: 2, 45: 3, 8: 1, 23: 2, 89: 1}
"""
"""
originalList = [11, 45, 8, 11, 23, 45, 23, 45, 89]
count = 0
indexPlace = 0
for value in originalList:
index = originalList[indexPlace]
print('ik ben voor de if')
print("value: " + str(value))
print("index: " + str(index))
print()
if value == index:
count = count + 1
print("ik ben na de if")
print("count: " + str(count))
print()
count = 0
"""
# original solution
sampleList = [11, 45, 8, 11, 23, 45, 23, 45, 89]
print("Original list ", sampleList)

# Tally how often each element occurs. The Dutch debug prints that traced
# every loop iteration were removed so the script's output matches the
# "Expected output" documented in the module docstring above.
countDict = dict()
for item in sampleList:
    if item in countDict:
        countDict[item] += 1
    else:
        countDict[item] = 1
print("Printing count of each item ",countDict)
| true |
664cec4d35d9fff6ff4917ad37a97150b37f5d7c | Python | mikasiddiqui/Python | /Euler/euler 39.py | UTF-8 | 748 | 3.3125 | 3 | [] | no_license | def rightAngle():
count = 0
number = 0
for p in range(900,1001):
numbers = []
for partition in partitionSum(p,3):
pythagoras = []
for r in partition:
pythagoras.append(r)
if pythagoras[2]**2 + pythagoras[1]**2 == pythagoras[0]**2:
numbers.append(pythagoras)
if (len(numbers) > count):
count = len(numbers)
number = p
print(number)
print(count)
def partitionSum(n, size, limit=None):
    """Yield the partitions of n into `size` positive parts, each list in
    non-increasing order, with every part bounded above by `limit`."""
    if size == 1:
        yield [n]
        return
    if limit is None:
        limit = n
    # The leading part ranges from ceil(n / size) (so it can stay the
    # largest) up to min(limit, n - size + 1) (leaving >= 1 per remaining part).
    lo = (n + size - 1) // size
    hi = min(limit, n - size + 1) + 1
    for head in range(lo, hi):
        for rest in partitionSum(n - head, size - 1, head):
            yield [head] + rest
rightAngle() | true |
cf74d3bf5462107dab0e993407ab9bcfa29d9f1c | Python | omarfakhreddine/Focus | /FocusGameTests.py | UTF-8 | 9,846 | 3.765625 | 4 | [] | no_license | # This file contains tests for FocusGame.
import unittest
from FocusGame import FocusGame, Tile, Board
class FocusGameTests(unittest.TestCase):
    """Unit tests for FocusGame and its Tile/Board helpers: construction,
    turn handling, Tile accessors, board lookups, reserved moves, win
    detection and regular piece moves."""
    # write test methods below
    # --- construction and turn handling ---
    def test_initializer_of_focus_game_creates_object_when_passed_valid_parameters(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertIsInstance(g, FocusGame)
    def test_initializer_of_focus_game_does_not_create_FocusGame_object_when_not_passed_valid_parameters(self):
        def f():
            g = FocusGame()
        self.assertRaises(TypeError, f)
    def test_max_height_initialized_with_FocusGame_object_to_five(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g._max_height, 5)
    def test_turn_assigned_to_first_player_name_in_tuple_in_arguments_when_passed_string_name(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g._turn, g._p1.get_name())
    def test_if_turn_set_to_second_players_name_after_change_turn_passed_player1(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g.change_turn('player1')
        self.assertEqual(g.get_turn(), "player2")
    def test_if_check_turn_returns_false_when_player_name_passed_does_not_match_current_turn(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g.check_turn(g._p2.get_name())
        self.assertFalse(g.check_turn(g._p2.get_name()))
    def test_if_board_initialized_incorrectly_with_valid_FocusGame_initialization(self):
        # Second player's colour is "t" here, so the all-r/g layout must differ.
        g = FocusGame(("player1", "r"), ("player2", "t"))
        self.assertNotEqual(g._board.get_board(),
                            [["r", "r", "g", "g", "r", "r"],
                             ["g", "g", "r", "r", "g", "g"],
                             ["r", "r", "g", "g", "r", "r"],
                             ["g", "g", "r", "r", "g", "g"],
                             ["r", "r", "g", "g", "r", "r"],
                             ["g", "g", "r", "r", "g", "g"]],
                            "They are equal.")
    # --- Tile accessors ---
    def test_if_tile_init_creates_Tile_when_passed_valid_parameters(self):
        g = Tile((0, 0), ["r"])
        self.assertIsInstance(g, Tile)
    def test_if_get_position_from_Tile_returns_tuple_of_2_numbers_when_passed_valid_parameters(self):
        g = Tile((0, 0), ["r"])
        self.assertEqual((0, 0), g.get_position())
    def test_if_get_pieces_from_Tile_returns_list_of_pieces_when_passed_valid_parameters(self):
        g = Tile((0, 0), ["r", 'g', 'g'])
        self.assertEqual(["r", 'g', 'g'], g.get_pieces())
    def test_get_tile_returns_tile_object_at_valid_coordinates_passed_to_get_tile(self):
        g = FocusGame(('player1', 'r',), ('player2', 'g'))
        self.assertIsInstance(g._board.get_tile((0, 0)), Tile)
    def test_if_get_height_from_Tile_returns_number_of_pieces_in_Tile_pieces(self):
        g = Tile((0, 0), ["r", 'g', 'g'])
        self.assertEqual(3, g.get_height())
    def test_if_get_top_from_Tile_returns_piece_colour_on_top_of_Tile_pieces(self):
        g = Tile((0, 0), ["r", 'g', 'g'])
        self.assertEqual('g', g.get_top())
    # --- move validation helpers ---
    def test_check_location_passing_valid_parameters(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.check_location(1, (0, 0), (1, 0)), True)
    def test_check_location_passing_invalid_parameters(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.check_location(1, (0, 1), (0, 0)), True)
    def test_check_number_of_pieces_passing_valid_number_of_pieces(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.check_number_of_pieces(1, (0, 0)), True)
    def test_check_number_of_pieces_passing_too_many_pieces(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.check_number_of_pieces(2, (0, 0)), False)
    def test_get_tile_location_with_valid_location(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(type(g.get_tile((0, 0))), Tile)
    # --- reserves and reserved moves ---
    def test_show_reserve_with_invalid_name(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.show_reserve("Player"), False)
    def test_show_reserve_with_valid_name_and_0_in_reserve(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.show_reserve("player2"), 0)
    def test_change_reserves_and_reserved_move_passing_invalid_name(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        self.assertEqual(g.reserved_move('player', (1, 1)), False)
    def test_reserved_move_passing_invalid_location_off_board(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        self.assertEqual(g.reserved_move('player1', (1, 6)), False)
    def test_reserved_move_on_wrong_turn(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        self.assertEqual(g.reserved_move('player2', (1, 1)), False)
    def test_reserved_move_passing_invalid_number_of_reserves(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.reserved_move('player1', (1, 1)), False)
    def test_reserved_move_passing_valid_parameters_does_not_make_tile_height_more_than_5_adds_new_colour_to_tile(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        g.reserved_move('player1', (1, 1))
        self.assertEqual(g.get_tile((1, 1)).get_pieces(), ['g', 'r'])
    def test_reserved_move_passing_valid_parameters_changes_tile_pieces_accordingly(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        g.reserved_move('player1', (1, 1))
        self.assertEqual(g.get_tile((1, 1)).get_pieces(), ['g', 'r'])
    def test_reserved_move_passing_valid_parameters_changes_reserves_count_accordingly(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        g.reserved_move('player1', (1, 1))
        self.assertEqual(g.show_reserve('player1'), 4)
    def test_reserved_move_passing_valid_parameters_changes_turn_accordingly(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        g.reserved_move('player1', (1, 1))
        self.assertEqual(g.get_turn(), 'player2')
    # --- win detection and misc accessors ---
    def test_check_win_passing_player_with_over_6_captured(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g._p1.change_reserves(5)
        g._p1.change_captured(7)
        g.check_win('player1')
        self.assertEqual(g.check_win('player1'), True)
    def test_show_pieces_passing_valid_tile_position(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.show_pieces((0, 0)), ['r'])
    def test_get_player_from_name_passing_invalid_name(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.get_player_from_name("player"), False)
    # --- regular moves ---
    def test_move_piece_on_wrong_turn(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player2', (0, 0), (1, 0), 1), False)
    def test_move_piece_on_correct_turn(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (1, 0), 1), 'successfully moved')
    def test_move_piece_passing_invalid_name(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player', (0, 0), (1, 0), 1), False)
    def test_move_piece_passing_invalid_destination_off_board(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (1, 6), 1), False)
    def test_move_piece_passing_invalid_start_off_board(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, -1), (0, 1), 1), False)
    def test_move_piece_with_invalid_starting_tile_because_wrong_pile_top(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (1, 0), (1, 1), 1), False)
    def test_move_piece_passing_same_destination_as_start(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (0, 0), 1), False)
    def test_move_piece_passing_destination_at_diagonal_from_start(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (1, 1), 1), False)
    def test_move_piece_passing_too_many_pieces_to_move(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (3, 0), 3), False)
    def test_move_piece_passing_valid_destination(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (1, 0), 1), 'successfully moved')
    def test_move_piece_passing_valid_start(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        self.assertEqual(g.move_piece('player1', (0, 0), (1, 0), 1), 'successfully moved')
    def test_multiple_move_passing_valid_parameters_moving_3_pieces(self):
        g = FocusGame(("player1", "r"), ("player2", "g"))
        g.move_piece('player1', (0, 0), (0, 1), 1)
        g.move_piece('player2', (1, 0), (1, 1), 1)
        g.move_piece('player1', (0, 1), (0, 3), 2)
        g.move_piece('player2', (1, 1), (1, 3), 2)
        g.move_piece('player1', (0, 3), (1, 3), 1)
        self.assertEqual(g.get_tile((1, 3)).get_pieces(), ['r', 'g', 'g', 'r'])
# Bug fix: the original compared the string literal '__name__' against
# "__main__", which is always False, so the suite never ran. The check
# must use the module's __name__ variable.
if __name__ == "__main__":
    # provided test
    unittest.main()
| true |
d438ddaea9e9869aebe5fd66adb0620e044be9a2 | Python | aidansmyth95/AlgoExpertSolutions | /Dynamic Programming/Square of Zeroes/Solution1.py | UTF-8 | 1,011 | 4.21875 | 4 | [] | no_license | '''
Dynamic programming problem.
I like the iterative solutions better; they also have better time complexity than the recursive ones.
First solution: sub-optimal in time, best in space.
Move the top-left corner over every cell, generate all squares anchored there, and check each one to see if it is a square of zeroes.
'''
# O(n^4) T | O(1) S
def squareOfZeroes(matrix):
    """Return True if matrix contains a square (side >= 2) whose entire
    border is zeroes."""
    size = len(matrix)
    for top in range(size):
        for left in range(size):
            side = 2
            # Grow the candidate square while it still fits in the matrix.
            while side <= size - left and side <= size - top:
                bottom = top + side - 1
                right = left + side - 1
                if isSquareOfZeroes(matrix, top, left, bottom, right):
                    return True
                side += 1
    return False


# Border scan: O(side) per edge, so O(len^2) work per candidate square.
def isSquareOfZeroes(matrix, r1, c1, r2, c2):
    """True when the border of the square spanning (r1, c1)-(r2, c2) is
    entirely zero."""
    for r in range(r1, r2 + 1):
        # Left and right edges.
        if matrix[r][c1] != 0 or matrix[r][c2] != 0:
            return False
    for c in range(c1, c2 + 1):
        # Top and bottom edges.
        if matrix[r1][c] != 0 or matrix[r2][c] != 0:
            return False
    return True
4ca61bdecfbc78711d14ab57a5e6a1be4b02fa76 | Python | robcharlwood/inference_logic | /tests/data_structures/test_prologlists.py | UTF-8 | 419 | 2.8125 | 3 | [
"MIT"
] | permissive | import pytest
from inference_logic.data_structures import PrologListNull, construct
def test__repr__():
    # A nested Python list round-trips through construct() to a list-style repr.
    assert repr(construct([1, [2, 3], 4])) == "[1, [2, 3], 4]"
def test__eq__fail():
    # Comparing PrologListNull against a non-PrologListNull raises TypeError.
    with pytest.raises(TypeError) as error:
        PrologListNull() == 0
    assert str(error.value) == "0 must be a PrologListNull"
def test_list__repr__():
    # NOTE(review): identical to test__repr__ above — presumably intended to
    # cover a different case; confirm and differentiate or remove.
    assert repr(construct([1, [2, 3], 4])) == "[1, [2, 3], 4]"
| true |
7e7ff5f6fe381a38c9a10e685d6e8508503cf547 | Python | monchier/streamlit | /examples/core/checkbox.py | UTF-8 | 207 | 2.5625 | 3 | [] | no_license | import streamlit as st
# Three checkboxes demonstrating the optional default-value argument;
# each checkbox's current boolean value is echoed back to the page.
i1 = st.checkbox('checkbox 1', True)  # explicit default: checked
st.write('value 1:', i1)
i2 = st.checkbox('checkbox 2', False)  # explicit default: unchecked
st.write('value 2:', i2)
i3 = st.checkbox('checkbox 3')  # no explicit default
st.write('value 3:', i3)
| true |
76abb0b5f08260d4a5c735de692bbf9d7ae56250 | Python | thkim1011/graph | /src-python/graph.py | UTF-8 | 1,518 | 3.65625 | 4 | [] | no_license | class Graph:
"""
Implements a simple graph
"""
def __init__(self, vertices):
self.vertices = set(vertices)
self.adj_lists = {}
for vertex in vertices:
self.adj_lists[vertex] = []
def add_vertex(self, vertex):
self.vertices.add(vertex)
def add_edge(self, vertex1, vertex2):
if not (vertex1 in self.vertices) or not (vertex2 in self.vertices):
raise ValueError("vertex not in graph")
self.adj_lists[vertex1].append(vertex2)
self.adj_lists[vertex2].append(vertex1)
def delete_vertex(self, vertex):
self.vertices.remove(vertex)
def delete_edge(self, vertex1, vertex2):
pass
def degree(self, vertex):
pass
def get_neighbors(self, vertex):
pass
class DirectedWeightedGraph:
    """
    Implements a directed weighted graph: each vertex maps to a list of
    (target, weight) pairs.
    """
    def __init__(self, vertices):
        self.vertices = vertices
        self.adj_list = {vertex: [] for vertex in vertices}
    def add_edge(self, vertex1, vertex2, weight):
        """Add a weighted edge vertex1 -> vertex2; both ends must exist."""
        if vertex1 not in self.vertices or vertex2 not in self.vertices:
            raise ValueError("vertex not in graph")
        self.adj_list[vertex1].append((vertex2, weight))
    def get_edges(self):
        """Yield every edge as a (source, target, weight) triple."""
        for source in self.vertices:
            for target, weight in self.adj_list[source]:
                yield (source, target, weight)
    def get_vertices(self):
        return self.vertices
| true |
fce666426650d6c8d80e933d12808dfd5f2043c5 | Python | assertpy/assertpy | /tests/test_dyn.py | UTF-8 | 4,512 | 3.171875 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright (c) 2015-2019, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from assertpy import assert_that, fail
class Person(object):
    """Fixture: a person with a computed full name and greeting helpers."""

    def __init__(self, first_name, last_name, shoe_size):
        self.first_name = first_name
        self.last_name = last_name
        self.shoe_size = shoe_size

    @property
    def name(self):
        # Full name is derived, never stored.
        parts = (self.first_name, self.last_name)
        return '%s %s' % parts

    def say_hello(self):
        return 'Hello, %s!' % self.first_name

    def say_goodbye(self, target):
        return 'Bye, %s!' % target
# Module-level fixture shared by all the tests below.
fred = Person('Fred', 'Smith', 12)
# --- happy-path dynamic assertions (has_<attr> mirrors is_equal_to) ---
def test_dynamic_assertion():
    assert_that(fred).is_type_of(Person)
    assert_that(fred).is_instance_of(object)
    assert_that(fred.first_name).is_equal_to('Fred')
    assert_that(fred.last_name).is_equal_to('Smith')
    assert_that(fred.shoe_size).is_equal_to(12)
    assert_that(fred).has_first_name('Fred')
    assert_that(fred).has_last_name('Smith')
    assert_that(fred).has_shoe_size(12)
def test_dynamic_assertion_on_property():
    # has_name() resolves the computed `name` property.
    assert_that(fred.name).is_equal_to('Fred Smith')
    assert_that(fred).has_name('Fred Smith')
def test_dynamic_assertion_on_method():
    # has_say_hello() calls the zero-arg method and compares its return value.
    assert_that(fred.say_hello()).is_equal_to('Hello, Fred!')
    assert_that(fred).has_say_hello('Hello, Fred!')
# --- failure modes: each verifies the exact error type and message ---
def test_dynamic_assertion_failure():
    try:
        assert_that(fred).has_first_name('Joe')
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).is_equal_to('Expected <Fred> to be equal to <Joe> on attribute <first_name>, but was not.')
def test_dynamic_assertion_bad_name_failure():
    try:
        assert_that(fred).foo()
        fail('should have raised error')
    except AttributeError as ex:
        assert_that(str(ex)).is_equal_to('assertpy has no assertion <foo()>')
def test_dynamic_assertion_unknown_attribute_failure():
    try:
        assert_that(fred).has_foo()
        fail('should have raised error')
    except AssertionError as ex:
        assert_that(str(ex)).is_equal_to('Expected attribute <foo>, but val has no attribute <foo>.')
def test_dynamic_assertion_no_args_failure():
    try:
        assert_that(fred).has_first_name()
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('assertion <has_first_name()> takes exactly 1 argument (0 given)')
def test_dynamic_assertion_too_many_args_failure():
    try:
        assert_that(fred).has_first_name('Fred', 'Joe')
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).is_equal_to('assertion <has_first_name()> takes exactly 1 argument (2 given)')
def test_dynamic_assertion_on_method_failure():
    # say_goodbye takes an argument, so the dynamic assertion must refuse it.
    try:
        assert_that(fred).has_say_goodbye('Foo')
        fail('should have raised error')
    except TypeError as ex:
        assert_that(str(ex)).contains('val does not have zero-arg method <say_goodbye()>')
def test_chaining():
    # Dynamic assertions return the builder, so they chain fluently.
    assert_that(fred).has_first_name('Fred').has_last_name('Smith').has_shoe_size(12)
| true |
a06723663e36e64aaf5ba9c0120ca14469559318 | Python | stellarlib/centaurus | /src/game/logic/ai_control/unit_ai/ai.py | UTF-8 | 974 | 2.890625 | 3 | [] | no_license | from .behaviours import *
from src.map import Hex
class AI(object):
    """Base enemy-unit AI: stays put until alerted, then delegates the
    behaviour decision to the subclass hook _get_behaviour()."""

    def __init__(self, owner):
        self.owner = owner
        self._alert = False
        self._range = 1

    @property
    def player(self):
        return self.owner.game.logic.player

    @property
    def map(self):
        return self.owner.game.map

    @property
    def alerted(self):
        return self._alert

    def alert(self):
        self._alert = True

    def get_behaviour(self):
        if self.alerted:
            return self._get_behaviour()
        # Dormant units simply hold their position.
        return self.owner, STAY

    def _get_behaviour(self):
        raise NotImplementedError

    def adj_to_player(self):
        return self._distance_to_player() == 1

    def player_in_range(self):
        return self._distance_to_player() <= self._range

    def _distance_to_player(self):
        # Hex-grid distance between the player's tile and the owner's tile.
        return Hex.hex_distance(Hex(*self.player.pos), Hex(*self.owner.pos))
| true |
58f888a3a8283b6252562cd3554daf94b8ea6e9d | Python | wolfdan666/WolfEat3moreMeatEveryday | /公司面试题/深信服cpp软开A卷_2020.5.11做/C栈弹出所有可能.py | UTF-8 | 1,080 | 3.359375 | 3 | [] | no_license | #import itertools
def GetAllSeq(input, i, stk, tmp, res):
    """Recursively enumerate every push/pop interleaving of input[i:].

    stk is the current stack content and tmp the sequence popped so far;
    both are copied locally so each recursive branch can backtrack without
    disturbing its siblings.  Completed pop orders (tmp followed by the
    remaining stack popped top-to-bottom) are appended to res as lists;
    duplicates may occur and are expected to be filtered by the caller.
    """
    stack = list(stk)
    popped = list(tmp)
    if i == len(input):
        # No symbols left to push: drain the stack (top first) and record.
        res.append(popped + stack[::-1])
        return
    # Push the current symbol, then either keep pushing...
    stack.append(input[i])
    GetAllSeq(input, i + 1, stack, popped, res)
    # ...or pop 1, 2, ... items off the stack before moving on.
    while stack:
        popped.append(stack.pop())
        GetAllSeq(input, i + 1, stack, popped, res)
# Driver: enumerate all stack pop orders of 'abc', then de-duplicate and sort.
input = 'abc'
res = []
stk = ''
tmp = ''
GetAllSeq(input, 0, stk, tmp, res)
print(res)
res = [''.join(x) for x in res]
# Option 1: de-duplicate through a set (loses order), then sort.
res = list(set(res))
res.sort()
print(res)
# Option 2: itertools.groupby (would require the list to be sorted first).
#res = itertools.groupby(res)
# Option 3: dict.fromkeys de-duplicates while preserving insertion order.
# res = list(dict.fromkeys(res))
# print(res)
5e6dc2cbbd124dfcde4b38d8bbe13590ddf19736 | Python | den4uk/andriller | /andriller/gui/tooltips.py | UTF-8 | 1,388 | 2.71875 | 3 | [
"MIT"
] | permissive | import tkinter as tk
from contextlib import suppress
class ToolTip:
    """Lightweight hover tooltip attached to a Tk widget.

    showtip() creates a borderless Toplevel near the widget; hidetip()
    destroys it.  One ToolTip instance manages at most one tip window.
    """
    # Pixel offset of the tip window from the widget's insert cursor.
    offset = 25

    def __init__(self, widget):
        self.widget = widget
        self.tipwindow = None  # the Toplevel while the tip is visible
        self.id = None
        self.x = self.y = 0

    def showtip(self, text):
        "Display text in a tooltip window next to the owning widget."
        self.text = text
        # Do nothing if a tip is already showing or there is nothing to show.
        if self.tipwindow or not self.text:
            return
        x, y, cx, cy = self.widget.bbox("insert")
        # Position the tip slightly below/right of the widget on screen.
        x = x + self.widget.winfo_rootx() + self.offset
        y = y + cy + self.widget.winfo_rooty() + self.offset
        self.tipwindow = tw = tk.Toplevel(self.widget)
        # Remove window decorations so it looks like a tooltip.
        tw.wm_overrideredirect(1)
        tw.wm_geometry(f"+{x}+{y}")
        with suppress(tk.TclError):  # For Mac OS
            tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates")
        label = tk.Label(tw, text=self.text, justify=tk.LEFT,
                         background="#ffffe0", relief=tk.SOLID, borderwidth=1,
                         font=("tahoma", "8", "normal"))
        label.pack(ipadx=1)

    def hidetip(self):
        # Destroy the tip window, if any; safe to call when none is shown.
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
def createToolTip(widget, text):
    """Attach a hover tooltip displaying *text* to *widget*."""
    tip = ToolTip(widget)
    # Show on mouse-enter, hide on mouse-leave.
    widget.bind('<Enter>', lambda event: tip.showtip(text))
    widget.bind('<Leave>', lambda event: tip.hidetip())
| true |
8569075d6601786870596c3945dc0e6dc0d0c9af | Python | fpviviani/python-tkinter | /gui/mainWindow.py | UTF-8 | 1,514 | 3.265625 | 3 | [] | no_license | import tkinter as tk
from gui import buttons as b
class mainWindow:
    """Main application window: builds the widget tree and runs the Tk loop."""

    def __init__(self):
        self.window = tk.Tk()
        texto = ""  # initial text for the result label
        self.initWidgets(self.window, texto)
        # Blocks here until the window is closed.
        self.window.mainloop()

    def initWidgets(self, window, texto):
        """Create the frames, labels and buttons of the main window."""
        self.title = self.window.title('Janela Principal')
        # Four stacked frames: top, middle, input row, and button row.
        self.frameTopo = tk.Frame(self.window)
        self.frameBase = tk.Frame(self.window)
        self.frameInput = tk.Frame(self.window)
        self.frameMiddle = tk.Frame(self.window)
        self.label1 = tk.Label(self.frameTopo, text="Label do topo", font=("Arial Bold", 40))
        self.label2 = tk.Label(self.frameMiddle, text="Label da base", font=("Arial Bold", 20))
        self.label1.pack(side='top')
        self.label2.pack(side='left')
        self.frameTopo.pack()
        self.frameMiddle.pack()
        self.frameInput.pack()
        self.frameBase.pack()
        # Label that displays the user-entered text.
        self.labelResult = tk.Label(self.frameInput, text=texto)
        self.labelResult.pack(side="left")
        # Button callbacks are delegated to the Commands helper object.
        self.commands = b.Commands()
        self.botao1 = tk.Button(self.frameBase, text="Entrar texto", command=lambda: self.commands.processaB1(self))
        self.botao2 = tk.Button(self.frameBase, text="Limpar", command=lambda: self.commands.clear(self))
        self.botao1.pack(side='left')
        self.botao2.pack(side='right')

    def removeWidgets(self):
        """Tear down all frames (and therefore all child widgets)."""
        self.frameTopo.destroy()
        self.frameMiddle.destroy()
        self.frameInput.destroy()
        self.frameBase.destroy()
| true |
a95253a2c554ad687dd9a29c7f20715a62ec0211 | Python | MarioSanzRodrigo/GREDOS | /others/ManagementLayer/ManagementLayer/RenemaAppManager/Sender.py | UTF-8 | 1,448 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
#-------------------------------------------------------------------------------------
# This module is part of the PHD Thesis:
# "A User-Centric SDN Management Architecture for NFV-based Residential Networks".
# Copyright Ricardo Flores Moyano 2016.
#-------------------------------------------------------------------------------------
import socket, json, sys
import threading
class Sender(threading.Thread):
    """Sends RENESE messages from a RENEMA App down to the lower layer.

    Usage: create a Sender, load the JSON message with setMessageToSend(),
    then start()/run() the thread to deliver it over a short-lived TCP
    connection to localhost.
    """

    def __init__(self):
        # Init Method to initialize the Sender.
        threading.Thread.__init__(self)
        self.tcp_port = 14713      # lower-layer listener port on this host
        self.message_to_send = ''  # JSON payload set via setMessageToSend()

    def run(self):
        """Deliver the previously set RENESE message over TCP."""
        s = socket.socket()
        try:
            host = socket.gethostname()
            s.connect((host, self.tcp_port))
            s.send(self.message_to_send)
        finally:
            # BUG FIX: the original said `s.close` (no call parentheses), so
            # the socket was never actually closed; close it even on error.
            s.close()

    def setMessageToSend(self, json_data):
        """Store the JSON-formatted RENESE message to be sent by run()."""
        self.message_to_send = json_data
| true |
dc69b5b679e2334d13604f57c372caca0ddcc2db | Python | ctmakro/pimona | /colors.py | UTF-8 | 975 | 2.953125 | 3 | [] | no_license | from termcolor import colored, cprint
import colorama
colorama.init()
def colored_print_generator(*color_args, **color_kwargs):
    """Return a print-like function that colours its output via termcolor.

    The joined text is round-tripped through GBK with replacement so that
    characters the console encoding cannot represent (e.g. emoji on a GBK
    Windows CMD) do not crash the print call.
    """
    def colored_print(*items, **print_kwargs):
        text = ' '.join(str(item) for item in items)
        # Replace characters unsupported by the console encoding.
        text = text.encode('gbk', errors='replace').decode('gbk')
        print(colored(text, *color_args, **color_kwargs), **print_kwargs)
    return colored_print
import pprint
def prettify(json):
    """Return *json* pretty-printed as a compact, 80-column indented string."""
    printer = pprint.PrettyPrinter(indent=4, width=80, depth=None, compact=True)
    return printer.pformat(json)
# Convenience printers with fixed colours for common log levels.
cpg = colored_print_generator
print_info = cpg('green',)
print_debug = cpg('yellow')
print_up = cpg('yellow', attrs=['bold'])
print_down = cpg('cyan', attrs=['bold'])
print_err = cpg('red', attrs=['bold'])
if __name__ == '__main__':
    # Demo: custom colour combination plus pretty-printing.
    cpg = colored_print_generator
    printredcyan = cpg('red', 'on_cyan')
    printredcyan('red', 'on_cyan')
    print(prettify({'asd':'gerf','a':{'v':'b'}}))
| true |
def is_leap_year(year):
    """Return True when *year* is a leap year under the Gregorian rules."""
    if year % 100:
        # Not a century year: leap iff divisible by 4.
        return year % 4 == 0
    # Century years are leap only when divisible by 400.
    return year % 400 == 0
| true |
9c21b3b6fe0bb72567eb607bd189beb849efe2a4 | Python | ChitMyoKo/passwdgen | /passwdgen/generator.py | UTF-8 | 5,852 | 3.484375 | 3 | [
"MIT",
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-public-domain"
] | permissive | # -*- coding: utf-8 -*-
import math
from .utils import secure_random, load_word_list
from .constants import *
__all__ = [
"chars",
"words"
]
def chars(charset=None, length=None, min_entropy=None):
    """Generates a character-based password. If the length parameter is supplied, the min_entropy parameter
    is ignored (i.e. either a length or a minimum entropy is required, but not both). If no length or
    min_entropy parameters are supplied, a default password length is chosen (see
    constants.DEFAULT_CHAR_PASSWORD_LENGTH).

    Args:
        charset: The character set to use from which to source characters. If not specified, it defaults to
            the character set with alphanumeric and special characters.
        length: The desired length of the password.
        min_entropy: The desired minimum entropy of the password, based on the given charset.

    Returns:
        A string containing the generated password.
    """
    if charset not in PASSWORD_CHARSETS:
        # NOTE(review): the documented charset=None default only works if
        # PASSWORD_CHARSETS has a None key mapping to the default charset;
        # otherwise chars() with no charset raises here -- confirm in constants.
        raise ValueError("Unrecognised charset: %s" % charset)
    else:
        charset_chars = list(PASSWORD_CHARSETS[charset])
    charset_size = len(charset_chars)
    password = ""
    if length is None and min_entropy is None:
        length = DEFAULT_CHAR_PASSWORD_LENGTH
    if length is not None:
        # Fixed-length mode: draw `length` characters uniformly at random.
        for i in range(length):
            password += charset_chars[secure_random(charset_size)]
    else:
        # work backwards from the entropy
        entropy_per_char = math.log(charset_size, 2.0)
        # round up on the number of characters
        min_chars = int(math.ceil(min_entropy / entropy_per_char))
        for i in range(min_chars):
            password += charset_chars[secure_random(charset_size)]
    return password
def select_random_words(word_list, count, starting_letters=None):
    """Pick *count* words uniformly at random.

    Without starting_letters, word_list is treated as a flat sequence.
    With starting_letters, word_list must be a mapping from first letter
    to a list of candidate words, and the i-th chosen word starts with
    starting_letters[i].
    """
    if starting_letters is None:
        size = len(word_list)
        return [word_list[secure_random(size)] for _ in range(count)]
    chosen = []
    for i in range(count):
        candidates = word_list[starting_letters[i]]
        chosen.append(candidates[secure_random(len(candidates))])
    return chosen
def words(dict_set=None, separator=None, word_count=None, min_entropy=None, starting_letters=None):
    """Generates a word-based password from the given dictionary. If the word_count parameter is supplied,
    the min_entropy parameter is ignored (i.e. either a word count or minimum entropy is required, but not
    both). If no length or min_entropy parameters are supplied, a default word count is chosen (see
    constants.DEFAULT_WORD_PASSWORD_WORDS).

    Args:
        dict_set: The word list/dictionary from which to generate a password. Defaults to the built-in word list.
        separator: The separator to use between words.
        word_count: The number of words to use to build the password.
        min_entropy: The desired minimum entropy of the password, based on the given dictionary.
        starting_letters: A string containing the desired starting letters of the generated words. If word_count or
            min_entropy are specified, the number of letters in this string must exactly match the intended number
            of words that will be generated.

    Returns:
        A string containing the generated password.
    """
    if dict_set is None:
        dict_set = load_word_list()

    word_list = list(dict_set)
    word_list_size = len(word_list)
    # words categorised by alphabetical character
    categorised_words = dict()

    if starting_letters is not None:
        # make sure it's lowercase
        starting_letters = starting_letters.lower()
        # Bucket the dictionary by first letter so we can draw words
        # starting with specific characters.
        for word in dict_set:
            ch = word[0]
            if ch in categorised_words:
                categorised_words[ch].append(word)
            else:
                categorised_words[ch] = [word]

        # check that all of the required starting letters are represented
        for ch in starting_letters:
            if not (ch in categorised_words):
                raise ValueError("Dictionary does not contain any words beginning with \"%s\"" % ch)

    password_words = []

    if word_count is None and min_entropy is None:
        word_count = DEFAULT_WORD_PASSWORD_WORDS if starting_letters is None else len(starting_letters)

    if separator is None:
        separator = DEFAULT_WORD_SEPARATOR

    if word_count is not None:
        # Fixed word-count mode.
        if starting_letters is None:
            password_words.extend(select_random_words(word_list, word_count))
        else:
            if len(starting_letters) < word_count:
                raise ValueError((
                    "Please supply at least %d starting letters to meet the minimum word count " +
                    "requirement"
                ) % word_count)
            password_words.extend(
                select_random_words(
                    categorised_words,
                    word_count,
                    starting_letters=starting_letters
                )
            )
    else:
        # Entropy mode: each uniformly chosen word contributes
        # log2(dictionary size) bits; round the word count up.
        entropy_per_word = math.log(word_list_size, 2.0)
        min_words = int(math.ceil(min_entropy / entropy_per_word))
        if starting_letters is None:
            password_words.extend(select_random_words(word_list, min_words))
        else:
            if len(starting_letters) < min_words:
                raise ValueError((
                    "Please supply at least %d starting letters to meet the minimum word count " +
                    "requirement"
                ) % min_words)
            password_words.extend(
                select_random_words(
                    categorised_words,
                    min_words,
                    starting_letters=starting_letters
                )
            )

    return separator.join(password_words)
| true |
6b47311904d2fee10dcfb30817580e48527cf3e4 | Python | 812231487/periodicity | /periodicity/phase.py | UTF-8 | 4,846 | 3.15625 | 3 | [
"MIT"
] | permissive | import numpy as np
from .acf import gaussian, smooth
def stringlength(t, x, dphi=0.1, n_periods=1000, s=0):
    """String Length period search
    (Dworetsky 1983, MNRAS, 203, 917)

    Parameters
    ----------
    t: array-like
        time array
    x: array-like
        signal array
    dphi: float (optional default=0.1)
        factor to multiply (1 / baseline) in order to get frequency separation
    n_periods: int (optional default=1000)
        number of trial periods
    s: int (optional)
        standard deviation of Gaussian filter used to smooth, measured in samples

    Returns
    -------
    periods: array-like
        trial periods
    ell: array-like
        string length for each period
    """
    # Rescale the signal onto [-0.25, +0.25] so phase and magnitude
    # contribute comparably to each segment length.
    x = (x - np.max(x)) / (2 * (np.max(x) - np.min(x))) - 0.25
    # Trial frequencies are uniformly spaced; periods are their inverses.
    df = dphi / (np.max(t) - np.min(t))
    periods = np.sort(1 / np.linspace(df, n_periods * df, n_periods))

    def length_at(period):
        # Fold at the trial period and sum the lengths of segments joining
        # consecutive phase-ordered points (wrapping via np.roll).
        phase = (t / period) % 1
        order = np.argsort(phase)
        phase, mag = phase[order], x[order]
        return np.hypot(np.roll(mag, -1) - mag, np.roll(phase, -1) - phase).sum()

    # TODO: consider flagging false periods for rejection
    ell = np.array([length_at(period) for period in periods])
    if s > 0:
        kernel = gaussian(mu=0, sd=s)
        h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))
        ell = smooth(ell, kernel=h)
    return periods, ell
def pdm(t, x, nb=5, nc=2, pmin=.01, pmax=10, n_periods=1000, s=0):
    """Phase Dispersion Minimization
    (Stellingwerf 1978, ApJ, 224, 953)

    Parameters
    ----------
    t: array-like
        time array
    x: array-like
        signal array
    nb: int (optional default=5)
        number of phase bins
    nc: int (optional default=2)
        number of covers per bin
    pmin, pmax: floats (optional defaults=0.01 and 10)
        minimum/maximum trial period normalized by the baseline
    n_periods: int (optional default=1000)
        number of trial periods
    s: int (optional)
        standard deviation of Gaussian filter used to smooth, measured in samples

    Returns
    -------
    periods: array-like
        trial periods
    theta: array-like
        phase dispersion statistic as in Eq. 3 of the paper
    """
    t = np.asarray(t)
    x = np.asarray(x)
    sigma = np.var(x, ddof=1)          # overall sample variance
    baseline = t.max() - t.min()
    n_bins = nb * nc                    # total number of overlapping cover-bins
    periods = np.linspace(pmin * baseline, pmax * baseline, n_periods)

    def dispersion(period):
        # Fold at the trial period and pool the per-bin variances.
        phase = (t / period) % 1
        order = np.argsort(phase)
        phase, mag = phase[order], x[order]
        bin_samples = []
        for k in range(n_bins):
            # Each bin spans nc/n_bins in phase, wrapping around 1.0.
            in_bin = (phase >= k / n_bins) & (phase < (k + nc) / n_bins)
            in_bin |= phase < (k - (n_bins - nc)) / n_bins
            bin_samples.append(mag[in_bin])
        variances = np.array([np.var(sample, ddof=1) for sample in bin_samples])
        counts = np.array([sample.size for sample in bin_samples])
        pooled = np.sum((counts - 1) * variances) / (np.sum(counts) - n_bins)
        return pooled / sigma

    theta = np.array([dispersion(period) for period in periods])
    if s > 0:
        kernel = gaussian(mu=0, sd=s)
        h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))
        theta = smooth(theta, kernel=h)
    return periods, theta
def pdm2(t, x, pmin=None, pmax=None, n_periods=None, s=0, oversample=10, do_subharmonic=False):
    """Phase Dispersion Minimization with automatic period grid and optional
    subharmonic averaging.

    Defaults: pmax = oversample * baseline, pmin = 2 * median cadence, and a
    frequency-oversampled period grid.  When do_subharmonic is set, a trial
    period whose statistic beats theta_crit is averaged with the statistic at
    twice that period.  Returns (periods, thetas) in ascending period order.
    """
    t = np.asarray(t)
    x = np.asarray(x)
    sigma = np.var(x, ddof=1)
    ne = t.size
    assert x.size == ne, "incompatible array shapes"
    # Significance threshold for a detection (empirical fit in ne).
    theta_crit = 1. - 11. / ne ** 0.8
    dt = np.median(np.diff(t))
    t0 = t.max() - t.min()
    thetas = []
    if pmax is None:
        pmax = oversample * t0
    if pmin is None:
        pmin = 2 * dt
    if n_periods is None:
        # Grid size chosen so frequencies are oversampled by `oversample`.
        n_periods = int((1 / pmin - 1 / pmax) * oversample * t0 + 1)
    # NOTE: periods run from pmax down to pmin; both arrays are reversed
    # before returning so the output is ascending.
    periods = np.linspace(pmax, pmin, n_periods)
    for period in periods:
        phi = ((t - t[0]) / period) % 1
        # Ten non-overlapping phase bins of width 0.1.
        masks = np.array([np.logical_and(phi < (b + 1) / 10, phi >= b / 10) for b in range(10)])
        sj = np.array([np.var(x[masks[j]], ddof=1) for j in range(10)])
        nj = masks.sum(axis=1)
        # Only bins with at least 2 samples have a defined variance.
        good = nj > 1
        ss = np.sum((nj[good] - 1) * sj[good]) / np.sum(nj[good] - 1)
        theta = ss / sigma
        if do_subharmonic and period <= pmax / 2 and theta < theta_crit:
            # Index of the grid point nearest to 2*period (grid is linear,
            # descending), used to average with the subharmonic statistic.
            sub_index = int((n_periods - 1) * (1 - (2 * period - pmin) / (pmax - pmin)) + 0.5)
            theta = (theta + thetas[sub_index]) / 2
        thetas.append(theta)
    thetas = np.array(thetas)[::-1]
    periods = periods[::-1]
    if s > 0:
        kernel = gaussian(mu=0, sd=s)
        h = kernel(np.arange(-(3 * s - 1), 3 * s, 1.))
        thetas = smooth(thetas, kernel=h)
    return periods, thetas
| true |
0ace6610e3e3f4560d6a5c413eeaa4e30189f00e | Python | antny94/Python | /Loops.py | UTF-8 | 394 | 3.96875 | 4 | [] | no_license | # practice with loops in python
# Loop practice: build a list of shows and print each one, then count with a while loop.
Anime = ["Avatar: The Last Airbender", "Vinland Saga", "Spirited Away"]
Anime.append("A Place Further Than The Universe")
Anime.append("Naruto")
lengthAnime = len(Anime)

# Print every title (same output as indexing 0..lengthAnime-1).
for title in Anime:
    print(title)

# While-loop practice: print 0 through 49.
age = 0
while age < 50:
    print(age)
    age += 1
| true |
2bf885fb3b96ca6800d5d454419428676f333a69 | Python | sunjunee/offer_book_python_codes | /codes/T58-2.py | UTF-8 | 442 | 4.1875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
@ Author: Jun Sun {Python3}
@ E-mail: sunjunee@qq.com
@ Date: 2018-05-27 16:50:18
"""
# T58-2 左旋转字符串
# 字符串的左旋转操作是把字符串前面的若干
# 字符转移到字符串的尾部。比如输入字符串
# "abcdefg"和数字2,该函数将返回左旋转
# 两位得到的结果"cdefgab"
def rotateStrs(strs, index):
    """Left-rotate *strs* by *index* characters, e.g. ('abcdefg', 2) -> 'cdefgab'."""
    head, tail = strs[:index], strs[index:]
    return tail + head


print(rotateStrs("abcdefg", 2))
8ac226d3a7bd12be9e1455d16d02c208a8011811 | Python | alexisflores99/Repo-for-Python | /Interfaces Graficas/menu.py | UTF-8 | 732 | 2.671875 | 3 | [] | no_license | from tkinter import *
# Build a window with a classic menu bar: Archivo / Editar / Ayuda.
root = Tk()
barraMenu = Menu(root)
root.config(menu=barraMenu)
# "Archivo" (File) menu with new-file/new-window entries and an exit item.
archivoMenu = Menu(barraMenu,tearoff=0)
barraMenu.add_cascade(label="Archivo",menu=archivoMenu)
archivoMenu.add_command(label = "Nuevo Archivo")
archivoMenu.add_command(label = "Nueva Ventana")
archivoMenu.add_separator()
archivoMenu.add_command(label = "Salir")
# "Editar" (Edit) menu: undo / redo.
editarMenu = Menu(barraMenu, tearoff=0)
barraMenu.add_cascade(label = "Editar", menu=editarMenu)
editarMenu.add_command(label="Deshacer")
editarMenu.add_command(label="Rehacer")
# "Ayuda" (Help) menu: license / about.
ayudaMenu = Menu(barraMenu, tearoff=0)
barraMenu.add_cascade(label = "Ayuda", menu=ayudaMenu)
ayudaMenu.add_command(label = "Ver Licencia")
ayudaMenu.add_command(label = "Acerca de")
# Run the Tk event loop until the window is closed.
root.mainloop()
a35b196be5fedfd354b7b38f7ca051b3f2aa12d4 | Python | ShrutiMarwaha/Python | /Rosalind_problems/longest_common_dna_motif.py | UTF-8 | 2,959 | 3.78125 | 4 | [] | no_license | # Problem: Finding a Shared Motif http://rosalind.info/problems/lcsm/
#
# A common substring of a collection of strings is a substring of every member of the collection.
# We say that a common substring is a longest common substring if there does not exist a longer common substring.
# For example, "CG" is a common substring of "ACGTACGT" and "AACCGGTATA", but it is not as long as possible;
# in this case, "GTA" is a longest common substring of "ACGTACGT" and "AACCGTATA".
# Given: A collection of k DNA strings of length at most 1 kbp each in FASTA format.
# Return: A longest common substring of the collection. (If multiple solutions exist, you may return any single solution.)
#
# Sample Dataset
# >Rosalind_1
# GATTACA
# >Rosalind_2
# TAGACCA
# >Rosalind_3
# ATACA
# Sample Output
# AC
import re
# function to extract dna sequences from fasta files
# function to extract dna sequences from a FASTA file
def sequences_from_fasta_file(f):
    """Parse a FASTA file and return its sequences as a list of strings.

    Header lines start with '>'; consecutive non-header, non-empty lines are
    concatenated into a single sequence.  If the file cannot be opened, a
    message is printed and an empty list is returned.
    """
    try:
        sequence_file = open(f, "r")
    except IOError:
        print("the file does not exist")
        # BUG FIX: the original fell through and hit a NameError on
        # `sequence_file`; return an empty result instead.
        return []
    seq_list = []
    seq_string = ""
    try:
        for line in sequence_file:
            line = line.rstrip()
            # BUG FIX: the original regex "^[^>/w]" also rejected sequence
            # lines starting with '/' or 'w'; a header is any '>' line.
            if line and not line.startswith(">"):
                seq_string += line
            else:
                if len(seq_string) > 0:
                    seq_list.append(seq_string)
                    seq_string = ""
    finally:
        # Close the file even if iteration fails (original leaked the handle).
        sequence_file.close()
    if len(seq_string) > 0:
        seq_list.append(seq_string)
    return seq_list
#########################################################################################
# function to find longest pattern common among all sequences
def longest_common_motif(f):
# call sequences_from_fasta_file function to extract dna sequences from fasta files
dna_sequences = sequences_from_fasta_file(f)
print "input sequences: %s" % dna_sequences
# sort the dna_sequences list by length of each sequence
sorted_seq_list = sorted(dna_sequences, key=len) # using length as key is very important, else it will sort alphabetically
shortest_sequence = sorted_seq_list[0]
other_sequences = sorted_seq_list[1:]
print "shortest sequence in the list: %s" % shortest_sequence
# print "sequences across which the motif should be searched %s" % other_sequences
len_shortest_sequence = len(shortest_sequence)
shared_motif = ""
for i in range(0,len_shortest_sequence):
for j in range(len_shortest_sequence, i+len(shared_motif), -1):
pattern = shortest_sequence[i:j]
matched_all = True
for seq in other_sequences:
if pattern not in seq:
matched_all = False
break
if matched_all:
shared_motif = pattern
print "shared motif found: %s" % shared_motif
break
return(shared_motif)
small_dataset = "/Users/shruti/Downloads/dna_motif.txt"
big_dataset = "/Users/shruti/Downloads/rosalind_lcsm.txt"
print "\nlongest common motif: %s" % longest_common_motif(small_dataset) | true |
09f9db374d9752da2f39a3bd0e335aecc6c69572 | Python | moneymashi/SVN_fr.class | /pythonexp/a10_dataload/a07_insertMysql.py | UTF-8 | 1,364 | 2.9375 | 3 | [] | no_license | '''
Created on 2017. 7. 31.
@author: kitcoop
파이썬에서 삽입과 삭제 또는 갱신..
1. 연결객체인 pymysql.connect()에 있는 cursor() 메서드를 호출하여..
sql(insert into 테이블명 values(##,##,##)) 명령으로 처리한다.
2. excute( sql 명령어 )
3. 연결 객체에 있는 commit() 호출로 반영, rollback() 호출하여 취소
처리된다.
4. 예외 처리..
try:
연결및 sql 처리, commit()
except:
예외발생시 처리할 내용, rollback()
finally:
정상 처리 및 예외 발생 여부 상관 없이 처리
con.close()
'''
from pandas import Series, DataFrame
import numpy as np
import pandas as pd
import sys, pymysql
# Connect to the local MySQL `test` database and run a CRUD statement.
con = pymysql.connect(host='localhost', port=3306, user='root',
                      passwd='11111', db = 'test', charset="utf8")
try:
    cursor = con.cursor()
    ## cursor.execute("insert into contact(name, phone) values('홍길동','01078889999') ") ## insert example
    ## cursor.execute("update contact set phone='01088889999' where name='홍길동'") ## update example
    cursor.execute("delete from contact where name='홍길동' ") ## delete
    # Persist the change; rollback() below undoes it on failure.
    con.commit()
    print("CRUD 성공")
except:
    # NOTE(review): bare except swallows every error type (including
    # KeyboardInterrupt); narrowing to pymysql.MySQLError would be safer.
    print("예외발생!!",sys.exc_info())
    con.rollback()
finally:
    # Always release the connection, success or failure.
    con.close()
| true |
99e57669b0c6eb101ced98749b92579fc9deabe7 | Python | RafaelDias108/AulaWeb1 | /Ativ05.py | UTF-8 | 743 | 4.375 | 4 | [] | no_license | # Elabore um código em Python para receber do usuário:
# ▪ Nome do aluno;
# ▪ 02 notas;
# ▪ 02 pesos, respectivamente para cada nota;
# ▪ Retorne uma mensagem para o usuário informando a média ponderada desse aluno;
# ▪ Utilize o método split para separar a entrada do usuário em nota e peso. Exemplo: Digite a nota e peso: 9.5,3
# Read the student's name and two "nota,peso" (grade,weight) pairs, then
# print the weighted average.  Grades may be fractional (the assignment's own
# example is "9.5,3"), so values are parsed as floats.
# BUG FIX: the original used int(), which crashed on inputs like "9.5".
nome_aluno = input("Entre com seu nome: ")
nota_01, peso_01 = input("Entre com a nota P1 e o peso: ").split(",", 1)
nota_02, peso_02 = input("Entre com a nota P2 e o peso: ").split(",", 1)
p1 = float(nota_01)
p2 = float(nota_02)
peso1 = float(peso_01)
peso2 = float(peso_02)
# Weighted average: sum(grade * weight) / sum(weights).
media_pond = ((p1*peso1)+(p2*peso2))/(peso1+peso2)
print("A média Ponderado do aluno ",nome_aluno," é: ",media_pond)
e324438c0aa36daadc165b4fdf88b1035f169eba | Python | thisistom/codesamples | /mastermind.py | UTF-8 | 9,388 | 4.25 | 4 | [] | no_license | #!/usr/bin/env python
"""
Mastermind - a simple command-line logic game.
The game will pick a numerical code, and the player must guess the code in as
few tries as possible. The game will tell users how many correct digits they
have, but not which ones are correct. Users can optionally specify the
difficulty of the game using command line arguments.
Usage: mastermind.py [numberOfDigits [maximumDigitSize (2-9)]]
e.g. "mastermind.py 3 4" will create 3 digit code made up of digits from 1-4.
Challenge taken from http://usingpython.com/python-programming-challenges.
"""
import sys
import random
import readline # makes raw_input() friendlier
class Mastermind(object):
    """
    A single game of mastermind.  A random numeric code is chosen when the
    instance is created (or reset), and guesses are scored against that code
    (returning the number of exactly-matching digits) until C{reset()} is
    called to begin a fresh game.
    """

    # Class Variables ---------------------------------------------------------

    # Difficulty defaults: how many digits the code has, and the largest
    # digit value that may appear in it.
    DefaultCount = 4
    DefaultMaxDigit = 4

    # Public Instance Functions -----------------------------------------------

    def __init__(self, count=DefaultCount, maxDigit=DefaultMaxDigit):
        """
        Initializes an instance of the class with the given settings.

        @type count: C{int}
        @type maxDigit: C{int}

        @param count: The number of digits to use in the game (optional).
        @param maxDigit: The maximum size of the digit to use in the game
            (optional).
        """
        self.reset(count, maxDigit)

    def reset(self, count=DefaultCount, maxDigit=DefaultMaxDigit):
        """
        Resets the game, generating a brand-new code with the given settings.

        @type count: C{int}
        @type maxDigit: C{int}

        @param count: The number of digits to use in the game (optional).
        @param maxDigit: The maximum size of the digit to use in the game
            (optional).
        """
        self.__count = count
        self.__maxDigit = maxDigit
        self.__numGuesses = 0
        # Re-seed from the current system time so each game is different.
        random.seed()
        self.__code = self.__generateNewCode()

    def checkGuess(self, guess):
        """
        Scores the given guess against the current code.  Either a single run
        of digits (e.g. "1234") or space-separated digits (e.g. "1 2 3 4")
        is accepted.

        @type guess: C{str}
        @rtype: C{int}

        @param guess: The guess to validate against the current code.
        @return: The number of digits in the guess matching the code at the
            same position, or -1 if the guess was invalid.
        """
        digits = self.__splitGuess(guess)
        if len(digits) != self.__count:
            # Wrong number of usable digits: invalid, and not counted.
            return -1
        # Be kind - only valid guesses count against the player.
        self.__numGuesses += 1
        matches = 0
        for guessed, actual in zip(digits, self.__code):
            if guessed == actual:
                matches += 1
        return matches

    def getMaxDigit(self):
        """
        @rtype: C{int}
        @return: The maximum digit which will be used in this game.
        """
        return self.__maxDigit

    def getCount(self):
        """
        @rtype: C{int}
        @return: The number of digits that will be used in this game.
        """
        return self.__count

    def getNumGuesses(self):
        """
        @rtype: C{int}
        @return: The number of valid guesses the user has made this game.
        """
        return self.__numGuesses

    # Private Instance Functions ----------------------------------------------

    def __generateNewCode(self):
        """
        @rtype: C{list} of C{int}
        @return: A fresh random code: self.__count digits, each drawn
            uniformly from 1..self.__maxDigit inclusive.
        """
        return [random.randint(1, self.__maxDigit)
                for _ in range(self.__count)]

    def __splitGuess(self, guess):
        """
        Converts a raw guess string into a list of in-range digits, accepting
        either "1234" or "1 2 3 4" style input.

        @type guess: C{str}
        @rtype: C{list} of C{int}

        @param guess: The guess to split into a list of integers.
        @return: The integers extracted from the guess which fall in the
            valid range 1..self.__maxDigit.
        """
        trimmed = guess.strip()
        # With spaces, treat tokens as digits; otherwise split into characters.
        pieces = trimmed.split() if " " in trimmed else list(trimmed)
        digits = []
        for piece in pieces:
            if piece.isdigit() and 0 < int(piece) <= self.__maxDigit:
                digits.append(int(piece))
        return digits
# Protected Module Functions --------------------------------------------------
def _isQuit(guess):
"""
@type guess: C{str}
@rtype: C{bool}
@param guess: The string to evaluate whether the user wants to quit.
@return: C{True} if the given input means that the user wants to quit,
otherwise C{False}.
"""
lowerCaseGuess = guess.lower()
return (lowerCaseGuess.startswith("quit")
or lowerCaseGuess.startswith("exit"))
def _printUsageAndExit(message=""):
"""
Prints the correct usage of this script and exits.
@type message: C{str}
@param message: An optional message to print before printing the usage.
"""
print("%s\nUsage : %s [numberOfDigits [maximumDigitSize (2-9)]]"
% (message, __file__))
exit(2)
def _getInput(prompt):
    """
    Displays the given prompt, and waits for input from the user.

    @type prompt: C{str}
    @rtype: C{str}

    @param prompt: The prompt to display when requesting data from the user.
    @return: The input received from the user.
    """
    # NOTE(review): raw_input is Python 2 only; under Python 3 this would
    # need to be the builtin input().
    return raw_input(prompt)
# Public Module Functions -----------------------------------------------------
def play(count=Mastermind.DefaultCount, maxDigit=Mastermind.DefaultMaxDigit):
    """
    The main game function. Creates a Mastermind instance with the given
    settings, and passes user input into it until the user correctly guesses
    the code, then resets the game again. Continues until the user types "quit"
    or "exit".

    @type count: C{int}
    @type maxDigit: C{int}

    @param count: The number of digits to use in the game (optional).
    @param maxDigit: The maximum size of the digit to use in the game
        (optional).
    """
    # Store how many possible combinations there are
    numCombinations = pow(maxDigit, count)
    # Create the main game object
    game = Mastermind(count, maxDigit)
    # Print some welcome text
    welcomeText = ("\n --- Let's play MASTERMIND! ---\n"
                   "Guess the code: %d digits between 1 and %d"
                   % (count, maxDigit))
    print(welcomeText)
    # Loop until the user asks to quit
    while True:
        # The prompt shows the 1-based number of the upcoming guess.
        prompt = " %d > " % (game.getNumGuesses() + 1)
        guess = _getInput(prompt)
        if _isQuit(guess):
            # Let the user quit
            print ("Thanks for playing!")
            break
        if not len(guess.strip()):
            # No input - try again
            continue
        numCorrect = game.checkGuess(guess)
        if numCorrect < 0:
            # The guess wasn't valid
            print ("Please enter %d digits between 1 and %d"
                   % (count, maxDigit))
            continue
        # Print some asterisks to show how many digits they have correct,
        # padded with dashes and aligned under the prompt.
        print ("%s[%s%s]" % ((" " * (len(prompt) - 1), ("*" * numCorrect),
                              ("-" * (count - numCorrect)))))
        if numCorrect == count:
            # They won! Reset the game and go again.
            print ("Well done! That took you %d attempts, out of %d possible "
                   "combinations." % (game.getNumGuesses(), numCombinations))
            game.reset(count, maxDigit)
            print(welcomeText)
if __name__ == "__main__":
    # Get default values for the game difficulty
    count = Mastermind.DefaultCount
    maxDigit = Mastermind.DefaultMaxDigit

    # Validate any command line arguments:
    #   mastermind.py [numberOfDigits [maximumDigitSize (2-9)]]
    args = sys.argv[1:]
    if args:
        if len(args) > 2:
            # Wrong number of args
            _printUsageAndExit("Wrong number of arguments.")
        if any((not x.isdigit() for x in args)):
            # Args aren't digits
            _printUsageAndExit("Arguments must be digits.")
        count = int(args[0])
        if len(args) > 1:
            maxDigit = int(args[1])
            if not 1 < maxDigit <= 9:
                # Max digit arg is out of range
                _printUsageAndExit("Maximum digit argument out of range: %d."
                                   % maxDigit)

    # Play the game
    play(count, maxDigit)
| true |
f38cf36fddaf0bdf3fff8d928c3baf9aaee82a92 | Python | maik001/Simplify | /python/admin/modelo/Aluno.py | UTF-8 | 650 | 3.453125 | 3 | [] | no_license | '''
Created on 7 de nov de 2019
@author: jefferson Oliveira
'''
class Aluno:
    """Simple student record (name, enrolment number, course, year) with
    Java-style accessor methods."""

    # Class-level defaults; each setter creates a shadowing instance attribute.
    __nomeAlu = ""
    __matricula = ""
    __curso = ""
    __serie = ""

    # --- getters ---
    def getNomeAlu(self):
        return self.__nomeAlu

    def getMatricula(self):
        return self.__matricula

    def getCurso(self):
        return self.__curso

    def getSerie(self):
        return self.__serie

    # --- setters ---
    def setNomeAlu(self, nomeAlu):
        self.__nomeAlu = nomeAlu

    def setMatricula(self, matricula):
        self.__matricula = matricula

    def setCurso(self, curso):
        self.__curso = curso

    def setSerie(self, serie):
        self.__serie = serie
| true |
52bc44b7d522faaca6c98f6b0ed080d30317141a | Python | William-Weng/Python | /PyGame/02.Pic/Color.py | UTF-8 | 514 | 2.703125 | 3 | [] | no_license | #-*- coding: UTF-8 -*-
# Render every 24-bit RGB colour (16 777 216 pixels, one per colour) onto a
# single 4096x4096 surface and save it as allColors.bmp.
import pygame
pygame.init()
allColors = pygame.Surface((4096, 4096), depth=24)  # blank 4096 * 4096 * 24-bit canvas ==> bland_alpha_surface = pygame.Surface((256, 256), flags=SRCALPHA, depth=32)
for r in range(256):  # rgb ==> (0,0,0) ~ (255,255,255)
    print(r + 1, "out of 256")
    # Each red value gets its own 256x256 tile; tiles form a 16x16 grid.
    x, y = (r & 0b1111) * 256, (r >> 0b100) * 256
    for g in range(256):
        for b in range(256):
            allColors.set_at((x + g, y + b), (r, g, b))
pygame.image.save(allColors, "allColors.bmp")
| true |
1f9a9ed73c67c92665ffd34a7915993224499415 | Python | HumanCompatibleAI/atari-irl | /atari_irl/utils.py | UTF-8 | 7,278 | 2.828125 | 3 | [] | no_license | """
This may all be thrown away soonish, but I could imagine keeping these design
patterns in some form or other.
I hope that most of our patches to the baselines + gym code can happen in this
library, and not need to move into other parts of the code.
Desiderata:
- Not introduce too many dependencies over Adam's patched baselines library
- Basically work and be easy to use
- Contain most of our other patches over other libraries
- Generate useful information about whether or not we want to keep this
incarnation of things
This is heavily based on
- https://github.com/openai/baselines/blob/master/baselines/ppo2/run_mujoco.py
- https://github.com/AdamGleave/baselines/tree/master/baselines/ppo2
"""
import tensorflow as tf
import numpy as np
from baselines import bench, logger
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common import set_global_seeds
from atari_irl import environments
from atari_irl.environments import one_hot
import gym
import csv
import matplotlib.pyplot as plt
def optional_teardown(context, teardown_on_context_exit=True):
    """Optionally defer a context manager's teardown.

    With ``teardown_on_context_exit=True`` the context is returned
    unchanged.  Otherwise the original ``__exit__`` is saved as
    ``context.teardown`` and an argument-optional wrapper is installed on
    the instance, so callers may invoke ``context.__exit__()`` with no
    arguments at a time of their choosing.

    NOTE(review): the ``with`` statement looks up ``__exit__`` on the
    *type*, not the instance, so the instance-level wrapper only affects
    explicit calls, not ``with`` blocks.
    """
    if teardown_on_context_exit:
        return context
    else:
        context.teardown = context.__exit__
        def no_args_safe_exit(*args):
            # Supply the (None, None, None) exception triple when called bare.
            args = [None, None, None] if not args else args
            context.teardown(*args)
        context.__exit__ = no_args_safe_exit
        return context
class TfContext:
    """Context manager owning a TensorFlow session with bounded CPU
    parallelism; the graph is reset when the context exits."""
    def __init__(self, ncpu=1):
        # ncpu bounds both intra- and inter-op thread pools.
        config = tf.ConfigProto(
            allow_soft_placement=True,
            intra_op_parallelism_threads=ncpu,
            inter_op_parallelism_threads=ncpu,
            device_count={'GPU': 1},
        )
        # Allocate GPU memory on demand instead of grabbing it all upfront.
        config.gpu_options.allow_growth=True
        self.tf_session = tf.Session(config=config)
    def __enter__(self):
        self.tf_session.__enter__()
        return self
    def __exit__(self, *args):
        self.tf_session.__exit__(*args)
        # Drop all graph state so the next context starts clean.
        tf.reset_default_graph()
class EnvironmentContext:
    """Context manager that builds a seeded SubprocVecEnv of ``n_envs``
    monitored gym environments, applying per-env and vec-env modifiers.

    NOTE(review): ``env_modifiers=list()`` / ``vec_env_modifiers=list()``
    are evaluated once at def time, so all callers that omit them share
    the same list objects -- mutable-default pitfall; confirm nothing
    mutates them.
    """
    def __init__(self, *, env_name=None, make_env=None, seed, n_envs=1, env_modifiers=list(), vec_env_modifiers=list()):
        self.env_name = env_name
        if make_env is None:
            # Default factory builds the named gym environment.
            make_env = lambda: gym.make(self.env_name)
        self.make_env = make_env
        self.n_envs = n_envs
        self.env_modifiers = env_modifiers
        self.vec_env_modifiers = vec_env_modifiers
        self.seed = seed
    def __enter__(self):
        def make_env(i):
            # Thunk so SubprocVecEnv can construct each env in its worker;
            # i doubles as the per-env seed offset.
            def _thunk():
                env = self.make_env()
                env.seed(i)
                for fn in self.env_modifiers:
                    env = fn(env)
                env = bench.Monitor(env, logger.get_dir(), allow_early_resets=True)
                return env
            return _thunk
        set_global_seeds(self.seed)
        self.base_vec_env = SubprocVecEnv([make_env(i + self.seed) for i in range(self.n_envs)])
        self.environments = self.base_vec_env
        for fn in self.vec_env_modifiers:
            self.environments = fn(self.environments)
        return self
    def __exit__(self, *args):
        self.base_vec_env.close()
def read_cols_from_dict(dirname, *cols, start=0, end=-1):
    """Read the named columns from ``dirname``/progress.csv as lists of
    floats, returning a generator of one ``[start:end]`` slice per column.

    NOTE(review): the default ``end=-1`` silently drops the last row of
    every column -- confirm whether that is intended.
    """
    ans = dict([(c, []) for c in cols])
    with open(dirname + '/progress.csv', 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            for c in cols:
                ans[c].append(float(row[c]))
    return (ans[c][start:end] for c in cols)
def plot_from_dirname(dirname):
    """Plot mean episode reward against total timesteps taken from the
    run's progress.csv (does not call plt.show())."""
    plt.plot(*read_cols_from_dict(dirname,'total_timesteps', 'eprewmean'))
def batched_call(fn, batch_size, args, check_safety=True):
    """Apply ``fn`` (which requires inputs with exactly ``batch_size``
    leading rows) to arrays whose leading dimension N is arbitrary.

    The inputs are cut into full batches; the final partial batch is run
    as the *last* ``batch_size`` rows and only the non-overlapping tail
    of its results is kept.  Results (arrays/lists or dicts of them) are
    re-stacked with np.vstack.  With ``check_safety`` the concatenated
    first argument is verified against the original.

    NOTE(review): np.vstack assumes each per-batch result is at least
    2-d (or a list of rows); 1-d scalar-per-row outputs would change
    shape -- confirm against the callers' fn.
    """
    N = args[0].shape[0]
    # All positional arrays must share the leading dimension.
    for arg in args:
        assert arg.shape[0] == N
    # Things get super slow if we don't do this
    if N == batch_size:
        return fn(*args)
    arg0_batches = []
    fn_results = []
    start = 0
    def slice_result(result, subslice):
        # Slice either a plain array/list or every value of a dict result.
        if isinstance(result, dict):
            return dict(
                (key, value[subslice])
                for key, value in result.items()
            )
        else:
            return result[subslice]
    def add_batch(*args_batch, subslice=None):
        # Run one batch; subslice keeps only the tail rows of the final,
        # overlapping batch so no row is counted twice.
        results_batch = fn(*args_batch)
        if subslice:
            results_batch = [slice_result(r, subslice) for r in results_batch]
            args_batch = [slice_result(r, subslice) for r in args_batch]
        fn_results.append(results_batch)
        if check_safety:
            arg0_batches.append(args_batch[0])
    # add data for all of the batches that cleanly fit inside the batch size
    for start in range(0, N - batch_size, batch_size):
        end = start + batch_size
        add_batch(*[arg[start:end] for arg in args])
    # add data for the last batch that would run past the end of the data if it
    # were full
    start += batch_size
    if start != N:
        # Negative start of the slice selects only the unseen tail rows.
        remainder_slice = slice(start - N, batch_size)
        add_batch(
            *(arg[N - batch_size:N] for arg in args),
            subslice=remainder_slice
        )
    # integrity check
    if check_safety:
        final_arg0 = np.vstack(arg0_batches)
    # reshape everything
    final_results = []
    for i, res in enumerate(fn_results[0]):
        if isinstance(res, np.ndarray) or isinstance(res, list):
            final_results.append(
                np.vstack([results_batch[i] for results_batch in fn_results])
            )
        elif isinstance(res, dict):
            # Dict results are stacked key-by-key across batches.
            for key, item in res.items():
                assert isinstance(item, np.ndarray) or isinstance(item, list)
            final_results.append(dict(
                (
                    key,
                    np.vstack([
                        results_batch[i][key] for results_batch in fn_results
                    ])
                )
                for key in res.keys()
            ))
        else:
            raise NotImplementedError
    # Integrity checks in case I wrecked this
    if check_safety:
        assert len(final_arg0) == N
        assert np.isclose(final_arg0, args[0]).all()
    return final_results
class TfEnvContext:
    """Composite context: a vectorized environment plus a dedicated
    TensorFlow graph and session, all seeded from ``env_config['seed']``.

    NOTE(review): __init__ mutates the caller's env_config dict (pops
    'one_hot_code' and merges env modifiers into it) -- confirm callers
    do not reuse the dict.
    """
    def __init__(self, tf_cfg, env_config):
        self.tf_cfg = tf_cfg
        self.env_config = env_config
        self.seed = env_config['seed']
        env_modifiers = environments.env_mapping[env_config['env_name']]
        one_hot_code = env_config.pop('one_hot_code')
        if one_hot_code:
            env_modifiers = environments.one_hot_wrap_modifiers(env_modifiers)
        self.env_config.update(env_modifiers)
    def __enter__(self):
        # Enter the sub-contexts manually so __exit__ can unwind them in
        # reverse order.
        self.env_context = EnvironmentContext(**self.env_config)
        self.env_context.__enter__()
        self.train_graph = tf.Graph()
        self.tg_context = self.train_graph.as_default()
        self.tg_context.__enter__()
        self.sess = tf.Session(config=self.tf_cfg)
        # from tensorflow.python import debug as tf_debug
        # sess = tf_debug.LocalCLIDebugWrapperSession(sess , ui_type='readline')
        self.sess_context = self.sess.as_default()
        self.sess_context.__enter__()
        tf.set_random_seed(self.seed)
        return self
    def __exit__(self, *args):
        # Unwind in reverse order of entry.
        self.sess_context.__exit__(*args)
        self.tg_context.__exit__(*args)
        self.env_context.__exit__(*args)
| true |
25ac97f645362cba45e4ff8893599382d34d9a27 | Python | ssbagalkar/PythonDataStructuresPractice | /SlidingWindow/02_first_negative_number.py | UTF-8 | 1,584 | 3.78125 | 4 | [] | no_license | """
Video -> https://www.youtube.com/watch?v=uUXXEgK2Jh8&list=PL_z_8CaSLPWeM8BDJmIYDaoQ5zuwyxnfj&index=4&ab_channel=AdityaVermaAdityaVerma
Problem --> https://www.geeksforgeeks.org/first-negative-integer-every-window-size-k/
Complexity:
Method Time (worst) Auxiliary Space(worst) Passing tests?
first_negative_number_sliding           O(n*k)                  O(k)                       Yes, with TLE
"""
from collections import deque
def first_negative_number_sliding(arr, k):
    """Return, for every window of size ``k`` in ``arr``, its first
    negative number (0 when the window contains no negative number)."""
    firsts = []
    for start in range(len(arr) - k + 1):
        first_negative = 0
        # Scan the window left to right; stop at the first negative.
        for value in arr[start:start + k]:
            if value < 0:
                first_negative = value
                break
        firsts.append(first_negative)
    return firsts
def first_negative_number_optimized(nums, n, k):
    """Print the first negative number of each size-``k`` window of
    ``nums`` (0 when a window has none), keeping a deque of candidate
    indices so each element is pushed and popped at most once."""
    if not nums:
        return None
    if len(nums) == 1:
        # Single element: report it only when it is negative.
        return nums[0] if nums[0] < 0 else None
    # Indices of the negative numbers inside the first window.
    candidates = deque(i for i in range(k) if nums[i] < 0)
    for right in range(k, n):
        # Report the window that ends just before index ``right``.
        print(f"{nums[candidates[0]]}" if candidates else "0", end=" ")
        # Discard indices that have slid out of the next window.
        while candidates and candidates[0] <= right - k:
            candidates.popleft()
        if nums[right] < 0:
            candidates.append(right)
    # Report the final window.
    if not candidates:
        print(0)
    else:
        print(nums[candidates[0]], end=" ")
# Demo: the sliding version returns the per-window list, the optimized
# version prints the answers.  Bug fix: the original called the undefined
# name ``first_neg_number_not_optimized`` (NameError) with a stale
# 3-argument signature.
print(first_negative_number_sliding([12, -1, -7, 8, -15, 30, 16, 28], 3))
first_negative_number_optimized([12, -1, -7, 8, -15, 30, 16, 28], 8, 3)
0a1fbe85881b5122e6bb123942fc63639d9b8e70 | Python | himanshukushabhauakolkar/python-lab | /armstrongno.py | UTF-8 | 174 | 2.890625 | 3 | [] | no_license | x=int(input('enter the no '))
# Armstrong check for the 3-digit number x: x is an Armstrong number when
# it equals the sum of the cubes of its digits (e.g. 153 = 1**3 + 5**3 + 3**3).
# Cleanup: the original's ``r2 = r1 % 10`` was a no-op (r1 is already a
# single digit) and the one-letter names obscured which digit was which.
hundreds = x // 100
tens = (x % 100) // 10
units = x % 10
z = hundreds ** 3 + tens ** 3 + units ** 3
if x == z:
    print('armstrong no ')
else:
    print('not a armstrong no')
| true |
ced938088327cf33555bfcff9884a7db63b78c4c | Python | HektorW/sublime-fold-comments | /foldcomments.py | UTF-8 | 2,950 | 2.859375 | 3 | [] | no_license | import sublime, sublime_plugin, re
class ToggleFoldCommentsCommand(sublime_plugin.TextCommand):
    """Sublime Text command that folds/unfolds multi-line comment blocks,
    merging runs of adjacent comment lines into a single foldable region.

    NOTE(review): ``filter``/``map`` results are used as lists (``len``,
    indexing), which is Python-2 behaviour; on Python 3 these return
    iterators, so this plugin targets the Sublime Text 2 runtime.
    """
    def run(self, edit):
        # Collect comment regions, merging ones separated only by whitespace.
        view = self.view
        comments = view.find_by_selector('comment')
        regions = []
        prev = None
        for region in comments:
            # multi_line = len(view.lines(region)) > 1
            # adjacent = prev and view.rowcol(prev.a)[0] == view.rowcol(region.a)[0]-1
            adjacent = self.adjacentComments(prev, region)
            # if not multi_line and adjacent:
            if adjacent:
                regions[-1] = regions[-1].cover(region)
            else:
                regions.append(region)
            prev = region
        regions = self.filter(regions)
        regions = map(lambda region: self.formatRegion(region), regions)
        self.toggle(regions)
    def adjacentComments(self, region_a, region_b):
        # True when only whitespace separates the two comment regions.
        if not region_a or not region_b:
            return False
        view = self.view
        text = view.substr(sublime.Region(region_a.b, region_b.a))
        if re.search('\S', text) is None:
            return True
        return False
    def filter(self, regions):
        # Keep only multi-line comment regions intersecting the selection.
        regions = filter(lambda region: self.inSelection(region), regions)
        regions = filter(lambda region: len(self.view.lines(region)) > 1, regions)
        return regions
    def formatRegion(self, region):
        # Fold from after the first line's newline, excluding a trailing
        # newline so the following line is not swallowed.
        view = self.view
        total_text = view.substr(region)
        res = re.search('\n', total_text)
        first_newline = res.start() if res is not None else 0
        start_a = region.a + first_newline
        end_b = region.b
        if total_text[-1] == '\n':
            end_b -= 1
        return sublime.Region(start_a, end_b)
    def inSelection(self, region):
        # With no non-empty selection, every region counts as selected.
        view = self.view
        selection = view.sel()
        if len(filter(lambda sel: not sel.empty(), selection)) == 0:
            return True
        for sel in selection:
            if region.intersects(sel):
                return True
        return False
    def toggle(self, regions):
        # view.fold returns False when the region is already folded; use
        # the first region to decide the direction for all of them.
        view = self.view
        if len(regions) > 0:
            if view.fold(regions[0]):
                view.fold(regions)
            else:
                view.unfold(regions)
# class ToggleFoldCommentsSelectionCommand(sublime_plugin.TextCommand):
# def run(self, edit):
# view = self.view
# selection = view.sel()
# selected_regions = any_f(lambda region: not region.empty(), selection)
# comments = view.find_by_selector('comment')
# regions = []
# prev = None
# for region in comments:
# multi_line = len(view.lines(region)) > 1
# adjacent = prev and view.rowcol(prev.a)[0] == view.rowcol(region.a)[0]-1
# if not multi_line and adjacent:
# regions[-1] = regions[-1].cover(region)
# else:
# regions.append(region)
# prev = region
# if selected_regions:
# regions = filter(lambda region: any_f(lambda sel: region.intersects(sel), selection), regions)
# regions = filter(lambda region: len(view.lines(region)) > 1, regions)
# regions = [sublime.Region(region.a, region.b-1) if view.substr(region)[-1] == '\n' else region for region in regions]
# if len(regions) > 0:
# if view.fold(regions[0]):
# view.fold(regions)
# else:
# view.unfold(regions)
# def any_f(f, it):
# return len(filter(f, it)) > 0
| true |
82000cf061ff0590d5c4902fe2c20ca9beb69880 | Python | Quantomatic/pyzx | /pyzx/circuit/qasmparser.py | UTF-8 | 10,553 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | # PyZX - Python library for quantum circuit rewriting
# and optimization using the ZX-calculus
# Copyright (C) 2018 - Aleks Kissinger and John van de Wetering
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from fractions import Fraction
from typing import List, Dict, Tuple, Optional
from . import Circuit
from .gates import Gate, qasm_gate_table, ZPhase, XPhase, CRZ, YPhase
class QASMParser(object):
    """Class for parsing QASM source files into circuit descriptions."""
    def __init__(self) -> None:
        self.gates: List[Gate] = []
        self.customgates: Dict[str,Circuit] = {}
        self.registers: Dict[str,Tuple[int,int]] = {}
        self.qubit_count: int = 0
        self.circuit: Optional[Circuit] = None
    def parse(self, s: str, strict:bool=True) -> Circuit:
        """Parse a whole QASM program and return the resulting Circuit.

        With ``strict=True`` the OPENQASM header line and the qelib1
        include line are mandatory; otherwise they are optional.
        """
        lines = s.splitlines()
        r = []
        #strip comments
        for s in lines:
            if s.find("//")!=-1:
                t = s[0:s.find("//")].strip()
            else: t = s.strip()
            if t: r.append(t)
        if r[0].startswith("OPENQASM"):
            r.pop(0)
        elif strict:
            raise TypeError("File does not start with OPENQASM descriptor")
        if r[0].startswith('include "qelib1.inc";'):
            r.pop(0)
        elif strict:
            raise TypeError("File is not importing standard library")
        data = "\n".join(r)
        # Strip the custom command definitions from the normal commands
        while True:
            i = data.find("gate ")
            if i == -1: break
            j = data.find("}", i)
            self.parse_custom_gate(data[i:j+1])
            data = data[:i] + data[j+1:]
        #parse the regular commands
        commands = [s.strip() for s in data.split(";") if s.strip()]
        # (dropped an unused local ``gates`` list; commands append to
        # self.gates directly)
        for c in commands:
            self.gates.extend(self.parse_command(c, self.registers))
        circ = Circuit(self.qubit_count)
        circ.gates = self.gates
        self.circuit = circ
        return self.circuit
    def parse_custom_gate(self, data: str) -> None:
        """Parse a ``gate name q0,q1 { ... }`` definition and store it in
        ``self.customgates`` as a sub-circuit keyed by the gate name."""
        data = data[5:]
        spec, body = data.split("{",1)
        if "(" in spec:
            i = spec.find("(")
            j = spec.find(")")
            if spec[i+1:j].strip():
                raise TypeError("Arguments for custom gates are currently"
                                " not supported: {}".format(data))
            spec = spec[:i] + spec[j+1:]
        spec = spec.strip()
        if " " in spec:
            name, args = spec.split(" ",1)
            name = name.strip()
            args = args.strip()
        else:
            raise TypeError("Custom gate specification doesn't have any "
                            "arguments: {}".format(data))
        registers : Dict[str,Tuple[int,int]] = {}
        qubit_count = 0
        for a in args.split(","):
            a = a.strip()
            if a in registers:
                raise TypeError("Duplicate variable name: {}".format(data))
            registers[a] = (qubit_count,1)
            qubit_count += 1
        body = body[:-1].strip()
        commands = [s.strip() for s in body.split(";") if s.strip()]
        circ = Circuit(qubit_count)
        for c in commands:
            for g in self.parse_command(c, registers):
                circ.add_gate(g)
        self.customgates[name] = circ
    def parse_command(self, c: str, registers: Dict[str,Tuple[int,int]]) -> List[Gate]:
        """Translate a single QASM statement into a list of Gate objects,
        expanding whole-register operands and custom gate definitions."""
        gates: List[Gate] = []
        name, rest = c.split(" ",1)
        if name in ("barrier","creg","measure", "id"): return gates
        if name in ("opaque", "if"):
            raise TypeError("Unsupported operation {}".format(c))
        args = [s.strip() for s in rest.split(",") if s.strip()]
        if name == "qreg":
            regname, sizep = args[0].split("[",1)
            size = int(sizep[:-1])
            registers[regname] = (self.qubit_count, size)
            self.qubit_count += size
            return gates
        # Resolve each operand to a list of absolute qubit indices;
        # bare register names broadcast the gate across the register.
        qubit_values = []
        is_range = False
        dim = 1
        for a in args:
            if "[" in a:
                regname, valp = a.split("[",1)
                val = int(valp[:-1])
                if not regname in registers: raise TypeError("Invalid register {}".format(regname))
                qubit_values.append([registers[regname][0]+val])
            else:
                if is_range:
                    if registers[a][1] != dim:
                        raise TypeError("Error in parsing {}: Register sizes do not match".format(c))
                    else:
                        dim = registers[a][1]
                else:
                    dim = registers[a][1]
                is_range = True
                s = registers[a][0]
                qubit_values.append(list(range(s,s + dim)))
        if is_range:
            # Broadcast single-qubit operands across the register size.
            for i in range(len(qubit_values)):
                if len(qubit_values[i]) != dim:
                    qubit_values[i] = [qubit_values[i][0]]*dim
        for j in range(dim):
            argset = [q[j] for q in qubit_values]
            if name in self.customgates:
                circ = self.customgates[name]
                if len(argset) != circ.qubits:
                    raise TypeError("Argument amount does not match gate spec: {}".format(c))
                for g in circ.gates:
                    gates.append(g.reposition(argset))
                continue
            if name in ("x", "z", "s", "t", "h", "sdg", "tdg"):
                if name in ("sdg", "tdg"):
                    g = qasm_gate_table[name](argset[0],adjoint=True) # type: ignore # mypy can't handle -
                else: g = qasm_gate_table[name](argset[0]) # type: ignore # - Gate subclasses with different numbers of parameters
                gates.append(g)
                continue
            if name.startswith(("rx", "ry", "rz", "u1", "crz")):
                i = name.find('(')
                j = name.find(')')
                if i == -1 or j == -1: raise TypeError("Invalid specification {}".format(name))
                valp = name[i+1:j]
                phase = self.parse_phase_arg(valp)
                if name.startswith('rx'): g = XPhase(argset[0],phase=phase)
                elif name.startswith('crz'): g = CRZ(argset[0],argset[1],phase=phase)
                elif name.startswith('rz'): g = ZPhase(argset[0],phase=phase)
                elif name.startswith("ry"): g = YPhase(argset[0],phase=phase)
                else: raise TypeError("Invalid specification {}".format(name))
                gates.append(g)
                continue
            if name.startswith('u2') or name.startswith('u3'): # see https://arxiv.org/pdf/1707.03429.pdf
                i = name.find('(')
                j = name.find(')')
                if i == -1 or j == -1: raise TypeError("Invalid specification {}".format(name))
                vals = name[i+1:j].split(',')
                phases = [self.parse_phase_arg(val) for val in vals]
                if name.startswith('u2'):
                    if len(phases) != 2: raise TypeError("Invalid specification {}".format(name))
                    gates.append(ZPhase(argset[0],phase=(phases[1]-Fraction(1,2))%2))
                    gates.append(XPhase(argset[0],phase=Fraction(1,2)))
                    gates.append(ZPhase(argset[0],phase=(phases[0]+Fraction(1,2))%2))
                    continue
                else:
                    # See equation (5) of https://arxiv.org/pdf/1707.03429.pdf
                    if len(phases) != 3: raise TypeError("Invalid specification {}".format(name))
                    gates.append(ZPhase(argset[0],phase=phases[2]))
                    gates.append(XPhase(argset[0],phase=Fraction(1,2)))
                    gates.append(ZPhase(argset[0],phase=(phases[0]+1)%2))
                    gates.append(XPhase(argset[0],phase=Fraction(1,2)))
                    gates.append(ZPhase(argset[0],phase=(phases[1]+3)%2))
                    continue
            if name in ("cx","CX","cz","ch", "swap"):
                g = qasm_gate_table[name](control=argset[0],target=argset[1]) # type: ignore
                gates.append(g)
                continue
            if name in ("ccx", "ccz"):
                g = qasm_gate_table[name](ctrl1=argset[0],ctrl2=argset[1],target=argset[2]) # type: ignore
                gates.append(g)
                continue
            raise TypeError("Unknown gate name: {}".format(c))
        return gates
    def parse_phase_arg(self, val):
        """Convert a textual phase argument (e.g. ``pi/2``, ``0.5``,
        ``3*pi/4``) into a Fraction expressed in units of pi."""
        try:
            phase = float(val)/math.pi
        except ValueError:
            # Bug fix: the error message below used the undefined name
            # ``name``, so a bad argument raised NameError instead of the
            # intended TypeError.
            if val.find('pi') == -1: raise TypeError("Invalid specification {}".format(val))
            try:
                val = val.replace('pi', '')
                val = val.replace('*','')
                if val.find('/') != -1:
                    n, d = val.split('/',1)
                    n = n.strip()
                    if not n: n = 1
                    elif n == '-': n = -1
                    else: n = int(n)
                    d = int(d.strip())
                    phase = Fraction(n,d)
                else:
                    val = val.strip()
                    if not val: phase = 1
                    elif val == '-': phase = -1
                    else: phase = float(val)
            except: raise TypeError("Invalid specification {}".format(val))
        phase = Fraction(phase).limit_denominator(100000000)
        return phase
def qasm(s: str) -> Circuit:
    """Parse a string containing a QASM program into a :class:`Circuit`.

    Header checks are relaxed (``strict=False``), so the OPENQASM and
    qelib1 include lines may be absent.
    """
    return QASMParser().parse(s, strict=False)
| true |
e11325a8ec900f9c58c7f5d7e13a90dd5302864b | Python | ausaafnabi/Machine-Learning-Projects | /Clustering/K-means.py | UTF-8 | 3,161 | 3.03125 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
import random
import os
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# Part 1: KMeans on synthetic blobs around four known centers.
np.random.seed(0);
X, y = make_blobs(n_samples=5000, centers=[[4,4], [-2, -1], [2, -3], [1, 1]], cluster_std=0.9)
plt.scatter(X[:, 0], X[:, 1], marker='.')
k_means = KMeans(init = "k-means++", n_clusters = 4, n_init = 12)
k_means.fit(X)
k_means_labels = k_means.labels_
print(k_means_labels)
k_means_cluster_centers = k_means.cluster_centers_
print(k_means_cluster_centers)
# Initialize the plot with the specified dimensions.
fig = plt.figure(figsize=(6, 4))
# Colors uses a color map, which will produce an array of colors based on
# the number of labels there are. We use set(k_means_labels) to get the
# unique labels.
colors = plt.cm.Spectral(np.linspace(0, 1, len(set(k_means_labels))))
# Create a plot
ax = fig.add_subplot(1, 1, 1)
# For loop that plots the data points and centroids.
# k will range from 0-3, which will match the possible clusters that each
# data point is in.
for k, col in zip(range(len([[4,4], [-2, -1], [2, -3], [1, 1]])), colors):
    # Create a list of all data points, where the data poitns that are
    # in the cluster (ex. cluster 0) are labeled as true, else they are
    # labeled as false.
    my_members = (k_means_labels == k)
    # Define the centroid, or cluster center.
    cluster_center = k_means_cluster_centers[k]
    # Plots the datapoints with color col.
    ax.plot(X[my_members, 0], X[my_members, 1], 'w', markerfacecolor=col, marker='.')
    # Plots the centroids with specified color, but with a darker outline
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6)
# Title of the plot
ax.set_title('KMeans')
# Remove x-axis ticks
ax.set_xticks(())
# Remove y-axis ticks
ax.set_yticks(())
# Show the plot
plt.show()
# Part 2: customer segmentation on the Cust_Segmentation.csv dataset.
DATASET_PATH = os.path.join("../","datasets")
csv_file_path = os.path.join(DATASET_PATH,"Cust_Segmentation.csv")
cust_df = pd.read_csv(csv_file_path)
print(cust_df.head())
# Address is categorical and unused by KMeans, so drop it.
df = cust_df.drop('Address', axis=1)
print(df.head())
from sklearn.preprocessing import StandardScaler
X = df.values[:,1:]
X = np.nan_to_num(X)
Clus_dataSet = StandardScaler().fit_transform(X)
Clus_dataSet
clusterNum = 3
k_means = KMeans(init = "k-means++", n_clusters = clusterNum, n_init = 12)
k_means.fit(X)
labels = k_means.labels_
print(labels)
# Attach cluster assignments to the dataframe for inspection.
df["Clus_km"] = labels
print(df.head(5))
df.groupby('Clus_km').mean()
# 2-D scatter of Age vs Income, marker area scaled by education column.
area = np.pi * ( X[:, 1])**2
plt.scatter(X[:, 0], X[:, 3], s=area, c=labels.astype(np.float), alpha=0.5)
plt.xlabel('Age', fontsize=18)
plt.ylabel('Income', fontsize=16)
plt.show()
# 3-D view of Education / Age / Income coloured by cluster.
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(1, figsize=(8, 6))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
# plt.ylabel('Age', fontsize=18)
# plt.xlabel('Income', fontsize=16)
# plt.zlabel('Education', fontsize=16)
ax.set_xlabel('Education')
ax.set_ylabel('Age')
ax.set_zlabel('Income')
ax.scatter(X[:, 1], X[:, 0], X[:, 3], c= labels.astype(np.float))
plt.show()
| true |
d4ef7c569d617c8f72ed4d1a173c4c5ac8aef154 | Python | justynast/python-dla-kazdego | /06 - funkcje/ćwiczenie_06_02_03.py | UTF-8 | 1,743 | 4.46875 | 4 | [] | no_license | """
2. Zmodyfikuj projekt 'Jaka to liczba?' z rozdziału 3. przez użycie w nim funkcji ask_number().
3. Zmodyfikuj nową wersję gry 'Jaka to liczba?', którą utworzyłeś w ramach poprzedniego zadania, tak
aby kod programu znalazł się w funkcji o nazwie main().
"""
import random
def displayInstruction():
    """Print the game's welcome banner and rules."""
    print(
        """
    Hello human!
    I am thinking about a number between 1 and 100.
    Try to guess the number!
    """
    )
def getNumber():
    """Pick the secret number: a random integer from 1 to 100 inclusive."""
    return random.randint(1, 100)
def askNumber(question, low, high):
    """Keep prompting until the player enters an integer in [low, high].

    Bug fix: the original tested ``range(low, high)``, which excludes
    ``high`` itself -- the game advertises "between 1 and 100" and the
    secret number can be 100, so guessing 100 was impossible and the
    loop could never terminate on a correct answer of 100.
    """
    response = None
    while response not in range(low, high + 1):
        response = int(input(question))
    return response
def prompt(theNumber, guess):
    """Tell the player whether the guess was above or below the number
    (prints nothing when the guess is exact)."""
    if guess == theNumber:
        return
    hint = "Your guess is too big." if guess > theNumber else "Your guess is too small."
    print(hint)
def winning(theNumber, guess):
    """Return True when the guess equals the secret number.

    Improvement: the original returned None on a miss; returning a real
    bool keeps the same truthiness for existing callers while making the
    contract explicit.
    """
    return theNumber == guess
def congrat(theNumber, tries):
    """Congratulate the player and report the number of attempts."""
    # Typo fix in the user-facing message: "thiking" -> "thinking".
    print("Yes, the number I'm thinking of is", theNumber)
    print("It took you", tries, "tries to guess. Congratulations!")
def main():
    """Run one full round: pick a number, then loop until it is guessed,
    counting the attempts."""
    theNumber = getNumber()
    displayInstruction()
    guess = askNumber("What number am I thinking about? ", 1, 100)
    tries = 1
    while not winning(theNumber, guess):
        prompt(theNumber, guess)
        guess = askNumber("\nWhat number am I thinking about? ", 1, 100)
        tries += 1
    congrat(theNumber, tries)
if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. for tests)
    # does not immediately start the interactive game.
    main()
    input("\n\nPress any key to exit")
| true |
07930b2e41f5669c03113ee1c743f60f831b87f0 | Python | sunilgvs/SDETTraining | /Python/Activity3.py | UTF-8 | 764 | 4.03125 | 4 | [] | no_license | user1 = input("what is Player1 Name ")
user2 = input("what is Player2 Name ")
user1_ans = input(user1 + ", do you want to choose rock, paper or scissor? " ).lower()
user2_ans = input(user2 + ", do you want to choose rock, paper or scissor? " ).lower()
if user1_ans == user2_ans:
print("its tie ")
elif user1_ans == "rock":
if user2_ans == "scissor":
print(" Rock Wins ")
else:
print(" scissor Wins ")
elif user1_ans == "paper":
if user2_ans == "scissor":
print(" Scissor Wins ")
else:
print(" Rock Wins ")
elif user1_ans == "rock":
if user2_ans == "paper":
print(" Paper Wins ")
else:
print(" Scissor Wins ")
else:
print(" Enter Values Correctly")
| true |
84078a344ca008afff0b64a150f38d13f3360f9e | Python | weiguxp/pythoncode | /ProblemSets/ParseFile.py | UTF-8 | 344 | 3.84375 | 4 | [] | no_license | def ParseF(n):
''' opens file n and returns a list of words
n = name of file
'''
fin = open(n)
myList = []
for line in fin:
word = line.strip()
myList.append(word)
return myList
def ParsePartF(n, l):
    ''' Opens a file and parses the first l words
    n = file name
    l = desired length of list'''
    # Delegate the parsing and keep only the leading slice.
    return ParseF(n)[:l]
ecabd59cd8744fa3c869ec76dbb7716028709f46 | Python | mobone/screener | /playit.py | UTF-8 | 1,065 | 2.609375 | 3 | [] | no_license | import sqlite3 as lite
import sys
import pandas as pd
# Python 2 script (note the "print e" statement below): joins screen
# metrics for a given date with later prices and writes the pct change
# to <metrics>.csv.  Usage: playit.py <date> <metric1+metric2+...>
con = lite.connect('screens.db')
cur = con.cursor()
try:
    filename = sys.argv[1]
    metrics = sys.argv[2]
    original_metrics = metrics
    # Turn "a+b" into '"a", "b"' for the SQL column list.
    metrics = metrics.replace('+', '", "')
    metrics = '"' + metrics +'"'
    # NOTE(review): SQL is built by string interpolation from argv --
    # vulnerable to injection if the arguments are untrusted.
    result = pd.read_sql('Select Date, Ticker, {0}, "Last Close" from screens where date = \'{1}\''.format(metrics, filename), con, index_col=['Ticker'])
    final_prices = pd.read_sql('select `Adj Close`, Ticker from price_data where date = \'2014-10-31\' group by ticker', con, index_col=['Ticker'])
    result = result.join(final_prices)
    result['Sum'] = 0
    metrics = original_metrics.split('+')
    for i in metrics:
        result['Sum'] += result[i].convert_objects(convert_numeric=True)
    result['Last Close'] = result['Last Close'].convert_objects(convert_numeric=True)
    result['pct change'] = (result['Adj Close']-result['Last Close'])/result['Last Close']
    result.to_csv('{0}.csv'.format(original_metrics))
except Exception as e:
    # NOTE(review): broad catch that only prints; errors are swallowed.
    print e
4dd57f91155f1e5f2a96d4c1da39dd70b2c88db1 | Python | hyamynl619/DS-Unit-3-Sprint-1-Software-Engineering | /module2-oop-code-style-and-reviews/Making_a_class.py | UTF-8 | 428 | 3.953125 | 4 | [
"MIT"
] | permissive |
"""
Define a Pet class with their attributes
"""
class Pet(object):
    """A pet with a name and the sound it makes."""

    def __init__(self, name, speaks):
        self.name = name
        # The sound the pet makes, e.g. "Woof Woof".
        self.speaks = speaks

    def full_name(self):
        """Return the pet's name."""
        return f"{self.name}"

    def speak(self):
        """Print what the pet says.

        Renamed from ``speaks``: the original method was shadowed (made
        uncallable) by the ``self.speaks`` attribute set in ``__init__``,
        and its body ``{self.name} + ' speaks'`` built a set and raised
        TypeError when adding a string to it.
        """
        print(f"{self.name} says {self.speaks}")
if __name__ == "__main__":
pet = Pet(name="Dog", speaks="Woof Woof")
print(pet.full_name)
| true |
59d9d80eb84f3d84053bb8a9449753c47f3a0cd4 | Python | sourav-crossml/session1 | /week1-august/assigment1withclass.py | UTF-8 | 2,861 | 3.21875 | 3 | [] | no_license | import datetime
from hashlib import new
import uuid
import os,getpass
import json
import random
class UserData:
    """
    Prints the current datetime, the local user name and the contents of
    the text files in the user folder, then assembles username / email /
    generated-id records for a JSON file.
    """
    def __init__(self,user_fldr):
        """
        Store the path of the user folder (expects a trailing separator).
        """
        self.user_folder = user_fldr
    def currenttime(self):
        """
        Print the current date and time as dd/mm/YYYY, HH:MM:SS.
        """
        now = datetime.datetime.now()
        current_date_time = now.strftime("%d/%m/%Y, %H:%M:%S")
        print(current_date_time)
    def username(self):
        """
        Print the local (OS) user name.
        """
        print(getpass.getuser())
    def read_file(self, file_name):
        """
        Print each stripped line of one text file in the user folder.
        """
        with open(self.user_folder + file_name, 'r') as file:
            names_list = file.readlines()
            for name in names_list:
                print(name.strip())
    def appending_data_to_new_list(self, file_name):
        """
        Return the stripped lines of one text file as a list.
        """
        all_names = []
        with open(self.user_folder + file_name, 'r') as file:
            names_list = file.readlines()
            for name in names_list:
                all_names.append(name.strip())
        return all_names
    def gen_email(self,user_name):
        """
        Return an email address per "First Last" name in the given list.
        """
        email_addrs=[]
        for name in user_name:
            first_name,last_name=name.split()
            email_addrs.append(first_name[0:3]+last_name[0:3]+"@sourav.com")
        return email_addrs
    def genrating_user_id(self,user_name):
        """
        Return a pseudo-random id per "First Last" name in the given list.
        """
        new_uudi=[]
        for names in user_name:
            b=random.randint(100000,99999999)
            first_name,last_name=names.split()
            new_uudi.append(str(b)+first_name[0:1]+last_name)
        return new_uudi
    def appending_data_to_json_file(self,user_name):
        # NOTE(review): several defects here --
        # 1. self.username = user_name clobbers the username() method.
        # 2. genrating_user_id/gen_email expect a *list* of names but are
        #    given a single name string, so they iterate characters and
        #    name.split() raises ValueError.
        # 3. The value returned is a list, which is unhashable and cannot
        #    be used as a dict key (TypeError).
        self.username=user_name
        json_data=[]
        for user in user_name:
            json_data.append({self.genrating_user_id(user):{"Username":user,"email":self.gen_email(user)}})
        print(json_data)
# Driver: collect every name from the text files under ./users/ and feed
# the combined list to appending_data_to_json_file.
PWD = os.getcwd()
users_folder = PWD+'/users/'
o=UserData(users_folder)
# o.currenttime()
# o.username()
files_list = os.listdir(users_folder)
# print(files_list)
new_list = []
for file in files_list:
    # o.read_file(file)
    new_list.extend(o.appending_data_to_new_list(file))
# print(len(new_list))
# o.genrating_user_id(new_list)
# o.gen_email(new_list)
o.appending_data_to_json_file(new_list)
97b41508dd32944a86b542490d238a94bbb09ec6 | Python | cyrh/pytorch-example | /capsulenet/model/capsnet.py | UTF-8 | 2,587 | 2.515625 | 3 | [] | no_license | #coding:utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import sys
sys.path.append("..")
import utils
import config
conf = config.DefaultConf()
class CapsNet(nn.Module):
    """
    input :a group of capsule -> shape:[batch_size*1152(feature_num)*8(in_dim)]
    output:a group of new capsule -> shape[batch_size*10(feature_num)*16(out_dim)]
    """
    def __init__(self,in_features,out_features,in_dim,out_dim):
        """
        Build the capsule layer's transformation weights.
        in_features/out_features: number of input/output capsules;
        in_dim/out_dim: dimensionality of each input/output capsule.
        """
        super(CapsNet,self).__init__()
        #number of output features,10
        self.out_features = out_features
        #number of input features,1152
        self.in_features = in_features
        #dimension of input capsule
        self.in_dim = in_dim
        #dimension of output capsule
        self.out_dim = out_dim
        #full connect parameter W with shape [1(shared across batch),1152,10,8,16]
        self.W = nn.Parameter(torch.randn(1,self.in_features,self.out_features,in_dim,out_dim))
    def forward(self,x):
        """Route input capsules to output capsules with 3 iterations of
        dynamic routing-by-agreement."""
        #input x,shape=[batch_size,in_features,in_dim]
        #[batch_size,1152,8]
        # (batch, input_features, in_dim) -> (batch, in_features, out_features,1,in_dim)
        x = torch.stack([x] * self.out_features, dim=2).unsqueeze(3)
        W = torch.cat([self.W] * conf.batch_size,dim=0)
        # u_hat shape->(batch_size,in_features,out_features,out_dim)=(batch,1152,10,1,16)
        u_hat = torch.matmul(x,W)
        #b for generate weight c,with shape->[1,1152,10,1]
        b = torch.zeros([1,self.in_features,self.out_features,1]).double()
        # NOTE(review): self.cuda is the bound nn.Module.cuda *method*,
        # which is always truthy, so b.cuda() runs unconditionally and
        # fails on CPU-only machines -- probably meant a device check
        # such as torch.cuda.is_available().
        if self.cuda:
            b = b.cuda()
        b = Variable(b)
        for i in range(3):
            c = F.softmax(b,dim=2)
            #c shape->[batch_size,1152,10,1,1]
            c = torch.cat([c] * conf.batch_size, dim=0).unsqueeze(dim=4)
            #s shape->[batch_size,1,10,1,16]
            s = (u_hat * c).sum(dim=1,keepdim=True)
            #output shape->[batch_size,1,10,1,16]
            v = utils.squash(s,dim=-1)
            v_1 = torch.cat([v] * self.in_features, dim=1)
            #(batch,1152,10,1,16)matmul(batch,1152,10,16,1)->(batch,1152,10,1,1)
            #squeeze
            #mean->(1,1152,10,1)
            #print u_hat.shape,v_1.shape
            update_b = torch.matmul(u_hat,v_1.transpose(3,4)).squeeze(dim=4).mean(dim=0,keepdim=True)
            b = b+update_b
        return v.squeeze(1).transpose(2,3)
if __name__ == "__main__":
net = CapsNet(1152,10,8,16)
a = torch.ones([4,1152,8])
print net(a).shape
| true |
6aba8898439d95e54d60e99f968e0e4dc7060797 | Python | Divyanshu169/IT556_Worthless_without_coffee_DA-IICT | /201501004_Aashini/Assigment 3 Spacy/Spacy.py | UTF-8 | 750 | 2.65625 | 3 | [] | no_license | import spacy
from elasticsearch import Elasticsearch
from pprint import pprint
import json
import requests
#es = Elasticsearch([{'host': 'localhost', 'port': 9200}])

# Load the English pipeline and run it over a sample sentence.
nlp = spacy.load('en')
doc = nlp(u'Apple is looking at buying U.K. startup for $1 billion')

# Example inspection snippets, kept for reference.
"""
for token in doc:
    print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
        token.shape_, token.is_alpha, token.is_stop)
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
"""

# BUG FIX: the original `json.load(open('json_book.json'))` leaked the file
# handle; a context manager closes it deterministically.
with open('json_book.json') as book_file:
    data = json.load(book_file)

# Run the NLP pipeline over each book title.
for i in data["books"]:
    print (i)
    print("\n-----------------------\n")
    #es.index(index='book2', doc_type='book_doc', id=i['title'], body=i)
    doc = nlp(i['title'])
    #print (list(doc.noun_chunks))
| true |
f8ff6558a18738d87c7fb04f0b1ec0c699488360 | Python | ShehanIshanka/t-combo | /t_combo/TextCombo.py | UTF-8 | 2,890 | 3.03125 | 3 | [
"MIT"
] | permissive | import argparse
import json
import re
import sys
def process_multiple_files(config_file):
    """Process every (input, output) pair listed in a JSON config file.

    config_file: path to a JSON document shaped like
        {"configs": [{"input_file": ..., "output_file": ...}, ...]}
    """
    # BUG FIX: the original leaked the config file handle via open(...).read();
    # json.load on a managed handle is equivalent and closes it deterministically.
    with open(config_file, "r") as f:
        config_dict = json.load(f)
    for config in config_dict["configs"]:
        process_single_file(config["input_file"], config["output_file"])
def process_single_file(input_file, output_file):
    """Expand one template file and write the combined text to output_file."""
    text_combo = TextCombo(input_file)
    # Use a context manager so the output file is closed even if substitute()
    # or the write raises (the original closed it manually, leaking on error).
    with open(output_file, "w") as f:
        f.write(text_combo.substitute())
    print(
        "Input file \"%s\" is successfully processed and generated the output file \"%s\"" % (input_file, output_file))
class TextCombo:
    """Expands ``{arg:value,...}`` placeholder groups found in a text file.

    Each ``arg:value`` pair inside a group is replaced with
    ``value_dict[value]`` when that value is a known key.  A group containing
    a ``function`` argument names another template file, which is expanded
    recursively with the group's own argument/value mapping.
    """

    def __init__(self, file_name):
        # Path of the template file to expand.
        self.file_name = file_name
        # Maps substitution names to replacement text; filled by callers or
        # by recursive expansion of nested templates.
        self.value_dict = dict()
        # Placeholder syntax: "{a:x,b:y}".
        self.argument_separator = ","
        self.argument_value_separator = ":"
        self.combination_identifier = "({.+?})"
        # An argument named `function` marks a nested template reference.
        self.text_replacer_arg = "function"

    # Read the file content
    def read_file(self):
        """Return the template file's full contents.

        BUG FIX: the original ``open(...).read()`` leaked the file handle;
        the context manager closes it deterministically.
        """
        with open(self.file_name, "r") as f:
            return f.read()

    # Build the dictionary for arguments and its values
    def build_dict(self, combination_string):
        """Parse "{a:x,b:y}" into {"a": "x", "b": "y"}.

        NOTE(review): assumes neither arguments nor values themselves contain
        ',' or ':' — confirm against the template format.
        """
        arg_value_dict = {}
        for string in combination_string[1:len(combination_string) - 1].split(self.argument_separator):
            arg_value_dict[string.split(self.argument_value_separator)[0]] = \
                string.split(self.argument_value_separator)[1]
        return arg_value_dict

    # Substitute and combine texts
    def substitute(self):
        """Return the template text with every placeholder group expanded."""
        file_content = self.read_file()
        for combination_string in re.findall(self.combination_identifier, file_content):
            if bool(re.search(self.text_replacer_arg, combination_string)):
                # Nested template: expand the referenced file using this
                # group's own argument/value mapping as its value_dict.
                temp_arg_value_dict = self.build_dict(combination_string)
                temp_combo = TextCombo(temp_arg_value_dict[self.text_replacer_arg])
                temp_combo.value_dict = temp_arg_value_dict
                file_content = file_content.replace(combination_string, temp_combo.substitute())
            else:
                # Plain group: replace each "{arg:value}" whose value is a
                # known substitution name; unknown values are left untouched.
                for string in combination_string[1:len(combination_string) - 1].split(self.argument_separator):
                    replacement = string.split(self.argument_value_separator)[1]
                    if replacement not in self.value_dict.keys():
                        continue
                    file_content = file_content.replace("{" + string + "}", self.value_dict[replacement])
        return file_content
def main(args):
    """Parse command-line arguments and run the text combination.

    args: argv-style list (excluding the program name).
    Raises SystemExit (via parser.error) when either file path is missing.
    """
    parser = argparse.ArgumentParser(description="Combine text files.")
    parser.add_argument("-i", "--input_file", type=str, required=False, help='Input file containing text')
    parser.add_argument("-o", "--output_file", type=str, required=False,
                        help='Output file to be generated combining text')
    args = parser.parse_args(args)
    # BUG FIX: the original called x(args.xcenter, args.ycenter) — `x` is
    # undefined and neither attribute exists on the parsed namespace, so the
    # script always crashed. Validate the inputs and dispatch to the real
    # entry point instead.
    if args.input_file is None or args.output_file is None:
        parser.error("both --input_file and --output_file are required")
    process_single_file(args.input_file, args.output_file)


if __name__ == '__main__':
    main(sys.argv[1:])
| true |
b080ca7293632141ce0aefc0caf8c0074db086e7 | Python | gauravbg/Drug-Abuse-Analysis-using-Twitter-Reddit-Data | /Drug-Abuse-Analytics-on-Twitter-data/MTurk/PostHit.py | UTF-8 | 3,912 | 2.640625 | 3 | [] | no_license | '''
Created on Feb 17, 2017
@author: Gaurav BG
'''
import csv
import sys
from boto.mturk.connection import MTurkConnection
from boto.mturk.question import QuestionContent,Question,QuestionForm,Overview,AnswerSpecification,SelectionAnswer,FormattedContent,FreeTextAnswer
from fileinput import filename
sandbox_host = 'mechanicalturk.sandbox.amazonaws.com'
real_host = 'mechanicalturk.amazonaws.com'
ACCESS_ID ="DUMMY_FOR_GITHUB"
SECRET_KEY = "DUMMY_FOR_GITHUB"
HOST = sandbox_host
class TweetInfo:
def __init__(self, tweetId, text, keyword, uId, scName):
self.tweetId = tweetId
self.text = text
self.keyword = keyword
self.uId = uId
self.scName = scName
def parseFile(fileName):
f = open(fileName, encoding="latin-1")
csv_f = csv.reader(f)
allTweetInfos = []
for row in csv_f:
tweet = TweetInfo(tweetId= row[1], text= row[3], keyword= row[4], uId= row[5], scName= row[6])
allTweetInfos.append(tweet)
return allTweetInfos
if __name__ == '__main__':
filePath = "C:/Users/gaura/Desktop/Course Material/Sem 2/Advanced Project (CSE-523)/Data/Marijuana_AMT_data/"
inFileName = "cannabis.csv.csv"
outFilename = "C:/Users/gaura/Desktop/Course Material/Sem 2/Advanced Project (CSE-523)/Data/HITs/hitIds.txt"
out_file = open(outFilename, 'w')
mtc = MTurkConnection(aws_access_key_id=ACCESS_ID,
aws_secret_access_key=SECRET_KEY,
host=HOST)
allTweetInfos = parseFile(filePath + inFileName)
title = "Classify these Tweets as marijuana related or not"
description = ("Read the Tweets and mark each of them as marijuana related or not. Select not sure if unsure")
keywords = "Marijuana, Classify, Drugs, Weed, Smoking, Study, Medical"
hittype= "3LA9UWLSKBAE4HD14EHD90BL61WY18"
ratings =[('Its related to Marijuana','1'),
('Not related to marijuana','-1'),
('Not Sure','0')]
totalSize = 35
batchSize = 10
batchCount = int(totalSize/batchSize) + 1
startIndex = 1
endIndex = 1 + batchSize
out_file.write(str(filePath+inFileName))
out_file.write('\n')
for batchNum in range(batchCount):
overview = Overview()
overview.append_field('Title', 'Classify these Tweets as marijuana related or not')
overview.append(FormattedContent("Select the appropriate answer from dropdown for each tweet"))
question_form = QuestionForm()
question_form.append(overview)
qNumber = 1
for index in range(startIndex, endIndex):
tweet = allTweetInfos[index]
qc1 = QuestionContent()
tweetText = str(qNumber) + ") " + tweet.text
qc1.append_field('Title',tweetText)
fta1 = SelectionAnswer(min=1, max=1,style='radiobutton',
selections=ratings,
type='text',
other=False)
q1 = Question(identifier=tweet.tweetId,
content=qc1,
answer_spec=AnswerSpecification(fta1),
is_required=True)
question_form.append(q1)
qNumber = qNumber + 1
response = mtc.create_hit(hit_type=hittype,
questions=question_form,
max_assignments=3,
title=title,
description=description,
keywords=keywords)
print("Batch: " + str(batchNum) + "-- " + str(response[0].HITId))
out_file.write("Hit ID: " +str(response[0].HITId) + "\n")
startIndex = endIndex
increment = min(batchSize, totalSize - endIndex)
endIndex = endIndex + increment
out_file.write('--------------------------------------------------- \n')
| true |