blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4483b5d884084baa1adcb7081aef1b6e21cb9a61 | Python | abhidurg/CSE_480_Database_Systems | /proj08/wait_die_shedule.py | UTF-8 | 7,563 | 2.90625 | 3 | [] | no_license | class Action:
"""
This is the Action class.
"""
def __init__(self, object_, transaction, type_):
self.object_ = object_
self.transaction = transaction
assert type_ in ("WRITE", "COMMIT", "ROLLBACK", "LOCK", "UNLOCK", "WAIT")
self.type_ = type_
def __str__(self):
return f"Action('{self.object_}', '{self.transaction}', '{self.type_}')"
__repr__ = __str__
def __eq__(self, other):
return ((self.object_ == other.object_) and
(self.transaction == other.transaction) and
(self.type_ == other.type_))
# Do not modify any code above this line
#used stackoverflow to get nice one line code to check if dictionary has any non empty lists:
#https://stackoverflow.com/questions/5889611/one-liner-to-determine-if-dictionary-values-are-all-empty-lists-or-not
import copy
def wait_die_scheduler(actions):
    """Simulate a wait-die lock scheduler over a stream of actions.

    Actions are drained one per outer iteration from *actions* into
    per-transaction queues, then replayed in transaction-name order.
    WRITE takes an exclusive lock on its object; on a conflict an OLDER
    transaction (smaller timestamp) emits WAIT, a YOUNGER one emits
    ROLLBACK and its already-completed actions are re-queued.  COMMIT
    releases every lock the transaction holds.  Returns the full list of
    emitted Action objects, including generated LOCK/UNLOCK/WAIT/ROLLBACK.
    """
    transaction_dict = {}
    timestamp_dict = {}   # transaction -> arrival timestamp; smaller == older
    object_dict = {}      # object -> [lock status, owning transaction]
    return_actions = []
    Ts = 0
    # First pass: register every object and transaction; timestamps follow
    # first-appearance order in the input stream.
    for action in actions:
        if action.object_ not in object_dict:
            object_dict[action.object_] = [None, None] #lock status, and who locked it
        if action.transaction not in transaction_dict:
            transaction_dict[action.transaction] = []
            timestamp_dict[action.transaction] = Ts
            Ts += 1
    # Queues are visited in sorted transaction-name order each round.
    sorted_transaction_dict = dict(sorted(transaction_dict.items()))
    completed_actions_dict = copy.deepcopy(sorted_transaction_dict)
    while True:
        if any(a != [] for a in dict(sorted_transaction_dict).values()): #as long as there are actions left to do for each trans
            for key,value in dict(sorted_transaction_dict).items():
                #print("current action dict: ", sorted_transaction_dict)
                #print("curret object dict: ", object_dict)
                print("Trying to do", key)
                if value: #list of actions for each transaction
                    for act in value: #each action is list of actions
                        if act.type_ == "COMMIT":
                            print(" Doing COMMIT", act)
                            return_actions.append(act)
                            completed_actions_dict[act.transaction].append(act)
                            value.remove(act)
                            # Releasing every lock held by the committing transaction.
                            for obj,status in object_dict.items():
                                if status[0] == "LOCKED" and status[1] == act.transaction:
                                    print(" Doing UNLOCK ", Action(object_=obj,transaction=act.transaction,type_="UNLOCK"))
                                    return_actions.append(Action(object_=obj,transaction=act.transaction,type_="UNLOCK"))
                                    object_dict[obj][0] = None
                                    object_dict[obj][1] = None
                            break
                        else:
                            if act.type_ == "WRITE":
                                if object_dict[act.object_][0] == None:
                                    # Object is free: take the lock, then write.
                                    object_dict[act.object_][0] = "LOCKED"
                                    object_dict[act.object_][1] = act.transaction
                                    print(" Doing LOCK", Action(object_=act.object_, transaction=act.transaction, type_="LOCK"))
                                    #completed_actions_dict[act.transaction].append(Action(object_=act.object_, transaction=act.transaction, type_="LOCK"))
                                    return_actions.append(Action(object_=act.object_, transaction=act.transaction, type_="LOCK"))
                                    return_actions.append(act)
                                    print(" Doing WRITE", act)
                                    completed_actions_dict[act.transaction].append(act)
                                    value.remove(act)
                                    break
                                else: #object already locked!
                                    if object_dict[act.object_][1] == act.transaction: #but saved, same trans!
                                        print(" Doing WRITE", act)
                                        return_actions.append(act)
                                        completed_actions_dict[act.transaction].append(act)
                                        value.remove(act)
                                        break
                                    else: #crap locks dont agree
                                        if timestamp_dict[act.transaction] < timestamp_dict[object_dict[act.object_][1]]: #older, make wait
                                            print(" Doing WAIT", Action(object_="NA", transaction=act.transaction, type_="WAIT"), "beacuse ", act.transaction, " is older")
                                            return_actions.append(Action(object_="NA", transaction=act.transaction, type_="WAIT"))
                                            break
                                        else: #its younger, kill!
                                            # Wait-die: the younger requester is rolled back.
                                            return_actions.append(Action(object_="NA", transaction=act.transaction, type_="ROLLBACK"))
                                            #need to unlock
                                            # Re-queue the already-completed actions in front of the pending ones.
                                            value = completed_actions_dict[act.transaction] + value
                                            sorted_transaction_dict[act.transaction] = value
                                            print(" Doing ROLLBACK", value)
                                            for obj,status in object_dict.items():
                                                if status[0] == "LOCKED" and status[1] == act.transaction:
                                                    print(" Doing UNLOCK ", Action(object_=obj,transaction=act.transaction,type_="UNLOCK"))
                                                    return_actions.append(Action(object_=obj,transaction=act.transaction,type_="UNLOCK"))
                                                    object_dict[obj][0] = None
                                                    object_dict[obj][1] = None
                                            completed_actions_dict[act.transaction] = []
                                            #waiting = True
                                            break
                            else:
                                # Any other action type is passed straight through.
                                # NOTE(review): this removes from `value` while iterating
                                # without breaking, which skips the following element —
                                # looks unintended but is preserved as-is.
                                return_actions.append(act)
                                value.remove(act)
        # Feed the next input action (if any) into its transaction queue.
        if actions:
            action_to_do = actions.pop(0)
            print("Adding", action_to_do, "to transaction list")
            sorted_transaction_dict[action_to_do.transaction].append(action_to_do)
        else:
            # Input exhausted: keep looping until every queue is drained.
            if any(a != [] for a in sorted_transaction_dict.values()):
                continue
            break
    #  if any(a != [] for a in sorted_transaction_dict.values()):
    #      continue
    #  else:
    #      action_to_do = actions.pop(0)
    #      print("Adding", action_to_do, "to transaction list")
    #      sorted_transaction_dict[action_to_do.transaction].append(action_to_do)
    return return_actions
81dbd093cab21233ebebfa94e49f02f141dea0dd | Python | truboprovod4uk/My-Project | /sinX.py | UTF-8 | 2,659 | 3.4375 | 3 | [] | no_license | # Це не мій проект, програма написана по прикладу з ютуба
# Ця програма генерує координатну сітку та синусоїду, приймаючи дані від користувача
from tkinter import *
import math
# Build the main window with a dark canvas on the right for plotting.
root = Tk()
root.title("Графік фунції")
root.geometry('1240x640')
canvas = Canvas(root, width=900, height=640, bg='#002')
canvas.pack(side='right')
# vertical grid lines
for y in range(21):
    k = 50 * y
    canvas.create_line(10 + k, 610, 10 + k, 10, width=1, fill='#191938')
# horizontal grid lines
for x in range(13):
    k = 50 * x
    canvas.create_line(10, 10 + k, 1010, 10 + k, width=1, fill='#191938')
# x and y coordinate axes
canvas.create_line(10,10,10,610, width=1, arrow=FIRST, fill='white')# Y axis
canvas.create_line(0,310,1010,310,width=1, arrow=LAST, fill='white')# X axis
canvas.create_text(10,10,text='300', fill='white')
canvas.create_text(10,610,text='-300', fill='white')
canvas.create_text(10,310,text='0', fill='white')
canvas.create_text(1020,310,text='1000', fill='white')
'''w = 0.0209 #циклічна частота
phi = 10 #зміщення графіка по Х
A = 200 #амплітуда
dy = 310 #зміщення графіка по У'''
# Input labels and entry fields for the four sine parameters
# (cyclic frequency, X offset, amplitude, Y offset).
label_w = Label(root,text='Циклічна частота:')
label_w.place(x=0,y=10)
label_phi = Label(root,text='Зміщення графіка по Х:')
label_phi.place(x=0,y=30)
label_A = Label(root,text='Aмплітуда:')
label_A.place(x=0,y=50)
label_dy = Label(root,text='Зміщення графіка по У:')
label_dy.place(x=0,y=70)
entry_w = Entry(root)
entry_w.place(x=150,y=10)
entry_phi = Entry(root)
entry_phi.place(x=150,y=30)
entry_A = Entry(root)
entry_A.place(x=150,y=50)
entry_dy = Entry(root)
entry_dy.place(x=150,y=70)
def sinus(w, phi, A, dy):
    """Draw y = A*sin(w*x) shifted by (phi, dy); store the canvas item id in
    the module-level `sin` so clean() can delete it later."""
    global sin
    sin=0
    xy = []
    # Build a flat [x0, y0, x1, y1, ...] coordinate list for create_line.
    for x in range(1000):
        y = math.sin(x*w)
        xy.append(x+phi)
        xy.append(y*A+dy)
    sin=canvas.create_line(xy, fill='blue')
def clean():
    """Remove the previously drawn sine curve from the canvas."""
    canvas.delete(sin)
# "calc" draws the curve from the entry values; "clean" erases it.
btn_calc = Button(root, text='calc')
btn_calc.bind('<Button-1>',lambda event:sinus(float(entry_w.get()),
                                              float(entry_phi.get()),
                                              float(entry_A.get()),
                                              float(entry_dy.get())))
btn_calc.place(x=10,y=100)
btn_clean = Button(root, text='clean')
btn_clean.bind('<Button-1>', lambda event: clean())
btn_clean.place(x=100,y=100 )
root.mainloop()
| true |
526ac0ca6af79b634f4ee51403f84c8d7a121745 | Python | Shurui-Zhang/Biometrics | /biometrics.py | UTF-8 | 7,487 | 2.546875 | 3 | [] | no_license | #!pip install removebg
import cv2 as cv
import torchvision.models.segmentation as segmentation
import numpy
import torch
from torchvision import transforms
from PIL import Image
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from sklearn.neighbors import KNeighborsClassifier
from removebg import RemoveBg
from matplotlib import pyplot
from google.colab import drive
drive.mount('/content/drive')
def removeBackground():
    """
    Strip the background from every training image via the remove.bg API.

    Only called when the dataset is rebuilt.  Failures are appended to
    error.log by the RemoveBg client.

    NOTE(review): the API key is committed in source — move it to an
    environment variable or config file.
    """
    # BUG FIX: `os` is used below but never imported at module level in this
    # file, which made this function raise NameError when called.
    import os

    rmbg = RemoveBg("R2MCHXvXFFX6QE86aYuKN2Ef", "error.log")
    path = "/content/drive/MyDrive/Colab Notebooks/BIOM/biometrics/training"
    for pic in os.listdir(path):
        rmbg.remove_background_from_img_file("%s/%s" % (path, pic))
def getLoaderDataset(path_train, path_test):
    """Build ImageFolder datasets and single-sample DataLoaders for the
    training and test splits."""
    to_tensor = transforms.Compose([
        transforms.ToTensor()  # convert each PIL image to a tensor
    ])
    train_dataset = ImageFolder(path_train, to_tensor)
    test_dataset = ImageFolder(path_test, to_tensor)
    train_loader = DataLoader(train_dataset, batch_size=1)
    test_loader = DataLoader(test_dataset, batch_size=1)
    return train_loader, test_loader, train_dataset, test_dataset
def process_data(train_loader, test_loader):
    """Unpack both loaders into numpy arrays of images and labels.

    Each batch is assumed to hold exactly one image (batch_size=1); the
    image tensor is converted from CHW to HWC layout.
    """
    def _loader_to_arrays(loader):
        # Collect every (image, label) pair as numpy data.
        images, labels = [], []
        for image_batch, label_batch in loader:
            images.append(image_batch.numpy()[0].transpose(1, 2, 0))
            labels.append(label_batch.numpy())
        return numpy.asarray(images), numpy.asarray(labels)

    matrix_train_image, matrix_train_label = _loader_to_arrays(train_loader)
    matrix_test_image, matrix_test_label = _loader_to_arrays(test_loader)
    return matrix_train_image, matrix_train_label, matrix_test_image, matrix_test_label
def segment(image):
    """Run DeepLabV3-ResNet101 semantic segmentation on one image and
    return the per-pixel class-id map as a numpy.ndarray."""
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    net = segmentation.deeplabv3_resnet101(pretrained=True)
    net.eval()
    logits = net(normalize(image).unsqueeze(0))['out'][0]
    class_map = logits.argmax(0).byte().cpu().numpy()
    return class_map  # numpy.ndarray
def processImage(predictions):
    """
    Binarize a segmentation map in place: every non-zero class id becomes 255.

    Replaces the original per-pixel Python double loop with one vectorized
    boolean-mask assignment; the array is still modified in place and then
    returned, matching the old contract.

    :param predictions: 2-D numpy array of per-pixel class ids.
    :return: the same array, now containing only 0 and 255.
    """
    predictions[predictions != 0] = 255
    return predictions
def cut(image):
    """
    Crop the binary silhouette to a fixed 340x160 window centered on the head.

    The silhouette is first tightly bounded, pasted onto a square canvas so
    that the head column sits at the horizontal center, then center-cropped.

    :param image: 2-D numpy array; non-zero pixels are foreground
                  (assumed — TODO confirm against processImage output).
    :return: 340x160 numpy array.
    """
    # First/last rows and columns containing any foreground pixel.
    height_min = (image.sum(axis=1) != 0).argmax()
    height_max = ((image.sum(axis=1) != 0).cumsum()).argmax()
    width_min = (image.sum(axis=0) != 0).argmax()
    width_max = ((image.sum(axis=0) != 0).cumsum()).argmax()
    # Column of the first foreground pixel in the top row ~ head position.
    head_top = image[height_min, :].argmax()
    size = height_max - height_min
    # BUG FIX: the square canvas was allocated twice; one duplicate removed.
    temp = numpy.zeros((size, size))
    l1 = head_top - width_min
    r1 = width_max - head_top
    # Paste the bounding box so the head column lands at size // 2.
    temp[:, (size // 2 - l1):(size // 2 + r1)] = image[height_min:height_max, width_min:width_max]
    temp = torch.from_numpy(temp)
    transform = transforms.Compose([
        transforms.CenterCrop((340, 160))
    ])
    temp = transform(temp)
    temp = temp.numpy()
    return temp
def calculateHuMoments(images):
    """
    Compute the seven Hu moment invariants for every image.

    :param images: iterable of 2-D arrays.
    :return: list of flat (7,) numpy arrays, one per image.
    """
    hu_moments_list = []
    for img in images:
        hu = cv.HuMoments(cv.moments(img))  # numpy.ndarray (7, 1)
        hu_moments_list.append(hu.flatten())
    return hu_moments_list  # list
def getFeatures(hu_moments_list):
    """
    Stack the side-on and front-on Hu moments of each object into one vector.

    Consecutive entries (2i, 2i+1) are assumed to belong to the same object;
    a trailing unpaired entry is ignored.
    """
    first_views = hu_moments_list[0::2]
    second_views = hu_moments_list[1::2]
    features = [numpy.hstack(pair) for pair in zip(first_views, second_views)]
    return numpy.asarray(features)
def modifyLabels(labels):
    """
    Keep every other label so each side/front view pair collapses to a
    single label per object (both views share the same class).
    """
    return numpy.asarray(labels[::2])
def knn(k, matrix_train_features, matrix_train_label, matrix_test_features, classes):
    """
    Classify the test feature vectors with a distance-weighted k-NN model
    and map the predicted indices back to class names.
    """
    model = KNeighborsClassifier(k, weights='distance')
    model.fit(matrix_train_features, matrix_train_label)
    predicted = model.predict(matrix_test_features)
    return [classes[idx] for idx in predicted]
#testing code
# End-to-end pipeline: load images, segment + binarize, crop, compute Hu
# moment features, then classify the test set with 1-NN.
path_train = "/content/drive/MyDrive/Colab Notebooks/BIOM/biom_set_test/training"
path_test = "/content/drive/MyDrive/Colab Notebooks/BIOM/biom_set_test/test"
train_loader, test_loader, train_dataset, test_dataset = getLoaderDataset(path_train, path_test)
matrix_train_image, matrix_train_label, matrix_test_image, matrix_test_label = process_data(train_loader, test_loader)
# Segment each training image into a binary silhouette.
train_images_o = []
train_images = []
for train_image in matrix_train_image:
    train_images_o.append(train_image)
    predictions = segment(train_image)
    image = processImage(predictions)
    train_images.append(image)
# Same for the test images.
test_images_o = []
test_images = []
for test_image in matrix_test_image:
    test_images_o.append(test_image)
    predictions = segment(test_image)
    image = processImage(predictions)
    test_images.append(image)
# Crop every silhouette to the fixed head-centered window.
processed_train_images = []
processed_test_images = []
for image in train_images:
    processed_image = cut(image)
    processed_train_images.append(processed_image)
for image in test_images:
    processed_image = cut(image)
    processed_test_images.append(processed_image)
# Hu moment feature vectors (side + front view pairs stacked together).
train_hu_moments_list = calculateHuMoments(processed_train_images)
matrix_train_features = getFeatures(train_hu_moments_list)
test_hu_moments_list = calculateHuMoments(processed_test_images)
matrix_test_features = getFeatures(test_hu_moments_list)
classes = train_dataset.classes
matrix_train_label = modifyLabels(matrix_train_label)
results = knn(1, matrix_train_features, matrix_train_label, matrix_test_features, classes)
print(results)
| true |
560da9b167d3608df28bb4fcff90bfe8a3a44334 | Python | kwendim/sentence_extractor | /new_textifier.py | UTF-8 | 4,026 | 3.046875 | 3 | [] | no_license | """Extracts the title, content and date from the blogs and saves it in a file that is organized into sentences. The name of the file will be the date followed by the article title.
Argument passed should be the absolute path to the folder containing the website data. When giving path, start from the "/" folder"""
__author__ = 'kidus'
import os
import glob
import sys
import html2text
from datetime import datetime
import selenium
from selenium.webdriver.firefox import webdriver
import re,nltk
import unicodedata
import codecs
def address_split(location):
    """Return *location* relative to the input folder.

    len_path is a module global set in __main__ to the length of the
    base-directory argument; the +1 also drops the path separator.
    """
    return location[len_path+1:]
def clear_date(the_date):
    """returns properly formatted date

    Parses a long date string — presumably of the form
    "Monday, January 5, 2015 ..." (verify against the blog markup) —
    into "YYYY-MM-DD".  Relies on the module globals `days` and `months`
    set in __main__.
    """
    # Strip the leading weekday name plus ", ".
    for element in days:
        if the_date.startswith(element):
            day=element
            break
    the_date=the_date[len(day)+2:]
    # Strip the month name (plus one separator char) and map it to "MM".
    for value in months.keys():
        if the_date.startswith(value):
            val_month= months[value]
            the_date= the_date[len(value)+1:]
            break
    # Accumulate the day-of-month digits up to the next comma.
    buff= ''
    for element in the_date:
        if element != ',':
            buff += element
        else:
            # Skip past the day digits and the ", " that follows them.
            the_date= the_date[len(buff)+2:]
            val_day= buff
            break
    # Zero-pad single-digit days.
    if len(buff)!=2:
        val_day = "0"+val_day
    # What remains starts with the 4-digit year.
    return_date= the_date[0:4]+"-"+val_month+"-"+val_day
    return return_date
def get_text(link):
    """Convert an HTML fragment to tidy plain text using html2text,
    dropping links, images and emphasis markers."""
    converter = html2text.HTML2Text()
    converter.ignore_links = True
    converter.ignore_images = True
    converter.ignore_emphasis = True
    return converter.handle(link)
def htmltotext(the_file):
"""Opens a webpage using selenium, extracts the content and writes it onto a file organized into sentences."""
link= "file://" + the_file
wd.get(link)
try:
element = wd.find_element_by_class_name("articletitle")
except selenium.common.exceptions.NoSuchElementException:
print "didn't load " + the_file
return
header_name= get_text(element.get_attribute('innerHTML'))
header_name= header_name.rstrip()
try:
element = wd.find_element_by_class_name('postmetadata-date')
date_of_post= element.get_attribute('innerHTML')
date_of_post= clear_date(date_of_post)
except selenium.common.exceptions.NoSuchElementException:
print "couldn't load date for article " + the_file
date_of_post = "unknown"
try:
element = wd.find_element_by_class_name('post-content')
data= element.get_attribute('innerHTML')
data= get_text(data)
#print data
except selenium.common.exceptions.NoSuchElementException:
print "article loading failed" + the_file
data= data.replace('\n',' ')
date= data.replace('#','')
data=data.replace("*",'')
if data.find("### Related Posts") != -1:
data= data[0:data.find("### Related Posts")]
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
data = '\n-----\n'.join(tokenizer.tokenize(data))
file = codecs.open(date_of_post + '_' + header_name + '.txt', "w", "utf-8")
file.write(header_name + '\n\n')
file.write(data)
file.close()
def main(current_folder):
    """Recursively walk *current_folder*, converting every .html file found
    and descending into subdirectories."""
    for entry in glob.glob(os.path.join(current_folder, '*')):
        if entry.endswith('.html'):
            htmltotext(entry)
        elif os.path.isdir(entry):
            main(entry)
if __name__=='__main__':
    # Expect exactly one argument: the absolute path of the website folder.
    if len(sys.argv)==2:
        global cwd , the_dir, len_path,wd,days,months
        # Lookup tables used by clear_date() for parsing post dates.
        days=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        months={'January':'01', 'February':'02','March':'03' ,'April':'04','May':'05','June':'06','July':'07','August':'08','September':'09','October':'10','November':'11','December':'12'}
        # Shared Firefox driver instance used by htmltotext().
        wd= webdriver.WebDriver()
        len_path= len(sys.argv[1])
        (lash,the_dir)= os.path.split(sys.argv[1])
        cwd = os.getcwd()
        # Output directory named after the input folder; add a numeric
        # suffix until an unused name is found.
        new_folder= os.path.join(cwd ,the_dir)
        count = 1
        while os.path.isdir(new_folder):
            new_folder= os.path.join(cwd,the_dir + '_' + str(count))
            count +=1
        #print new_folder
        os.makedirs(new_folder)
        # Output files are written relative to the new folder.
        os.chdir(new_folder)
        main(sys.argv[1])
    else:
        print "Enter absolute path to folder with webfiles in it as an argument."
| true |
def knapsack(N,W,items):
    """0/1 knapsack dynamic program.

    :param N: number of items to consider.
    :param W: knapsack capacity.
    :param items: list of [weight, value] pairs.
    :return: full DP table; dp[N][W] is the best achievable value.
    """
    dp = [[0] * (W + 1) for _ in range(N + 1)]
    for idx, (weight, value) in enumerate(items[:N], start=1):
        for cap in range(1, W + 1):
            if weight <= cap:
                # Either skip this item or take it and add its value.
                dp[idx][cap] = max(dp[idx - 1][cap], dp[idx - 1][cap - weight] + value)
            else:
                dp[idx][cap] = dp[idx - 1][cap]
    return dp
def main():
    """Read N and W, then N (weight, value) lines from stdin, and print the
    best achievable value dp[N][W]."""
    N, W = map(int, input().split())
    items = []
    for i in range(N):
        items.append(list(map(int, input().split())))
    # BUG FIX: removed the leftover debug `print(items)` that polluted the
    # expected single-line output, and the dead commented-out print(dp).
    dp = knapsack(N,W,items)
    print(dp[N][W])
if __name__=='__main__':
    main()
| true |
9a23231487f9c4b3c7ad52de71dda7005520116a | Python | MKDevil/Python | /学习/0标准库/lib_string.py | UTF-8 | 2,110 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import string
################################### String constants ###################################
# ascii_lowercase: the lowercase letters a~z
print('小写字母:', string.ascii_lowercase)
# ascii_uppercase: the uppercase letters A~Z
print('大写字母:', string.ascii_uppercase)
# ascii_letters: all letters, a~z + A~Z
print('所有字母:', string.ascii_letters)
# digits: the digits 0~9
print('所有数字:', string.digits)
# hexdigits: hexadecimal digit characters 0~9 + a~f + A~F
print('十六进制元素:', string.hexdigits)
# octdigits: octal digit characters 0~7
print('八进制元素:', string.octdigits)
# punctuation: ASCII punctuation characters
print('打印ASCII字符:', string.punctuation)
# whitespace: all ASCII characters treated as whitespace, including space,
# tab, linefeed, carriage return, formfeed and vertical tab
print('ASCII中的空格字符:', string.whitespace)
# printable: all printable ASCII characters, digits + letters + punctuation + whitespace
print('所有可以被打印的ASCII字符:', string.printable)
################################### Custom string formatting ###################################
# Formatter: a class in the string module with the following public methods
#     format(format_string, *args, **kwargs)
#     vformat(format_string, args, kwargs)
#     parse(format_string)
#     get_field(field_name, args, kwargs)
#     get_value(key, args, kwargs)
#     check_unused_args(used_args, args, kwargs)
#     format_field(value, format_spec)
#     convert_field(value, conversion)
################################### Helper functions ###################################
# capwords(str, sep=''): capitalize the first letter of each word, lowercase
# the rest, and collapse runs of whitespace between words into single spaces.
# sep is the desired separator; it defaults to a space.
strCap = ' ni haoaaaa sdsaaasdsaaa'
print('测试字符串为:', strCap)
print('string.capwords(strCap) = ', string.capwords(strCap))
print('string.capwords(strCap, sep = “a”) = ',
      string.capwords(strCap, sep='a'))
| true |
1ea5227e42395c4db8154e6cf2b33a243d58f7f6 | Python | gumuxiansheng/BFSUCitation | /export.py | UTF-8 | 2,230 | 2.609375 | 3 | [] | no_license | import xlwt
# Parse a Web of Science export (field-tagged text format) into an .xls
# workbook: one sheet of records, one sheet of cited references (CR).
wb = xlwt.Workbook(encoding='utf-8', style_compression=0)
ws = wb.add_sheet('export', cell_overwrite_ok=True)
ws2 = wb.add_sheet('citation', cell_overwrite_ok=True)
# WoS field tags; each becomes a column in the export sheet.
colNames = ['AU', 'TI', 'SO', 'VL', 'IS', 'BP', 'EP', 'DI', 'PD', 'PY', 'AB', 'ZR', 'TC', 'ZB', 'Z8', 'ZS',
            'Z9', 'SN', 'EI', 'UT', 'CR']
for i in range(0, colNames.__len__()):
    ws.write(0, i, colNames[i])
with open('./data/savedrecsRobotics2018.txt', 'r') as f:
    row_i = 0          # current record row in the export sheet
    col_j = 0          # current field column
    row_x = 0          # current row in the citation sheet
    item = ''          # value of the field currently being accumulated
    itemx = ''         # current cited-reference line
    shouldAddRow = False   # a new record (PT tag) has started
    inCR = False           # currently inside a CR (cited references) block
    for row in f:
        # PT marks the start of a new record.
        if row.startswith('PT'):
            inCR = False
            shouldAddRow = True
            continue
        # A recognized field tag: flush the previous field, then start
        # accumulating this one.
        if row.startswith('AU') or row.startswith('TI')\
                or row.startswith('SO') or row.startswith('VL') or row.startswith('IS') or row.startswith('BP')\
                or row.startswith('EP') or row.startswith('DI') or row.startswith('PD') or row.startswith('PY')\
                or row.startswith('AB') or row.startswith('ZR') or row.startswith('TC') or row.startswith('ZB')\
                or row.startswith('Z8') or row.startswith('ZS') or row.startswith('Z9') or row.startswith('SN')\
                or row.startswith('EI') or row.startswith('UT'):
            inCR = False
            if item != '':
                ws.write(row_i, col_j, item)
            if shouldAddRow:
                row_i += 1
                shouldAddRow = False
            col_j = colNames.index(row[0:2])
            item = row[3:]
        # Continuation line (indented): either another cited reference or
        # more text for the current field.
        elif row.startswith(' '):
            if inCR:
                itemx = row[2:]
                itemxs = itemx.split(',')
                # Column 1: cited author, column 2: cited year.
                ws2.write(row_x, 1, itemxs[0])
                ws2.write(row_x, 2, itemxs[1])
                row_x += 1
            else:
                item += row[2:]
        # EF = end of file marker: flush the last pending field.
        elif row.startswith('EF') and item != '':
            inCR = False
            ws.write(row_i, col_j, item)
        # CR starts the cited-references block; first reference is inline.
        elif row.startswith('CR'):
            inCR = True
            itemx = row[3:]
            itemxs = itemx.split(',')
            ws2.write(row_x, 1, itemxs[0])
            ws2.write(row_x, 2, itemxs[1])
            row_x += 1
wb.save('./data/savedrecsRobotics2018.xls')
| true |
PI = 3.141592


class Math:
    """Circle-area helper built around the module constant PI."""

    def solv(self, r):
        # Area of a circle of radius r (r**2 is r squared).
        return PI * (r ** 2)


def sum(a, b):
    """Return a + b.  (Note: intentionally shadows the builtin sum,
    kept for backward compatibility with existing importers.)"""
    return a + b


if __name__ == "__main__":
    print(PI)
    circle = Math()
    print(circle.solv(2))
    print(sum(PI, 4.4))
75ef2bcedebb7524b11ceaecfbcaeb10bd81cffa | Python | AustinAkerley/crypto | /crypto/cryptanalysis/pollard.py | UTF-8 | 625 | 3.328125 | 3 | [] | no_license | # Title: Pollard's p-1 Factorization Algorithm
# Creator: Austin Akerley
# Date Created: 05/17/2020
# Last Editor: Austin Akerley
# Date Last Edited: 05/17/2020
# Associated Book Page Nuber: 139
# INPUT(s) -
# N - type: int, desc: integer to be factored into p and q, note: p*q = N
from crypto.src.fast_power import fast_power
from crypto.src.eea import eea
def pollard(N, bound=1000):
    """Pollard's p-1 factorization.

    Repeatedly raises a = a**j (mod N) for j = 2, 3, ..., bound-1 and checks
    gcd(a - 1, N) for a nontrivial factor.

    :param N: integer to factor.
    :param bound: exclusive upper limit for the exponent loop; generalizes
        the previously hard-coded 1000 (default keeps old behavior).
    :return: a nontrivial factor p of N, or None if none was found.
    """
    a = 2
    p = None
    for j in range(2, bound):
        a = fast_power(a, j, N)
        d = eea(a - 1, N).get("gcd")
        # A gcd strictly between 1 and N is a nontrivial factor.
        if 1 < d < N:
            p = d
            break
    return p
# OUTPUT - type int
# p - type: int, desc: One of the factors of N
| true |
1f18c885822ba15d6542e386f42f1e1a7627905f | Python | cchan19/region_classification | /work/feature_extracting/Basic_feature/Code_Basic_feature_1/feature.py | UTF-8 | 68,772 | 2.75 | 3 | [] | no_license | import time
import numpy as np
import sys
import datetime
import pandas as pd
import os
from Config import config
# Dictionary lookups replace repeated type conversions, saving some compute.
date2position = {}      # yyyymmdd int -> [day-of-week index, week index]
datestr2dateint = {}    # "yyyymmdd" -> yyyymmdd int
str2int = {}            # "00".."23" -> hour int
date2int = {}           # "yyyymmdd" -> day offset from 2018-10-01
for i in range(24):
    str2int[str(i).zfill(2)] = i
# Visit records start on 2018-10-01 and span 182 days in total.
# Lay the dates out calendar-style (7 columns of weeks).
for i in range(182):
    date = datetime.date(day=1, month=10, year=2018) + datetime.timedelta(days=i)
    date_int = int(date.__str__().replace("-", ""))
    date2position[date_int] = [i % 7, i // 7]
    date2int[str(date_int)] = i
    datestr2dateint[str(date_int)] = date_int
def get_statistic_variable(tmp):
    """Eight summary statistics of *tmp* (flattened): sum, mean, std, max,
    min and the 25th/50th/75th percentiles.  Empty input yields eight zeros."""
    values = np.array(tmp).flatten()
    if len(values) == 0:
        return list(np.zeros((8,)))
    stats = [np.sum(values), values.mean(), values.std(), values.max(), values.min()]
    stats += list(np.percentile(values, [25, 50, 75]))
    return stats  # shape = (8, )
def relative_ratio(A, B, ep=1):
    """Elementwise smoothed ratio (A + ep) / (B + ep), returned as a list."""
    numerator = np.array(A) + ep
    denominator = np.array(B) + ep
    return list(numerator / denominator)
def get_fine_feature_cishu(data):
    """Holiday/weekday visit-count features from a (7, 26, hours) cube.

    Axes are presumably day-of-week x week-index x hour-window (see the
    date2position layout built at module level — TODO confirm).  Week
    indices reference fixed holidays: 0 = National Day week, 12/13 around
    New Year's Day, 16-17 chunyun, 18-20 Chinese New Year.
    """
    # Features still to add: 1) the busiest hour per day on ordinary days
    # and holidays 2) windows 8-17h, 17-24h, 0-8h.
    # assert data.shape == (7, 26, 24)
    # data shape: (7, 26, 24), (7, 26, 8)
    fr24_to_num = data.shape[-1]
    feature = []
    feature += [data.mean(), data.std()]
    # National Day week: 1. daily average count 2. mean 3. std
    guoqing_day = np.mean(data[:, 0, :], axis=-1)
    assert len(guoqing_day) == 7
    feature += list(guoqing_day)
    feature += [guoqing_day.mean(), guoqing_day.std()]
    feature += [(guoqing_day.mean() + 1) / (data.mean()+ 1), (guoqing_day.std() + 1) / (data.std() + 1)]
    feature += list(np.argmax(data[:, 0, :], axis=-1)) # busiest hour of each day
    feature += list(np.argmin(data[:, 0, :], axis=-1)) # least busy hour of each day
    assert len(guoqing_day) == 7
    # Weekdays vs. weekend: average hourly profile
    hour_24_work = np.array([np.sum(data[:5, :, i]) / 26 / 5 for i in range(fr24_to_num)])
    hour_24_rest = np.array([np.sum(data[5:, :, i]) / 26 / 2 for i in range(fr24_to_num)])
    assert len(hour_24_work) == fr24_to_num
    assert len(hour_24_rest) == fr24_to_num
    feature += list((hour_24_work + 1) / (data.mean() + 1))
    feature += list((hour_24_rest + 1) / (data.mean() + 1))
    feature += [(hour_24_work.mean() + 1) / (data.mean() + 1), (hour_24_work.std() + 1) / (data.std() + 1),
                (hour_24_rest.mean() + 1) / (data.mean() + 1), (hour_24_rest.std() + 1) / (data.std() + 1)]
    # Hour-over-hour ratios (next hour / current hour), weekday and weekend
    if fr24_to_num==24:
        hour_24_work_ratio = np.array([(hour_24_work[(i + 1) % 24] + 1) / (hour_24_work[i] + 1) for i in range(24)])
        hour_24_rest_ratio = np.array([(hour_24_rest[(i + 1) % 24] + 1) / (hour_24_rest[i] + 1) for i in range(24)])
        feature += list(hour_24_work_ratio)
        feature += list(hour_24_rest_ratio)
        # weekend-to-weekday ratio
        feature += list(hour_24_rest_ratio / (hour_24_work_ratio + 0.1))
    # Per-day averages, weekdays and weekend
    day_work = np.array([np.sum(data[i, :, :]) / 26 / fr24_to_num for i in range(5)])
    day_rest = np.array([np.sum(data[i, :, :]) / 26 / fr24_to_num for i in range(5, 7)])
    assert len(day_work) == 5
    assert len(day_rest) == 2
    feature += list(day_work)
    feature += list(day_rest)
    feature += [(day_work.mean() + 1) / (data.mean() + 1), (day_work.std() + 1) / (data.std() + 1),
                (day_rest.mean() + 1) / (data.mean() + 1), (day_rest.std() + 1) / (data.std() + 1)]
    # Chinese New Year (weeks 18-20)
    guonian_day = list(np.array([np.sum(data[i, 18, :]) / fr24_to_num for i in range(7)])) + list(
        np.array([np.sum(data[i, 19, :]) / fr24_to_num for i in range(7)])) + list(
        np.array([np.sum(data[i, 20, :]) / fr24_to_num for i in range(3)]))
    guonian_day = np.array(guonian_day)
    guonian_hour_24_chu_1_7 = np.array([np.sum(data[:, 18, i]) / 14 for i in range(fr24_to_num)])
    guonian_hour_24_chu_8_15 = np.array([np.sum(data[:, 19, i]) / 14 for i in range(fr24_to_num)])
    assert len(guonian_hour_24_chu_1_7) == fr24_to_num
    feature += list((guonian_day + 1) / (data.mean() + 1))
    feature += [(guonian_day.mean() + 1) / (data.mean() + 1), (guonian_day.std() + 1) / (data.std() + 1)]
    feature += list((guonian_hour_24_chu_1_7 + 1) / (data.mean() + 1))
    feature += list((guonian_hour_24_chu_8_15 + 1) / (data.mean() + 1))
    feature += [(guonian_hour_24_chu_1_7.mean() + 1) / (data.mean() + 1), (guonian_hour_24_chu_1_7.std() + 1) / (data.std() + 1)]
    feature += [(guonian_hour_24_chu_8_15.mean() + 1) / (data.mean() + 1), (guonian_hour_24_chu_8_15.std() + 1) / (data.std() + 1)]
    if fr24_to_num==24:
        guonian_hour_24_chu_1_7_ratio = np.array(
            [(guonian_hour_24_chu_1_7[(i + 1) % 24] + 1) / (guonian_hour_24_chu_1_7[i] + 1) for i in range(24)])
        guonian_hour_24_chu_8_15_ratio = np.array(
            [(guonian_hour_24_chu_8_15[(i + 1) % 24] + 1) / (guonian_hour_24_chu_8_15[i] + 1) for i in range(24)])
        guonian_hour_24_ratio_chu_1_7_relative_work = (guonian_hour_24_chu_1_7 + 1) / (hour_24_work + 1)
        guonian_hour_24_ratio_chu_8_15_relative_work = (guonian_hour_24_chu_8_15 + 1) / (hour_24_work + 1)
        feature += list(guonian_hour_24_chu_1_7_ratio)
        feature += list(guonian_hour_24_chu_8_15_ratio)
        feature += list(guonian_hour_24_ratio_chu_1_7_relative_work)
        feature += list(guonian_hour_24_ratio_chu_8_15_relative_work)
    # Spring Festival travel rush (chunyun, weeks 16-17)
    chunyun_day = list(np.array([np.sum(data[i, 16, :]) / fr24_to_num for i in range(7)])) + list(
        np.array([np.sum(data[i, 17, :]) / fr24_to_num for i in range(7)]))
    chunyun_day = np.array(chunyun_day)
    if fr24_to_num ==24:
        chunyun_hour_24 = np.array([np.sum(data[:, 16:18, i]) / 14 for i in range(fr24_to_num)])
        chunyun_hour_24_ratio = np.array(
            [(chunyun_hour_24[(i + 1) % 24] + 1) / (chunyun_hour_24[i] + 1) for i in range(24)])
        chunyun_hour_24_relative_work = (chunyun_hour_24 + 1) / (hour_24_work + 1)
        feature += list((chunyun_hour_24 + 1)/ (data.mean() + 1))
        feature += list(chunyun_hour_24_ratio)
        feature += list(chunyun_hour_24_relative_work)
    feature += list((chunyun_day + 1) / (data.mean() + 1))
    feature += [(chunyun_day.mean() + 1)/ (data.mean() + 1), (chunyun_day.std() + 1) / (data.std() + 1)]
    # guonian_day_danian30_chu6 = np.array([np.sum(data[i, 18, :])/24 for i in range(7)])
    # guonian_day_chu6_zhengyue15 = np.array([np.sum(data[i, 19, :])/24 for i in range(7)])
    if fr24_to_num == 24:
        guonian_hour_24_danian30_chu6 = np.array([np.sum(data[:, 18, i]) / 7 for i in range(fr24_to_num)])
        guonian_hour_24_chu6_zhengyue15 = np.array([np.sum(data[:, 19, i]) / 7 for i in range(fr24_to_num)])
        assert len(guonian_hour_24_danian30_chu6) == 24
        feature += list(guonian_hour_24_danian30_chu6)
        feature += [(guonian_hour_24_danian30_chu6.mean() + 1) / (data.mean() + 1), (guonian_hour_24_danian30_chu6.std() + 1) / (data.std() + 1)]
        assert len(guonian_hour_24_chu6_zhengyue15) == 24
        feature += list(guonian_hour_24_chu6_zhengyue15)
        feature += [(guonian_hour_24_chu6_zhengyue15.mean() +1) / (data.mean() + 1),
                    (guonian_hour_24_chu6_zhengyue15.std() + 1) / (data.std() + 1)]
    # New Year's Day (around weeks 12-13)
    yuandan_day = np.array(
        [data[5, 12, :].mean(), data[6, 12, :].mean(), data[0, 13, :].mean(), data[1, 13, :].mean()]) / (data.mean() + 1)
    yuandan_hour_24 = (data[6, 12, :] + data[0, 13, :] + data[1, 13, :]) / 3 / (data.mean() + 1)
    feature += list(yuandan_day)
    feature += list(yuandan_hour_24)
    # Singles' Day (Nov 11), Christmas, Valentine's Day
    jieri_day = np.array([data[6, 5, :].mean(), data[1, 12, :].mean(), data[3, 19, :].mean()]) / (data.mean() + 1)
    jieri_hour_24 = (data[6, 5, :] + data[1, 12, :] + data[3, 19, :]) / 3 / (data.mean() + 1)
    feature += list(jieri_day)
    feature += list(jieri_hour_24)
    return feature
def get_feature_cishu(data):
    """Concatenate fine-grained features over several hour-of-day windows.

    The window list reproduces the original call order exactly, ending
    with the full cube (slice(None) selects every hour).
    """
    windows = (
        slice(0, 8),
        slice(11, 14),
        slice(8, 17),
        slice(17, None),
        slice(8, 11),
        slice(14, 17),
        slice(17, 20),
        slice(20, 24),
        slice(None),
    )
    feature = []
    for window in windows:
        feature += get_fine_feature_cishu(data[:, :, window])
    return feature
def get_feature_reshu(data):
    """Build head-count (renshu) features from a (7, 26) weekday-by-week matrix.

    The features are appended in a fixed order, so the position of every
    value in the returned list is significant; do not reorder any block.
    Relies on module-level helpers ``get_statistic_variable`` and
    ``relative_ratio`` and on ``np`` (numpy).

    NOTE(review): the column indices below (0 = National Day week,
    16-17 = travel rush, 18-19 = New Year, ...) are presumed to follow the
    week numbering used elsewhere in this file (``date2position``) — confirm.
    """
    assert data.shape == (7, 26)
    feature = []
    # Raw matrix plus its global statistics.
    feature += list(data.flatten())
    feature += get_statistic_variable(data)
    # National Day week (column 0): per-weekday counts, statistics,
    # ratio against the global statistics, and argmax/argmin weekday.
    guoqing_day = data[:, 0]
    assert len(guoqing_day) == 7
    feature += get_statistic_variable(guoqing_day)
    feature += list(relative_ratio(get_statistic_variable(guoqing_day), get_statistic_variable(data), ep=1))
    feature += list([np.argmax(guoqing_day), np.argmin(guoqing_day)])
    # Mean head-count per weekday (average across the 26 weeks).
    day_renshu = np.mean(data, axis=-1)
    feature += list(day_renshu)
    feature += list([np.argmax(day_renshu), np.argmin(day_renshu)])
    # Mean head-count per week (average across the 7 weekdays).
    week_renshu = np.mean(data, axis=0)
    feature += list(week_renshu)
    feature += list([np.argmax(week_renshu), np.argmin(week_renshu)])
    # Workdays (rows 0-4) vs rest days (rows 5-6), normalized by the
    # global mean (+1 guards against division by zero).
    day_work = np.array([np.sum(data[i, :, ]) / 26 for i in range(5)])
    day_rest = np.array([np.sum(data[i, :, ]) / 26 for i in range(5, 7)])
    assert len(day_work) == 5
    assert len(day_rest) == 2
    feature += [day_work.mean() / (data.mean() + 1), day_rest.mean() / (data.mean() + 1), ]
    # Chinese New Year period: columns 18-19 fully, plus the first 3
    # weekdays of column 20.
    guonian_day = list(data[:, 18]) + list(data[:, 19]) + list(data[:3, 20])
    guonian_day = np.array(guonian_day)
    feature += list([np.argmax(data[:, 18]), np.argmin(data[:, 18])])
    feature += list([np.argmax(data[:, 19]), np.argmin(data[:, 19])])
    feature += list(guonian_day / (data.mean() + 1))
    feature += [guonian_day.mean() / (data.mean() + 1), guonian_day.std() / (data.std() + 1)]
    # Spring Festival travel rush: columns 16-17.
    chunyun_day = list(data[:, 16]) + list(data[:, 17])
    chunyun_day = np.array(chunyun_day)
    feature += list([np.argmax(data[:, 16]), np.argmin(data[:, 16])])
    feature += list([np.argmax(data[:, 17]), np.argmin(data[:, 17])])
    feature += list(chunyun_day / (data.mean() + 1))
    feature += [chunyun_day.mean() / (data.mean() + 1), chunyun_day.std() / (data.std() + 1)]
    # guonian_day_danian30_chu6 = np.array([np.sum(data[i, 18, :])/24 for i in range(7)])
    # guonian_day_chu6_zhengyue15 = np.array([np.sum(data[i, 19, :])/24 for i in range(7)])
    # New Year's Day (Jan 1) and the surrounding days: specific
    # (weekday, week) cells, raw and mean-normalized.
    yuandan_day = np.array([data[5, 12], data[6, 12], data[0, 13], data[1, 13]])
    yuandan_day_relative = np.array([data[5, 12], data[6, 12], data[0, 13], data[1, 13]]) / (data.mean() + 1)
    feature += list(yuandan_day) + list(yuandan_day_relative)
    # Singles' Day (Nov 11), Christmas, Valentine's Day cells,
    # raw and mean-normalized.
    jieri_day = np.array([data[6, 5], data[1, 12], data[3, 19]])
    jieri_day_relative = np.array([data[6, 5], data[1, 12], data[3, 19]]) / (data.mean() + 1)
    feature += list(jieri_day) + list(jieri_day_relative)
    return feature
# def get_feature_1(table):
# # 用户时间纵向特征,看用户在时间轴上的变化
#
# # 编号 (星期i, 星期j) : k
# # 可以考虑下将礼拜1到5进行合并!!!! 能减少几百个特征
# # dict_day2day = {(1, 2): 0, (2, 3): 1, (3, 4): 2, (4, 5): 3, (5, 6): 4, (6, 7): 5,
# # (7, 1): 6, (5, 1): 7, (6, 1): 8}
# dict_day2day = {(1, 2): 0, (2, 3): 0, (3, 4): 0, (4, 5): 0, (5, 6): 1, (6, 7): 2,
# (7, 1): 3, (5, 1): 4, (6, 1): 5}
# dict_num = 6
#
# strings = table[1]
#
# # shape = (1, )
# # ok到FEATURE
# Num_users = len(strings) # 统计用户人数
#
# # shape = (用户人数, )
# # ok到FEATURE # 天 小时 每天几小时 每天小时的std 做多一天几小时, 做少一天几小时, 25, 50 , 75, 分位数
# Num_users_day_hour_information = np.zeros(
# (Num_users, 9)) # 10 100 10 0.5 16 3
#
# # Num_users_day = np.zeros((Num_users, )) # 统计每个用户访问天数
# # Num_users_hour = np.zeros((Num_users, )) # 统计每个用户访问小时数
# # Num_users_hour_day_mean = [] #统计每个用户每天工作的小时数的均值
# # Num_users_hour_day_std = [] #统计每个用户每天工作的小时数的方差
# # Num_users_hour_day_per_25 = [] #统计每个用户每天工作的小时数的分位数25%
# # Num_users_hour_day_per_50 = [] #统计每个用户每天工作的小时数的中位数
# # Num_users_hour_day_per_75 = [] #统计每个用户每天工作的小时数的分位数75%
# # Num_users_hour_day_max = [] #统计每个用户每天工作的小时数的max
#
#
#
# # ok到FEATURE # 总共的差值 差值平均值 插值的std 最大插值,最小插值, 25, 50 , 75 分位数
# Num_users_chazhi_information = np.zeros((Num_users, 8)) # 跨度100天 2.5天 0.5天 100天
#
# # Num_users_chazhi_day_mean = [] # 统计每个用户差值的均值
# # Num_users_chazhi_day_std = [] # 统计每个用户差值的方差
# # Num_users_chazhi_day_per_25 = [] # 统计每个用户差值的分位数25%
# # Num_users_chazhi_day_per_50 = [] # 统计每个用户差值的中位数
# # Num_users_chazhi_day_per_75 = [] # 统计每个用户差值的分位数75%
# # Num_users_chazhi_day_max = [] # 统计每个用户差值的max
#
#
#
# # shape = (>用户人数, )
# # ok到FEATURE
# Num_chazhi_Day = [] # 统计相邻天数的差值
# Num_users_hour_day = [] # 统计每天工作的小时数
#
# # ok到FEATURE
# Num_users_aoye = np.zeros((Num_users,)) # 统计用户熬夜个数, 7点以后
# Num_users_zaoqi = np.zeros((Num_users,)) # 统计用户早起个数, 6点之前
# Num_users_tongxiao = np.zeros((Num_users,)) # 统计用户通宵个数, 前一天晚上7点以后, 到次日6点之前
#
# # ok到FEATURE
# Num_day2day = np.zeros((dict_num, Num_users))
#
# # Num_fri2mon = [] #统计每个用户礼拜5到礼拜1的差值个数
# # Num_sat2mon = [] #统计每个用户礼拜6到礼拜1的差值个数
# # Num_sun2mon = [] #统计每个用户礼拜7到礼拜1的差值个数
# # Num_mon2tue = [] #统计每个用户礼拜1到礼拜2的差值个数
# # Num_tue2wen = [] #统计每个用户礼拜2到礼拜3的差值个数
# # Num_wen2thr = [] #统计每个用户礼拜3到礼拜4的差值个数
# # Num_thr2fri = [] #统计每个用户礼拜4到礼拜5的差值个数
# # Num_fri2sat = [] #统计每个用户礼拜5到礼拜6的差值个数
# # Num_sat2sun = [] #统计每个用户礼拜6到礼拜7的差值个数
#
#
# # shape = (24, ) 24 * 12 特征
# # ok到FEATURE
#
# # Num_day2day_hour_pre = np.zeros((dict_num, 24)) # dict_num = 6
# # Num_day2day_hour_next = np.zeros((dict_num, 24))
#
#
# # Num_fri2mon_24hour_for_pre = [] #统计礼拜5到礼拜1的差值个数, 且礼拜5最晚几点走
# # Num_fri2mon_24hour_for_next = [] #统计礼拜5到礼拜1的差值个数, 且礼拜1几点到
#
# # Num_sat2mon_24hour_for_pre = [] #统计礼拜6到礼拜1的差值个数, 且礼拜6最晚几点走
# # Num_sat2mon_24hour_for_next = [] #统计礼拜6到礼拜1的差值个数, 且礼拜1几点到
#
# # Num_sun2mon_24hour_for_pre = [] #统计礼拜7到礼拜1的差值个数, 且礼拜7最晚几点走
# # Num_sun2mon_24hour_for_next = [] #统计礼拜7到礼拜1的差值个数, 且礼拜1几点到
#
# # Num_mon2tue_24hour_for_pre = [] #统计礼拜1到礼拜2的差值个数, 且礼拜1最晚几点走
# # Num_mon2tue_24hour_for_next = [] #统计礼拜1到礼拜2的差值个数, 且礼拜2几点到
#
# # Num_tue2wen_24hour_for_pre = [] #统计礼拜2到礼拜3的差值个数, 且礼拜2最晚几点走
# # Num_tue2wen_24hour_for_next = [] #统计礼拜2到礼拜3的差值个数, 且礼拜3几点到
#
# # Num_wen2thr_24hour_for_pre = [] #统计礼拜3到礼拜4的差值个数, 且礼拜3最晚几点走
# # Num_wen2thr_24hour_for_next = [] #统计礼拜3到礼拜4的差值个数, 且礼拜4几点到
#
# # Num_thr2fri_24hour_for_pre = [] #统计礼拜4到礼拜5的差值个数, 且礼拜4最晚几点走
# # Num_thr2fri_24hour_for_next = [] #统计礼拜4到礼拜5的差值个数, 且礼拜5几点到
#
# # Num_fri2sat_24hour_for_pre = [] #统计礼拜5到礼拜6的差值个数, 且礼拜5最晚几点走
# # Num_fri2sat_24hour_for_next = [] #统计礼拜5到礼拜6的差值个数, 且礼拜6几点到
#
# # Num_sat2sun_24hour_for_pre = [] #统计礼拜6到礼拜7的差值个数, 且礼拜6最晚几点走
# # Num_sat2sun_24hour_for_next = [] #统计礼拜6到礼拜7的差值个数, 且礼拜7几点到
#
#
# init_cishu = np.zeros((7, 26, 24)) # 统计26周每天每小时的用户人数
# init_renshu = np.zeros((7, 26)) #
#
# zaoshang_hour_workday_dao = []
# wanshang_hour_workday_zou = []
# zaoshang_hour_restday_dao = []
# wanshang_hour_restday_zou = []
# zaoshang_hour_restday_dao_sat = []
# wanshang_hour_restday_zou_sat = []
# zaoshang_hour_restday_dao_sun = []
# wanshang_hour_restday_zou_sun = []
# work_day_kuadu = []
# rest_day_kuadu = []
# sat_day_kuadu = []
# sun_day_kuadu = []
#
# # print('\n 用户数目:', len(strings))
#
# for user_idx, string in enumerate(strings):
# temp = [[item[0:8], item[9:].split("|")] for item in string.split(',')]
#
# cnt_day, cnt_hour = len(temp), 0 # 统计工作天数,和工作小时数
# tmp = np.zeros((cnt_day,))
# for i, (date, visit_lst) in enumerate(temp):
# tmp[i] = len(visit_lst)
# cnt_hour += len(visit_lst)
# Num_users_hour_day.append(len(visit_lst))
#
#
# # 天 小时 每天几小时 每天小时的std 做多一天几小时 最少多少小时
# Num_users_day_hour_information[user_idx, :] = np.array(
# [cnt_day, cnt_hour, tmp.mean(), tmp.std(), tmp.max(), tmp.min()] + list(np.percentile(tmp, [25, 50, 75])))
# # Num_users_day[user_idx] = cnt_day
# # Num_users_hour[user_idx] = np.sum(tmp)
#
#
# jiange_day = []
# jiange_flag = 0
# for date, visit_lst in temp:
# x, y = date2position[datestr2dateint[date]]
# init_renshu[x][y] += 1 # 统计每小时的到访的总人数 7 * 26
# for visit in visit_lst: init_cishu[x][y][str2int[visit]] += 1 # 统计每天到访的总人数 7 * 26 * 24
#
# zaoqi_hour = str2int[visit_lst[0]]
# wanshang_hour = str2int[visit_lst[-1]]
#
# if x < 5: # workday
# zaoshang_hour_workday_dao.append(zaoqi_hour)
# wanshang_hour_workday_zou.append(wanshang_hour)
# work_day_kuadu.append(wanshang_hour - zaoqi_hour)
# if x >= 5:
# zaoshang_hour_restday_dao.append(zaoqi_hour)
# wanshang_hour_restday_zou.append(wanshang_hour)
# rest_day_kuadu.append(wanshang_hour - zaoqi_hour)
# if x == 5:
# zaoshang_hour_restday_dao_sat.append(zaoqi_hour)
# wanshang_hour_restday_zou_sat.append(wanshang_hour)
# wanshang_hour_restday_zou_sat.append(wanshang_hour - zaoqi_hour)
# else:
# zaoshang_hour_restday_dao_sun.append(zaoqi_hour)
# wanshang_hour_restday_zou_sun.append(wanshang_hour)
# sun_day_kuadu.append(wanshang_hour - zaoqi_hour)
#
# if zaoqi_hour <= 6: # 早起
# Num_users_zaoqi[user_idx] += 1
#
# if jiange_flag: # 涉及到插值,昨天和今天的关系
# jiange_flag = 1
#
# day_cha = date2int[date] - date2int[pre_date]
# jiange_day.append(day_cha)
#
# if day_cha == 1:
# idx = dict_day2day[(pre_x + 1, x + 1)] # 前一天, 后一天
# Num_day2day[idx, user_idx] += 1 # (6, 用户数)
# # Num_day2day_hour_pre[idx, pre_aoye_hour] += 1 # 前一天 几点走
# # Num_day2day_hour_next[idx, zaoqi_hour] += 1 # 后一天 几点到
#
# if zaoqi_hour <= 6 and pre_aoye_hour >= 7: # 通宵
# Num_users_tongxiao[user_idx] += 1
#
# if day_cha == 2 or day_cha == 3:
# if (pre_x + 1, x + 1) == (5, 1) or (pre_x + 1, x + 1) == (6, 1): # 礼拜五(六)晚上走 礼拜1早上几点到
# idx = dict_day2day[(pre_x + 1, x + 1)] # 前一天, 后一天
# # Num_day2day_hour_pre[idx, pre_aoye_hour] += 1
# # Num_day2day_hour_next[idx, zaoqi_hour] += 1
#
# pre_date = date
# pre_x, pre_y = x, y
# pre_aoye_hour = str2int[visit_lst[-1]]
#
# if pre_aoye_hour >= 7: # 熬夜
# Num_users_aoye[user_idx] += 1
#
# Num_chazhi_Day += jiange_day
# jiange_day = np.array(jiange_day)
#
# if len(jiange_day) > 0:
# Num_users_chazhi_information[user_idx, :] = np.array(
# [np.sum(jiange_day), jiange_day.mean(), jiange_day.std(), jiange_day.max(), jiange_day.min()] + list(
# np.percentile(jiange_day, [25, 50, 75])))
#
# Num_chazhi_Day = np.array(Num_chazhi_Day) # 统计相邻天数的差值
# Num_users_hour_day = np.array(Num_users_hour_day) # 统计每天工作的小时数
#
# # print(Jiange_Day)
#
# # 特征个数=1
# FEATURE = [Num_users]
#
# # 特征个数= 1 + 18= 19
# FEATURE += list(np.mean(Num_users_day_hour_information, axis=0)) + list(
# np.std(Num_users_day_hour_information, axis=0))
# # Num_users_day_hour_information: (Num_users, 9)
# # print(len(FEATURE))
#
# # 特征个数= 19 + 8 * 3 = 43
# FEATURE += [np.sum(Num_users_aoye), Num_users_aoye.mean(), Num_users_aoye.std(), Num_users_aoye.max(),
# Num_users_aoye.min()] + list(np.percentile(Num_users_aoye, [25, 50, 75])) # 统计用户熬夜个数, 7点以后
# FEATURE += [np.sum(Num_users_zaoqi), Num_users_zaoqi.mean(), Num_users_zaoqi.std(), Num_users_zaoqi.max(),
# Num_users_zaoqi.min()] + list(np.percentile(Num_users_aoye, [25, 50, 75])) # 统计用户早起个数, 6点之前
# FEATURE += [np.sum(Num_users_tongxiao), Num_users_tongxiao.mean(), Num_users_tongxiao.std(),
# Num_users_tongxiao.max(), Num_users_tongxiao.min()] + list(
# np.percentile(Num_users_tongxiao, [25, 50, 75])) # 统计用户通宵个数, 前一天晚上7点以后, 到次日6点之前
# # print(len(FEATURE))
#
# # 特征个数 = 43 + 12 + 16 = 71
# FEATURE += list(np.mean(Num_day2day, axis=1)) + list(
# np.std(Num_day2day, axis=1)) # Num_day2day = np.zeros((9, Num_users))
# FEATURE += list(np.mean(Num_users_chazhi_information, axis=0)) + list(
# np.std(Num_users_chazhi_information, axis=0)) # Num_users_chazhi_information = np.zeros((Num_users, 8))
# # print(len(FEATURE))
#
# # 特征个数 = 71 + 12 * 24 = 359
# # FEATURE += list(Num_day2day_hour_pre.flatten()) + list(Num_day2day_hour_next.flatten()) # (6, 24)
# # print(len(FEATURE))
#
#
# # 特征个数 = 71 + 16 = 87
# if len(Num_chazhi_Day) > 0:
# FEATURE += [np.sum(Num_chazhi_Day), Num_chazhi_Day.mean(), Num_chazhi_Day.std(), Num_chazhi_Day.max(),
# Num_chazhi_Day.min()] + list(np.percentile(Num_chazhi_Day, [25, 50, 75]))
# else:
# FEATURE += list(np.zeros((8,)))
# if len(Num_users_hour_day) > 0:
# FEATURE += [np.sum(Num_users_hour_day), Num_users_hour_day.mean(), Num_users_hour_day.std(),
# Num_users_hour_day.max(), Num_users_hour_day.min()] + list(
# np.percentile(Num_users_hour_day, [25, 50, 75])) # np.sum(Num_users_hour_day) 之前算cnt_hour已经算过
# else:
# FEATURE += list(np.zeros((8,)))
#
# # print(len(FEATURE))
# # 特征个数 = 87 + 12 * 8 = 183
#
# for tmp in [zaoshang_hour_workday_dao, wanshang_hour_workday_zou, zaoshang_hour_restday_dao,
# wanshang_hour_restday_zou,
# zaoshang_hour_restday_dao_sat, wanshang_hour_restday_zou_sat,
# zaoshang_hour_restday_dao_sun, wanshang_hour_restday_zou_sun,
# work_day_kuadu, rest_day_kuadu, sat_day_kuadu, sun_day_kuadu]:
# tmp = np.array(tmp)
# if len(tmp) > 0:
# FEATURE += [np.sum(tmp), tmp.mean(), tmp.std(), tmp.max(), tmp.min()] + list(
# np.percentile(tmp, [25, 50, 75]))
# else:
# FEATURE += list(np.zeros((8,)))
#
# FEATURE = np.array(FEATURE)
# # print('feature num =', len(FEATURE))
# # print('FEATURE max:', FEATURE.max())
#
# assert len(FEATURE) == 183
# return init_cishu, init_renshu, FEATURE
# def get_feature_1_1(table):
# # 用户时间纵向特征,看用户在时间轴上的变化, 与上面的区别,进行相对归一化!!!
#
# # 编号 (星期i, 星期j) : k
# # 可以考虑下将礼拜1到5进行合并!!!! 能减少几百个特征
# # dict_day2day = {(1, 2): 0, (2, 3): 1, (3, 4): 2, (4, 5): 3, (5, 6): 4, (6, 7): 5,
# # (7, 1): 6, (5, 1): 7, (6, 1): 8}
#
# dict_day2day = {(1, 2): 0, (2, 3): 0, (3, 4): 0, (4, 5): 0, (5, 6): 1, (6, 7): 2,
# (7, 1): 3, (5, 1): 4, (6, 1): 5}
#
# dict_num = 6
#
# strings = table[1]
#
# # shape = (1, )
# Num_users = len(strings) # 统计用户人数
#
#
# # shape = (用户人数, )
# # 天 小时 每天几小时 每天小时的std 做多一天几小时, 做少一天几小时, 25, 50 , 75, 分位数
# Num_users_day_hour_information = np.zeros((Num_users, 9)) # 10 100 10 0.5 16 3
# # Num_users_day = np.zeros((Num_users, )) # 统计每个用户访问天数
# # Num_users_hour = np.zeros((Num_users, )) # 统计每个用户访问小时数
# # Num_users_hour_day_mean = [] #统计每个用户每天工作的小时数的均值
# # Num_users_hour_day_std = [] #统计每个用户每天工作的小时数的方差
# # Num_users_hour_day_per_25 = [] #统计每个用户每天工作的小时数的分位数25%
# # Num_users_hour_day_per_50 = [] #统计每个用户每天工作的小时数的中位数
# # Num_users_hour_day_per_75 = [] #统计每个用户每天工作的小时数的分位数75%
# # Num_users_hour_day_max = [] #统计每个用户每天工作的小时数的max
#
#
#
# # ok到FEATURE # 总共的差值 差值平均值 插值的std 最大插值,最小插值, 25, 50 , 75 分位数
#
# Num_users_chazhi_information = np.zeros((Num_users, 8)) # 跨度100天 2.5天 0.5天 100天
# # Num_users_chazhi_day_mean = [] # 统计每个用户差值的均值
# # Num_users_chazhi_day_std = [] # 统计每个用户差值的方差
# # Num_users_chazhi_day_per_25 = [] # 统计每个用户差值的分位数25%
# # Num_users_chazhi_day_per_50 = [] # 统计每个用户差值的中位数
# # Num_users_chazhi_day_per_75 = [] # 统计每个用户差值的分位数75%
# # Num_users_chazhi_day_max = [] # 统计每个用户差值的max
#
#
#
# # shape = (>用户人数, )
# # ok到FEATURE
#
# Num_chazhi_Day = [] # 统计相邻天数的差值
# Num_users_hour_day = [] # 统计每天工作的小时数
#
# Num_users_aoye = np.zeros((Num_users,)) # 统计用户熬夜个数, 7点以后
# Num_users_zaoqi = np.zeros((Num_users,)) # 统计用户早起个数, 6点之前
# Num_users_tongxiao = np.zeros((Num_users,)) # 统计用户通宵个数, 前一天晚上7点以后, 到次日6点之前
#
#
# Num_day2day = np.zeros((dict_num, Num_users))
# # Num_fri2mon = [] #统计每个用户礼拜5到礼拜1的差值个数
# # Num_sat2mon = [] #统计每个用户礼拜6到礼拜1的差值个数
# # Num_sun2mon = [] #统计每个用户礼拜7到礼拜1的差值个数
# # Num_mon2tue = [] #统计每个用户礼拜1到礼拜2的差值个数
# # Num_tue2wen = [] #统计每个用户礼拜2到礼拜3的差值个数
# # Num_wen2thr = [] #统计每个用户礼拜3到礼拜4的差值个数
# # Num_thr2fri = [] #统计每个用户礼拜4到礼拜5的差值个数
# # Num_fri2sat = [] #统计每个用户礼拜5到礼拜6的差值个数
# # Num_sat2sun = [] #统计每个用户礼拜6到礼拜7的差值个数
#
#
# # shape = (24, ) 24 * 12 特征
# # ok到FEATURE
#
# Num_day2day_hour_pre_specific = [[] for i in range(dict_num)]
# Num_day2day_hour_next_specific = [[] for i in range(dict_num)]
# # Num_day2day_hour_pre = np.zeros((dict_num, 24)) # dict_num = 6
# # Num_day2day_hour_next = np.zeros((dict_num, 24))
# # Num_fri2mon_24hour_for_pre = [] #统计礼拜5到礼拜1的差值个数, 且礼拜5最晚几点走
# # Num_fri2mon_24hour_for_next = [] #统计礼拜5到礼拜1的差值个数, 且礼拜1几点到
#
# # Num_sat2mon_24hour_for_pre = [] #统计礼拜6到礼拜1的差值个数, 且礼拜6最晚几点走
# # Num_sat2mon_24hour_for_next = [] #统计礼拜6到礼拜1的差值个数, 且礼拜1几点到
#
# # Num_sun2mon_24hour_for_pre = [] #统计礼拜7到礼拜1的差值个数, 且礼拜7最晚几点走
# # Num_sun2mon_24hour_for_next = [] #统计礼拜7到礼拜1的差值个数, 且礼拜1几点到
#
# # Num_mon2tue_24hour_for_pre = [] #统计礼拜1到礼拜2的差值个数, 且礼拜1最晚几点走
# # Num_mon2tue_24hour_for_next = [] #统计礼拜1到礼拜2的差值个数, 且礼拜2几点到
#
# # Num_tue2wen_24hour_for_pre = [] #统计礼拜2到礼拜3的差值个数, 且礼拜2最晚几点走
# # Num_tue2wen_24hour_for_next = [] #统计礼拜2到礼拜3的差值个数, 且礼拜3几点到
#
# # Num_wen2thr_24hour_for_pre = [] #统计礼拜3到礼拜4的差值个数, 且礼拜3最晚几点走
# # Num_wen2thr_24hour_for_next = [] #统计礼拜3到礼拜4的差值个数, 且礼拜4几点到
#
# # Num_thr2fri_24hour_for_pre = [] #统计礼拜4到礼拜5的差值个数, 且礼拜4最晚几点走
# # Num_thr2fri_24hour_for_next = [] #统计礼拜4到礼拜5的差值个数, 且礼拜5几点到
#
# # Num_fri2sat_24hour_for_pre = [] #统计礼拜5到礼拜6的差值个数, 且礼拜5最晚几点走
# # Num_fri2sat_24hour_for_next = [] #统计礼拜5到礼拜6的差值个数, 且礼拜6几点到
#
# # Num_sat2sun_24hour_for_pre = [] #统计礼拜6到礼拜7的差值个数, 且礼拜6最晚几点走
# # Num_sat2sun_24hour_for_next = [] #统计礼拜6到礼拜7的差值个数, 且礼拜7几点到
#
#
# init_cishu = np.zeros((7, 26, 24)) # 统计26周每天每小时的用户人数
# init_renshu = np.zeros((7, 26)) #
#
# zaoshang_hour_workday_dao = []
# wanshang_hour_workday_zou = []
# zaoshang_hour_restday_dao = []
# wanshang_hour_restday_zou = []
# zaoshang_hour_restday_dao_sat = []
# wanshang_hour_restday_zou_sat = []
# zaoshang_hour_restday_dao_sun = []
# wanshang_hour_restday_zou_sun = []
# work_day_kuadu = []
# rest_day_kuadu = []
# sat_day_kuadu = []
# sun_day_kuadu = []
#
# ########################################################################################################
# # 国庆节
#
#
#
# ########################################################################################################
#
# # print('\n 用户数目:', len(strings))
#
# for user_idx, string in enumerate(strings):
# temp = [[item[0:8], item[9:].split("|")] for item in string.split(',')]
#
# cnt_day, cnt_hour = len(temp), 0 # 统计工作天数,和工作小时数
# tmp = np.zeros((cnt_day,))
# for i, (date, visit_lst) in enumerate(temp):
# tmp[i] = len(visit_lst)
# cnt_hour += len(visit_lst)
# Num_users_hour_day.append(len(visit_lst))
#
#
# # 天 小时 每天几小时 每天小时的std 做多一天几小时 最少多少小时
# Num_users_day_hour_information[user_idx, :] = np.array(
# [cnt_day, cnt_hour, tmp.mean(), tmp.std(), tmp.max(), tmp.min()] + list(np.percentile(tmp, [25, 50, 75])))
# # Num_users_day[user_idx] = cnt_day
# # Num_users_hour[user_idx] = np.sum(tmp)
#
#
# jiange_day = []
# jiange_flag = 0
# for date, visit_lst in temp:
# x, y = date2position[datestr2dateint[date]]
# init_renshu[x][y] += 1 # 统计每小时的到访的总人数 7 * 26
# for visit in visit_lst: init_cishu[x][y][str2int[visit]] += 1 # 统计每天到访的总人数 7 * 26 * 24
#
# zaoqi_hour = str2int[visit_lst[0]]
# wanshang_hour = str2int[visit_lst[-1]]
#
# if x < 5: # workday
# zaoshang_hour_workday_dao.append(zaoqi_hour)
# wanshang_hour_workday_zou.append(wanshang_hour)
# work_day_kuadu.append(wanshang_hour - zaoqi_hour)
# if x >= 5:
# zaoshang_hour_restday_dao.append(zaoqi_hour)
# wanshang_hour_restday_zou.append(wanshang_hour)
# rest_day_kuadu.append(wanshang_hour - zaoqi_hour)
# if x == 5:
# zaoshang_hour_restday_dao_sat.append(zaoqi_hour)
# wanshang_hour_restday_zou_sat.append(wanshang_hour)
# wanshang_hour_restday_zou_sat.append(wanshang_hour - zaoqi_hour)
# else:
# zaoshang_hour_restday_dao_sun.append(zaoqi_hour)
# wanshang_hour_restday_zou_sun.append(wanshang_hour)
# sun_day_kuadu.append(wanshang_hour - zaoqi_hour)
#
# if zaoqi_hour <= 6: # 早起
# Num_users_zaoqi[user_idx] += 1
#
# if jiange_flag: # 涉及到插值,昨天和今天的关系
# jiange_flag = 1
#
# day_cha = date2int[date] - date2int[pre_date]
# jiange_day.append(day_cha)
#
# if day_cha == 1:
# idx = dict_day2day[(pre_x + 1, x + 1)] # 前一天, 后一天
# Num_day2day[idx, user_idx] += 1 # (6, 用户数)
# # Num_day2day_hour_pre[idx, pre_aoye_hour] += 1 # 前一天 几点走
# # Num_day2day_hour_next[idx, zaoqi_hour] += 1 # 后一天 几点到
# Num_day2day_hour_pre_specific[idx].append(pre_aoye_hour)
# Num_day2day_hour_next_specific[idx].append(zaoqi_hour)
#
# if zaoqi_hour <= 6 and pre_aoye_hour >= 7: # 通宵
# Num_users_tongxiao[user_idx] += 1
#
# if day_cha == 2 or day_cha == 3:
# if (pre_x + 1, x + 1) == (5, 1) or (pre_x + 1, x + 1) == (6, 1): # 礼拜五(六)晚上走 礼拜1早上几点到
# idx = dict_day2day[(pre_x + 1, x + 1)] # 前一天, 后一天
# # Num_day2day_hour_pre[idx, pre_aoye_hour] += 1
# # Num_day2day_hour_next[idx, zaoqi_hour] += 1
# Num_day2day_hour_pre_specific[idx].append(pre_aoye_hour)
# Num_day2day_hour_next_specific[idx].append(zaoqi_hour)
#
# pre_date = date
# pre_x, pre_y = x, y
# pre_aoye_hour = str2int[visit_lst[-1]]
#
# if pre_aoye_hour >= 7: # 熬夜
# Num_users_aoye[user_idx] += 1
#
# Num_chazhi_Day += jiange_day
# jiange_day = np.array(jiange_day)
# Num_users_chazhi_information[user_idx, :] = np.array(get_statistic_variable(jiange_day))
#
# Num_chazhi_Day = np.array(Num_chazhi_Day) # 统计相邻天数的差值
# Num_users_hour_day = np.array(Num_users_hour_day) # 统计每天工作的小时数
#
# # print(Jiange_Day)
#
# # 特征个数=1
# FEATURE = [Num_users]
#
# # 特征个数= 1 + 18= 19
# FEATURE += list(np.mean(Num_users_day_hour_information, axis=0)) + list(
# np.std(Num_users_day_hour_information, axis=0))
# # Num_users_day_hour_information: (Num_users, 9)
# # print(len(FEATURE))
#
# # 特征个数= 19 + 8 * 3 = 43
# FEATURE += [np.sum(Num_users_aoye), Num_users_aoye.mean(), Num_users_aoye.std(), Num_users_aoye.max(),
# Num_users_aoye.min()] + list(np.percentile(Num_users_aoye, [25, 50, 75])) # 统计用户熬夜个数, 7点以后
# FEATURE += [np.sum(Num_users_zaoqi), Num_users_zaoqi.mean(), Num_users_zaoqi.std(), Num_users_zaoqi.max(),
# Num_users_zaoqi.min()] + list(np.percentile(Num_users_aoye, [25, 50, 75])) # 统计用户早起个数, 6点之前
#
# FEATURE += [np.sum(Num_users_tongxiao), Num_users_tongxiao.mean(), Num_users_tongxiao.std(),
# Num_users_tongxiao.max(), Num_users_tongxiao.min()] + list(
# np.percentile(Num_users_tongxiao, [25, 50, 75])) # 统计用户通宵个数, 前一天晚上7点以后, 到次日6点之前
# # print(len(FEATURE))
#
# # 特征个数 = 43 + 12 + 16 = 71
# FEATURE += list(np.mean(Num_day2day, axis=1)) + list(np.std(Num_day2day, axis=1)) # Num_day2day = np.zeros((9, Num_users))
# FEATURE += list(np.mean(Num_users_chazhi_information, axis=0)) + list(
# np.std(Num_users_chazhi_information, axis=0)) # Num_users_chazhi_information = np.zeros((Num_users, 8))
# # print(len(FEATURE))
#
# # 特征个数 = 71 + 12 * 24 = 359
# # FEATURE += list(Num_day2day_hour_pre.flatten()) + list(Num_day2day_hour_next.flatten()) # (6, 24)
# for i in range(dict_num):
# FEATURE += get_statistic_variable(Num_day2day_hour_pre_specific[i]) # 8
# FEATURE += get_statistic_variable(Num_day2day_hour_next_specific[i]) # 8
# # print(len(FEATURE))
#
# # 特征个数 = 359 + 16 = 375
# FEATURE += get_statistic_variable(Num_chazhi_Day)
# FEATURE += get_statistic_variable(Num_users_hour_day)
#
# # print(len(FEATURE))
# # 特征个数 = 375 + 12 * 8 = 471 下面的是重要的性质 importance比较大
# for tmp in [zaoshang_hour_workday_dao, wanshang_hour_workday_zou, zaoshang_hour_restday_dao,
# wanshang_hour_restday_zou,
# zaoshang_hour_restday_dao_sat, wanshang_hour_restday_zou_sat,
# zaoshang_hour_restday_dao_sun, wanshang_hour_restday_zou_sun,
# work_day_kuadu, rest_day_kuadu, sat_day_kuadu, sun_day_kuadu]:
# tmp = np.array(tmp)
# FEATURE += get_statistic_variable(tmp)
#
# FEATURE = np.array(FEATURE)
#
# # print('feature num =', len(FEATURE))
# # print('FEATURE max:', FEATURE.max())
#
# # assert len(FEATURE) == 471
#
# return init_cishu, init_renshu, list(FEATURE)
def get_jieri_feature(strings, jieri_dict, name_jieri='guoqing'):
    """Build arrival/departure-hour statistics for the days of one holiday.

    Parameters
    ----------
    strings : iterable of str
        Per-user visit records; each record is comma-separated entries of
        the form ``YYYYMMDD<sep>hh|hh|...`` (chars 0-8 are the date, chars
        9+ are '|'-separated hour strings looked up via ``str2int``).
    jieri_dict : dict
        Maps a date string ("YYYYMMDD") to its 0-based day index within
        the holiday window.
    name_jieri : str
        Holiday name; selects extra pooled sub-period feature groups
        ('guoqing', 'guonian_chunyun', or 'yuandan').

    Returns
    -------
    list
        Flat list of feature values (4 statistic groups per holiday day,
        plus pooled sub-period groups depending on ``name_jieri``).
    """
    num_jieri = len(jieri_dict)
    FEATURE = []
    # Per holiday-day buckets: first visited hour, last visited hour,
    # combined first*24+last code, and span (last - first).
    zaoshang_hour_dao = [[] for _ in range(num_jieri)]
    wanshang_hour_zou = [[] for _ in range(num_jieri)]
    zaowanshang_hour_daozou = [[] for _ in range(num_jieri)]
    kuadu = [[] for _ in range(num_jieri)]
    for string in strings:
        temp = [[item[0:8], item[9:].split("|")] for item in string.split(',')]
        for date, visit_lst in temp:
            if date not in jieri_dict:  # date outside the holiday window
                continue
            idx = jieri_dict[date]
            zaoqi_hour = str2int[visit_lst[0]]
            wanshang_hour = str2int[visit_lst[-1]]
            zaoshang_hour_dao[idx].append(zaoqi_hour)
            wanshang_hour_zou[idx].append(wanshang_hour)
            # BUG FIX: the original appended to the outer lists
            # (``zaowanshang_hour_daozou`` / ``kuadu``) instead of the
            # per-day sub-lists, so these two feature groups were always
            # computed from empty lists.
            zaowanshang_hour_daozou[idx].append(zaoqi_hour * 24 + wanshang_hour)
            kuadu[idx].append(wanshang_hour - zaoqi_hour)
    # Per-day statistics, in a fixed order.
    for idx in range(num_jieri):
        FEATURE += get_statistic_variable(zaoshang_hour_dao[idx])
        FEATURE += get_statistic_variable(wanshang_hour_zou[idx])
        FEATURE += get_statistic_variable(zaowanshang_hour_daozou[idx])
        FEATURE += get_statistic_variable(kuadu[idx])

    def _pooled_stats(day_range):
        # Pool the four per-day buckets over ``day_range`` and return
        # their concatenated statistic features.
        tmp1, tmp2, tmp3, tmp4 = [], [], [], []
        for idx in day_range:
            tmp1 += zaoshang_hour_dao[idx]
            tmp2 += wanshang_hour_zou[idx]
            tmp3 += zaowanshang_hour_daozou[idx]
            tmp4 += kuadu[idx]
        return (get_statistic_variable(tmp1) + get_statistic_variable(tmp2)
                + get_statistic_variable(tmp3) + get_statistic_variable(tmp4))

    if name_jieri == 'guoqing':
        FEATURE += _pooled_stats(range(5))       # first 5 days of the National Day break
        FEATURE += _pooled_stats(range(5, 7))    # last 2 days
    if name_jieri == 'guonian_chunyun':
        FEATURE += _pooled_stats(range(7))       # 20190121-20190127: Spring Festival travel rush
        FEATURE += _pooled_stats(range(7, 15))   # "Little New Year" through New Year's Eve
        FEATURE += _pooled_stats(range(15, 21))  # 6th day of the New Year through the Lantern Festival
        FEATURE += _pooled_stats(range(21, 39))  # days after the Lantern Festival
    if name_jieri == 'yuandan':
        FEATURE += _pooled_stats(range(1, num_jieri))  # New Year's Day break
    return FEATURE
def get_work_rest_feature(strings):
    """Arrival/departure-hour statistics split by day category.

    For every visited day of every user record, collects the first visited
    hour, the last visited hour, the combined ``first*24 + last`` code, and
    the span ``last - first``, bucketed into four categories: workday
    (weekday 0-4), rest day (5-6), Saturday (5), Sunday (6).  Returns the
    concatenated statistic features of all 16 series in that fixed order.
    """
    # One (arrive, leave, combo, span) quadruple of lists per category.
    buckets = {name: ([], [], [], []) for name in ('work', 'rest', 'sat', 'sun')}

    def _record(bucket, first, last):
        # Push one day's four measurements into a category bucket.
        arrive, leave, combo, span = bucket
        arrive.append(first)
        leave.append(last)
        combo.append(first * 24 + last)
        span.append(last - first)

    for string in strings:
        records = [[item[0:8], item[9:].split("|")] for item in string.split(',')]
        for date, visit_lst in records:
            weekday, _ = date2position[datestr2dateint[date]]
            first = str2int[visit_lst[0]]
            last = str2int[visit_lst[-1]]
            if weekday < 5:
                _record(buckets['work'], first, last)
            else:
                # Weekend days go into 'rest' and into their own day bucket.
                _record(buckets['rest'], first, last)
                _record(buckets['sat'] if weekday == 5 else buckets['sun'], first, last)

    FEATURE = []
    for name in ('work', 'rest', 'sat', 'sun'):
        for series in buckets[name]:
            FEATURE += get_statistic_variable(series)
    return FEATURE
def get_feature_1_2(table):
    """Extract longitudinal (time-axis) visit features for one record.

    ``table[1]`` holds one string per user; each string is a comma-separated
    list of day records, where chars [0:8] are the YYYYMMDD date and chars
    [9:] are a '|'-separated list of visited hours.

    Returns ``(init_cishu, init_renshu, FEATURE)`` where ``init_cishu`` is a
    (7, 26, 24) weekday x week x hour visit-count cube, ``init_renshu`` a
    (7, 26) weekday x week visitor-count grid, and ``FEATURE`` a flat list of
    summary statistics.
    """
    FEATURE = []
    # (prev_weekday, next_weekday) -> bucket index; all Mon..Fri transitions
    # are merged into bucket 0 to cut the feature count.
    dict_day2day = {(1, 2): 0, (2, 3): 0, (3, 4): 0, (4, 5): 0, (5, 6): 1, (6, 7): 2,
                    (7, 1): 3, (5, 1): 4, (6, 1): 5}
    dict_num = 6
    strings = table[1]
    Num_users = len(strings)  # number of users in this record
    FEATURE += [Num_users]
    name_jieri = 'guoqing'
    ########################################################################################################
    # National Day holiday, Oct 1-8 2018
    jieri_dict = {}
    dates = [str(_) for _ in range(20181001, 20181009)]
    for idx, date in enumerate(dates):
        jieri_dict[date] = idx
    FEATURE += get_jieri_feature(strings, jieri_dict, name_jieri=name_jieri)
    name_jieri = 'guonian_chunyun'
    ########################################################################################################
    # Spring Festival & travel rush, Jan 21 - Feb 28 2019 (impossible date
    # strings such as "20190132" never match a real record, so they are harmless)
    jieri_dict = {}
    dates = [str(_) for _ in range(20190121, 20190132)] + [str(_) for _ in range(20190201, 20190229)]
    for idx, date in enumerate(dates):
        jieri_dict[date] = idx
    FEATURE += get_jieri_feature(strings, jieri_dict, name_jieri=name_jieri)
    name_jieri = 'yuandan'
    ########################################################################################################
    # New Year's Day, Dec 29 2018 - Jan 1 2019
    jieri_dict = {}
    dates = [str(_) for _ in range(20181229, 20181232)] + ['20190101']
    for idx, date in enumerate(dates):
        jieri_dict[date] = idx
    FEATURE += get_jieri_feature(strings, jieri_dict, name_jieri=name_jieri)
    name_jieri = 'shengdan'
    ########################################################################################################
    # Christmas, Dec 24-25 2018
    jieri_dict = {}
    dates = [str(_) for _ in range(20181224, 20181226)]
    for idx, date in enumerate(dates):
        jieri_dict[date] = idx
    FEATURE += get_jieri_feature(strings, jieri_dict, name_jieri=name_jieri)
    work_rest = 'work_rest'
    ########################################################################################################
    # workday / rest-day / Saturday / Sunday aggregate features
    FEATURE += get_work_rest_feature(strings)
    # Per user: days, hours, hours-per-day mean/std/max/min + 25/50/75 percentiles.
    Num_users_day_hour_information = np.zeros(
        (Num_users, 9))
    # Per user: statistics of the gaps between consecutive visit days
    # (8 values, see get_statistic_variable).
    Num_users_chazhi_information = np.zeros((Num_users, 8))
    Num_chazhi_Day = []  # gaps (in days) between consecutive visit days, pooled over users
    Num_users_hour_day = []  # hours per visited day, pooled over users
    Num_users_aoye = np.zeros((Num_users,))  # late nights per user (last hour >= 7)
    Num_users_zaoqi = np.zeros((Num_users,))  # early mornings per user (first hour <= 6)
    Num_users_tongxiao = np.zeros((Num_users,))  # overnight stays (late out, early in next day)
    Num_day2day = np.zeros((dict_num, Num_users))  # day-pair transition counts per user
    # Departure hour of the previous day / arrival hour of the next day, per bucket.
    Num_day2day_hour_pre_specific = [[] for i in range(dict_num)]
    Num_day2day_hour_next_specific = [[] for i in range(dict_num)]
    init_cishu = np.zeros((7, 26, 24))  # weekday x week x hour visit counts (26 weeks)
    init_renshu = np.zeros((7, 26))  # weekday x week visitor counts
    ########################################################################################################
    for user_idx, string in enumerate(strings):
        # item[0:8] = YYYYMMDD date, item[9:] = '|'-separated hour list
        temp = [[item[0:8], item[9:].split("|")] for item in string.split(',')]
        cnt_day, cnt_hour = len(temp), 0  # visited days / total visited hours for this user
        tmp = np.zeros((cnt_day,))
        for i, (date, visit_lst) in enumerate(temp):
            tmp[i] = len(visit_lst)
            cnt_hour += len(visit_lst)
            Num_users_hour_day.append(len(visit_lst))
        # days, hours, hours/day mean, std, max, min, then 25/50/75 percentiles
        Num_users_day_hour_information[user_idx, :] = np.array(
            [cnt_day, cnt_hour, tmp.mean(), tmp.std(), tmp.max(), tmp.min()] + list(np.percentile(tmp, [25, 50, 75])))
        jiange_day = []
        jiange_flag = 0
        # NOTE(review): jiange_flag is initialised to 0 and only ever assigned
        # *inside* `if jiange_flag:` below, so that whole branch is dead and
        # every day-to-day / gap statistic stays zero.  It was presumably
        # meant to be set to 1 after the first iteration -- confirm intent.
        for date, visit_lst in temp:
            x, y = date2position[datestr2dateint[date]]
            init_renshu[x][y] += 1  # one visitor for this weekday/week cell
            for visit in visit_lst: init_cishu[x][y][str2int[visit]] += 1  # per-hour counts
            zaoqi_hour = str2int[visit_lst[0]]
            wanshang_hour = str2int[visit_lst[-1]]
            if zaoqi_hour <= 6:  # early riser
                Num_users_zaoqi[user_idx] += 1
            if jiange_flag:  # relation between yesterday and today (dead branch, see NOTE)
                jiange_flag = 1
                day_cha = date2int[date] - date2int[pre_date]
                jiange_day.append(day_cha)
                if day_cha == 1:
                    idx = dict_day2day[(pre_x + 1, x + 1)]  # (previous day, next day)
                    Num_day2day[idx, user_idx] += 1  # (6, Num_users)
                    Num_day2day_hour_pre_specific[idx].append(pre_aoye_hour)
                    Num_day2day_hour_next_specific[idx].append(zaoqi_hour)
                    if zaoqi_hour <= 6 and pre_aoye_hour >= 7:  # stayed overnight
                        Num_users_tongxiao[user_idx] += 1
                if day_cha == 2 or day_cha == 3:
                    # left Friday/Saturday evening, arrived Monday morning
                    if (pre_x + 1, x + 1) == (5, 1) or (pre_x + 1, x + 1) == (6, 1):
                        idx = dict_day2day[(pre_x + 1, x + 1)]  # (previous day, next day)
                        Num_day2day_hour_pre_specific[idx].append(pre_aoye_hour)
                        Num_day2day_hour_next_specific[idx].append(zaoqi_hour)
            pre_date = date
            pre_x, pre_y = x, y
            pre_aoye_hour = str2int[visit_lst[-1]]
            if pre_aoye_hour >= 7:  # night owl
                Num_users_aoye[user_idx] += 1
        Num_chazhi_Day += jiange_day
        jiange_day = np.array(jiange_day)
        Num_users_chazhi_information[user_idx, :] = np.array(get_statistic_variable(jiange_day))
    Num_chazhi_Day = np.array(Num_chazhi_Day)  # pooled gaps between consecutive days
    Num_users_hour_day = np.array(Num_users_hour_day)  # pooled hours per visited day
    # feature count: 1 + 18 = 19
    FEATURE += list(np.mean(Num_users_day_hour_information, axis=0)) + list(
        np.std(Num_users_day_hour_information, axis=0))
    # feature count: 19 + 8 * 3 = 43
    FEATURE += [np.sum(Num_users_aoye), Num_users_aoye.mean(), Num_users_aoye.std(), Num_users_aoye.max(),
                Num_users_aoye.min()] + list(np.percentile(Num_users_aoye, [25, 50, 75]))
    # NOTE(review): the percentiles on the next statement are taken from
    # Num_users_aoye rather than Num_users_zaoqi -- looks like a copy-paste
    # slip; confirm before "fixing", as it changes the feature values.
    FEATURE += [np.sum(Num_users_zaoqi), Num_users_zaoqi.mean(), Num_users_zaoqi.std(), Num_users_zaoqi.max(),
                Num_users_zaoqi.min()] + list(np.percentile(Num_users_aoye, [25, 50, 75]))
    FEATURE += [np.sum(Num_users_tongxiao), Num_users_tongxiao.mean(), Num_users_tongxiao.std(),
                Num_users_tongxiao.max(), Num_users_tongxiao.min()] + list(
        np.percentile(Num_users_tongxiao, [25, 50, 75]))
    # feature count: 43 + 12 + 16 = 71
    FEATURE += list(np.mean(Num_day2day, axis=1)) + list(
        np.std(Num_day2day, axis=1))
    FEATURE += list(np.mean(Num_users_chazhi_information, axis=0)) + list(
        np.std(Num_users_chazhi_information, axis=0))
    # feature count: 71 + 12 * 24 = 359
    for i in range(dict_num):
        FEATURE += get_statistic_variable(Num_day2day_hour_pre_specific[i])  # 8 stats
        FEATURE += get_statistic_variable(Num_day2day_hour_next_specific[i])  # 8 stats
    # feature count: 359 + 16 = 375
    FEATURE += get_statistic_variable(Num_chazhi_Day)
    FEATURE += get_statistic_variable(Num_users_hour_day)
    FEATURE = np.array(FEATURE)
    return init_cishu, init_renshu, list(FEATURE)
def visit2array(table):
    """Build the full feature vector for one visit record.

    Returns ``(FEATURE, init_cishu, shape)`` where ``FEATURE`` is the
    concatenation of the three feature groups and ``shape`` records each
    group's length so callers can slice the vector back apart.
    """
    # get_feature_1_2 also yields the (7, 26, 24) hour-count cube and the
    # (7, 26) visitor-count grid consumed by the other two builders.
    init_cishu, init_renshu, FEATURE_3 = get_feature_1_2(table)
    FEATURE_1 = get_feature_cishu(init_cishu)
    FEATURE_2 = get_feature_reshu(init_renshu)
    # Fix: dropped the dead `FEATURE = []` pre-initialisation that was
    # immediately overwritten here.
    FEATURE = FEATURE_1 + FEATURE_2 + FEATURE_3
    shape = np.array([len(FEATURE_1), len(FEATURE_2), len(FEATURE_3)])
    return FEATURE, init_cishu, shape
| true |
ec560887d6a722946ee492eb7991f55a190be42f | Python | AK-1121/code_extraction | /python/python_7777.py | UTF-8 | 92 | 3.015625 | 3 | [] | no_license | # How do I print a Celsius symbol with matplotlib?
ax.set_xlabel('Temperature ($^\circ$C)')
| true |
3fc5b97db4907104c33052bb141f5e13fc7692e8 | Python | knighton/deepzen | /deepzen/api/base/core/cast.py | UTF-8 | 875 | 2.71875 | 3 | [] | no_license | class BaseCastAPI(object):
    def cast(self, x, dtype=None, device=None, copy=False):
        """Backend hook: convert tensor `x` to `dtype` on `device` (optionally copying)."""
        raise NotImplementedError
    def cast_to_cpu(self, x, dtype, copy=False):
        """Cast `x` to `dtype`, placing the result on the CPU device."""
        return self.cast(x, dtype, self.cpu(), copy)
    def cast_to_gpu(self, x, dtype, gpu=None, copy=False):
        """Cast `x` to `dtype`, placing the result on GPU `gpu` (default GPU if None)."""
        return self.cast(x, dtype, self.gpu(gpu), copy)
    def to_device(self, x, device=None, copy=False):
        """Move `x` to `device` without changing its dtype."""
        return self.cast(x, self.dtype(x), device, copy)
    def to_cpu(self, x, copy=False):
        """Move `x` to the CPU, keeping its dtype."""
        return self.cast(x, self.dtype(x), self.cpu(), copy)
    def to_gpu(self, x, gpu=None, copy=False):
        """Move `x` to GPU `gpu` (default GPU if None), keeping its dtype."""
        return self.cast(x, self.dtype(x), self.gpu(gpu), copy)
    def cast_numpy_to_tensor(self, x, dtype=None, device=None):
        """Backend hook: convert a numpy array to a backend tensor with `dtype` on `device`."""
        raise NotImplementedError
    def numpy_to_tensor(self, x, device=None):
        """Convert a numpy array to a backend tensor, preserving its numpy dtype."""
        return self.cast_numpy_to_tensor(x, x.dtype.name, device)
| true |
04d60a62b037b423cf0ff90a2a6ef2a450c138cd | Python | Guilhermesav/Assessment2 | /ok.py | UTF-8 | 1,520 | 3.078125 | 3 | [] | no_license | import threading, time,multiprocessing
lista_tamanhos = [1000, 2000, 3000, 4000, 5000]


def calcPorcent(lista, inicio, fim):
    """Scale lista[inicio:fim] in place to 10% of each value."""
    for i in range(inicio, fim):
        lista[i] = lista[i] * 0.1


if __name__ == '__main__':
    for tam in lista_tamanhos:
        lista = [1.3, 10.4, 40.0, 59.87, 33.01, 101.4] * tam
        tamanho = len(lista)
        # --- parallel run: split the list into Nthreads contiguous slices ---
        t_inicio = float(time.time())
        Nthreads = 3  # number of worker threads
        lista_threads = []
        for i in range(Nthreads):
            ini = i * tamanho // Nthreads        # start of this thread's slice
            fim = (i + 1) * tamanho // Nthreads  # end of this thread's slice
            t = threading.Thread(target=calcPorcent, args=(lista, ini, fim))
            t.start()
            lista_threads.append(t)
            # BUG FIX: removed `lista_process.append(p)` -- neither name was
            # ever defined, so the original crashed with a NameError here.
        for t in lista_threads:
            t.join()  # wait for all workers
        t_fim = float(time.time())
        # --- sequential run over the same number of elements ---
        t_inicio_seq = float(time.time())
        for i in range(tamanho):
            lista[i] = lista[i] * 0.1
        t_fim_seq = float(time.time())
        # BUG FIX: the message used {tam} (the size multiplier) where it
        # claims to report the thread count.
        print(f"Tempo total Paralelo em segundos, com {Nthreads} Threads: {round(t_fim - t_inicio,15)}")
        print(f"Tempo total Sequencial em segundos: {round(t_fim_seq - t_inicio_seq,15)}")
f6b18ed443cbcd69ebd6c87fc002987aa5a7e8d9 | Python | eboling/MBTS-Water-Tiers | /gen_tiers.py | UTF-8 | 1,589 | 2.53125 | 3 | [] | no_license | import sys
from numpy import loadtxt, arange, ones
from scipy import stats
from rate_tier import RateTier, TierSystem
from RawQData import RawQData
data_file_dir = sys.argv[1]
tier_file_name = sys.argv[2]
spreadsheet_file_name = sys.argv[3]
raw = RawQData(data_file_dir)
lines = loadtxt(tier_file_name)
#print lines
rates = TierSystem()
for l in lines:
rates.add_tier(RateTier(l[0], l[1], l[2]))
total_revenue = 0
total_volume = 0
for qd in raw.Qs:
for v in qd:
rates.account(v)
total_revenue += rates.total_revenue()
total_volume += rates.total_volume()
#rates.dump();
rates.clear_account()
print("total volume for the year: {0}".format(total_volume))
print("total revenue for the year: {0}".format(total_revenue))
print("applying elasticity:")
rates.set_elasticity([0.6, 0.6])
#rates.set_elasticity([0.6, 0.6, 0.6])
total_elastic_revenue = 0
total_elastic_volume = 0
for qd in raw.Qs:
for v in qd:
rates.account(v)
total_elastic_revenue += rates.total_revenue()
total_elastic_volume += rates.total_volume()
#rates.dump();
rates.clear_account()
print("total elastic volume for the year: {0} ({1}% reduction)".format(total_elastic_volume, 100.0 - (total_elastic_volume / total_volume * 100.0)))
print("total elastic revenue for the year: {0} ({1}% reduction)".format(total_elastic_revenue, 100.0 - (total_elastic_revenue / total_revenue * 100.0)))
rates.set_elasticity([1.0, 1.0, 1.0])
rates.account(110)
#rates.account(1489)
#rates.account(12600)
#rates.account(280)
rates.dump();
#rates.export(spreadsheet_file_name)
| true |
bd9d9fb1da81c85732962429ae943f51c52e7307 | Python | conceptslearningmachine-FEIN-85-1759293/affiliates | /affiliates/links/tests/test_models.py | UTF-8 | 929 | 2.53125 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | from datetime import date
from nose.tools import eq_
from affiliates.base.tests import TestCase
from affiliates.links.models import Link
from affiliates.links.tests import DataPointFactory, LinkFactory
class LinkTests(TestCase):
    def test_manager_total_link_clicks(self):
        """total_link_clicks() must sum per-day datapoint clicks plus each
        link's aggregate_link_clicks across all links."""
        for clicks in (4, 6, 9, 10):  # four single-datapoint links = 29 clicks
            DataPointFactory.create(link_clicks=clicks, date=date(2014, 4, 26))
        for clicks in (25, 5, 5):  # three aggregate-only links = 35 clicks
            LinkFactory.create(aggregate_link_clicks=clicks)
        # Create a link with multiple datapoints to test for a faulty
        # join that would screw up the totals.
        link = LinkFactory.create()
        DataPointFactory.create(link_clicks=7, link=link, date=date(2014, 4, 26))
        DataPointFactory.create(link_clicks=7, link=link, date=date(2014, 4, 27))
        # 29 + 35 + 7 + 7 = 78 clicks
        eq_(Link.objects.total_link_clicks(), 78)
| true |
8941efe0b69af32e43f78600ecb5ae41ca0d6266 | Python | magdacisowska/AdventOfCode | /AOC/day5.py | UTF-8 | 1,411 | 3.78125 | 4 | [] | no_license | def is_gonna_react(a, b):
    # Two units react when they are the same letter in opposite cases.
    low_up = a.isupper() and b.islower()
    up_low = a.islower() and b.isupper()
    same_sign = a.lower() == b.lower()  # same letter, ignoring case
    return (low_up or up_low) and same_sign
def react(input):
    """Run the polymer reaction to completion and return the surviving units.

    Scans the units left to right, keeping a stack of survivors; whenever an
    incoming unit reacts with the unit on top of the stack, both disappear.
    """
    survivors = []
    for unit in input:
        if survivors and is_gonna_react(unit, survivors[-1]):
            survivors.pop()
        else:
            survivors.append(unit)
    return survivors
def remove_unit(unit, polymer):
    """Return *polymer* as a list with every occurrence of *unit* removed.

    Both cases of the unit are stripped; all other characters are kept in
    order. (The index-based loop was replaced with the idiomatic
    comprehension; behavior is unchanged.)
    """
    target = unit.lower()
    return [u for u in polymer if u.lower() != target]
if __name__ == '__main__':
    input = open('inputs/input5.txt').read()
    new_polymer = react(input)
    # "- 1" discards the trailing newline read in from the input file.
    print('Part One: Polymer after reaction has {} units'.format(len(new_polymer) - 1))
    # Lowercase ASCII alphabet, 'a'..'z'.
    alphabet = []
    for letter in range(97, 123):
        alphabet.append(chr(letter))
    # For each unit type: remove it entirely, react, record the result length.
    data = {}
    for letter in alphabet:
        new_polymer = remove_unit(letter, input)
        new_polymer = react(new_polymer)
        data[letter] = len(new_polymer)
    # NOTE: despite the "max_" names, these pick the *minimum* length (the
    # shortest polymer) and the unit whose removal produced it.
    max_value = min(data.values()) - 1
    max_arg = min(data, key=data.get)
    print('Part Two: The shortest polymer is {} units long, after the removal of letter: {}'.format(max_value, max_arg))
180d6d68c232abb4ba3e49474ce7005a49191109 | Python | AnastasiaMazur/test_octopus_task | /encryption_decryption.py | UTF-8 | 1,119 | 2.875 | 3 | [] | no_license | import base64
import os
from Crypto import Random
from Crypto.PublicKey import RSA
from settings import PRIVATE_KEY_FILENAME
def generate_keys():
    """Load the RSA private key from disk, generating and persisting a new
    1024-bit key on first use.  Returns (private_key, public_key)."""
    if os.path.exists(PRIVATE_KEY_FILENAME):
        print("GET KEY FROM PATH")
        with open(PRIVATE_KEY_FILENAME, 'r') as f:
            PRIVATE_KEY = RSA.importKey(f.read())
    else:
        print("CREATE NEW KEY")
        random_generator = Random.new().read
        PRIVATE_KEY = RSA.generate(1024, random_generator)
        # Persist the key (PEM) so later runs reuse the same key pair.
        with open(PRIVATE_KEY_FILENAME, 'wb') as f:
            f.write(PRIVATE_KEY.exportKey('PEM'))
    PUBLIC_KEY = PRIVATE_KEY.publickey()
    print("CHECK KEYS IS FINISHED")
    return PRIVATE_KEY, PUBLIC_KEY
def encrypt_message(message_body, pub_key):
    """Encrypt *message_body* with *pub_key* and return the ciphertext base64-encoded."""
    ciphertext = pub_key.encrypt(message_body.encode(), 32)[0]
    return base64.b64encode(ciphertext)
def decrypt_message(encoded_message_body, priv_key):
    """Base64-decode *encoded_message_body* and return its RSA decryption."""
    return priv_key.decrypt(base64.b64decode(encoded_message_body))
| true |
80437243415280ea32f1dcecd97be3e9a4968ba2 | Python | jumbokh/micropython_class | /ESP32/Lab/Dust/pms5003.py | UTF-8 | 3,562 | 3.09375 | 3 | [] | no_license | """
Program to read data from PLANTOWER PMS5003
Modified from program to read data from NovaFitness SDS011 by
Nils Jacob Berland
njberland@gmail.com / njberland@sensar.io
+47 40800410
Modified by
Szymon Jakubiak
Measured values of PM1, PM2.5 and PM10 are in ug/m^3
Number of particles in #/cm^3
"""
import serial
from time import sleep


def _word(frame, i):
    """Return the big-endian 16-bit value stored at frame[i] (high), frame[i+1] (low)."""
    return frame[i] * 256 + frame[i + 1]


# Specify serial port address
ser_port = "COM37"
ser = serial.Serial(ser_port, baudrate=9600, stopbits=1, parity="N", timeout=2)
# Specify delay between data reads in seconds
data_delay = 1
try:
    ser.write([66, 77, 225, 0, 0, 1, 112])  # put sensor in passive mode
    while True:
        ser.flushInput()
        ser.write([66, 77, 226, 0, 0, 1, 113])  # ask for data
        s = ser.read(32)
        # A valid frame is exactly 32 bytes and starts with 0x42 0x4D.
        # (The length guard fixes an IndexError on a short/timed-out read.)
        if len(s) == 32 and s[0] == 0x42 and s[1] == 0x4D:
            print("Header is correct")
            cs = _word(s, 30)  # transmitted check sum
            # Check sum is computed over the first 30 bytes of the frame.
            check = sum(s[0:30])
            if check == cs:
                # PM1, PM2.5 and PM10 values for standard particle in ug/m^3
                pm1_std = float(_word(s, 4))
                pm25_std = float(_word(s, 6))
                pm10_std = float(_word(s, 8))
                # PM1, PM2.5 and PM10 values for atmospheric conditions in ug/m^3
                pm1_atm = float(_word(s, 10))
                pm25_atm = float(_word(s, 12))
                pm10_atm = float(_word(s, 14))
                # Number of particles bigger than 0.3 um, 0.5 um, etc. in #/cm^3
                part_03 = _word(s, 16)
                part_05 = _word(s, 18)
                part_1 = _word(s, 20)
                part_25 = _word(s, 22)
                part_5 = _word(s, 24)
                part_10 = _word(s, 26)
                print("Standard particle:")
                print("PM1:", pm1_std, "ug/m^3 PM2.5:", pm25_std, "ug/m^3 PM10:", pm10_std, "ug/m^3")
                print("Atmospheric conditions:")
                print("PM1:", pm1_atm, "ug/m^3 PM2.5:", pm25_atm, "ug/m^3 PM10:", pm10_atm, "ug/m^3")
                print("Number of particles:")
                print(">0.3:", part_03, " >0.5:", part_05, " >1.0:", part_1, " >2.5:", part_25, " >5:", part_5, " >10:", part_10)
        # Sleep on every pass: the original only slept after a valid
        # checksum, busy-spinning on the port whenever a frame was bad.
        sleep(data_delay)
except KeyboardInterrupt:
    ser.close()
    print("Serial port closed")
| true |
d74b7c1683e2eb5230b34e175b894e236a2833fb | Python | herowhj/weixin_crawler_2.0 | /utils/time.py | UTF-8 | 673 | 2.890625 | 3 | [] | no_license | def get_internet_time():
"""
:return: 获取百度服务器时间
"""
import requests,time,datetime
try:
r = requests.get(url="http://www.baidu.com")
date = r.headers['Date']
#将GMT时间转换成北京时间
net_time = time.mktime(datetime.datetime.strptime(date[5:25], "%d %b %Y %H:%M:%S").timetuple())+8*3600
return int(net_time)
except:
from instance import PLATFORM
if PLATFORM == 'win':
return None
else:
import time
return (time.time())
if __name__ == '__main__':
    # Smoke test: fetch and print the network timestamp.
    net_time = get_internet_time()
    if net_time:  # a falsy result (None) means the lookup failed
        print(net_time)
| true |
7e85a37f9b7cc6ab92fea8c3fe17a132b68a161a | Python | ninastijepovic/MasterThesis | /hera_sim/visibilities/simulators.py | UTF-8 | 13,973 | 2.53125 | 3 | [
"MIT"
] | permissive | from __future__ import division
from builtins import object
import warnings
import healpy
import numpy as np
from cached_property import cached_property
from pyuvsim import analyticbeam as ab
from pyuvsim.simsetup import (
initialize_uvdata_from_params,
initialize_catalog_from_params,
uvdata_to_telescope_config,
_complete_uvdata
)
from os import path
from abc import ABCMeta, abstractmethod
class VisibilitySimulator(object):
__metaclass__ = ABCMeta
"""
Base VisibilitySimulator class.
Any actual visibility simulator should be sub-classed from this one.
This class provides several convenience methods and defines the API.
"""
# Whether this particular simulator has the ability to simulate point
# sources directly.
point_source_ability = True
# Whether this particular simulator has the ability to simulate diffuse
# maps directly.
diffuse_ability = True
def __init__(self, obsparams=None, uvdata=None, sky_freqs=None,
beams=None, beam_ids=None, sky_intensity=None,
point_source_pos=None, point_source_flux=None, nside=2**5):
"""
Parameters
----------
obsparams : dict or filepath, optional
Exactly the expected input to `pyuvsim`'s
:func:`pyuvsim.simsetup.initialize_uvdata_from_params`
function. By default `uvdata`, `beams`, and `beam_ids`
are used instead.
uvdata : UVData object, optional
A :class:`pyuvdata.UVData` object contain information about
the "observation". Initalized from `obsparams`, if included.
sky_freqs : array_like, optional
Frequencies at which the sky intensity and/or point sources
are defined in [Hz]. Defaults to the unique frequencies in
`uvdata` Shape=(NFREQS,).
beams : array_like of `pyuvsim.analyticbeam.AnalyticBeam`,
optional
UVBeam models for as many antennae as have unique beams.
Initialized from `obsparams`, if included. Defaults to a
single uniform beam is applied for every antenna. Each beam
is the response of an individual antenna and NOT a
per-baseline response.
Shape=(N_BEAMS,).
beam_ids : array_like of int, optional
List of integers specifying which beam model each antenna
uses (i.e. the index of `beams` which it should refer to).
Initialized from `obsparams`, if included. By default, all
antennas use the same beam (beam 0).
Shape=(N_ANTS,).
sky_intensity : array_like, optional
A healpix model for the intensity of the sky emission, in
[Jy/sr]. Shape=(NFREQS, N_PIX_SKY).
point_source_pos : array_like, optional
An array of point sources. For each source, the entries are
(ra, dec) [rad] (assumed to be in J2000).
Shape=(N_SOURCES, 2).
point_source_flux : array_like, optional
An array of fluxes of the given point sources, per
frequency. Fluxes in [Jy]. Shape=(NFREQS, N_SOURCES).
nside : int, optional
Only used if sky_intensity is *not* given but the simulator
is incapable of directly dealing with point sources. In this
case, it sets the resolution of the healpix map to which the
sources will be allocated.
Notes
-----
Input beam models represent the responses of individual
antennas and are NOT the same as per-baseline "primary
beams". This interpretation of a "primary beam" would be the
product of the responses of two input antenna beams.
"""
if obsparams:
(self.uvdata,
self.beams,
self.beam_ids) = initialize_uvdata_from_params(obsparams)
if point_source_pos is None:
try:
# Try setting up point sources from the obsparams.
# Will only work, of course, if the "catalog" key is in obsparams.
# If it's not there, it will raise a KeyError.
catalog = initialize_catalog_from_params(obsparams)[0]
point_source_pos = np.array([catalog['ra_j2000'], catalog['dec_j2000']]).T * np.pi/180.
point_source_flux = np.atleast_2d(catalog['flux_density_I'])
except KeyError:
# If 'catalog' was not defined in obsparams, that's fine. We assume
# the user has passed some sky model directly (we'll catch it later).
pass
# convert the beam_ids dict to an array of ints
nms = list(self.uvdata.antenna_names)
tmp_ids = np.zeros(len(self.beam_ids), dtype=int)
for name, id in self.beam_ids.items():
tmp_ids[nms.index(name)] = id
self.beam_ids = tmp_ids
self.beams.set_obj_mode()
_complete_uvdata(self.uvdata, inplace=True)
else:
if uvdata is None:
raise ValueError("if obsparams is not given, uvdata must be.")
self.uvdata = uvdata
if beams is None:
self.beams = [ab.AnalyticBeam("uniform")]
else:
self.beams = beams
if beam_ids is None:
self.beam_ids = np.zeros(self.n_ant, dtype=np.int)
else:
self.beam_ids = beam_ids
self._nside = nside
self.sky_intensity = sky_intensity
if sky_freqs is None:
self.sky_freqs = np.unique(self.uvdata.freq_array)
else:
self.sky_freqs = sky_freqs
self.point_source_pos = point_source_pos
self.point_source_flux = point_source_flux
self.validate()
def validate(self):
    """Checks for correct input format.

    Raises ValueError on inconsistent sky/point-source inputs; converts
    between point-source and healpix representations when the simulator
    lacks the corresponding native ability.
    """
    if (self.point_source_pos is None) != (self.point_source_flux is None):
        raise ValueError("Either both or neither of point_source_pos and "
                         "point_source_flux must be given.")
    if self.sky_intensity is not None and not healpy.isnpixok(self.n_pix):
        raise ValueError("The sky_intensity map is not compatible with "
                         "healpy.")
    if self.point_source_pos is None and self.sky_intensity is None:
        raise ValueError("You must pass at least one of sky_intensity or "
                         "point_sources.")
    if np.max(self.beam_ids) >= self.n_beams:
        raise ValueError("The number of beams provided must be at least "
                         "as great as the greatest beam_id.")
    if self.point_source_flux is not None:
        if self.point_source_flux.shape[0] != self.sky_freqs.shape[0]:
            raise ValueError("point_source_flux must have the same number "
                             "of freqs as sky_freqs.")
    if self.point_source_flux is not None:
        flux_shape = self.point_source_flux.shape
        pos_shape = self.point_source_pos.shape
        # flux is (NFREQS, N_SOURCES); pos is (N_SOURCES, 2)
        if (flux_shape[1] != pos_shape[0]):
            raise ValueError("Number of sources in point_source_flux and "
                             "point_source_pos is different.")
    if (self.sky_intensity is not None
            and self.sky_intensity.shape[0] != self.sky_freqs.shape[0]):
        raise ValueError("sky_intensity has a different number of freqs "
                         "than sky_freqs.")
    if self.sky_intensity is not None and self.sky_intensity.ndim != 2:
        raise ValueError("sky_intensity must be a 2D array (a healpix map "
                         "per frequency).")
    if not self.point_source_ability and self.point_source_pos is not None:
        warnings.warn("This visibility simulator is unable to explicitly "
                      "simulate point sources. Adding point sources to "
                      "diffuse pixels.")
        if self.sky_intensity is None:
            self.sky_intensity = 0
        self.sky_intensity += self.convert_point_sources_to_healpix(
            self.point_source_pos, self.point_source_flux, self.nside
        )
    if not self.diffuse_ability and self.sky_intensity is not None:
        warnings.warn("This visibility simulator is unable to explicitly "
                      "simulate diffuse structure. Converting diffuse "
                      "intensity to approximate points.")
        (pos,
         flux) = self.convert_healpix_to_point_sources(self.sky_intensity)
        if self.point_source_pos is None:
            self.point_source_pos = pos
            self.point_source_flux = flux
        else:
            self.point_source_flux = \
                np.hstack((self.point_source_flux, flux))
            self.point_source_pos = np.hstack((self.point_source_pos, pos))
        self.sky_intensity = None
@staticmethod
def convert_point_sources_to_healpix(point_source_pos, point_source_flux,
                                     nside=2**5):
    """
    Convert point sources to an approximate diffuse HEALPix model.

    The healpix map returned is in RING scheme.

    Parameters
    ----------
    point_source_pos : array_like
        An array of point sources. For each source, the entries are
        (ra, dec) [rad] (assumed to be in J2000).
        Shape=(N_SOURCES, 2).
    point_source_flux : array_like
        An array of fluxes of the given point sources, per
        frequency. Fluxes in [Jy]. Shape=(NFREQS, N_SOURCES).
    nside : int, optional
        HEALPix nside parameter (must be a power of 2).

    Returns
    -------
    array_like
        The HEALPix diffuse model. Shape=(NFREQ, NPIX).
    """
    hmap = np.zeros((len(point_source_flux), healpy.nside2npix(nside)))
    # Get which pixel every point source lies in.
    # ang2pix takes colatitude (pi/2 - dec) and longitude (ra).
    pix = healpy.ang2pix(nside, np.pi/2 - point_source_pos[:, 1],
                         point_source_pos[:, 0])
    # Convert Jy to an intensity by dividing by the pixel solid angle.
    hmap[:, pix] += point_source_flux / healpy.nside2pixarea(nside)
    return hmap
@staticmethod
def convert_healpix_to_point_sources(hmap):
    """
    Convert a HEALPix map to a set of point sources.

    The point sources are placed at the center of each pixel.

    Parameters
    ----------
    hmap : array_like
        The HEALPix map. Shape=(NFREQ, NPIX).

    Returns
    -------
    array_like
        The point source approximation. Positions in (ra, dec) (J2000).
        Shape=(N_SOURCES, 2). Fluxes in [Jy]. Shape=(NFREQ, N_SOURCES).
    """
    nside = healpy.get_nside(hmap[0])
    # pix2ang with lonlat=True returns (lon, lat) in degrees.
    ra, dec = healpy.pix2ang(nside, np.arange(len(hmap[0])), lonlat=True)
    # Intensity * pixel solid angle -> flux per source.
    flux = hmap * healpy.nside2pixarea(nside)
    return np.array([ra*np.pi/180, dec*np.pi/180]).T, flux
def simulate(self):
    """Perform the visibility simulation.

    Writes a history entry, adds the simulated visibilities into the
    UVData object in place, and also returns them.
    """
    self._write_history()
    vis = self._simulate()
    self.uvdata.data_array += vis
    return vis

@abstractmethod
def _simulate(self):
    """Subclass-specific simulation method, to be overwritten."""
    pass
@property
def nside(self):
    """Nside parameter of the sky healpix map.

    Prefers the nside implied by ``sky_intensity``; falls back to the
    explicitly supplied ``_nside`` (validated) when no map is set.
    """
    try:
        return healpy.get_nside(self.sky_intensity[0])
    except TypeError:
        # sky_intensity is None (not subscriptable) -> use stored nside.
        if not healpy.isnsideok(self._nside):
            raise ValueError("nside must be a power of 2")
        return self._nside
@cached_property
def n_pix(self):
    """Number of pixels in the sky map."""
    return self.sky_intensity.shape[1]

@cached_property
def n_ant(self):
    """Number of antennas in array."""
    return self.uvdata.get_ants().shape[0]

@cached_property
def n_beams(self):
    """Number of beam models used."""
    return len(self.beams)
def _write_history(self):
    """Write pertinent details of simulation to the UVData's history."""
    class_name = self.__class__.__name__
    self.uvdata.history += ("Visibility Simulation performed with "
                            "hera_sim's {} simulator\n").format(class_name)
    self.uvdata.history += "Class Repr: {}".format(repr(self))
def write_config_file(self, filename, direc='.', beam_filepath=None,
                      antenna_layout_path=None):
    """
    Writes a YAML config file corresponding to the current UVData object.

    Parameters
    ----------
    filename : str
        Filename of the config file.
    direc : str
        Directory in which to place the config file and its
        supporting files.
    beam_filepath : str, optional
        Where to put the beam information. Default is to place it alongside
        the config file, but with extension '.beams'.
    antenna_layout_path : str, optional
        Where to put the antenna layout CSV file. Default is alongside the
        main config file, but appended with '_antenna_layout.csv'.
    """
    if beam_filepath is None:
        beam_filepath = path.basename(filename) + ".beams"
    if antenna_layout_path is None:
        antenna_layout_path = (path.basename(filename)
                               + "_antenna_layout.csv")
    uvdata_to_telescope_config(
        self.uvdata, beam_filepath=beam_filepath,
        layout_csv_name=antenna_layout_path,
        telescope_config_name=filename, return_names=False, path_out=direc
    )
| true |
c111c58750c12fb8ab344a4176468cc873ed6468 | Python | laxminagln/IOSD-UIETKUK-HacktoberFest-Meetup-2019 | /Beginner/age.py | UTF-8 | 296 | 3.78125 | 4 | [
"Apache-2.0"
# Keep prompting until a valid integer age is entered, then classify it.
while True:
    try:
        a = int(input("enter your age :"))
        if a > 18:
            print("Adult")
        elif 10 < a <= 18:
            print("Teen")
        elif a <= 10:
            print("Child")
        break
    except ValueError:
        # Non-numeric input: report and exit (original also breaks here).
        print("enter valid age")
        break
| true |
# Print every integer in the half-open range [l, r).
l = input("Type a Int")
r = input("Type another int")
# (removed a dead ``i = []`` assignment that was immediately rebound by the loop)
for i in range(int(l), int(r)):
    print(i)
| true |
1e75fedd8d82ee9b9159f872729f4eb4466f79a2 | Python | Comradgrimo/Py_sql_qt | /lesson_1.py | UTF-8 | 5,402 | 3.78125 | 4 | [] | no_license | # 1. Написать функцию host_ping(), в которой с помощью утилиты ping будет проверяться доступность сетевых узлов.
# Аргументом функции является список, в котором каждый сетевой узел должен быть представлен именем хоста или ip-адресом.
# В функции необходимо перебирать ip-адреса и проверять их доступность с выводом соответствующего сообщения
# («Узел доступен», «Узел недоступен»). При этом ip-адрес сетевого узла должен создаваться с помощью функции ip_address().
from ipaddress import ip_address
import subprocess
import random
import re
from tabulate import tabulate
def random_ipv4(foo: int) -> list:
    """Create a list of random IP addresses in the 77.0.0.0/8 range.

    :param foo: number of hosts
    :return: list of IPv4Address objects
    """
    ip_list = []
    for i in range(foo):
        # was bound to the name ``str`` in the original, shadowing the builtin
        addr = ip_address(
            f'77.{random.randint(0,255)}.{random.randint(0,255)}.{random.randint(0,255)}')
        ip_list.append(addr)
    return ip_list
def host_ping(bar: list):
    """Check reachability of IP addresses with the system ``ping`` utility.

    ``-w 2`` stops the ping after 2 seconds (in case the IP is dead);
    ``-c 2`` sends only 2 packets, which is enough for our purposes.

    :param bar: list of hosts
    :return: prints reachable/unreachable per host (Russian messages)
    """
    for i in bar:
        p = subprocess.Popen(f'ping {i} -w 2 -c 2',
                             shell=True, stdout=subprocess.PIPE)
        if re.search('100% packet loss', p.stdout.read().decode()):
            print(f'Узел {i} не доступен')
        else:
            print(f'Узел {i} доступен')
# 2. Написать функцию host_range_ping() для перебора ip-адресов из заданного диапазона. Меняться должен только последний
# октет каждого адреса. По результатам проверки должно выводиться соответствующее сообщение.
def host_range_ping(number: int, ip: str) -> list:
    """Enumerate ``number`` consecutive IP addresses starting from ``ip``.

    Only the last octet effectively changes, by adding 1 repeatedly to the
    starting address.

    :param number: how many addresses to produce
    :param ip: starting IP address
    :return: list of IP addresses, or None after printing an error when
        ``number`` is not an integer
    """
    try:
        bar = []
        foo = ip_address(ip)
        for i in range(number):
            bar.append(foo + i)
        return bar
    except TypeError:
        print('Введите целое число')
# 3. Написать функцию host_range_ping_tab(), возможности которой основаны на функции из примера 2. Но в данном случае
# результат должен быть итоговым по всем ip-адресам, представленным в табличном формате (использовать модуль tabulate).
def host_range_ping_tab(bar: list) -> str:
    """Split the given IP addresses into reachable and unreachable groups.

    Pings every address and prints a two-column table (via ``tabulate``)
    of reachable vs. unreachable hosts.

    :param bar: list of IP addresses
    """
    reach, unreach = [], []
    try:
        for i in bar:
            p = subprocess.Popen(f'ping {i} -w 2 -c 2',
                                 shell=True, stdout=subprocess.PIPE)
            if re.search('100% packet loss', p.stdout.read().decode()):
                unreach.append(i)
                print(f'Узел {i} не доступен')
            else:
                reach.append(i)
                print(f'Узел {i} доступен')
        ip = {'Reachable': reach, 'Unreachable': unreach}
        print(tabulate(ip, headers='keys'))
    except TypeError:
        print('Передайте аргументом список ip адресов')
# 4. Продолжаем работать над проектом «Мессенджер»:
# a) Реализовать скрипт, запускающий два клиентских приложения: на чтение чата и на запись в него. Уместно использовать модуль subprocess).
# Launch two terminal windows each running a chat client.
for i in range(2):
    p = subprocess.call('xfce4-terminal -H -e "python3 client_6.py"', executable='/bin/bash', shell=True)
# b) Реализовать скрипт, запускающий указанное количество клиентских приложений.
if __name__ == '__main__':
    bar = random_ipv4(5)
    foo = (host_range_ping(8, '87.250.250.250'))
    print(host_ping(bar))
    print(host_range_ping_tab(foo))
| true |
7942d5e162a8d475762bf5af9657985322d8f283 | Python | hackengineer/enet_tensorflow | /utils.py | UTF-8 | 5,852 | 2.890625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
def process_path_enc(file_path):
    '''
    Function to process the path containing the images and the
    labels for the input pipeline. In this case we work for the
    encoder output

    Arguments
    ----------
    'file_path' = path containing the images and
                  label folders

    Returns
    -------
    'img' = image tensors
    'iml_enc' = label tensor resized for the encoder head
    '''
    # img file
    img_file = file_path
    # label file lives in a sibling "labels" folder
    label_file = tf.strings.regex_replace(img_file, "/images", "/labels")
    print(img_file, label_file)
    # decoding image
    img = tf.io.read_file(img_file)
    img = tf.image.decode_png(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    img = tf.image.resize(img, [360, 480])
    # decoding label
    print(label_file)
    iml = tf.io.read_file(label_file)
    iml = tf.image.decode_png(iml, channels=1)
    iml = tf.image.convert_image_dtype(iml, tf.uint8)
    # nearest-neighbour keeps labels as valid class ids
    iml_enc = tf.image.resize(iml, [45, 60], method='nearest')
    return img, iml_enc
def process_path_dec(file_path):
    '''
    Function to process the path containing the images and the
    labels for the input pipeline. In this case we work for the
    decoder output

    Arguments
    ----------
    'file_path' = path containing the images and
                  label folders

    Returns
    -------
    'img,iml' = image and label tensors
    '''
    # img file
    img_file = file_path
    # label file lives in a sibling "labels" folder
    label_file = tf.strings.regex_replace(img_file, "/images", "/labels")
    print(img_file, label_file)
    # decoding image
    img = tf.io.read_file(img_file)
    img = tf.image.decode_png(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    img = tf.image.resize(img, [360, 480])
    # decoding label
    print(label_file)
    iml = tf.io.read_file(label_file)
    iml = tf.image.decode_png(iml, channels=1)
    iml = tf.image.convert_image_dtype(iml, tf.uint8)
    iml = tf.image.resize(iml, [360, 480], method='nearest')  # 45,60
    return img, iml
def process_path_encdec(file_path):
    '''
    Function to process the path containing the images and the
    labels for the input pipeline. In this case we work for a
    double objective function, one from the encoder and one from
    the decoder

    Arguments
    ----------
    'file_path' = path containing the images and
                  label folders

    Returns
    -------
    'img' = image tensors
    'iml_enc, iml_dec' = label tensors for the encoder and
                         decoder heads
    '''
    # img file
    img_file = file_path
    # label file lives in a sibling "labels" folder
    label_file = tf.strings.regex_replace(img_file, "/images", "/labels")
    print(img_file, label_file)
    # decoding image
    img = tf.io.read_file(img_file)
    img = tf.image.decode_png(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    img = tf.image.resize(img, [360, 480])
    # decoding label
    print(label_file)
    iml = tf.io.read_file(label_file)
    iml = tf.image.decode_png(iml, channels=1)
    iml = tf.image.convert_image_dtype(iml, tf.uint8)
    iml_enc = tf.image.resize(iml, [45, 60], method='nearest')
    iml_dec = tf.image.resize(iml, [360, 480], method='nearest')
    return img, (iml_enc, iml_dec)
def tf_dataset_generator(dataset_path,
                         map_fn,
                         batch_size=16,
                         cache=True,
                         train=True,
                         shuffle_buffer_size=1000):
    '''
    Creates a training tf.dataset from images in the dataset_path

    Arguments
    ----------
    'dataset_path' = path containing the dataset images
    'map_fn' = function to map for the image processing

    Returns
    -------
    'data_set' = training tf.dataset to plug in in model.fit()
    '''
    # create a list of the training images
    data_filelist_ds = tf.data.Dataset.list_files(dataset_path + '/*')
    # create the labeled dataset (returns (img,label) pairs)
    data_set = data_filelist_ds.map(
        map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # For a small dataset, only load it once, and keep it in memory.
    # use `.cache(filename)` to cache preprocessing work for datasets that
    # don't fit in memory.
    if cache:
        if isinstance(cache, str):
            data_set = data_set.cache(cache)
        else:
            data_set = data_set.cache()
    # if training i want to shuffle, repeat and define a batch
    # NOTE(review): original indentation was lost; only the shuffle is
    # nested under ``train`` here, with batch/repeat unconditional — confirm
    # against the upstream repo.
    if train:
        data_set = data_set.shuffle(buffer_size=shuffle_buffer_size)
    # define the batch size
    data_set = data_set.batch(batch_size)
    # Repeat forever
    data_set = data_set.repeat()
    # `prefetch` lets the dataset fetch batches in the background while the
    # model is training.
    data_set = data_set.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data_set
def get_class_weights(data_set, num_classes=12, c=1.02):
    '''
    Gets segmentation class weights from the dataset

    Arguments
    ----------
    'data_set' = tf.dataset as returned from tf_dataset_generator

    Returns
    -------
    'class_weights' = class weights for the segmentation classes,
        computed as 1 / log(c + pixel_frequency) as in the ENet paper
    '''
    # building a giant array to count how many pixels per label
    label_list = []
    for img, label in data_set.take(-1):
        label_list.append(label.numpy())
    label_array = np.array(label_list).flatten()
    # counting the pixels
    each_class = np.bincount(label_array, minlength=num_classes)
    # computing the weights as in the original paper
    prospensity_score = each_class / len(label_array)
    class_weights = 1 / (np.log(c + prospensity_score))
    return class_weights
| true |
58b9dbafe87eb8e484a8e896b4cc259fda4be589 | Python | EgehanGundogdu/drf-test-driven-development-exercies | /app/core/tests/test_models.py | UTF-8 | 1,167 | 2.890625 | 3 | [
"MIT"
] | permissive | from django.test import TestCase
from django.contrib.auth import get_user_model
class UserModelTests(TestCase):
    """Tests for the custom email-based user model."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            email="test1@gmail.com", password="super secret"
        )

    def test_create_user_with_email(self):
        """Test creating a new user with email instead of username"""
        user = get_user_model().objects.create_user(
            email="test@test.com", password="super_secret"
        )
        self.assertEqual(user.email, "test@test.com")
        self.assertTrue(user.check_password("super_secret"))

    def test_create_user_with_invalid_email(self):
        """
        Test creating user with invalid email. Raises an error.
        """
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(
                email="", password="super secret")

    def test_create_new_super_user(self):
        """
        Test create a super user.
        """
        user = get_user_model().objects.create_superuser(
            email="super@user.com", password="123"
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| true |
class M_langren(object):
    """Flow control for the werewolf ("langren") party game.

    NOTE(review): the list attributes below are class-level mutable state,
    shared across instances — confirm single-instance use before changing.
    """
    playerNum = 0        # total number of players
    langrenNum = 0       # number of werewolves
    day = 1              # current day counter
    mumber = []          # alive player ids
    langrenList = []     # ids of the werewolf players
    nvwuNum = 0          # witch player id
    YuyanNum = 0         # seer player id
    Zancun = []          # ids queued for removal at night resolution
    Nvwu_D = True        # witch still has her antidote
    Nvwu_J = True        # witch still has her poison
    JingzhangNum = 0     # sheriff player id

    def __init__(self):
        return

    def setPlayer(self, P):
        self.playerNum = P
        return

    def setLangNum(self, K):
        self.langrenNum = K
        return

    def setPlayerList(self, P):
        # Number players 1..P.
        a = 1
        for i in range(P):
            self.mumber.append(a)
            a += 1
        return

    def nextDay(self):
        self.day += 1
        return

    def setLangren(self, a):
        self.langrenList.append(a)
        return

    def setNvwuNum(self, a):
        self.nvwuNum = a
        return

    def setYuyanNum(self, a):
        self.YuyanNum = a
        return

    def act_Lang(self, a):
        # Werewolf kill: remove player a.
        self.mumber.remove(int(a))
        return

    def act_Nv(self, a, way):
        # Witch action: way 1 = save (re-add), way 2 = poison (remove).
        if way == 1:
            self.mumber.append(int(a))
        elif way == 2:
            b = self.mumber[:]
            for i in self.mumber:
                if i == a:
                    b.remove(int(i))
            self.mumber = b
        return

    def act_Yu(self, a):
        # Seer check: is player a a werewolf?
        return (a in self.langrenList)

    def setZancun(self, a):
        self.Zancun.append(int(a))
        return

    def act_N(self):
        # Apply all queued night removals.
        for i in self.Zancun:
            self.mumber.remove(i)
        return

    def setJingzhang(self, a):
        self.JingzhangNum = a
        return

    def isWin(self):
        # Werewolves win if, counting the sheriff as an extra vote,
        # they outnumber the villagers; villagers win when at most one
        # player remains.
        Score_L = 0
        for i in self.langrenList:
            if i in self.mumber:
                Score_L += 1
        Score_P = len(self.mumber) - Score_L
        if self.JingzhangNum in self.langrenList:
            Score_L += 1
        else:
            Score_P += 1
        if Score_L > Score_P:
            print('狼人获胜')
            return True
        if len(self.mumber) <= 1:
            print('平民胜利')
            return True
        return False
73866ad8e111316f51d570f0ec89eac841e2400b | Python | blackholemedia/writings | /algorithm/solutions/offer/hassubtree.py | UTF-8 | 2,585 | 2.984375 | 3 | [] | no_license | #-*- coding=utf-8 -*-
from functools import reduce
import sys
# Pick the project tree layout per OS before importing the tree helpers.
if sys.platform == 'linux':
    sys.path.append('/home/alta/ds')
    from mytree.binarytreefromlist import BinaryTreeFromList
    from mytree.tree import TreeNode
else:
    sys.path.append('c:\\users\\alta')
    from datastructure.mytree.binarytreefromlist import BinaryTreeFromList
    from datastructure.mytree.tree import TreeNode
class Solution(BinaryTreeFromList):
    """HasSubtree check on binary trees built from lists.

    NOTE(review): indentation of this file was lost; the nesting below is
    reconstructed from the if/elif/else chain — confirm against upstream.
    """

    def HasSubtree(self, pRoot1, pRoot2):
        # write code here
        if pRoot1 != None and pRoot2 != None:
            if pRoot1.val == pRoot2.val:
                if pRoot2.left and pRoot2.right:
                    if self.HasSubtree(pRoot1.left, pRoot2.left) and self.HasSubtree(pRoot1.right, pRoot2.right):
                        return True
                    else:
                        return self.HasSubtree(pRoot1.left, pRoot2) or self.HasSubtree(pRoot1.right, pRoot2)
                elif pRoot2.left:
                    if self.HasSubtree(pRoot1.left, pRoot2.left):
                        return True
                    else:
                        return self.HasSubtree(pRoot1.left, pRoot2)
                elif pRoot2.right:
                    if self.HasSubtree(pRoot1.right, pRoot2.right):
                        return True
                    else:
                        return self.HasSubtree(pRoot1.right, pRoot2)
                else:
                    # pRoot2 is a leaf whose value matched.
                    return True
            else:
                return False
        else:
            return False

    def getsubtree(self, parent_tree=None):
        # Walk left once, then alternate right steps, 4 iterations total.
        iter_node = parent_tree
        for i in range(4):
            if i % 2 == 0:
                iter_node = parent_tree.left
            else:
                iter_node = iter_node.right
        return iter_node
if __name__ == '__main__':
    import random
    # randomlist = [random.randint(0, 999) for i in range(20)]
    mybinarytree = BinaryTreeFromList()
    # for i in [8,8,7,9,2,'#','#','#','#',4,7]:
    for i in [8, '#', 9, 3, 2]:
        mybinarytree.add_node_byleft(i)
    mybinarytree.dec_alergic()
    mybinarytree.print_all()
    # otherlist = [random.randint(0, 999) for i in range(5)]
    othertree = BinaryTreeFromList()
    # for i in [8,9,2]:
    #     othertree.add_node_byleft(i)
    othertree.print_all()
    ytree = Solution()
    # subtree = ytree.getsubtree(mybinarytree._header)
    # print(subtree)
    # print(ytree.HasSubtree(mybinarytree._header, subtree))
    print(ytree.HasSubtree(mybinarytree._header, othertree._header))
| true |
780da105a891cc88976004347547179172257d78 | Python | EasonPeng-TW/big5_10 | /big5_10.py | UTF-8 | 863 | 3.078125 | 3 | [] | no_license | import pandas as pd
import requests

# Scrape the TAIFEX "large trader" futures page and compare the long/short
# percentage share of the top-10 and top-5 traders.
big5_url = 'https://www.taifex.com.tw/cht/3/largeTraderFutQry'
big5_table = pd.read_html(requests.get(big5_url, headers={'User-agent': 'Mozilla/5.0(Windows NT 6.1; Win64; x64)AppleWebKit/537.36(KHTML, like Gecko)Chrome/63.0.3239.132 Safari/537.36'}).text)
# Split each cell on '%' once and keep the leading number.
big10_call = big5_table[3].iloc[2][5].split('%', 1)[0]
print(big10_call)
big10_put = big5_table[3].iloc[2][9].split('%', 1)[0]
print(big10_put)
dif_big10 = float(big10_call) - float(big10_put)
print('前十大差額{:.2f}%'.format(dif_big10))  # two decimal places
big5_call = big5_table[3].iloc[2][3].split('%', 1)[0]
print(big5_call)
big5_put = big5_table[3].iloc[2][7].split('%', 1)[0]
print(big5_put)
dif_big5 = float(big5_call) - float(big5_put)
print('前五大差額{:.2f}%'.format(dif_big5))
e7ce5ead997e40a4f4499cadf7037d87b7fe4925 | Python | zhafen/cc | /cc/concept_n_body.py | UTF-8 | 2,100 | 2.59375 | 3 | [] | no_license | import rebound
import augment
import numpy as np
########################################################################
class Simulation( object ):
    """Rebound N-body simulation of concepts attracted/repelled by weight."""

    @augment.store_parameters
    def __init__(
        self,
        concept_map,
        r_c = 5.,
        rep_power = 3.,
        att_power = 1.,
        inital_dims = ( 10., 10., 10. ),
        inital_vdims = ( 2., 2., 2. )
    ):
        '''The force between two particles is the derivative of
        V(r) = M * a * r ** -rep_power - M * r ** -att_power
        Where M is the mass of the other particle.
        When M >> m, a = (att_power/rep_power) * r_c**(rep_power - att_power)
        where r_c is the circular orbit.
        '''
        # Initialize the simulation
        self.sim = rebound.Simulation()

        # Setup particles
        for c in concept_map.concepts:
            # BUG FIX: the body referenced ``initial_dims``/``initial_vdims``
            # but the parameters are spelled ``inital_*`` (kept for
            # backward compatibility with keyword callers).
            x, y, z = [
                np.random.uniform( -length / 2., length / 2. )
                for length in inital_dims
            ]
            vx, vy, vz = [
                np.random.uniform( -vlength / 2., vlength / 2. )
                for vlength in inital_vdims
            ]
            self.sim.add(
                m = concept_map.weights[c],
                x = x, y = y, z = z,
                vx = vx, vy = vy, vz = vz,
            )

        # Move to center-of-momentum frame
        # BUG FIX: was ``sim.move_to_com()`` — ``sim`` is undefined here.
        self.sim.move_to_com()

        # Setup repulsive force
        def scaled_repulsive_force( r ):
            prefactor = att_power * r_c**(rep_power - att_power)
            force = prefactor * r**( -rep_power - 1 )
            return force

        # Add additional forces
        # NOTE(review): unfinished upstream — the separation ``r`` between
        # particles is never computed before use.
        def repulsive_force( sim ):
            ps = sim.contents.particles
            # Loop through particles
            for i, p in enumerate( ps ):
                net_force = 0.
                # Loop through other particles
                for j, p_e in enumerate( ps ):
                    assert False, "Need to calc r."
                    net_force += p_e.m * scaled_repulsive_force( r )
| true |
4b0585286ff2df8e916efb8fb46717e499f73ea1 | Python | aleph-im/pyaleph | /tests/toolkit/test_batch.py | UTF-8 | 747 | 2.890625 | 3 | [
"MIT"
] | permissive | import pytest
from aleph.toolkit.batch import async_batch
async def async_range(*args):
    """Async generator mirroring ``range(*args)``."""
    for i in range(*args):
        yield i
@pytest.mark.asyncio
async def test_async_batch():
    """async_batch should chunk an async iterable into lists of size n."""
    # batch with a remainder
    batches = [b async for b in async_batch(async_range(0, 10), 3)]
    assert batches == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    # iterable divisible by n
    batches = [b async for b in async_batch(async_range(0, 4), 2)]
    assert batches == [[0, 1], [2, 3]]
    # n = 1
    batches = [b async for b in async_batch(async_range(0, 5), 1)]
    assert batches == [[0], [1], [2], [3], [4]]
    # n = len(iterable)
    batches = [b async for b in async_batch(async_range(0, 7), 7)]
    assert batches == [[0, 1, 2, 3, 4, 5, 6]]
| true |
45e3b61514c33f3cdee0a33845caa990fb574e44 | Python | sy2es94098/MLGame-Summer | /games/easy_game/ml/ml_play_template.py | UTF-8 | 496 | 2.953125 | 3 | [] | no_license | import random
class MLPlay:
    """Template ML agent that picks a random move every frame."""

    def __init__(self):
        print("Initial ml script")

    def update(self, scene_info: dict):
        """
        Generate the command according to the received scene information
        """
        # print("AI received data from game :", scene_info)
        actions = ["UP", "DOWN", "LEFT", "RIGHT"]
        return random.sample(actions, 1)

    def reset(self):
        """
        Reset the status
        """
        print("reset ml script")
        pass
| true |
55c08f336f70a4531a1d2188170309835655bd70 | Python | mgilgamesh/DRL-Continuous-Control | /model.py | UTF-8 | 2,231 | 2.6875 | 3 | [] | no_license | import numpy as np
import random
from collections import namedtuple, deque
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
class Actor(nn.Module):
    """ Policy Model: maps a state to a tanh-bounded action vector. """

    def __init__(self, state_size, action_size, seed):
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, 128)
        self.bn1 = nn.BatchNorm1d(128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, action_size)
        self.reset_parameters()

    def hidden_init(self, layer):
        # Fan-in based uniform init range (DDPG paper convention).
        hid_layer = layer.weight.data.size()[0]
        lim = 1.0 / np.sqrt(hid_layer)
        return (-lim, lim)

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*self.hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*self.hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        x = self.fc1(state)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        # BUG FIX: F.tanh is deprecated; torch.tanh is the supported call.
        x = torch.tanh(x)
        return x
class Critic(nn.Module):
    """ Value Model: maps a (state, action) pair to a scalar Q-value. """

    def __init__(self, state_size, action_size, seed):
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, 128)
        self.bn1 = nn.BatchNorm1d(128)
        # Action is concatenated after the first hidden layer.
        self.fc2 = nn.Linear(128 + action_size, 128)
        self.fc3 = nn.Linear(128, 1)
        self.reset_parameters()

    def hidden_init(self, layer):
        # Fan-in based uniform init range (DDPG paper convention).
        hid_layer = layer.weight.data.size()[0]
        lim = 1.0 / np.sqrt(hid_layer)
        return (-lim, lim)

    def reset_parameters(self):
        self.fc1.weight.data.uniform_(*self.hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*self.hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        x = self.fc1(state)
        x = self.bn1(x)
        x = F.relu(x)
        x = torch.cat((x, action), dim=1)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        return x
dfddfc42c13760ab4ec35e05bf36242fdb22c204 | Python | mcxu/code-sandbox | /PythonSandbox/src/misc/num_subsets_min_max_below_k.py | UTF-8 | 564 | 3.3125 | 3 | [] | no_license | '''
https://leetcode.com/discuss/interview-question/268604/Google-interview-Number-of-subsets
Requirement: O(n^2) time or better.
Example 1:
nums = [2, 4, 5, 7]
k = 8
Output: 5
Explanation: [2], [4], [2, 4], [2, 4, 5], [2, 5]
Example 2:
nums = [1, 4, 3, 2]
k = 8
Output: 15
Explanation: 16 (2^4) - 1 (empty set) = 15
Example 3:
nums = [2, 4, 2, 5, 7]
k = 10
Output: 27
Explanation: 31 (2^5 - 1) - 4 ([7], [5, 7], [4, 5, 7], [4, 7]) = 27
'''
class Solution:
    def numSubsetsMinMaxBelowK(self, nums, k):
        """Count subsets whose min+max < k.

        NOTE(review): unfinished upstream — only the sort is implemented,
        so this currently returns None. The intended O(n log n) approach is
        a two-pointer scan over the sorted array.
        """
        nums = sorted(nums)  # O(n*log(n)) time
| true |
647b3208a3698a3c774325e5c1cf33cd3e8d547f | Python | viticlick/adventofcode | /2020/day_02/day2_02.py | UTF-8 | 335 | 3.40625 | 3 | [] | no_license | import re
from operator import xor
# Advent of Code 2020 day 2 part 2: count passwords where exactly one of
# the two 1-based positions holds the required character.
counter = 0
# BUG FIX: the file handle was never closed; use a context manager.
with open('input.txt', 'r') as f:
    for line in f.readlines():
        (a, b, c, d) = re.findall(r'(\d+)-(\d+) (\w): (\w+)', line)[0]
        first_occurence = d[int(a) - 1] == c
        last_occurence = d[int(b) - 1] == c
        if xor(first_occurence, last_occurence):
            counter = counter + 1
print(counter)
| true |
49a03219699c3ce1cef26cb80feea6b3dd96021d | Python | rahul9852-dot/Python-From-Scratch | /Basic Python/Practise_problem/facorial.py | UTF-8 | 383 | 4.25 | 4 | [] | no_license |
# Factorial of given number
# single line of code
# Recursive approach
def fact(n):
    """Recursive factorial in a single expression (fact(0) == fact(1) == 1)."""
    return 1 if (n == 1 or n == 0) else n * fact(n - 1)

print(fact(5))
# Iterative approach
def factorial(n):
    """Iterative factorial; returns 0 for negative input, 1 for 0 and 1."""
    if n < 0:
        return 0
    elif n == 1 or n == 0:
        return 1
    else:
        result = 1  # was named ``fact``, shadowing the sibling function
        while n > 1:
            result *= n
            n -= 1
        return result

print(factorial(5))
| true |
d24188f846922dd017fc873a5e1b476b04548a06 | Python | pinartopcam/project | /Assignment.py | UTF-8 | 14,996 | 2.78125 | 3 | [] | no_license | import Preference
import Instructor as i
import Course as c
import TeachingAssistant as t
from random import shuffle
import random
class Assignment:
def __init__(self, preference_list, ta_list, course_list, instructor_list):
self.preference_list = preference_list
self.ta_list = ta_list
self.course_list = course_list
self.instructor_list = instructor_list
def check_preferences(self):
from random import randint
for instructor in self.instructor_list:
for course in instructor.courses:
#get the number of preferences for each specific course
course_preferences = []
final_list = []
final_ids = []
available_ranks = []
for pref in instructor.preferences:
if pref.course_id == course.id:
course_preferences.append(pref)
# to remove the duplicate preferences
if pref.ta_id not in final_ids:
final_list.append(pref)
final_ids.append(pref.ta_id)
available_ranks.append(pref.rank)
else:
index = instructor.preferences.index(pref)
instructor.preferences.pop(index)
course_preferences = final_list
preference_per_course = len(course_preferences)
if preference_per_course == 10:
continue
else:
nr_of_preferences = preference_per_course
if nr_of_preferences > 10:
#if there are more than 10 preferences
index = nr_of_preferences
while index != nr_of_preferences:
instructor.preferences.pop(index-1)
index = index - 1
else:
#if there are less than 10 preferences
required_pref = 10 - nr_of_preferences
not_available_ranks = []
count = 1
while count < 11:
if count not in available_ranks:
not_available_ranks.append(count)
count = count + 1
not_available_ranks.reverse()
while required_pref != 0:
random_number = randint(0, len(self.ta_list)-1)
valid_preference = True
for pref in instructor.preferences:
if pref.ta_id == self.ta_list[random_number].id and course.id == pref.course_id:
#if this ta is already in preference list
valid_preference = False
break
if valid_preference:
instructor.createPreference(instructor.id,self.ta_list[random_number].id, course.id, not_available_ranks.pop())
nr_of_preferences = nr_of_preferences + 1
required_pref = required_pref - 1
for ta in self.ta_list:
ta_preferences = ta.preferences
final_list = []
final_ids = []
available_ranks = []
# to remove the duplicate preferences
for pref in ta.preferences:
if pref.course_id not in final_ids:
final_list.append(pref)
final_ids.append(pref.course_id)
available_ranks.append(pref.rank)
else:
index = ta.preferences.index(pref)
ta.preferences.pop(index)
ta_preferences = final_list
if len(ta_preferences) == 10:
continue
else:
nr_of_preferences = len(ta_preferences)
not_available_ranks = []
count = 1
while count < 11:
if count not in available_ranks:
not_available_ranks.append(count)
count = count + 1
not_available_ranks.reverse()
if nr_of_preferences > 10:
#if there are more than 10 preferences
index = nr_of_preferences
while index != nr_of_preferences:
ta_preferences.pop(index-1)
index = index - 1
else:
#if there are less than 10 preferences
required_pref = 10 - nr_of_preferences
while required_pref != 0:
random_number = randint(0, len(self.course_list)-1)
valid_preference = True
for pref in ta.preferences:
if pref.course_id == self.course_list[random_number].id:
#if this ta is already in preference list
valid_preference = False
break
if valid_preference:
ta.createPreference(ta.id, self.course_list[random_number].id, None, not_available_ranks.pop())
required_pref = required_pref - 1
def check_constraints(self):
print('no code yet')
def assign_random_preference(self):
print('no code yet')
def score(self, chromosome, courselist,instructorlist, numberOfPref, empFactor, wi, wta):
index = 0
courseSatisfaction=[]
for c in range(len(courselist)):
assignedTAs = []
instructor=None
for i in range(len(instructorlist)):
if instructorlist[i].id==courselist[c].instructor:
instructor=instructorlist[i]
taneed = int(courselist[c].ta_need)
instructor_satisfaction=0
overall_ta_satisfaction=0
for t in range(taneed):
assignedTAs.append(chromosome[index])
index += index
# instructor satisfaction for c
for x in range(len(assignedTAs)):
for p in range(len(instructor.preferences)):
if instructor.preferences[p].ta_id == assignedTAs[x].id:
instructor_satisfaction+= (numberOfPref+1-instructor.preferences[p].rank)^empFactor
base_case=0
for x in range(len(assignedTAs)):
base_case+= (numberOfPref-x)^empFactor
instructor_satisfaction=(instructor_satisfaction/base_case)*100
# ta satisfaction for c
for k in range(len(assignedTAs)):
for p in range(len(assignedTAs[k].preferences)):
if assignedTAs[k].preferences[p].course_id== courselist[c].id:
overall_ta_satisfaction += (((numberOfPref+1-assignedTAs[k].preferences[p].rank)^empFactor) / (numberOfPref^empFactor))*100
overall_ta_satisfaction= overall_ta_satisfaction/taneed
# satisfaction for c
satisfaction=(wi*instructor_satisfaction + wta*overall_ta_satisfaction)/(wi+wta)
courseSatisfaction.append(satisfaction)
#tot = 0
#for cs in range(len(courseSatisfaction)):
# tot += courseSatisfaction[cs]
#return tot / (len(courseSatisfaction))
return courseSatisfaction
    def assign(self):
        """Placeholder: final TA-to-course assignment is not implemented yet."""
        print('no code yet')
def initialize(self,populationSize,totalTANeed,taList,population):
for n in range(populationSize):
chromosome = []
for j in range(totalTANeed):
if j < len(taList):
chromosome.append(taList[j])
else:
chromosome.append(None)
shuffle(chromosome)
population[n][:] = chromosome
return population
def crossover(self,chromosome1,chromosome2):
cp=random.randint(0,len(chromosome1)-1)
tempTa=chromosome1[cp]
chromosome1[cp]=chromosome2[cp]
chromosome2[cp]=tempTa
return chromosome1,chromosome2
def mutation(self,chromosome1):
mp1 =random.randint(0,len(chromosome1)-1)
mp2 =random.randint(0,len(chromosome1)-1)
while mp2 == mp1:
mp2 = random.randint(0,len(chromosome1)-1)
tempTa=chromosome1[mp1]
chromosome1[mp1]=chromosome1[mp2]
chromosome1[mp2]= tempTa
return chromosome1
def main():
    """Load courses, instructor preferences, TAs and TA preferences from the
    tab-separated files courses.txt / pref.txt / talist.txt / tapref.txt,
    then run a few generations of the genetic algorithm and print scores.

    NOTE(review): this module is Python 2 code (`print` statements, `xrange`).
    """
    print('Welcome to Senior Design Project.')
    course_list = []
    instructor_list = []
    ta_list = []
    preference_list = []
    ta_course_list = []
    populationSize = 50
    totalTANeed = 0
    numberOfPref=10
    empFactor=3
    wi=2
    wta=1
    #ca advisor constant
    # --- courses.txt: <code>\t<ta_need>\t<instructor>, header row skipped ---
    with open("courses.txt", "r") as ins:
        array = []
        count = 0
        for line in ins:
            if count > 0:
                #print(line)
                index = line.index("\t")
                index2 = line.index("\t", index+1)
                code = line[0: index]
                ta_need =line[index+1 : index2]
                instructor = line[index2 + 1: len(line)].strip()
                array.append(line)
                course = c.Course(code, ' ', ' ', instructor, ta_need)
                course_list.append(course)
                ta_course_list.append((instructor, code))
            count = count + 1
    # --- pref.txt: <instructor>\t<course>\t<tab-separated ranked TA ids> ---
    with open("pref.txt", "r") as ins:
        array = []
        count = 0
        for line in ins:
            if count > 0:
                array.append(line)
                index = line.index("\t")
                code = line[0: index]
                index2 = line.index("\t", index+1)
                course = line[index+1: index2]
                prefs =line[index2+1 : len(line)].strip()
                instructor = i.Instructor(code, ' ', [], [], ' ')
                instructor_list.append(instructor)
                rank = 1
                prev_index = -1
                index3 = 0
                # Walk the tab-separated preference string; the ValueError from
                # the final missing "\t" ends the scan (bare except by design).
                while index3 < len(prefs):
                    try:
                        index3 = prefs.index("\t", index3+1)
                        if len(prefs[prev_index+1:index3]) > 1:
                            instructor.createPreference(code, prefs[prev_index+1:index3], course, rank)
                            rank = rank + 1
                            prev_index = index3
                    except:
                        if len(prefs[prev_index + 1:index3]) > 1:
                            instructor.createPreference(code, prefs[prev_index+1:len(prefs)], course, rank)
                        break
            count = count + 1
    #print(instructor_list)
    # Attach each course object to its instructor.
    for instructor in instructor_list:
        for tuple in ta_course_list:
            if instructor.id in tuple:
                for course in course_list:
                    if course.id == tuple[1]:
                        instructor.courses.append(course)
    # --- talist.txt: <ta_number>\t<advisor> ---
    with open("talist.txt", "r") as ins:
        array = []
        count = 0
        advisor_advisee = []
        for line in ins:
            if count > 0:
                #line.strip().replace("\t", ' ')
                index = line.index("\t")
                tanumber = line[0: index]
                advisor =line[index+1 : len(line)]
                array.append(line)
                ta = t.TeachingAssistant(tanumber, ' ', advisor, ' ')
                ta_list.append(ta)
                advisor_advisee.append((ta, advisor))
            count = count + 1
    # Attach advisees to their advising instructor.
    for instructor in instructor_list:
        for tuple in advisor_advisee:
            if instructor.id in tuple:
                instructor.students.append(tuple[0])
    # --- tapref.txt: <ta_number>\t<tab-separated ranked course ids> ---
    with open("tapref.txt", "r") as ins:
        array = []
        count = 0
        for line in ins:
            if count > 0:
                # line.strip().replace("\t", ' ')
                index = line.index("\t")
                code = line[0: index]
                prefs = line[index + 1: len(line)]
                array.append(line)
                rank = 1
                prev_index = -1
                index3 = 0
                current_ta = None
                for ta in ta_list:
                    if ta.id == code:
                        while index3 < len(prefs):
                            try:
                                index3 = prefs.index("\t", index3+1)
                                if len(prefs[prev_index + 1:index3]) > 1:
                                    ta.createPreference(code, prefs[prev_index+1:index3], None, rank)
                                    rank = rank + 1
                                    prev_index = index3
                            except:
                                if len(prefs[prev_index + 1:index3]) > 1:
                                    ta.createPreference(code, prefs[prev_index+1:len(prefs)], None, rank)
                                break
                        break
            count = count + 1
    a = Assignment(preference_list, ta_list, course_list, instructor_list)
    #
    # print('Before checking: ')
    # for ins in instructor_list:
    #     print(ins.id)
    #     print('--')
    #     for pref in ins.preferences:
    #         print pref.course_id, pref.ta_id, pref.rank
    # #
    # for ta in ta_list:
    #     print(ta.id)
    #     print('--')
    #     for pref in ta.preferences:
    #         print pref.course_id, pref.ta_id, pref.rank
    a.check_preferences()
    # print('After checking: ')
    # for ins in a.instructor_list:
    #     print(ins.id)
    #     print('--')
    #     for pref in ins.preferences:
    #         print pref.course_id, pref.ta_id, pref.rank
    # for ta in a.ta_list:
    #     print(ta.id)
    #     print('--')
    #     for pref in ta.preferences:
    #         print pref.course_id, pref.ta_id, pref.rank
    # --- GA: one chromosome slot per needed TA across all courses ---
    for z in range(len(course_list)):
        totalTANeed += int(course_list[z].ta_need)
    population = [[0 for m in xrange(totalTANeed)] for n in xrange(populationSize)]
    a.initialize(populationSize,totalTANeed ,ta_list,population)
    g1=population[1]
    g2=population[2]
    s1=a.score(g1, course_list, instructor_list,numberOfPref, empFactor, wi, wta)
    s2=a.score(g2, course_list, instructor_list,numberOfPref, empFactor, wi, wta)
    print s1
    # Ten generations of crossover with a 50% chance of mutating g1.
    for j in range(10):
        g1,g2=a.crossover(g1,g2)
        mut_prob = random.randint(0, 100)
        if mut_prob > 50:
            a.mutation(g1)
        print a.score(g1, course_list, instructor_list,numberOfPref, empFactor, wi, wta)
if __name__ == '__main__':
    main()
| true |
def hangman(secret_word, guesser, max_mistakes=8, verbose=True, **guesser_args):
    """Run one game of hangman and return ``(mistakes, mask)``.

    (Game engine from The University of Melbourne - Web Search and Text
    Analysis course material.)

    secret_word: the answer, lower-cased before play
    guesser: callable(mask, guessed, **guesser_args) -> next character;
        `mask` is the partially revealed word as a list with '_' for
        unknowns, `guessed` is the set of characters tried so far
    max_mistakes: wrong or repeated guesses allowed before the game ends
    verbose: print a running commentary of the game
    guesser_args: extra keyword arguments forwarded to `guesser`
    """
    def say(*parts):
        # All commentary funnels through here so `verbose` is checked once.
        if verbose:
            print(*parts)

    target = secret_word.lower()
    revealed = ['_'] * len(target)
    seen = set()
    say("Starting hangman game. Target is", ' '.join(revealed), 'length', len(target))
    mistakes = 0
    while mistakes < max_mistakes:
        say("You have", (max_mistakes - mistakes), "attempts remaining.")
        guess = guesser(revealed, seen, **guesser_args)
        say('Guess is', guess)
        if guess in seen:
            # Repeats are penalised like wrong guesses.
            say('Already guessed this before.')
            mistakes += 1
        else:
            seen.add(guess)
            if guess in target:
                for pos, letter in enumerate(target):
                    if letter == guess:
                        revealed[pos] = letter
                say('Good guess:', ' '.join(revealed))
            else:
                say('Sorry, try again.')
                mistakes += 1
        if '_' not in revealed:
            say('Congratulations, you won.')
            return mistakes, revealed
    say('Out of guesses. The word was', target)
    return mistakes, revealed
"""Exercise 10: given the list below, write a program that prints the suffix
(vn, org, net, com) of each website domain name in the list."""
a = ["www.hust.edu.vn", "www.wikipedia.org", "www.asp.net", "www.amazon.com"]
for site in a:
    # The suffix is whatever follows the last dot.
    print(site.split(".")[-1])
8cff65427411babec7c2a49d84ee678f94a4f1d6 | Python | JianYiheng/leetcode-dialog | /226.invert-binary-tree-m1.py | UTF-8 | 698 | 3.1875 | 3 | [] | no_license | from typing import List
class TreeNode:
    """Binary-tree node holding a value and two child links."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """LeetCode 226: mirror a binary tree in place, breadth-first."""
    def invertTree(self, root: TreeNode) -> TreeNode:
        if not root:
            return None
        pending = [root]
        while pending:
            node = pending.pop(0)
            if node is None:
                # Placeholder children of leaves carry nothing to swap.
                continue
            node.left, node.right = node.right, node.left
            pending.append(node.left)
            pending.append(node.right)
        return root
| true |
4c422743f4496441d2dc98efa0416f91aabd91c5 | Python | megansmcguire/saucerbot | /saucerbot/groupme/handlers/saucer.py | UTF-8 | 4,518 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import logging
import random
from typing import List, Union
from lowerpines.endpoints.bot import Bot
from lowerpines.endpoints.message import Message
from lowerpines.message import ComplexMessage, RefAttach
from saucerbot.groupme.handlers import registry
from saucerbot.groupme.models import SaucerUser
from saucerbot.utils import (
brew_searcher,
get_insult,
get_tasted_brews,
get_new_arrivals,
)
logger = logging.getLogger(__name__)
# GroupMe user ids of specific members referenced by the handlers below.
CLARK_USER_ID = '6499167'
SHAINA_USER_ID = '6830949'
# Canned replies for user_named_saucerbot. Plain strings get the offender's
# @-mention appended; the 'random' sentinel swaps in a freshly fetched insult.
SAUCERBOT_MESSAGE_LIST: List[Union[ComplexMessage, str]] = [
    "Shut up, ",
    "Go away, ",
    "Go find your own name, ",
    "Stop being an asshole, ",
    ComplexMessage('https://media.giphy.com/media/IxmzjBNRGKy8U/giphy.gif'),
    'random',
]
@registry.handler()
def user_named_saucerbot(bot: Bot, message: Message, force_random: bool = False) -> bool:
    """
    Chastise people who make their name saucerbot
    """
    # No-op unless the sender has literally taken the name "saucerbot".
    if message.name != 'saucerbot':
        return False
    # Send something dumb
    user_attach = RefAttach(message.user_id, f'@{message.name}')
    msg = random.choice(SAUCERBOT_MESSAGE_LIST)
    if force_random or msg == 'random':
        # The 'random' sentinel (or the force_random flag) swaps in a fetched
        # insult, choosing "a"/"an" to fit the insult's first letter.
        insult = get_insult()
        prefix = "Stop being a"
        if insult[0].lower() in ['a', 'e', 'i', 'o', 'u']:
            prefix = prefix + 'n'
        msg = prefix + ' ' + insult + ', '
    # String messages get the @-mention appended; ComplexMessage entries
    # (e.g. the gif) are posted as-is.
    if isinstance(msg, str):
        msg = msg + user_attach
    bot.post(msg)
    return True
@registry.handler(r'my saucer id is (?P<saucer_id>[0-9]+)')
def save_saucer_id(bot: Bot, message: Message, match) -> None:
    """
    Save a person's saucer ID, so we can lookup tasted beers later
    """
    saucer_id = match.group('saucer_id')
    # An ID with no tasted-beer list is treated as invalid.
    tasted_beers = get_tasted_brews(saucer_id)
    if not tasted_beers:
        bot.post(f"Hmmm, it looks like {saucer_id} isn't a valid Saucer ID.")
        return
    # Otherwise it's valid. Just update or create
    _, created = SaucerUser.objects.update_or_create(groupme_id=message.user_id,
                                                     defaults={'saucer_id': saucer_id})
    user_attach = RefAttach(message.user_id, f'@{message.name}')
    action = 'saved' if created else 'updated'
    bot.post("Thanks, " + user_attach + f"! I {action} your Saucer ID.")
@registry.handler(r'^info (?P<search_text>.+)$')
def search_brews(bot: Bot, match) -> None:
    """Look up beer details for an "info <text>" message and post them."""
    query = match.group('search_text').strip()
    bot.post(brew_searcher.brew_info(query))
@registry.handler([r'new beers( (?P<location>[a-z ]+))?',
                   r'new arrivals( (?P<location>[a-z ]+))?'])
def new_arrivals(bot: Bot, match) -> None:
    """Post the new-arrival list, defaulting to the Nashville location."""
    where = match.group('location') or 'Nashville'
    bot.post(get_new_arrivals(where.strip()))
@registry.handler([r'deep dish', r'thin crust'])
def pizza(bot: Bot) -> None:
    """
    Complain about pizza
    """
    # Fires whenever a message matches "deep dish" or "thin crust".
    bot.post("That is a false binary and you know it, asshole")
@registry.handler(r'like if')
def like_if(bot: Bot) -> None:
    """
    Nobody else can use like if!
    """
    # Fires whenever a message contains "like if".
    bot.post("Hey that's my job")
@registry.handler([r' bot ', r'zo'])
def zo_is_dead(bot: Bot) -> None:
    """
    Zo sux
    """
    # Triggered by " bot " (with surrounding spaces) or any "zo" substring.
    bot.post("Zo is dead. Long live saucerbot.")
@registry.handler([r'pong', r'beer pong'])
def troll(bot: Bot) -> None:
    """Answer "pong" / "beer pong" messages by calling out Shaina."""
    shaina = get_member(bot, SHAINA_USER_ID)
    # Fall back to plain text when her @-mention can't be built.
    pre_message: Union[RefAttach, str] = shaina if shaina else "Shaina"
    bot.post(pre_message + " is the troll")
def get_member(bot: Bot, member_id: str) -> Union[RefAttach, None]:
    """Return an @-mention attachment for *member_id*, or None if absent."""
    member = next((m for m in bot.group.members if m.user_id == member_id), None)
    if member is None:
        return None
    return RefAttach(member_id, f'@{member.nickname}')
# Quips for the plate_party handler below; the single '|' in each string marks
# where Clark's @-mention is spliced in.
plate_party_messages: List[str] = [
    "Yeah |, when is it???",
    "Still waiting on that date |",
    "*nudge* |",
    "Don't hold your breath, | ain't gonna schedule it soon",
    "|"
]
@registry.handler(r'plate party')
def plate_party(bot: Bot):
    """
    This is to troll clark lolz but some future work could be fun on this
    """
    clark = get_member(bot, CLARK_USER_ID)
    if not clark:
        logger.error("Somehow clark escaped the group!!!!")
    else:
        # Each quip contains exactly one '|' placeholder; splice the mention in.
        quip = random.choice(plate_party_messages).split('|')
        bot.post(quip[0] + clark + quip[1])
| true |
9a76630f2e801e479bb26b06d262280d0adf1ef3 | Python | jdurakie/raycaster | /meshbuilder.py | UTF-8 | 708 | 2.703125 | 3 | [] | no_license | from Triangle import Triangle as T
from colormanip import RED, GREEN, BLUE, YELLOW, CYAN, MAGENTA
def makeBox():
    """Return the 12 triangles (2 per face, one color per face) of an
    axis-aligned box spanning x 22..42, y 6..26, z 20..40."""
    # Four corners of the z=20 face ...
    A = (22, 26, 20)
    B = (42, 26, 20)
    C = (42, 6, 20)
    D = (22, 6, 20)
    # ... and the matching corners of the z=40 face.
    E = (22, 26, 40)
    F = (42, 26, 40)
    G = (42, 6, 40)
    H = (22, 6, 40)
    tris = [
        T(A, B, C, color=RED),
        T(C, D, A, color=RED),
        T(B, F, C, color=GREEN),
        T(F, G, C, color=GREEN),
        T(D, C, G, color=BLUE),
        T(G, H, D, color=BLUE),
        T(E, A, D, color=YELLOW),
        T(D, H, E, color=YELLOW),
        T(A, E, F, color=CYAN),
        T(F, B, A, color=CYAN),
        T(E, H, G, color=MAGENTA),
        T(G, F, E, color=MAGENTA)
    ]
    return tris
046dd0bf748e8e4c084a1dda6d5f31995c058e10 | Python | slichlyter12/Aristotle | /tests/selenium/TestStudentUseCases.py | UTF-8 | 10,860 | 2.546875 | 3 | [] | no_license | import json
import unittest
import HTMLTestRunner
import time
from selenium import webdriver
class Test_Student_Use_Cases(unittest.TestCase):
    """Selenium browser tests for the Aristotle student workflows:
    login error handling, login, class management and the question list.

    NOTE(review): relies on the old selenium `find_element_by_*` API, fixed
    `time.sleep` waits, and on running in the order Suite() defines.
    """
    @classmethod
    def setUpClass(self):
        # Quirk kept as-is: classmethods conventionally name this `cls`.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(5)
    def test_login_error(self):
        """Bogus credentials should leave us on the OSU login page."""
        driver = self.driver
        driver.get("http://web.engr.oregonstate.edu/~lichlyts/cs561/pages/")
        self.assertIn("Aristotle", driver.title)
        login_home_btn = driver.find_element_by_class_name("btn")
        self.assertIsNotNone(login_home_btn)
        login_home_btn.click()
        time.sleep(1)
        username_element = driver.find_element_by_id("username")
        self.assertIsNotNone(username_element)
        username_element.send_keys('test_username')
        password_element = driver.find_element_by_id("password")
        self.assertIsNotNone(password_element)
        password_element.send_keys('test_password')
        login_btn = driver.find_element_by_name("_eventId_proceed")
        self.assertIsNotNone(login_btn)
        login_btn.click()
        self.assertIn("login.oregonstate.edu", driver.current_url)
    def test_login(self):
        """Real credentials from user_pass.json should reach the dashboard."""
        driver = self.driver
        # user_pass.json format
        # {
        #     "username":"xxx",
        #     "password":"xxx"
        # }
        f = open('user_pass.json', 'r')
        userData = f.read()
        f.close()
        user = json.loads(userData)
        time.sleep(1)
        username_element = driver.find_element_by_id("username")
        self.assertIsNotNone(username_element)
        username_element.clear()
        username_element.send_keys(user['username'])
        password_element = driver.find_element_by_id("password")
        self.assertIsNotNone(password_element)
        password_element.clear()
        password_element.send_keys(user['password'])
        login_btn = driver.find_element_by_name("_eventId_proceed")
        self.assertIsNotNone(login_btn)
        login_btn.click()
        time.sleep(1)
        # if user is ta, redirect to studentDashboard.php
        if "pages/ta.php" in driver.current_url:
            link_to_student = driver.find_element_by_id("toStudent")
            self.assertIsNotNone(link_to_student)
            link_to_student.click()
            # time.sleep(1)
        self.assertIn("pages/studentDashboard.php", driver.current_url)
    def test_manage_class(self):
        """Toggle one class on and one off, verify, then restore the state."""
        driver = self.driver
        # read selected classes
        classes_list_elements = driver.find_element_by_xpath(".//*[@class='classList']")
        self.assertIsNotNone(classes_list_elements)
        selected_classes_elements = classes_list_elements.find_elements_by_xpath(".//*[@class='classes selectedClass']")
        selected_class_name = []
        for x in selected_classes_elements:
            selected_class_name.append(x.get_attribute('name'))
        # add & remove classes
        manage_class_btn = driver.find_element_by_class_name("openAddClassFormDialog")
        self.assertIsNotNone(manage_class_btn)
        manage_class_btn.click()
        time.sleep(1)
        span_elements = driver.find_elements_by_xpath(".//*[@class='classCheckBox']")
        self.assertIsNotNone(span_elements)
        click_select_class_name = ''
        click_unselect_class_name = ''
        # Pick one currently-selected class to untick and one unselected
        # class to tick, then stop scanning.
        for x in span_elements:
            if click_select_class_name != '' and click_unselect_class_name != '':
                break
            class_element = x.find_element_by_xpath(".//*[@type='checkbox']")
            self.assertIsNotNone(class_element)
            val = class_element.get_attribute('value')
            # if class_element.get_attribute('checked') == 'true':
            if len(selected_class_name) == 0:
                x.click()
                click_select_class_name = val
                break
            if val == selected_class_name[0] and click_unselect_class_name == '':
                click_unselect_class_name = val
                x.click()
            elif click_select_class_name == '' and val not in selected_class_name:
                click_select_class_name = val
                x.click()
        # submit
        add_classes_btn = driver.find_element_by_xpath(".//*[@class='submitBtn button-primary']")
        self.assertIsNotNone(add_classes_btn)
        add_classes_btn.click()
        # read new selected classes
        time.sleep(1)
        new_classes_list_elements = driver.find_element_by_xpath(".//*[@class='classList']")
        self.assertIsNotNone(new_classes_list_elements)
        new_selected_classes_elements = new_classes_list_elements.find_elements_by_xpath(".//*[@class='classes selectedClass']")
        new_selected_class_name = []
        for x in new_selected_classes_elements:
            new_selected_class_name.append(x.get_attribute('name'))
        if click_select_class_name != '':
            self.assertIn(click_select_class_name, new_selected_class_name)
        if click_unselect_class_name != '':
            self.assertNotIn(click_unselect_class_name, new_selected_class_name)
        # tear down
        # add & remove classes
        manage_class_btn.click()
        time.sleep(1)
        span_elements = driver.find_elements_by_xpath(".//*[@class='classCheckBox']")
        self.assertIsNotNone(span_elements)
        for x in span_elements:
            if click_select_class_name == '' and click_unselect_class_name == '':
                break
            class_element = x.find_element_by_xpath(".//*[@type='checkbox']")
            val = class_element.get_attribute('value')
            if val == click_select_class_name:
                click_select_class_name = ''
                x.click()
            elif val == click_unselect_class_name:
                click_unselect_class_name = ''
                x.click()
        # submit
        add_classes_btn.click()
        # read new selected classes
        time.sleep(1)
        new_classes_list_elements = driver.find_element_by_xpath(".//*[@class='classList']")
        self.assertIsNotNone(new_classes_list_elements)
        new_selected_classes_elements = new_classes_list_elements.find_elements_by_xpath(".//*[@class='classes selectedClass']")
        new_selected_class_name = []
        for x in new_selected_classes_elements:
            new_selected_class_name.append(x.get_attribute('name'))
        self.assertEqual(new_selected_class_name, selected_class_name)
    def test_questoin_list(self):
        """Open a class's question page, then restore the dashboard.

        NOTE(review): method name has a typo ("questoin"); renaming would
        break Suite(), which registers tests by name.
        """
        driver = self.driver
        classes_list_elements = driver.find_element_by_xpath(".//*[@class='classList']")
        self.assertIsNotNone(classes_list_elements)
        selected_classes_elements = classes_list_elements.find_elements_by_xpath(".//*[@class='classes selectedClass']")
        click_select_class_name = ''
        if len(selected_classes_elements) > 0:
            selected_classes_elements[0].click()
        else:
            # add & remove classes
            manage_class_btn = driver.find_element_by_class_name("openAddClassFormDialog")
            self.assertIsNotNone(manage_class_btn)
            manage_class_btn.click()
            time.sleep(1)
            span_elements = driver.find_elements_by_xpath(".//*[@class='classCheckBox']")
            self.assertIsNotNone(span_elements)
            if len(span_elements) > 0:
                class_element = span_elements[0].find_element_by_xpath(".//*[@type='checkbox']")
                self.assertIsNotNone(class_element)
                click_select_class_name = class_element.get_attribute('value')
                span_elements[0].click()
                # submit
                add_classes_btn = driver.find_element_by_xpath(".//*[@class='submitBtn button-primary']")
                self.assertIsNotNone(add_classes_btn)
                add_classes_btn.click()
                classes_list_elements = driver.find_element_by_xpath(".//*[@class='classList']")
                self.assertIsNotNone(classes_list_elements)
                selected_classes_elements = classes_list_elements.find_elements_by_xpath(".//*[@class='classes selectedClass']")
                self.assertIsNotNone(selected_classes_elements)
                if len(selected_classes_elements) > 0:
                    selected_classes_elements[0].click()
        # question list test
        self.assertIn("pages/studentQuestions.php", driver.current_url)
        # ... ...
        #tear down
        time.sleep(1)
        if click_select_class_name != '':
            back_btn = driver.find_element_by_class_name("back")
            self.assertIsNotNone(back_btn)
            back_btn.click()
            self.assertIn("pages/studentDashboard.php", driver.current_url)
            # add & remove classes
            manage_class_btn = driver.find_element_by_class_name("openAddClassFormDialog")
            self.assertIsNotNone(manage_class_btn)
            manage_class_btn.click()
            time.sleep(1)
            span_elements = driver.find_elements_by_xpath(".//*[@class='classCheckBox']")
            self.assertIsNotNone(span_elements)
            for x in span_elements:
                class_element = span_elements[0].find_element_by_xpath(".//*[@type='checkbox']")
                self.assertIsNotNone(class_element)
                if click_select_class_name == class_element.get_attribute('value'):
                    span_elements[0].click()
                    break
            # submit
            add_classes_btn = driver.find_element_by_xpath(".//*[@class='submitBtn button-primary']")
            self.assertIsNotNone(add_classes_btn)
            add_classes_btn.click()
            time.sleep(1)
    @classmethod
    def tearDownClass(self):
        self.driver.quit()
def Suite():
    """Assemble the student use-case tests in their intended run order."""
    suiteTest = unittest.TestSuite()
    for case_name in ("test_login_error", "test_login",
                      "test_manage_class", "test_questoin_list"):
        suiteTest.addTest(Test_Student_Use_Cases(case_name))
    return suiteTest
if __name__ == "__main__":
    # unittest.main()
    # Run the suite and write a timestamped HTML report under ./report/.
    now = time.strftime("%Y%m%d_%H%M%S",time.localtime(time.time()))
    # windows
    # report_path = ".\\report\\report_student_" + now + ".html"
    # linux or mac
    report_path = "./report/report_student_" + now + ".html"
    fp = open(report_path, 'wb')
    runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='Student Use Cases TestReport', description='Test Student Use Cases')
    runner.run(Suite())
    fp.close()
| true |
63b218c32269fc23b20329308f27fd390ac6048f | Python | 863752027z/lab_server | /micro_detect/get_file.py | UTF-8 | 1,173 | 2.625 | 3 | [] | no_license | import os
import torch.utils.data as Data
from torchvision import transforms, datasets
def get_path(base_path):
    """Return '<base_path>/<name>' for each immediate subdirectory.

    Yields an empty list when base_path does not exist or has no subdirs.
    """
    subdir_paths = []
    for _root, dirs, _files in os.walk(base_path):
        subdir_paths.extend(base_path + '/' + name for name in dirs)
        break  # only the top level of the tree is wanted
    return subdir_paths
def testLoader(file_path, batch_size, shuffle, num_workers):
    """Build a DataLoader over an ImageFolder at *file_path*.

    Images are converted to tensors and normalised to roughly [-1, 1]
    (mean 0.5, std 0.5 per channel).
    """
    normalize_all = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    folder = datasets.ImageFolder(file_path, transform=normalize_all)
    return Data.DataLoader(dataset=folder,
                           batch_size=batch_size,
                           shuffle=shuffle,
                           num_workers=num_workers)
# Script entry: build one DataLoader per sub-directory of the SAMM training
# set and print it. NOTE(review): shuffle=False, so batches keep folder order.
batch_size = 4
num_workers = 8
base_path = '/home/zlw/dataset/SAMM/train'
path_list = get_path(base_path)
for i in range(len(path_list)):
    temp_train = testLoader(path_list[i], batch_size, False, num_workers)
    print(temp_train)
| true |
bd245d25908174a44c070e4c0f1e61f966ea00ac | Python | tastypotinc/Taiwan_Stock_info | /python_code_for_ref/old_version_get_stock/html_parser.py | UTF-8 | 6,074 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Here the 日盛 (Jih Sun securities) web-page data is split into two files,
# "index" and "data"; the needed text is cut from each and then merged.
# Finally the data is reorganised, because the page's tables run both
# horizontally and vertically, so the values end up somewhat scattered.
# Beware of the page encoding, and note that <p> tags yield one extra value.
#import htmldom
from htmldom import htmldom
def html_parser(path_time,path_stock,path_file):
    """Parse one saved brokerage page (web_data/<date>/<stock>/<file>.djhtm).

    Splits the page into an "index" (labels) file and a "data" (values)
    file, merges them line by line, then regroups the merged lines into
    labelled sections in finance_basic_file.txt.
    Returns 1 when the page is a "no such stock" error page, 0 on success.
    """
    #print(path_time)
    #print(path_file)
    #print(path_file)
    print("web_data\%s\%s\%s.djhtm" %(path_time,path_stock,path_file))
    dom_temp = open("web_data\%s\%s\%s.djhtm" %(path_time,path_stock,path_file),"r")
    dom = dom_temp.read()
    #print( dom )
    dom_htmldom = htmldom.HtmlDom().createDom(dom)
    # str.find == -1 means the "no such stock data" marker is absent.
    error_page = dom.find("無此股票資料")#error
    if(error_page==-1):
        print("ok")
    else:
        print("error page")
        return(1)
    #print( dom_htmldom )
    index_element = dom_htmldom.find("td .t4t1") #index file use it
    # Label cells to skip (chairman, general manager, spokesperson, revenue
    # mix, phone, website, address, remarks).
    index_useless = {"董事長","總經理","發言人","營收比重","公司電話","網址","公司地址","說明"}
    index_match = 0
    # Filter the label cells into the index file.
    index_file = open("web_data\%s\%s\index_file" %(path_time,path_stock),"w")
    for x in index_element:
        for y in index_useless:
            if(x.text() == y):
                index_match =1
        if(index_match == 0):
            index_file.write(x.text())
            index_file.write("\n")
        index_match =0
    index_file.close()
    #data###################################################################################33
    data_element = dom_htmldom.find("td .t3n1")
    data_element_usless = dom_htmldom.find("td .t3n1 .t3n1") #cause <p> will one more record
    #print(data_element.text())
    data_unit =1
    # Filter the value cells into the data file.
    data_file_temp = open("web_data\%s\%s\data_file_temp" %(path_time,path_stock),"w")
    for x in data_element:
        #print(x.text(0))
        for y in data_element_usless:
            if(x.text()==y.text()):
                data_unit = 0
        if(data_unit == 1): #correct data #cause <p> will one more record
            data_file_temp.write(x.text())
            data_file_temp.write("\n")
        data_unit =1 #get next data
    data_file_temp.close()
    # Drop blank lines from the data file.
    data_file_temp = open("web_data\%s\%s\data_file_temp" %(path_time,path_stock),"r")
    data_file = open("web_data\%s\%s\data_file" %(path_time,path_stock),"w")
    for x in data_file_temp:
        if(x!="\n"):
            data_file.write(x)
    data_file_temp.close()
    data_file.close()
    # Combine index and data, one "<label> <value...>" line each.
    index_file = open("web_data\%s\%s\index_file" %(path_time,path_stock),"r").read().split("\n")
    data_file = open("web_data\%s\%s\data_file" %(path_time,path_stock),"r").read().split("\n")
    finance_basic_file_temp = open("web_data\%s\%s\\finance_basic_file_temp" %(path_time,path_stock),"w")
    str_data =""
    #print (index_file[0])
    for loop_num in range(58):
        #print(loop_num)
        finance_basic_file_temp.write(index_file[loop_num]+" "+data_file[loop_num]+"\n")
    # Rows 58-63 each take six consecutive data values.
    for loop_num in range(58,64):
        #print(loop_num)
        for x in range(6):
            str_data = (str_data+" "+data_file[loop_num+x])
        finance_basic_file_temp.write(index_file[loop_num]+str_data+"\n")
        str_data =""
    # Year information: collect cells from "年度" (year) onwards.
    enable =0
    str_data =""
    year_element = dom_htmldom.find("td .t4t")
    for x in year_element:
        if(x.text() == "年度"):
            enable =1
            str_data = x.text()
        # NOTE(review): bitwise & on an int-cast bool; behaves like `and` here.
        if(enable & int(x.text()!="年度")):
            str_data = (str_data+" "+x.text())
    #print(str_data)
    finance_basic_file_temp.write(str_data+"\n")
    finance_basic_file_temp.close()
    #############################################################################
    # Reorganise the merged data into sections (the page lays tables out both
    # horizontally and vertically, so related rows are scattered):
    ####################
    # tax credit rate (稅額扣抵率)
    # return on investment (投資報酬率)
    # financial ratios (財務比例)
    # investment risk (投資風險)
    # basic information (基本資料)
    # profitability (獲利能力)
    # previous year's stock allotment (前一年度配股)
    # financial forecast (財務預測)
    ########################################################################
    finance_basic_file_temp = open("web_data\%s\%s\\finance_basic_file_temp" %(path_time,path_stock),"r")
    finance_basic_file = open("web_data\%s\%s\\finance_basic_file.txt" %(path_time,path_stock),"w")
    str_input =finance_basic_file_temp.readlines()
    #print(finance_basic_file_temp.readlines()[13])
    str_data=""
    for x in range(18):
        #print(str_input[x])
        str_data =str_data+str_input[x]
    #print(str_data)
    # tax credit rate
    str_0 = "稅額扣抵率\n"+str_input[22]+str_input[26]+str_input[29]+str_input[32]+str_input[35]
    #print(str_0)
    # return on investment
    str_1 = "投資報酬率\n"+str_input[19]+str_input[23]+str_input[27]+str_input[30]+str_input[33]
    # financial ratios
    str_2 = "財務比例\n"+str_input[20]+str_input[24]+str_input[28]+str_input[31]+str_input[34]+str_input[36]
    # investment risk
    str_3 = "投資風險\n"+str_input[21]+str_input[25]
    # basic information
    str_4 = "基本資料\n"+str_input[37]+str_input[41]+str_input[45]+str_input[49]
    # profitability
    str_5 = "獲利能力\n"+str_input[38]+str_input[42]+str_input[46]+str_input[50]
    # previous year's stock allotment
    str_6 = "前一年度配股\n"+str_input[39]+str_input[43]+str_input[47]+str_input[51]+str_input[54]+str_input[56]+str_input[57]
    # financial forecast
    str_7 = "財務預測\n"+str_input[40]+str_input[44]+str_input[48]+str_input[52]+str_input[55]
    # annual data
    str_8 = "年度資料\n"+str_input[64]+str_input[58]+str_input[59]+str_input[60]+str_input[61]+str_input[62]+str_input[63]
    str_data =str_data+str_0+str_1+str_2+str_3+str_4+str_5+str_6+str_7+str_8
    #print(str_data)
    finance_basic_file.write(str_data)
    finance_basic_file.close()
    return(0) #finish program
#test thins function
#a="2012-11-4"
#b="1108幸福"
#c="基本資料1108"
#b="1101台泥"
#c="基本資料1101"
#html_parser(a,b,c)
| true |
7c3cc3f23d778f253d196186767ddf79987c8f39 | Python | eegnom1807/raspy_led_pwm | /test_files/led.py | UTF-8 | 249 | 2.953125 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)  # use physical (board) pin numbering
GPIO.setup(7, GPIO.OUT)  # physical pin 7 drives the LED
led = GPIO.PWM(7, 100)  # software PWM at 100 Hz
led.start(0)  # start with 0% duty cycle (LED off)
while True:
    # Restart each sweep at 0% duty, then step brightness through
    # 0/25/50/75%, holding each level for half a second.
    led.start(0)
    for i in range(0, 100, 25):
        print(i)
        led.ChangeDutyCycle(i)
        time.sleep(0.5)
| true |
class Data:
    """Holds one 'DD-MM-YYYY' date string and offers parsing plus a
    plausibility check.

    Exercise quirk kept intact: the value is stored on the *class*, so
    every instance shares the most recently supplied date string.
    """

    data = None  # most recently supplied date string

    def __init__(self, data):
        Data.data = data

    @classmethod
    def parse(cls):
        """Return the stored string as [day, month, year] integers."""
        return [int(piece) for piece in cls.data.split("-")]

    @staticmethod
    def validation():
        """True when day is 1-31, month 1-12 and the year is positive."""
        day, month, year = Data.parse()
        return 1 <= day <= 31 and 1 <= month <= 12 and year > 0
# Demo: a valid date ...
d = Data("15-10-1987")
print(d.parse())
print(d.validation())
# ... and an impossible one (day 44, month 13).
d = Data("44-13-1987")
print(d.parse())
print(d.validation())
| true |
8688242a391fe2f3a775ea64efc3980735e91c1a | Python | tranquansp/KerasTools | /Dev/rl/agents/pg.py | UTF-8 | 2,591 | 3 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# Policy algorithm by https://github.com/DeepReinforcementLearning/DeepReinforcementLearningInAction
# Distributed there under the MIT license
import catch
import numpy as np
import keras
grid_size = 10
l1 = grid_size*grid_size*3  # network input size: flattened grid, 3 values per cell (presumably channel planes of the catch env - confirm)
l2 = 150  # hidden layer width
l3 = 3  # number of discrete actions (0, 1, 2)
learning_rate = 0.001  # RMSprop learning rate
def generate_model():
    """Build and compile the policy network: state -> action probabilities.

    Architecture: l1 inputs -> Dense(l2) + LeakyReLU -> softmax over l3
    actions, trained with the REINFORCE loss -sum(y_true * log(pi)).
    """
    state_in = keras.layers.Input(shape=(l1,), name="Input_State")
    hidden = keras.layers.LeakyReLU()(keras.layers.Dense(l2)(state_in))
    policy_out = keras.layers.Dense(l3, activation='softmax')(hidden)

    def loss_fn(y_true, y_pred):
        # y_true carries the discounted return in the taken-action slot,
        # so this computes -sum(G * log pi(a|s)).
        return -1.0 * keras.backend.sum(y_true * keras.backend.log(y_pred))

    net = keras.models.Model(inputs=state_in, outputs=policy_out)
    net.compile(loss=loss_fn, optimizer=keras.optimizers.RMSprop(learning_rate))
    return net
model = generate_model()
model.summary()
# REINFORCE training over the Catch environment: play an episode with the
# current policy, compute discounted returns, then fit the policy on them.
MAX_DUR = 20
MAX_EPISODES = 10000
gamma_ = 0.95
time_steps = []
env = catch.Catch(grid_size=grid_size)
win_stats = []
loss_stats = []
for episode in range(MAX_EPISODES):
    env.reset()
    curr_state = env.get_state().flatten()
    done = False
    transitions = [] # list of state, action, rewards
    for t in range(MAX_DUR): #while in episode
        # Sample an action from the policy's softmax output.
        act_prob = model.predict(np.expand_dims(np.asarray(curr_state), axis=0))
        action = np.random.choice(np.array([0,1,2]), p=act_prob[0])
        prev_state = curr_state
        curr_state, reward, done = env.play(action)
        curr_state = curr_state.flatten()
        transitions.append((prev_state, action, reward))
        if done:
            win_stats.append(1 if reward == 1.0 else 0)
            break
    # Optimize policy network with full episode
    ep_len = len(transitions) # episode length
    # One row per step; only the taken action's column gets the return.
    discounted_rewards = np.zeros((ep_len, l3))
    train_states = []
    for i in range(ep_len): #for each step in episode
        discount = 1.0
        future_reward = 0.0
        # discount rewards
        for i2 in range(i, ep_len):
            future_reward += transitions[i2][2] * discount
            discount = discount * gamma_
        discounted_rewards[i][transitions[i][1]] = future_reward
        train_states.append(transitions[i][0])
    train_states = np.asarray(train_states)
    # Backpropagate model with preds & discounted_rewards here
    loss = model.train_on_batch(train_states, discounted_rewards)
    loss_stats.append(loss)
    # Report win rate and mean loss every 100 finished episodes.
    if len(win_stats) >= 100:
        print("Episode {: 4d} Win perc {:2.4f} Loss {:2.4f}".format(episode, sum(win_stats)/100.0, sum(loss_stats)/100.0))
        win_stats = []
        loss_stats = []
| true |
class Foo(object):
    """Base class for the isinstance/type demonstrations below."""
    pass
class Bar(Foo):
    """Direct subclass of Foo."""
    pass
class WW(Foo):
    """Another direct subclass of Foo."""
    pass
# obj = Bar()
# obj1 = WW()
# isinstance checks whether an object is an instance of the given class (wrong)
# isinstance checks whether an object is an instance of the given class OR of one of its subclasses
# print(isinstance(obj, Foo))
# print(isinstance(obj, Bar))
# print(isinstance(obj1, Foo))
# print(type(obj) == Bar)
# print(type(obj) == Foo)
class Foo(object):
    """Holds a name; func() echoes it to stdout."""

    def __init__(self, name):
        self.name = name

    def func(self):
        print(self.name)
# obj = Foo('egon')
# # obj.func()
# # Foo.func(Foo('égon'))
# Foo.func(obj)
# for x in range(1000):
# for y in range(1000):
# for z in range(1000):
# if x**2+y**2==z**2 and x+y+z==1000:
# print('%s,%s,%s'%(x,y,z))
# import time
# start_time=time.time()
# for i in range(0,1001):
# for j in range(0,1001-i):
# z=1000-i-j
# if i+j+z ==1000 and i**2+j**2==z**2:
# print("i==%s,j==%s,z==%s"%(i,j,z))
# end_time=time.time()
# print("times:%s"%(end_time-start_time))
import copy
# a=[1,2,[3,4,],'fuck']
# b=a.copy()
# a.append(5)
# print(b,id(b))
# print(a,id(a))
# for i in a:
# print(id(i))
# for i in b:
# print(id(i))
# Fixed demo sequences (only referenced by the commented-out zip() experiments).
x = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
z = [2, 2, 2, 2, 2, 2]
# xyz=zip(x,y,z)
# print(list(xyz))

# Build a 35-element sequence: [3,1,2,6] while i<5, [3,1] while i<10, [3] for the rest.
# Bug fix: the original named this variable `list`, shadowing the builtin (which
# would break the list() calls in the commented-out code above); the stray
# `i=1` before the loop was dead code and has been removed.
values = []
for i in range(15):
    if i < 15:  # always true inside range(15); kept to mirror the tiers below
        values.append(3)
    if i < 10:
        values.append(1)
    if i < 5:
        values.append(2)
    if i < 5:
        values.append(6)
print(values)
print(len(values))
# x = [
#
# y = [4, 5, 6]
#
# z = [7, 8, 9]
#
# xyz = zip(x, y, z)
# print(list(xyz)) | true |
05013dc748902d5b680fb6aece184c36d3627476 | Python | ice-bear-git/PyQt5-learn | /GUI/Basic-train/PyQtGraph/graph.py | UTF-8 | 3,555 | 2.59375 | 3 | [] | no_license | # import pyqtgraph.examples
# pyqtgraph.examples.run()
#!/bin/bash
# -*- coding: UTF-8 -*-
import pyqtgraph as pg
import sys
import numpy as np
import PyQt5
# 基本控件都在这里面
from PyQt5.QtWidgets import QApplication, QMainWindow, QDesktopWidget, QStyleFactory, QWidget
from PyQt5.QtGui import QPalette, QColor
from PyQt5.QtCore import Qt
from mainwidget import Ui_Form
from mainwindow import Ui_MainWindow
# Central widget: combines QWidget with the Designer-generated Ui_Form layout.
# (NOTE(review): the original comment mentioned QMainWindow/Ui_MainWindow, but
# the class actually derives from QWidget and Ui_Form.)
class MyMainWidget(QWidget, Ui_Form):
    def __init__(self, parent = None):
        super(MyMainWidget, self).__init__(parent)
        # Global pyqtgraph defaults: light background, dark foreground, antialiasing.
        pg.setConfigOption('background', '#f0f0f0')
        pg.setConfigOption('foreground', 'd')
        pg.setConfigOptions(antialias = True)
        self.setupUi(self)
        self.setLayout(self.gridLayout)
        # Quit button closes the application.
        self.quit_btn.clicked.connect(self.quit_act)
        # Plot buttons: each one redraws a pyqtgraph widget.
        self.pushButton.clicked.connect(self.graph_plot)
        self.pushButton_2.clicked.connect(self.graph_plot1)
    def graph_plot1(self):
        """Draw two stacked plots: overlapping bar charts, then a line with a grid."""
        self.pyqtgraph2.clear()
        plt = self.pyqtgraph2.addPlot(title="test")
        x = np.random.normal(size=20)
        y1 = np.sin(x)
        y2 = 1.1 * np.sin(x + 1)
        bg1 = pg.BarGraphItem(x = x, height = y1, width = 2, brush = 'r')
        bg2 = pg.BarGraphItem(x = x, height = y2, width = 2, brush = 'b')
        plt.addItem(bg1)
        plt.addItem(bg2)
        # self.pyqtgraph2.nextColumn()
        self.pyqtgraph2.nextRow()
        plt2 = self.pyqtgraph2.addPlot(title="test1")
        x = np.linspace(1, 20, 20)
        y3 = np.random.normal(size=20)
        plt2.plot(x, y3, pen = pg.mkPen(width = 2, color = 'd'))
        plt2.showGrid(x=True, y=True)
    def graph_plot(self):
        """Draw two random-noise line series on the first pyqtgraph widget."""
        self.pyqtgraph.clear()
        # Two possible ways to plot; the addPlot(y=...) variant is kept commented out.
        # self.pyqtgraph.addPlot(y = np.random.normal(size=100),
        #                        pen = pg.mkPen(color='b', width=2))
        plt2 = self.pyqtgraph.addPlot(title="multi rules")
        plt2.plot(np.random.normal(size=150),
                  pen = pg.mkPen(color='r', width=2))
        plt2.plot(np.random.normal(size=150) + 5,
                  pen = pg.mkPen(color='b', width=2))
    def quit_act(self):
        """Log which button fired the signal, then quit the Qt application."""
        # sender() is the object that emitted the signal.
        sender = self.sender()
        print(sender.text() + '键被按下')
        qApp = QApplication.instance()
        qApp.quit()
class MyMainWindow(QMainWindow, Ui_MainWindow):
    """Top-level window: hosts MyMainWidget, sets title/status bar, centers itself."""
    def __init__(self, parent = None):
        super(MyMainWindow, self).__init__(parent)
        self.setupUi(self)
        q = MyMainWidget()
        self.setCentralWidget(q)
        # Make the window slightly translucent.
        self.setWindowOpacity(0.9)
        # self.resize(1000, 700)
        # Default status bar.
        # (Original note: tried wiring other button clicks as in the multi-line
        # text example, but it did not work.)
        self.status = self.statusBar()
        self.status.showMessage("你在主页面~")
        # Title bar text.
        self.setWindowTitle("建模协会录入信息")
        # Center the window on screen.
        self.center()
    def center(self):
        '''
        Get the desktop size, get the window size,
        then move the window so it is centered on the screen.
        '''
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move((screen.width() - size.width()) / 2, (screen.height() - size.height()) / 2)
if __name__ == "__main__":
    # Executed when run directly from a shell: build the Qt app and main window.
    app = QApplication(sys.argv)
    mywin = MyMainWindow()
    mywin.show()
    # Enter the Qt main loop until the user exits.
    sys.exit(app.exec())
| true |
11195cf9bec61a758e7daf6be9d8b0ab2aa7d8b9 | Python | PhilippeOlivier/wsp | /tools/generate.py | UTF-8 | 3,387 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
################################################################################
#
# This script generates a batch of 'num_instances' instances of size
# 'num_items'. Every instance shares the same item weights, but has a different
# cost matrix. This script takes as its only argument batch name.
#
# Note: A necessary condition is that p+q+r <= 1.
#
# Example:
# python3 generate.py batch_name
#
################################################################################
### PARAMETERS #################################################################
num_instances = 5  # number of instances in the batch (one cost matrix each)
num_items = 50  # items per instance; all instances share the same weights
p = 0.33 # Probability of c<0
q = 0.33 # Probability of c>0
r = 0.00 # Probability of conflict
conflict = 9999  # sentinel cost marking a forbidden (conflicting) item pair
min_item_weight = 1
max_item_weight = 8
min_cost = -5
max_cost = 5
################################################################################
import io
import os
import random
import sys
# Generates the cost matrices
def generate_matrices():
    """Build one symmetric num_items x num_items cost matrix per instance.

    For each unordered pair (i, j): with probability p the cost is a random
    negative value, with probability q a random positive value, with
    probability r the pair is marked as a conflict; otherwise it stays 0.
    The diagonal is always 0.
    """
    matrices = []
    for _ in range(num_instances):
        matrix = [[0] * num_items for _ in range(num_items)]
        for i in range(num_items):
            for j in range(i):
                roll = random.random()
                if roll <= p:
                    cost = random.randint(min_cost, -1)
                elif roll <= p + q:
                    cost = random.randint(1, max_cost)
                elif roll <= p + q + r:
                    cost = conflict
                else:
                    continue
                # Mirror the value so the matrix stays symmetric.
                matrix[i][j] = cost
                matrix[j][i] = cost
        matrices.append(matrix)
    return matrices
# Generates .wsp instance #n
def generate_instance(n):
    """Write instance #n to '<dir_path><dir_name>-<n>.wsp'.

    File layout: line 1 is the item count, line 2 the comma-terminated item
    weights, then one comma-terminated row of the cost matrix per line.
    Relies on the module-level weights/matrices/num_items globals.
    """
    filename = dir_path + dir_name + "-" + str(n) + ".wsp"
    with io.open(os.path.join(filename), "w") as f:
        f.write(str(num_items) + "\n")
        for i in range(num_items):
            f.write(str(weights[i]) + ",")
        f.write("\n")
        for i in range(num_items):
            for j in range(num_items):
                f.write(str(matrices[n][i][j]) + ",")
            f.write("\n")
        # Bug fix: the original called f.close() inside the with-block; the
        # context manager already closes the file, so that call was redundant.
# Draw the shared item weights once (descending order) for the whole batch.
weights = [random.randint(min_item_weight, max_item_weight)
           for _ in range(num_items)]
weights.sort(reverse=True)
matrices = generate_matrices()
# One bin per ~10 units of total weight.
num_bins = int(round(float(sum(weights))/10, 0))
# Create the directory (batch name comes from the first CLI argument)
dir_name = str(sys.argv[1])
dir_path = "./../data/"+dir_name+"/"
os.mkdir(dir_path)
# Create the instances and the instances.txt file
with io.open(os.path.join(dir_path+"instances.txt"), "w") as f:
    for i in range(0, num_instances):
        generate_instance(i)
        f.write(dir_name+"-"+str(i)+".wsp\n")
    f.close()
# Create the parameters.txt file
with io.open(os.path.join(dir_path+"parameters.txt"), "w") as f:
    f.write("num_items="+str(num_items)+"\n")
    f.write("num_bins="+str(num_bins)+"\n")
    f.write("total_weight="+str(sum(weights))+"\n")
    mean_load = float(sum(weights))/float(num_bins)
    f.write("mean_load="+str(mean_load)+"\n")
    f.write("min_item_weight="+str(min_item_weight)+"\n")
    f.write("max_item_weight="+str(max_item_weight)+"\n")
    f.write("min_cost="+str(min_cost)+"\n")
    f.write("max_cost="+str(max_cost)+"\n")
    f.write("p="+str(p)+"\n")
    f.write("q="+str(q)+"\n")
    f.write("r="+str(r)+"\n")
| true |
10ca4a6c709a74ed60bb1c1d17b19782ddd1d356 | Python | byAbaddon/Advanced-Course-PYTHON-May-2020 | /3.0 Multidimensional Lists - Lab/04. Symbol in Matrix.py | UTF-8 | 337 | 3.53125 | 4 | [] | no_license | import sys
# Read an N-line character matrix and report the first position of a symbol.
size = int(input())
matrix = [input() for _ in range(size)]
symbol = input()

# Scan rows top-to-bottom; str.find yields the left-most column in a row.
for row_idx, row in enumerate(matrix):
    col_idx = row.find(symbol)
    if col_idx != -1:
        print((row_idx, col_idx))
        sys.exit()
print(symbol, 'does not occur in the matrix')
71961edab22183c51747f3f7ff6806efc044b51a | Python | ahmedeltaweel/searchly | /main.py | UTF-8 | 707 | 2.640625 | 3 | [
"MIT"
] | permissive | import sys
from utils import process_docs, valid_pathes, perform_scored_search
def main():
    """
    Main application entrypoint, bootstrap the indexer and db.

    Expects one or more directory paths on the command line; builds the
    document index, then runs an interactive search prompt (':q' exits).
    """
    if len(sys.argv) < 2:
        sys.exit('Please, provide a valid system dir path')
    paths = sys.argv[1:]
    if not valid_pathes(paths):
        # Bug fix: the original formatted the message with an undefined name
        # `path`, raising NameError instead of reporting the bad argument(s).
        sys.exit('{} is not a valid dir path.'.format(', '.join(paths)))
    db, index = process_docs(paths)
    # start command prompt
    while True:
        print('search (:q to exit) >', end=' ')
        query = input()
        if query == ':q':
            print('Terminating the program ...')
            sys.exit()
        perform_scored_search(db, index, query)
if __name__ == '__main__':
    main()
| true |
26470eee09ecce31079978cab58864fd2b98a0f5 | Python | phuchduong/essencero_restoration | /scripts/legacy/test_codecs.py | UTF-8 | 1,437 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | # Encodings
# Codec | Aliases | Languages
# cp850 | 850, IBM850 | Western Europe
# cp1252 | windows-1252 | Western Europe
# latin_1 | iso-8859-1, iso8859-1, 8859, cp819, latin, latin1, L1 | West Europe
# iso8859_3 | iso-8859-3, latin3, L3 | Esperanto, Maltese
# iso8859_15 | iso-8859-15 | Western Europe
codecs = [
"cp850",
"cp1252",
"latin_1",
"iso8859_3",
"iso8859_15",
"cp437",
"cp1256",
"cp1257",
"iso8859_2",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"iso8859_10",
"iso8859_13",
"iso8859_14",
"cp1250",
"cp1251",
"cp866",
"koi8_r",
"koi8_u",
"cp1253",
"cp1255",
"cp1254",
"cp1258",
]
works = []
for codec in codecs:
try:
main(codec=codec)
works.append(codec)
except (UnicodeDecodeError, UnicodeEncodeError) as e:
print("--------------------------------")
print("Error in codec: " + codec + ". Error: " + str(e))
print("--------------------------------")
pass
print("--------------------------------")
print("Script Finished-----------------")
print("These codecs work:")
print(",".join(works))
| true |
c0e359bbf80796e23aa10057f2fb92d2caa6a567 | Python | Paccy10/flask-ecommerce-api | /api/models/user.py | UTF-8 | 807 | 2.796875 | 3 | [] | no_license | """ Module for User Model """
from .database import db
from .base import BaseModel
class User(BaseModel):
    """ User Model class """
    __tablename__ = 'users'
    firstname = db.Column(db.String(100), nullable=False)
    lastname = db.Column(db.String(100), nullable=False)
    email = db.Column(db.String(100), unique=True, nullable=False)
    password = db.Column(db.String(250), nullable=False)
    is_admin = db.Column(db.Boolean, default=False, nullable=False)
    is_activated = db.Column(db.Boolean, default=False, nullable=False)
    @classmethod
    def find_by_email(cls, user_email):
        """ Finds an activated user instance by email, or returns None.

        Query.first() already returns None when no row matches, so the
        original's explicit `if user: return user / return None` was redundant.
        """
        return cls.query.filter_by(
            email=user_email, is_activated=True).first()
| true |
46b788e74b608fcc997319150e07782dfc907ff6 | Python | njk8/AI-labs | /lab1/myvacuumagent.py | UTF-8 | 11,814 | 2.59375 | 3 | [] | no_license | from lab1.liuvacuum import *
from random import randint
DEBUG_OPT_DENSEWORLDMAP = False
AGENT_STATE_UNKNOWN = 0
AGENT_STATE_WALL = 1
AGENT_STATE_CLEAR = 2
AGENT_STATE_DIRT = 3
AGENT_STATE_HOME = 4
AGENT_DIRECTION_NORTH = 0
AGENT_DIRECTION_EAST = 1
AGENT_DIRECTION_SOUTH = 2
AGENT_DIRECTION_WEST = 3
def direction_to_string(cdr):
    """Map a direction code to its compass name.

    The tuple order matches AGENT_DIRECTION_NORTH/EAST/SOUTH/WEST (0-3);
    cdr is reduced modulo 4 first, so any integer is accepted.
    """
    return ("NORTH", "EAST", "SOUTH", "WEST")[cdr % 4]
"""
Internal state of a vacuum agent
"""
class MyAgentState:
def __init__(self, width, height):
# Initialize perceived world state
self.world = [[AGENT_STATE_UNKNOWN for _ in range(height)] for _ in range(width)]
self.world[1][1] = AGENT_STATE_HOME
# Agent internal state
self.last_action = ACTION_NOP
self.second_last_action = ACTION_NOP
self.direction = AGENT_DIRECTION_EAST
self.pos_x = 1
self.pos_y = 1
# Metadata
self.world_width = width
self.world_height = height
"""
Update perceived agent location
"""
# we are checking whether the matrix is already visited
def check_visited(self, visit, visitx, k):
counter = 0
for i in range(self.world_height):
for j in range(self.world_width):
if (i == k or j == k or (i == self.world_height - 1 - k) or (j == self.world_height - 1 - k)):
if visit[i][j] or visitx[i][j]:
print(visit[i][j], end=' ')
counter += 1
else:
print(" ", end=' ')
print()
print("Counter Value:", counter)
if counter == ((self.world_height - 2 * k) * (self.world_width - 2 * k) - (self.world_height - 2 - 2 * k) * (
self.world_width - 2 - 2 * k)) + 8 * k:
print("Final-Counter")
return True
else:
return False
# we are updating the direction of the agent after left
def update_pos_after_o(self, bump):
if bump and self.direction == AGENT_DIRECTION_EAST:
self.direction = AGENT_DIRECTION_NORTH
elif bump and self.direction == AGENT_DIRECTION_NORTH:
self.direction = AGENT_DIRECTION_WEST
elif bump and self.direction == AGENT_DIRECTION_WEST:
self.direction = AGENT_DIRECTION_SOUTH
elif bump and self.direction == AGENT_DIRECTION_SOUTH:
self.direction = AGENT_DIRECTION_EAST
# we are updating the direction of the agent after right
def update_pos_after_b(self, bump):
if bump and self.direction == AGENT_DIRECTION_EAST:
self.direction = AGENT_DIRECTION_SOUTH
elif bump and self.direction == AGENT_DIRECTION_SOUTH:
self.direction = AGENT_DIRECTION_WEST
elif bump and self.direction == AGENT_DIRECTION_WEST:
self.direction = AGENT_DIRECTION_NORTH
elif bump and self.direction == AGENT_DIRECTION_NORTH:
self.direction = AGENT_DIRECTION_EAST
def update_position(self, bump):
if not bump and self.last_action == ACTION_FORWARD:
if self.direction == AGENT_DIRECTION_EAST:
self.pos_x += 1
elif self.direction == AGENT_DIRECTION_SOUTH:
self.pos_y += 1
elif self.direction == AGENT_DIRECTION_WEST:
self.pos_x -= 1
elif self.direction == AGENT_DIRECTION_NORTH:
self.pos_y -= 1
"""
Update perceived or inferred information about a part of the world
"""
def update_world(self, x, y, info):
self.world[x][y] = info
"""
Dumps a map of the world as the agent knows it
"""
def print_world_debug(self):
for y in range(self.world_height):
for x in range(self.world_width):
if self.world[x][y] == AGENT_STATE_UNKNOWN:
print("?" if DEBUG_OPT_DENSEWORLDMAP else " ? ", end="")
elif self.world[x][y] == AGENT_STATE_WALL:
print("#" if DEBUG_OPT_DENSEWORLDMAP else " # ", end="")
elif self.world[x][y] == AGENT_STATE_CLEAR:
print("." if DEBUG_OPT_DENSEWORLDMAP else " . ", end="")
elif self.world[x][y] == AGENT_STATE_DIRT:
print("D" if DEBUG_OPT_DENSEWORLDMAP else " D ", end="")
elif self.world[x][y] == AGENT_STATE_HOME:
print("H" if DEBUG_OPT_DENSEWORLDMAP else " H ", end="")
print() # Newline
print() # Delimiter post-print
"""
Vacuum agent
"""
class MyVacuumAgent(Agent):
def __init__(self, world_width, world_height, log):
super().__init__(self.execute)
self.WH = world_height
self.WW = world_width
# initializing the two matrixes for tracking of the vaccum agent
self.visited = [[AGENT_STATE_UNKNOWN for _ in range(world_height)] for _ in range(world_width)]
self.visited2 = [[AGENT_STATE_UNKNOWN for _ in range(world_height)] for _ in range(world_width)]
self.k = 1
self.initial_random_actions = 0
self.iteration_counter = self.WH * self.WW * 2
self.state = MyAgentState(world_width, world_height)
self.log = log
def move_to_random_start_position(self, bump):
action = random()
self.initial_random_actions -= 1
self.state.update_position(bump)
if action < 0.1666666: # 1/6 chance
self.state.direction = (self.state.direction + 3) % 4
self.state.last_action = ACTION_TURN_LEFT
return ACTION_TURN_LEFT
elif action < 0.3333333: # 1/6 chance
self.state.direction = (self.state.direction + 1) % 4
self.state.last_action = ACTION_TURN_RIGHT
return ACTION_TURN_RIGHT
else: # 4/6 chance
self.state.last_action = ACTION_FORWARD
return ACTION_FORWARD
def execute(self, percept):
###########################
# DO NOT MODIFY THIS CODE #
###########################
bump = percept.attributes["bump"]
dirt = percept.attributes["dirt"]
home = percept.attributes["home"]
# Move agent to a randomly chosen initial position
if self.initial_random_actions > 0:
self.log("Moving to random start position ({} steps left)".format(self.initial_random_actions))
return self.move_to_random_start_position(bump)
# Finalize randomization by properly updating position (without subsequently changing it)
elif self.initial_random_actions == 0:
self.initial_random_actions -= 1
self.state.update_position(bump)
self.state.last_action = ACTION_SUCK
self.log("Processing percepts after position randomization")
for i in range(self.WH):
for j in range(self.WW):
if i == 0 or i == (self.WH - 1) or j == 0 or j == (self.WW - 1):
self.visited[i][j] = 4
self.visited2[i][j] = 4
self.visited2[1][1] = self.visited[1][1] = 1
return ACTION_SUCK
########################
# START MODIFYING HERE #
########################
# Max iterations for the agent ---- Change the No. of Iterations here -----
if self.iteration_counter < 1:
if self.iteration_counter == 0:
self.log("Iteration counter is now 0. Halting!")
self.log("Performance: {}".format(self.performance))
self.iteration_counter -= 1
self.state.last_action = ACTION_NOP
return ACTION_NOP
self.log("Position: ({}, {})\t\tDirection: {}".format(self.state.pos_x, self.state.pos_y,
direction_to_string(self.state.direction)))
self.iteration_counter -= 1
# Track position of agent ---- updates to NONE ---
self.state.update_position(bump)
if bump:
# Get an xy-offset pair based on where the agent is facing
offset = [(0, -1), (1, 0), (0, 1), (-1, 0)][self.state.direction]
# Mark the tile at the offset from the agent as a wall (since the agent bumped into it)
self.state.update_world(self.state.pos_x + offset[0], self.state.pos_y + offset[1], AGENT_STATE_WALL)
# Update perceived state of current tile
if dirt:
self.state.update_world(self.state.pos_x, self.state.pos_y, AGENT_STATE_DIRT)
else:
self.state.update_world(self.state.pos_x, self.state.pos_y, AGENT_STATE_CLEAR)
# Debug
##self.state.print_world_debug()
# Decide action
print("xxx200xxx")
print(self.state.pos_x, self.state.pos_y, self.state.direction)
print("xxx200xxx")
print("This is", self.WH, "x", self.WW, "Iteration: ", self.iteration_counter)
for i in range(self.WH):
for j in range(self.WW):
print(self.visited[i][j], end=' ')
print()
print("$$$$$$$$$")
print("$$$$$$$$$")
# self.state.check_visited(self.visited2, self.visited, self.k)
# check whether the state is visited
if (self.state.check_visited(self.visited2, self.visited, self.k)):
self.visited[self.k][self.k] = 4
if (self.state.pos_x != self.k and (
self.state.pos_x != self.WH - self.k - 1) and self.state.pos_y != self.k and (
self.state.pos_y != self.WW - self.k - 1)):
print("#$# OUT #$#")
for i in range(self.WH):
for j in range(self.WW):
if (i == self.k or j == self.k or (i == self.WH - 1 - self.k) or (j == self.WW - 1 - self.k)):
self.visited[i][j] = 4
self.k += 1
f = [(0, -1), (1, 0), (0, 1), (-1, 0)][self.state.direction]
l = [(-1, 0), (0, -1), (1, 0), (0, 1)][self.state.direction]
# self.state.second_last_action = self.state.last_action
print("****State****:", str(self.state.last_action))
print("****bump****:", bump)
if dirt:
self.log("DIRT -> choosing SUCK action!")
self.state.last_action = ACTION_SUCK
return ACTION_SUCK
# we are turning left if last action not left and if not visited.
elif (self.state.last_action != ACTION_TURN_LEFT) and (
self.visited2[self.state.pos_y + l[1]][self.state.pos_x + l[0]] == 0):
self.state.update_pos_after_o(True)
self.state.last_action = ACTION_TURN_LEFT
return ACTION_TURN_LEFT
# turning right
elif bump or self.visited[self.state.pos_y + f[1]][self.state.pos_x + f[0]]:
self.visited[self.state.pos_y + f[1]][self.state.pos_x + f[0]] = 4
self.visited2[self.state.pos_y + f[1]][self.state.pos_x + f[0]] = 4
self.state.update_pos_after_b(True)
self.state.last_action = ACTION_TURN_RIGHT
return ACTION_TURN_RIGHT
else:
self.visited2[self.state.pos_y][self.state.pos_x] = 1
self.state.last_action = ACTION_FORWARD
return ACTION_FORWARD
| true |
c075f96228e6d93b781e93b3d164a94db8b1441d | Python | abechoi/My_Python | /ABSP/login.py | UTF-8 | 239 | 3.15625 | 3 | [] | no_license | myFile = open('secretFile.txt')
secret = myFile.read()
myFile.close()  # bug fix: the file handle was never closed after reading
# NOTE(review): read() keeps any trailing newline, so secretFile.txt must not
# end with one for the comparison below to succeed -- confirm the file format.
print("enter password:")
password = input()
if password == secret:
    print("access granted!")
    # The nested check only fires when the (correct) password is the weak one.
    if password == "12345":
        print("weak password!")
else:
    print("access denied")
9f395f44e360e1ab39842759e355b75279fd6a09 | Python | poudrenoire/dessin | /dessin.py | UTF-8 | 1,169 | 2.734375 | 3 | [
"CC0-1.0"
] | permissive | # Génère des dessins aléatoirement
import turtle
import random
import time
import tkinter
import uuid
# NOTE(review): time/tkinter/uuid are imported but unused (uuid only appears in
# a commented-out line below) -- candidates for removal.
screen = turtle.Screen()
screen.colormode(255)  # accept 0-255 RGB components
turtle.speed(9)
# Drawing generator: 25 iterations of random colors, turns, strokes,
# circles and jumps.
for _ in range(25):
    turtle.pencolor(random.randint(0,255), random.randint(0,255), random.randint(0,255))
    turtle.left(random.randint(0,360))
    turtle.forward(random.randint(0,25))
    turtle.right(random.randint(0,360))
    turtle.pencolor(random.randint(0,255), random.randint(0,255), random.randint(0,255))
    turtle.forward(random.randint(0,100))
    turtle.pensize(random.randint(0,6))
    turtle.tilt(random.randint(0,360))
    turtle.right(random.randint(0,360))
    turtle.pencolor(random.randint(0,255), random.randint(0,255), random.randint(0,255))
    turtle.circle(random.randint(0,100), random.randint(0,360))
    turtle.pencolor(random.randint(0,255), random.randint(0,255), random.randint(0,255))
    turtle.forward(random.randint(0,100))
    turtle.pensize(random.randint(0,6))
    turtle.goto(random.randint(-200,200),random.randint(-200,200))
# NOTE(review): turtle.done() blocks until the window is closed, so the
# postscript export below only runs afterwards -- confirm this is intended.
turtle.done()
ts = turtle.getscreen()
#ts.getcanvas().postscript(file=str(uuid.uuid4()))
ts.getcanvas().postscript(file="file.eps")
| true |
3308d861d4caf309be6e36b95c03f8eb757ebc06 | Python | goushan33/PycharmProjects | /learn_notes/leetcode/circul_queue.py | UTF-8 | 687 | 3.984375 | 4 | [] | no_license | #基于数组实现循环队列
class CirculQueue(object):
    """Fixed-capacity circular (ring) queue backed by a plain list.

    One slot is deliberately left unused so that head == tail unambiguously
    means "empty" while (tail + 1) % n == head means "full".
    """

    def __init__(self, capacity):
        self.items = [None] * capacity
        self.n = capacity
        self.head = 0
        self.tail = 0

    def enqueue(self, val):
        """Append val; return True on success, False when the queue is full."""
        nxt = (self.tail + 1) % self.n
        if nxt == self.head:
            return False
        self.items[self.tail] = val
        self.tail = nxt
        return True

    def dequeue(self):
        """Pop and return the oldest value, or False when the queue is empty."""
        if self.head == self.tail:
            return False
        val = self.items[self.head]
        self.head = (self.head + 1) % self.n
        return val
# Quick demo: capacity-8 ring, push 5 and 7, pop the oldest (prints 5).
q=CirculQueue(8)
q.enqueue(5)
q.enqueue(7)
print(q.dequeue())
2820b4c353ef886f77af7648aacb9aac740a73c9 | Python | tianhanl/wiki-scrapper | /get_polling.py | UTF-8 | 1,052 | 2.765625 | 3 | [] | no_license | import sys
# This file is created to allow getting polling data and saving it from the command line
# Usage: python3 get_polling.py "query"
# Output directory for the scraped polling tables.
path = './pollings/'
if __name__ == '__main__':
    from sys import argv
    import wiki
    import re
    if len(argv) < 2:
        print('usage: python3 get_polling "query"')
    else:
        query = argv[1]
        url_pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        # check is query a link
        if re.search(url_pattern, query, re.IGNORECASE) is not None:
            # if query is a link, directly use the link to get page content
            polling_tables = wiki.get_poll_tables(
                wiki.get_page_html_from_url(query))
            wiki.save_tables(polling_tables,
                             path + 'polling_data' + '.json')
        else:
            # Otherwise treat the query as a page title; the output file is
            # derived from the query (lowercase, spaces -> underscores).
            polling_tables = wiki.get_poll_tables(wiki.get_page_html(query))
            wiki.save_tables(polling_tables,
                             path + query.lower().replace(' ', '_') + '.json')
| true |
3ed25158647eed962f409010c514bf7136f65e09 | Python | ricardojoserf/triangle-position | /tripos.py | UTF-8 | 4,096 | 2.671875 | 3 | [] | no_license | import re, time, os, math, argparse
from geopy.distance import vincenty, great_circle
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import sys
from plot import drawMap
def get_args():
    """Parse the three known coordinate pairs and the three radii from argv."""
    parser = argparse.ArgumentParser()
    # Three "lat,lon" reference positions.
    for idx in ('1', '2', '3'):
        parser.add_argument('-c' + idx, '--coordenadas' + idx, required=True, action='store', help='Coordenadas ' + idx)
    # Distance (km) from each reference position to the unknown point.
    for idx in ('1', '2', '3'):
        parser.add_argument('-r' + idx, '--ratio' + idx, required=True, action='store', help='Ratio ' + idx)
    return parser.parse_args()
def getHeight(D, r1, r2):
    """Return the height onto base D of the triangle with sides D, r1, r2.

    Uses Heron's formula: area = sqrt(s(s-D)(s-r1)(s-r2)) with s the
    semi-perimeter, and height = 2 * area / D.
    """
    semi = float((D + r1 + r2) / 2.0)
    area_term = semi * (semi - D) * (semi - r1) * (semi - r2)
    return (2 / D) * math.sqrt(area_term)
def getCat(hip, cat):
    """Return the other leg of a right triangle given hypotenuse and one leg."""
    other_leg = math.sqrt(hip * hip - cat * cat)
    return other_leg
def generateCoord(coord_string):
    """Split a 'lat,lon' string at its first comma into a (lat, lon) str tuple."""
    lat, lon = coord_string.split(",", 1)
    return (lat, lon)
def drawMap_basemap(c1, c2, c3, calculatedCoord):
    """Plot the three reference points and the calculated point on a Basemap.

    Colors: c1 red, c2 green, c3 blue, calculated point yellow.
    NOTE(review): the bounding box assumes c1 is the south-west-most point and
    c3 the northernmost -- confirm for arbitrary inputs.
    """
    deg_diff=1.5
    m = Basemap(llcrnrlon=(float(c1[1])),llcrnrlat=(float(c1[0])-deg_diff),urcrnrlon=(float(c1[1])+deg_diff ),urcrnrlat=(float(c3[0]) +deg_diff), epsg=5520)
    # Street-map raster background fetched from the ArcGIS image service.
    m.arcgisimage(service='ESRI_StreetMap_World_2D', xpixels = 1500, verbose= False)
    drawPoint(c1[0],c1[1],m,'r')
    drawPoint(c2[0],c2[1],m,'g')
    drawPoint(c3[0],c3[1],m,'b')
    drawPoint(calculatedCoord[0],calculatedCoord[1],m,'y')
    plt.show()
def drawPoint(lat, lon, map_, color_):
    """Project (lat, lon) through the Basemap instance and plot a colored dot."""
    xpt,ypt = map_(lon,lat)
    # Round-trip back to lon/lat; result is currently unused.
    lonpt, latpt = map_(xpt,ypt,inverse=True)
    map_.plot(xpt,ypt,'bo', color=color_)
def checkValues(r1, r2, r3, D1, D2, D3):
    """Abort when any pair of circles cannot intersect.

    If an inter-point distance exceeds the sum of the two radii the circles
    are disjoint and trilateration is impossible; every violation is printed
    before exiting with status 0.
    """
    problem = False
    for label, violated in (
            ("D1>(r1+r2)", D1 > (r1 + r2)),
            ("D2>(r1+r3)", D2 > (r1 + r3)),
            ("D3>(r2+r3)", D3 > (r2 + r3))):
        if violated:
            print("Problem: " + label)
            problem = True
    if problem:
        print("\nr1="+str(r1)+"\nr2="+str(r2)+"\nr3="+str(r3)+"\nD1="+str(D1)+" \nD2="+str(D2)+" \nD3="+str(D3))
        sys.exit(0)
def getCoords(args):
    """Trilaterate an unknown point from three positions and distances, then plot.

    The offsets n1/m2 along the c1-c2 and c1-c3 baselines are converted to
    degrees using the local km-per-degree scale at c1.
    """
    c1 = generateCoord(args.coordenadas1)
    c2 = generateCoord(args.coordenadas2)
    c3 = generateCoord(args.coordenadas3)
    r1 = float(args.ratio1)
    r2 = float(args.ratio2)
    r3 = float(args.ratio3)
    # Pairwise geodesic distances (km) between the three reference points.
    D1 = vincenty(c1,c2).kilometers
    D2 = vincenty(c1,c3).kilometers
    D3 = vincenty(c2,c3).kilometers
    # D1 = great_circle(c1,c2).kilometers
    # D2 = great_circle(c1,c3).kilometers
    # D3 = great_circle(c2,c3).kilometers
    checkValues(r1, r2, r3, D1, D2, D3)
    # n1/m2: projections of r1 onto the two baselines (right-triangle legs).
    h1 = getHeight(D=D1,r1=r1,r2=r2)
    n1 = getCat(hip=r1,cat=h1)
    h2 = getHeight(D=D2,r1=r1,r2=r3)
    m2 = getCat(hip=r1,cat=h2)
    # Points one degree east / one degree north of c1, to measure km per degree.
    gkmlat = (float(c1[0]), float(c1[1])+1.0)
    gkmlon = (float(c1[0])+1.0, float(c1[1]))
    # NOTE(review): gradeKmLat is measured along a LONGITUDE degree (gkmlat
    # varies the longitude) yet divides the latitude offset below, and vice
    # versa -- the names/uses look swapped; confirm against expected results.
    gradeKmLat = vincenty( c1 , gkmlat ).kilometers
    gradeKmLon = vincenty( c1 , gkmlon ).kilometers
    newLat = float(c1[0]) + (float(n1)/float(gradeKmLat) )
    newLon = float(c1[1]) + (float(m2)/float(gradeKmLon) )
    calculatedCoord= (newLat,newLon)
    #print ("D1 = "+str(D1)+" \nD2 = "+str(D2)+" \nD3 = "+str(D3)+"\nn1 = "+str(n1)+"\nm2 = "+str(m2) + "\ngradeKmLat = "+str(gradeKmLat)+"\ngradeKmLon = "+str(gradeKmLon) +"\n")
    verbose = True
    points = [calculatedCoord,c1,c2,c3]
    # Prefer Plotly (project drawMap helper); fall back to Basemap on failure.
    try:
        if verbose:
            print("Trying to plot results using Plotly")
        drawMap(points)
        print ("Calculated points:\nc1 = ("+args.coordenadas1+") \nc2 = ("+args.coordenadas2+") \nc3 = ("+args.coordenadas3+") \ncalculatedCoord = ("+str(calculatedCoord)+")")
    except:
        if verbose:
            print("\nIt was not possible to plot results using Plotly")
            print("\nTrying to plot results using Matplotlib")
        print ("Calculated points:\n[Red]\t c1 = ("+args.coordenadas1+") \n[Green]\t c2 = ("+args.coordenadas2+") \n[Blue]\t c3 = ("+args.coordenadas3+") \n[Yellow] calculatedCoord = ("+str(calculatedCoord)+")")
        drawMap_basemap(c1,c2,c3,calculatedCoord)
def main():
    """CLI entry point: parse arguments and run the trilateration."""
    getCoords(get_args())
if __name__ == "__main__":
    main()
| true |
abee29231b412daea132f3307fe118a632b1d979 | Python | ihuei801/leetcode | /MyLeetCode/python/Count of Smaller Numbers After Self.py | UTF-8 | 2,008 | 3.109375 | 3 | [] | no_license | ###
# Merge Sort
# Time Complexity: O(nlogn)
# Space Complexity: O(logn) + O(n)
###
class Solution(object):
    """Merge-sort counter: small[i] = number of elements after i that are smaller."""

    def merge_sort(self, nums, start, end, small):
        """Sort nums[start:end] (a list of (orig_index, value) pairs) by value,
        accumulating into small[] how many right-half values each left-half
        element jumps over -- exactly the smaller elements appearing after it.
        """
        if end - start <= 1:
            return
        mid = (start + end) // 2  # bug fix: '/' yields a float index on Python 3
        self.merge_sort(nums, start, mid, small)
        self.merge_sort(nums, mid, end, small)
        j = mid
        for idx, n in nums[start:mid]:
            # Advance j past every right-half value strictly smaller than n.
            while j < end and nums[j][1] < n:
                j += 1
            small[idx] += (j - mid)
        # bug fix: 'lambda (idx, v): v' is Python-2-only tuple unpacking and a
        # SyntaxError on Python 3. TimSort merges the two sorted runs in O(n).
        nums[start:end] = sorted(nums[start:end], key=lambda pair: pair[1])

    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        if not nums:
            return []
        small = [0] * len(nums)
        self.merge_sort(list(enumerate(nums)), 0, len(nums), small)
        return small
class Solution(object):
    """Same counting idea, but with an explicit O(n) merge into a temp buffer."""

    def merge_sort(self, nums, start, end, small):
        if end - start <= 1:
            return
        mid = (start + end) // 2  # bug fix: '/' yields a float index on Python 3
        self.merge_sort(nums, start, mid, small)
        self.merge_sort(nums, mid, end, small)
        tmp = [0] * (end - start)
        j = mid
        k = 0
        # bug fix: xrange does not exist on Python 3; use range
        for i in range(start, mid):
            while j < end and nums[j][1] < nums[i][1]:
                tmp[k] = nums[j]
                k += 1
                j += 1
            # Everything copied from the right half so far is smaller and
            # located after the original position nums[i][0].
            small[nums[i][0]] += (j - mid)
            tmp[k] = nums[i]
            k += 1
        # Only the first k slots were merged; nums[start+k:end] already holds
        # the remaining right-half elements in sorted order.
        for i in range(k):
            nums[start+i] = tmp[i]

    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        if not nums:
            return []
        small = [0] * len(nums)
        self.merge_sort(list(enumerate(nums)), 0, len(nums), small)
        return small
| true |
b87a86f5efb9921952db4aedd83024721605ed3e | Python | ynikitenko/lena | /lena/flow/group_by.py | UTF-8 | 2,415 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | """Group data using :class:`.GroupBy` class."""
import lena.core
import lena.flow
class GroupBy(object):
    """Group values.

    Data is added during :meth:`update`.
    Groups dictionary is available as :attr:`groups` attribute.
    :attr:`groups` maps *keys* (produced by *group_by*) to lists of
    items sharing that key.
    """

    def __init__(self, group_by):
        """*group_by* is a function returning distinct hashable results
        for values from different groups, or a dot-separated formatting
        string (in which case only the context part of the value is used,
        see :func:`context.format_context <.format_context>`).

        If *group_by* is neither a callable nor a string,
        :exc:`.LenaTypeError` is raised.
        """
        self.groups = dict()
        if callable(group_by):
            self._group_by = group_by
        elif isinstance(group_by, str):
            # Build a key function that formats the value's context part.
            formatter = lena.context.format_context(group_by)
            self._group_by = lambda val: formatter(lena.flow.get_context(val))
        else:
            raise lena.core.LenaTypeError(
                "group_by must be a callable or a string, "
                "{} provided".format(group_by)
            )

    def update(self, val):
        """Find a group for *val* and add it there.

        The group key is computed by *group_by*; a new group is created
        on first sight of a key.

        If a formatting key was not found for *val*,
        :exc:`~LenaValueError` is raised.
        """
        try:
            key = self._group_by(val)
        except lena.core.LenaKeyError:
            raise lena.core.LenaValueError(
                "could not find a key for {}".format(val)
            )
        # setdefault creates the list the first time the key is seen.
        self.groups.setdefault(key, []).append(val)

    def clear(self):
        """Remove all groups."""
        self.groups.clear()
| true |
c6eae374051020483d4800b01b6d0898a5d935c1 | Python | Aasthaengg/IBMdataset | /Python_codes/p03545/s786987870.py | UTF-8 | 345 | 2.6875 | 3 | [] | no_license | def main():
    # Four input digits; insert '+'/'-' into the three gaps so the result is 7.
    S = list(str(input()))
    LenP = 3
    ans = 0
    # Enumerate all 2**3 operator combinations via a bitmask.
    for i in range(2**LenP):
        P = ["-"]*LenP
        for j in range(LenP):
            if i >> j & 1:
                P[j] = "+"
        # Interleave digits (even slots) and operators (odd slots).
        Res = [None] * (len(S)+len(P))
        Res[1::2],Res[::2] = P,S
        ResEval = "".join(Res)
        if eval(ResEval) == 7:
            ans = ResEval+"=7"
            print(ans)
            break
if __name__ == '__main__':
    main()
3f1f758fe269fa14b1be6ef4e429322743450bef | Python | ahmad0790/stock-trading-machine-learning-algos | /DTLearner.py | UTF-8 | 7,547 | 2.953125 | 3 | [] | no_license | """
Name: Ahmad Khan
GT ID: akhan361
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
from scipy import stats
import datetime as dt
class DTLearner(object):
    def __init__(self, leaf_size = 1, verbose = False):
        """Store hyperparameters.

        leaf_size: maximum number of samples aggregated into a single leaf.
        verbose: accepted for API compatibility but ignored by this learner.
        """
        pass # move along, these aren't the drones you're looking for
        self.leaf_size = leaf_size
    def author(self):
        """Return the Georgia Tech username of this learner's author."""
        return 'akhan361' # replace tb34 with your Georgia Tech username
    def addEvidence(self,dataX,dataY):
        """
        @summary: Add training data to learner
        @param dataX: X values of data to add
        @param dataY: the Y training values
        """
        # Append Y as the last column so the tree builder sees one 2-D array.
        newdataX = np.column_stack((dataX, dataY))
        # build and save the model
        #self.model_coefs, residuals, rank, s = np.linalg.lstsq(newdataX, dataY)
        #print(newdataX)
        self.model = self.build_tree(newdataX, self.leaf_size)
        #print(self.model)
    def query(self,points):
        """
        @summary: Estimate a set of test points given the model we built.
        @param points: should be a numpy array with each row corresponding to a specific query.
        @returns the estimated values according to the saved model.
        """
        # The tree is a string ndarray: each row is
        # [feature or 'leaf', split value or prediction, left offset, right offset],
        # so every cell must be cast back to float/int before use.
        node = 'not leaf'
        dTree = self.model
        y_all = np.empty(shape=(points.shape[0]))
        for i in range(0,points.shape[0]):
            currentRow = 0
            # Walk down from the root until a leaf row is reached.
            while dTree[currentRow,0] != 'leaf':
                #print(dTree[currentRow,1])
                #print((float(dTree[currentRow,0])))
                #print(points[i, dTree[currentRow,0]])
                if points[i, int(float(dTree[currentRow,0]))] <= float(dTree[currentRow,1]):
                    currentRow = currentRow + int(float(dTree[currentRow,2]))
                    #print currentRow
                elif points[i, int(float(dTree[currentRow,0]))] > float(dTree[currentRow,1]):
                    currentRow = currentRow + int(float(dTree[currentRow,3]))
                    #print currentRow
            # Leaf row: column 1 holds the prediction.
            y_predict = dTree[currentRow,1]
            y_all[i] = y_predict
        return y_all
        #return (self.model_coefs[:-1] * points).sum(axis = 1) + self.model_coefs[-1]
def build_tree(self, data, leaf_size):
#print(data)
#print(data.shape)
if data.shape[0] == 1:
return np.array([['leaf', stats.mode(data[:,data.shape[1]-1])[0][0], 'NA', 'NA']])
if data.shape[0] <= leaf_size:
return np.array([['leaf', stats.mode(data[:,data.shape[1]-1])[0][0], 'NA', 'NA']])
#if all data.y same:
#return [leaf, data.y, NA, NA]
else:
#determine best feature i to split
i = self.computeCorrelations(data)
SplitVal = np.median(data[:,i])
#print (i)
#print(SplitVal)
if data.shape[0] == data[data[:,i]<=SplitVal].shape[0]:
#print stats.mode(data[:,data.shape[1]-1])[0][0]
return np.array([['leaf', stats.mode(data[:,data.shape[1]-1])[0][0], 'NA', 'NA']])
else:
#SplitVal = data[:,i].median()
#j = j+1
#print(j)
lefttree = self.build_tree(data[data[:,i]<=SplitVal], leaf_size)
#print(j)
righttree = self.build_tree(data[data[:,i]>SplitVal], leaf_size)
#print(lefttree)
#print(righttree)
root = np.array([[i, SplitVal, 1, lefttree.shape[0] + 1]])
#print(root)
return np.vstack((root, lefttree, righttree))
#return (np.append(root, lefttree, righttree))
def computeCorrelations(self, data):
#print(data)
correlations = np.empty(shape=(1,data.shape[1]-1))
for i in range(0,data.shape[1]-1):
corr = np.corrcoef(data[:,i], data[:,data.shape[1]-1])
corr = abs(corr[1,0])
correlations[0,i] = corr
correlations = np.nan_to_num(correlations)
corr_max = np.argmax(correlations)
SplitVal = np.median(data[:,corr_max])
#print(correlations)
#print(corr_max)
'''
while data[data[:,corr_max]<=SplitVal].shape[0] == data.shape[0]:
SplitVal = SplitVal - 0.005
'''
'''
while data[data[:,corr_max]<=SplitVal].shape[0] == data.shape[0]:
data = np.delete(data, corr_max, 1)
correlations = np.empty(shape=(1,data.shape[1]-1))
for i in range(0,data.shape[1]-1):
corr = np.corrcoef(data[:,i], data[:,data.shape[1]-1])
corr = abs(corr[1,0])
correlations[0,i] = corr
correlations = np.nan_to_num(correlations)
corr_max = np.argmax(correlations)
SplitVal = np.median(data[:,corr_max])
#print(data)
#print(correlations)
#print(corr_max)
'''
#print(correlations)
#print(corr_max)
return corr_max
if __name__=="__main__":
    # Experiment driver (Python 2 print syntax): trains DTLearner on
    # ./Data/simple.csv and plots train/test RMSE versus leaf size.
    def compute_rmse(actual, predicted):
        # Root-mean-squared error over all elements.
        mse = ((actual - predicted) ** 2).mean(axis=None)
        return np.sqrt(mse)
    def create_array(data):
        # Drop the first column, then split the remainder into features
        # (all but last column) and target (last column).
        # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
        # modern pandas needs .to_numpy() / .values.
        ind = data.shape[1]-1
        data = data.iloc[:,1:data.shape[1]]
        dataX = data.iloc[:,0:data.shape[1]-1]
        dataY = data.iloc[:,data.shape[1]-1]
        dataX = dataX.as_matrix()
        dataY = dataY.as_matrix()
        return dataX, dataY
    def plot_data(df, title="DT Learner - RMSE vs Leaf Size", xlabel="leaf_size", ylabel="train_rmse"):
        """Plot stock prices with a custom title and meaningful axis labels."""
        ax = df.plot(title=title, fontsize=12)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        plt.show()
    # 60/40 train/test split with a fixed seed for reproducibility.
    data = pd.read_csv('./Data/simple.csv',delimiter=',')
    train=data.sample(frac=0.6,random_state=200)
    test=data.drop(train.index)
    trainX, trainY = create_array(train)
    testX, testY = create_array(test)
    start_time = time.time()
    learner = DTLearner(leaf_size = 1, verbose = False) # constructor
    learner.addEvidence(trainX, trainY)
    Y_pred = learner.query(trainX)
    print "The IN Sample RMSE for DT Learner is:"
    print compute_rmse(trainY, Y_pred)
    print
    Y_pred = learner.query(testX)
    print "The OUT of Sample RMSE for DT Learner is:"
    print compute_rmse(testY, Y_pred)
    print
    # Sweep leaf sizes and collect [leaf_size, train_rmse, test_rmse] rows.
    leaf_sizes = [1,3,5,7,10,15,20,25,30,35,40,45, 50,60,70, 80, 90,100]
    rmses = np.empty(shape = (len(leaf_sizes),3))
    #rmses = pd.DataFrame(index=index, columns=columns)
    for i in range(0, len(leaf_sizes)):
        leaf = leaf_sizes[i]
        # NOTE(review): this prints the loop index, not the leaf size --
        # str(leaf) was probably intended.
        print "Leaf Size: " + str(i)
        rmses[i,0] = leaf
        learner = DTLearner(leaf_size = leaf, verbose = False) # constructor
        learner.addEvidence(trainX, trainY)
        print "The IN Sample RMSE for DT Learner is:"
        Y_pred = learner.query(trainX)
        rmse = compute_rmse(trainY, Y_pred)
        rmses[i,1] = rmse
        print rmse
        print "The OUT of Sample RMSE for DT Learner is:"
        Y_pred = learner.query(testX)
        rmse = compute_rmse(testY, Y_pred)
        rmses[i,2] = rmse
        print rmse
    df = pd.DataFrame(rmses, columns=['leaf_size', 'train_rmse', 'test_rmse'])
    df = df.set_index('leaf_size')
    print df
    plot_data(df)
    end_time = time.time()
    print end_time - start_time
| true |
b7747baf6a008469937415e9da53f2a9b2679a0f | Python | MichalKacprzak99/Vpython | /lab6/zad2.py | UTF-8 | 856 | 3.015625 | 3 | [] | no_license | import random
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('classic')
# Barnsley-fern iterated function system, restricted to the zoom window
# x in [1, 1.3], y in [2.5, 3.1]: iterate the four affine maps until
# 10**6 points have landed inside that window, then plot them.
x0 = 0
y0 = 0
X = []
Y = []
X.append(x0)
Y.append(y0)
# `hits` counts points inside the zoom window.
# (Renamed from `sum`, which shadowed the built-in function.)
hits = 0
tmp = 0   # hits since the last progress printout
i = 1     # progress milestone multiplier
while hits < 10**6:
    # Pick one of the four affine maps with probabilities ~1/85/7/7 %.
    r = random.uniform(0, 100)
    if r < 1.0:
        x = 0
        y = 0.16*y0
    elif r < 86.0:
        x = 0.85*x0 + 0.04*y0
        y = -0.04*x0 + 0.85*y0+1.6
    elif r < 93.0:
        x = 0.2*x0 - 0.26*y0
        y = 0.23*x0 + 0.22*y0 + 1.6
    else:
        x = -0.15*x0 + 0.28*y0
        y = 0.26*x0 + 0.24*y0+ 0.44
    x0 = x
    y0 = y
    # Only points inside the zoom window are counted and plotted.
    if 1 <= x0 <= 1.3 and 2.5 <= y0 <= 3.1:
        hits += 1
        tmp += 1
        # Progress report.  NOTE(review): the threshold grows as i*10**4
        # while tmp is reset to 0, so reports become geometrically sparser;
        # confirm whether a report every 10**4 hits was intended.
        if tmp == i*10**4:
            print(tmp)
            tmp = 0
            i += 1
        X.append(x);Y.append(y)
plt.axis([1,1.3,2.5,3.1])
plt.plot(X,Y,',',color ='blue')
ax=plt.gca()
ax.set_facecolor("black")
plt.show()
4179ac05fd53902821f1a25fbafadb50a16e036b | Python | timber8/ComputerVision | /demo.py | UTF-8 | 7,580 | 3.109375 | 3 | [] | no_license | import cv2
#to show the image
import numpy as np
from math import cos, sin
import os
def find_biggest_contour(image):
    """Return the largest contour (by area) of a binary `image`, plus a
    filled mask of that contour drawn on a same-shape uint8 canvas.

    NOTE(review): the 3-value unpack of cv2.findContours matches
    OpenCV 3.x; OpenCV 4.x returns only (contours, hierarchy).
    """
    # Copy so findContours does not modify the caller's image.
    image = image.copy()
    image, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Isolate the largest contour by area.
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    # Draw the winning contour filled (thickness -1) onto a blank mask.
    mask = np.zeros(image.shape, np.uint8)
    cv2.drawContours(mask, [biggest_contour], -1, (255,255,0), -1)
    return biggest_contour, mask
def find_suit_in_a_card(image, verbose=False):
    """Segment the suit symbol in a card-corner image.

    Returns (suit_contour, mask_suit, is_red): the largest contour of the
    colour-segmented region, its filled mask, and a flag that is 1 when a
    red suit was detected and 0 when the code fell back to the black mask.
    When `verbose` is true, intermediate images are shown and written to
    disk for debugging.
    """
    if verbose:
        cv2.imshow('image', image);
        cv2.imwrite('corner_used.png', image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    is_red = 1
    # Gaussian blur (7x7 kernel) to suppress noise before thresholding.
    image_blur = cv2.GaussianBlur(image, (7, 7), 0)
    if verbose:
        cv2.imshow('corner_blured', image_blur)
        cv2.imwrite('corner_blured.png',image_blur)
    # Unlike RGB, HSV separates intensity (value) from colour (hue),
    # which makes colour-based segmentation easier.
    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_RGB2HSV)
    # Red occupies both ends of the hue circle; first band: hue 0-10
    # with minimum saturation/value thresholds.
    min_red = np.array([0, 100, 80])
    max_red = np.array([10, 256, 256])
    # Binary mask of pixels inside the first red band.
    mask1 = cv2.inRange(image_blur_hsv, min_red, max_red)
    if verbose:
        cv2.imshow('mask1_red', mask1)
        cv2.imwrite('mask1.png',mask1)
    # Second red band: hue 170-180.
    min_red2 = np.array([170, 100, 80])
    max_red2 = np.array([180, 256, 256])
    mask2 = cv2.inRange(image_blur_hsv, min_red2, max_red2)
    if verbose:
        cv2.imshow('mask2_red', mask2);
        cv2.imwrite('mask2_red.png',mask2)
    # Union of both red bands (masks are disjoint, so + acts as OR here).
    mask = mask1 + mask2
    array = np.asarray(mask)
    # Too few red pixels -> assume a black suit and re-threshold on
    # low value (dark pixels) instead.
    if np.count_nonzero(array) < 5000:
        is_red = 0
        min_black = np.array([0, 0, 0])
        max_black = np.array([180, 256, 150])
        # Binary mask of dark pixels.
        mask = cv2.inRange(image_blur_hsv, min_black, max_black)
    if verbose:
        cv2.imshow('result mask', mask)
        cv2.imwrite('result_mask.png',mask)
    # Clean up the mask with a 15x15 elliptical structuring element.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    # Closing (dilation then erosion) fills small holes inside the
    # foreground and removes small dark specks on the symbol.
    mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    if verbose:
        cv2.imshow('dilation', mask_closed)
        cv2.imwrite('dilated_corner.png',mask_closed)
    # Opening (erosion then dilation) removes remaining background noise.
    mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)
    if verbose:
        cv2.imshow('erosion', mask_clean)
        cv2.imwrite('dilated_corner_with_erosion.png',mask_clean)
    # Keep only the largest segmented region -- assumed to be the suit.
    suit_contour, mask_suit = find_biggest_contour(mask_clean)
    if verbose:
        cv2.imshow('isolated suit', mask_suit)
        cv2.imwrite('isolated_suit.png', mask_suit)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return suit_contour, mask_suit, is_red
def redSuitContours(back_corner):
    """Load the reference corner images for the two red suits (ouros /
    diamonds and copas / hearts) and return their segmented contours.

    back_corner == 0 selects the upright ('*_up') reference images,
    anything else the upside-down ('*_back') ones.
    """
    if back_corner == 0:
        ouros = cv2.imread('corner_ouros_up.png')
        ouros_contour, mask_ouros, is_red = find_suit_in_a_card(ouros, False)
        hearts = cv2.imread('corner_copas_up.png')
        hearts_contour, mask_hearts, is_red = find_suit_in_a_card(hearts, False)
    else:
        ouros = cv2.imread('corner_ouros_back.png')
        ouros_contour, mask_ouros, is_red = find_suit_in_a_card(ouros, False)
        hearts = cv2.imread('corner_copas_back.png')
        hearts_contour, mask_hearts, is_red = find_suit_in_a_card(hearts, False)
    return ouros_contour, hearts_contour
def blackSuitContours(back_corner):
    """Load the reference corner images for the two black suits (espadas /
    spades and paus / clubs) and return their segmented contours.

    back_corner == 0 selects the upright ('*_up') reference images,
    anything else the upside-down ('*_back') ones.
    """
    if back_corner == 0:
        espadas = cv2.imread('corner_espadas_up.png')
        espadas_contour, mask_espadas, is_red = find_suit_in_a_card(espadas, False)
        paus = cv2.imread('corner_paus_up.png')
        # NOTE(review): verbose=True here (debug windows + file dumps) is
        # inconsistent with every other call in this pair of helpers --
        # probably leftover debugging.
        paus_contour, mask_paus, is_red = find_suit_in_a_card(paus, True)
    else:
        espadas = cv2.imread('corner_espadas_back.png')
        espadas_contour, mask_espadas, is_red = find_suit_in_a_card(espadas, False)
        paus = cv2.imread('corner_paus_back.png')
        paus_contour, mask_paus, is_red = find_suit_in_a_card(paus, False)
    return espadas_contour, paus_contour
def cornerHasRed(im):
    """Count, print and return the number of red pixels in `im` (BGR):
    pixels whose HSV hue lies in 0-10 or 170-180 with sufficient
    saturation and value."""
    image_blur = cv2.GaussianBlur(im, (7, 7), 0)
    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_BGR2HSV)
    # Red occupies both ends of the hue circle; first band: hue 0-10
    # with minimum saturation/value thresholds.
    min_red = np.array([0, 100, 80])
    max_red = np.array([10, 256, 256])
    # Binary mask of pixels inside the first red band.
    mask1 = cv2.inRange(image_blur_hsv, min_red, max_red)
    # Second red band: hue 170-180.
    min_red2 = np.array([170, 100, 80])
    max_red2 = np.array([180, 256, 256])
    mask2 = cv2.inRange(image_blur_hsv, min_red2, max_red2)
    # Union of both bands (masks are disjoint, so + acts as OR).
    mask = mask1 + mask2
    array = np.asarray(mask)
    print 'Numero de pixeis vermelhos: ' + str(np.count_nonzero(array))
    return np.count_nonzero(array)
def cornerHasBlack(im):
    """Count, print and return the number of dark pixels in `im` (BGR):
    pixels whose HSV value is below 150 at any hue/saturation."""
    image_blur = cv2.GaussianBlur(im, (7, 7), 0)
    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_BGR2HSV)
    min_black = np.array([0, 0, 0])
    max_black = np.array([180, 256, 150])
    # Binary mask of dark pixels.
    mask = cv2.inRange(image_blur_hsv, min_black, max_black)
    array = np.asarray(mask)
    print 'Numero de pixeis pretos: ' + str(np.count_nonzero(array))
    return np.count_nonzero(array)
def compareCountours(cnt1,is_red, back_corner):
    """Compare contour `cnt1` against the two reference suit contours of
    the matching colour and return the two cv2.matchShapes scores
    (red: ouros/hearts, black: espadas/paus).  Lower score = closer match.
    """
    if is_red == 1:
        ouros_contour, hearts_contour = redSuitContours(back_corner)
        is_ouros = cv2.matchShapes(cnt1,ouros_contour,1,0.0)
        is_hearts = cv2.matchShapes(cnt1,hearts_contour,1,0.0)
        print 'Ouros' + str(is_ouros)
        print 'Copas' + str(is_hearts)
        return is_ouros, is_hearts
    else:
        espadas_contour, paus_contour = blackSuitContours(back_corner)
        is_espadas = cv2.matchShapes(cnt1,espadas_contour,1,0.0)
        is_paus = cv2.matchShapes(cnt1,paus_contour,1,0.0)
        print 'Espadas' + str(is_espadas)
        print 'Paus' + str(is_paus)
        return is_espadas, is_paus
#read the image
#image = cv2.imread('cornerkq0.png')
#detect it
def getSuitString(image, back_corner, verbose):
    """Classify the suit in a card-corner image and return its Portuguese
    name: 'Copas', 'Ouros', 'Paus' or 'Espadas' (also printed).

    `back_corner` selects the reference corner images to compare against;
    `verbose` is forwarded to find_suit_in_a_card for debug output.
    cv2.matchShapes scores are compared: the lower (closer) one wins.
    """
    suit_contour, mask_suit, is_red = find_suit_in_a_card(image, verbose)
    if is_red == 1:
        is_ouros, is_hearts = compareCountours(suit_contour, is_red, back_corner)
        if is_ouros > is_hearts:
            print 'Copas'
            return 'Copas'
        else:
            print 'Ouros'
            return 'Ouros'
    else:
        is_espadas, is_paus = compareCountours(suit_contour, is_red, back_corner)
        if is_espadas > is_paus:
            print 'Paus'
            return 'Paus'
        else:
            print 'Espadas'
            return 'Espadas'
    # Unreachable: every branch above returns before these GUI calls run.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
#write the new image
#cv2.imwrite('yo2.jpg', result)
| true |
0e2cc31f8b44d787a8ca62f3ac5d90ec735a78ae | Python | lzxysf/python | /cli/python_003_variable.py | UTF-8 | 2,991 | 4.34375 | 4 | [] | no_license | # coding=utf-8
# Python data types: numbers, strings, complex numbers, lists, tuples, dictionaries
import json
a = b = c = 1
a, b, c = 1, 3.4, 'alice'
s = "hello world!"
x = '你好'
print(s[1:7]) # slicing includes the start index and excludes the end index
print(s[1:])
print(s[-1:-5]) # original note claimed: counting backwards the start is excluded and the end included -- NOTE(review): s[-1:-5] is actually an empty slice
print(s[:-1])
print(s+x)
print(x*2)
# A list may contain numbers, strings, or another list.
# Lists are written with [] and elements are separated by commas.
list = ['runoob', 786, 2.23, 'john', 70.2]
tinylist = [123, 'john']
print (list) # print the complete list
print (list[0]) # print the first element of the list
print (list[1:3]) # print the second through third elements
print (list[2:]) # print everything from the third element to the end of the list
print (tinylist * 2) # print the list twice
print (list + tinylist) # print the concatenated lists
# Tuples are very similar to lists.
# Tuples are written with () and elements are separated by commas.
# The difference is that a tuple cannot be reassigned item-by-item -- it is a read-only list.
tuple = ('runoob', 786, 2.23, 'john', 70.2)
tinytuple = (123, 'john')
print (tuple) # print the complete tuple
print (tuple[0]) # print the first element of the tuple
print (tuple[1:3]) # print the second through third elements
print (tuple[2:]) # print everything from the third element to the end
print (tinytuple * 2) # print the tuple twice
print (tuple + tinytuple) # print the concatenated tuples
# A list is an ordered collection of objects; a dict is an unordered one.
# Dicts are written with {} and store key-value pairs.
# Keys are indexed with [] rather than ().
dict = {}
dict['one'] = 'this is one'
dict[2] = 'i am john'
print(dict)
dict = {'1': 'apple', '星期': '明天', 'pic1': 1}
print(dict['1'])
print(dict['星期'])
print(dict.keys())
print(dict.values())
print(dict)
# print (json.dumps(dict, encoding="UTF-8", ensure_ascii=False))
# NOTE(review): `list`, `tuple` and `dict` above shadow the built-in types.
a = 11
b = 2
print(a**b)
print(a//b)
a = 1
b = 0
if a:
    print('true')
if a and b:
    print('true')
else:
    print('false')
if (a or b):
    print('true')
if not(a and b):
    print('true')
a = 12345
b = 12345
c = 20
list = [10, 2, 3, 4, 5 ]
if a in list:
    print ('true')
if b not in list:
    print('true')
# `is` / `is not` compare object identity (memory address), not value.
# In CPython two equal simple values may point at the same object,
# but values of different types do not: with a=10, b=10.0, `a is not b`.
if a is b:
    print('a is b')
if a is not c:
    print('a is not c')
# id() returns an object's identity (its memory address in CPython).
print (id(a))
print (id(b))
# Below, y and z both equal x, but y is a reference to the same object
# while z is a copy, so its id differs from x's.
x = [1, 2, 3, 4, 5]
y = x
z = x[:]
print(id(x))
print(id(y))
print(id(z))
# Python has no ++/-- syntax: numbers and strings are immutable objects,
# so any operation creates a new object instead of incrementing the
# object's memory in place.
| true |
563421cf4c5c78a911efae765ba929b96b17f24a | Python | alexlwn123/kattis | /Python/Karte.py | UTF-8 | 415 | 2.71875 | 3 | [] | no_license | import sys
# Kattis "Karte": read a string of 3-character card codes (suit letter +
# two digits).  Print "GRESKA" on the first duplicate card, otherwise
# print how many cards of each suit (P, K, H, T) are missing from 13.
deck = set()
line = str(sys.stdin.readline())
duplicate_found = False
for start in range(0, (len(line) // 3) * 3, 3):
    card = line[start:start + 3]
    if card in deck:
        print("GRESKA")
        duplicate_found = True
        break
    deck.add(card)
if not duplicate_found:
    # Tally cards per suit letter; unknown prefixes are ignored.
    counts = {suit: 0 for suit in 'PKHT'}
    for card in deck:
        if card[0] in counts:
            counts[card[0]] += 1
    print(13 - counts['P'], 13 - counts['K'], 13 - counts['H'], 13 - counts['T'])
| true |
36dfb3d566efde9a99d80a40a6972fc9ab7ef30b | Python | zhushh/PythonCode | /basicStudy/func_local_var.py | UTF-8 | 171 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env python
# Filename: func_local_var.py
def func(x):
    # Python 2 print-statement syntax.
    # `x` is a local name bound to the caller's object; rebinding it below
    # does not affect the caller's variable.
    print 'x is', x
    x = x + 1
    print 'change local x to', x
x = 32
func(x)
# The rebinding inside func() was local, so the global x is unchanged.
print 'x is still', x
| true |
010e2385ac90107cf6e0ed7d0e7b666307ce3997 | Python | notbdu/tf-mnist | /mnist_basic_nn.py | UTF-8 | 5,743 | 3.625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
An example of implementing multinomial logistic (softmax) regression with a single layer of
perceptrons using Tensorflow
Ouput: Confidence prediction (as an array) of which class an observation in the class belongs to
"""
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with labels as one-hot vectors.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
"""
mnist is a DataSet object containing the following:
- 55000 images and labels for primary training
- 5000 images and labesl for iterative validation of training accuracy
- 10000 images and labels for final testing of trained accuracy
"""
print("Number of images/labels for model:")
print("Primary training: " + str(mnist.train.num_examples))
# NOTE(review): the two labels below look swapped -- `mnist.validation`
# is described above as the iterative-validation split and `mnist.test`
# as the final-test split.
print("Iterative validation: " + str(mnist.test.num_examples))
print("Final testing: " + str(mnist.validation.num_examples))
print("")
"""
Images are stored as a n-dim array [ n_observations x n_features]
Labels are stored as [n_observations x n_labels]
where each observation is a one-hot vector
"""
print("Dimensions of the Image and Label tensors: ")
print("Images: " + str(mnist.train.images.shape),"Labels: " + str(mnist.train.labels.shape))
# NOTE(review): this whole graph uses pre-1.0 TensorFlow APIs
# (tf.placeholder, tf.scalar_summary, tf.merge_all_summaries,
# tf.initialize_all_variables, tf.train.SummaryWriter); modern TF would
# need tf.compat.v1 or a rewrite.
with tf.Graph().as_default():
    # Inputs
    x = tf.placeholder(tf.float32, shape=[None, 784], name="image_inputs")
    y_ = tf.placeholder(tf.float32, shape=[None, 10], name="actual_class")
    """
    Placeholder creates a container for an input image using tensorflow's graph.
    We allow the first dimension to be None, since this will eventually
    represent out mini-batches, or how many images we feed into a network
    at a time during training/validation/testing
    x : 28px by 28px images converted into a [(Batch Size * 28^2) x 1] column vector
    y : [Batch Size * 10] matrix of one-hot encodings representing the actual class of the image
    (ie. [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 ] where the index of 1 is the class)
    """
    with tf.name_scope("hidden1"):
        # Single linear layer: weights and biases initialized to zero.
        W = tf.Variable(tf.zeros([784,10]), name="weights")
        b = tf.Variable(tf.zeros([10]), name="biases")
        # Sigmoid unit
        y = tf.nn.softmax(tf.matmul(x,W) + b)
    """
    function in the form of:
    f(x_i, W, b) = Wx_i + b
    which is a linear mapping of image pixels to class scores.
    W and b are the parameters of this function which change after each iteration
    1) W * x_i => [ 0.2, 0.5, 0.6, 0.3, 1.2, .5, .2, .9, .2, .6] # does not sum to 1
    return a K element array representing the probabilities that an image belongs to each class K
    2) + b => Adds biases to each of the classes
    3) softmax() => [ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] # sums to 1
    returns a K element array w/ normalized probabilities that an image belongs to each class K
    Variables (Learning Parameters)
    x_i : an image with all its pixels flattened out into a [D x 1] vector
    b : "bias" vector of size [K x 1]
    W : "weight" matrix of size [D * K] (transpose of x)
    """
    # Per-example cross-entropy, summed over the 10 classes.
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1],
                                   name="xentropy")
    """
    Represents the cross-entropy between the true (p) distribution and the estimated (q) distribution
    Defined as:
    H(p,q) = - summation{p(x)*log(q(x))}
    As q converges to p, the product of p*log(q) will increase and therefore, H(p,q) will become
    more negative
    The cross-entropy function p*log(q) represents a a second order equation with a defined minima so gradient descent
    converges to only 1 minima.
    """
    # Loss function
    loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
    global_step = tf.Variable(0, name="global_step", trainable=False)
    # Plain gradient descent with learning rate 0.5.
    train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    """
    cross_entropy example
    assume inaccurate output:
    output = [0.3, 0.2, 0.5]
    expected = [0, 1, 0]
    cross entropy would be -0.2
    assume accurate output:
    output = [0.3, 0.5, 0.2]
    expected = [0, 1, 0]
    cross entropy would be -0.5
    Notice that the accurate output has a more negative value and therefore favored since
    the loss function aims to minimize the cross entropy
    """
    # SUMMARIES
    tf.scalar_summary(loss.op.name, loss)
    summary_op = tf.merge_all_summaries()
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        summary_writer = tf.train.SummaryWriter("/tmp/tf-summaries/", sess.graph)
        start = time.time()
        # Train for 1000 steps on mini-batches of 50 images.
        for step in range(1000):
            image_inputs, actual_classes = mnist.train.next_batch(50)
            _, loss_value = sess.run([train_op, loss], feed_dict={x: image_inputs, y_: actual_classes})
            summary_str = sess.run(summary_op, feed_dict={x: image_inputs, y_: actual_classes})
            summary_writer.add_summary(summary_str, step)
            # Periodic progress log (Python 2 print statement).
            if step % 100 == 0:
                duration = time.time() - start
                print "Step {}: loss = {:.2f} ({:.3f} sec)".format(step, loss_value,
                        duration)
        # Prediction is correct when the argmax class matches the label.
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
        """
        Arguements of the maxima (argmax) refers to the point/s of the
        domain of a function where the function is maximized
        In this context, argmax returns the index of the greatest value in the array
        """
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # Calling `Tensor.eval()` == `tf.get_default_session().run(Tensor)`
        print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
| true |
4cca3d99292b01445cf8ccbeae0529932759081b | Python | vidmo91/imgcode | /imGcode/env_imGcode/Lib/site-packages/matplotlib/tests/test_artist.py | UTF-8 | 9,061 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import io
from itertools import chain
import numpy as np
import pytest
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.path as mpath
import matplotlib.transforms as mtransforms
import matplotlib.collections as mcollections
import matplotlib.artist as martist
from matplotlib.testing.decorators import image_comparison
def test_patch_transform_of_none():
    """Patches added to an Axes get transData unless a transform (None or
    an explicit one) was already set on them."""
    # tests the behaviour of patches added to an Axes with various transform
    # specifications
    ax = plt.axes()
    ax.set_xlim([1, 3])
    ax.set_ylim([1, 3])
    # Draw an ellipse over data coord (2, 2) by specifying device coords.
    xy_data = (2, 2)
    xy_pix = ax.transData.transform(xy_data)
    # Not providing a transform of None puts the ellipse in data coordinates .
    e = mpatches.Ellipse(xy_data, width=1, height=1, fc='yellow', alpha=0.5)
    ax.add_patch(e)
    assert e._transform == ax.transData
    # Providing a transform of None puts the ellipse in device coordinates.
    e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
                         transform=None, alpha=0.5)
    assert e.is_transform_set()
    ax.add_patch(e)
    assert isinstance(e._transform, mtransforms.IdentityTransform)
    # Providing an IdentityTransform puts the ellipse in device coordinates.
    e = mpatches.Ellipse(xy_pix, width=100, height=100,
                         transform=mtransforms.IdentityTransform(), alpha=0.5)
    ax.add_patch(e)
    assert isinstance(e._transform, mtransforms.IdentityTransform)
    # Not providing a transform, and then subsequently "get_transform" should
    # not mean that "is_transform_set".
    e = mpatches.Ellipse(xy_pix, width=120, height=120, fc='coral',
                         alpha=0.5)
    intermediate_transform = e.get_transform()
    assert not e.is_transform_set()
    ax.add_patch(e)
    assert e.get_transform() != intermediate_transform
    assert e.is_transform_set()
    assert e._transform == ax.transData
def test_collection_transform_of_none():
    """Collections added to an Axes get transData unless a transform
    (None or an explicit one) was already set on them."""
    # tests the behaviour of collections added to an Axes with various
    # transform specifications
    ax = plt.axes()
    ax.set_xlim([1, 3])
    ax.set_ylim([1, 3])
    # draw an ellipse over data coord (2, 2) by specifying device coords
    xy_data = (2, 2)
    xy_pix = ax.transData.transform(xy_data)
    # not providing a transform of None puts the ellipse in data coordinates
    e = mpatches.Ellipse(xy_data, width=1, height=1)
    c = mcollections.PatchCollection([e], facecolor='yellow', alpha=0.5)
    ax.add_collection(c)
    # the collection should be in data coordinates
    assert c.get_offset_transform() + c.get_transform() == ax.transData
    # providing a transform of None puts the ellipse in device coordinates
    e = mpatches.Ellipse(xy_pix, width=120, height=120)
    c = mcollections.PatchCollection([e], facecolor='coral',
                                     alpha=0.5)
    c.set_transform(None)
    ax.add_collection(c)
    assert isinstance(c.get_transform(), mtransforms.IdentityTransform)
    # providing an IdentityTransform puts the ellipse in device coordinates
    e = mpatches.Ellipse(xy_pix, width=100, height=100)
    c = mcollections.PatchCollection([e],
                                     transform=mtransforms.IdentityTransform(),
                                     alpha=0.5)
    ax.add_collection(c)
    assert isinstance(c._transOffset, mtransforms.IdentityTransform)
@image_comparison(["clip_path_clipping"], remove_text=True)
def test_clipping():
    """Image comparison: the same compound clip path applied to a
    PathCollection and to a PathPatch renders identically."""
    # Rectangle exterior with a reversed-circle interior forms a
    # compound path with a hole.
    exterior = mpath.Path.unit_rectangle().deepcopy()
    exterior.vertices *= 4
    exterior.vertices -= 2
    interior = mpath.Path.unit_circle().deepcopy()
    interior.vertices = interior.vertices[::-1]
    clip_path = mpath.Path.make_compound_path(exterior, interior)
    star = mpath.Path.unit_regular_star(6).deepcopy()
    star.vertices *= 2.6
    ax1 = plt.subplot(121)
    col = mcollections.PathCollection([star], lw=5, edgecolor='blue',
                                      facecolor='red', alpha=0.7, hatch='*')
    col.set_clip_path(clip_path, ax1.transData)
    ax1.add_collection(col)
    ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
    patch = mpatches.PathPatch(star, lw=5, edgecolor='blue', facecolor='red',
                               alpha=0.7, hatch='*')
    patch.set_clip_path(clip_path, ax2.transData)
    ax2.add_patch(patch)
    ax1.set_xlim([-3, 3])
    ax1.set_ylim([-3, 3])
def test_cull_markers():
    """Markers outside the view limits are culled: saving a zoomed-in plot
    of 20000 points must produce small PDF/SVG output."""
    x = np.random.random(20000)
    y = np.random.random(20000)
    fig, ax = plt.subplots()
    ax.plot(x, y, 'k.')
    # Zoom to a range containing none of the data.
    ax.set_xlim(2, 3)
    pdf = io.BytesIO()
    fig.savefig(pdf, format="pdf")
    assert len(pdf.getvalue()) < 8000
    svg = io.BytesIO()
    fig.savefig(svg, format="svg")
    assert len(svg.getvalue()) < 20000
@image_comparison(['hatching'], remove_text=True, style='default')
def test_hatching():
    """Image comparison: hatching renders with the default hatch colour
    and is not affected by an explicit edgecolor, for both patches and
    collections."""
    fig, ax = plt.subplots(1, 1)
    # Default hatch color.
    rect1 = mpatches.Rectangle((0, 0), 3, 4, hatch='/')
    ax.add_patch(rect1)
    rect2 = mcollections.RegularPolyCollection(4, sizes=[16000],
                                               offsets=[(1.5, 6.5)],
                                               transOffset=ax.transData,
                                               hatch='/')
    ax.add_collection(rect2)
    # Ensure edge color is not applied to hatching.
    rect3 = mpatches.Rectangle((4, 0), 3, 4, hatch='/', edgecolor='C1')
    ax.add_patch(rect3)
    rect4 = mcollections.RegularPolyCollection(4, sizes=[16000],
                                               offsets=[(5.5, 6.5)],
                                               transOffset=ax.transData,
                                               hatch='/', edgecolor='C1')
    ax.add_collection(rect4)
    ax.set_xlim(0, 7)
    ax.set_ylim(0, 9)
def test_remove():
    """Artist.remove() detaches the artist from its Axes/Figure, drops it
    from the mouseover set, and marks the figure/axes stale again."""
    fig, ax = plt.subplots()
    im = ax.imshow(np.arange(36).reshape(6, 6))
    ln, = ax.plot(range(5))
    assert fig.stale
    assert ax.stale
    # Drawing clears the stale flags.
    fig.canvas.draw()
    assert not fig.stale
    assert not ax.stale
    assert not ln.stale
    assert im in ax._mouseover_set
    assert ln not in ax._mouseover_set
    assert im.axes is ax
    im.remove()
    ln.remove()
    for art in [im, ln]:
        assert art.axes is None
        assert art.figure is None
    assert im not in ax._mouseover_set
    assert fig.stale
    assert ax.stale
@image_comparison(["default_edges.png"], remove_text=True, style='default')
def test_default_edges():
    """Image comparison: default edge styling of lines, bars, text boxes
    and path patches."""
    # Remove this line when this test image is regenerated.
    plt.rcParams['text.kerning_factor'] = 6
    fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(2, 2)
    ax1.plot(np.arange(10), np.arange(10), 'x',
             np.arange(10) + 1, np.arange(10), 'o')
    ax2.bar(np.arange(10), np.arange(10), align='edge')
    ax3.text(0, 0, "BOX", size=24, bbox=dict(boxstyle='sawtooth'))
    ax3.set_xlim((-1, 1))
    ax3.set_ylim((-1, 1))
    pp1 = mpatches.PathPatch(
        mpath.Path([(0, 0), (1, 0), (1, 1), (0, 0)],
                   [mpath.Path.MOVETO, mpath.Path.CURVE3,
                    mpath.Path.CURVE3, mpath.Path.CLOSEPOLY]),
        fc="none", transform=ax4.transData)
    ax4.add_patch(pp1)
def test_properties():
    """Artist.properties() on a bare Line2D completes without warnings."""
    ln = mlines.Line2D([], [])
    ln.properties()  # Check that no warning is emitted.
def test_setp():
    """plt.setp handles empty lists, arbitrary iterables of artists, and
    the *file* argument for documentation output."""
    # Check empty list
    plt.setp([])
    plt.setp([[]])
    # Check arbitrary iterables
    fig, ax = plt.subplots()
    lines1 = ax.plot(range(3))
    lines2 = ax.plot(range(3))
    martist.setp(chain(lines1, lines2), 'lw', 5)
    plt.setp(ax.spines.values(), color='green')
    # Check *file* argument
    sio = io.StringIO()
    plt.setp(lines1, 'zorder', file=sio)
    assert sio.getvalue() == '  zorder: float\n'
def test_None_zorder():
    """Setting zorder to None restores the class default zorder."""
    fig, ax = plt.subplots()
    ln, = ax.plot(range(5), zorder=None)
    assert ln.get_zorder() == mlines.Line2D.zorder
    ln.set_zorder(123456)
    assert ln.get_zorder() == 123456
    ln.set_zorder(None)
    assert ln.get_zorder() == mlines.Line2D.zorder
@pytest.mark.parametrize('accept_clause, expected', [
    ('', 'unknown'),
    ("ACCEPTS: [ '-' | '--' | '-.' ]", "[ '-' | '--' | '-.' ]"),
    ('ACCEPTS: Some description.', 'Some description.'),
    ('.. ACCEPTS: Some description.', 'Some description.'),
    ('arg : int', 'int'),
    ('*arg : int', 'int'),
    ('arg : int\nACCEPTS: Something else.', 'Something else. '),
])
def test_artist_inspector_get_valid_values(accept_clause, expected):
    """ArtistInspector.get_valid_values parses the various ACCEPTS /
    numpydoc docstring forms of a setter into the expected string."""
    class TestArtist(martist.Artist):
        def set_f(self, arg):
            pass
    # Inject the parametrized accepts clause into the setter's docstring.
    TestArtist.set_f.__doc__ = """
    Some text.
    %s
    """ % accept_clause
    valid_values = martist.ArtistInspector(TestArtist).get_valid_values('f')
    assert valid_values == expected
assert valid_values == expected
def test_artist_inspector_get_aliases():
# test the correct format and type of get_aliases method
ai = martist.ArtistInspector(mlines.Line2D)
aliases = ai.get_aliases()
assert aliases["linewidth"] == {"lw"}
| true |
707c841c33d2550c92c6665949ab433af6e7889b | Python | mgarkusha/GB-Python | /Урок1/hw01_normal.py | UTF-8 | 1,383 | 4.40625 | 4 | [] | no_license | # Задача-1: Дано произвольное целое число, вывести самую большую цифру этого числа.
import math
# Task 1: given an arbitrary integer, print its largest digit.
# NOTE(review): math.log10 fails for num <= 0 -- the input is assumed
# to be a positive integer.
num = int(input('Введите число: '))
razryad = int(math.log10(num))  # index of the most significant decimal place
i = 0
while razryad >= 0:
    # Use floor division (//) instead of int(num / 10**razryad): true
    # division goes through float and loses precision for integers
    # larger than 2**53.
    k = num // (10**razryad)
    print ('цифра',k)
    # Strip the digit just printed and move to the next place.
    num -= 10**razryad*k
    razryad -= 1
    # Track the maximum digit seen so far.
    if i == 0:
        bigNumber = k
    elif k > bigNumber:
        bigNumber = k
    i += 1
print('Самое большое число', bigNumber)
# Task 2: read the initial values of two variables from the user,
# swap them using only those two variables, and print the new values.
a = int(input('Введите число 1: '))
b = int(input('Введите число 2: '))
print('было', a, b)
# Arithmetic swap without a temporary variable.
a = a + b
b = a - b
a = a - b
print('стало', a, b)
# Task 3 (not implemented): compute the roots of a quadratic equation
# ax^2 + bx + c = 0.  Use sqrt() from the math module:
# import math
# math.sqrt(4) -- computes the square root of 4
| true |
283fc1b8f9f273bd39b00f4e510acb1c57949857 | Python | mengjian0502/eee511_team03_finalproject | /eyeclosure/eyeclosure_extract.py | UTF-8 | 1,501 | 2.515625 | 3 | [
"MIT"
] | permissive | """
"""
import numpy as np
import torch
from six.moves import cPickle as pickle
# Merge the open-eye and closed-eye pickles into combined train/test
# arrays, convert them to torch tensors, and save each tensor to disk.
pickle_files = ['./open_eyes.pickle', './closed_eyes.pickle']
for file_index, pickle_file in enumerate(pickle_files):
    with open(pickle_file, 'rb') as f:
        save = pickle.load(f)
        if file_index == 0:
            # The first file seeds the arrays.
            train_dataset = save['train_dataset']
            train_labels = save['train_labels']
            test_dataset = save['test_dataset']
            test_labels = save['test_labels']
        else:
            print("here")
            # Later files are appended along the sample axis.
            train_dataset = np.concatenate((train_dataset, save['train_dataset']))
            train_labels = np.concatenate((train_labels, save['train_labels']))
            test_dataset = np.concatenate((test_dataset, save['test_dataset']))
            test_labels = np.concatenate((test_labels, save['test_labels']))
        del save  # hint to help gc free up memory
# Convert to float tensors; labels drop their trailing singleton dim.
train_dataset_tensor = torch.Tensor(train_dataset)
train_label_tensor = torch.Tensor(train_labels).squeeze(1)
test_dataset_tensor = torch.Tensor(test_dataset)
test_label_tensor = torch.Tensor(test_labels).squeeze(1)
print('Training set', train_dataset_tensor.size(), train_label_tensor.size())
print('Test set', test_dataset_tensor.size(), test_label_tensor.size())
torch.save(train_dataset_tensor, './eyeclosure_train_data.pt')
torch.save(train_label_tensor, './eyeclosure_train_label.pt')
torch.save(test_dataset_tensor, './eyeclosure_test_data.pt')
torch.save(test_label_tensor, './eyeclosure_test_label.pt')
3ef840bace16b1d0ed336cf18c5904407cdde3d0 | Python | zingpython/kungFuShifu | /day_two/6.py | UTF-8 | 185 | 3.65625 | 4 | [] | no_license |
dictonary = {"A":6,"B":4,"C":1,"D":3,"E":4,"F":1}
search_value = 5
result = []
for key in dictonary.keys():
if dictonary[key] == search_value:
result.append(key)
print(result)
| true |
93495f78f5265a1804dc1087adb55d4b8a7b8ccb | Python | nucleomis/Archivos_Python | /ejercicios 1er año/promedio de altura.py | UTF-8 | 719 | 4.28125 | 4 | [] | no_license | #Cargar por teclado y almacenar en una lista las alturas de 5 personas
# (valores float)
#Obtener el promedio de las mismas.
#Contar cuántas personas son más altas que el promedio y cuántas más bajas.
# Read five heights, compute their average, and list which entries are
# above and which are below it.
altura=[]   # all heights entered
menor=[]    # heights below the average
mayor=[]    # heights above the average
for x in range(5):
    ingreso=float(input("ingrese una altura: "))
    altura.append(ingreso)
suma=sum(altura)
promedio=float(suma/5)  # mean of the five heights (already a float; the cast is redundant)
for x in range(5):
    # Heights exactly equal to the average land in neither list.
    if altura[x]<promedio:
        menor.append(altura[x])
    if altura[x]>promedio:
        mayor.append(altura[x])
print("las alturas ingresadas son: ", altura)
print("el promedio de las alturas es: ", promedio)
print("las alturas mayores al promedio son: ", mayor)
print("las alturas menores al promedio son: ", menor)
59c6ad7766c5907acae83197faa927afe21e9203 | Python | agronholm/anyio | /src/anyio/streams/buffered.py | UTF-8 | 4,500 | 2.9375 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
from collections.abc import Callable, Mapping
from dataclasses import dataclass, field
from typing import Any
from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
from ..abc import AnyByteReceiveStream, ByteReceiveStream
@dataclass(eq=False)
class BufferedByteReceiveStream(ByteReceiveStream):
"""
Wraps any bytes-based receive stream and uses a buffer to provide sophisticated
receiving capabilities in the form of a byte stream.
"""
receive_stream: AnyByteReceiveStream
_buffer: bytearray = field(init=False, default_factory=bytearray)
_closed: bool = field(init=False, default=False)
async def aclose(self) -> None:
await self.receive_stream.aclose()
self._closed = True
@property
def buffer(self) -> bytes:
"""The bytes currently in the buffer."""
return bytes(self._buffer)
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
return self.receive_stream.extra_attributes
async def receive(self, max_bytes: int = 65536) -> bytes:
if self._closed:
raise ClosedResourceError
if self._buffer:
chunk = bytes(self._buffer[:max_bytes])
del self._buffer[:max_bytes]
return chunk
elif isinstance(self.receive_stream, ByteReceiveStream):
return await self.receive_stream.receive(max_bytes)
else:
# With a bytes-oriented object stream, we need to handle any surplus bytes
# we get from the receive() call
chunk = await self.receive_stream.receive()
if len(chunk) > max_bytes:
# Save the surplus bytes in the buffer
self._buffer.extend(chunk[max_bytes:])
return chunk[:max_bytes]
else:
return chunk
async def receive_exactly(self, nbytes: int) -> bytes:
"""
Read exactly the given amount of bytes from the stream.
:param nbytes: the number of bytes to read
:return: the bytes read
:raises ~anyio.IncompleteRead: if the stream was closed before the requested
amount of bytes could be read from the stream
"""
while True:
remaining = nbytes - len(self._buffer)
if remaining <= 0:
retval = self._buffer[:nbytes]
del self._buffer[:nbytes]
return bytes(retval)
try:
if isinstance(self.receive_stream, ByteReceiveStream):
chunk = await self.receive_stream.receive(remaining)
else:
chunk = await self.receive_stream.receive()
except EndOfStream as exc:
raise IncompleteRead from exc
self._buffer.extend(chunk)
async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
"""
Read from the stream until the delimiter is found or max_bytes have been read.
:param delimiter: the marker to look for in the stream
:param max_bytes: maximum number of bytes that will be read before raising
:exc:`~anyio.DelimiterNotFound`
:return: the bytes read (not including the delimiter)
:raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
was found
:raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
bytes read up to the maximum allowed
"""
delimiter_size = len(delimiter)
offset = 0
while True:
# Check if the delimiter can be found in the current buffer
index = self._buffer.find(delimiter, offset)
if index >= 0:
found = self._buffer[:index]
del self._buffer[: index + len(delimiter) :]
return bytes(found)
# Check if the buffer is already at or over the limit
if len(self._buffer) >= max_bytes:
raise DelimiterNotFound(max_bytes)
# Read more data into the buffer from the socket
try:
data = await self.receive_stream.receive()
except EndOfStream as exc:
raise IncompleteRead from exc
# Move the offset forward and add the new data to the buffer
offset = max(len(self._buffer) - delimiter_size + 1, 0)
self._buffer.extend(data)
| true |
2f06182b39e06d28e9a336f3a9f3b631c483b843 | Python | AreebaShakir/Initial-Tasks | /task3.py | UTF-8 | 893 | 3.015625 | 3 | [] | no_license | from flask import Flask, request, jsonify
import operator
app = Flask(__name__)
def reverse(func):
def inner():
data = request.get_json()
op = data['op']
inverse = {"+": "-",
"-": "+",
"*":"/",
"/":"*"}
data['op'] = inverse[op]
result = func()
return result
return inner
@app.route('/task3', methods = ['POST'])
@reverse #decorated
def calculator():
data = request.get_json()
op1 = data['op1']
op2 = data['op2']
op = data['op']
ops = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv
}
op_func = ops[op]
result = op_func(op1, op2)
return jsonify('result: ',result)
if __name__ == '__main__':
app.run(debug=True, port=5001) | true |
619e8b2343643c93e5eb255b24b575aaae8b8deb | Python | antrad1978/bigdata | /PySpark/json1.py | UTF-8 | 669 | 2.953125 | 3 | [] | no_license | # Spark
from pyspark import SparkContext
# Spark Streaming
from pyspark.streaming import StreamingContext
# Kafka
from pyspark.streaming.kafka import KafkaUtils
# json parsing
import json
sc = SparkContext(appName="json")
sc.setLogLevel("WARN")
import json
def jsonParse(dataLine):
parsedDict = json.loads(dataLine)
valueData = parsedDict.values()
return(valueData)
jsonData = '{"Time":"6AM", "Temperature":15}'
jsonParsedData = jsonParse(jsonData)
tempData = sc.textFile("sample.json",4)
tempData.take(4)
def createJSON(data):
dataDict = {}
dataDict['Name'] = data[0]
dataDict['Age'] = data[1]
return(json.dumps(dataDict))
| true |
549e190b1aea9c2f5968a24739e2296afda06d1b | Python | PPodhorodecki/Prework | /02_Typy_danych_w_Pythonie/Zadanie_1-Typy_danych/task.py | UTF-8 | 373 | 3.625 | 4 | [] | no_license | calkowita=5
rzeczywista=3.14
tekst="Python"
tak=True
result="Zmienna {} ma wartość {}".format("całkowita", calkowita)
print(result)
result="Zmienna {} ma wartość {}".format("rzeczywista", rzeczywista)
print(result)
result="Zmienna {} ma wartość {}".format("tekstowa", tekst)
print(result)
result="Zmienna {} ma wartość {}".format("logiczna", tak)
print(result) | true |
42db02aefdf3f91c213a6ec556e61b25e9bb5b2e | Python | FreulonManon/fakenews | /arc.py | UTF-8 | 2,554 | 2.734375 | 3 | [] | no_license | import arcpy
import json
import tweepy
import time
import csv
from tweepy.streaming import StreamListener
#Enter Twitter API Key information obtenu en créant une api twitter
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# céation d'un fichier csv pour stocker les données colléctées
with open("D:/VIGNERON/Output.csv", "w") as csvfile:
file = csv.writer(csvfile, delimiter=';',quotechar='|', quoting=csv.QUOTE_MINIMAL)
file.writerow(['Nom_utilisateur'] + ['Date'] + ['Evenement'] + ['Source'] + ['Commune'] + ['Insee'] +['Localisation'] + ['Contenu'] + ['X'] + ['Y'])
data_list = []
count = 0
# entrez votre mot clé désiré
motcle = "Paris"
class StdOutListener(StreamListener):
def on_status(self, status):
json_data = status._json
global count
# compteur à modifier selon votre désire
if count <= 10 :
coord = json_data["coordinates"]
# affichage des tweets seulement si ils possèdent une localisation
if coord != None:
user = status.user.name.encode('ascii', 'ignore').decode('ascii')
print ("Nom d'utilisateur : " + user)
date = str(status.created_at)
print("Date de publication : " + str(status.created_at))
evenement = "Innondation"
print ("Evenement : " + evenement)
source = "twitter"
print ("source : " + source )
commune = "Null"
print ("Nom de la commune : " + commune)
insee = "Null"
print ("Code Insee commune : " + insee)
localisation = "Null"
print ("localisation : " + localisation)
contenu = status.text.encode('ascii', 'ignore').decode('ascii')
print("Tweet text: " + contenu)
lon = coord["coordinates"][0]
lat = coord["coordinates"][1]
print ("Longitude : " + str(lon))
print ("Latitude : " + str(lat))
#écriture des infos dans le fichier de sortie
file = csv.writer(open("D:/VIGNERON/Output.csv", "a"), csvfile, delimiter=';' , quotechar='|' , quoting=csv.QUOTE_MINIMAL )
file.writerow([user]+[date]+[evenement]+[source]+[commune]+[insee]+[localisation]+[contenu]+[lon]+[lat])
count += 1
print count
return True
else :
return False
file.close()
def on_error(self, status_code):
print('Got an error with status code: ' + str(status_code))
return True # To continue listening
def on_timeout(self):
print('Timeout...')
return True # To continue listening
#connexion au flux twitter
listener = StdOutListener()
stream = tweepy.Stream(auth, StdOutListener())
stream.filter(track=[motcle])
| true |
caa06eb951cd2e06db4c402db3a79fda9d27b8e7 | Python | csc522nbagroup/playoffsandsalary | /all 2.py | UTF-8 | 2,492 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 12:39:30 2018
@author: shaohanwang
"""
import pandas as pd
import numpy as np
train_data=pd.read_csv('all.csv')
train_data['Playoffs'] = train_data['Playoffs'].map({'Y': 1, 'N': 0})
train_data=train_data.dropna(axis='columns')
x=train_data.iloc[:,6:-2]
y= train_data.iloc[:,-1]
from keras.layers import Dropout
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=0)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)
input_dim = np.size(x_train, 1)
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.advanced_activations import LeakyReLU
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
classifier.add(Dense(output_dim=42,init='uniform',activation='relu',input_dim=input_dim))
classifier.add(Dense(output_dim=42,init='uniform',activation='relu'))
classifier.add(Dense(output_dim=1,init='uniform',activation='sigmoid'))
classifier.compile(optimizer='adam',loss='mse',metrics=['accuracy'])
classifier.fit(x_train,y_train,batch_size=10,epochs=100)
y_pre=classifier.predict(x_test)
y_pre=(y_pre>0.5)
from sklearn.metrics import confusion_matrix, classification_report
cm=confusion_matrix(y_test,y_pre)
print(classification_report(y_test, y_pre))
#from keras.wrappers.scikit_learn import KerasClassifier
#from sklearn.model_selection import GridSearchCV
#from keras.models import Sequential
#from keras.layers import Dense
#
#def build_classifier():
# classifier = Sequential()
# classifier.add(Dense(output_dim=42,init='uniform',activation='relu',input_dim=input_dim))
#
# classifier.add(Dense(output_dim=42,init='uniform',activation='relu'))
# classifier.add(Dense(output_dim=1,init='uniform',activation='sigmoid'))
# classifier.compile(optimizer='adam',loss='mse',metrics=['accuracy'])
# return classifier
#
#classifier=KerasClassifier(build_fn=build_classifier)
#parameters = {'batch_size':[25,32], 'epochs':[100,300]}
#
#grid_search=GridSearchCV(estimator=classifier, param_grid=parameters,scoring='accuracy',cv=10)
#grid_search=grid_search.fit(x_train,y_train)
#bset_parameters=grid_search.best_params_
#best_acc=grid_search.best_score_
| true |
96be8996f29fb4f34eb3e423e662cad74d5c17d7 | Python | AtharvaBhagat/email-using-python | /sendEmailUsingVoice.py | UTF-8 | 1,621 | 2.84375 | 3 | [] | no_license | # pip install pyttsx3
# pip install SpeechRecognition
import pyttsx3
import speech_recognition as rec
import smtplib
from email.message import EmailMessage
from win10toast import ToastNotifier
notify = ToastNotifier()
listener = rec.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
def talk(text: str):
engine.say(text)
engine.runAndWait()
def hearYou(time=3):
with rec.Microphone() as source:
voice = listener.record(source, time)
command = listener.recognize_google(voice)
low = command.lower()
return low
def sendAnEmail():
talk("Whom to Send")
friend = hearYou()
talk("What is the subject")
subject = hearYou(5)
talk("What is the message")
message = hearYou(5)
# Initialize your emails dictionary
emails = {"person": 'email-id',
"many_people": 'email_id1, email_id2, so on'}
receiver = emails[friend]
notify.show_toast("Emails by AtharvaBhagat",
"Successfully Sent Email to :" + receiver, threaded=True)
talk(f'Successfully sent your email to {receiver}')
send(subject, receiver, message)
def send(subject, friend, message):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('your-email-id', 'your-password')
email = EmailMessage()
email['From'] = 'your-email-id'
email['To'] = friend
email['Subject'] = subject
email.set_content(message)
server.send_message(email)
sendAnEmail()
| true |
2ac918164fb69f90dd53256157aa2253c55960bc | Python | soareswallace/mit-deep-learning | /tutorial_mnist/plot_images.py | UTF-8 | 275 | 2.9375 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
def plot_images(features, labels):
plt.figure(figsize=(10,2))
for i in range(5):
plt.subplot(1, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(features[i], cmap=plt.cm.binary)
plt.xlabel(labels[i])
plt.show() | true |
542959fbd1899f6b9f0120d07e49f075a1aecc9b | Python | StrikeR2018/TDD | /question1/third_test/test_fizzbuzz.py | UTF-8 | 827 | 3.046875 | 3 | [] | no_license | import unittest
import fizzbuzz
class testCase(unittest.TestCase):
def test_case_1(self):
self.assertEqual(fizzbuzz.fizzBuzz(3), "Fizz")
def test_case_2(self):
self.assertEqual(fizzbuzz.fizzBuzz(5), "Buzz")
def test_case_3(self):
self.assertEqual(fizzbuzz.fizzBuzz(15), "not multiple of 3, 5, 15")
def test_case_4(self):
self.assertEqual(fizzbuzz.fizzBuzz(30), "FizzBuzz")
def test_case_5(self):
self.assertEqual(fizzbuzz.fizzBuzz(27), "Fizz")
def test_case_6(self):
self.assertEqual(fizzbuzz.fizzBuzz(100), "not multiple of 3, 5, 15")
def test_case_7(self):
self.assertEqual(fizzbuzz.fizzBuzz(40), "Buzz")
def test_case_8(self):
self.assertEqual(fizzbuzz.fizzBuzz(99), "FizzBuzz")
if __name__ == "__main__":
unittest.main() | true |
b98482805d6241de565adfe11fe620589ab15bea | Python | BorisMs55/django-handy | /django_handy/url.py | UTF-8 | 1,247 | 2.6875 | 3 | [
"MIT"
] | permissive | from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
def simple_urljoin(*parts, append_slash=False):
"""Normalize url parts and join them with a slash."""
parts = list(map(str, parts))
schemes, netlocs, paths, queries, fragments = zip(*(urlsplit(part) for part in parts))
scheme = _last(schemes)
netloc = _last(netlocs)
paths = [x for x in paths if x]
if scheme and not netloc and paths:
netloc, *paths = paths
path = '/'.join((x.strip('/') for x in paths if x.strip('/')))
if paths and parts[0].startswith('/'):
path = '/' + path
if append_slash or (paths and parts[-1].endswith('/')):
path += '/'
query = _last(queries)
fragment = _last(fragments)
return urlunsplit((scheme, netloc, path, query, fragment))
def _last(sequence, default=''):
not_empty = [x for x in sequence if x]
return not_empty[-1] if not_empty else default
def add_query(url: str, **params: str):
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params.update(params)
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
| true |
74c78bc28df423b759eb70b1ecad935248610bc1 | Python | Uthreloss/hearts_pepper | /scripts/make_map.py | UTF-8 | 2,191 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
from pepper_controller import PepperController
import numpy
# pip install pillow
try:
from PIL import Image
except ImportError:
import Image
robotIP = "westey.local" #Stevey
PORT = 9559
class MakeMap(PepperController):
def startingVariables(self):
## Verbal confirmation it's starting
self.say("boop")
## Turn of auto-interaction features
# self.lifeProxy.setState("solitary")
self.lifeProxy.setState("safeguard")
## Set how close Pepper is allowed to get to obstacles
self.motionProxy.setTangentialSecurityDistance(0.01)
self.motionProxy.setOrthogonalSecurityDistance(0.05)
def explore(self,r):
## SLAM in a radius of r metres
self.say("I'm going to have a look around.")
ret = self.navigationProxy.explore(r)
if ret != 0:
print "Exploration failed :("
self.say("Oops, something went wrong. Sorry!")
else:
print "Exploration success!"
self.say("I'm done exploring!")
## Save the map ##
# TODO write the path to a file for later use?
path = self.navigationProxy.saveExploration()
print "saved at: " + path
## start the localization routine so the Pepper can navigate
self.navigationProxy.startLocalization()
print "Started localization"
## Gets the generated map from the robot and displays it on the screen ##
arrMap = self.navigationProxy.getMetricalMap()
map_width = arrMap[1]
map_height = arrMap[2]
img = numpy.array(arrMap[4]).reshape(map_width, map_height)
img = (100 - img) * 2.55 # from 0..100 to 255..0
img = numpy.array(img, numpy.uint8)
Image.frombuffer('L', (map_width, map_height), img, 'raw', 'L', 0, 1).show()
print "Returning to origin"
self.say("I'm heading back to the origin.")
ret = self.goHere(0,0,0)
print ret
if __name__ == '__main__':
task = MakeMap(robotIP, PORT)
task.explore(4)
#task.goHere(1,-1,0)
task.say('Remember to update the map path in reload_map.py so you don\'t load an old map!')
| true |
d571080961ba901443d8276cf27c5e64000dab00 | Python | LancerEnk/softwareTesting_Work3 | /Code_ForModified/Task9/wrong_5_032.py | UTF-8 | 601 | 3.984375 | 4 | [] | no_license | # Task 9: wrong_5_032
# 错误原因:使用replace()函数对首字母进行处理时,只是将替换后的首字母赋给了b,但没有给b增加非首字母的字符串,因此应该在后续为b补上a的后续字母。
# 修改方法:为b赋予a的后续字符串,使用python中string的截取方法。
def fun(input):
a = input
if a[0].isupper() == True:
return(a)
elif a[0].isupper() == False:
b = a[0].replace(a[0],a[0].upper(),1)
b+=a[1:]
return(b)
# 获取输入数值时的代码段
# 输入格式:"konjac"
str1=eval(input())
print(fun(str1))
# wrong_5_032 end
| true |
5b6514e1053ccaeecb2adf26937b71e2a674f430 | Python | maliciousgroup/BugBountyConsole | /src/core/command/ExitCommand.py | UTF-8 | 585 | 2.546875 | 3 | [] | no_license | import asyncio
from src.core.command.base.BaseCommand import BaseCommand
class ExitCommand(BaseCommand):
helper: dict = {
'name': 'exit',
'help': 'This command will gracefully exit the application',
'usage': 'exit'
}
def __init__(self, command: str, print_queue: asyncio.Queue):
super().__init__()
self.command: str = command
self.print_queue: asyncio.Queue = print_queue
async def main(self) -> None:
await self.execute()
async def execute(self) -> None:
raise EOFError
| true |
dc43503080966efb3cc6ee909b8a2eb569c589ba | Python | zhongsangyang/PythonSimpleTest | /testPython/com/ht/pachogn/testnew.py | UTF-8 | 4,170 | 2.859375 | 3 | [] | no_license | # coding=utf-8
import urllib
import json
from urllib import request
import re
import os
class CrawlOptAnalysis(object):
def __init__(self,search_word="美女"):
self.search_word = search_word
self.headers={
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'Host': 'www.toutiao.com',
'Referer': 'http://www.toutiao.com/search/?keyword={0}'.format(urllib.parse.quote(self.search_word)),
'Accept': 'application/json, text/javascript',
}
def _crawl_data(self,offset):
# 模拟依据传入 offset 进行分段式上拉加载更多 item 数据爬取
url = 'http://www.toutiao.com/search_content/?offset={0}&format=json&keyword={1}&autoload=true&count=20&cur_tab=1'.format(offset, urllib.parse.quote(self.search_word))
# print(url)
try:
with request.urlopen(url, timeout=10) as response:
content=response.read();
print(content);
except Exception as e:
content=None;
print("Exception:",str(e));
return content;
def _parseData(self,content):
'''
解析每次上拉加载更多爬取的 item 数据及每个 item 点进去详情页所有大图下载链接
[
{'article_title':XXX, 'article_image_detail':['url1', 'url2', 'url3']},
{'article_title':XXX, 'article_image_detail':['url1', 'url2', 'url3']}
]
'''
if content is None:
return None;
try:
data_list=json.loads(content)['data'];
#print(data_list);
print(data_list);
result_list = list()
for item in data_list:
#result_dict={'article_title':item['title']};
result_dict = {'article_title': item['title']}
url_list = list();
for url in item['image_detail']:
url_list.append(url['url']);
result_dic={'article_image_detail':url_list};
result_list.append(result_dic);
except Exception as e:
print('parse data exception.'+str(e))
return result_list
def save_file(self,page_title,url):
'''把爬取的所有大图下载下来
下载目录为./output/search_word/page_title/image_file
#For Windows File filter: '/\:*?"<>|'
'''
if url is None or page_title is None:
print('save picture params is None!')
return
reg_str1 = r"[\/\\\:\*\?\"\<\>\|]"
reg_str = r"[/\:*?<>|]"
page_title = re.sub(reg_str, "", page_title);
save_dir='./output/{0}{1}/'.format(self.search_word,page_title)
if os.path.exists(save_dir) is False:
os.makedirs(save_dir)
save_file=save_dir+url.split('/')[-1]+'.png';
if os.path.exists(save_dir):
return
try:
with request.urlopen(url,30) as response,open(save_file,"wb") as f_save:
f_save.write(response.read());
print('Image is saved! search_word={0}, page_title={1}, save_file={2}'.format(self.search_word, page_title, save_file))
except Exception as e:
print("can't save this file",str(e));
def go(self):
offset=0;
while True:
page_list=self._parseData(self._crawl_data(offset));
if page_list is None or len(page_list)<=0:
break
try:
print(page_list);
for page in page_list:
artice_title=page['article_title'];
for img in page['article_image_detail']:
self._save_picture(artice_title, img)
except Exception as e:
print("go Exception",str(e));
finally:
offset+=20;
tt=CrawlOptAnalysis('美女');
tt.go();
| true |
b829943cbe955cb595d080f8811fb2cf2b289a7a | Python | sumsar01/Lattice-gauge-theory | /Spin_1_fields/Projector_advanced.py | UTF-8 | 2,480 | 2.578125 | 3 | [] | no_license | from Gauss_law_advanced import *
from Storage import *
from qutip import *
from itertools import product
import itertools as itertools
import numpy as np
###############################################################################
# Making projection
###############################################################################
def make_projection(x, y):
Nm = (x*y)*4
G_n = [0]*x*y
save_dir = '/home/rasmus/Desktop/Uni/Speciale/Program/LGT/Data/Lattice_Projection/'
filename = 'good_stateReps_N=' + str(Nm) + '_x=' + str(x) + '_y=' +str(y)
if os.path.isfile(save_dir + filename + '.p'):
good_stateReps = load_data(save_dir, filename)
else:
good_stateReps = advanced_projector(x, y)
save_data(good_stateReps, save_dir, filename)
num_good_stateReps = len(good_stateReps)
# Initialize the projector as an array to begin with
projector_array = np.empty([2**(Nm),0])
# For each "good" configuration of spins add the corresponding state vector to the projector
i = 0
starttime = time.time()
while i < num_good_stateReps:
progress = round(i/num_good_stateReps*100)
speed = round(progress/(time.time() - starttime),6)
print('\rMaking projection, progress = ' + str(progress) + r'%, Speed = ' + str(speed) + r'%/s',end='')
projector_array = np.append(projector_array,tensor([basis(2,x) for x in good_stateReps[i]]).full(),axis=1)
i += 1
# Turn the projector into a Qobj with the correct dimensions
projector = Qobj(projector_array)
projector.dims = [[2]*Nm,[num_good_stateReps]]
return projector
def Transform_into_G(x, y, m, a, e, operator, name):
save_dir = '/home/rasmus/Desktop/Uni/Speciale/Program/LGT/Data/Symmetry_transformations/'
filename = 'Gauss_law_x=' + str(x) + '_y=' +str(y)
if os.path.isfile(save_dir + filename + '.p'):
G = load_data(save_dir, filename)
else:
G = make_projection(x, y)
save_data(G, save_dir, filename)
save_dir2 = '/home/rasmus/Desktop/Uni/Speciale/Program/LGT/Data/Projected_operators/'
filename2 = str(name) + '_x=' + str(x) + '_y=' + str(y) + '_m=' + str(m) + '_a=' + str(a) + '_e=' + str(e)
if os.path.isfile(save_dir2 + filename2 + '.p'):
operator = load_data(save_dir2, filename2)
else:
operator = G.dag()*operator*G
save_data(operator, save_dir2, filename2)
return operator
| true |
e924030346aa50175885f0203c7ce0678c2f3dd5 | Python | Scoowy/PythonLuisa | /tareanro1sb_a---expresiones-regulares-lfbermeo-main/src/problem2.py | UTF-8 | 372 | 3.15625 | 3 | [
"MIT"
] | permissive | import re
# Completar la función regex_ayuda para que tome una expresión regular (como una cadena) y
# una cadena a la que desea aplicar la expresión regular.
# La función debe devolver una lista con todas las apariciones del patrón en la cadena.
def regex_ayuda(patron, cadena_entrada):
pattern = re.compile(patron)
return pattern.findall(cadena_entrada)
| true |
c762eb94b2994f9ca2566fb73bba5b0f176d1fe1 | Python | Nivek-Stack/CPS3320 | /Project2/WordFinder.py | UTF-8 | 881 | 4.15625 | 4 | [] | no_license | from dictionary import *
dictionary = Dictionary()
# pip install dictionary
# words.txt OR A.txt
words = [] # Empty List that will store everything from the word file.
new_words = [] # Empty List to make Strings later on.
f = open('words.txt', 'r') # Opens the File in read mode only.
words = f.read().splitlines() # Stores everything into words.
f.close() # Closes the File.
new_words = str(words) # Converts everything in words to a String and is stored in new_words.
for i in range(len(words)): # For Loop the length of the words List.
new_words = words.pop() # Pops the words from the words List, and stores them into the new_words List.
if dictionary.isInDictionary(word=new_words): # If the word is in the Dictionary Library...
print(new_words.capitalize()) # Print the word to the user. .capitalize() capitalizes the first letter in the word.
| true |
44d7f596e9fad288916c6c4b9ad87bb858eec3d3 | Python | leandrogpv/chatbot-whats | /bot_whats.py | UTF-8 | 6,036 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from time import sleep
from selenium.webdriver.common.keys import Keys
'''
Bugs identificados a serem corrigios:
*_Caso houver mensagens diferentes do mesmo remetente que chegaram no mesmo
minuto ele nao considera como duas mensagens e acaba nao lendo a ultima
*_Caso nao haja mensagens novas o bot ficara esperando na tela inicial,
porem caso o usuario click em um chat qualquer o bot ira ler a ultima
mensagem da conversa e caso esta seja diferente da que esta no arquivo
de log ele começara uma conversa com o usuario clicado
'''
#############################################################################
#Classe Whatsapp
class WhatsappBot:
def enviarmensagens(self, mensagem, browser):
self.mensagem = mensagem
self.browser = browser#.find_element_by_xpath('./../../../..')
barra_envio = self.browser.find_element_by_class_name('_1SEwr')
chat_box = barra_envio.find_element_by_class_name('_13NKt')
#chat_box = self.browser.find_element_by_class_name('_13mgZ')
sleep(0.5)
chat_box.click()
chat_box.send_keys(self.mensagem)
botao_enviar = barra_envio.find_element_by_class_name('_4sWnG')
#"//span[@data-icon='send']")
sleep(0.5)
botao_enviar.click()
def pegar_novas_mensagens(self, browser):
mensagem = ''
pegardono = ''
ultimamensagem = ''
mensagensclicado = 0 #verifica se o chat selecionado no momento possui
#novas mensagens
#pegando lista de chats lateral esquerda
listaremetentes = browser.find_elements_by_class_name('_3m_Xw')
remetente_arquivo = ''
horaultima_arquivo = ''
mensagem_arquivo = ''
#pegando cada remetente e checando quais tem novas mensagens
for item in listaremetentes:
mensagensclicado = 0
msgnova = ''
remetente = ''
horaultima = ''
#pegando o remetente da conversa
remetente = item.find_element_by_class_name('zoWT4').text
#pegando a hora da ultima conversa
horaultima = item.find_element_by_class_name('_1i_wG').text
#variavel temporaria para armazenar o componente pai do numero de mensagens
temp = item.find_element_by_class_name('_37FrU')
#pegando o numero de novas mensagens na notificaçao
msgnova = temp.find_element_by_class_name('_1i_wG').text
try:
msgnova = int(msgnova)
except:
msgnova = 0
if msgnova > 0:
#clicando no chat que possui nova mensagem
item.find_element_by_class_name('zoWT4').click()
#aguardando alguns segundos para as mensagens serem carregadas
#time.sleep(2)
#subindo niveis no DOM para acessar todas as mensagens do chat
#selecionado acima com click
conversas = item.find_element_by_xpath('./../../../../../../..')
#pegando as conversas do remetente selecionado
conversas = conversas.find_elements_by_xpath("//div[@class='y8WcF']")
sleep(1)
for conversa in conversas:
mensagemteste = conversa.text.split('\n')
mensagem = str(mensagemteste[-2:-1]).strip('[]')
#adicionando a ultima mensagem a ser gravada no arquivo
remetente_arquivo = remetente
horaultima_arquivo = horaultima
mensagem_arquivo = mensagem
print(f'{remetente} enviou a seguinte mensagem {mensagem}')
else:
mensagensclicado = 1
if mensagensclicado == 1:
file = open('mensagenswhats.txt', 'r')
ultimamensagem = file.readline().split(',')
#tratando quando o arquivo de logs de mensagens esta vazio
#caso vazio nao sera lido pois possivelmente e a primeira vez
#que o script esta sendo executado
if len(ultimamensagem) > 1:
conversas = item.find_element_by_xpath('./../../../../../../..')
conversas = conversas.find_elements_by_xpath("//div[@class='y8WcF']")
for conversa in conversas:
pegardono = ''
#pegardono = conversa.find_element_by_xpath("//div[@class='_2F01v']")
pegardono = conversa.get_attribute('innerHTML').split('class="_2wUmf')
mensagemfull = conversa.text.split('\n')
mensagem = str(mensagemfull[-2:-1]).strip('[]')
if mensagem != ultimamensagem[2] and remetente == ultimamensagem[0]:
#print(pegardono[-1:])
if '<div class="_2F01v">' in str(pegardono[-1:]):
mensagem = ''
else:
#adicionando a ultima mensagem a ser gravada no arquivo
remetente_arquivo = remetente
horaultima_arquivo = horaultima
mensagem_arquivo = mensagem
print(f'{remetente} enviou a seguinte mensagem {mensagem} estou aqui')
else:
mensagem = ''
print('Não há novas mensagens1')
else:
mensagem = ''
print('Não há novas mensagens2')
file.close()
if remetente_arquivo != '':
file = open('mensagenswhats.txt', 'w')
file.write(f"{remetente_arquivo},{horaultima_arquivo},{mensagem_arquivo}")
file.close()
return mensagem
| true |
664fac06d8c70d7fdc08bb21d3eb88db5bce74c3 | Python | chandlersupple/Color-Pass-Filter | /ColorPassFilter.py | UTF-8 | 2,876 | 3.234375 | 3 | [
"MIT"
] | permissive | # Chandler Supple, 6/2/2018
# The algorithm may be unresponsive for a few seconds after having initialized depending on the file size.
# To add, some '.jpg' images may not work due to the PIL library.
import io
import pygame
from PIL import Image
from urllib2 import urlopen
url = raw_input('Image Url (png, jpg): ')
color_scale = raw_input('Color Pass Filter (red, blue, green, grey, inverted): ')
url_open = urlopen(url).read()
img = io.BytesIO(url_open)
img_open = Image.open(img)
pix_val = list(img_open.getdata())
pix = img_open.load()
dimensions = img_open.size
try:
pygame.init()
master = pygame.display.set_mode((dimensions[0], dimensions[1]))
pygame.display.set_caption('Color Pass Filter')
clock = pygame.time.Clock()
master.fill((0, 0, 0))
quit = 0
if (color_scale == 'grey'):
pix_num = 0
for y in range (0, dimensions[1]):
if (pix_num >= len(pix_val) - 1):
break
for x in range(0, dimensions[0]):
listed_rbg = [pix[x,y][0], pix[x,y][1], pix[x,y][2]]
color = (sorted(listed_rbg)[2], sorted(listed_rbg)[2], sorted(listed_rbg)[2])
pygame.draw.rect(master, color, (x, y, 1, 1), 0)
if (color_scale == 'red'):
pix_num = 0
for y in range (0, dimensions[1]):
if (pix_num >= len(pix_val) - 1):
break
for x in range(0, dimensions[0]):
color = (pix[x,y][0], 0, 0)
pygame.draw.rect(master, color, (x, y, 1, 1), 0)
if (color_scale == 'green'):
pix_num = 0
for y in range (0, dimensions[1]):
if (pix_num >= len(pix_val) - 1):
break
for x in range(0, dimensions[0]):
color = (0, pix[x,y][1], 0)
pygame.draw.rect(master, color, (x, y, 1, 1), 0)
if (color_scale == 'blue'):
pix_num = 0
for y in range (0, dimensions[1]):
if (pix_num >= len(pix_val) - 1):
break
for x in range(0, dimensions[0]):
color = (0, 0, pix[x,y][2])
pygame.draw.rect(master, color, (x, y, 1, 1), 0)
if (color_scale == 'inverted'):
pix_num = 0
for y in range (0, dimensions[1]):
if (pix_num >= len(pix_val) - 1):
break
for x in range(0, dimensions[0]):
color = (255 - pix[x,y][0], 255 - pix[x,y][1], 255 - pix[x,y][1])
pygame.draw.rect(master, color, (x, y, 1, 1), 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit = 1
pygame.quit()
pygame.display.flip()
clock.tick(60)
except:
if (quit != 1):
print("Sorry, but an error occured. It's likely that the image you inputted was invalid.")
| true |
c8fec20fce52112171d2941c2bc50bb41483bf07 | Python | Charleezy/WinrateForFamiliarChamps | /api_wrapper/api_wrapper.py | UTF-8 | 6,318 | 2.84375 | 3 | [] | no_license | from urllib.parse import urlencode
import urllib.request
from urllib.error import HTTPError
from API_KEY import API_KEY
import time
import logging
import json
import threading
import queue
# Riot API rate-limit windows (seconds) and the max requests allowed in each;
# each window carries a little slack over the documented 10s/600s limits.
SHORT_TIME_LIMIT = 11
SHORT_MAX_REQUESTS = 10
LONG_TIME_LIMIT = 605
LONG_MAX_REQUESTS = 500
# HTTP status the API returns when the rate limit has been exceeded.
MAX_REQUEST_HTTP_CODE = 429
# Lower number = higher priority in the PriorityQueue.
HIGHEST_PRIORITY = 0
NUM_THREADS = 10
BASE_URL = 'https://na.api.pvp.net/api/lol/na'
class MaxReqsException(Exception):
    """Raised when the Riot API reports the rate limit was exceeded (HTTP 429)."""

    def __init__(self, value):
        # Forward the value to Exception so args, pickling and default
        # formatting behave normally (the original skipped this).
        super().__init__(value)
        self.value = value  # the HTTP status code that triggered the error

    def __str__(self):
        return repr(self.value)
class ApiWrapper(object):
    """
    Rate-limited client for the Riot API.

    Contains a priority queue called self.request_queue. Expects items to be
    pushed onto this queue of the form (request_priority(int), url, callback).
    Once a request is completed, (data, callback) is pushed onto
    self.result_queue, from which items are expected to be pulled.
    """

    def __init__(self):
        self.lock = threading.Lock()
        self.request_queue = queue.PriorityQueue()
        self.result_queue = queue.Queue()
        # [window start time, requests made in window] for the short limit.
        self.short_limit = [time.time(), 0]
        # Similar to above except for LONG_TIME_LIMIT.
        self.long_limit = [time.time(), 0]
        # Daemon workers die with the main thread instead of hanging exit.
        for i in range(NUM_THREADS):
            t = threading.Thread(target=self.worker_thread)
            t.daemon = True
            t.start()

    def _constraint_check(self):
        """Return 0 if a request may be issued now, else the seconds to wait.

        Does all of its checks/updates under the class-wide lock, so only one
        thread updates the counters at a time. There is no persistence
        between runs, so this won't account for runs in the last 10 minutes
        that were stopped.
        """
        with self.lock:
            now = time.time()
            if now - self.long_limit[0] > LONG_TIME_LIMIT:
                # Reset the long time limit and counter once the window passes.
                self.long_limit = [now, 0]
            elif self.long_limit[1] == LONG_MAX_REQUESTS:
                timeleft = LONG_TIME_LIMIT - (now - self.long_limit[0])
                logging.info('LONG LIMIT REACHED, wait {0} seconds'.format(timeleft))
                return timeleft
            # Same checks for the short window; mirrors the block above and
            # could be refactored to share code at some point.
            if now - self.short_limit[0] > SHORT_TIME_LIMIT:
                self.short_limit = [now, 0]
            elif self.short_limit[1] == SHORT_MAX_REQUESTS:
                timeleft = SHORT_TIME_LIMIT - (now - self.short_limit[0])
                logging.info('SHORT LIMIT REACHED, wait {0} seconds'.format(timeleft))
                return timeleft
            # Both windows have room: charge this request against them.
            self.long_limit[1] += 1
            self.short_limit[1] += 1
            return 0

    def worker_thread(self):
        """Grabs a request off the queue, sleeps while the rate limit is hit,
        then issues it. If the RIOT API returns a 429 code (too many requests
        within the allotted time... although this should rarely happen since
        _constraint_check handles the waiting) the request is put back on the
        queue to be serviced by another thread.
        """
        while True:
            # Throw away the first element... it's just the priority.
            _, url, callback = self.request_queue.get()
            while True:
                timeleft = self._constraint_check()
                if timeleft > 0:
                    time.sleep(timeleft)
                else:
                    break
            try:
                data = self.issue_api_call(url)
            except MaxReqsException:
                # Our rate limiting hasn't worked for some reason. Re-queue
                # the request with high priority so it is serviced soon.
                self.request_queue.put((HIGHEST_PRIORITY, url, callback))
            else:
                # Success case: put it on the result queue.
                # Sleep so we aren't too aggressive in case we are breaking
                # the longer time constraint because of a previous run.
                time.sleep(10)
                self.result_queue.put((data, callback))
            finally:
                self.request_queue.task_done()

    @staticmethod
    def issue_api_call(url):
        """Blocking GET of *url*; returns the decoded JSON payload.

        Raises MaxReqsException when the API reports HTTP 429; re-raises any
        other HTTPError unchanged.
        """
        try:
            f = urllib.request.urlopen(url)
        except HTTPError as e:
            # Use the named constant instead of a magic 429 literal.
            if e.code == MAX_REQUEST_HTTP_CODE:
                logging.warning('Exceeded threshold, will retry')
                raise MaxReqsException(e.code)
            raise
        try:
            return json.loads(f.read().decode('utf-8'))
        finally:
            # Close the response to avoid leaking the socket (the original
            # never closed it).
            f.close()

    def api_call(self, url, priority=10, callback=None):
        """If callback is specified, the call is added to the priority queue
        (highest priority being HIGHEST_PRIORITY); when completed it is
        pushed onto self.result_queue and can be popped off at any time.

        If callback is not specified this function blocks until it gets a
        response and then returns the data.

        The rate-limiting logic handles both types of queries in conjunction.
        """
        if callback is not None:
            priority = max(HIGHEST_PRIORITY, priority)
            self.request_queue.put((priority, url, callback))
            return None
        while True:
            timeleft = self._constraint_check()
            if timeleft > 0:
                time.sleep(timeleft)
            else:
                break
        try:
            data = self.issue_api_call(url)
        except MaxReqsException:
            # Make another call if we've been throttled for some reason.
            return self.api_call(url)
        return data

    @staticmethod
    def game_by_summoner(sid):
        """Build the recent-games URL for summoner id *sid*."""
        parms = urlencode({'api_key': API_KEY})
        return '{0}/v1.3/game/by-summoner/{1}/recent?{2}'.format(BASE_URL, sid, parms)
if __name__ == '__main__':
    # Smoke test: 20 direct (unqueued) API calls for champion id 102.
    # NOTE(review): these bypass _constraint_check, so they may trip the
    # rate limit and raise MaxReqsException — confirm that's acceptable.
    for _ in range(20):
        print(ApiWrapper.issue_api_call('https://na.api.pvp.net/api/lol/na/v1.2/champion/102?api_key={0}'.format(API_KEY, )))
| true |
387c054696079f389983c3bfdae887a398fe4b05 | Python | brenda151295/scripts_AmazonBooks | /insertTableSalesRank.py | UTF-8 | 1,752 | 2.734375 | 3 | [] | no_license | # Importing MongoClient.
from pymongo import MongoClient
# Importing MySQL Connector.
import mysql.connector
import pymysql
# Connecting to MySQL.
# NOTE(review): credentials are hard-coded here; consider loading them from
# the environment or a config file.
mysql_conn = mysql.connector.connect(user='root', password='1234', port="3306", host='127.0.0.1', database='books_dataset')
# Getting the product details from 'asin'.
def getRankingAsin(asin):
    """Look up a book by ASIN in MongoDB and return its sales-rank record.

    Queries the 'books_only' collection of the 'Amazon' database; if that
    query raises, falls back to the 'metadata' collection. Returns the
    matching document (asin, title, salesRank.Books, price) or None.

    NOTE(review): a successful-but-empty 'books_only' query returns None
    without consulting 'metadata' — confirm that is the intended behavior.
    """
    client = MongoClient()
    db = client.Amazon
    # Only documents that have both a title and a Books sales rank qualify.
    dbFilter = {"asin": asin, "title": {'$exists': True}, "salesRank.Books": {'$exists': True}}
    fields = {"asin": 1, "title": 1, "salesRank.Books": 1, "price": 1}
    try:
        for item in db.books_only.find(dbFilter, fields).limit(1):
            return item
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        try:
            for item in db.metadata.find(dbFilter, fields).limit(1):
                return item
        except Exception:
            return None
    finally:
        # Always release the MongoDB connection.
        client.close()
def getBooksAsin():
    """Return the list of every ASIN stored in the MySQL 'nodes' table."""
    sql = "SELECT asin from nodes;"
    cursor = mysql_conn.cursor()
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
    finally:
        # Close the cursor (the original leaked it).
        cursor.close()
    # Each row is a 1-tuple; keep only the asin column.
    return [row[0] for row in result]
def insert():
    """Copy asin/title/salesRank/price for every known book into MySQL.

    For each ASIN from the 'nodes' table, fetches the MongoDB record via
    getRankingAsin() and inserts it into books_salesRank, committing after
    each row. ASINs missing from MongoDB are skipped; a missing price
    defaults to 0.
    """
    books = getBooksAsin()
    # Parameterized statement: the driver escapes the values, preventing
    # SQL injection (the original built SQL by string concatenation with
    # pymysql.escape_string).
    sql = ("INSERT INTO books_salesRank (asin, title, salesRank, price) "
           "VALUES (%s, %s, %s, %s)")
    # One cursor for the whole run (the original opened one per row and
    # never closed them).
    cursor = mysql_conn.cursor()
    try:
        for asin in books:
            item = getRankingAsin(asin)
            if item is None:
                continue
            price = item.get('price', 0)
            cursor.execute(sql, (item['asin'], item['title'],
                                 item['salesRank']['Books'], price))
            mysql_conn.commit()
    finally:
        cursor.close()
# Run the import when this module is executed.
# NOTE(review): consider guarding with `if __name__ == '__main__':` so the
# import doesn't fire when the module is merely imported.
insert()
a99267e5f9797818aa37abe12a12462b266fea13 | Python | spuddie1984/Python3-Basics-Book-My-Solutions | /Graphical User Interfaces/review_exercises_geometry_manager.py | UTF-8 | 694 | 3.65625 | 4 | [] | no_license | import tkinter as tk
'''
1. Try to re-create all the screenshots in this section without looking
at the source code
'''
# Root window for the exercise.
window = tk.Tk()

# Three coloured frames packed left-to-right; fill=BOTH with expand=True
# lets each frame grow with the window in both directions.
frame1 = tk.Frame(master=window, width=500, height=100, bg="red")
frame1.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
frame2 = tk.Frame(master=window, width=100, bg="yellow")
frame2.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)
frame3 = tk.Frame(master=window, width=50, bg="blue")
frame3.pack(fill=tk.BOTH, side=tk.LEFT, expand=True)

# Start the Tk event loop (blocks until the window is closed).
window.mainloop()
'''
2. Below is an image of a window made with Tkinter (refer to page 592). Try to re-create
the window using the techniques you’ve learned thus far. You may
use any geometry manager you like.
'''
aa6e9b87ddcd785d96235d43b41c314d9967ab60 | Python | LukeChai/PythonExercise | /python basic/dataStructure.py | UTF-8 | 1,399 | 4.375 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Filename: dataStructure.py

# Data structures

# Basic list operations
a = [3, 2, 34, 12.3]
print(a)
# Append an element to the end of the list
a.append(15)
print(a)
# Look up the position (index) of an element
print(a.index(34))
# Remove an element
a.remove(2)
print(a)
# Reverse the list in place
a.reverse()
print(a)
# Sort the list in place
a.sort()
print(a)
# Basic tuple operations; a tuple's values cannot be changed
b = ("a", "b", "c")
print(b)
print(len(b))
# Indexing starts at 0
print(b[1])
# Dictionary: a data structure mapping keys to values; keys are unique
c = {"a" : "aaa", "b" : "bbb", "c" : "ccc"}
print(c)
print(c["a"])
del c["b"]
c["d"] = "ddd"
for name, value in c.items():
    print(name, value)
if "d" in c:
    print("d is in c")
# Lists, tuples and strings are all sequences; sequences support
# indexing and slicing operations
d = "world"
print(d[0])
print(d[1])
print(d[-1])
e = ["a", "b", "c", "d"]
print(e[:])
print(e[1:2])
print(e[0:-1])
print(e[2:])
# References vs. copies
f = [1, 2, 3, 4]
# Reference: both names point to the same underlying list
g = f
print(f)
print(g)
del f[0]
print(f)
print(g)
# Copy: slicing produces a new, independent list
h = f[:]
del f[0]
print(f)
print(h)
# More string operations
i = "world"
if i.startswith("wor"):
    print("i starts with 'wor'")
if "o" in i:
    print("'o' is in i")
# NOTE(review): find() returns an index (or -1), so this condition is truthy
# even when "ld" is absent (-1 is truthy); 'in' or '!= -1' would be correct.
if i.find("ld"):
    print("'ld' is found in i")
# Join the list values using a delimiter
delimiter = "_"
j = ["a", "b", "c"]
print(delimiter.join(j))
| true |
92ef8c5b517957114740debdaab86be468889bf8 | Python | jacenfox/psd-tools2 | /src/psd_tools/utils.py | UTF-8 | 2,746 | 2.84375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import sys
import struct
import array
# Python 2/3 compatibility shim: unichr() was removed in Python 3, where
# chr() already returns unicode characters.
try:
    unichr = unichr
except NameError:
    unichr = chr
def unpack(fmt, data):
    """Unpack *data* using struct format *fmt*, forced to big-endian."""
    return struct.unpack(str(">" + fmt), data)
def read_fmt(fmt, fp):
    """
    Reads data from ``fp`` according to big-endian struct format ``fmt``.
    """
    full_fmt = str(">" + fmt)
    size = struct.calcsize(full_fmt)
    chunk = fp.read(size)
    # A short read means the stream is truncated or mis-positioned.
    assert len(chunk) == size, (len(chunk), size)
    return struct.unpack(full_fmt, chunk)
def pad(number, divisor):
    """Round *number* up to the nearest multiple of *divisor*."""
    remainder = number % divisor
    return number + (divisor - remainder) if remainder else number
def read_pascal_string(fp, encoding, padding=1):
    """Read a length-prefixed (Pascal) string from ``fp``.

    The whole record (1 length byte + characters) is padded to a multiple
    of ``padding`` bytes; the pad bytes are consumed and discarded.
    Undecodable bytes are replaced rather than raising.
    """
    length = read_fmt("B", fp)[0]
    if not length:
        # Empty string: still skip the pad bytes that follow the length byte.
        fp.seek(padding - 1, 1)
        return ''
    raw = fp.read(length)
    # Record size rounded up to *padding*, minus 1 for the length byte
    # already consumed; the difference from *length* is the pad to skip.
    data_length = pad(length + 1, padding) - 1
    fp.seek(data_length - length, 1)
    return raw.decode(encoding, 'replace')
def read_unicode_string(fp):
    """Read a 4-byte-length-prefixed big-endian UTF-16 string from ``fp``."""
    num_chars = read_fmt("I", fp)[0]
    raw = fp.read(num_chars * 2)
    code_units = be_array_from_bytes("H", raw)
    return "".join(map(unichr, code_units))
def read_be_array(fmt, count, fp):
    """
    Reads an array of ``count`` items of typecode ``fmt`` from a file
    containing big-endian data.
    """
    arr = array.array(str(fmt))
    raw = fp.read(count * arr.itemsize)
    # Python 3 spells it frombytes(); fromstring() is the Python 2 name.
    loader = arr.frombytes if hasattr(arr, 'frombytes') else arr.fromstring
    loader(raw)
    return fix_byteorder(arr)
def fix_byteorder(arr):
    """
    Fixes the byte order of ``arr`` in place (assuming it was read from
    big-endian data) and returns it.
    """
    # sys.byteorder is either 'little' or 'big'; only swap on little-endian
    # hosts.
    if sys.byteorder != 'big':
        arr.byteswap()
    return arr
def be_array_from_bytes(fmt, data):
    """
    Builds an array from a bytestring containing big-endian data.
    """
    return fix_byteorder(array.array(str(fmt), data))
def trimmed_repr(data, trim_length=30):
    """repr() of *data*; long bytestrings are truncated to *trim_length*
    bytes followed by a ``... =<total length>`` marker."""
    if isinstance(data, bytes) and len(data) > trim_length:
        marker = b' ... =' + str(len(data)).encode('ascii')
        return repr(data[:trim_length] + marker)
    return repr(data)
def synchronize(fp, limit=8):
    """Scan up to *limit* bytes ahead of the current position for a known
    signature and seek ``fp`` to it; restore the original position and
    return False when none is found.

    This is a hack for the cases where the PSD format was given up on.
    Signatures are tried in order, so b'8BIM' wins over b'8B64' even when
    the latter occurs earlier in the window.
    """
    start = fp.tell()
    window = fp.read(limit)
    for signature in (b'8BIM', b'8B64'):
        offset = window.find(signature)
        if offset >= 0:
            fp.seek(start + offset)
            return True
    fp.seek(start)
    return False
def decode_fixed_point_32bit(data):
    """
    Decodes ``data`` as an unsigned 4-byte big-endian fixed-point number
    (integer part in the high 16 bits, fraction in the low 16 bits).
    """
    integer_part, fraction_part = struct.unpack(str(">2H"), data)
    # XXX: shouldn't denominator be 2**16 ?
    return integer_part + fraction_part / (2**16 - 1)
| true |
f6187ec82a6e8091edc5f45deface464fdd87115 | Python | AbbyGeek/CodeWars | /8kyu/Calculate Average.py | UTF-8 | 112 | 3.078125 | 3 | [] | no_license | def find_average(array):
if len(array) == 0:
return 0
else:
return sum(array)/len(array) | true |