blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7b62691a1c877d01e69fc89db89745678495039e | Python | ankan17/popper | /cli/popper/commands/cmd_parameters.py | UTF-8 | 2,491 | 2.5625 | 3 | [
"MIT"
] | permissive | import click
import popper.utils as pu
import sys
from popper.cli import pass_context
from popper.exceptions import UsageError
@click.command('parameters', short_help='Parametrize a pipeline.')
@click.argument('pipeline', required=False)
@click.option(
    '--add',
    multiple=True,
    help='Add a set of environment variables in the form of key=value',
    required=False
)
@click.option(
    '--rm',
    multiple=True,
    help='Remove a set of environment variable in the form of key=value',
    required=False
)
@pass_context
def cli(ctx, pipeline, add, rm):
    """Define or remove parametrized executions of a pipeline.

    With --add, appends one dict of key=value pairs to the pipeline's
    'parameters' list; with --rm, removes the matching dict.  With
    neither flag, prints the current parametrization.
    """
    if not pipeline:
        # No pipeline given: fall back to the pipeline of the cwd.
        get_pipe = pu.in_pipeline(name=True)
        if get_pipe is not None:
            pipeline = get_pipe
        else:
            pu.fail("This is not a pipeline")

    config, pipeline_config = pu.read_config(pipeline)

    if add and rm:
        # Fixed stale command name (was "popper env-vars --help").
        raise UsageError("Both add and rm cannot be given at the same time. "
                         "See popper parameters --help for more information.")

    if add:
        env_vars = pipeline_config.get('parameters', [])
        vars_add = {}
        for var in add:
            # maxsplit=1 so values may themselves contain '='.
            key, val = var.split('=', 1)
            vars_add[key] = val
        env_vars.append(vars_add)
        pu.update_config(pipeline, parameters=env_vars)
        sys.exit(0)

    if rm:
        env_vars = pipeline_config.get('parameters', None)
        if not env_vars:
            pu.fail("No parameters defined for this pipeline.")
        vars_del = {}
        for var in rm:
            key, val = var.split('=', 1)
            vars_del[key] = val
        index = -1
        for candidate in env_vars:
            # Compare key sets (the original compared only the lengths and
            # could hit a KeyError on same-size dicts with different keys).
            if candidate.keys() != vars_del.keys():
                continue
            if all(candidate[key] == vars_del[key] for key in vars_del):
                index = env_vars.index(candidate)
                break
        if index == -1:
            pu.fail("Unable to find this parametrization in this pipeline.")
        env_vars.pop(index)
        pu.update_config(pipeline, parameters=env_vars)
        sys.exit(0)

    # Neither --add nor --rm: show the current parametrization, if any.
    try:
        env_vars = pipeline_config['parameters']
        if len(env_vars) == 0:
            raise KeyError
        pu.print_yaml(env_vars)
    except KeyError:
        pu.info("No parameterization defined for this pipeline.")
| true |
c4571a920987e85e13fba28e7a3eaf9d5c119662 | Python | wolfdale/10MinutesEmailWrapper | /tinker.py | UTF-8 | 157 | 2.625 | 3 | [
"MIT"
] | permissive | # Use this to copy to clipboard
from Tkinter import Tk

# Use a hidden Tk root window to push text onto the system clipboard.
root = Tk()
root.withdraw()
root.clipboard_clear()
root.clipboard_append('i can copy to clipboardz?')
# NOTE(review): on some platforms the clipboard content is dropped once
# the owning Tk instance is destroyed — verify if persistence is needed.
root.destroy()
| true |
33a2b73840a156308c375037b36e3e07bbd52218 | Python | zuzinmeko/Python_lessons | /Chapter1/numError.py | UTF-8 | 231 | 3.125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 12:16:58 2021
@author: hp
"""
from math import sinh, exp, e, pi
x = 2*pi
r1 = sinh(x)
r2 = 0.5*(exp(x) - exp(-x))
r3 = 0.5*(e**x - e**(-x))
print('%.16f %.16f %.16f' % (r1,r2,r3)) | true |
900dcfbea1790d43f3c68324dcf3df3afa194321 | Python | WalLee2/holbertonschool-higher_level_programming | /0x04-python-more_data_structures/10-best_score.py | UTF-8 | 145 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
def best_score(my_dict):
    """Return the key with the highest value in *my_dict*, or None if empty."""
    if not my_dict:
        return None
    return max(my_dict, key=my_dict.get)
| true |
1da265d06ef1143bdeefcc36b22892e52c39b6f0 | Python | leekyeongm727/pythonProject | /chapter4/4-4.py | UTF-8 | 132 | 2.921875 | 3 | [] | no_license | n,m = map(int, input().split())
x,y,d = map(int, input().split()) # position (x, y) the character stands at, and the direction d it faces
| true |
26938f2bd4d9e5b010bffa0251d4041bf6639f72 | Python | MellaLee/hello-vue-django | /backend/algorithm/step/preprocessing.py | UTF-8 | 525 | 2.734375 | 3 | [
"Unlicense"
] | permissive | #暂时无需对数据进行预处理,此文件不需要运行
import pandas as pd
import os

# No real preprocessing is needed yet; this module only previews the raw CSVs.
date = '2018.06.04'
csv_file_path = r'D:\GraduationThesis\graduation-code\hello-vue-django\backend/algorithm/download/originData/' + date


def startRun():
    """Print the name and first rows of every CSV under csv_file_path."""
    os.chdir(csv_file_path)
    files = os.listdir(csv_file_path)
    for filename in files:
        print(filename)
        # gbk encoding: the source files come from a Chinese-locale export.
        df = pd.read_csv(filename, header=None, encoding='gbk')
        print(df.head())
    return


# Bug fix: the guard compared against '__preprocessing' and never fired.
if __name__ == '__main__':
    startRun()
1dfe1b4973d6a482a4ccc9583213e7b494e3a9c8 | Python | basaima/----------- | /定义函数.py | UTF-8 | 803 | 3.8125 | 4 | [] | no_license | '''def mya(x):
# x=int(input(':>'))
if not isinstance(x,(int,float)):#对参数类型做检查,只允许整数和浮点数类型的参数数据类型检查可以用内置函数isinstance()实现
raise TypeError('bad op type')
#if not isinstance(x, (int, float)):
# raise TypeError('bad operand type')#
if x>=0:
print (x)
return x
else:
print ("-")
return -x
mya(4.35556)
mya(-9)'''
import math
def quadratic(a,b,c):
if not isinstance(a,(int,float)):
raise TypeError("bad operand type")
y=b*b-4*a*c
if b*b-4*a*c>=0:
print ('Y')
x1=(-b+math.sqrt(y))/(2*a)
x2=(-b-math.sqrt(y))/(2*a)
print(x1,x2)
return x1,x2
else :
print ("无解")
quadratic(1,8,3)
quadratic(2,2,4)
| true |
3e638d3e84875ece33777569bdcb102d9173e217 | Python | tiqwab/atcoder | /abc214/c/main.py | UTF-8 | 561 | 2.53125 | 3 | [] | no_license | def main():
    # N positions in a ring; ss[i] is the cost to go from i to i+1 and
    # ts[i] is the time a value first appears at position i on its own.
    N = int(input())
    ss = [int(x) for x in input().split(' ')]
    ts = [int(x) for x in input().split(' ')]
    # Find the position with the globally smallest own time; starting the
    # circular sweep there means no earlier value can arrive from behind it.
    min_idx = -1
    min_t = 1000 * 1000 * 1000 + 10
    for i, t in enumerate(ts):
        if t < min_t:
            min_idx = i
            min_t = t
    # One pass around the ring: each position gets the smaller of its own
    # time and the propagated time from its predecessor plus ss.
    acc = min_t
    i = min_idx
    ans = [1 << 60] * N
    for _ in range(0, N):
        # print(i, acc)
        ans[i] = min(ts[i], acc)
        acc = ans[i] + ss[i]
        i = (i + 1) % N
    for i in range(0, N):
        print(ans[i])


if __name__ == '__main__':
    main()
| true |
7f4e29b658017688565963c06b3b2880ca11c58d | Python | ronnyandersson/zignal | /examples/ex_chunks.py | UTF-8 | 2,547 | 3.40625 | 3 | [
"MIT"
] | permissive | '''
Created on 12 Apr 2020
@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2020 Ronny Andersson
@license: MIT
Demo of how to iterate over an instance of the Audio class, for chunk-based
processing. Typically the chunks have a size that is a power of two, for
example 256, 1024 or 4096. In this example the chunk size is set to 1000
for simplicity in the plots. The sample rate in this example is also set to
a value that enhances the effect of the example, since hera a chunk equals
to one second of data.
'''
# Standard library
import logging
# Third party
import matplotlib.pyplot as plt
import numpy as np
# Internal
import zignal
if __name__ == '__main__':
    logging.basicConfig(
        format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
        level='DEBUG',
    )
    logging.getLogger("matplotlib").setLevel(logging.INFO)
    logging.getLogger("zignal").setLevel(logging.DEBUG)
    # fs chosen so that one 1000-sample chunk equals one second of data.
    fs = 1000
    # Create various ramp signals, to visualise the chunks better. Not real
    # audio, but shows in a plot what the chunks look like
    a1 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000/2)))
    a2 = zignal.Audio(fs=fs, initialdata=np.linspace(0, -1, num=(1000*1)+500))
    a3 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000*2)+200))
    a = zignal.Audio(fs=fs)
    a.append(a1, a2, a3)
    print(a)
    # We now have 2.2 seconds of audio in three channels. This does not add up
    # to even chunk sizes, so padding will have to be done in order to iterate.
    #
    # Three (3) chunks are expected.
    for val in a.iter_chunks(chunksize=1000):
        print("------------------------------------------------")
        print("shape of data in chunk: %s" % str(val.shape))
        print(val)
        plt.figure(1)
        plt.plot(val[:, 0], ls="-", label="a1")
        plt.plot(val[:, 1], ls="--", label="a2")
        plt.plot(val[:, 2], ls="-.", label="a3")
    plt.grid()
    plt.ylim(-1.1, 1.1)
    plt.xlabel("samples in chunk")
    plt.ylabel("magnitude [lin]")
    plt.legend(loc="upper right")
    plt.show()
    # We can pad beforehand if we know how many samples are missing, then no
    # padding will occur inside the iterator
    b = a.copy()
    b.gain(-20) # just to get a debug logging entry
    b.pad(nofsamples=800)
    print(b)
    for val in b.iter_chunks(chunksize=1000):
        print("------------------------------------------------")
        print("shape of data in chunk: %s" % str(val.shape))
        print(val)
    print('-- Done --')
| true |
e418f6cc0874131a6f6354e7b847fde0281ed10e | Python | lumc-python/functions-bdmvantol | /Functions_Exercise 1.py | UTF-8 | 233 | 3.4375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 10:37:28 2019
@author: Bianca
"""
def max(number1, number2):
if number1 > number2:
return(number1)
else:
return(number2)
print(max(13,16))
print(max(25,3)) | true |
424c0377c193a830799020c8a43b8879ad397bc0 | Python | fernandaassi/minicurso-python | /minicurso-Python/aula-2/listas/ex-6.py | UTF-8 | 420 | 4.5 | 4 | [] | no_license | '''
***** Exercício 6 *****
Dada a seguinte lista: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] imprima somente os números pares
dessa lista.
'''
# Build the list of numbers 1..10.
lista = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Keep only the even values.  In this list they happen to sit at the odd
# indexes, but a comprehension states the intent directly.
pares = [n for n in lista if n % 2 == 0]
print("Números pares da lista: {}".format(pares))
| true |
3f6ff09a8bd21d80a95ad5474b8cd6bf76c0cf9b | Python | luisa-brueser/OSESM_1st-Homework | /function.py | UTF-8 | 1,034 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed May 6 09:32:28 2020
@author: zwickl-nb
"""
# function from the lecture for testing
def add(a, b):
    """Return the sum of the two operands."""
    result = a + b
    return result
# check whether enough capacities are available
# note: yet not able to include a panda framework to test_validate
# that is why data type int is used in test_validate
def validate(cap, d):
    """Return 1 when total capacity covers total demand, else 0."""
    if type(cap) == int and type(d) == int:
        # plain-number test case
        capacity_total = cap
        demand_total = d
    else:
        # pandas case: collapse the frames to scalar totals
        capacity_total = sum(sum(cap.values))
        demand_total = sum(d.values)
    return 1 if capacity_total >= demand_total else 0
# check whether enough renewable energy is available
# in this case: "priority feed" for renewable energy and no optimization is necessary
def only_renewable(cap, d):
    """Return 1 when renewable capacity alone can cover the demand, else 0."""
    if type(cap) == int and type(d) == int:
        # plain-number test case: capacity must strictly exceed demand
        return 1 if cap > d else 0
    # pandas case: compare the wind capacity against the demand values
    return 1 if cap['wind'][0] >= d.values else 0
| true |
ffd70d93a5121222a71190938f4a85b88f8cd8a1 | Python | shengng325/LeetCode.py | /subsetSum.py | UTF-8 | 470 | 3.125 | 3 | [] | no_license | def allSubsetSum(array):
    # Collect every subset sum of *array* via exhaustive include/exclude
    # recursion; the order of results follows the include-first traversal.
    subsetSumArray = []
    allSubsetSumHelper(array, 0, subsetSumArray, 0)
    return subsetSumArray
def allSubsetSumHelper(array, idx, subsetSumArray, curSum):
    # Depth-first walk: at index idx either add array[idx] to the running
    # sum or skip it; record curSum once every element has been decided.
    if idx >= len(array):
        subsetSumArray.append(curSum)
        return
    # include array[idx] first, then exclude it
    allSubsetSumHelper(array, idx + 1, subsetSumArray, array[idx] + curSum)
    allSubsetSumHelper(array, idx + 1, subsetSumArray, curSum)
# Demo: print all 2**3 subset sums of [2, 4, 5].
array = [2, 4, 5]
results = allSubsetSum(array)
print(results)
cf9f6e3dc97b16a0136d233d5597e0f7583ef4cc | Python | littlebai3618/bspider | /bspider/master/controller/node.py | UTF-8 | 2,750 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | """
所有和 Agent 进行交互的操作收缩到 node 蓝图下
1. 提供注册接口用于注册新 node
2. 删除接口删除节点 node 停止节点下所有work => 删除worker 表中节点对应的所有信息 => 停止Agent进程(supervisor)
4. 查询接口查询 节点信息 返回节点和节点下已经注册的所有worker信息
5. 停止节点
6. 启动节点
5. 在节点下启动并在master注册一个worker 启动进程 => 成功后注册进表单
6. 在节点下停止一个 worker 停止进程 status 设置为0
7. 在节点下重启一个 worker 停止进程、启动进程
8. 删除一个 worker 停止进程并删除注册信息
9. 查询 worker 得到节点下所有 worker 信息
"""
from flask import Blueprint
from bspider.core.api import auth
from .validators import PageForm
from .validators.node_forms import AddNodeForm, UpdateNodeForm, AddWorkerForm, UpdateWorkerForm
from bspider.master.service.node import Node
# Blueprint exposing the node/worker management REST endpoints; every
# handler delegates to the shared Node service instance below.
node = Blueprint('node_bp', __name__)
node_service = Node()
@node.route('/node', methods=['POST'])
@auth.login_required
def add_node():
    """Register a new node from the validated form fields."""
    form = AddNodeForm()
    return node_service.add_node(**form.to_dict())
@node.route('/node/<int:node_id>', methods=['DELETE'])
@auth.login_required
def delete_node(node_id):
    """Delete the node with the given id."""
    return node_service.delete_node(node_id)
@node.route('/node/<int:node_id>', methods=['PATCH'])
@auth.login_required
def update_node(node_id):
    """Partially update a node with the validated form fields."""
    form = UpdateNodeForm()
    return node_service.update_node(node_id, **form.to_dict())
@node.route('/node', methods=['GET'])
@auth.login_required
def get_nodes():
    """List nodes, paginated by the PageForm parameters."""
    form = PageForm()
    return node_service.get_nodes(**form.to_dict())
@node.route('/node/<int:node_id>', methods=['GET'])
@auth.login_required
def get_node(node_id):
    """Return info for a single node."""
    return node_service.get_node(node_id)
@node.route('/worker', methods=['POST'])
@auth.login_required
def add_worker():
    """Register a worker on a node; status is coerced to int for storage."""
    form = AddWorkerForm()
    return node_service.add_worker(form.ip.data, form.name.data, form.type.data, form.description.data, int(form.status.data))
@node.route('/worker/<int:worker_id>', methods=['DELETE'])
@auth.login_required
def delete_worker(worker_id):
    """Delete the worker with the given id."""
    return node_service.delete_worker(worker_id)
@node.route('/worker/<int:worker_id>', methods=['PATCH'])
@auth.login_required
def update_worker(worker_id):
    """Partially update a worker with the validated form fields."""
    form = UpdateWorkerForm()
    return node_service.update_worker(worker_id, **form.to_dict())
@node.route('/worker', methods=['GET'])
@auth.login_required
def get_workers():
    """Return info for all workers, paginated."""
    form = PageForm()
    return node_service.get_workers(**form.to_dict())
@node.route('/worker/<int:worker_id>', methods=['GET'])
@auth.login_required
def get_worker(worker_id):
    """Return info for a single worker."""
    return node_service.get_worker(worker_id)
249876f932451357a66c1a97efe3eccf161e3124 | Python | drewvlaz/CODE2RACE | /SOLUTIONS/yP_ASCIIProduct.py | UTF-8 | 92 | 3.375 | 3 | [] | no_license | var= input()
# Multiply together the ASCII code points of every character in var
# (read from stdin above).  Iterates the string directly instead of
# copying it into a list and indexing with range(len(...)).
x = 1
for ch in var:
    x *= ord(ch)
print(x)
| true |
92989873a31ff82a1f6b57c7f5fa369b7c63e5b0 | Python | HitanshuSoni/Python_practice | /DemoTupleToList.py | UTF-8 | 265 | 3.109375 | 3 | [] | no_license | tup1=(32,43,65,67,[54,89],98,76,(65,94,92));
# Rebuild tup1 so that every nested tuple has its first element set to 99.
# Tuples are immutable, so each one is round-tripped through a list.
lst1 = list(tup1)
for i, val in enumerate(lst1):
    if isinstance(val, tuple):
        patched = list(val)
        patched[0] = 99
        lst1[i] = tuple(patched)
tup1 = tuple(lst1)
print(tup1)
| true |
884ad78ec019f27a3121d8740188285fbf3de707 | Python | 13scoobie/advent-of-code | /2020/day3/problem_3_part_1.py | UTF-8 | 1,381 | 3.796875 | 4 | [] | no_license | # Turns out you get greeted with this message:
# Puzzle inputs differ by user. Please log in to get your puzzle input.
# which is pretty cool :)
# might look later at cookie or something, just use browser and copy for now
"""
import requests
def get_puzzle():
resp = requests.get("https://adventofcode.com/2020/day/3/input")
print(resp.content)
get_puzzle()
"""
TREE = "#"
PATH = "."
def load_file(filename):
for row in open(filename, "r"):
yield row
def load_puzzle_into_list():
    """Read ./input.txt into a grid: one list of characters per line,
    with the trailing newline stripped.

    (The enumerate index of the original was never used and was dropped.)
    """
    grid = []
    for line in load_file("./input.txt"):
        grid.append([char for char in line if char != "\n"])
    return grid
def traverse_puzzle(puzzle):
    """Count trees hit on the slope right 3 / down 1, wrapping horizontally.

    Starts at the top-left corner and steps one row down and three
    columns right (modulo the row width) until the last row is reached,
    counting cells equal to TREE.

    Cleanups vs. the original: the dead cur_col variable is gone, the
    wrap-around uses modulo instead of a manual subtraction, and the
    try/except that silently printed on IndexError was removed (the loop
    bound and the modulo make that case impossible for rectangular grids).
    """
    cur_row = new_col = hit_tree = 0
    while cur_row < len(puzzle) - 1:
        new_col = (new_col + 3) % len(puzzle[cur_row + 1])
        if puzzle[cur_row + 1][new_col] == TREE:
            hit_tree += 1
        cur_row += 1
    return hit_tree
def solve():
    """Load the puzzle grid, count the trees on the slope, and print it."""
    puzzle = load_puzzle_into_list()
    # for row in puzzle:
    #     print(row)
    num_of_trees = traverse_puzzle(puzzle)
    print(num_of_trees)
# Runs immediately on import/execution (no __main__ guard in the original).
solve()
| true |
a2cac84658f3fb38615671f19a9e1f88d8d8f531 | Python | anarkia7115/g4g | /coin_change/sol.py | UTF-8 | 2,477 | 3.0625 | 3 | [] | no_license | MAX_COIN_SIZE = 399
class CoinChange(object):
    """Count coin-change combinations with memoization.

    Orderings are deduplicated by only allowing coins no larger than the
    previously used coin (last_coin_size).  The memo is keyed on
    (remaining amount before subtracting the current coin, current coin).
    """
    def __init__(self):
        # memo: {big_money: {coin_change: methods_count}}
        self.methods_limited_by_coin = dict()
        pass
    def save_and_return(self, big_money, coin_change, methods_count):
        # Record the computed count in the memo and pass it through.
        if big_money not in self.methods_limited_by_coin:
            self.methods_limited_by_coin[big_money] = dict()
        self.methods_limited_by_coin[big_money][coin_change] = methods_count
        return methods_count
    def change_methods(self, coins, big_money, coin_change, last_coin_size):
        # big_money = change + changed_money
        # change_methods(big_money) = 1 + change_methods(changed_money)
        if big_money in self.methods_limited_by_coin:
            if coin_change in self.methods_limited_by_coin[big_money]:
                return self.methods_limited_by_coin[big_money][coin_change]
        orig_big_money = big_money
        # Subtract the coin chosen by the caller; coin_change is 0 at the top level.
        big_money = big_money - coin_change
        # get method candidates
        change_coin_candidates = []
        for coin in coins:
            # Non-increasing coin order prevents counting permutations twice.
            if coin > last_coin_size:
                continue
            small_money = big_money - coin
            if small_money >= 0: # legal change method
                change_coin_candidates.append(coin)
        if big_money == 0:
            # Exact change reached: exactly one way from here.
            return self.save_and_return(orig_big_money, coin_change, 1)
        elif len(change_coin_candidates) == 0:
            # Money left but no usable coin: dead end.
            return 0
        else:
            # sum
            num_change_methods = sum(
                [self.change_methods(coins, big_money, change_coin, change_coin) for change_coin in change_coin_candidates]
            )
            # self.change_methods_of[big_money] = num_change_methods
            return self.save_and_return(orig_big_money, coin_change, num_change_methods)
def main(file_handle):
    """Read test cases from *file_handle* and print each change count.

    Input format per case: array size line (unused beyond parsing),
    the coin denominations, then the target amount.
    """
    test_case_num = int(file_handle.readline().strip())
    for _ in range(test_case_num):
        arr_size = int(file_handle.readline().strip())
        # Fresh solver per case so memo entries do not leak across cases.
        sol_instance = CoinChange()
        coins = [int(coin) for coin in file_handle.readline().strip().split()]
        big_money = int(file_handle.readline().strip())
        print(sol_instance.change_methods(coins, big_money,
                                          coin_change=0,
                                          last_coin_size=MAX_COIN_SIZE))
if __name__ == "__main__":
    import fileinput
    f = fileinput.input()
    # accepted!
    main(f)
    # input_file = "./input.txt"
    # with open(input_file, 'r') as f:
    #     main(f)
| true |
805f46ee69b508241c5e28de4433519e6348c581 | Python | raghuu14/Scrambling-of-text | /scrambled.py | UTF-8 | 506 | 2.921875 | 3 | [] | no_license | import random
f = open("C:\\Users\\Admin\\Documents\\my doc\\sample.txt","r")
f1 = open("C:\\Users\\Admin\\Documents\\my doc\\samplescrambled.txt","w")
newone = f.read()
word1 = newone.split()
for i in word1 :
if len(i) <= 3 :
f1.write(i)
f1.write(" ")
else :
data = i[1:-1]
we = list(data)
random.shuffle(we)
words = "".join(i[0]) + "".join(we) + "".join(i[-1])
f1.write(words)
f1.write(" ")
f.close()
f1.close() | true |
d2445d8fafc27fcfc18924064f1699409fd9345c | Python | ADExxxxxx/local_libary | /机器学习相关/SupportVector.py | UTF-8 | 2,761 | 2.921875 | 3 | [] | no_license | """
item: 支持向量机相关算法实现
author by WIT_lyp
timestamp: 2019/11/6 18:14
"""
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVC
from datetime import datetime
from sklearn.metrics import accuracy_score, \
precision_score, recall_score, f1_score, cohen_kappa_score
import numpy as np
from pyecharts import Line
import matplotlib.pyplot as plt
"""
支持向量机类:实现SVM算法训练
@:param :[kernel] 核函数:"rbf", "poly", "sigmoid"
@:param :[C] 惩罚因子
@:param :[function] 多分类策略:"ovo", "ovr"
"""
class SupportVector:
    """Train and evaluate an SVM classifier, then plot predictions.

    kernel: SVC kernel name ("rbf", "poly", "sigmoid"); C: penalty
    factor; function: multiclass strategy ("ovo" or "ovr").
    datasets is expected to be a dict with "x_train", "y_train",
    "x_test", "y_test" entries (numpy-array-like) -- TODO confirm
    against the caller.
    """
    def __init__(self, kernel, C, function):
        self.kernel = kernel
        self.C = C
        self.function = function
    def svm_model(self, datasets):
        # Fit an SVC on the training split and print timing/metrics.
        print("训练开始时间: ", datetime.now().strftime('%H:%M:%S'))
        svm = SVC(kernel=self.kernel, C=self.C, decision_function_shape=self.function).fit(datasets["x_train"], datasets["y_train"])
        print(svm)
        print("训练结束时间: ", datetime.now().strftime('%H:%M:%S'))
        imgs_pred = svm.predict(datasets["x_test"])
        # Count correct predictions on the test split.
        true = np.sum(imgs_pred == datasets["y_test"])
        print("预测结果正确的数目: ", true)
        print("预测结果错误的数目: ", datasets["y_test"].shape[0] - true)
        print("预测结果准确率: ", true / datasets["y_test"].shape[0])
        print(classification_report(svm.predict(datasets["x_test"]), datasets["y_test"]))
        # build the evaluation metrics
        print("\n\n########## 评价部分 ############\n\n")
        print('使用SVM预测的数据准确率为: ',
              accuracy_score(datasets["y_test"], imgs_pred))
        print('使用SVM预测的数据精确率为: ',
              precision_score(datasets["y_test"], imgs_pred, average="weighted"))
        print('使用SVM预测的数据召回率为: ',
              recall_score(datasets["y_test"], imgs_pred, average="weighted"))
        print('使用SVM预测的数据的F1值为: ',
              f1_score(datasets["y_test"], imgs_pred, average="weighted"))
        print("使用SVM预测的数据的Cohen's Kappa系数为: ",
              cohen_kappa_score(datasets["y_test"], imgs_pred))
        # Keep the fitted model for svm_predict.
        self.svm = svm
    def svm_predict(self, datasets):
        # Render a pyecharts line chart comparing the first 30 predictions
        # against the true labels; requires svm_model to have run first.
        pre = self.svm.predict(datasets["x_test"])
        line = Line("预测-原始标签对比图(SVM)")
        l = [i for i in range(30)]
        print(l)
        line.add("预测结果", l, pre[:30], is_smooth=True )
        line.add("原始标签", l, datasets["y_test"][:30], is_smooth=True)
        # line.show_config()
        line.render("svm.html")
if __name__ == '__main__':
    pass
c948c898e0ba49e1bccdcba614a972737164319f | Python | cepGH1/dfesw3cep | /pyth5.py | UTF-8 | 331 | 3.6875 | 4 | [] | no_license | colNum = int(input("how many columns would you like? "))
rowNum = int(input("how many rows would you like? "))
# Total number of cells in the requested grid.
print(colNum*rowNum)
# Start from a rowNum x colNum grid of zeros.
my2DArray = [[0] * colNum for _ in range(rowNum)]
print(my2DArray)
# Fill every cell with the product of its row and column index.
for r in range(rowNum):
    for c in range(colNum):
        my2DArray[r][c] = r * c
print(my2DArray)
| true |
6cf1b5e30d10795ee616e33e8073e3841fee6946 | Python | rlandesman/CS103-pythonDataStructures | /Huffman Project/huffman.py | UTF-8 | 13,889 | 3.78125 | 4 | [] | no_license | # ----------------< Module Imports >-----------------
import unittest
import os.path
import array_list
from linked_list import *
from huffman_bits_io import *
# ----------------< Class for Leaf >-----------------
class Leaf:
    """A terminal Huffman node: a character ordinal and its frequency."""

    def __init__(self, ordinal, count):
        self.ordinal = ordinal   # ASCII code of the character
        self.count = count       # number of occurrences

    def __eq__(self, other):
        if type(other) != Leaf:
            return False
        return self.ordinal == other.ordinal and self.count == other.count
# ----------------< Class for Node >-----------------
class Node:
    """An internal Huffman node joining two subtrees."""

    def __init__(self, count, left, right):
        self.ordinal = None      # filled in later (smaller child ordinal)
        self.count = count       # combined frequency of both subtrees
        self.left = left
        self.right = right

    def __eq__(self, other):
        # NOTE: ordinal is deliberately not part of the comparison.
        return (type(other) == Node
                and self.count == other.count
                and self.left == other.left
                and self.right == other.right)
# ----------------< Class for Leaf Nodes >-----------------
# root is either a Leaf or a Node
class HuffmanTree:
    """Thin wrapper holding the root (Leaf or Node) of a Huffman tree."""

    def __init__(self, root):
        self.root = root

    def __eq__(self, other):
        if type(other) != HuffmanTree:
            return False
        return self.root == other.root

    def __repr__(self):
        # count is always populated on the root, unlike ordinal
        return f"Tree({self.root.count!r})"
# String (file name) -> ArrayList
# Returns a 256-slot array whose entries count character occurrences.
def txt_to_freq(inputFile):
    """Build a 256-slot frequency array for the characters of *inputFile*.

    Slot ord(ch) - 1 holds the occurrence count of ch, and .size counts
    the total number of characters read (not the number of distinct ones).
    """
    f= open(inputFile, 'r')
    allLines = f.read()
    newList = array_list.empty_list()
    newList.array = [0] * 256
    newList.capacity = 256
    for x in allLines:
        temp = ord(x)
        # NOTE: the ord-1 storage offset is relied on by array_to_sortedList
        # and final_array -- keep them in sync.
        newList.array[temp-1] += 1
        newList.size += 1
    f.close()
    return newList
# Huffman node -> generator of (char, code) tuples
# Yields the character/code pair of every leaf reachable from inputTree.
def leaf_to_string(inputTree):
    """Yield (character, code) pairs for all leaves under *inputTree*.

    Bug fix: the original guard was ``myList is not Node`` -- an identity
    test against the class object itself, which is true for every
    instance and crashed with AttributeError when a Leaf was passed.  An
    isinstance check walks both subtrees of a Node and yields nothing
    for a bare Leaf.

    NOTE(review): iterate_htree is seeded with its default empty prefix
    at each child, so the yielded codes omit the root branch bit;
    huffman_encode only consumes the characters, so this is harmless
    there -- confirm before using the codes elsewhere.
    """
    if isinstance(inputTree, Node):
        yield from iterate_htree(inputTree.left)
        yield from iterate_htree(inputTree.right)
# HuffmanTree, HuffmanTree -> Boolean
# Ordering predicate for the sorted tree list: lower frequency first,
# with ties broken by the smaller ASCII ordinal.
def comes_before(a, b):
    """Return True when *a* should be ordered before *b*."""
    if a.root.count != b.root.count:
        return a.root.count < b.root.count
    # equal counts: the smaller ordinal wins
    return a.root.ordinal < b.root.ordinal
def array_to_sortedList(inputArray):
    """Turn a frequency array into a sorted linked list of HuffmanTrees.

    idx runs one ahead of the slot index, undoing txt_to_freq's ord-1
    storage offset, so each Leaf carries the true character ordinal.
    Slots with a zero count are skipped.
    """
    return_list = empty_list()
    idx = 0
    for x in range(inputArray.capacity):
        idx += 1
        if inputArray.array[x] != 0:
            occurence = inputArray.array[x]
            newLeaf = Leaf(idx,occurence)
            newTree = HuffmanTree(newLeaf)
            # keep the list ordered by (count, ordinal) as trees are added
            return_list = insert_sorted(return_list,newTree,comes_before)
    return return_list
#Tree (wrapping a sorted linked list) -> single HuffmanTree
def build_tree(inputTree):
    """Collapse a sorted linked list of HuffmanTrees into one tree.

    Repeatedly takes the two smallest entries, merges them under a new
    Node whose count is the sum of both and whose ordinal is the smaller
    child ordinal (used for tie-breaking by comes_before), then
    re-inserts the merged tree in sorted position.  Returns the last
    remaining HuffmanTree.
    """
    myList = inputTree.root
    while myList.rest is not None:
        # pop the two smallest trees
        left = myList.first.root
        myList = myList.rest
        right = myList.first.root
        myList = myList.rest
        addedCount = left.count + right.count
        newNode = HuffmanTree(Node(addedCount,left,right))
        # the merged node inherits the smaller of the two ordinals
        if left.ordinal > right.ordinal:
            newNode.root.ordinal = right.ordinal
        else:
            newNode.root.ordinal = left.ordinal
        myList = insert_sorted(myList,newNode,comes_before)
    return myList.first
# ----------------< Artifacts of my "Deep" Debugging >-----------------
# # print(txt_to_array())
# list_with_frequency = count_frequency(txt_to_array()) #also used for testing
# # print(list_with_frequency)
# #
# myList = array_to_sortedList(list_with_frequency)
# my_tree = HuffmanTree(myList)
# #
# print(myList)
# print(myList.first.root.ordinal)
# print(myList.rest.first.root.ordinal)
# print(myList.rest.rest.first.root.ordinal)
# print(myList.rest.rest.rest.first.root.ordinal)
# print(myList.rest.rest.rest.rest.first.root.ordinal)
# Huffman node -> generator of (char, code) tuples
def iterate_htree(my_tree, string=""):
    """Yield (character, code) pairs for every leaf below *my_tree*,
    extending *string* with '0' on left branches and '1' on right."""
    if my_tree is None:
        return
    if isinstance(my_tree, Leaf):
        yield (chr(my_tree.ordinal), string)
        return
    yield from iterate_htree(my_tree.left, string + "0")
    yield from iterate_htree(my_tree.right, string + "1")
def generator_to_string(tree):
    """Return the (char, code) pairs of *tree* as a plain list."""
    return list(iterate_htree(tree.root, ""))
# Tuple, Tuple -> Boolean
# Orders (char, code) tuples by the ASCII value of their first element.
def comes_before_tuple(a, b):
    """Return True when tuple *a* sorts before tuple *b*."""
    return ord(a[0]) < ord(b[0])
# Returns a linkedList that contains tuples as values
# Basically used so the insert_sorted function can be run on this
def sort_final_array(inputArray):
    """Sort an iterable of (char, code) tuples by the char's ASCII value.

    The tuples are first pushed onto a temporary linked list (head
    insertion), then insertion-sorted into a fresh list using
    comes_before_tuple as the ordering predicate.
    """
    newlist = empty_list()
    returnlist = empty_list()
    # add(..., 0, x) pushes each tuple at the head
    for x in inputArray:
        newlist = add(newlist,0,x)
    # drain the temporary list into sorted order
    while newlist is not None:
        returnlist = insert_sorted(returnlist,newlist.first,comes_before_tuple)
        newlist = newlist.rest
    return returnlist
#inputArray is a frequency array (from txt_to_freq, counts at ord-1)
def final_array(inputArray,inputList):
    """Build a 256-slot array mapping ord(char) -> (code, frequency).

    inputList is a linked list of (char, code) tuples; for each entry the
    matching slot is located and filled with its code and frequency.
    """
    newArray = array_list.empty_list()
    newArray.capacity = 256
    newArray.array = [None] * 256
    for x in range(length(inputList)):
        temp = ord(inputList.first[0])
        # linear scan for slot == ord(char); note the inner loop reuses the
        # name x, shadowing the outer counter (harmless, as x is re-derived)
        for x in range(inputArray.capacity):
            if x == temp:
                # x - 1 compensates for txt_to_freq's ord-1 storage offset
                temp2 = inputArray.array[x-1]
                newArray.array[x] = (inputList.first[1],temp2)
                newArray.size += 1
        inputList = inputList.rest
    return newArray
#-------------------------< Main Encode Function >----------------------------------
def huffman_encode(inputFile,outputFile):
    """Huffman-encode *inputFile* into *outputFile*.

    Writes a header (number of distinct characters, then one
    ordinal/frequency pair per character) followed by the encoded bit
    string, using HuffmanBitsWriter.  Returns the characters of the tree's
    leaves in traversal order (empty string for an empty input; the single
    character for one-character or single-distinct-character inputs).
    Raises IOError when the input file does not exist.
    """
    if os.path.exists(inputFile) is False:
        raise IOError
    hb_writer = HuffmanBitsWriter(outputFile)
    freqArray = txt_to_freq(inputFile)
    # .size is the total number of characters read, not distinct ones.
    totalOccurence = freqArray.size
    # NOTE(review): looks like a leftover debug print -- confirm it can go.
    print(totalOccurence)
    if totalOccurence == 0: #Encoding an empty file
        hb_writer.write_byte(totalOccurence)
        hb_writer.close()
        return ""
    elif totalOccurence == 1:
        # Exactly one character in the file: header-only output.
        myChar = (array_to_sortedList(freqArray).first.root.ordinal)
        hb_writer.write_byte(totalOccurence)
        hb_writer.write_byte(myChar)
        hb_writer.write_int(array_to_sortedList(freqArray).first.root.count)
        hb_writer.close()
        return(chr(myChar))
    elif totalOccurence > 1:
        sortedList = array_to_sortedList(freqArray)
        listTree = HuffmanTree(sortedList)
        myTree = build_tree(listTree)
        sortedLinkedList = sort_final_array(generator_to_string(myTree))
        finalArray = final_array(freqArray, sortedLinkedList)
        # Detect the degenerate case where every character is the same.
        compareTemp = (sortedList.first.root.ordinal)
        allSame = True
        while sortedList != None:
            if sortedList.first.root.ordinal != compareTemp:
                allSame = False
            sortedList = sortedList.rest
        if allSame:
            # One distinct character repeated: header-only output.
            totalSize = finalArray.size
            ordChr = (ord(sortedLinkedList.first[0]))
            repeatTime = (finalArray.array[ordChr][1])
            hb_writer.write_byte(totalSize)
            hb_writer.write_byte(ordChr)
            hb_writer.write_int(repeatTime)
            hb_writer.close()
            return sortedLinkedList.first[0]
        else:
            # General case: rebuild the structures and emit header + codes.
            sortedList = array_to_sortedList(freqArray)
            listTree = HuffmanTree(sortedList)
            myTree = build_tree(listTree)
            sortedLinkedList = sort_final_array(generator_to_string(myTree))
            finalArray = final_array(freqArray,sortedLinkedList)
            totalOccurence = length(sortedLinkedList)
            hb_writer.write_byte(totalOccurence)
            # Header: one (ordinal, frequency) pair per distinct character.
            for x in range(finalArray.capacity):
                if finalArray.array[x] is not None:
                    hb_writer.write_byte(x)
                    hb_writer.write_int(finalArray.array[x][1])
            # Body: concatenate each character's code and write the bits.
            emptyString = ""
            f = open(inputFile, 'r')
            allLines = f.read()
            for x in allLines:
                temp = ord(x)
                emptyString += finalArray.array[temp][0]
            hb_writer.write_code(emptyString)
            # Return value: leaf characters in tree-traversal order.
            tupleList = leaf_to_string(build_tree(HuffmanTree(array_to_sortedList(txt_to_freq(inputFile)))).root)
            chrList = ([i[0] for i in tupleList])
            finalString = ""
            for x in chrList:
                finalString += x
            f.close()
            hb_writer.close()
            return finalString
#-------------------------< Main Decode Function >----------------------------------
def huffman_decode(inFile,outFile):
    """Decode the Huffman-encoded *inFile* and write the text to *outFile*.

    Reads the header (character count, then ordinal/frequency pairs),
    rebuilds the tree via the same sort/merge as the encoder, then walks
    the bit stream leaf-to-leaf until all characters are recovered.
    Always returns None.
    """
    f = open(outFile, "w")
    hb_reader = HuffmanBitsReader(inFile)
    finalString = ""
    try:
        totalOccurence = hb_reader.read_byte()
        # Rebuild the frequency array using txt_to_freq's ord-1 convention.
        newArray = array_list.empty_list()
        newArray.capacity = 256
        newArray.array = [0] * 256
        for x in range(0, totalOccurence):
            myCount = hb_reader.read_byte()
            newFreak = hb_reader.read_int()
            newArray.array[myCount - 1] = newFreak
        Dtree = build_tree(HuffmanTree(array_to_sortedList(newArray)))
        # Root count equals the total number of characters to emit.
        totalChr = Dtree.root.count
        ogTree = Dtree.root
        current = Dtree.root
        while totalChr != 0:
            if type(current) == Leaf:
                # Reached a leaf: emit its character and restart at the root.
                finalString += chr(current.ordinal)
                totalChr -= 1
                current = ogTree
            else:
                # 1-bit goes right, 0-bit goes left.
                oneBit = hb_reader.read_bit()
                if oneBit is True:
                    current = current.right
                else:
                    current = current.left
        f.write(finalString)
        hb_reader.close()
        f.close()
        return None
    # NOTE(review): bare except is used as an "input was empty" signal;
    # it also hides genuine errors -- narrowing it to the reader's actual
    # exception type would be safer (confirm what read_byte raises).
    except: #isempty
        f.close()
        hb_reader.close()
        return None
#-------------------------< Testing Units >----------------------------------
class TestCases(unittest.TestCase):
# def test_decode(self):
# self.assertEqual(huffman_decode("output.bin","sample.txt"),None) #abcd abc ab a
#
# def test_decode2(self):
# self.assertEqual(huffman_decode("output2.bin","sample2.txt"),None) #Decode empty file
#
# def test_decode3(self):
# self.assertEqual(huffman_decode("output_empty.bin", "empty.txt"), None) # Decode empty file
#
# def test_decode4(self):
# self.assertEqual(huffman_decode("output_empty2.bin", "code.txt"), None) # Decode empty file
#
# def test_decode5(self):
# self.assertEqual(huffman_decode("code.bin", "cool.txt"), None) # Decode empty file
#
def test_encode_full(self):
self.assertEqual(huffman_encode("sample.txt", "output.bin"), 'lnos abce') # abcd abc ab a
# def test_encode_fl(self):
# self.assertEqual(huffman_encode("sample2.txt", "output2.bin"), 'a') # aaaa
#
# def test_encode_empty(self):
# self.assertEqual(huffman_encode("empty.txt", "output_empty.bin"), '') # empty
#
# def test_encode_empty2(self):
# self.assertEqual(huffman_encode("code.txt", "output_empty2.bin"), 'a') # a
#
# def test_encode_more(self):
# self.assertEqual(huffman_encode("cool.txt", "code.bin"),'ehlnaFMINjpbytorE!?GJLPRzHT\'OUYi,kv\nwAg cC".msdu-SWBDf') # a
#
# def test_encode_IOerror(self):
# with self.assertRaises(OSError):
# (huffman_encode("text$file.txt","textfile_encoded.bin"))
#
# def test_eq(self):
# myList = empty_list()
# secondList = empty_list()
# self.assertEqual(myList, secondList)
#
# def test_empty(self):
# myList = array_list.empty_list()
# newArray = array_list.empty_list()
# self.assertEqual(myList, newArray)
#
# def test_pair_repr(self):
# myPair = Pair(5,None)
# self.assertEqual(myPair.__repr__(),"Pair(5, None)")
def test_array_repr(self):
myPair = array_list.empty_list()
self.assertEqual(myPair.__repr__(),"Array([None, None, None, None, None, None, None, None, None, None], Size 0)")
def test_add_IE2(self):
with self.assertRaises(IndexError):
add(empty_list(),-5,99)
def test_length1(self):
myList = array_list.empty_list()
self.assertEqual(array_list.length(myList),0)
def test_add_IE3(self):
with self.assertRaises(IndexError):
array_list.add(array_list.empty_list(),-5,99)
def test_add_mid(self):
myList = array_list.empty_list()
myList.size = 3
myList.array[0] = 1
myList.array[1] = 2
myList.array[2] = 3
compareList = array_list.empty_list()
compareList.size = 4
compareList.array[0] = 1
compareList.array[1] = 2
compareList.array[2] = 99
compareList.array[3] = 3
self.assertEqual(array_list.add(myList,2,99),compareList)
    def test_eq_leaf(self):
        # Leaf equality is expected to compare (char, frequency) values,
        # not object identity.
        myLeaf = Leaf(54,2)
        secondLeaf = Leaf(54,2)
        self.assertEqual(myLeaf == secondLeaf,True)
    def test_eq_node(self):
        myLeaf = Node(54,2,None)
        secondLeaf = Node(54,2,None)
        self.assertEqual(myLeaf == secondLeaf,True)
    def test_eq_huffT(self):
        myTree = HuffmanTree(Leaf(54 ,2))
        secondTree = HuffmanTree(Leaf(54,2))
        self.assertEqual(myTree == secondTree, True)
    def test_repr_huffT(self):
        # Tree repr is expected to show only the root frequency.
        myTree = HuffmanTree(Leaf(54,2))
        self.assertEqual(myTree.__repr__(),"Tree(2)")
    def test_comes_before_edge(self):
        # Equal frequencies: the tree with the smaller character code
        # is expected to come first.
        tree1 = HuffmanTree(Leaf(53,2))
        tree2 = HuffmanTree(Leaf(52,2))
        self.assertEqual(comes_before(tree2,tree1),True)
    def test_comes_before_edge2(self):
        tree1 = HuffmanTree(Leaf(53,2))
        tree2 = HuffmanTree(Leaf(52,2))
        self.assertEqual(comes_before(tree1,tree2),False)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
| true |
58c454f4780bebc8d05fbbbb449c2c2b582471d7 | Python | williamfhe/advent-of-code-2017 | /Day 25/turing_part1.py | UTF-8 | 1,103 | 3.109375 | 3 | [] | no_license | from collections import defaultdict
# Transition table for the puzzle's Turing machine:
# state -> current tape value -> (value to write, head move, next state).
state_actions = {
    'A': {
        0: { "write": 1, "move": +1, "next": 'B' },
        1: { "write": 0, "move": -1, "next": 'B' }
    },
    'B': {
        0: { "write": 1, "move": -1, "next": 'C' },
        1: { "write": 0, "move": +1, "next": 'E' }
    },
    'C': {
        0: { "write": 1, "move": +1, "next": 'E' },
        1: { "write": 0, "move": -1, "next": 'D' }
    },
    'D': {
        0: { "write": 1, "move": -1, "next": 'A' },
        1: { "write": 1, "move": -1, "next": 'A' }
    },
    'E': {
        0: { "write": 0, "move": +1, "next": 'A' },
        1: { "write": 0, "move": +1, "next": 'F' }
    },
    'F': {
        0: { "write": 1, "move": +1, "next": 'E' },
        1: { "write": 1, "move": +1, "next": 'A' }
    }
}
# Number of steps after which the diagnostic checksum is taken.
diag_steps = 12861455
# The tape is unbounded in both directions; defaultdict(int) makes every
# never-visited cell read as 0.
tape = defaultdict(int)
cursor_pos = 0
current_state = 'A'
# Run the machine for the required number of steps.
for _ in range(diag_steps):
    action = state_actions[current_state][tape[cursor_pos]]
    tape[cursor_pos] = action["write"]
    cursor_pos += action["move"]
    current_state = action["next"]
# Diagnostic checksum: number of 1s left on the tape.
print(list(tape.values()).count(1))
| true |
2dc4841435f5df54dd74d094b59050d0355bf065 | Python | nimiq-community/python-client | /example/example.py | UTF-8 | 685 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | from nimiqclient import *
# Example: connect to a local Nimiq node over JSON-RPC and, once consensus
# is established, print the address of every basic (non-contract) account.
# The credentials below are sample values for a local test node.
# Create Nimiq RPC client
client = NimiqClient(
    scheme="http", user="luna", password="moon", host="127.0.0.1", port=8648
)
try:
    # Get consensus
    consensus = client.consensus()
    print("Consensus: {0}".format(consensus))
    if consensus == ConsensusState.ESTABLISHED:
        # Get accounts
        print("Getting basic accounts:")
        for account in client.accounts():
            if account.type == AccountType.BASIC:
                # Show basic account address
                print(account.address)
except InternalErrorException as error:
    print("Got error when trying to connect to the RPC server: {0}".format(str(error)))
| true |
b84ce12ac90eb740f40d1816fd4a27b2c6abb351 | Python | Aasthaengg/IBMdataset | /Python_codes/p02850/s676832975.py | UTF-8 | 744 | 2.640625 | 3 | [] | no_license | n=int(input())
# Colour the edges of a tree so that no two edges sharing a vertex get the
# same colour, using the minimum number of colours (BFS from vertex 1).
# Romanised-Japanese variable glossary:
#   kotaezyun - edges in input order (the order answers must be printed)
#   renketu   - adjacency (child) lists; assumes each edge is given
#               parent-first -- TODO confirm against the problem statement
#   iro       - colour of the edge entering each vertex
#   irokazu   - number of distinct colours used so far
kotaezyun=[]
renketu=[[] for i in range(n+1)]
ans_dict={}
for i in range(n-1):
    a,b=map(int, input().split())
    renketu[a].append(b)
    kotaezyun.append([a,b])
from collections import deque
que=deque()
que.append(1)
iro=[-1]*(n+1)
iro[1]=0
irokazu=0
tansaku_moto=[1]
while que:
    tansaku_moto=que.popleft()
    ironuri=1
    for tansaku_saki in renketu[tansaku_moto]:
        # Skip the colour already used by the edge entering this vertex.
        if ironuri==iro[tansaku_moto]:
            ironuri+=1
        iro[tansaku_saki]=ironuri
        ans_dict[tansaku_moto,tansaku_saki]=ironuri
        que.append(tansaku_saki)
        irokazu=max(irokazu,ironuri)
        ironuri+=1
print(irokazu)
for i in range(n-1):
    tmp=(kotaezyun[i][0],kotaezyun[i][1])
print(ans_dict[tmp]) | true |
14c3f8918577b40f06aa60ac1bb9b7c7e3b4abec | Python | hicdnh/Adong_Chen_GitProject | /Auto_UI_Test/main.py | UTF-8 | 976 | 2.59375 | 3 | [] | no_license | #!usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/5/23 15:03
# @Author : Adong_Chen
# @Email : hicdnh@163.com
import settings
from selenium import webdriver
import cases
import time
def get_func_names(__driver):
    # Resolve the case names configured in settings.SMOKE_TEST_CASES into
    # bound methods of a SmokeTest instance that shares the given WebDriver.
    cases_lst = []
    for i in settings.SMOKE_TEST_CASES:
        case = getattr(cases.smoketest.SmokeTest(__driver), i)
        cases_lst.append(case)
    # print cases_lst
    return cases_lst
def get_driver():
    # Launch Chrome, open the site under test and configure a 3-second
    # implicit wait for element lookups.
    driver = webdriver.Chrome()
    driver.get("https://www.cnblogs.com/")
    driver.implicitly_wait(3)
    return driver
def run():
    """Run every configured smoke-test case in a fresh browser, logging results.

    One Chrome instance is started per case; the value returned by the case
    is appended to the file named by settings.LOGFILE.
    """
    # `with` guarantees the log file is closed even if a case raises
    # (the original leaked the handle on any exception).
    with open(settings.LOGFILE, "w+") as log_file:
        for i in range(len(settings.SMOKE_TEST_CASES)):
            driver = get_driver()
            try:
                func_lst = get_func_names(driver)
                r = func_lst[i]()
                # NOTE(review): .func_name is Python 2 only; on Python 3 this
                # would be func_lst[i].__name__ -- confirm target interpreter.
                temp = "Case: " + func_lst[i].func_name + "result: " + str(r) + "\n"
                log_file.write(temp)
                log_file.flush()
                time.sleep(5)
            finally:
                # The original never quit the driver, leaking one Chrome
                # process per test case.
                driver.quit()
# Entry point when executed directly.
if __name__ == "__main__":
    run()
    # run()
| true |
0ec4b53467e922d70597374ed4b3cdaf948c9a9a | Python | Aasthaengg/IBMdataset | /Python_codes/p03239/s101251567.py | UTF-8 | 217 | 2.59375 | 3 | [] | no_license | N, T = map(int, input().split())
# Read the N (cost, time) route pairs; N and T were read above.
routes = []
for _ in range(N):
    routes.append(tuple(map(int, input().split())))
# BUG FIX: the previous version built dict(routes) keyed by cost, which
# silently dropped routes sharing the same cost and could print the wrong
# answer (or "TLE" when a feasible route existed).  Filter the raw list.
feasible = [cost for cost, t in routes if t <= T]
if feasible:
    print(min(feasible))
else:
    print("TLE")
cc05afe64f2b2bc80343b70741513fc24229dba4 | Python | ZhaoQian888/try | /try.py | UTF-8 | 890 | 3.640625 | 4 | [] | no_license | class employee:
    def __init__(self, *args, **kwargs):
        # args[0] -> name, args[1] -> position; raises IndexError when
        # fewer than two positional arguments are supplied.
        self.name=args[0]
        self.position=args[1]
        print("I am employee")
        pass
    def funcion1(self,*args):
        # Echo all positional arguments as a tuple.  The name is a typo of
        # "function1" but is kept for compatibility with existing callers.
        print(args)
        pass
class employer:
    # Demo class: greets using the first positional argument as the name.
    def __init__(self, *args, **kwargs):
        print("my name is ",args[0])
        pass
# Demo: instantiate both classes and exercise attributes and the method.
xiaowang=employee("小明","管理")
xiaoming=employer("小明")
print(xiaowang.name)
print(xiaowang.position)
xiaowang.funcion1("fs","sf","fs")
# Classes below demonstrate single and multiple inheritance.
class father:
    def speak(self):
        print("I can fly")
        pass
# son inherits speak() unchanged from father.
class son(father):
    pass
class mother():
    def power(self):
        print("I have power")
        pass
    pass
# Multiple inheritance: speak() from father, power() from mother.
class daughter(father,mother):
    def run(self):
        print("I can run")
        pass
    pass
# Overrides the inherited speak().
class son2(father):
    def speak(self):
        print("I can fly speed")
# Exercise the inheritance demo classes.
d=daughter()
d.power()
d.run()
d.speak()
s2=son2()
s2.speak() | true |
486b23ce39f551793d587b2fcf061f6e97aa8c2b | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/abc057/B/4881622.py | UTF-8 | 261 | 2.578125 | 3 | [] | no_license | N,M=map(int,input().split())
# ABC057 B: for each of the N students at (x, y), print the 1-based index
# of the nearest of the M checkpoints by Manhattan distance (ties resolved
# to the smallest index by list.index).  N and M were read above.
X=[list(map(int,input().split())) for i in range(N)]
check=[list(map(int,input().split())) for i in range(M)]
for x,y in X:
    # Manhattan distance from this student to every checkpoint.
    dist=[abs(x-c[0])+abs(y-c[1]) for c in check]
    MIN=min(dist)
print(dist.index(MIN)+1) | true |
6d385c812a4babd165040d17f7c5ffe3f12184e7 | Python | praneethreddypanyam/DataStructures-Algorithms | /Graphs/MinimumCostPath.py | UTF-8 | 1,561 | 2.6875 | 3 | [] | no_license | from collections import deque
import sys
row = [-1,1,0,0]
col = [0,0,1,-1]
def valid(i, j, m, n, visited):
    """True when (i, j) lies inside the m x n grid and is not yet visited."""
    inside_rows = 0 <= i < m
    inside_cols = 0 <= j < n
    return inside_rows and inside_cols and visited[i][j] == False
def shortestPath(srcI, srcJ, posI, posJ, n, m, a):
    """Return the minimum total cell cost of a 4-directional path from
    (srcI, srcJ) to (posI, posJ) in the n x m grid `a`, counting the cost
    of both endpoints.

    BUG FIX: the previous version greedily stepped to the single cheapest
    unvisited neighbour, which can return an arbitrarily expensive path
    (or dead-end entirely).  This is standard Dijkstra over grid cells.
    """
    import heapq
    if srcI == posI and srcJ == posJ:
        return a[posI][posJ]
    # best[i][j] = cheapest known cost of reaching (i, j) from the source.
    best = [[sys.maxsize] * m for _ in range(n)]
    best[srcI][srcJ] = a[srcI][srcJ]
    heap = [(best[srcI][srcJ], srcI, srcJ)]
    while heap:
        dist, i, j = heapq.heappop(heap)
        if i == posI and j == posJ:
            return dist
        if dist > best[i][j]:
            continue  # stale queue entry
        for di, dj in ((-1, 0), (1, 0), (0, 1), (0, -1)):
            ni, nj = i + di, j + dj
            if 0 <= ni < n and 0 <= nj < m:
                cand = dist + a[ni][nj]
                if cand < best[ni][nj]:
                    best[ni][nj] = cand
                    heapq.heappush(heap, (cand, ni, nj))
    return 0  # target unreachable (cannot happen on a wall-free grid)
# Driver: T test cases; each gives n and n*n grid values on one line.
for _ in range(int(input())):
    n = int(input())
    l = list(map(int,input().split()))
    k = 0
    # Reshape the flat list of n*n values into an n x n grid.
    # (`k` above is unused; kept as-is.)
    a = []
    for i in range(0,len(l),n):
        a.append(l[i:i+n])
    print(shortestPath(0,0,n-1,n-1,n,n,a))
'''
Sample Input:
2
5
31 100 65 12 18 10 13 47 157 6 100 113 174 11 33 88 124 41 20 140 99 32 111 41 20
2
42 93 7 14
''' | true |
5acf7bafbdb2b96842549d8c34a086c6e59987d3 | Python | AlexanderLazarev/emc-game | /python/log_functions.py | UTF-8 | 977 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Модуль с функциями для логирования
import datetime
from constants import LOG_DIR
def log(message, prefix = 'Main'):
    """Append "[date-time][prefix] message" to today's log file and echo it.

    The file is named "log_<MM.DD.YYYY>" inside LOG_DIR, so each day gets
    its own file.
    """
    prefix = '[' + prefix + ']'
    now = datetime.datetime.today()
    date = now.strftime("%m.%d.%Y")
    time = now.strftime("%H:%M:%S")
    output = '[' + date + '-' + time + ']' + prefix + ' ' + message + '\n'
    # `with` guarantees the handle is closed even if the write fails
    # (the original leaked it on exceptions).
    with open(LOG_DIR + 'log_' + date, 'a') as log_file:
        log_file.write(output)
    # Single-argument print() behaves identically on Python 2 and 3,
    # replacing the Python-2-only `print output` statement.
    print(output)
# Log errors.
def log_error(message):
    log(str(message), 'Error')
# Log VK chat messages.
def log_message(message):
    log(str(message), 'Mess')
# Log in-game actions.
def log_actions(message):
    log(str(message), 'Action')
# Log events at the start/end of a game cycle.
def log_cicle(message):
    log(str(message), 'Cicle')
| true |
32006dfceb4c6d3ce013c2aeda3d1de8b451a72c | Python | NandakrishnanR/intermediate-programming | /Desktop/opencv/leap.py | UTF-8 | 689 | 4.28125 | 4 | [] | no_license | year = 2000
# Leap-year check (Gregorian rules) for `year` set above, followed by a
# variable-swap demo.
# To read the year from the user instead:
# year = int(input("Enter a year: "))
if year % 4 != 0:
    is_leap = False
elif year % 100 != 0:
    is_leap = True
else:
    is_leap = year % 400 == 0
if is_leap:
    print("{0} is a leap year".format(year))
else:
    print("{0} is not a leap year".format(year))
x = 5
y = 10
# To read the values from the user instead:
# x = input('Enter value of x: ')
# y = input('Enter value of y: ')
# Tuple assignment swaps the two values without a temporary.
x, y = y, x
print('The value of x after swapping: {}'.format(x))
print('The value of y after swapping: {}'.format(y)) | true |
b5f3743448d2ad35595dac571ff72e65d4d65a64 | Python | srik1040/hackerrank-py-solutions | /Introduction/py-if-else.py | UTF-8 | 230 | 3.34375 | 3 | [] | no_license | N = int(raw_input().strip())
# HackerRank "Py If-Else" (Python 2 syntax): odd -> Weird; even in [2, 5]
# -> Not Weird; even in [6, 20] -> Weird; even above 20 -> Not Weird.
if N % 2 != 0:
    print "Weird"
elif N % 2 == 0:
    if N>=2 and N<=5:
        print "Not Weird"
    elif N >=6 and N <= 20:
        print "Weird"
    elif N > 20:
print "Not Weird" | true |
f4d5ff9145aaef162bd2d157bd598d3443908659 | Python | shlokabhardwaj/housing_eda | /life cycle of project part 1.py | UTF-8 | 830 | 3.1875 | 3 | [] | no_license | ## ...............................LIFE CYCLE OF PROJECT..................................
## 1. Data Analysis
## 2.Feature Engineering
## 3. Feature Selection
## 4. Model Building
## 5. Model Deployment
##..................................Data Analysis...................................
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Show every column when printing DataFrames.
pd.set_option("display.max_columns",None)
# Default figure size for all seaborn plots.
sns.set(rc={"figure.figsize":(12,8)})
# Training data expected in the working directory (Kaggle-style CSV).
data=pd.read_csv("train.csv")
## in the DATA ANALYSIS ,we will analysis to find out the below stuff..................
##1. Missing values
##2. All the numerical variables
##3. Distribution of the numerical variables
##4. Cardinality of categorical variables
##5. Outliers
##6. Relationship between independent and independent features
| true |
16067f3f643be904d5756ad3568a43accf1a223f | Python | iang12/grafo_labirinto_binario | /grafo.py | UTF-8 | 2,524 | 3.203125 | 3 | [] | no_license | from random import *
# Read a binary maze from "entrada.txt": first line "rows cols", then the
# grid rows ('1' = walkable), then the start and goal coordinates.
matriz_grafo = []
matriz_visitas = []
lista_de_posicoes = []
posicao_inicial = []
posicao_final = []
lista_de_passos = []
passo = 1
# visit matrix
#print(matriz_visitas)
f = open("entrada.txt", "r")
dim = f.readline().replace("\n","").split(" ")
n = int(dim[0]) # number of rows
m = int(dim[1]) # number of columns
cont = 1
matrix = []
for i in range(n):
    matrix.append(f.readline().replace("\n","").split(" "))
begin = f.readline().replace("\n","").split(" ")
end = f.readline().replace("\n","").split(" ")
# 0 = not yet visited; marks cells already enqueued by the BFS below.
matriz_visitas = [ [ 0 for i in range(m) ] for j in range(n) ]
print(matrix)
print(begin)
print(end)
# print(lista_de_posicoes)
class Point:
    """A maze cell (x, y) plus the BFS distance at which it was reached."""

    def __init__(self, x=0, y=0, dist=0):
        self.x = x
        self.y = y
        self.dist = dist

    def __str__(self):
        # Same textual form as before: "(x,y,dist)" with no spaces.
        fields = (self.x, self.y, self.dist)
        return '(' + ','.join(str(f) for f in fields) + ')'
def isValid(row, col):
    """Return True when (row, col) lies inside the n x m maze grid
    (n and m are module-level globals read from the input file)."""
    return 0 <= row < n and 0 <= col < m
# Breadth-first search from `begin` to `end` over walkable ('1') cells.
# BFS explores in increasing distance, so the first time the goal is
# popped its `dist` is the shortest path length.
# NOTE(review): lista.pop(0) is O(n); collections.deque (already imported
# indirectly available) would make this O(1) -- left unchanged here.
p = Point(x=int(begin[0]),y=int(begin[1]))
lista = []
lista.append(p)
matriz_visitas[p.x][p.y] = 1
find = False
while lista:
    q = lista.pop(0)
    print(q)
    if(q.x==int(end[0]) and q.y==int(end[1])):
        print("Caminho mais curto: "+str(q.dist))
        find = True
        break
    # up
    if(isValid(q.x-1,q.y)):
        if(matrix[q.x-1][q.y]=='1' and (not matriz_visitas[q.x-1][q.y])):
            lista.append(Point(x=q.x-1,y=q.y,dist=q.dist+1))
            matriz_visitas[q.x-1][q.y] = 1
    # down
    if(isValid(q.x+1,q.y)):
        if(matrix[q.x+1][q.y]=='1' and (not matriz_visitas[q.x+1][q.y])):
            lista.append(Point(x=q.x+1,y=q.y,dist=q.dist+1))
            matriz_visitas[q.x+1][q.y] = 1
            # print("não testou")
    # left
    if(isValid(q.x,q.y-1)):
        if(matrix[q.x][q.y-1]=='1' and (not matriz_visitas[q.x][q.y-1])):
            lista.append(Point(x=q.x,y=q.y-1,dist=q.dist+1))
            matriz_visitas[q.x][q.y-1] = 1
            # print('esquerda')
    # right
    if(isValid(q.x,q.y+1)):
        if(matrix[q.x][q.y+1]=='1' and (not matriz_visitas[q.x][q.y+1])):
            lista.append(Point(x=q.x,y=q.y+1,dist=q.dist+1))
            matriz_visitas[q.x][q.y+1] = 1
            # print('direita')
    #print(matriz_visitas)
if(not find):
    print("Não tem caminho!")
#print(p.x,p.y,p.dist)
#Positions = []
#Positions.append([p.x,p.y,p.dist])
#print(Positions)
| true |
e6ace0a0e8f736935ae5f4b074e68aa98f6338a5 | Python | rohbot/Singing-Plants-python | /PdConnection.py | UTF-8 | 768 | 3.0625 | 3 | [] | no_license | import socket
class PdConnection:
    """TCP connection to a Pure Data [netreceive] socket.

    Values are sent as "name value;" strings, the message format Pd's
    netreceive object expects.  Usable as a context manager.
    """
    def __init__(self, address, port):
        self.__address = address
        self.__port = port
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__connect()
    def __enter__(self):
        # Context-manager support: `with PdConnection(...) as pd:`.
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def __connect(self):
        server_address = (self.__address, self.__port)
        self.__socket.connect(server_address)
    def close(self):
        self.__socket.close()
    def sendValue(self, name, value):
        # Pd message format: "<receiver-name> <value>;".
        # NOTE(review): sending a str works on Python 2 only; Python 3
        # sockets require bytes -- confirm the target interpreter.
        message = name + " " + str(value) + ";"
        try:
            self.__socket.send(message)
        except socket.timeout:
            # NOTE(review): reconnecting the same socket object after a
            # timeout typically raises; a fresh socket would be needed here.
            self.__connect()
self.__socket.send(message) | true |
1766b5e7dc60da51f4c630776b5cb9769f1c190f | Python | jchenluo/Src-Assert-Collection | /core/util.py | UTF-8 | 1,389 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2020/5/27 11:47
# @Author : huha
import os,re,urllib3
from selenium import webdriver
urllib3.disable_warnings()
# 正则匹配获取一级域名
def get_one_level_domain(path):
    """Return "sld.tld"-style keys parsed from each URL line of the file.

    Lines in which the regex finds no dotted host part are skipped.
    """
    key_list = []
    with open(path, 'r', encoding='utf-8') as f:
        for url in f:
            url_match = re.findall(r'(\w*)\.(.*-?\w*)\.(\w*)\.?(\w*)', url)
            # print(url_match)
            if url_match:
                key_list.append(url_match[0][1] + '.' + url_match[0][2])
    return key_list
# 去重去空行
def out_file(path):
    """Rewrite `path` in place with duplicate lines and blank lines removed.

    NOTE(review): a set is used for de-duplication, so the rewritten line
    order is arbitrary; the file is also reopened once per line when
    writing back -- confirm neither matters to callers.
    """
    file_list = [] # collect every line of the file
    with open(path, "r", encoding="utf-8") as f:
        file_2 = f.readlines()
        for file in file_2:
            file_list.append(file)
    out_file1 = set(file_list) # set() drops duplicate lines automatically
    last_out_file = list(out_file1)
    os.remove(path)
    for out in last_out_file:
        with open(path,"a+",encoding="utf-8") as f: # append de-duplicated lines
            if out != '\n':
                f.write(out)
# 模拟浏览器获取页面元素
def getajaxpage(url):
    # Fetch a page with a headless browser so JavaScript-rendered (AJAX)
    # content appears in the returned HTML.
    # NOTE(review): PhantomJS support was removed from recent Selenium
    # releases; headless Chrome/Firefox is the usual replacement.
    browser = webdriver.PhantomJS()
    # browser.get('https://security.alibaba.com/global.htm?spm=0.0.0.0.jbZNqu')
    browser.get(url)
    html = browser.page_source
    browser.close()
    return html
| true |
8cc6cf370c2e35f1624181f4c143d5b8bc05a49a | Python | Aasthaengg/IBMdataset | /Python_codes/p02616/s988680128.py | UTF-8 | 1,360 | 2.875 | 3 | [] | no_license | def make_set_list(array,valid):
    # Multiply consecutive pairs: [a0*a1, a2*a3, ...].
    new_array = []
    for i in range(len(array)//2):
        new_array.append(array[2*i]*array[2*i+1])
    # Odd length: keep the unpaired last element only when valid == 1
    # (positives may stand alone; negatives must stay paired).
    if len(array)%2 == 1 and valid == 1:
        new_array.append(array[len(array)-1])
    return new_array
# Choose K of the N values maximising their product, printed mod 1e9+7.
# `ok` is True when a non-negative product is achievable; otherwise the K
# smallest-magnitude values (As sorted by abs) give the best negative one.
N, K = list(map(int,input().split()))
As = list(map(int,input().split()))
p_nums = []
n_nums = []
for i in range(N):
    n = As[i]
    if n < 0:
        n_nums.append(n)
    else:
        p_nums.append(n)
As.sort(key = abs)
n_nums.sort()
p_nums.sort(reverse = True)
if len(p_nums) > 0:
    if N > K:
        ok = True
    elif len(n_nums) % 2 == 0:
        ok = True
    else:
        ok = False
elif K % 2 == 0:
    ok = True
else:
    ok = False
ans = 1
if ok:
    if K % 2 == 0:
        # Pair up positives (largest first) and negatives (most negative
        # first) so every pair product is non-negative; take the K/2 best.
        set_p_num = make_set_list(p_nums,1)
        set_n_num = make_set_list(n_nums,0)
        set_num = set_p_num + set_n_num
        set_num.sort(reverse = True)
        for i in range(K//2):
            ans = (ans*(set_num[i]%(10**9+7)))%(10**9+7)
    else:
        # Odd K: take the largest positive alone, then pair the rest.
        ans = p_nums.pop(0)
        set_p_num = make_set_list(p_nums,1)
        set_n_num = make_set_list(n_nums,0)
        set_num = set_p_num + set_n_num
        set_num.sort(reverse = True)
        for i in range(K//2):
            ans = (ans*(set_num[i]%(10**9+7)))%(10**9+7)
else:
    # Product must be negative: use the K smallest-magnitude values.
    for i in range(K):
        ans = (ans*(As[i]%(10**9+7)))%(10**9+7)
print(ans)
| true |
6c1332178f8ceb620f5782e5f7dc86c94aa48c64 | Python | JuanPabloGonzalezA/Analytics | /16x32/16x32_meses.py | UTF-8 | 1,892 | 2.96875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anm
# Python 2 script: animated heatmap of traffic crashes per grid cell, one
# frame per day type, normalised by how often each day type occurs (cdias).
dias=["Lunes","Martes","Miercoles","Jueves","Viernes","Sabado","Domingo","Festivo","Antes Festivo"]
cdias=[42,52,47,49,51,52,42,15,15]
n=9
datos=np.loadtxt('choques3.txt').reshape((31,14,n))#y,x,d
print datos.shape
print datos[7,6,0]
fig=plt.figure()
# Normalise each day-type slice by its number of occurrences.
for i in range(n):
    datos[:,:,i]=datos[:,:,i]/cdias[i]
#im=plt.imshow(datos[:,:,0],cmap='YlOrRd')
#plt.xlim(-0.5,7.5)
#plt.ylim(14.5,-0.5)
#plt.colorbar()
#plt.show()
ims=[]
# One animation frame (image + day-name label) per day type.
for i in range(n):
    ttl = plt.text(0.5, 0.5, dias[i], horizontalalignment='left', verticalalignment='bottom')
    #txt = plt.text(i,i,i)
    im=plt.imshow(datos[:,:,i], animated=True,vmin=0,vmax=5,cmap='YlOrRd')#cmap='YlOrRd', animated=True)
    ims.append([im,ttl])#,txt])
gif=anm.ArtistAnimation(fig,ims,interval=750)
#plt.colorbar()
cbar = plt.colorbar()
cbar.set_label('# choques')
plt.title('Choques por celda cada dia')
plt.xlabel('longitud')
plt.ylabel('latitud')
# NOTE(review): yticks passes 15 positions but 16 labels -- confirm the
# intended tick count.
plt.xticks(np.linspace(-0.5,13.5,4),[-74.21,-74.16,-74.10,-74.05])#np.linspace(-74.21022,-74.02318,8))
plt.yticks(np.linspace(-0.5,30.5,15),[4.84,4.82,4.79,4.77,4.74,4.72,4.69,4.67,4.65,4.62,"4.60",4.57,4.55,4.52,"4.50",4.47])#np.linspace(4.84300,4.49752,15))
gif.save('mes.gif')
"""
uxt=np.loadtxt('datos.txt')
x=np.linspace(0,100,len(uxt[0,:]))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set(xlabel=r'$x$',ylabel=r'$u(x,t)$',title='Cuerda vibrando',ylim=(-1,1),xlim=(0,100))
datos, = ax.plot(x,uxt[0,:])
def iteracion(i):
datos.set_ydata(uxt[i,:])
gif = anm.FuncAnimation(fig, iteracion,frames=len(uxt[:,0]), interval=50)
gif.save('cuerda.gif')
datos = np.loadtxt('datos.txt').reshape((N,N,3))#(x,y,en 0 V en 1 Ex en 2 Ey)
plt.imshow(datos[:,:,0],extent=[-2.5,2.5,-2.5,2.5],cmap='hot')
plt.colorbar()
plt.xlim(-2.5,2.5)
plt.ylim(-2.5,2.5)
plt.title('Potencial placas [V] y lineas de campo electrico')
"""
| true |
f07c1274273afa1a8887600189a0ba9001861ae9 | Python | raushan007/InstaAutomation | /unfollow.py | UTF-8 | 2,034 | 2.953125 | 3 | [] | no_license |
#importing all library
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import NoSuchElementException
import time
#Put username
# Selenium automation: log in to Instagram and unfollow up to 149 accounts.
# SECURITY NOTE: credentials are stored in plain source; keep them out of
# version control.  Long sleeps throttle requests to avoid rate limiting.
usernameStr = ''
#Put password
passwordStr = ''
#First download chromedriver and then Give the path of the chromedriver.exe file in driver
driver=webdriver.Chrome("F:\work\DIWALI\chromedriver.exe")
#Opening instagram
driver.get('https://www.instagram.com/accounts/login')
sleep(2)
#entering username
username=driver.find_element_by_name('username')
username.send_keys(usernameStr)
#entering password
password=driver.find_element_by_name('password')
password.send_keys(passwordStr)
sleep(1)
#clicking loging button
driver.find_element_by_xpath("//button[contains(.,'Log in')]").click()
sleep(3)
#Opening the account
driver.get('https://www.instagram.com/'+usernameStr)
sleep(3)
#Finding the follow button and then clicking it
all_spans = driver.find_elements_by_xpath("//ul/li[3]/a/span")
all_spans[0].click()
sleep(10)
#scrolling down the follow list to open all the account
for n in range(1,50):
    unfollowWindow2 = driver.find_element_by_xpath("//button[contains(.,'Following')]")
    unfollowWindow2.send_keys(Keys.END)
    print(n)
    sleep(2)
#Start unfollowing all the account
for i in range(1,150):
    try:
        unfollowWindow = driver.find_element_by_xpath("//button[contains(.,'Following')]")
        unfollowWindow.click()
        sleep(2)
        unfollow=driver.find_element_by_xpath("//button[contains(.,'Unfollow')]")
        unfollow.click()
        print(i)
        sleep(45)
    except Exception as e:
        # Best-effort: keep going past stale elements / missing buttons.
        print(e)
| true |
8a41037dab86e01a3c6a390483f7733084cb4c68 | Python | nopasanadamindy/Algorithms | /01. List 1/02. List1 실습/전기버스_1.py | UTF-8 | 1,389 | 3.234375 | 3 | [] | no_license | def find(power):
    # Count the minimum number of recharges needed to reach stop N when the
    # bus can travel at most K stops per charge; return 0 if impossible.
    last = 0 + K # start from the stop after 0
    cnt = 0
    while last < N: # while short of the terminal stop
        # Charger here: recharge and count it.
        if power[last]:
            cnt += 1
        # No charger: step back one stop at a time (K-1, K-2, ...) until a
        # charger is found.
        # If we step all the way back, the terminal cannot be reached.
        else:
            i = 0
            flag = 0 # 0: no charger found, 1: charger found
            while i < K and last-i > 0: # within range K and last-i positive
                if power[last-i] :
                    last -= i
                    cnt += 1
                    flag = 1
                    break
                i += 1
            if flag == 0:
                return 0
        last += K
    return cnt
import sys
# Redirect stdin so input() reads the test file.
sys.stdin = open("전기버스_input.txt")
T = int(input())
for tc in range(T):
    # K : stops reachable on one full charge
    # N : number of stops
    # M : count of stops that have a charger
    K, N, M = map(int, input().split()) #1 ≤ K, N, M ≤ 100
    power = [0] * (N+1) # index N is the terminal stop
    temp = list(map(int, input().split()))
    # Mark charger locations (1) using the stop number as the index.
    for i in range(len(temp)):
        power[temp[i]] = 1
print("#{} {}".format(tc+1, find(power))) | true |
a0ba7c94e1a24729f1c4e35015200035a6159f8b | Python | britig/Document-Summerization | /TextRank.py | UTF-8 | 6,790 | 3.296875 | 3 | [] | no_license | # program for implementation of TextRank for summarization of a given text document
import os
#importing nltk package for preprocessing steps
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from operator import itemgetter
import re
import math
import networkx as nx
import itertools
import pandas
tokenized_words={} #Dictionary used for storing the tokens of sentences with associated node number(key
'''Function to tokenize the data'''
def tokenize(data):
    """Split `data` into sentences and per-sentence word lists.

    Side effect: populates the module-level `tokenized_words` dict
    (sentence -> word tokens), which build_graph reads later.
    """
    sent_tokens = sent_tokenize(data)
    # list of words
    word_tokens = [word_tokenize(sent) for sent in sent_tokens]
    for k in sent_tokens:
        tokenized_words[k]=[]
        tokenized_words[k]=word_tokenize(k)
    return sent_tokens, word_tokens
'''Function to Calculate the idf score for each word
Input : word list, number of document
Return: word to idf dictionary
'''
def idfScoreCalculation(wordlist, N):
    """Compute idf = log10(N / df) for every word in `wordlist`.

    df counts the corpus files in ./Assignement2_IR/Topic2 whose raw text
    contains the word as a substring (same matching rule as before).

    BUG FIX: a word appearing in no file previously caused a
    ZeroDivisionError in log10(N/df); such words are now logged and
    skipped.  Each corpus file is also read once instead of once per word.
    """
    corpus_dir = './Assignement2_IR/Topic2'
    texts = []
    for filename in os.listdir(corpus_dir):
        with open(corpus_dir + '/' + filename, 'r') as file:
            texts.append(file.read())
    wordToidf = {}
    for word in wordlist:
        if word in wordToidf:
            continue  # duplicate words in wordlist share one entry
        df = sum(1 for text in texts if text.count(word) > 0)
        if df == 0:
            print(word)
            continue
        wordToidf[word] = math.log10(N / df)
    return wordToidf
'''Function to calculate tf_idf similarity score
Input:
sentence1, sentence2, idf dictionary
Returns
-------
cosine similarity value
'''
def tf_idf_calculation1(s1, s2, idf):
    """Cosine similarity of two tokenized sentences under tf-idf weights.

    BUG FIXES vs the previous version:
    * repeated tokens were added once per occurrence to both the numerator
      and the norms, overweighting duplicated words; sums now run over
      unique words with their term frequencies;
    * a word missing from `idf` raised KeyError, which the broad `except`
      turned into a silent `return None` -- crashing callers such as
      build_graph that compare the result with a float.  Missing words now
      default to idf 0.0.
    """
    unique1 = set(s1)
    unique2 = set(s2)
    num = 0.0
    for word in unique1 & unique2:
        weight = float(idf.get(word, 0.0))
        num += s1.count(word) * s2.count(word) * weight * weight

    def _norm(tokens, uniques):
        # Euclidean norm of the tf-idf vector for one sentence.
        total = 0.0
        for word in uniques:
            total += (tokens.count(word) * float(idf.get(word, 0.0))) ** 2
        return math.sqrt(total)

    deno = _norm(s1, unique1) * _norm(s2, unique2)
    if deno == 0:
        deno = 1  # keep the original guard against empty/zero-weight input
    return num / deno
'''Function to build the graph based on certain threashold
Input:
sentences, threshold value, idf dictionary
Returns
-------
g : Graph based on similarity value
'''
def build_graph(nodes, threshold, idf):
    """Build an undirected similarity graph over sentence nodes.

    An edge is added between two sentences when their tf-idf cosine
    similarity exceeds `threshold`.  Relies on the module-level
    `tokenized_words` dict populated by tokenize().
    """
    g = nx.Graph() # initialize an undirected graph
    g.add_nodes_from(nodes)
    nodePairs = list(itertools.combinations(nodes, 2))
    # add edges to the graph (weighted by cosine similarity)
    for pair in nodePairs:
        node1 = pair[0]
        node2 = pair[1]
        s1=tokenized_words[node1]
        s2=tokenized_words[node2]
        simval = tf_idf_calculation1(s1, s2, idf)
        if simval > threshold:
            g.add_edge(node1, node2, weight=simval)
    return g
''' Function to calculate key sentences
Input:
Graph
Returns
-------
keysentences : list
list of nodes with sorted in descending order of pagerank'''
def get_keysentences(graph):
    """Rank the graph's sentences by PageRank score, highest first."""
    # weight is the similarity value obtained from the idf_modified_cosine
    calculated_page_rank = clculate_pagerank(graph, weight='weight')
    # most important sentences in descending order of pagerank
    keysentences = sorted(calculated_page_rank, key=calculated_page_rank.get, reverse=True)
    return keysentences
'''Pagerank specific functionalities'''
#Power iteration method for calculating pagerank
def clculate_pagerank(G, alpha=0.85, max_iter=100, tol=1.0e-6,weight='weight'):
    """Weighted PageRank by power iteration (name is a typo of "calculate",
    kept for existing callers).

    NOTE(review): if the iteration does not converge within max_iter this
    implicitly returns None, which sorted() in the caller cannot handle --
    networkx raises PowerIterationFailedConvergence here instead.
    """
    if not G.is_directed():
        D = G.to_directed()
    else:
        D = G
    # Create a copy in (right) stochastic form
    W = nx.stochastic_graph(D, weight=weight)
    N = W.number_of_nodes()
    # x: current rank vector; p: uniform personalization vector.
    x = dict.fromkeys(W, 1.0 / N)
    p = dict.fromkeys(W, 1.0 / N)
    dangling_weights = p
    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
    # power iteration: make up to max_iter iterations
    for _ in range(max_iter):
        xlast = x
        x = dict.fromkeys(xlast.keys(), 0)
        # Mass from dangling nodes is redistributed uniformly.
        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
        for n in x:
            for nbr in W[n]:
                x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
            x[n] += danglesum * dangling_weights[n] + (1.0 - alpha) * p[n]
        # check convergence, l1 norm
        err = sum([abs(x[n] - xlast[n]) for n in x])
        if err < N*tol:
            return x
'''Function to read file, do preprocessing and print/write summary'''
def readFile():
    """Summarise the corpus: parse <TEXT> blocks, rank sentences with
    TextRank, print the top 12 and write up to ~250 words to a file.
    """
    sentenceList = []
    wordTokenList = []
    wordsList = []
    docIDListSent = {}
    list_doc = os.listdir("./Assignement2_IR/Topic2")
    # stop_words = set(stopwords.words('english'))
    i = 0
    for doc in list_doc:
        file_doc = open("./Assignement2_IR/Topic2/" + str(doc), "r", encoding="utf8")
        # Keep only the body between <TEXT> and </TEXT>.
        data = file_doc.read()
        data = data.split('<TEXT>')
        data = data[1].split('</TEXT>')
        # Remove XML Tags
        data = re.sub('<[^<]+>', "", data[0])
        data=data.strip()
        # Tokenize the data into sentences and words
        sent_tokens, word_tokens = tokenize(data)
        for sent in sent_tokens:
            sentenceList.append(sent)
        for word in word_tokens:
            wordTokenList.append(word)
        for word in word_tokenize(data):
            wordsList.append(word)
        docIDListSent[i] = wordTokenList
        i = i + 1
    # Calculate the idf score
    # NOTE(review): N is hard-coded to the corpus size -- keep in sync
    # with the number of files in the Topic2 directory.
    N = 25
    idfScore = idfScoreCalculation(wordsList,N)
    print(str("here"))
    g = build_graph(sentenceList, 0.3, idfScore)
    keysentences = get_keysentences(g)
    print ("Printing Top 12 Key sentences:--------------------------\n")
    for sent in keysentences[:12]:
        print (str(sent) + "\n")
    file = open("./Assignement2_IR/Summaries/Topic20.3TR.txt", "w")
    #Break if more than 250 words
    count = 0
    for sent in keysentences[:12]:
        file.write(str(sent) + "\n")
        #limit to 250 words
        wordToken = word_tokenize(sent)
        count = count+ len(wordToken)
        if count > 250:
            break;
    file.close()
if __name__ == "__main__":
readFile() | true |
0de35fa3441ef0f26aef4fd8cf388b07d2b24762 | Python | mrtwon/ShopScript | /DatabaseLogicJobs.py | UTF-8 | 1,732 | 3.0625 | 3 | [] | no_license | import sqlite3
import DatabaseQuery
class DBJobClass:
    """Persistence helper for the `job` table (order summaries).

    SECURITY: string-formatted SQL is kept because MainDatabaseQuery only
    accepts a raw query string, but single quotes in the summary are now
    escaped so user text can no longer break out of the literal
    (SQL-injection hardening; a parameterized API would be the real fix).
    """
    db = DatabaseQuery.MainDatabaseQuery()
    queryCreate = "CREATE TABLE job(id integer primary key, summery text)"
    queryUpdateJob = "INSERT INTO job(summery) VALUES('{}')"
    queryGetData = "SELECT summery FROM job ORDER BY id DESC LIMIT 1"
    queryGetID = "SELECT id FROM job ORDER BY id DESC LIMIT 1"
    queryGetAll = "SELECT id, summery FROM job"
    queryDelete = "DELETE FROM job"
    queryCount = "SELECT COUNT(*) FROM job"
    def setData(self, summery):
        """Insert a new job summary; always returns True."""
        # Escape single quotes per the SQL standard ('' inside a literal).
        query = self.queryUpdateJob.format(str(summery).replace("'", "''"))
        print(query)
        self.db.queryDB(query)
        return True
    def getData(self):
        """Return the latest job summary, or a "no jobs" message."""
        query = self.queryGetData
        row = self.db.queryAndAnswerDB(query)
        if (len(row) > 0):
            return "👔 Последняя заявка\n\n" + row[0][0]
        return "👔 Заявок нет"
    def getAllData(self):
        """Return every job formatted as one message."""
        row = self.db.queryAndAnswerDB(self.queryGetAll)
        if len(row) == 0:
            return "👔 Заявок нет"
        # Renamed from `list` to stop shadowing the builtin.
        text = "💼 Все заявки 💼\n"
        for item in row:
            text += "\n\n👔 Заявка #{}\n\n{}\n\n".format(str(item[0]), item[1])
        return text
    def getID(self):
        """Return the id of the newest job, or 0 when the table is empty."""
        query = self.queryGetID
        row = self.db.queryAndAnswerDB(query)
        if (len(row) > 0):
            return row[0][0]
        return 0
    def clearData(self):
        """Delete every job; returns a status message either way."""
        row = self.db.queryAndAnswerDB(self.queryCount)
        if not int(row[0][0]) > 0:
            return "⚠️ Нет данных для удаление"
        self.db.queryDB(self.queryDelete)
        return "✅ Данные успешно удаленны."
| true |
e4a29b11f6b9b53acecdc85ece9225d4fca9379f | Python | plahteenlahti/tiras20 | /fliptwo.py | UTF-8 | 271 | 3.25 | 3 | [] | no_license | from collections import deque
def solve(n, k):
    """Simulate k rounds where the two front cards of 1..n move to the
    back in swapped order; return the card left in front."""
    cards = deque(range(1, n + 1))
    for _ in range(k):
        first = cards.popleft()
        second = cards.popleft()
        cards.append(second)
        cards.append(first)
    return cards[0]
# Sanity checks with known expected values.
if __name__ == "__main__":
    print(solve(4,3)) # 4
    print(solve(12,5)) # 11
print(solve(99,555)) # 11 | true |
1e4dfd5976b2642e94fb56beee9c408637927318 | Python | BahaNordi/TicTacPillClassification | /data_loader/tictac_loader.py | UTF-8 | 1,763 | 2.5625 | 3 | [] | no_license | import os
import numpy as np
# import scipy.misc as misc
import torch
from torch.utils import data
from train_utils.train_config import config
from PIL import Image
class TicTacLoader(data.Dataset):
    """Dataset of pill images listed in a text file; the label is 1 when
    the image path contains "defect", else 0.
    """
    def __init__(self, root_dir, tictac_file_list, augmentations=None, transform_image=None):
        self.augmentations = augmentations
        self.transform_image = transform_image
        # Per-pixel mean used by transform() for normalisation.
        self.mean = config.mean_val
        if root_dir:
            self.root = root_dir
        else:
            self.root = os.getcwd()
        self.file_list_dir = tictac_file_list
        # line.rstrip remove \n enter from the end of file
        # NOTE(review): open() here is never closed explicitly; wrap in
        # `with` to avoid a leaked file handle.
        self.ids = [os.path.join(self.root, line.rstrip()) for line in open(self.file_list_dir)]
    def __len__(self):
        """Number of images in the file list."""
        return len(self.ids)
    def __getitem__(self, index):
        """Return (image tensor, label) for the given index."""
        img_path = self.ids[index]
        label = 0
        if "defect" in img_path:
            label = 1
        img = Image.open(img_path)
        img = np.array(img, dtype=np.uint8)
        # NOTE(review): this existence check runs after Image.open has
        # already read the file, so it can never fire first -- confirm
        # whether it should precede the open.
        if not os.path.isfile(img_path) or not os.path.exists(img_path):
            raise Exception("{} is not a file.".format(img_path))
        if self.augmentations is not None:
            img = self.augmentations(img)
        if self.transform_image:
            img = self.transform(img)
        img = torch.from_numpy(img).float()
        return img, label
    def transform(self, img):
        # Mean-subtract, scale to [0, 1] and add a leading channel axis.
        img = img.astype(float)
        img -= self.mean
        img /= 255.0
        img = img[None, :, :]
        return img
# Smoke test: build a loader against the author's local paths.
if __name__ == "__main__":
    root_dir1 = '/home/baha/codes/tictac/data/'
    tictac_file_list1 = '/home/baha/codes/tictac/data/list_of_files.txt'
    my_loader = TicTacLoader(root_dir1, tictac_file_list1)
| true |
eb04c695161e0acf7dbc54505c1384735742fb6a | Python | weishengteo/storage | /FIT1045/w11labtasks/w11t1.py | UTF-8 | 2,534 | 3.46875 | 3 | [] | no_license | #Name = Teo Wei Sheng
#Student ID = 29800668
#Date = 28 May 2019
#Workshop 11
#Task 1
heap = [1,3,2,7,4,8]
def min_child(v, heap):
    """Return the index of node v's smaller child, or None for a leaf.

    BUG FIX: the original raised IndexError when v had a left child but no
    right child position in range... in fact when v was a leaf, and fell
    through returning None when both children were equal.  Ties now go to
    the left child and leaves return None explicitly.
    """
    left = 2*v + 1
    right = 2*v + 2
    if left > len(heap) - 1:
        return None          # v is a leaf
    if right > len(heap) - 1:
        return left          # only a left child exists
    # Two children: pick the smaller, preferring left on ties.
    if heap[left] <= heap[right]:
        return left
    return right
def insert(heap, item):
    """Append `item` to the min-heap and sift it up into position.

    The list is modified in place and also returned.
    """
    heap.append(item)
    child = len(heap) - 1
    while child > 0:
        parent = (child - 1) // 2
        if heap[parent] <= heap[child]:
            break  # heap property restored
        heap[parent], heap[child] = heap[child], heap[parent]
        child = parent
    return heap
def extract_min(heap):
    """Pop and return the minimum element, restoring the min-heap.

    Returns (min_value, heap); the list is modified in place.

    BUG FIX: the original sift-down skipped the swap whenever a parent and
    child compared equal (e.g. parent == left but parent > right), leaving
    the heap invariant broken; it also crashed opaquely on an empty heap.
    """
    if not heap:
        raise IndexError("extract_min from an empty heap")
    # Move the last element to the root and pop the old minimum.
    heap[0], heap[-1] = heap[-1], heap[0]
    extracted = heap.pop()
    v = 0
    size = len(heap)
    # Standard sift-down: swap with the smallest child until in place.
    while True:
        left = 2*v + 1
        right = 2*v + 2
        smallest = v
        if left < size and heap[left] < heap[smallest]:
            smallest = left
        if right < size and heap[right] < heap[smallest]:
            smallest = right
        if smallest == v:
            break
        heap[v], heap[smallest] = heap[smallest], heap[v]
        v = smallest
    return extracted, heap
def heapsort(items):
    """Heap-sort `items` into a new ascending list.

    NOTE: like the original, this consumes `items` (the input is emptied).
    """
    heap_buf = []
    while items:
        insert(heap_buf, items.pop())
    result = []
    while heap_buf:
        smallest, heap_buf = extract_min(heap_buf)
        result.append(smallest)
    return result
| true |
e60843e589078155424101371ae1d596e3107d15 | Python | CraigLangford/Python-Mini-Projects | /Phone-Book/phonebook_project/phonebook_project/phonebook/tests.py | UTF-8 | 1,325 | 2.71875 | 3 | [
"MIT"
] | permissive | from django.test import TestCase
from django.core.urlresolvers import resolve
from .models import Book, Contact
class TableAndItemsModelTest(TestCase):
    # Verifies that Book and Contact rows round-trip through the ORM and
    # that the Contact -> Book foreign key is preserved.
    def test_saving_and_retrieving_items(self):
        book = Book()
        book.save()
        first_contact = Contact()
        first_contact.first_name = "Charlie"
        first_contact.last_name = "Chaplin"
        first_contact.phone_number = "+337312345678"
        first_contact.book = book
        first_contact.save()
        saved_book = Book.objects.first()
        self.assertEqual(saved_book, book)
        second_contact = Contact()
        second_contact.first_name = "Chelsea"
        second_contact.last_name = "Football"
        second_contact.phone_number = "+331232345678"
        second_contact.book = book
        second_contact.save()
        saved_contacts = Contact.objects.all()
        self.assertEqual(saved_contacts.count(), 2)
        first_saved_contact = saved_contacts[0]
        second_saved_contact = saved_contacts[1]
        self.assertEqual(first_saved_contact.first_name, "Charlie")
        self.assertEqual(first_saved_contact.book, book)
        self.assertEqual(second_saved_contact.phone_number, "+331232345678")
        self.assertEqual(second_saved_contact.book, book)
class PhonebookViewTest(TestCase):
| true |
d91a644ba53fca954d79a453b908f5bfefa4952a | Python | cosmonautd/STSIM | /vehicle.py | UTF-8 | 685 | 3.109375 | 3 | [] | no_license | class Vehicle:
def __init__(self, G, initial, terminal):
self.map = G
self.initial = initial
self.terminal = terminal
self.current = initial
self.path = None
self.next = None
def set_path(self, path):
self.path = path
self.next = 1
def get_initial(self):
return self.initial
def get_terminal(self):
return self.terminal
def get_current(self):
return self.current
def get_path(self):
return self.path
def update_state(self):
if self.next < len(self.path):
self.current = self.map.node[self.path[self.next]]
self.next += 1
| true |
d8687c506503dfd70bc816802646b7f8d4f30fe4 | Python | Sandylin520/Python_Learning | /L08_OOP/8.1_oop.py | UTF-8 | 1,695 | 4.59375 | 5 | [] | no_license | #EX_1
class Sample():
pass
my_sample = Sample()
print(type(my_sample))#<class '__main__.Sample'>
#EX_2
class Dog():
def __init__(self,breed):
self.breed = breed
my_dog = Dog(breed='Lab')
print(type(my_dog))
print(my_dog.breed)#Lab
#EX_3
class Cat:
#Class object attribute
#Same for any instance of a class
species = "mammal"
def __init__(self,breed,name,spots):
#Attributes
#We take in the argument
#Assign it using self.attribute_name
self.breed = breed
self.name = name
#Expect boolean True/False
self.spots = spots
#Operations/Actions --->Methods
def bark(self,number):
print("WOOF!My name is {} and the number is {}".format(self.name,number))
#注意format 是要寫self.name 而不是傳進物件的name
#注意這邊number不需要寫self.number,是因為是直接從method的參數傳來,而不是物件本身
my_cat = Cat("Golden","Sammy",False)
print(type(my_dog))#<class '__main__.Dog'>
print(my_cat.breed)#Golden
print(my_cat.name)#Sammy
print(my_cat.spots)#False
print(my_cat.species)#mammal
print(my_cat.bark(5))
#EX_4
class Circle:
#class object attribute
pi = 3.14
def __init__(self,radius=1):
self.radius = radius
self.area = radius * radius * Circle.pi # attribute (ex: area)並不一定要是從()傳來的
#Method
def get_circumference(self):
#return self.radius * self.pi * 2
#因為pi是class attribute,寫Circle.pi較清楚
return self.radius * Circle.pi *2
my_circle = Circle(30)
print(my_circle.pi)#3.14
print(my_circle.radius)#30
print(my_circle.get_circumference())#188.4 | true |
b950f3a4a9af892d13596394f88816c03c6b35d0 | Python | Robin-Wujw/leetcode_py | /005.py | UTF-8 | 1,980 | 3.8125 | 4 | [] | no_license | # class Solution:
# def __init__(self):
# self.longestSize = 0
# self.longestStart = 0
# def longestPalindrome(self, s):
# """
# :type s: str
# :rtype: str
# """
# for index, value in enumerate(s):
# self.checkOddPalindrome(s, index)
# self.checkEvenPalindrome(s, index)
# return s[self.longestStart:self.longestStart + self.longestSize + 1]
# def checkOddPalindrome(self, s, index):
# start = index
# end = index
# while start >= 1 and end < len(s) - 1 and s[start - 1] == s[end + 1]:
# start -= 1
# end += 1
# if end - start > self.longestSize:
# self.longestSize = end - start
# self.longestStart = start
# def checkEvenPalindrome(self, s, index):
# start = index
# end = min(index + 1, len(s) - 1)
# while start >= 1 and end < len(s) - 1 and s[start - 1] == s[end + 1] and s[start] == s[end]:
# start -= 1
# end += 1
# if end - start > self.longestSize and s[start] == s[end]:
# self.longestSize = end - start
# self.longestStart = start
class Solution:
#采用双指针法,从中央向两边延伸
'''
s = "babad" 输出 bab
s = "cbbd" 输出 bb
'''
def __init__(self):
self.left = 0
self.right = 0
self.maxlen = 0
def longestPalindrome(self,s):
for i in range(len(s)):
self.extend(s,i,i,len(s))
self.extend(s,i,i+1,len(s))
return s[self.left:self.right+1]
def extend(self,s,i,j,len):
while(i>=0 and j < len and s[i]==s[j]):
if(j-i+1 > self.maxlen):
self.left = i
self.right = j
self.maxlen = j-i+1
i -= 1
j += 1
if __name__ == "__main__":
s = Solution()
st = "babad"
print(s.longestPalindrome(st))
| true |
248f681b381123bd16c2dbf3900fda01d9f8d1eb | Python | bsolisduran/MyLogbookApp | /StatisticsFrame.py | UTF-8 | 7,692 | 3.109375 | 3 | [] | no_license | import datetime as dt
import tkinter as tk
from globals import *
from models.HeaderFrame import HeaderFrame
from models.LogbookDataFrame import LogbookDataFrame
class StatisticsFrame(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
# Data needed (TODO, pass as an arg):
self.timeFilterVar = tk.StringVar(value='All Time')
dfObj = LogbookDataFrame("data/sentbook_8anu.csv")
df = dfObj.df
yearsList = dfObj.get_yearsList()
statsData = dfObj.get_statsData(df)
# Main Frame of the class:
statsFrame = tk.Frame(self, width=bodyWidth, height=400)
statsFrame.pack()
# Widgets in Navigation Frame:
# -- Header Frame:
headerText = "STATISTICS "
headerFrame = HeaderFrame(statsFrame, headerText)
headerFrame.grid(row=0, column=0, sticky='nsew')
# -- Radiobuttons Frame:
radioFrame = tk.Frame(statsFrame, bg=navGreyColor, height=100)
radioFrame.grid(row=1, column=0, sticky='nsew', pady=5, padx=5)
# -- Table Frame:
tableFrame = tk.Frame(statsFrame)
tableFrame.grid(row=2, column=0, sticky='nsew')
self.getRadioButtons(radioFrame, self.timeFilterVar, dfObj, tableFrame)
self.getStatisticsTable(tableFrame, statsData)
def getRadioButtons(self, parent, variable, dataframeObject, TableParent):
df = dataframeObject.df
yearsList = dataframeObject.get_yearsList()
rankingLabel = tk.Label(parent, text='Ranking Routes: ',
fg=whiteColor, bg=navGreyColor, font=navBoldFont)
annualRb = tk.Radiobutton(parent, text='12 Months', variable=variable, value='12 Months', fg=whiteColor, bg=navGreyColor, font=navFont,
command=lambda: self.update_frame(TableParent, variable.get(), dataframeObject))
alltimeRb = tk.Radiobutton(parent, text='All Time', variable=variable, value='All Time', fg=whiteColor, bg=navGreyColor, font=navFont,
command=lambda: self.update_frame(TableParent, variable.get(), dataframeObject))
yearsLabel = tk.Label(parent, text='\tSelect year: ',
fg=whiteColor, bg=navGreyColor, font=navBoldFont)
rankingLabel.pack(side=tk.LEFT, pady=15, padx=2)
annualRb.pack(side=tk.LEFT, padx=2)
alltimeRb.pack(side=tk.LEFT, padx=2)
yearsLabel.pack(side=tk.LEFT, padx=2)
for year in yearsList:
radiobtn = tk.Radiobutton(parent, text=str(year), variable=variable, value=year, fg=whiteColor, bg=navGreyColor, font=navFont,
command=lambda: self.update_frame(TableParent, variable.get(), dataframeObject))
radiobtn.pack(side=tk.LEFT, padx=2)
def update_frame(self, parent, var, dataframeObject):
if var == '12 Months':
dataframe = dataframeObject.annualdf
elif var == 'All Time':
dataframe = dataframeObject.df
else:
iniDate = dt.datetime(int(var), 1, 1)
endDate = dt.datetime(int(var), 12, 31)
dataframe = dataframeObject.get_yeardf(iniDate, endDate)
# update the list of the data for the table plot:
newStatsData = dataframeObject.get_statsData(dataframe)
# update the table plot:
for widget in parent.winfo_children():
widget.destroy()
self.getStatisticsTable(parent, newStatsData)
def getStatisticsTable(self, parent, dataList):
# get the total number of routes and the maximum routes of a grade:
numRoutes = 0
numRoutesxGrade = []
for i in range(0, len(dataList)):
numRoutes += dataList[i][4]
numRoutesxGrade.append(dataList[i][4])
maxRoutesxGrade = max(numRoutesxGrade)
self.getHeaderStatisticsTable(parent, numRoutes)
ind = 2
for i in range(0, len(dataList)):
if ind % 2 != 0:
bgColor = whiteColor
else:
bgColor = lightGreyColor
self.getRowStatisticsTable(
parent, dataList[i], ind, maxRoutesxGrade, bgColor)
ind += 1
def getHeaderStatisticsTable(self, parent, totalRoutes):
RPimg = tk.PhotoImage(file="images/RPicon.gif")
FLimg = tk.PhotoImage(file="images/Ficon.gif")
OSimg = tk.PhotoImage(file="images/OSicon.gif")
# row 0: icons and total number of routes:
OSicnLabel = tk.Label(parent, image=OSimg, width=90)
OSicnLabel.image = OSimg
FLicnLabel = tk.Label(parent, image=FLimg, width=90)
FLicnLabel.image = FLimg
RPicnLabel = tk.Label(parent, image=RPimg, width=90)
RPicnLabel.image = RPimg
totalNumLabel = tk.Label(
parent, text=totalRoutes, font=totalFont, width=7)
# row 1: labels of the icons:
OSlabel = tk.Label(parent, text='ONSIGHT', fg=OSColor, font=headerFont)
FLlabel = tk.Label(parent, text='FLASH', fg=FLColor, font=headerFont)
RPlabel = tk.Label(parent, text='REDPOINT',
fg=RPColor, font=headerFont)
totalLabel = tk.Label(parent, text='TOTAL', font=headerFont)
# empty label to grid the bar plot rows and grades
emptyLabel = tk.Label(parent, text=' ')
# Layout:
OSicnLabel.grid(row=0, column=0, sticky='nsew')
FLicnLabel.grid(row=0, column=1, sticky='nsew')
RPicnLabel.grid(row=0, column=2, sticky='nsew')
totalNumLabel.grid(row=0, column=3, sticky='nsew')
OSlabel.grid(row=1, column=0, sticky='nsew')
FLlabel.grid(row=1, column=1, sticky='nsew')
RPlabel.grid(row=1, column=2, sticky='nsew')
totalLabel.grid(row=1, column=3, sticky='nsew')
emptyLabel.grid(row=0, column=4, rowspan=2,
columnspan=2, sticky='nsew')
def getRowStatisticsTable(self, parent, dataRow, index, maxRoutesInAGrade, bg):
# Row numbers:
for column in range(0, len(dataRow)-1):
label = tk.Label(
parent, text=dataRow[column+1], bg=bg, font=numberFont)
label.grid(row=index, column=column, sticky='nsew')
# Grade label:
gradeLabel = tk.Label(
parent, text=dataRow[0], bg=whiteColor, font=gradeFont)
gradeLabel.grid(row=index, column=4, sticky='nsew', padx=15)
# Bar Plot:
barCanvas = tk.Canvas(parent, width=450, height=30, bg=bg)
barCanvas.grid(row=index, column=5, sticky='nsew')
xcenter = barCanvas.winfo_reqwidth() / 2
ycenter = barCanvas.winfo_reqheight() / 2
barLength = (dataRow[3] + dataRow[1] + dataRow[2]
) / maxRoutesInAGrade * xcenter
barCanvas.create_rectangle(xcenter - barLength, ycenter - 7, xcenter + barLength, ycenter + 7,
fill=RPColor, outline=RPColor)
if dataRow[2] != 0:
barLength = (dataRow[1] + dataRow[2]) / maxRoutesInAGrade * xcenter
barCanvas.create_rectangle(xcenter - barLength, ycenter - 7, xcenter + barLength, ycenter + 7,
fill=FLColor, outline=FLColor)
if dataRow[1] != 0:
barLength = dataRow[1] / maxRoutesInAGrade * xcenter
barCanvas.create_rectangle(xcenter - barLength, ycenter - 7, xcenter + barLength, ycenter + 7,
fill=OSColor, outline=OSColor)
def get_timeFilterVar(self):
return self.timeFilterVar.get() | true |
1b4599299049c790ba1a7d87c8925af80b2645e5 | Python | YiSoJeong/Algorithm_Python | /SW/List/4837_부분집합의 합.py | UTF-8 | 655 | 3.140625 | 3 | [] | no_license | import sys
sys.stdin = open('../String/sample_input.txt', 'r')
# 분류 : Greedy
# 발상 : 전체 부분집합 중에서 n개인 것 골라야 해서
# 변형 : 전체를 탐색하되 조건이 맞는 경우의 수만 비교
# 조합 : 비트 연산자 -> 모든 부분 집합 구할 때
T = int(input())
for t in range(1, T+1):
n, k = map(int, input().split())
arr = [i for i in range(1, 13)]
cnt = 0
for i in range(1 << 12):
sub = []
for j in range(12):
if i & (1 << j):
sub.append(arr[j])
if len(sub) == n and sum(sub) == k:
cnt += 1
print('#{} {}'.format(t, cnt))
| true |
9eec159e7b30cbb87cb7d7f69fbe7d4d40e1377f | Python | ChipoXD/ProjectEuler | /Problems/Problem1-20/Problem17.py | UTF-8 | 2,214 | 3.640625 | 4 | [] | no_license | # ProblemURL: https://projecteuler.net/problem=17
def numberspelled(argin):
onesDigitSpell = {0: "", 1: "one", 2: "two", 3: "three", 4: "four", 5: "five", 6: "six", 7: "seven", 8: "eight",
9: "nine"}
tensDigitSpell = {0: "", 1: "teen", 2: "twenty", 3: "thirty", 4: "forty", 5: "fifty", 6: "sixty", 7: "seventy",
8: "eighty", 9: "ninety"}
output = ""
start = 0
numberList = [int(x) for x in str(argin)]
if len(numberList) >= 2:
if numberList[-2] == 1 and numberList[-1] == 0:
output = "ten"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 1:
output = "eleven"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 2:
output = "twelve"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 3:
output = "thirteen"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 4:
output = "fourteen"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 5:
output = "fifteen"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 6:
output = "sixteen"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 7:
output = "seventeen"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 8:
output = "eighteen"
start = 2
elif numberList[-2] == 1 and numberList[-1] == 9:
output = "nineteen"
start = 2
for i in range(1 + start, len(numberList) + 1, 1):
if i == 1:
output = output + onesDigitSpell[numberList[-i]]
if i == 2:
output = tensDigitSpell[numberList[-i]] + output
if i == 3:
if numberList[-1] != 0 or numberList[-2] != 0:
output = "and" + output
output = onesDigitSpell[numberList[-i]] + "hundred" + output
if i == 4:
output = onesDigitSpell[numberList[-i]] + "thousand"
return output
longstring = ""
for i in range(1, 1001):
longstring += numberspelled(i)
print(len(longstring))
| true |
95df91b656f51211f1870cc16f0f8474ff84990b | Python | giriprasad23/Cloud-Projects | /Cloud_Assignment_1/src/project1/all_stuff.py | UTF-8 | 3,340 | 2.625 | 3 | [] | no_license | '''
Created on Feb 9, 2015
@author: Puneeth U Bharadwaj
'''
import gnupg
from pprint import pprint
def exportkeys():
gpg = gnupg.GPG()
ascii_armored_public_keys = gpg.export_keys('66C8F2C8')
ascii_armored_private_keys = gpg.export_keys('66C8F2C8', True)
with open('pub_keys.asc', 'w') as f:
f.write(ascii_armored_public_keys)
f.write(ascii_armored_private_keys)
gpg.send_keys('keyserver.ubuntu.com')
def importkeys(filename, servername, keyid):
gpg = gnupg.GPG()
if filename:
key_data = open(filename, 'r').read()
import_result = gpg.import_keys(key_data)
if servername:
res = gpg.recv_keys(servername, keyid)
pprint(res.results)
def listkeys():
gpg = gnupg.GPG()
pub_key = gpg.list_keys()
print 'public key'
pprint (pub_key)
pri_key = gpg.list_keys(True)
print 'private key'
pprint(pri_key)
def enc_string(unencrypted_string ):
gpg = gnupg.GPG()
encrypted_data = gpg.encrypt(unencrypted_string, 'recipient')
encrypted_string = str(encrypted_data)
print 'encrypted string'
print 'ok: ', encrypted_data.ok
print 'status: ', encrypted_data.status
print 'stderr: ', encrypted_data.stderr
print 'unencrypted_string: ', unencrypted_string
print 'encrypted_string: ', encrypted_string
return encrypted_string
def dec_string(encrypted_string):
gpg = gnupg.GPG()
encrypted_data= str(encrypted_string)
decrypted_data = gpg.decrypt(encrypted_data, passphrase='M@ster2016')
print 'decrypted string'
print 'ok: ', decrypted_data.ok
print 'status: ', decrypted_data.status
print 'stderr: ', decrypted_data.stderr
print 'decrypted string: ', decrypted_data.data
def enc_file(infile):
gpg = gnupg.GPG()
with open(infile, 'rb') as f:
status = gpg.encrypt_file(f,
recipients=['recipient],
output='leo_enc.jpg',
always_trust=True)
print 'encrypted file'
print 'ok: ', status.ok
print 'status: ', status.status
print 'stderr: ', status.stderr
print 'decrypted string: ', status.data
return 'pub_enc.txt.gpg'
def dec_file(infile):
gpg = gnupg.GPG()
with open(infile, 'rb') as f:
status = gpg.decrypt_file(f, passphrase='M@ster2016', output='Image0010.jpg')
print 'decrypted file'
print 'ok: ', status.ok
print 'status: ', status.status
print 'stderr: ', status.stderr
print 'decrypted string: ', status.data
def sign_file(infile):
gpg = gnupg.GPG()
with open(infile, 'rb') as f:
status = gpg.sign_file(f, passphrase='M@ster2016', detach=True, output=infile+'.gpg')
def verify_file(infile, signfile):
gpg = gnupg.GPG()
with open(signfile, 'rb') as f:
status = gpg.verify_file(f, infile)
if status:
print 'verified'
else:
print 'unverified'
# exportkeys()
# importkeys(None, 'keyserver.ubuntu.com', '0x31BC35BA')
# listkeys()
# encrypted_string = enc_string('cloud computing')
# decrypted_string = dec_string(encrypted_string)
# encrypted_file = enc_file('leodec.jpg')
decrypted_file = dec_file('Image0010.jpg.enc')
# sign_file('a.jpg')
# verify_file('a.jpg', 'a.jpg.gpg')
| true |
bf52369bb71e84dbc644e3ca11f30d2cd8adc171 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_337/ch2_2020_03_07_22_57_31_327752.py | UTF-8 | 103 | 2.6875 | 3 | [] | no_license | def calcula_velocidade_media (distancia, tempo):
velocidade = distancia/tempo
return velocidade | true |
309fb9fb149db56143147d2376f71699f3e071e0 | Python | ac998/Machine_failure_prediction | /models/lstm_utils.py | UTF-8 | 8,449 | 2.671875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import sklearn
from sklearn import metrics
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
import keras
import keras.backend as K
from keras.layers.core import Activation
from keras.layers import Dense , LSTM, Dropout
from keras.models import Sequential, load_model
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from pylab import rcParams
import math
def prepare_train_dataset(data):
df = data.copy()
# drop columns op_set_3, sm_1, sm_5, sm_10, sm_16, sm_18, sm_19 as they have constant values (std = 0)
# drop op_set_1, op_set_2 because they have low correlation with the output.
# drop sm_14 because it's highly correlated with sm_9
df.drop(columns=['op_set_3', 'sm_1', 'sm_5', 'sm_10', 'sm_16', 'sm_18', 'sm_19','sm_14', 'op_set_1', 'op_set_2'],
inplace=True)
rul = df.groupby('unit_number')['time_in_cycles'].max().reset_index()
rul = pd.DataFrame(rul)
rul.columns = ['unit_number', 'last_cycle']
df = df.merge(rul, on=['unit_number'], how='left')
df['rul'] = df['last_cycle'] - df['time_in_cycles']
df.drop(columns=['last_cycle'], inplace=True)
return df[df['time_in_cycles'] > 0]
def prepare_test_dataset(data):
df = data.copy()
# drop features not used in train set
df.drop(columns=['op_set_3', 'sm_1', 'sm_5', 'sm_10', 'sm_16', 'sm_18', 'sm_19', 'sm_14', 'op_set_1', 'op_set_2'],
inplace=True)
return df
def gen_sequence(id_df, seq_length, seq_cols):
"""
Only sequences that meet the window-length are considered, no padding is used. This means for testing
we need to drop those which are below the window-length. An alternative would be to pad sequences so that
we can use shorter ones
"""
# for one id, put all the rows in a single matrix
data_matrix = id_df[seq_cols].values
num_elements = data_matrix.shape[0]
# Iterate over two lists in parallel.
# For example id1 have 192 rows and sequence_length is equal to 50
# so zip iterate over two following list of numbers (0,112),(50,192)
# 0 50 -> from row 0 to row 50
# 1 51 -> from row 1 to row 51
# 2 52 -> from row 2 to row 52
# ...
# 111 191 -> from row 111 to 191
for start, stop in zip(range(0, num_elements-seq_length), range(seq_length, num_elements)):
yield data_matrix[start:stop, :]
def gen_labels(id_df, seq_length, label):
data_matrix = id_df[label].values
num_elements = data_matrix.shape[0]
# Remove the first seq_length labels because for one id the first sequence of seq_length size have as target
# the last label (the previus ones are discarded).
# All the next id's sequences will have associated step by step one label as target.
return data_matrix[seq_length:num_elements, :]
def lstm_preprocessing(raw_train_df, raw_test_df, raw_truth_df):
train_df = raw_train_df
test_df = raw_test_df
truth_df = raw_truth_df
# Normalize columns except [id , cycle, rul]
cols_normalize = train_df.columns.difference(['unit_number','time_in_cycles', 'rul'])
# MinMax normalization (from 0 to 1)
min_max_scaler = MinMaxScaler()
norm_train_df = pd.DataFrame(min_max_scaler.fit_transform(train_df[cols_normalize]),
columns=cols_normalize,
index=train_df.index)
# Train set
join_df = train_df[train_df.columns.difference(cols_normalize)].join(norm_train_df)
train_df = join_df.reindex(columns = train_df.columns)
# Test set
norm_test_df = pd.DataFrame(min_max_scaler.transform(test_df[cols_normalize]),
columns=cols_normalize,
index=test_df.index)
test_join_df = test_df[test_df.columns.difference(cols_normalize)].join(norm_test_df)
test_df = test_join_df.reindex(columns = test_df.columns)
test_df = test_df.reset_index(drop=True)
# We use the ground truth dataset to generate labels for the test data.
# generate column max for test data
rul = pd.DataFrame(test_df.groupby('unit_number')['time_in_cycles'].max()).reset_index()
rul.columns = ['unit_number','max']
truth_df.columns = ['more']
truth_df['unit_number'] = truth_df.index + 1
truth_df['max'] = rul['max'] + truth_df['more'] # adding true-rul vlaue + max cycle of test data set w.r.t M_ID
truth_df.drop('more', axis=1, inplace=True)
# generate RUL for test data
test_df = test_df.merge(truth_df, on=['unit_number'], how='left')
test_df['RUL'] = test_df['max'] - test_df['time_in_cycles']
test_df.drop('max', axis=1, inplace=True)
## pick a large window size of 50 cycles
sequence_length = 50
# pick the feature columns
sequence_cols = list(test_df.columns[:-3])
# generator for the sequences transform each id of the train dataset in a sequence
seq_gen = (list(gen_sequence(train_df[train_df['unit_number']==id], sequence_length, sequence_cols))
for id in train_df['unit_number'].unique())
# convert generated sequences to numpy array
seq_array = np.concatenate(list(seq_gen)).astype(np.float32)
# generate labels
label_gen = [gen_labels(train_df[train_df['unit_number']==id], sequence_length, ['rul']) for id in train_df['unit_number'].unique()]
label_array = np.concatenate(label_gen).astype(np.float32)
return seq_array, label_array, test_df, sequence_length, sequence_cols
def R2(y_true, y_pred):
"""Coefficient of Determination
"""
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def lstm_model(seq_array, label_array, sequence_length):
# The first layer is an LSTM layer with 100 units followed by another LSTM layer with 50 units.
# Dropout is also applied after each LSTM layer to control overfitting.
# Final layer is a Dense output layer with single unit and linear activation since this is a regression problem.
nb_features = seq_array.shape[2]
nb_out = label_array.shape[1]
model = Sequential()
model.add(LSTM(input_shape=(sequence_length, nb_features), units=100, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=nb_out))
model.add(Activation("linear"))
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['mae', R2])
print(model.summary())
return model
def test_model(lstm_test_df, model, sequence_length, sequence_cols):
# We pick the last sequence for each id in the test data
seq_array_test_last = [lstm_test_df[lstm_test_df['unit_number']==id][sequence_cols].values[-sequence_length:]
for id in lstm_test_df['unit_number'].unique() if len(lstm_test_df[lstm_test_df['unit_number']==id]) >= sequence_length]
seq_array_test_last = np.asarray(seq_array_test_last).astype(np.float32)
# Similarly, we pick the labels
y_mask = [len(lstm_test_df[lstm_test_df['unit_number']==id]) >= sequence_length for id in lstm_test_df['unit_number'].unique()]
label_array_test_last = lstm_test_df.groupby('unit_number')['RUL'].nth(-1)[y_mask].values
label_array_test_last = label_array_test_last.reshape(label_array_test_last.shape[0],1).astype(np.float32)
estimator = model
# test metrics
scores_test = estimator.evaluate(seq_array_test_last, label_array_test_last, verbose=2)
print('MAE: {}'.format(scores_test[1]))
print('\nR^2: {}'.format(scores_test[2]))
y_pred_test = estimator.predict(seq_array_test_last)
y_true_test = label_array_test_last
test_set = pd.DataFrame(y_pred_test)
# Plot in blue color the predicted data and in orange color the actual data to verify visually the accuracy of the model.
fig_verify = plt.figure(figsize=(10, 5))
plt.plot(y_pred_test)
plt.plot(y_true_test, color="orange")
plt.title('prediction')
plt.ylabel('RUL')
plt.legend(['predicted', 'actual data'], loc='upper left')
plt.show()
return scores_test[1], scores_test[2] | true |
8edf4c62c7c3011bd047b3048da0794524b9f357 | Python | eliasanzoategui/Estudo039 | /progamas/teste de progamação02.py | UTF-8 | 6,522 | 2.8125 | 3 | [] | no_license |
import random
def ativar(neuronios,entrada,limiar):
x = 0
for i in neuronios:
x += entrada*i['peso']+i['bias']
if x <= limiar:
return 0
else:
return x
def ajustes(neuronios,taxa_de_erro):
x = 0
for i in neuronios:
x += 1
return taxa_de_erro/x
neuronio1_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio2_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio3_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio4_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio5_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio6_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio7_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio8_da_camada1 = {'peso':random.random(),'bias':random.random()}
neuronio1_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio2_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio3_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio4_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio5_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio6_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio7_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio8_da_camada2 = {'peso':random.random(),'bias':random.random()}
neuronio1_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio2_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio3_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio4_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio5_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio6_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio7_da_camada3 = {'peso':random.random(),'bias':random.random()}
neuronio8_da_camada3 = {'peso':random.random(),'bias':random.random()}
camada1 = [neuronio1_da_camada1,neuronio2_da_camada1,neuronio3_da_camada1,neuronio4_da_camada1,neuronio5_da_camada1,neuronio6_da_camada1,neuronio7_da_camada1,neuronio8_da_camada1]
camada2 = [neuronio1_da_camada2,neuronio2_da_camada2,neuronio3_da_camada2,neuronio4_da_camada2,neuronio5_da_camada2,neuronio6_da_camada2,neuronio7_da_camada2,neuronio8_da_camada2]
camada3 = [neuronio1_da_camada3,neuronio2_da_camada3,neuronio3_da_camada3,neuronio4_da_camada3,neuronio5_da_camada3,neuronio6_da_camada3,neuronio7_da_camada3,neuronio8_da_camada3]
for i in range(0,10000):
valor_de_entrada = 5
valor_da_camada1 = ativar(camada1,valor_de_entrada,0)
print(valor_da_camada1)
valor_da_camada2 = ativar(camada2,valor_da_camada1,0)
print(valor_da_camada2)
valor_da_camada3 = ativar(camada3,valor_da_camada2,0)
print(valor_da_camada3)
valor_de_saida = valor_da_camada3
resultado = 10
taxa_de_erro = valor_de_saida - resultado
taxa_de_erro_da_camada3 = ajustes(camada3,taxa_de_erro)
taxa_de_erro_da_camada2 = ajustes(camada2,(taxa_de_erro_da_camada3/3)*2)
taxa_de_erro_da_camada1 = ajustes(camada1,(taxa_de_erro_da_camada2/3)*2)
for i in camada3:
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['bias'] += taxa_de_erro_da_camada3
else:
i['bias'] -= taxa_de_erro_da_camada3
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['peso'] += (taxa_de_erro_da_camada3/100)*5
else:
i['peso'] -= (taxa_de_erro_da_camada3/100)*5
for i in camada2:
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['bias'] += taxa_de_erro_da_camada2
else:
i['bias'] -= taxa_de_erro_da_camada2
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['peso'] += (taxa_de_erro_da_camada2/100)*5
else:
i['peso'] -= (taxa_de_erro_da_camada2/100)*5
for i in camada1:
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['bias'] += taxa_de_erro_da_camada1
else:
i['bias'] -= taxa_de_erro_da_camada1
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['peso'] += (taxa_de_erro_da_camada1/100)*5
else:
i['peso'] -= (taxa_de_erro_da_camada1/100)*5
comando = 'sim'
while comando == 'sim':
valor_de_entrada = float(input('digite a entrada: '))
valor_da_camada1 = ativar(camada1,valor_de_entrada,0)
print(valor_da_camada1)
valor_da_camada2 = ativar(camada2,valor_da_camada1,0)
print(valor_da_camada2)
valor_da_camada3 = ativar(camada3,valor_da_camada2,0)
print(valor_da_camada3)
valor_de_saida = valor_da_camada3
resultado = float(input('digite o resultado: '))
taxa_de_erro = valor_de_saida - resultado
taxa_de_erro_da_camada3 = ajustes(camada3,taxa_de_erro)
taxa_de_erro_da_camada2 = ajustes(camada2,(taxa_de_erro_da_camada3/3)*2)
taxa_de_erro_da_camada1 = ajustes(camada1,(taxa_de_erro_da_camada2/3)*2)
for i in camada3:
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['bias'] += taxa_de_erro_da_camada3
else:
i['bias'] -= taxa_de_erro_da_camada3
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['peso'] += (taxa_de_erro_da_camada3/100)*5
else:
i['peso'] -= (taxa_de_erro_da_camada3/100)*5
for i in camada2:
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['bias'] += taxa_de_erro_da_camada2
else:
i['bias'] -= taxa_de_erro_da_camada2
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['peso'] += (taxa_de_erro_da_camada2/100)*5
else:
i['peso'] -= (taxa_de_erro_da_camada2/100)*5
for i in camada1:
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['bias'] += taxa_de_erro_da_camada1
else:
i['bias'] -= taxa_de_erro_da_camada1
numero_aleatorio = random.randint(0,1)
if numero_aleatorio == 1:
i['peso'] += (taxa_de_erro_da_camada1/100)*5
else:
i['peso'] -= (taxa_de_erro_da_camada1/100)*5
comando = input('deseja continuar?: ')
| true |
3d3d34da9d188e9106b0880858ceddf3cb010c23 | Python | anyezhihu/Python2.7 | /project/Crawl_Linux_Cmd/crawl/Crawl_Linux_CMD_V2.py | UTF-8 | 8,042 | 2.875 | 3 | [] | no_license | #coding=utf-8
"""
相比于版本1的函数式做法,第2版采用了面向对象的方法。
"""
import chardet
import time
import re
import urllib2
import sqlite3
import sys
import os
# Record the program start time (used later to measure total run time).
Process_Begin_Time=time.time()
# Force the process-wide default charset to UTF-8 so Chinese text round-trips.
# NOTE: sys.setdefaultencoding is deleted by site.py at startup; reload(sys)
# restores it. This is a Python 2-only hack and can mask real encoding bugs.
reload(sys)
sys.setdefaultencoding('utf-8')
print "当前IDE环境默认字符集:%s" % sys.getdefaultencoding()
# Database access module: thin helper around a sqlite3 database file.
class DB(object):
    """Lightweight sqlite3 helper used by the crawler.

    :param db:    path of the sqlite database file
    :param sql:   SQL statement executed by opertaor_db() (optional)
    :param table: table this instance works on (optional, not used yet)
    """
    def __init__(self, db, sql=None, table=None):
        # Dunder-style attribute names are kept unchanged because code
        # elsewhere in this file may read or reassign them directly.
        self.__db__ = db
        self.__sql__ = sql
        self.__table__ = table

    # Database initialisation.
    def init_db(self):
        """Create the Main_Url and Second_Url tables in the database.

        Failures are reported on stdout instead of being raised.  The
        connection is always closed, and the DDL is explicitly committed.
        """
        conn = None
        try:
            print("开始初始化数据库")
            print("数据库路径:%s" % self.__db__)
            conn = sqlite3.connect(self.__db__)
            cursor = conn.cursor()
            print("开始创建表 Main_Url")
            sql_create_table_Main_Url = '''CREATE TABLE Main_Url (
                ID   INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                NAME VARCHAR (20) NOT NULL,
                URL  VARCHAR (50) NOT NULL,
                PATH VARCHAR (50) NOT NULL
            );'''
            cursor.execute(sql_create_table_Main_Url)
            print("创建表 Main_Url 成功")
            print("开始创建表 Second_Url ")
            sql_create_table_Second_Url = '''CREATE TABLE Second_Url (
                ID   INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                NAME VARCHAR (20) NOT NULL,
                DES  VARCHAR (100),
                PATH VARCHAR (50) NOT NULL
            );'''
            cursor.execute(sql_create_table_Second_Url)
            print("创建表 Second_Url 成功")
            # BUG FIX: the DDL was never committed and the connection
            # was never closed in the original implementation.
            conn.commit()
        except sqlite3.DatabaseError as e:
            # str(e) instead of the deprecated e.message attribute.
            print("数据库初始化失败:%s" % e)
        finally:
            if conn is not None:
                conn.close()

    # Database connectivity check.
    def connect_db(self):
        """Try to open the database and report success/failure on stdout."""
        conn = None
        try:
            conn = sqlite3.connect(self.__db__)
            print("连接数据库成功:%s" % self.__db__)
        except sqlite3.DatabaseError as e:
            print("连接数据库失败:%s" % e)
        finally:
            # BUG FIX: 'conn' used to be referenced unconditionally here,
            # raising NameError when sqlite3.connect() itself failed.
            if conn is not None:
                conn.close()

    # Generic statement execution.  NOTE: the misspelled method name
    # 'opertaor_db' is kept because external callers depend on it.
    def opertaor_db(self):
        """Execute self.__sql__ against the database and commit it.

        Failures are reported on stdout instead of being raised.
        """
        conn = None
        cursor = None
        try:
            conn = sqlite3.connect(self.__db__)
            cursor = conn.cursor()
            cursor.execute(self.__sql__)
            conn.commit()
        except sqlite3.DatabaseError as e:
            print("操作数据库失败:%s" % e)
        finally:
            # BUG FIX: 'cursor' was referenced unconditionally (NameError
            # when connect() failed) and the connection was never closed.
            if cursor is not None:
                cursor.close()
            if conn is not None:
                conn.close()
#定义网页爬取模块
class Crawl_Html(object):
    """Downloads the HTML body of one URL, retrying server errors (5xx)."""
    def __init__(self,url,current_excute_time=0,max_excute_time=10):
        """
        :param url: URL to fetch
        :param current_excute_time: retry attempts made so far
        :param max_excute_time: maximum number of retries remaining
        """
        self.__url__=url
        self.__current_excute_time__ = current_excute_time
        self.__max_excute_time__=max_excute_time
    # Fetch the page body as a byte string.
    # NOTE(review): on a non-5xx HTTPError, or once retries are exhausted,
    # this falls off the end and returns None implicitly; callers in this
    # module do not check for that.
    def Get_Html_Content(self):
        try:
            Request=urllib2.Request(self.__url__)
            response=urllib2.urlopen(Request)
            content=response.read()
            return content
        except urllib2.HTTPError as e:
            self.__current_excute_time__=self.__current_excute_time__ + 1
            print "下载失败,失败原因:%s" % e.reason
            print "HTTP错误代码:%s" % e.code
            if self.__max_excute_time__ > 0:
                # Retry recursively, but only for server-side (5xx) errors.
                if hasattr(e,'code') and 500 <= e.code < 600:
                    print "进行第%d次重试" % self.__current_excute_time__
                    self.__max_excute_time__=self.__max_excute_time__ - 1
                    return self.Get_Html_Content()
#定义正则表达式处理模块
class Re_Get_Results(object):
    """Applies a regular expression to a text and returns all matches."""
    def __init__(self,str,content):
        """
        :param str: string used to build the regex pattern.
            NOTE(review): the parameter shadows the builtin `str`, and
            storing it as self.__str__ clobbers the __str__ dunder slot.
        :param content: target text to search
        """
        self.__str__=str
        self.__content__=content
    def Get_Results(self):
        """Return the list of all regex matches found in the content.

        NOTE(review): `results == "[]"` compares a list with a string and
        is never true, so the empty-result branch is dead code; even if it
        ran, `raise <string>` is invalid and would raise TypeError.
        """
        try:
            pattern=re.compile('%s' % self.__str__)
            results=re.findall(pattern,self.__content__)
            if results == "[]":
                print "获取的是一个空列表,正则表达式命中失败"
                raise "获取的是一个空列表,正则表达式命中失败"
            else:
                return results
        except BaseException as e:
            print "正则表达式匹配失败:%s" % e.message
class Operator_File(object):
    """Writes a byte string to a file."""
    def __init__(self,file,content):
        """
        :param file: path of the file to write (parameter shadows the
            Python 2 builtin `file`)
        :param content: bytes to write
        """
        self.__file__=file
        self.__content__=content
    # Write the content to disk in binary mode.
    # NOTE(review): if write() raises, the handle is never closed; a
    # try/finally (or `with`) would be safer.
    def Write_File(self):
        try:
            file=open(self.__file__,'wb')
            file.write(self.__content__)
            file.close()
        except BaseException as e:
            print "文件操作失败:%s" % e.message
#定义入口函数
def main():
    """Crawl man.linuxde.net and archive every Linux command page.

    For each command category on the front page: create a download
    directory, record the category in table Main_Url, then walk every
    result page of that category, save each command's HTML file, and
    record name/description/path in table Second_Url.
    """
    BaseUrl="http://man.linuxde.net/"
    BasePath=os.path.split(os.getcwd())[0]
    db_path=BasePath+"\\sql\\"+"Linux_CMD.db"
    a=DB(db=db_path)
    # Initializing the database is only needed on the very first run.
    a.init_db()
    Main_Url_Pattern='<li id="menu-item-[0-9]" class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-[0-9]"><a href="(.*?)</a></li>'
    b=Crawl_Html(BaseUrl)
    c=Re_Get_Results(Main_Url_Pattern,b.Get_Html_Content())
    for line in c.Get_Results():
        # Split the match into (url, link text); works around category
        # names containing a backslash, which broke directory creation.
        result=line.split("\">")
        Path_GBK=BasePath+"\\Download\\"+result[1].encode('gbk')
        Path_Utf=BasePath+"\\Download\\"+result[1]
        print Path_Utf
        if not os.path.exists(Path_GBK):
            os.makedirs(Path_GBK)
        # NOTE(review): SQL is built by string interpolation; fine for
        # this one-off scraper but unsafe in general (SQL injection).
        sql="INSERT INTO Main_Url (NAME,URL,PATH) VALUES ('%s','%s','%s')" %(result[1],result[0],Path_Utf)
        d = DB(db=db_path, sql=sql)
        d.opertaor_db()
        print result[0]
        e=Crawl_Html(result[0])
        #e.Get_Html_Content()
        Page_Num_Url_Pattern='<div class=\'paging\'>.*class=\'inactive\' >(.*?)</a>.*</div>'
        Page_Num=Re_Get_Results(Page_Num_Url_Pattern,e.Get_Html_Content()).Get_Results()
        Num=1
        while Num <= int(Page_Num[0]):
            # Be polite to the server between page requests.
            time.sleep(1)
            url=result[0] + "/page/%s" % Num
            print "当前爬取链接:%s" % url
            f=Crawl_Html(url)
            #f.Get_Html_Content()
            Second_Name = '<div class="name"><a href="http://man.linuxde.net/.*" title=".*">(.*?)</a></div>'
            Second_Url = ' <div class="name"><a href="(.*?)" title=".*">.*</a></div>'
            Second_Des ='<div class="des">(.*?)</div>'
            # NOTE(review): the same page is downloaded three times here;
            # one Get_Html_Content() result could serve all three regexes.
            g=Re_Get_Results(Second_Name,f.Get_Html_Content()).Get_Results()
            h=Re_Get_Results(Second_Url,f.Get_Html_Content()).Get_Results()
            i=Re_Get_Results(Second_Des,f.Get_Html_Content()).Get_Results()
            Num += 1
            print "爬取链接信息完毕,包含命令名称、介绍链接、命令描述"
            for abc in h:
                # Align command name/description with the URL via index.
                k=g[h.index(abc)]
                l=i[h.index(abc)]
                print Path_Utf+k
                m=Path_Utf+"\\"+k
                j = Operator_File(Path_GBK + "\\" + k, Crawl_Html(abc).Get_Html_Content()).Write_File()
                sql = "INSERT INTO Second_Url (NAME,DES,PATH) VALUES ('%s','%s','%s')" % (k, l, m)
                n = DB(db=db_path, sql=sql)
                n.opertaor_db()
if __name__ == "__main__":
    main()
    print "运行总耗时:%d" %(time.time() - Process_Begin_Time)
| true |
b722ec0aafb246944128840596932ba08eda31d6 | Python | resource-watch/cartosql.py | /cartosql/cli.py | UTF-8 | 2,933 | 3.125 | 3 | [
"MIT"
] | permissive | '''
Utility library for interacting with CARTO via the SQL API
Usage:
csql (post|get) [options] <sql>
csql select [options] <fields> <table> [-w <where>] [-o <order>] [-l <limit>]
csql ls [options]
csql exists [options] <table>
csql drop [options] [--confirm] <table>
Options:
-h --help Print this text
-u <user> Carto user (default: read from env CARTO_USER)
-k <key> Carto API key (default: read from env CARTO_KEY)
-s Silence output
-v Increase verbosity
-f <format> Response format (default: json)
Other:
-w <where> Adds 'WHERE <where>' clause
-o <order> Adds 'ORDER BY <order>' clause
-l <limit> Adds 'LIMIT <limit>' clause
'''
# Python 2
try: input = raw_input
except: pass
import cartosql
import logging
from docopt import docopt
import json
def prettyJson(obj):
    """Render *obj* as human-readable JSON: sorted keys, 4-space indent."""
    rendered = json.dumps(obj, indent=4, sort_keys=True)
    return rendered
def returnFormat(response, f=None):
    """Render an HTTP response body: pretty JSON by default ('json' or no
    format requested), otherwise the raw response text."""
    if f in (None, 'json'):
        return prettyJson(response.json())
    return response.text
def processArgs(args):
    """Dispatch a parsed docopt argument dict to the matching cartosql call.

    Builds the keyword options shared by all commands (user, key, format),
    then routes on the command flags. Returns the formatted response, a
    boolean for `exists`, or the module usage text when nothing matched.
    """
    opts = {}
    for flag, key in (('-u', 'user'), ('-k', 'key'), ('-f', 'f')):
        if args[flag]:
            opts[key] = args[flag]
    f = args['-f']
    # Bail out to the usage text when help was requested or credentials
    # could not be initialised.
    if args['--help'] or not cartosql.init():
        return __doc__
    if args['<sql>']:
        if args['post']:
            return returnFormat(cartosql.post(args['<sql>'], **opts), f)
        if args['get']:
            return returnFormat(cartosql.get(args['<sql>'], **opts), f)
    elif args['select']:
        if args['<fields>'] and args['<table>']:
            for flag, key in (('-w', 'where'), ('-o', 'order'), ('-l', 'limit')):
                if args[flag]:
                    opts[key] = args[flag]
            return returnFormat(
                cartosql.getFields(args['<fields>'], args['<table>'], **opts), f)
    elif args['ls']:
        r = cartosql.getTables(**opts)
        # getTables yields a plain list for the default/csv formats.
        if f is None or f == 'csv':
            return prettyJson(r)
        return returnFormat(r, f)
    elif args['exists'] and args['<table>']:
        return cartosql.tableExists(args['<table>'], **opts)
    elif args['drop'] and args['<table>']:
        # Ask interactively unless --confirm was passed on the command line.
        confirm = args['--confirm'] or input(
            'Drop table {}? (y/N)'.format(args['<table>'])) == 'y'
        if confirm:
            return returnFormat(cartosql.dropTable(args['<table>'], **opts), f)
        print('Pass option --confirm to drop table')
    return __doc__
def main(args=None):
    """CLI entry point: parse arguments, adjust log verbosity, run, print.

    Note that -s (silent) takes precedence over -v when both are given.
    """
    parsed = docopt(__doc__, args)
    if parsed['-v']:
        logging.getLogger().setLevel(logging.DEBUG)
    if parsed['-s']:
        logging.getLogger().setLevel(logging.WARNING)
    result = processArgs(parsed)
    if not parsed['-s']:
        print(result)
if __name__ == "__main__":
    main()
| true |
b2c916e28dcd41fb63110272c20501bce649ae6f | Python | zaxhutchinson/leylines | /ley_debug.py | UTF-8 | 405 | 3.21875 | 3 | [] | no_license | ############################################
# ley_debugger.py
#
# Print messages to console, if debugging is on.
#
###########################################
class Debugger:
    """Console debug helper: forwards messages to print() only while the
    debugger is switched on."""
    def __init__(self, is_active=False):
        self.is_active = is_active
    def makeActive(self):
        """Turn debug output on."""
        self.is_active = True
    def makeInactive(self):
        """Turn debug output off."""
        self.is_active = False
    def debugMsg(self, msg):
        """Print *msg* when debugging is active; otherwise do nothing."""
        if not self.is_active:
            return
        print(msg)
| true |
79ad3c8aad3a4cbff77eef19ea9edffa1e93ad37 | Python | btannenw/CMS_machineLearning | /diHiggsMLProject/higgsReconstruction/plotterClass.py | UTF-8 | 2,431 | 2.640625 | 3 | [] | no_license | ## Author: Ben Tannenwald
## Date: April 3, 2019
## Purpose: Class to hold functions for plotting stuff
import os

import matplotlib.pyplot as plt
import numpy as np
class hepPlotter:
    """Plotting helpers for HEP histogram comparisons.

    Histogram arrays are read from self.plottingData, a dict keyed first
    by pairing algorithm and then by sample label; callers populate it
    before using compareManyHistograms().
    """
    def __init__ (self, _subdirectoryName):
        # Class Defaults
        self.transparency = 0.5  # alpha used for overlaid histograms
        self.subdirectory = _subdirectoryName
        # [pairingAlgorithm][label] -> array of values to histogram;
        # initialised empty here (the original never created it, so any
        # use before external assignment raised AttributeError).
        self.plottingData = {}
        if not os.path.isdir(self.subdirectory):
            os.mkdir(self.subdirectory)

    def setTransparency(self, _userTransparency):
        """Set the alpha used when drawing overlaid histograms."""
        self.transparency = _userTransparency

    def getTransparency(self):
        """Print the current transparency (debug helper)."""
        print("Transparency: ", self.transparency)

    ##############################################################
    ##                FUNCTIONS FOR PLOTTING                    ##
    ##############################################################
    def compareManyHistograms(self, _pairingAlgorithm, _labels, _nPlot, _title, _xtitle, _xMin, _xMax, _nBins, _normed=False):
        """Overlay one histogram per label for the given pairing algorithm,
        display it, and save a PNG into self.subdirectory.

        Returns 0 (and plots nothing) when fewer data arrays than labels
        are available; returns None otherwise.
        """
        if len(self.plottingData[_pairingAlgorithm].keys()) < len(_labels):
            print("!!! Unequal number of arrays and labels. Learn to count better.")
            return 0
        plt.figure(_nPlot)
        if _normed:
            plt.title(_title + ' (Normalized)')
        else:
            plt.title(_title)
        plt.xlabel(_xtitle)
        _bins = np.linspace(_xMin, _xMax, _nBins)
        for iLabel in _labels:
            # NOTE: `normed` was removed in matplotlib >= 3.1; newer
            # versions need `density=` instead.
            plt.hist(self.plottingData[_pairingAlgorithm][iLabel], _bins, alpha=self.transparency, normed=_normed, label=iLabel + ' Events')
        plt.legend(loc='upper right')
        # store figure copy for later saving
        fig = plt.gcf()
        # draw interactively
        plt.show()
        # Build the image file name from the title/axis metadata.
        _scope = _title.split(' ')[0].lower()
        _variable = _xtitle.lstrip('Jet Pair').replace(' ', '').replace('[GeV]', '')
        _allLabels = ''.join(_labels)
        # BUG FIX: the original used undefined `pairingAlgorithm` (missing
        # leading underscore), raising NameError before saving.
        _filename = self.subdirectory + '/' + _scope + '_' + _pairingAlgorithm + '_' + _allLabels + '_' + _variable
        if _normed:
            _filename = _filename + '_norm'
        fig.savefig(_filename + '.png')
        return
| true |
43f74e26baea3691cd6902670181ba70aebfc70e | Python | Kose-i/python_test | /BaseKnowledge/iterator/iterator.py | UTF-8 | 523 | 3.59375 | 4 | [
"Unlicense"
] | permissive | #! /usr/bin/env python3
def func1():
    """Build an iterator over a list and pull its first element with next()."""
    it = iter([1, 2, 3, 21, 43, 4])
    print(it)
    print(next(it))
def func2():
    """Define a small generator inline and print every value it yields."""
    def gen():
        for value in (1, 2, 3, 4):
            yield value
    for item in gen():
        print(item)
def func3():
    """Demonstrate generator.send(): push a value into a paused generator.

    The inner generator yields exactly once, so send() resumes it, the
    generator prints the received value, and then it finishes — which
    makes send() raise StopIteration. The original code let that
    StopIteration escape and crash the caller; it is now caught after the
    inner print has already run.
    """
    def func3_inner():
        receive = 0
        receive = (yield receive)
        print("receive =", receive, " from func3_inner()")
    gen = func3_inner()
    next(gen)  # advance to the first yield (produces 0)
    try:
        # send(2) resumes the generator; it prints and then terminates,
        # so send() raises StopIteration instead of returning a value.
        print(gen.send(2))
    except StopIteration:
        pass
if __name__=='__main__':
    # Run each iterator/generator demo in turn, announcing it first.
    print("\nfunc1()")
    func1()
    print("\nfunc2()")
    func2()
    print("\nfunc3()")
    func3()
| true |
b5a6b5804bf877240a8e7ba5771ff3cd789042d3 | Python | pratiksan45/Python3.7 | /fibonacci.py | UTF-8 | 252 | 3.921875 | 4 | [] | no_license | def fibonacci_seq(n):
a=0
b=1
print(a, end=' ')
print(b, end=' ')
for i in range(1,n-1):
c=a+b
a=b
b=c
print(c, end=' ')
num=int(input("Enter any Number\n"))
fibonacci_seq(num) | true |
26b9b9b62c5b31b158f9241eb05cbe2678634605 | Python | gauravshilpakar/WhatsThatGenre-DeployedModel | /models/model_app.py | UTF-8 | 3,510 | 2.890625 | 3 | [] | no_license | import os
import sys
import argparse
from joblib import load
from tensorflow.keras.models import load_model
from models.make_dataset import make_dataset_dl
import numpy as np
import matplotlib.pyplot as plt
class AppManager:
    """Runs one end-to-end genre prediction: feature extraction, model
    inference, vote aggregation, and rendering of a prediction bar chart.
    """
    def __init__(self, args, model, genres):
        # args: path of the song to classify; model: path of the saved
        # Keras model; genres: genre-name -> class-id mapping.
        self.args = args
        self.genres = genres
        self.model = model
    def run(self):
        """Classify the song and return (message, source image,
        prediction-plot filename, predicted genre name)."""
        X, image = make_dataset_dl(self.args)
        model = load_model(self.model)
        preds = model.predict(X)
        votes = majority_voting(preds, self.genres)
        message = "\nIt is a '{}' song.\n Most likely genres are: {}".format(
            votes[0][0].upper(), votes[:3])
        predicted_genre = votes[0][0]
        # NOTE(review): splitting on the first '.' breaks for paths that
        # contain dots elsewhere; os.path.splitext would be safer.
        prediction = f"{self.args.split('.')[0]}_prediction.png"
        with plt.style.context('dark_background'):
            x = []
            y = []
            # One bar per genre: x = genre name, y = vote share in percent.
            for k, v in votes:
                x.append(k)
                y.append(round((v * 100), 2))
            fig, ax = plt.subplots(dpi=200)
            plt.title("Predictions")
            ax.bar(x, y)
            ax.set_xlabel('Genres')
            ax.set_ylabel('Probability')
            xlocs, xlabs = plt.xticks()
            plt.xticks(fontsize=7)
            # NOTE(review): the figure is saved before the value labels
            # are drawn below, so the saved PNG lacks them — confirm
            # whether that is intended.
            plt.savefig(f"./static/prediction_output/{prediction}", dpi=200)
            for i, v in enumerate(y):
                ax.text(xlocs[i] - 0.25, v + 0.25, str(v), color="white")
            plt.tight_layout()
            # plt.show()
        print(message)
        return message, image, prediction, predicted_genre
# Constants
# GTZAN genre name -> integer class id emitted by the model.
genres = {
    'metal': 0, 'disco': 1, 'classical': 2, 'hiphop': 3, 'jazz': 4,
    'country': 5, 'pop': 6, 'blues': 7, 'reggae': 8, 'rock': 9
}
# Helpers: collapse per-frame model scores into ranked genre votes.
def majority_voting(scores, dict_genres):
    """Aggregate per-frame class scores into ranked genre votes.

    Each row of *scores* is argmax'd to a class id; the ids are tallied
    and normalised to two-decimal shares. Returns a list of
    (genre_name, share) pairs sorted by share, highest first.
    """
    frame_preds = np.argmax(scores, axis=1)
    class_ids, tallies = np.unique(frame_preds, return_counts=True)
    shares = np.round(tallies / np.sum(tallies), 2)
    ranked = sorted(zip(class_ids, shares), key=lambda pair: pair[1], reverse=True)
    return [(get_genres(cid, dict_genres), share) for cid, share in ranked]
def get_genres(key, dict_genres):
    """Translate a numeric class id back into its genre name."""
    id_to_name = {v: k for k, v in dict_genres.items()}
    return id_to_name[key]
def PredictModel(args):
    """Classify the audio file named by *args* with the bundled Keras model.

    Returns the (message, source image, prediction-plot filename,
    predicted genre) tuple produced by AppManager.run().
    """
    model = os.path.join('models/MyModel.h5')
    app = AppManager(args, model, genres)
    return app.run()
if __name__ == '__main__':
    # # Parse command line arguments
    # parser = argparse.ArgumentParser(description='Music Genre Recognition on GTZAN')
    # # Required arguments
    # parser.add_argument('-t', '--type', help='dl or ml for Deep Learning or Classical ML approaches, respectively.', type=str, required=True)
    # # Nearly optional arguments. Should be filled according to the option of the requireds
    # parser.add_argument('-m', '--model', help='Path to trained model', type=str, required=True)
    # parser.add_argument('-s', '--song', help='Path to song to classify', type=str, required=True)
    # args = parser.parse_args()
    # # Call the main function
    # NOTE(review): the argparse CLI above is disabled; the script runs a
    # hard-coded sample file instead.
    args = "Taylor Swift - End Game ft Ed Sheeran Future.mp4"
    PredictModel(args)
| true |
c42631a29e2da6dbf13c2c947c398c637d97bdf7 | Python | JazimLatif/Bubble-chamber-image-analysis | /CleanupCode.py | UTF-8 | 1,581 | 3.265625 | 3 | [] | no_license | import PIL
from PIL import Image
import requests
from io import BytesIO
from PIL import ImageFilter
from PIL import ImageEnhance
from IPython.display import display
import numpy as np
import imageio
import matplotlib.pyplot as plt
#add path to the image file
filename = R'C:\Users\clarkj5\Documents\Python Scripts\TestImage.jpg'
#input image dimensions
with Image.open(filename) as image:
    width, height = image.size
#print image details to check image has been found
# NOTE(review): `image` is used here although its `with` block already
# closed the file handle; the cached size/type still print, and the image
# is reopened two lines below anyway.
print (image.size)
print(image)
print(type(image))
image = Image.open(filename)
# Blank 32-bit image. NOTE(review): this module-level output_image is
# never written to — binarize() builds and returns its own image.
output_image = Image.new('I', image.size, 0xffffff)
#iterate through pixels
def binarize(image, threshold):
    """Return a black/white copy of *image*: grayscale pixels below
    *threshold* become 0, the rest 255. Also pops up the intermediate
    images via show().

    NOTE(review): the per-pixel getpixel/putpixel loop is very slow;
    Image.point(lambda v: 255 if v >= threshold else 0) should do the
    same in one C-level pass — worth confirming and switching.
    """
    #Show starting image
    image.show()
    # Convert to 8-bit grayscale; this local shadows the module-level
    # output_image only inside this function.
    output_image=image.convert("L")
    #show greyscale image
    output_image.show()
    #iterate through pixels changing to black or white depending on if they are above or below the threshold
    for x in range(output_image.width):
        for y in range(output_image.height):
            # for the given pixel at x,y, check its value against the threshold
            if output_image.getpixel((x,y))< threshold:
                # set this to zero (Black)
                output_image.putpixel( (x,y), 0 )
            else:
                # otherwise set this to 255 (White)
                output_image.putpixel( (x,y), 255 )
    return output_image
#(image to be examined, threshold)
# NOTE(review): the thresholded image returned here is discarded; the
# final show() below displays the blank module-level output_image instead.
binarize(image, 100)
#show final image
output_image.show(R'C:\Users\clarkj5\Documents\Python Scripts\circleout.jpg', image) | true |
c838a60d0718f41f1942cd4316e8df6b2edfc481 | Python | YLyeliang/MSI_FCN | /core/metrics.py | UTF-8 | 6,222 | 2.703125 | 3 | [] | no_license | import os
import tensorflow as tf
import numpy as np
from PIL import Image
def fast_hist(a, b, n):
    """Confusion matrix between label vector *a* and prediction vector *b*.

    Entries of *a* outside [0, n) are ignored. Returns an (n, n) ndarray
    with rows = true class, columns = predicted class.
    """
    k = (a >= 0) & (a < n)
    # BUG FIX: the original called tf.math.bincount(...).reshape(...);
    # a TF EagerTensor has no .reshape() method, so that crashed at
    # runtime. np.bincount matches the canonical numpy implementation.
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
def get_hist(predictions, labels):
    """
    Multi-class metric support: accumulate a confusion matrix over a batch.

    :param predictions: (batch, H, W, num_class) per-pixel score maps
    :param labels: (batch, H, W) integer ground-truth maps
    :return: (num_class, num_class) confusion matrix summed over the batch
    """
    num_class = predictions.shape[3]
    batch_size = predictions.shape[0]
    hist = np.zeros((num_class, num_class))
    for i in range(batch_size):
        hist += fast_hist(labels[i].flatten(), predictions[i].argmax(2).flatten(), num_class)
    return hist
class Metrics():
    """Streaming binary-segmentation metrics (crack vs. background).

    Wraps tf.keras metric objects, resets them for every batch, and
    accumulates raw TP/TN/FP/FN counts so that dataset-level scores can
    be derived afterwards via overall_metrics().
    """
    def __init__(self):
        # Raw confusion counts are appended once per batch so that
        # overall_metrics() can aggregate over the whole dataset.
        self.metrics = {'tp': [], 'tn': [], 'fp': [], 'fn': []}
        self.tp = tf.metrics.TruePositives()
        self.tn = tf.metrics.TrueNegatives()
        self.fp = tf.metrics.FalsePositives()
        self.fn = tf.metrics.FalseNegatives()
        self.p = tf.keras.metrics.Precision()
        self.r = tf.keras.metrics.Recall()
        self.acc = tf.keras.metrics.Accuracy()
        self.MeanIou = tf.keras.metrics.MeanIoU(num_classes=2)
    def update_state(self, true, pred, is_train=True):
        """Update every metric with one batch.

        :param true: ground-truth labels (flattened internally)
        :param pred: per-class scores; argmax over the last axis yields
            the predicted label
        :param is_train: when True, the per-batch precision/recall/
            accuracy/IoU values are also written into self.metrics
        :return: the (mutated) self.metrics dict
        """
        # The original had two byte-identical branches for train/eval
        # here; flattening is the same in both modes, so they are merged.
        y_pred = tf.reshape(tf.argmax(pred, axis=-1), [-1])
        y_true = tf.reshape(true, [-1])
        # Reset so each call reports per-batch (not running) values.
        for metric in (self.tp, self.tn, self.fp, self.fn,
                       self.p, self.r, self.acc, self.MeanIou):
            metric.reset_states()
            metric.update_state(y_true, y_pred)
        num_tp = self.tp.result().numpy()
        num_tn = self.tn.result().numpy()
        num_fp = self.fp.result().numpy()
        num_fn = self.fn.result().numpy()
        if is_train:
            self.metrics['p'] = self.p.result().numpy()
            self.metrics['r'] = self.r.result().numpy()
            self.metrics['acc'] = self.acc.result().numpy()
            # Per-class IoU from the raw counts; MIU from the keras metric.
            self.metrics['IUcrack'] = num_tp / (num_tp + num_fp + num_fn)
            self.metrics['IUbackground'] = num_tn / (num_tn + num_fn + num_fp)
            self.metrics['MIU'] = self.MeanIou.result().numpy()
        self.metrics['tp'].append(num_tp)
        self.metrics['tn'].append(num_tn)
        self.metrics['fp'].append(num_fp)
        self.metrics['fn'].append(num_fn)
        return self.metrics
    def overall_metrics(self):
        """Aggregate the accumulated per-batch counts into dataset-level
        accuracy, balanced accuracy, precision, recall, G-mean, F1 and
        IoU scores. Returns them as a dict."""
        tp_all = np.sum(self.metrics['tp'])
        tn_all = np.sum(self.metrics['tn'])
        fp_all = np.sum(self.metrics['fp'])
        fn_all = np.sum(self.metrics['fn'])
        specificity = tn_all / (tn_all + fp_all)
        precision = tp_all / (tp_all + fp_all)
        recall = tp_all / (tp_all + fn_all)
        IUcrack = tp_all / (tp_all + fp_all + fn_all)
        IUback = tn_all / (tn_all + fn_all + fp_all)
        return {
            'accuracy': (tp_all + tn_all) / (tp_all + tn_all + fp_all + fn_all),
            'Balanced accuracy': (recall + specificity) / 2,
            'precision': precision,
            'recall': recall,
            'gmean': np.sqrt(recall * specificity),
            'f1': 2 * recall * precision / (recall + precision),
            'IUcrack': IUcrack,
            'IUback': IUback,
            'MIU': (IUcrack + IUback) / 2,
        }
def metrics_with_images(pred_dir, lab_dir):
    """Score every prediction image in *pred_dir* against its ground-truth
    label in *lab_dir* and print the aggregated metrics.

    Label filenames are derived by stripping the 7-character prediction
    suffix and appending '.png'.
    """
    tracker = Metrics()
    for fname in os.listdir(pred_dir):
        pred_path = os.path.join(pred_dir, fname)
        lab_path = os.path.join(lab_dir, fname[:-7] + ".png")
        pred, true = decode_image_label(pred_path, lab_path)
        tracker.update_state(true, pred, is_train=False)
    for k, v in tracker.overall_metrics().items():
        print("{}: {}".format(k, v))
def decode_image_label(pred_file,lab_file):
    """Load a prediction PNG and its label PNG as 256x256 tensors.

    The RGB prediction is collapsed to grayscale and binarised (any
    non-zero pixel counts as "crack"); the label is decoded as uint8.
    Returns (prediction, label).
    """
    image = tf.io.read_file(pred_file)
    image = tf.image.decode_png(image)
    image = tf.image.rgb_to_grayscale(image)
    # Binarise: any non-zero grayscale value becomes class 1.
    image = tf.where(image>0,1.,0.)
    image = tf.image.resize(image,(256,256))
    # NOTE(review): tf.image.resize defaults to bilinear interpolation,
    # which produces fractional values for the binarised map and the
    # label; method='nearest' would preserve class ids — confirm intent.
    label = tf.io.read_file(lab_file)
    label = tf.image.decode_png(label, dtype=tf.uint8)
    label = tf.image.resize(label,(256,256))
    return image,label
# pred_format = "/home/yel/yel/experiments/deepcrack_{}"
# true_format = "/home/yel/yel/experiments/condition{}"
# for i in range(1,4):
# pred_dir = pred_format.format(i)
# true_dir = true_format.format(i)
# print("condition: {}".format(i))
# metrics_with_images(pred_dir,true_dir)
# print()
# debug
# m = tf.keras.metrics.MeanIoU(num_classes=2)
# m.update_state([0, 0, 1, 1], [0, 1, 0, 1])
# print('Final result: ', m.result().numpy()) # Final result: 0.33
# m = tf.keras.metrics.TruePositives()
# true = tf.random.uniform([10,256,256,1],0,maxval=2,dtype=tf.int32)
# b = tf.argmax(true,axis=-1)
# debug=1
# pred = tf.random.uniform([10,256,256,2],0,maxval=2,dtype=tf.int32)
# m.update_state(true,pred)
# num = m.result()
# b=num.numpy()
# print(num)
# import time
# s =time.time()
# Metric =Metrics()
# metric=Metric.update_state(true,pred,True)
# print(metric)
# ovr_metric = Metric.overall_metrics()
# # print(ovr_metric)
# e =time.time()
# print(e-s)
# print('Final result: ', m.result().numpy()) # Final result: 2
| true |
4f5f4aadfeabb13790b417b334c5f73c6d0345a7 | Python | KomorebiL/OJ | /stack_from_queue.py | UTF-8 | 896 | 3.859375 | 4 | [] | no_license | from queue import Queue
class Stack:
    """LIFO stack built on two FIFO queues.

    Invariant: q1 always holds the elements in pop order (newest at the
    front); q2 is only scratch space while pushing.
    """
    def __init__(self):
        self.q1 = Queue()
        self.q2 = Queue()
    def empty(self):
        """True when the stack holds no elements."""
        return self.q1.empty()
    def push(self, element):
        """Insert *element* so that it becomes the next one popped."""
        if self.empty():
            self.q1.enqueue(element)
            return
        # Enqueue the new element first, drain q1 behind it, then swap
        # the roles of the two queues to restore the invariant.
        self.q2.enqueue(element)
        while not self.q1.empty():
            self.q2.enqueue(self.q1.dequeue())
        self.q1, self.q2 = self.q2, self.q1
    def pop(self):
        """Remove and return the most recently pushed element."""
        return self.q1.dequeue()
    def top(self):
        """Peek at the most recently pushed element without removing it."""
        return self.q1.head.next.element
    def __repr__(self):
        return str(self.q1)
def test_stack():
    """Smoke-test Stack: LIFO ordering plus the queue-backed repr format."""
    s = Stack()
    s.push(1)
    s.push(2)
    s.push(3)
    s.push(4)
    # repr delegates to the underlying custom queue: newest element first.
    assert str(s) == 'head > 4 > 3 > 2 > 1 > '
    assert s.pop() == 4
    assert s.pop() == 3
    assert s.pop() == 2
    assert s.pop() == 1
if __name__ == '__main__':
test_stack() | true |
d14b0e7748d04a61a8faaeb3ae035c39b39456de | Python | TurtleZhong/Particle_Filter_Demo | /particle_filter_demo.py | UTF-8 | 7,274 | 3.5 | 4 | [] | no_license | """
This project is an implement of particle filter.
* Author: ZhongXinliang
* Email: xinliangzhong@foxmail.com
* Date: 2018.01.17
"""
from math import *
import random
import cv2
import numpy as np
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]
world_size = 100.0
class Robot:
    """
    A robot (or particle) pose on a cyclic world_size x world_size map,
    with noisy motion and noisy landmark-distance sensing.
    (Note: this module is Python 2 — see the `raise ValueError, msg`
    syntax below and the print statements at module level.)
    """
    def __init__(self):
        # Uniformly random initial pose; all noise levels start at zero.
        self.x = random.random() * world_size
        self.y = random.random() * world_size
        self.orientation = random.random() * 2.0 * pi
        self.forward_noise = 0.0
        self.turn_noise = 0.0
        self.sense_noise = 0.0
    def set(self, new_x, new_y, new_orientation):
        """
        Set the 2D pose of the robot.
        :param new_x: x coordinate, in [0, world_size)
        :param new_y: y coordinate, in [0, world_size)
        :param new_orientation: heading in radians, in [0, 2*pi)
        :return: None. Raises ValueError when a value is out of bounds.
        """
        if new_x < 0 or new_x >= world_size:
            raise ValueError, 'X coordinate out of bound'
        if new_y < 0 or new_y >= world_size:
            raise ValueError, 'Y coordinate out of bound'
        if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError, 'Orientation must be in [0..2pi]'
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)
    def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
        """
        Change the noise parameters (useful when tuning particle filters).
        :param new_f_noise: forward (translation) noise std dev
        :param new_t_noise: turn (rotation) noise std dev
        :param new_s_noise: measurement noise std dev
        :return: None
        """
        self.forward_noise = float(new_f_noise)
        self.turn_noise = float(new_t_noise)
        self.sense_noise = float(new_s_noise)
    def sense(self):
        """
        Measure the distance to every landmark, with Gaussian noise.
        :return: list of noisy distances, one per landmark (same order
            as the module-level `landmarks` list).
        """
        Z = []
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            dist += random.gauss(0.0, self.sense_noise)
            Z.append(dist)
        return Z
    def move(self, turn, forward):
        """
        Apply a (turn, forward) motion command with noise.
        :param turn: change of heading, radians
        :param forward: distance to travel (must be >= 0)
        :return: a NEW Robot at the resulting pose; self is not mutated.
        """
        if forward < 0:
            raise ValueError, 'Robot cant move backwards'
        # turn, and add randomness to the turning command
        orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
        orientation %= 2 * pi
        # move, and add randomness to the motion command
        dist = float(forward) + random.gauss(0.0, self.forward_noise)
        x = self.x + (cos(orientation) * dist)
        y = self.y + (sin(orientation) * dist)
        x %= world_size    # cyclic truncate: the world wraps around
        y %= world_size
        # set particle
        res = Robot()
        res.set(x, y, orientation)
        res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
        return res
    def gaussian(self, mu, sigma, x):
        """
        Probability density of x under a 1-D Gaussian N(mu, sigma^2).
        """
        return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))
    def measurement_prob(self, measurement):
        """
        Likelihood of a measurement vector given this pose, assuming the
        landmark distance errors are independent Gaussians.
        """
        prob = 1.0
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            prob *= self.gaussian(dist, self.sense_noise, measurement[i])
        return prob
    def __repr__(self):
        """
        Compact pose string used when printing particle lists.
        """
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
def create_map():
    """
    Render the landmark map: a 500x500 black image (5 px per world unit)
    with each landmark drawn as a red dot (OpenCV colors are BGR).
    :return: the map image (numpy uint8 array).
    """
    map_image = np.zeros(shape=(500, 500, 3), dtype=np.uint8)
    for landmark in landmarks:
        cv2.circle(map_image, tuple((5 * int(landmark[0]), 5 * int(landmark[1]))), radius=5, color=(0, 0, 255), thickness=-1)
    return map_image
def show_robot_pose(robot, map_image):
    """
    Draw the robot as a blue dot onto map_image (modified in place,
    using the same 5x world-to-pixel scale as create_map).
    :param robot: Robot to draw.
    :param map_image: image to draw into.
    :return: the same image, for chaining.
    """
    cv2.circle(map_image, tuple((int(5 * robot.x), int(5 * robot.y))), radius=5, color=(255, 0, 0), thickness=-1)
    return map_image
def show_particles(particles, map_image):
    """
    Return a COPY of map_image with every particle drawn as a small green
    dot; the input image itself is left untouched.
    :param particles: list of Robot particles to draw.
    :param map_image: base image.
    :return: the annotated copy.
    """
    map_image_copy = map_image.copy()
    for particle in particles:
        cv2.circle(map_image_copy, tuple((int(5 * particle.x), int(5 * particle.y))),
                   radius=1, color=(0, 255, 0), thickness=-1)
    return map_image_copy
# Step1: Show the map.
map_image = create_map()
cv2.namedWindow('map_image')
cv2.imshow('map_image', map_image)
# Create the robot.
myrobot = Robot()
# Show the robot pose in the map.
map_image = show_robot_pose(myrobot, map_image)
cv2.imshow('map_image', map_image)
cv2.waitKey(0)
# Generate particles: N random poses with motion and sensing noise.
N = 1000
p = []
for i in range(N):
    x = Robot()
    x.set_noise(0.05, 0.05, 5)
    p.append(x)
initial_image = show_particles(p, map_image.copy())
cv2.imshow('map_image', initial_image)
cv2.waitKey(0)
# Main filter loop: move the robot, propagate the particles, weight them
# by measurement likelihood, then resample.
# NOTE(review): the inner loops reuse loop variable `i`, shadowing the
# outer epoch counter (harmless here since `i` is not used afterwards).
for i in range(60):
    myrobot = myrobot.move(0.1, 5.0)
    cv2.imshow('map_image', map_image)
    cv2.waitKey(300)
    map_image = show_robot_pose(myrobot, map_image)
    Z = myrobot.sense()
    # Prediction: apply the same motion command to every particle.
    p2 = []
    for i in range(N):
        p2.append(p[i].move(0.1, 5.0))
    p = p2
    # Update: weight each particle by the likelihood of the robot's
    # measurement given the particle's pose.
    w = []
    for i in range(N):
        w.append(p[i].measurement_prob(Z))
    # Re-sampling ("resampling wheel"): draw N particles with probability
    # proportional to their weights by stepping a random distance < 2*max
    # weight around the wheel each draw.
    p3 = []
    index = int(random.random() * N)
    beta = 0.0
    mw = max(w)
    for i in range(N):
        beta += random.random() * 2.0 * mw
        while beta > w[index]:
            beta -= w[index]
            index = (index + 1) % N
        p3.append(p[index])
    p = p3
    initial_image = show_particles(p, map_image.copy())
    cv2.imshow('map_image', initial_image)
    cv2.waitKey(300)
print p
initial_image = show_particles(p, map_image.copy())
cv2.imshow('map_image', initial_image)
initial_image = show_robot_pose(myrobot, initial_image)
cv2.imshow('map_image', initial_image)
cv2.waitKey(0)
| true |
8fa8256e07cb9811b891de414f14a3e111968cee | Python | zhou-jia-ming/leetcode-py | /interview_04_04.py | UTF-8 | 1,553 | 3.453125 | 3 | [] | no_license | # coding:utf-8
# Created by: Jiaming
# Created at: 2020-04-29
from typing import *
from utils import TreeNode, generate_tree, null
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
# 遍历每个节点的深度比较,总复杂度为O(nlogn)
if not root:
return True
if not root.left and not root.right:
return True
if abs(self.depth(root.left) - self.depth(root.right)) > 1:
return False
return self.isBalanced(root.left) and self.isBalanced(root.right)
def depth(self, root):
# 求二叉树深度Log(n)
if not root:
return 0
if not root.left and not root.right:
return 1
return max(self.depth(root.left), self.depth(root.right)) + 1
def isBalanced1(self, root: TreeNode):
# 更高效的解法,将判断平衡和深度写在一个dfs,向上传递平衡的树的深度,
# 如果不平衡向上传递-1。时间复杂度O(n*logn)
def DFS(root):
if root is None:
return 0
ld = DFS(root.left)
rd = DFS(root.right)
if ld == -1 or rd == -1 or abs(rd - ld) > 1:
return -1
else:
return 1 + max(ld, rd)
return DFS(root) >= 0
if __name__ == "__main__":
s = Solution()
data = generate_tree([3, 9, 20, null, null, 15, 7])
print(s.isBalanced(data))
data = generate_tree([1, 2, 2, 3, 3, null, null, 4, 4])
print(s.isBalanced1(data))
| true |
876ad37f67416ae4925f9752d5cec876d7bc5f6b | Python | Isaivargas/neuronalNetwork | /artificialNeuron.py | UTF-8 | 3,149 | 2.796875 | 3 | [] | no_license |
#Single Artificial Neuron.
#created by Isaí vargas Chávez-Age 19 All rigths reserved /Users/Isai/eclipse-workspace/Perceptron/src/NeuronalNetworks/ArtificialNeuron.java@
# Compilador Clang 6.0 (clang-600.0.57)] on darwin.
# version 1.0.
# 03/02/2018 in Mexico City.
import random
class Neuron :
    """A single artificial neuron (perceptron-style).

    NOTE(review): the class-level attributes below are shared mutable
    defaults until __init__ rebinds them, and both Synapsis() and Learn()
    contain defects flagged inline — this class cannot run end to end as
    written.
    """
    #Attributes (class-level defaults)
    bias = 0
    y = 0 #Output
    v = 0
    inputsignal = 0
    inputweight = 0
    inputsVector = [ ]
    weightsVector = [ ]
    trainingVector = [ ]
    learningRate = 0
    iterathion = 0
    def __init__( self ) :
        # Re-bind per-instance state; bias fixed to 1, learning rate 0.5.
        self.bias = 1
        self.inputweight = float(self.inputweight)
        self.learningRate = 0.5
        self.inputsVector = [ 1 ]
        self.weightsVector = [ ]
        self.trainingVector = [ ]
    def add_inputVector(self,inputsVector) :
        # Append one input signal value.
        self.inputsVector.append ( inputsVector )
    def add_trainingVector(self,inputTrainSet) :
        # Append one training sample.
        self.trainingVector.append (inputTrainSet)
    def add_weightsVector (self,numberinputs):
        # Initialise one random weight in [-1.0, 1.0] per input.
        self.num =numberinputs
        start = -1.0
        end = 1.0
        for j in range(self.num):
            self.weightsVector.append( random.uniform(start, end) )
    def Synapsis( self,iterathion,numberinputs,inputsVector): #where the function spect a variable number of arguments.
        # Intended: weighted sum of inputs.
        # NOTE(review): `value * [ iterathion ]` is list repetition, not
        # numeric multiplication, so the product of the two bracketed
        # terms is list * list and raises TypeError at runtime. The two
        # bare attribute expressions below are no-ops.
        v = 0
        self.inputsVector
        self.weightsVector
        for k in range (iterathion) :
            self.numberinputs = numberinputs +1
            for j in range(self.numberinputs) :
                v = v + ( (self.inputsVector [ j ]*[ iterathion ]) *(self.weightsVector[ j ]*[ iterathion ] ) )
        print (' V : ' ,v)
        return v
    def Signum( self ):#Activation FUNCTION.
        # Hard threshold on self.v. NOTE(review): Synapsis() returns its
        # sum but never stores it into self.v, so this always sees 0.
        if self.v > 0: # Signum Function!
            self.y = 1
        else :
            self.y = 0
        print ('Out put:',self.y)
        return self.y
    def Learn( self,y,iterathion,inputsVector,weightsVector,trainOutput,k) : # Learning function
        # Perceptron-style weight update.
        # NOTE(review): `self.trainOutput` is never assigned anywhere, so
        # the first comparison raises AttributeError; the y/inputsVector/
        # weightsVector parameters are shadowed by self attributes, and k
        # is immediately overwritten.
        k = 1
        if self.y == self.trainOutput:
            self.weightsVector = self.inputweight*(iterathion )
        elif self.y > 0:
            self.inputweight = self.inputweight + self.learningRate *(self. trainOutput*self.iterathion) - (self.y *self.iterathion) *(self.inputsVector*self.iterathion)
        else:
            self.weightsVector = self.weightsVector - self.learningRate *(self. trainOutput*self.iterathion) - (self.y *self.iterathion) *(self.inputsVector*self.iterathion)
        return self.weightsVector
| true |
7542c60d21a4224c8ab498821f562327660ddf30 | Python | MengyingGIRL/python-100-days-learning | /Day07/set2.py | UTF-8 | 508 | 3.234375 | 3 | [] | no_license | '''
集合的常用操作
- 交集
- 并集
- 差集
- 子集
- 超集
@Time : 2020/1/14 9:57
@Author : wangmengying
@File : set2.py
'''
def main():
    """Walk through set intersection/union/difference and the
    subset/superset checks listed in the module docstring."""
    set1 = set(range(1, 7))        # {1, 2, 3, 4, 5, 6}
    print(set1)
    set2 = set(range(2, 11, 2))    # {2, 4, 6, 8, 10}
    print(set2)
    set3 = set(range(1, 5))        # {1, 2, 3, 4}
    print(set3)
    # Binary set operations.
    for combined in (set1 & set2, set1 | set2, set1 ^ set2, set1 - set2):
        print(combined)
    # Subset / superset relations.
    for relation in (set2 <= set1, set3 <= set1, set1 >= set2, set1 >= set3):
        print(relation)
if __name__ == '__main__':
main() | true |
195d0d700dd7c3690b9ca6c8ff44e133fcb0d88d | Python | oskar404/aoc | /aoc2020/day09.py | UTF-8 | 1,795 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env python3
import sys
def get_xmas_weakness(data, preamble, verbose=False):
    """Find the first XMAS-invalid number in `data`.

    A number is valid when it is the sum of two *distinct entries* of the
    preceding `preamble`-sized window.

    Returns:
        (index, value) of the first invalid entry, or (None, None) when
        every entry is valid.
    """
    assert preamble < len(data), f"Not enough data for preamble: {preamble}"
    for i in range(preamble, len(data)):
        v = data[i]
        window = data[i - preamble : i]
        if verbose:
            print(f"{i}: {data[i]} -> {window}")
        for a in window:
            b = v - a
            # Fix: a value may not pair with itself unless it occurs twice in
            # the window (the original accepted a single `a` with a == v - a).
            if b in window and (b != a or window.count(a) > 1):
                break  # valid pair found
        else:
            return (i, v)
    return (None, None)
def solve_part1(data, preamble, verbose=False):
    # Part 1 is exactly the first invariant violation: (index, value).
    return get_xmas_weakness(data, preamble, verbose)
def solve_part2(data, preamble, verbose=False):
    """Locate a contiguous run (length >= 2) summing to the part-1 weakness.

    Returns (min(run) + max(run), run) for the first such run, or
    (None, None) when no run matches.
    """
    _, weakness = get_xmas_weakness(data, preamble, verbose)
    assert len(data) > 1, f"Too little data, len(data): {len(data)}"
    for start in range(len(data) - 1):
        run = [data[start]]
        total = run[0]
        for value in data[start + 1 :]:
            run.append(value)
            total += value
            if total >= weakness:
                break  # overshoot or exact hit — stop extending this run
        if verbose:
            print(f"{start}: seq{run}")
        if total == weakness:
            return (min(run) + max(run), run)
    return (None, None)
def read_data(file):
    """Parse one integer per non-blank line of the given file path."""
    with open(file) as handle:
        return [int(line) for line in map(str.strip, handle) if line]
def main():
    # CLI entry point: expects exactly one argument, the puzzle-input path.
    assert len(sys.argv) == 2, "Missing input"
    data = read_data(sys.argv[1])
    # 25 is the preamble length mandated by the AoC 2020 day-9 puzzle.
    idx, val = solve_part1(data, 25)
    print(f"Part 1: XMAS attack - invalid value: (idx: {idx}, value: {val})")
    val, seq = solve_part2(data, 25)
    print(f"Part 2: XMAS attack - sequence sum: {val} -> {seq}")


if __name__ == "__main__":
    main()
| true |
b400761e381a92c31028b05657434617563002dc | Python | matthewnorman/geosearch | /tests/test_geosearch.py | UTF-8 | 3,601 | 3 | 3 | [
"MIT"
] | permissive | import pytest
import random
import geohash
from haversine import haversine
from unittest import mock
from geosearch import geolocate
def test_encoding_decoding():
    """Round-trip: a point stored for the Palace of Westminster must be
    retrievable by its geohash."""
    stored_id = 1
    locator = geolocate.LatLonGeolocator()
    locator._encode_and_store_(latitude=51.499167,
                               longitude=-0.124722,
                               ID=stored_id)
    assert locator._decode_by_hash(hash='gcpuvpjhmu89') == stored_id
def test_adjoining_boxes():
    """Neighbour lookup around the Palace: a precision beyond the hash
    length is rejected, valid precisions yield the eight surrounding cells."""
    locator = geolocate.LatLonGeolocator()
    hashcode = 'gcpuvpjhmu89'
    with pytest.raises(ValueError):
        locator._get_adjoining_hashes(hashcode=hashcode, precision=15)
    expected_p12 = ['gcpuvpjhmu83', 'gcpuvpjhmu8c', 'gcpuvpjhmu88',
                    'gcpuvpjhmu82', 'gcpuvpjhmu8b', 'gcpuvpjhmu8d',
                    'gcpuvpjhmu86', 'gcpuvpjhmu8f']
    assert locator._get_adjoining_hashes(hashcode=hashcode,
                                         precision=12) == expected_p12
    expected_p6 = ['gcpuuz', 'gcpuvr', 'gcpuvn', 'gcpuuy',
                   'gcpuvq', 'gcpvj0', 'gcpvhb', 'gcpvj2']
    assert locator._get_adjoining_hashes(hashcode=hashcode,
                                         precision=6) == expected_p6
def test_proximity_search():
    """Only the points within the 1 km radius of the Palace are returned."""
    locator = geolocate.LatLonGeolocator()
    # IDs mapped to latitudes at increasing distance from the query point.
    latitudes = {2: 51.499168, 3: 51.499178, 4: 51.499268,
                 5: 51.500168, 6: 51.699168, 7: 52.499168}
    for point_id, lat in latitudes.items():
        locator.add_location(latitude=lat, longitude=-0.124722, ID=point_id)
    hits = locator.proximity_search(latitude=51.499167,
                                    longitude=-0.124722,
                                    radius=1000)
    assert hits == [2, 3, 4, 5]
def test_proximity_search_long():
    """Fuzz test: every returned ID lies inside the radius and every
    omitted ID lies outside it."""
    radius = 10000
    centre = (51.5, 0.0)
    generated = {
        point_id: {'latitude': 51.5 + random.uniform(-0.5, 0.5),
                   'longitude': 0.0 + random.uniform(-0.5, 0.5)}
        for point_id in range(1000)
    }
    locator = geolocate.LatLonGeolocator()
    for point_id, latlon in generated.items():
        locator.add_location(latitude=latlon['latitude'],
                             longitude=latlon['longitude'],
                             ID=point_id)
    hits = locator.proximity_search(latitude=51.5,
                                    longitude=0.0,
                                    radius=radius)
    for point_id in hits:
        loc = generated[point_id]
        dist = haversine(centre, (loc['latitude'], loc['longitude'])) * 1000
        assert dist <= radius
    for point_id in generated.keys() - hits:
        loc = generated[point_id]
        dist = 1000 * haversine(centre, (loc['latitude'], loc['longitude']))
        assert dist > radius
| true |
9233c2e1eb62c1a58241a30c89fcdf74db63c719 | Python | python-practice-b02-006/PhilippK | /Lab_3/task1_from_Sinitsa.py | UTF-8 | 2,735 | 3.40625 | 3 | [] | no_license | import pygame
import numpy as np
from pygame.draw import *
pygame.init()
FPS = 30  # NOTE(review): unused — the main loop ticks at a hard-coded 10 fps
screen = pygame.display.set_mode((400, 400))  # 400x400 drawing window
# RGB color constants used by the drawing helpers below.
YELLOW = (255, 255, 0)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
def main():
    """Animation loop: redraw the bobbing, pulsing face each frame until the
    window close button is pressed."""
    pygame.display.update()
    clock = pygame.time.Clock()
    running = True
    phase = 0
    while running:
        clock.tick(10)
        bob = int(100 * np.cos(phase))   # vertical offset of the whole face
        pulse = int(10 * np.cos(phase))  # mouth/eye size modulation
        rect(screen, WHITE, [0, 0, 400, 400])                    # clear frame
        circle(screen, YELLOW, (200, 200 + bob), 100)            # face
        rect(screen, BLACK, [150, 250 + bob, 100, 20 + pulse])   # mouth
        draw_eye(150, 160 + bob, 20 + pulse)
        draw_eye(250, 160 + bob, 20 - pulse)
        draw_left_eyebrow(50, 80 + bob)
        draw_right_eyebrow(220, 55 + bob)
        phase += np.pi / 20
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
def draw_eye(x, y, r):
    """Draw one eye centred at (x, y): a red ring of radius 2*r behind a
    black pupil of radius r."""
    for colour, radius in ((RED, 2 * r), (BLACK, r)):
        circle(screen, colour, (x, y), radius)
def draw_left_eyebrow(x, y):
    """Draw the left eyebrow: a 100x10 black bar rotated 30 degrees
    clockwise, blitted with its bounding box's top-left corner at (x, y)."""
    canvas = pygame.Surface([200, 100], pygame.SRCALPHA)
    rect(canvas, BLACK, [0, 0, 100, 10])
    screen.blit(pygame.transform.rotate(canvas, -30), [x, y])
def draw_right_eyebrow(x, y):
    """Draw the right eyebrow: a 90x10 black bar rotated 25 degrees
    counter-clockwise, blitted with its bounding box's top-left corner at
    (x, y)."""
    canvas = pygame.Surface([200, 100], pygame.SRCALPHA)
    rect(canvas, BLACK, [0, 0, 90, 10])
    screen.blit(pygame.transform.rotate(canvas, 25), [x, y])
main()         # run the animation until the window is closed
pygame.quit()  # release pygame resources on exit
| true |
90cd45178baf6d355676d5bc92d3a681483f2d5c | Python | stubird/micStudy | /leetcode/SubsConc.py | UTF-8 | 1,046 | 2.78125 | 3 | [] | no_license | """
Copyright 2018 (c) Jinxin Xie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
s="barfoothefoobarmanxfoobardnfooifoobar"
words = ["foo", "bar"]
qwords = words[:]
i = 0
indice = []
while True:
if i >= len(s):
break
if s[i:i+3] in qwords:
qwords.pop(qwords.index(s[i:i+3]))
i+=3
if qwords == []:
indice.append(i - len(words[0])*len(words))
qwords = words[:]
elif len(qwords) < len(words):
qwords = words[:]
i += 1
else:
i+=1
print(indice)
| true |
# Manually drive the iterator protocol over a range object.
range_object = range(10)
iterator = range_object.__iter__()
while True:
    try:
        item = iterator.__next__()
    except StopIteration:
        # Fix: the original let StopIteration propagate, crashing the script
        # after the final item was printed.
        break
    print(item)
| true |
2012382ba2ae3caed4b22425cc1537ed1cd6d2c6 | Python | portobello-boy/Pokemon-Data-Visualization | /mpltest.py | UTF-8 | 7,497 | 3.109375 | 3 | [] | no_license | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import itertools
from chord import Chord
from math import pi
data = pd.read_csv("Pokemon.csv")
# Associate a color with each type:
colors = {
"Grass": "#7AC74C",
"Fire": "#EE8130",
"Water": "#6390F0",
"Bug": "#A6B91A",
"Normal": "#A8A77A",
"Poison": "#A33EA1",
"Electric": "#F7D02C",
"Ground": "#E2BF65",
"Fairy": "#D685AD",
"Fighting": "#C22E28",
"Psychic": "#F95587",
"Rock": "#B6A136",
"Ghost": "#735797",
"Ice": "#96D9D6",
"Dragon": "#6F35FC",
"Dark": "#705746",
"Steel": "#B7B7CE",
"Flying": "#A98FF3",
}
colorList = ["#A6B91A", "#705746", "#6F35FC", "#F7D02C", "#D685AD",
"#C22E28", "#EE8130", "#A98FF3", "#735797", "#7AC74C",
"#E2BF65", "#96D9D6", "#A8A77A", "#A33EA1", "#F95587",
"#B6A136", "#B7B7CE", "#6390F0"]
def barplt():
    """Bar-chart the per-type mean stats: Type 1 on the top axis, Type 2
    below."""
    _, axes = plt.subplots(nrows=2, ncols=1)
    for type_col, ax in (('Type 1', axes[0]), ('Type 2', axes[1])):
        means = data.groupby(type_col).mean().drop(columns=['#', 'Total'])
        means.plot(kind='bar', ax=ax)
    plt.show()
    plt.clf()
def kdeplt(attr1, attr2, whichtype):
    """3x6 grid of per-type KDE + regression plots of attr1 against attr2,
    filtering rows on the `whichtype` column ('Type 1' or 'Type 2')."""
    sns.set(style="whitegrid")
    _, axes = plt.subplots(3, 6, sharex=True, sharey=True)
    for type1, ax in zip(data['Type 1'].unique(), axes.flatten()[:18]):
        subset = data[data[whichtype] == type1]
        sns.kdeplot(subset[attr1], subset[attr2], ax=ax,
                    color=colors[type1], shade_lowest=False)
        sns.regplot(x=attr1, y=attr2, data=subset, ax=ax, color=colors[type1])
        ax.set_title(type1)
    plt.xlim(-20, 250)
    plt.ylim(-20, 250)
    plt.show()
    plt.clf()
def jointkdeplt(attr1, attr2, whichtype):
    """Show one JointGrid per type: KDE + regression in the joint panel and
    shaded KDE marginals, displayed one window after another."""
    sns.set(style="whitegrid")
    for type1 in data['Type 1'].unique():
        subset = data[data[whichtype] == type1]
        # JointGrid (rather than jointplot) allows distinct joint/marginal
        # plot kinds on the same figure.
        grid = sns.JointGrid(x=attr1, y=attr2, data=subset, space=0,
                             xlim=(-20, 250), ylim=(-20, 250))
        grid.plot_joint(sns.kdeplot, color=colors[type1])
        grid.plot_joint(sns.regplot, color=colors[type1])
        grid.plot_marginals(sns.kdeplot, color=colors[type1], shade=True)
        print(type1)
        plt.title(type1)
        plt.show()
def pairplt(whichtype):
    """Pairplot of the six base stats for each type, colored per type."""
    sns.set(style="whitegrid")
    for type1 in data['Type 1'].unique():
        subset = data[data[whichtype] == type1]
        subset["color"] = colors[type1]
        sns.pairplot(subset, height=2,
                     vars=["Attack", "Sp. Atk", "Defense", "Sp. Def", "HP", "Speed"],
                     hue="color", kind="reg", diag_kind="kde")
        plt.show()
def heatplt():
    """Heatmap of per-primary-type mean stats, then a two-type radar-chart
    comparison using rows 0 and 16 of the grouped frame."""
    df = data.groupby('Type 1').mean().drop(columns=['#', 'Total'])
    sns.set()
    sns.heatmap(df.transpose(), cmap="summer", annot=True, fmt=".2f", linewidths=.5)
    plt.show()
    plt.clf()
    # [df.Defense, df.Attack]
    # Radar chart: one angle per stat column, closed by repeating the first.
    df = df.reset_index()
    categories=list(df)[1:]
    N = len(categories)
    angles = [n / float(N) * 2 * pi for n in range(N)]
    angles += angles[:1]
    ax = plt.subplot(111, polar=True)
    ax.set_theta_offset(pi / 2)  # first axis at the top
    ax.set_theta_direction(-1)   # clockwise
    plt.xticks(angles[:-1], categories)
    ax.set_rlabel_position(0)
    plt.yticks([10,20,30,40,50,60,70,80,90,100,110,120,130], ["10","20","30","40","50","60","70","80","90","100","110","120","130"], color="grey", size=7)
    plt.ylim(0,130)
    # NOTE(review): rows 0 and 16 are whichever types happen to sort
    # first/seventeenth in the grouped index — confirm the intended pair.
    values=df.loc[0].drop('Type 1').values.flatten().tolist()
    values += values[:1]
    ax.plot(angles, values, linewidth=1, linestyle='solid', label="group A")
    ax.fill(angles, values, 'b', alpha=0.1)
    values=df.loc[16].drop('Type 1').values.flatten().tolist()
    values += values[:1]
    ax.plot(angles, values, linewidth=1, linestyle='solid', label="group B")
    ax.fill(angles, values, 'r', alpha=0.1)
    plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
    plt.show()
    plt.clf()
plt.clf()
def make_spider(row, title, color, df):
    """Draw one radar ("spider") subplot for `df` row `row` into a 3x6 grid.

    Assumes the first column of `df` is the type label and the remaining
    columns are numeric stats that fit within the fixed 0-130 radial range.
    """
    # number of variable
    categories=list(df)[1:]
    N = len(categories)
    # What will be the angle of each axis in the plot? (we divide the plot / number of variable)
    angles = [n / float(N) * 2 * pi for n in range(N)]
    angles += angles[:1]
    # Initialise the spider plot
    ax = plt.subplot(3,6,row+1, polar=True)
    # If you want the first axis to be on top:
    ax.set_theta_offset(pi / 2)
    ax.set_theta_direction(-1)
    # Draw one axe per variable + add labels labels yet
    plt.xticks(angles[:-1], categories, color='grey', size=8)
    # Draw ylabels
    ax.set_rlabel_position(0)
    plt.yticks([10,40,70,100,130], ["10","40","70","100","130"], color="grey", size=7)
    plt.ylim(0,130)
    # Ind1
    # Close the polygon by repeating the first value at the last angle.
    values=df.loc[row].drop('Type 1').values.flatten().tolist()
    values += values[:1]
    ax.plot(angles, values, color=color, linewidth=2, linestyle='solid')
    ax.fill(angles, values, color=color, alpha=0.4)
    # Add a title
    plt.title(title, size=11, color=color, y=1.1)
def radialplt():
    """Draw an 18-panel radar figure: one spider per primary type, showing
    that type's mean stats."""
    sns.set_style("white")
    means = data.groupby('Type 1').mean().drop(columns=['#', 'Total']).reset_index()
    plt.suptitle("Means of stats across primary types")
    plt.subplots_adjust(wspace=0.5, hspace=0.5)
    for row in range(len(means.index)):
        type_name = means.iloc[row]['Type 1']
        make_spider(row=row, title=type_name, color=colors[type_name], df=means)
    plt.show()
def typeRelation():
    """Render a chord diagram of Type 1 <-> Type 2 co-occurrence to HTML.

    Each dual-typed row contributes both (t1, t2) and (t2, t1), so the
    pivoted co-occurrence matrix is symmetric.
    """
    df = pd.DataFrame(data[['Type 1', 'Type 2']].values)
    df = df.dropna()  # mono-typed rows have NaN in Type 2 and are excluded
    df = list(itertools.chain.from_iterable((i, i[::-1]) for i in df.values))
    matrix = pd.pivot_table(
        pd.DataFrame(df), index=0, columns=1, aggfunc="size", fill_value=0
    ).values.tolist()
    print(pd.DataFrame(matrix))
    names = np.unique(df).tolist()  # sorted type names, matching the pivot axes
    print(pd.DataFrame(names))
    Chord(matrix, names, colors=colorList).to_html()  # writes the diagram to an HTML file
def main():
    """Interactive driver: print dataset overviews, draw the type chord
    diagram, then prompt for two stats and show per-type joint KDE plots."""
    # Examine parts of the data
    print(data.head())
    print(data.tail())
    print(data.sample(5))
    print("Count of rows in data: {}".format(data.count()))
    # Grab Data Frame using columns Type 1 and Type 2
    print(data[['Type 1', 'Type 2']])
    # Get mean for one numeric column
    print("Mean defense across all Pokemon: {}".format(data.Defense.mean()))
    # Grab a specific pokemon
    print(data.loc[200])
    # Bar Plot
    # barplt()
    typeRelation()
    # KDE Plots
    print("Stats to examine: HP, Speed, Attack, Sp. Atk, Defense, Sp. Def")
    attr1 = input("Provide a stat to examine: ").title()
    attr2 = input("Provide a second stat to examine: ").title()
    whichtype = input("Examine primary type (Type 1) or secondary type (Type 2): ").title()
    # kdeplt("Speed", "HP", "Type 2")
    jointkdeplt(attr1, attr2, whichtype)
    # Radial Plot
    # radialplt()
    # Pair Plot
    # pairplt("Type 1")
if __name__ == '__main__':
    main()  # run the interactive demo only when executed as a script
c23c21616ef7af5d63b0c57178fc9cd8509973c2 | Python | dbreddyAI/ml-deploy | /pred.py | UTF-8 | 1,019 | 2.890625 | 3 | [] | no_license | """
Script for sending data to the server to make predictions
"""
import pandas as pd
import json
import requests
# Run gunicorn from the terminal:
# gunicorn --bind 0.0.0.0:8000 server:app
# JSON-in/JSON-out headers for the prediction endpoint.
header = {'Content-Type': 'application/json', \
        'Accept': 'application/json'}
# Feature rows to score, read from a space-separated text file.
test_x = pd.read_csv("program/data/test_x.txt", sep = ' ')
# Make the POST request to the server
# NOTE(review): to_json is applied *before* json.dumps, so the payload is a
# double-encoded JSON string — confirm the server expects that.
# When testing without Docker
resp = requests.post("http://0.0.0.0:8000/predict",
                     data = json.dumps(test_x.to_json(orient = "records")),
                     headers = header)
# When testing with Docker (assuming the address binding was 8181:8000)
#resp = requests.post("http://0.0.0.0:8181/predict",
#                     data = json.dumps(test_x.to_json(orient = "records")),
#                     headers = header)
# Test set and predictions
pred_y = pd.read_json(resp.json()["predictions"], orient = "records")
test_y = pd.read_csv("program/data/test_y.txt", sep = ' ')
# MSE (one value per output column)
print(pred_y)
print("")
print(sum((test_y.values - pred_y.values) ** 2) / len(test_y))
| true |
de0f98ebe77ffef07c8a0bc0df90906139e54d1a | Python | shyzik93/dna_simulator | /dnasim/enzyme/dna_methyltransferase.py | UTF-8 | 438 | 2.625 | 3 | [] | no_license | from dnasim.enzyme.enzyme import Enzyme
class DNAMethyltransferase(Enzyme):
    def __init__(self, can_restrict=False, can_methylate=True):
        """
        DNA methyltransferase enzyme object.
        https://en.wikipedia.org/wiki/DNA_methyltransferase

        :param can_restrict: whether this enzyme can also cut (restrict) DNA
        :param can_methylate: whether this enzyme can methylate DNA
        """
        ec_number = None  # TODO: supply the EC number for methyltransferase
        super().__init__(ec_number)
        # Fix: the capability flags were accepted but silently discarded.
        self.can_restrict = can_restrict
        self.can_methylate = can_methylate
| true |
d6445c04d394b6aecac89051e77d501f9528d5c0 | Python | celord/PythonGreencore | /ExamenFinal/ej7.py | UTF-8 | 441 | 3.578125 | 4 | [] | no_license | """
Modo sólo escritura posicionándose al final del archivo (a).
En este caso se crea el archivo, si no existe, pero en caso
de que exista se posiciona al final, manteniendo el contenido original.
Y agregar texto a su criterio
"""
def prepararArchivo(a):
try:
f = open(a,'a+')
return f
except:
print('Error')
def agregarTexto(t):
a = prepararArchivo('ej7.txt')
a.write(t)
a.close()
agregarTexto('Agreagado texto al final\n')
| true |
38479e3a31d252665a8cf395686806960755afc4 | Python | mrallc/retro | /extensions/sqlite/sqlite.py | UTF-8 | 1,408 | 2.609375 | 3 | [
"ISC"
] | permissive | # rxSQL ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright (c) 2010 - 2011, Charles Childers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Filesystem based interface to sqlite3
#
# Usage
# - Write SQL query to file named 'sql'
# - Create a file named 'query'
# - Read results from file named 'results'
# - First line is integer specifying number of rows
# - Rest is db results, in tab separated value format
# - When finished, create a file named 'quit'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, sys, math, time, struct, sqlite3
# Poll the working directory for a "sql" + "query" file pair, run the SQL
# against retro.db, and publish the output as "results" (first line: row
# count; then tab-separated rows). A "quit" file stops the loop.
# NOTE(review): the query text is concatenated into SQL by design here —
# the file *is* the query, so there is no injection boundary to defend.
a = True
db = sqlite3.connect('retro.db')
c = db.cursor()

while a:
    if os.path.exists("query") and os.path.exists("sql"):
        os.remove("query")
        if os.path.exists("results"):
            os.remove("results")
        # Fix: use context managers so the handles close even if a query fails.
        with open("sql", "r") as f:
            query = f.read()
        with open("results0", "w") as f:
            # First line: row count of the result set.
            c.execute("select count(*) as count from (" + query + ")")
            for row in c:
                f.write(str(row[0]) + "\n")
            # Remaining lines: tab-separated values, one row per line.
            c.execute(query)
            for row in c:
                for item in row:
                    f.write(str(item) + "\t")
                f.write("\n")
        db.commit()
        if os.path.exists("sql"):
            os.remove("sql")
        if os.path.exists("results0"):
            # Rename last so readers only ever see a complete results file.
            os.rename("results0", "results")
    if os.path.exists("quit"):
        os.remove("quit")
        a = False
    # Fix: the original busy-waited at 100% CPU (`time` was imported but
    # unused). A short sleep keeps polling latency low without spinning.
    time.sleep(0.05)
| true |
51467d60c52f0dc84ce1b218219eb60bcf456027 | Python | ytangjf/CS498_AppliedMachineLearning | /HW1/prob1/main.py | UTF-8 | 4,266 | 3.328125 | 3 | [] | no_license | # Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math
def loadCsv(filename):
    """Read a CSV file of numbers into a list of rows of floats."""
    with open(filename, "r") as handle:
        return [[float(cell) for cell in row] for row in csv.reader(handle)]
def splitDataset(dataset, splitRatio):
    """Randomly partition `dataset` into [train, test].

    The training set receives int(len(dataset) * splitRatio) rows, drawn
    without replacement; the input list itself is not modified.
    """
    target = int(len(dataset) * splitRatio)
    pool = list(dataset)
    train = []
    while len(train) < target:
        train.append(pool.pop(random.randrange(len(pool))))
    return [train, pool]
def mean(numbers):
    """Arithmetic mean of a non-empty sequence of numbers.

    Fix: the original wrapped the sum in filter(None, ...), which drops
    every falsy entry. For plain numbers that is a no-op for the sum (zeros
    add nothing) but it obscures intent and would silently miscount if a
    None ever slipped into the data, so it is removed.
    """
    return sum(numbers) / float(len(numbers))


def stdev(numbers):
    """Sample standard deviation (n - 1 denominator) of `numbers`.

    Requires at least two values; a single value would divide by zero.
    """
    avg = mean(numbers)
    variance = sum(pow(x - avg, 2) for x in numbers) / float(len(numbers) - 1)
    return math.sqrt(variance)
def getMeanandStddev(dataset):
    """Per-class (mean, stdev) summaries for every feature column.

    The class label is the last element of each row; it groups the rows and
    is excluded from the returned summaries.
    """
    # Group rows by their class label.
    by_class = {}
    for row in dataset:
        by_class.setdefault(row[-1], []).append(row)
    # Summarize each feature column (zip(*rows) transposes rows to columns).
    summaries = {}
    for label, rows in by_class.items():
        stats = [(mean(column), stdev(column)) for column in zip(*rows)]
        del stats[-1]  # drop the label column itself
        summaries[label] = stats
    return summaries
def calculateProbability(x, mean, stdev):
    """Gaussian probability density of `x` under N(mean, stdev**2)."""
    z = (x - mean) / stdev
    return math.exp(-(z * z) / 2) / (math.sqrt(2 * math.pi) * stdev)
def calculateClassProbabilities(summaries, inputVector, classProb):
    """Log10 posterior score (up to a shared constant) of each class.

    Score = log10(prior) + sum of per-feature Gaussian log-likelihoods;
    features that are None are skipped.
    """
    probabilities = {}
    for classValue, classSummaries in summaries.items():
        score = math.log10(classProb[classValue])
        # zip truncates at the summary count, ignoring the trailing label.
        for feature, (mu, sigma) in zip(inputVector, classSummaries):
            if feature is not None:
                score += math.log10(calculateProbability(feature, mu, sigma))
        probabilities[classValue] = score
    return probabilities
def getPredictions(summaries, testSet):
    """Predict a class label for every row of `testSet`.

    Class priors are estimated from the label frequencies of `testSet`
    itself (as in the original design), then each row is assigned the class
    with the highest log-posterior score.
    """
    # Empirical prior: fraction of test rows carrying each label.
    counts = {}
    for row in testSet:
        counts[row[-1]] = counts.get(row[-1], 0) + 1
    classProb = {label: count / len(testSet) for label, count in counts.items()}

    predictions = []
    for row in testSet:
        probabilities = calculateClassProbabilities(summaries, row, classProb)
        best_label, best_score = None, -1
        for label, score in probabilities.items():
            if best_label is None or score > best_score:
                best_score, best_label = score, label
        predictions.append(best_label)
    return predictions
def getAccuracy(testSet, predictions):
    """Percentage of rows whose label (last element) matches the prediction."""
    correct = sum(1 for i in range(len(testSet))
                  if testSet[i][-1] == predictions[i])
    return correct / float(len(testSet)) * 100.0
def main():
    # Average accuracy over 10 independent random 80/20 train/test splits.
    filename = 'pima-indians-diabetes.csv'
    accuracy = 0
    for i in range(10):
        # Split dataset
        splitRatio = 0.8
        # NOTE(review): the CSV is re-read every iteration; could be hoisted.
        dataset = loadCsv(filename)
        trainingSet, testSet = splitDataset(dataset, splitRatio)
        # prepare model
        summaries = getMeanandStddev(trainingSet)
        # test model
        predictions = getPredictions(summaries, testSet)
        accuracy += getAccuracy(testSet, predictions)
    accuracy /= 10
    print("Accuracy: {}%".format(accuracy))
| true |
79164f880b3e724b02f45bb7f00e046bad2a70d7 | Python | 49257620/reboot | /studysrc/mytest/re/retest003.py | UTF-8 | 1,836 | 3.59375 | 4 | [] | no_license | # encoding: utf-8
# Author: LW
import re
"""
\A 匹配字符串开始位置 等于 ^
\Z 匹配字符串结束位置 等于 $
"""
print(' \A 匹配字符串开始位置 等于 ^')
print(' \Z 匹配字符串结束位置 等于 $')
print('-' * 50)
"""
\b 匹配单词边界
"""
print(' \\b 匹配单词边界 ')
print("re.findall(r'\\babc\\b','abc.com')",re.findall(r'\babc\b','abc.com'))
print("re.findall(r'\\babc\\b','(abc).com')",re.findall(r'\babc\b','(abc).com'))
print("re.findall(r'\\babc\\b','abc_com')",re.findall(r'\babc\b','abc_com'))
print("re.findall(r'\\babc\\b','abc efg')",re.findall(r'\babc\b','abc efg'))
print('-' * 50)
"""
\B 匹配非单词边界
"""
print(' \\B 匹配非单词边界 ')
print("re.findall(r'py\\B','python')",re.findall(r'py\B','python'))
print("re.findall(r'py\\B','py.')",re.findall(r'py\B','py.'))
print('-' * 50)
"""
\d 匹配数字 等于 [0-9]
\D 匹配非数字 等于 [^0-9]
\s 匹配空字符
\S 匹配非空字符
"""
print('\\d 匹配数字 等于 [0-9]')
print('\\D 匹配非数字 等于 [^0-9]')
print('\\s 匹配空字符 ')
print('\\S 匹配非空字符')
print('-' * 50)
"""
\w 匹配单词字符 [a-zA-Z0-9]
\W 与\w 相反
"""
print('\\w 匹配单词字符 [a-zA-Z0-9]')
print('\\W 与\w 相反')
print("re.findall(r'\\w*','你好 abc 123 ABC')",re.findall(r'\w*','你好 abc 123 ABC'))
print("re.findall(r'\\w','你好 abc 123 ABC . _ + !')",re.findall(r'\w','你好 abc 123 ABC . _ + !'))
print("re.findall(r'\\W','你好 abc 12 3ABC . _ + !')",re.findall(r'\W','你好 abc 123 ABC . _ + !'))
print('-' * 50)
"""
编译正则表达式 ,以便频繁使用
"""
print("""
p = re.compile(r'[A-Z]')
print(p.search('ABC abc'))
print(p.findall('ABC abc'))
""")
p = re.compile(r'[A-Z]')
print(p.search('ABC abc'))
print(p.findall('ABC abc'))
print('-' * 50) | true |
87b96df02437f4e2e965a12136fab7bcd8a90429 | Python | aoeu/DWIM | /tests/rules/test_git_bisect_usage.py | UTF-8 | 1,030 | 2.5625 | 3 | [
"MIT"
] | permissive | import pytest
from tests.utils import Command
from dwim.rules.git_bisect_usage import match, get_new_command
@pytest.fixture
def stderr():
    """Canned `git bisect` usage message, as git prints it for a bad
    subcommand."""
    subcommands = ['help', 'start', 'bad', 'good', 'new', 'old', 'terms',
                   'skip', 'next', 'reset', 'visualize', 'replay', 'log', 'run']
    return "usage: git bisect [" + "|".join(subcommands) + "]"
@pytest.mark.parametrize('script', [
    'git bisect strt', 'git bisect rset', 'git bisect goood'])
def test_match(stderr, script):
    """Misspelled bisect subcommands paired with the usage stderr must match."""
    command = Command(script=script, stderr=stderr)
    assert match(command)
@pytest.mark.parametrize('script', [
    'git bisect', 'git bisect start', 'git bisect good'])
def test_not_match(script):
    """Valid (or bare) bisect invocations with empty stderr must not match."""
    assert not match(Command(stderr='', script=script))
@pytest.mark.parametrize('script, new_cmd, ', [
    ('git bisect goood', ['good', 'old', 'log']),
    ('git bisect strt', ['start', 'terms', 'reset']),
    ('git bisect rset', ['reset', 'next', 'start'])])
def test_get_new_command(stderr, script, new_cmd):
    """Suggestions replace the misspelled subcommand, ordered by closeness."""
    expected = ['git bisect {}'.format(cmd) for cmd in new_cmd]
    assert get_new_command(Command(script=script, stderr=stderr)) == expected
| true |
ed4932a7d405f63ba8c42181f5ef544a5e6e4cab | Python | VanillaHellen/VanillaBot | /vanilla_bot.py | UTF-8 | 8,148 | 2.71875 | 3 | [] | no_license | from pathlib import Path
from discord.ext import commands
import discord
import random
import json
import mysql.connector
import datetime
import os
import requests
import io
import aiohttp
# Directory containing this script, used for the error log file below.
script_location = Path(__file__).absolute().parent

# MySQL connection settings, all taken from the environment; a missing
# variable raises KeyError at import time (fail fast).
db_data = {}
db_data['user']=os.environ['DB_USER']
db_data['password']=os.environ['DB_PASSWORD']
db_data['host']=os.environ['DB_HOST']
db_data['database']=os.environ['DB_DB']
def getUwuNumber(userId: str):
    """Return the stored uwu count for `userId`, or 0 when the user is
    unknown or the database is unreachable (errors are printed, not raised)."""
    number = 0
    dbcon = None
    try:
        dbcon = mysql.connector.connect(**db_data)
        cursor = dbcon.cursor(buffered=True, dictionary=True)
        try:
            # Fix: parameterized query instead of interpolating the id into SQL.
            cursor.execute(
                "SELECT number FROM uwu_stats WHERE user_id = %s", (userId,)
            )
            result = cursor.fetchone()
            if result:
                number = result['number']
        finally:
            cursor.close()  # fix: previously leaked when execute() raised
    except mysql.connector.Error as err:
        print(err)
    finally:
        if dbcon is not None:
            dbcon.close()  # fix: previously only closed on full success
    return number
def dbInsertUserUwu(userId: str, numberToAdd: int):
    """Add `numberToAdd` to the stored uwu count for `userId` (upsert).

    Reads the current count via getUwuNumber, then writes the new total;
    database errors are printed, not raised.
    """
    dbcon = None
    try:
        number = getUwuNumber(userId)
        dbcon = mysql.connector.connect(**db_data)
        cursor = dbcon.cursor(buffered=True)
        try:
            # Fix: parameterized upsert instead of interpolating values into SQL.
            cursor.execute(
                "INSERT INTO uwu_stats (user_id, number) VALUES (%s, %s) "
                "ON DUPLICATE KEY UPDATE number = VALUES(number);",
                (userId, number + numberToAdd),
            )
            dbcon.commit()
        finally:
            cursor.close()  # fix: previously leaked when execute() raised
    except mysql.connector.Error as err:
        print(err)
    finally:
        if dbcon is not None:
            dbcon.close()  # fix: previously only closed on full success
# Bot setup: '?' prefix, default intents. The built-in help command is
# disabled because a custom `help` command is defined below.
description = '''A custom bot made by VanillaHellen'''
intents = discord.Intents.default()
bot = commands.Bot(command_prefix='?', help_command=None, description=description, intents=intents)
@bot.event
async def on_ready():
    # Once connected, advertise a "Playing ..." presence status.
    await bot.change_presence(activity=discord.Game(name='Doki Doki UwU Club'))
@bot.command(
    description='Get info on number. You can specify the type (trivia, math, date, year) and the number - the order is not important - or leave them blank. For date type, the number should be in month/day format. Default type is Trivia, and not defining a number will cause it to be generated randomly.',
    usage=f'{bot.command_prefix}n [number/type] [number/type]',
    help=f'{bot.command_prefix}n math 489, {bot.command_prefix}n 489 math, {bot.command_prefix}n year, {bot.command_prefix}n 1232, {bot.command_prefix}n'
)
async def n(ctx, arg1 = None, arg2 = None):
    # Each optional argument may be the number or the fact type, in either
    # order; a numeric or "a/b"-shaped argument is treated as the number.
    type = 'trivia'  # NOTE(review): shadows the builtin `type`
    number = None
    if arg1:
        if arg1.isnumeric() or "/" in arg1:
            number = arg1
        else:
            type = arg1
    if arg2:
        if number:
            type = arg2
        else:
            number = arg2
    # A slash means month/day, so force the 'date' fact type.
    if number and "/" in number:
        type = 'date'
    if number:
        url = "http://numbersapi.com/" + str(number) + "/" + type
    else:
        url = "http://numbersapi.com/random/" + type
    # NOTE(review): requests is synchronous and blocks the event loop while
    # the HTTP call is in flight; aiohttp (already imported) would avoid that.
    response = requests.get(url)
    if response.status_code != 200:
        return await ctx.send("Something went wrong with the request. Do give it another try, perhaps!")
    await ctx.send(response.text)
@bot.command(
    description='Sends a random dog pic.',
    usage=f'{bot.command_prefix}dog',
    help=f'{bot.command_prefix}dog'
)
async def dog(ctx):
    # Ask the Dog CEO API for a random image URL.
    # NOTE(review): blocking requests call inside an async handler.
    response = requests.get("https://dog.ceo/api/breeds/image/random")
    if response.status_code != 200:
        return await ctx.send("Something went wrong with the request. Do give it another try, perhaps!")
    # Download the image asynchronously and re-upload it as an attachment.
    async with aiohttp.ClientSession() as session:
        async with session.get(response.json()['message']) as resp:
            if resp.status != 200:
                return await ctx.send("Could not download the image! Try again?")
            data = io.BytesIO(await resp.read())
            await ctx.send(file=discord.File(data, 'doggo.jpg'))
@bot.command(
    description='Randomly picks an option from given ones.',
    usage=f'{bot.command_prefix}choose <option1 option2 ...>',
    help=f'{bot.command_prefix}choose 1 something "multiple words"'
)
async def choose(ctx, *choices: str):
    # With no options, random.choice raises IndexError, which surfaces as a
    # CommandInvokeError and is reported by the choose_error handler.
    await ctx.send(random.choice(choices))
@bot.command(
    description='Returns all commands available',
    usage=f'{bot.command_prefix}help',
    help=f'{bot.command_prefix}help'
)
async def help(ctx):
    # Build one monospaced code block listing every registered command with
    # its description, usage and example (taken from the command metadata).
    helptext = '''```
    Welcome to VanillaBot! :D\n\n
    Available commands:\n'''
    for command in bot.commands:
        helptext+=f'''
    {command}:'''
        helptext+=f'''
        {command.description}
        usage:
            {command.usage}
        example:
            {command.help}
        '''
    helptext+="```"
    await ctx.send(helptext)
@bot.command(
    description='The bot sends a prayer circle of emojis. Adding -h flag causes the bot to remove the command message',
    usage=f'{bot.command_prefix}pc [-h]',
    help=f'{bot.command_prefix}pc'
)
async def pc(ctx, flag: str = ''):
    """Send an emoji "prayer circle"; with -h, delete the invoking message."""
    blank = '<:blank:773987871085953026>'
    candle = ':candle:'
    pray = ':pray:'
    rows = [
        blank * 4 + candle,
        blank * 2 + candle + blank * 3 + candle,
        blank + candle + blank * 5 + candle,
        '',
        candle + blank * 3 + pray + blank * 3 + candle,
    ]
    # The bottom half mirrors the top half around the praying-hands row.
    message = '\n'.join(rows + rows[-2::-1])
    if flag == '-h':
        await ctx.message.delete()
    await ctx.send(message)
@bot.command(
    description='Rolls a Y dice X times.',
    usage=f'{bot.command_prefix}roll XdY',
    help=f'{bot.command_prefix}roll 2d6'
)
async def roll(ctx, dice: str):
    # Parse "XdY" (X rolls of a Y-sided die); anything else gets a usage hint.
    try:
        rolls, limit = map(int, dice.split('d'))
    except Exception:
        await ctx.send('Format has to be in XdY, where X and Y are numbers!')
        return
    # NOTE(review): X is unbounded, so a huge X produces a message over
    # Discord's length limit — consider capping it.
    result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
    await ctx.send(result)
@bot.command(
    description='Check your uwu stats. Adding a @tag after the command causes the bot to check the tagged user\'s stats',
    usage=f'{bot.command_prefix}uwu [@tag]',
    help=f'{bot.command_prefix}uwu @someone'
)
async def uwu(ctx, user: discord.User = None):
    # With a mention: report that user's count (special-case the bot itself);
    # without: report the caller's own count from the database.
    if user:
        if user == bot.user:
            await ctx.send('{} IS the uwu.'.format(user.mention))
        else:
            number = getUwuNumber(user.id)
            await ctx.send('{} has used uwu **{}** times!'.format(user.mention, number))
    else:
        number = getUwuNumber(ctx.message.author.id)
        await ctx.send('{}, you have used uwu **{}** times!'.format(ctx.message.author.mention, number))
@bot.event
async def on_message(message):
    """Global message hook: skip stacked prefixes and the bot's own messages,
    record "uwu" usage (react + database), then run normal command handling."""
    # Ignore messages that start with a doubled/alternate prefix (other bots).
    allowed_prefixes = ['??', '?!', '?x', '?/']
    if message.content.lower().startswith(tuple(allowed_prefixes)):
        return
    if message.author == bot.user:
        return
    if 'uwu' in message.content.lower() and not message.content.startswith(f'{bot.command_prefix}uwu'):
        # Fix: count case-insensitively, matching the detection above
        # (previously "UwU" passed the check but counted as 0).
        number = message.content.lower().count('uwu')
        emoji = bot.get_emoji(372490965723643907)
        if not emoji:
            # Fallback emoji id when the primary one is not available.
            emoji = bot.get_emoji(505712821913255941)
        dbInsertUserUwu(message.author.id, number)
        await message.add_reaction(emoji)
    await bot.process_commands(message)
@choose.error
async def choose_error(ctx, error):
    # `choose` with no arguments makes random.choice raise IndexError, which
    # reaches this handler wrapped in a CommandInvokeError.
    if isinstance(error, discord.ext.commands.CommandInvokeError):
        await ctx.send('You must provide *something* to choose from!')
@bot.event
async def on_command_error(ctx, error):
    # Build a short user-facing hint and append the full incident details to
    # log.txt next to this script.
    response = ''
    if isinstance(error, discord.ext.commands.BadArgument):
        response += 'Wrong argument! '
    if isinstance(error, discord.ext.commands.MissingRequiredArgument):
        response += 'Missing argument! '
    response += 'If you\'re not sure how to use a command, call **?help** and try reading about it!'
    with open(script_location/'log.txt', 'a+') as log:
        log.write(datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")
                  + '\nSENT BY: ' + ctx.message.author.name + '#' + ctx.message.author.discriminator
                  + ' (' + ctx.message.author.display_name + ')'
                  + '\nMESSAGE: ' + ctx.message.content
                  + '\nERROR: ' + str(error)
                  + '\n----------------------------------------------\n')
    await ctx.send(response)
bot.run(os.environ["ACCESS_TOKEN"]) | true |
3b9693f0f4a95406cf24e8861180a7911b07c6e5 | Python | ManosB435/PythonPorgramms | /Python Programms/OPAP.py | UTF-8 | 1,096 | 3.15625 | 3 | [] | no_license | import requests
import json
import datetime
from collections import Counter
# Snapshot "now" once; the month and day below are derived from it.
d = datetime.datetime.now()
M = d.strftime("%m")  # zero-padded current month, e.g. "04"
# Before 09:00 the current day's draw may not be published yet, so use yesterday.
# NOTE(review): on the 1st of a month this yields D = 0 and the loop below runs
# zero times; d.day - 1 does not roll back into the previous month — confirm.
if d.hour < 9:
    D = d.day-1
else:
    D = d.day
Numbers = []
# Collect the winning numbers of the first draw for every day of the month so far.
# NOTE(review): the year is hard-coded to 2021 in both URLs.
for i in range(1, D+1):
    number_str = str(i)
    A = number_str.zfill(2)  # day-of-month as a zero-padded string for the URL
    # First request resolves a date to a draw id; the second fetches that draw.
    x = requests.get('https://api.opap.gr/draws/v3.0/1100/draw-date/2021-{M}-{D}/2021-{M}-{D}/draw-id'.format(D=A, M=M))
    z = x.json()
    Z = z[0]
    X = requests.get("https://api.opap.gr/draws/v3.0/1100/{Z}".format(Z=Z))
    a = json.loads(X.text)
    winning = a["winningNumbers"]['list']
    Numbers.extend(winning)
print("Τα νουμερα απο την πρωτη κληρωση για καθε μερα του συγκεκριμενου μηνα ειναι:", Numbers)
print("Στο συνολο ειναι:", len(Numbers))
g = Counter(Numbers)
print(g)
print(type(g))
# Baseline: average number of drawn values per day sampled.
y = len(Numbers) / D
# Per-number appearance percentage relative to the days sampled.
# NOTE(review): `count` from most_common() equals g[i] and is unused; the loop
# variable `y` below shadows this ratio after its last use — harmless but confusing.
G = [(i, g[i] / y * 100) for i, count in g.most_common()]
print(G)
for (x, y) in G:
    print("Ο αριθμος", x, "εχει", y, "% πιθανοτητες να εμφανιστει")
| true |
489ca1e5d88a19c208504e36e5f7f096a862d03b | Python | IanSMoyes/SpiderPi | /Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/rfm9x_transmit.py | UTF-8 | 2,032 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Example to send a packet periodically
# Author: Jerry Needell
#
import time
import board
import busio
import digitalio
import adafruit_rfm9x
# set the time interval (seconds) for sending packets
transmit_interval = 10
# Define radio parameters.
RADIO_FREQ_MHZ = 915.0  # Frequency of the radio in Mhz. Must match your
# module! Can be a value like 915.0, 433.0, etc.
# Define pins connected to the chip.
CS = digitalio.DigitalInOut(board.CE1)
RESET = digitalio.DigitalInOut(board.D25)
# Initialize SPI bus.
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
# Initialize RFM radio
rfm9x = adafruit_rfm9x.RFM9x(spi, CS, RESET, RADIO_FREQ_MHZ)
# Note that the radio is configured in LoRa mode so you can't control sync
# word, encryption, frequency deviation, or other settings!
# You can however adjust the transmit power (in dB). The default is 13 dB but
# high power radios like the RFM95 can go up to 23 dB:
rfm9x.tx_power = 23
# initialize the packet counter used in the outgoing message text
counter = 0
# send a broadcast message
rfm9x.send(bytes("message number {}".format(counter), "UTF-8"))
# Wait to receive packets.
print("Waiting for packets...")
# initialize flag and timer
# NOTE(review): send_reading is written below but never read anywhere — it has
# no effect on behaviour; transmission is driven purely by the timer.
send_reading = False
time_now = time.monotonic()
while True:
    # Look for a new packet - wait up to 5 seconds:
    packet = rfm9x.receive(timeout=5.0)
    # If no packet was received during the timeout then None is returned.
    if packet is not None:
        # Received a packet!
        # Print out the raw bytes of the packet:
        print("Received (raw bytes): {0}".format(packet))
    # send reading every transmit_interval seconds, whether or not a packet arrived
    if time.monotonic() - time_now > transmit_interval:
        # reset timer
        time_now = time.monotonic()
        # clear flag to send data
        send_reading = False
        counter = counter + 1
        rfm9x.send(bytes("message number {}".format(counter), "UTF-8"))
| true |
a8bd6b6abd4836b810b82d265e73b4a059f0870f | Python | idleyui/ml-in-action | /02-knn/knn_letter_recognition.py | UTF-8 | 1,216 | 3.078125 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import numpy as np
from collections import Counter
def classify0(test_data, data_set, labels, k):
    """Classify test_data by majority vote among its k nearest neighbours
    (Euclidean distance) in data_set."""
    rows = data_set.shape[0]
    deltas = np.tile(test_data, (rows, 1)) - data_set
    dists = np.sqrt((deltas ** 2).sum(axis=1))
    nearest_labels = labels[np.argsort(dists)[:k]]
    return Counter(nearest_labels).most_common(1)[0][0]
def letter_recognition():
    """Benchmark classify0 on the UCI letter-recognition dataset.

    For each training-set size and each k in 1..9, classifies every test row
    against the training rows and prints the accuracy.

    Fixes over the original:
    * the 20k-row data file was re-loaded from disk inside the
      (train_size, k) double loop — it is now loaded once;
    * `Counter(results).most_common()[0][1]` reports the *majority* outcome
      count, which is the number of wrong answers whenever accuracy < 50% —
      replaced with sum() over the boolean results;
    * `np.float` (alias removed in NumPy 1.24) replaced with builtin float.
    """
    data_size = 20000
    train_size_list = [10000, 15000, 19000]
    # Load once; rows look like "T,2,8,3,...": label first, 16 features after.
    d = np.loadtxt('data/letter-recognition.data', delimiter=',', dtype='<U3')
    labels = d[:, 0]
    data = np.delete(d, 0, axis=1).astype(float)
    for train_size in train_size_list:
        test_size = data_size - train_size
        train_data, test_data = np.vsplit(data, np.array([train_size]))
        for k in range(1, 10):
            # True/False per test row: predicted label == actual label.
            results = [classify0(item, train_data, labels, k) == labels[train_size + i]
                       for i, item in enumerate(test_data)]
            right = sum(results)
            print("train with %d data item and k=%d, test with %d data item, %d item is right, accuracy is %.2f%%"
                  % (train_size, k, test_size, right, (right / test_size) * 100))
if __name__ == '__main__':
    # Run the benchmark only when executed as a script, not on import.
    letter_recognition()
| true |
b474ad1b9d2976ad9a0ac056d74fc6ee04b1c2f2 | Python | MichaelSchmidt82/sound-count | /utils.py | UTF-8 | 2,613 | 2.609375 | 3 | [
"MIT"
] | permissive | """
MIT License
Copyright (c) 2018 Michael Schmidt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
#import io
import wave
import logging
import contextlib
import nltk
import speech_recognition as sr
from environment import APP_VARS as config
import recognizers
def duration(filename):
    """
    Compute the playing time of a WAV file.

    :filename: str() A filename (relative to __main__)
    :returns: float() Duration in seconds
    """
    # closing() guarantees the wave reader is released even on error.
    with contextlib.closing(wave.open(filename, 'r')) as wav:
        return wav.getnframes() / float(wav.getframerate())
def speech_rec(filename):
    """
    Build a recognizer and run speech-to-text on a wave file.

    :filename: str() A filename (relative to __main__)
    :returns: list() A list of words
    """
    # TODO: pass in engine as paramater (a.k.a sphinx)
    # TODO: use io.BytesIO() as a buffer instead of a filename
    audio_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_path) as source:
        recorded = recognizer.record(source)
    # Decode with the offline Sphinx engine after the audio file is closed.
    return recognizers.sphinx(recognizer, recorded)
def pos_tagger(words):
    """
    Tag each word with associated POS.

    :words: list() list of str(), element: word
    :returns list[list()] list([word, pos])
    """
    # NOTE(review): nltk.pos_tag expects a list of tokens. If `words` is a flat
    # list of word strings, each call below tags the *characters* of one word,
    # and only the first word's tags are returned (tag_words[0]). The code only
    # matches its docstring if `words` is a one-element list whose single item
    # is itself the token list — TODO confirm the shape returned by
    # recognizers.sphinx before changing anything here.
    tag_words = []
    for word in words:
        tag_words.append(nltk.pos_tag(word))
    return tag_words[0]
| true |
0a5ccca3ff94d21e588ad4663ee2ba7d7aa768cd | Python | smartnova/sgvis | /src/stream_graph.py | UTF-8 | 3,172 | 3.078125 | 3 | [] | no_license | import time
from z3 import *
import functools
from render.render_stream_graph import render_stream_graph
from random import randint
import sys
def optimize_stream_graph(nodes, edges):
    """Build a z3 Optimize problem that assigns each node a unique vertical
    position (index) minimising the total vertical span of all edges.

    :param nodes: list of node identifiers (0..L-1)
    :param edges: list (one entry per timestamp) of lists of (u, v) pairs
    :return: (solver, index) — the Optimize instance and the list of z3 Int
             position variables, one per node
    """
    s = Optimize()
    L = len(nodes)
    # Index: one integer position variable per node, constrained to a
    # permutation of 0..L-1 (in range + pairwise distinct).
    index = [Int('i_%s' % i) for i in range(L)]
    index_choices = [And(index[i] >= 0, index[i] < L) for i in range(L)]
    index_unique = [Implies(index[i] == index[j], i == j) for i in range(L) for j in range(L)]
    s.add(index_choices)
    s.add(index_unique)
    def get_index(u):
        # Look up the z3 variable for node u by its generated name.
        for single_index in index:
            if str(single_index) == 'i_%s' % u:
                return single_index
        raise RuntimeError('Tried finding current index of node:' + str(u) + ', but it did not exist.')
    # Distance
    # NOTE(review): the loop variable `time` shadows the imported time module
    # inside this function — harmless here, but confusing.
    edge_time_tuples = []
    for time, edge_list in enumerate(edges):
        for edge in edge_list:
            edge_time_tuples.append((time, edge[0], edge[1]))
    # One length variable per (timestamp, u, v) edge occurrence.
    edge_lengths = [Int('d_%s,%s_%s' % (t, u, v)) for t, u, v in edge_time_tuples]
    # Helper function to access the above variable by node i and j instead of index in the array.
    def get_distance(t, u, v):
        for single_distance in edge_lengths:
            if str(single_distance) == 'd_%s,%s_%s' % (t, u, v):
                return single_distance
        raise RuntimeError('Tried finding distance between node ' + str(u) + ' and node ' + str(v)
                           + ' at time ' + str(t) + ', but it did not exist.')
    def Abs(x):
        # Symbolic absolute value (z3 has no builtin Abs for Int terms).
        return If(x >= 0, x, -x)
    # Constraints for edge distancing: each length variable equals the
    # absolute difference of its endpoints' positions.
    distance_positive = [edge_lengths[i] >= 0 for i in range(len(edge_lengths))]
    distance_actual_distances = [get_distance(t, u, v) == Abs(get_index(u) - get_index(v))
                                 for t, u, v in edge_time_tuples]
    s.add(distance_positive + distance_actual_distances)
    # Objective: minimise the sum of all edge lengths.
    s.minimize(Sum(edge_lengths))
    return s, index
# n: number of nodes. m: number of timestamps
def generate_random_input_data(n, m):
    """Create a random stream graph: n nodes, and for each of m timestamps a
    list of 3..n/2 random edges between distinct nodes."""
    node_ids = list(range(n))
    timeline = []
    for _ in range(m):
        edge_count = randint(3, int(n / 2))
        snapshot = []
        for _ in range(edge_count):
            u = randint(0, n - 1)
            v = u
            # Re-draw the target until it differs from the source (no self-loops).
            while v == u:
                v = randint(0, n - 1)
            snapshot.append((u, v))
        timeline.append(snapshot)
    return node_ids, timeline
def run(nodes, edges):
    """Solve the node-ordering problem for (nodes, edges) and render it.

    Bug fix: the original referenced the module-level `example_edges` in the
    print and in the optimize_stream_graph() call instead of the `edges`
    parameter, so calling run() with any other graph silently optimised the
    wrong data.
    """
    print(nodes)
    print(edges)
    print("Calculating...")
    start_time = time.time()
    s, index = optimize_stream_graph(nodes, edges)
    s.check()
    print("Finished!")
    end_time = time.time()
    elapsed = round(end_time - start_time, 2)
    print("Computation took", elapsed, "seconds.")
    # Render the result: sort nodes by their solved position and draw.
    m = s.model()
    r = [m.evaluate(index[i]) for i in range(len(nodes))]
    d = [(i, x.as_long()) for i, x in enumerate(r)]
    nodes_in_order = list(map(lambda x: x[0], sorted(d, key=lambda x: x[1])))
    render_stream_graph(nodes_in_order, edges)
# Demo: build a random 15-node, 3-timestamp stream graph and optimize it.
example_nodes, example_edges = generate_random_input_data(15, 3)
run(example_nodes, example_edges)
| true |
ce5d58c76c9a11829d6c45ad108c4f0952282f1b | Python | PetKimQA/Practice_1 | /radioButton-validationfailed.py | UTF-8 | 4,095 | 2.796875 | 3 | [] | no_license | from selenium import webdriver
# Open the demo page with a local chromedriver.
driver = webdriver.Chrome("C:/Users/PetruKim/Desktop/Automation/chromedriver.exe")
driver.get("https://www.seleniumeasy.com/test/basic-radiobutton-demo.html")

# locators
maleRadioIndv = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[1]/div[2]/label[1]/input')
femaleRadioIndv = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[1]/div[2]/label[2]/input')
maleRadioGrp = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/div[1]/label[1]/input')
femaleRadioGrp = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/div[1]/label[2]/input')
ageGrp1 = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/div[2]/label[1]/input')
ageGrp2 = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/div[2]/label[2]/input')
ageGrp3 = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/div[2]/label[3]/input')
getCheckedButton = driver.find_element_by_xpath('//*[@id="buttoncheck"]')
getValueButton = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/button')
getCheckedText = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[1]/div[2]/p[3]')
getValueText = driver.find_element_by_xpath('//*[@id="easycont"]/div/div[2]/div[2]/div[2]/p[2]')

# expected result texts
maleIndvExpTxt = "Radio button 'Male' is checked"
femaleIndvExpTxt = "Radio button 'female' is checked"
maleGrpExpTxt1 = "Sex : Male" + "\n" + "Age group: 0 - 5"
maleGrpExpTxt2 = "Sex : Male" + "\n" + "Age group: 5 - 15"
maleGrpExpTxt3 = "Sex : Male" + "\n" + "Age group: 15 - 50"
femaleGrpExpTxt1 = "Sex : Female" + "\n" + "Age group: 0 - 5"
femaleGrpExpTxt2 = "Sex : Female" + "\n" + "Age group: 5 - 15"
femaleGrpExpTxt3 = "Sex : Female" + "\n" + "Age group: 15 - 50"

expectedResultArry = [maleIndvExpTxt,
                      femaleIndvExpTxt,
                      maleGrpExpTxt1,
                      maleGrpExpTxt2,
                      maleGrpExpTxt3,
                      femaleGrpExpTxt1,
                      femaleGrpExpTxt2,
                      femaleGrpExpTxt3]

# default verification: no radio button should be pre-selected
driver.implicitly_wait(3)
if maleRadioIndv.is_selected() or femaleRadioIndv.is_selected() or maleRadioGrp.is_selected() or femaleRadioGrp.is_selected() or ageGrp1.is_selected() or ageGrp2.is_selected() or ageGrp3.is_selected():
    print("Default status failed")

# individual radio-button check
maleRadioIndv.click()
getCheckedButton.click()
maleIndvAcText = getCheckedText.text
femaleRadioIndv.click()
getCheckedButton.click()
femaleIndvAcText = getCheckedText.text

# group check: every age group for each sex
ageGroups = [ageGrp1, ageGrp2, ageGrp3]
maleRadioGrp.click()
maleRadioGrpAcText = []
femaleRadioGrpAcText = []
for age in ageGroups:
    age.click()
    getValueButton.click()
    maleRadioGrpAcText.append(getValueText.text)
femaleRadioGrp.click()
for age in ageGroups:
    age.click()
    getValueButton.click()
    femaleRadioGrpAcText.append(getValueText.text)

testActualResult = [maleIndvAcText,
                    femaleIndvAcText,
                    maleRadioGrpAcText[0],
                    maleRadioGrpAcText[1],
                    maleRadioGrpAcText[2],
                    femaleRadioGrpAcText[0],
                    femaleRadioGrpAcText[1],
                    femaleRadioGrpAcText[2]]

print(testActualResult)
print(expectedResultArry)

# Compare each actual result with its expected counterpart.
# Bug fix: the original `while i <= len(testActualResult)` loop both ran one
# index past the end of the list and never incremented `i` on a mismatch,
# so a single failing item looped forever.
for actual, expected in zip(testActualResult, expectedResultArry):
    if actual == expected:
        print("Pass")
    else:
        print("Fail")

driver.quit()
| true |
6b22158574c7f2eeae4ac042a3f11edd64f52781 | Python | danluna/TourMap | /crawl.py | UTF-8 | 2,119 | 2.765625 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
unicode_Error_Counter = 0
def tour_spider(max_year):
    """Scrape metallica.com tour dates from 1982 up to max_year and write
    date/venue/setlist data to metallica_data.txt.

    NOTE(review): `fw.write(...encode('utf-8')...)` writes bytes into a
    text-mode file, which raises TypeError on Python 3 — this code appears to
    have been written for Python 2; confirm the target interpreter.
    NOTE(review): year/page only advance in the AttributeError branch (when no
    date table is found); a page that parses successfully is re-fetched
    forever — confirm whether the site's markup guarantees that branch fires.
    """
    year = 1982
    page = 1
    fw = open('metallica_data.txt', 'w')
    while year <= max_year:
        url = 'http://www.metallica.com/tour_date_list.asp?year=' + str(year) + '&page=' + str(page)
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        data = soup.find('table', class_="DDT-wrap").find('tbody')
        try:
            for date_set in data.findAll('tr'):
                # Columns: date, venue (link), city, optional setlist link.
                fw.write(date_set.contents[1].string + '\n')
                fw.write(date_set.contents[3].a.string + '\n')
                fw.write(date_set.contents[5].string + '\n')
                a_tag = date_set.contents[7].a
                try:
                    # Go to the setlist page and get the songs
                    fw.write(a_tag.get('href') + '\n')
                    setlist_url = a_tag.get('href')
                    source_code = requests.get(setlist_url)
                    plain_text = source_code.text
                    soup2 = BeautifulSoup(plain_text, "html.parser")
                    data = soup2.find('table', class_="DDT-panel")
                    firstItem = True
                    for song in data.findAll('tr'):
                        # Skip the first item, it's not a song, only a title
                        if firstItem:
                            firstItem = False
                            continue
                        try:
                            print(song.find('a').string.strip())
                            fw.write(song.find('a').string.encode('utf-8').strip())
                            fw.write('\n')
                        except AttributeError:
                            # Song cell without a link: take the raw text instead.
                            try:
                                print(song.contents[3].contents[1].strip())
                                fw.write(song.contents[3].contents[1].strip() + '\n')
                            except IndexError:
                                print("ENCORE HERE")
                except AttributeError:
                    # Get the link from the <a> tag and href attribute since there is no setlist link
                    setlist_url = date_set.contents[3].a.get('href')
                    print(setlist_url)
                    fw.write(setlist_url.strip())
                    fw.write('\n')
                fw.write('\n')
        except AttributeError:
            # No tour dates this given year
            # Check for more pages for this year
            liTags = soup.find(attrs={'class': 'next_page'})
            if not liTags:
                year+=1
                page=1
            else:
                page+=1
    # End of spider, close file write
    fw.close()
# Start the crawler (the original used a C-style `//` comment, which is a
# SyntaxError in Python).
tour_spider(2015)
6c6157dd9fb61dabfd5d407d4becda7386a70698 | Python | Techercise/Nobel_Peace_Lecture_Analyis_with_NLP | /htmlparser.py | UTF-8 | 605 | 2.6875 | 3 | [] | no_license | # This function comes from StackOverflow user Eloff at:
# https://stackoverflow.com/questions/753052/strip-html-from-strings-in-python/925630#925630 And improved for Python
# 3.2+ by another StackOverflow user Thomas K. here:
# https://stackoverflow.com/questions/11061058/using-htmlparser-in-python-3-2
from abc import ABC
from html.parser import HTMLParser
class MLStripper(HTMLParser, ABC):
    """HTMLParser subclass that keeps only character data, discarding tags."""

    def __init__(self):
        """Initialise the parser and an empty buffer of text fragments."""
        super().__init__()
        self.reset()
        self.fed = []

    def handle_data(self, d):
        """Collect every run of character data the parser encounters."""
        self.fed.append(d)

    def get_data(self):
        """Return the concatenation of all collected text fragments."""
        return "".join(self.fed)
7c32796d89262f80b831676d77656daf25b03b85 | Python | Priyanka9496/Whether_app | /The_whether/whether/views.py | UTF-8 | 950 | 2.71875 | 3 | [] | no_license | from django.shortcuts import render
from django.http import HttpResponse
import requests
from .models import City
from .forms import City_Form
# Create your views here.
def weather(request):
    """Render the current weather for every saved City.

    GET: show all cities with a blank add-city form.
    POST: save the submitted city first, then render as for GET.

    Fixes over the original:
    * the API URL hard-coded ``q=kochi`` with no ``{}`` placeholder, so
      ``url.format(city)`` was a no-op and every row showed Kochi's weather;
    * ``form.save()`` ran without validation;
    * the view never returned an HttpResponse.
    """
    cities = City.objects.all()
    url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=YOUR-API-KEY'
    if request.method == 'POST':
        form = City_Form(request.POST)
        if form.is_valid():
            form.save()
    form = City_Form()
    weather_data = []
    for city in cities:
        # Request the API data and convert the JSON to Python data types.
        city_weather = requests.get(url.format(city)).json()
        weather_data.append({
            'city': city,
            'temperature': city_weather['main']['temp'],
            'description': city_weather['weather'][0]['description'],
            'icon': city_weather['weather'][0]['icon'],
        })
    context = {'weather_data': weather_data, 'form': form}
    # NOTE(review): template name assumed — confirm the template path used by
    # this project before deploying.
    return render(request, 'weather.html', context)
| true |
c698b47d561d136fe348922338a4d5e18283a540 | Python | taizilinger123/pythonjichu | /04-系统编程-1/12-join子进程.py | UTF-8 | 370 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from multiprocessing import Process
import time
import random
def test():
    """Child-process workload: print a numbered marker line once per second,
    for a random 1–5 iterations."""
    total = random.randint(1, 5)
    for step in range(total):
        print("-----%d-----" % step)
        time.sleep(1)
# Spawn the worker, then wait for it — but for at most 1 second.
p = Process(target=test)
p.start()
p.join(1)  # block: wait up to 1 s for the child, then continue regardless
print("------main-----")
# import time
# time.ctime()  # current time as a human-readable string
# time.time()   # current time as a float timestamp
6170abca954e845cef92e28961f508d4010cab25 | Python | bstacy0015/Integration | /POGIL12.py | UTF-8 | 1,513 | 4.53125 | 5 | [] | no_license | """
A calculator that calculates the area of a circle with radius inputted by the
user. Test of functions.
__author__ = Benton Stacy
"""
def circle_area():
    """
    The top-level function run in Main.py. Controls all user input,
    calculation, and output.
    """
    import math  # To use pi

    def number_format(num):
        """
        Keep prompting until the user enters a valid number.

        :param num: the prompt string shown to the user
        :return: the entered value as a float
        """
        while True:
            try:
                return float(input(num))
            except ValueError:
                print("Error. Please enter the desired number. You may use "
                      "decimals.")
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit can still terminate the prompt loop.
                print("Error: unknown.")

    def calculate_area(radius):
        """
        Compute and print the area of a circle.

        :param radius: The radius of the circle.
        """
        area = math.pi * radius ** 2
        print("Area of a circle with a radius of", radius, "is",
              format(area, ".3f"))

    def main():
        """
        Ask the user for the radius, then print the area via calculate_area.
        """
        radius = number_format("Please enter the radius of the circle: ")
        calculate_area(radius)
main() # Call to main function, which will run a function within a
# function.
| true |
41faf4b1e4b3ba74cd3e776203c54b1d6a211980 | Python | FabioCastle5/wil-auiproject | /python_plot/dispersion_evaluation.py | UTF-8 | 2,388 | 3.03125 | 3 | [] | no_license | import string
import re
import math
def low_pass_filter (a, a0, factor):
return (a * factor + (1 - factor) * a0)
filename = "./../arduino/will_measurement_sketch/measure2_ypos.txt"
# open file with data and plot them
in_file = open(filename,"r")
x_list = []
filt_x_list = []
y_list = []
filt_y_list = []
threshold = 0.1
filter_factor = 0.65
entry = in_file.readline()
while len(entry) > 1:
if entry.startswith("Ax"):
[x,y] = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?", entry)
x_list.append(float(x))
y_list.append(float(y))
entry = in_file.readline()
in_file.close()
# evaluate the mean value
sumx = 0
sumy = 0
for i in range (len(x_list)):
sumx = sumx + x_list[i]
sumy = sumy + y_list[i]
avgx = sumx / len(x_list)
avgy = sumy / len(y_list)
# subtract mean value of the error
for i in range (len(x_list)):
x_list[i] = x_list[i] - avgx
y_list[i] = y_list[i] - avgy
# filter data
filt_x_list.append(low_pass_filter(x_list[0], 0, filter_factor))
filt_y_list.append(low_pass_filter(y_list[0], 0, filter_factor))
for i in xrange(1, len(x_list)):
filt_x_list.append(low_pass_filter(x_list[i], x_list[i-1], filter_factor))
filt_y_list.append(low_pass_filter(y_list[i], y_list[i-1], filter_factor))
# evaluate the quadratic mean value
sumx = 0
sumy = 0
for i in range (len(x_list)):
sumx = sumx + x_list[i]
sumy = sumy + y_list[i]
avgx = sumx / len(x_list)
avgy = sumy / len(y_list)
print ("MEAN DISPERISION FOR X: " + str(avgx))
print ("MEAN DISPERISION FOR Y: " + str(avgy))
# # make 0/1 decision
# for i in range(len(x_list)):
# if x_list[i] > threshold:
# x_list[i] = 1
# elif x_list[i] < - threshold:
# x_list[i] = -1
# else:
# x_list[i] = 0
# for j in range(len(y_list)):
# if y_list[j] > threshold:
# y_list[j] = 1
# elif y_list[j] < - threshold:
# y_list[j] = -1
# else:
# y_list[j] = 0
# for i in range(len(filt_x_list)):
# if filt_x_list[i] > threshold:
# filt_x_list[i] = 1
# elif filt_x_list[i] < - threshold:
# filt_x_list[i] = -1
# else:
# filt_x_list[i] = 0
# for j in range(len(filt_y_list)):
# if filt_y_list[j] > threshold:
# filt_y_list[j] = 1
# elif filt_y_list[j] < - threshold:
# filt_y_list[j] = -1
# else:
# filt_y_list[j] = 0
| true |
c21c5be66d0453b9c4d02b3ebab551b09451d2bb | Python | rmodi6/sentence-representations | /loss.py | UTF-8 | 401 | 2.53125 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
from tensorflow import math
def cross_entropy_loss(logits: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
# import pdb; pdb.set_trace()
# why is loss 0 sometimes!?
one_hot_labels = tf.one_hot(labels, logits.shape[1])
batch_loss = tf.nn.softmax_cross_entropy_with_logits(one_hot_labels, logits)
loss_value = math.reduce_mean(batch_loss)
return loss_value
| true |
c3e3218ec16fa5bbf414ede048d43c3ff138841b | Python | Egemenatilla/Machine-Learning-Essentials | /2)Classification/3) SVM (Support Vector Machine)/2)/Svm2.py | UTF-8 | 1,295 | 3.359375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 1 20:42:24 2021
@author: egeme
"""
# Libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Read data from csv file
data = pd.read_csv('data.csv')
# Drop the columns we cannot use
data.drop(["id","Unnamed: 32"],axis=1,inplace=True)
M = data[data.diagnosis == "M"]
B = data[data.diagnosis == "B"]
# scatter plot
plt.scatter(M.radius_mean,M.texture_mean,color="red",label="bad",alpha= 0.3)
plt.scatter(B.radius_mean,B.texture_mean,color="green",label="good",alpha= 0.3)
plt.xlabel("radius_mean")
plt.ylabel("texture_mean")
plt.legend()
plt.show()
# Apply label encode to diagnosis column
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
data['diagnosis'] = le.fit_transform(data['diagnosis'])
#Slicing
x_data = data.drop(["diagnosis"],axis=1)
x = (x_data - np.min(x_data))/(np.max(x_data)-np.min(x_data))
y = data.diagnosis.values
# Split train and test
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.3,random_state=1)
# SVM
from sklearn.svm import SVC
svm = SVC(random_state = 1)
svm.fit(x_train,y_train)
# Accuracy
print("Accuracy of svm: ",svm.score(x_test,y_test)) | true |
6b4f6fd067bc1b0787d34a9cd4b4f08c32d1e412 | Python | markosolopenko/python | /data_structures/singly_linked_list.py | UTF-8 | 2,966 | 3.953125 | 4 | [] | no_license | class Node(object):
def __init__(self, data=None):
self.data = data
self.next = next
# def get_data(self):
# """
# This method like -> @property
# :return:
# """
# return self.data
#
#
# def get_next(self):
# """
# getter
# :return:
# """
# return self.next_node
# def set_next(self, new_next):
# """
# setter
# :param new_next:
# :return:
# """
# self.next_node = new_next
class SinglyLinkedList(object):
    """A singly linked list of Node objects; insertions happen at the head.

    Bug fix: size/search/delete previously called ``get_next()`` /
    ``get_data()`` / ``set_next()``, which are commented out in Node and
    therefore raised AttributeError; they now use the ``.data`` / ``.next``
    attributes directly, matching insert() and print_list().
    """

    def __init__(self, head: Node = None):
        self.head = head

    def insert(self, new_data):
        """
        Insert data at the head of the linked list.
        :param new_data:
        :return:
        """
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def size(self):
        """
        Return the number of nodes in the linked list.
        :return:
        """
        count = 0
        current = self.head
        while current:
            count += 1
            current = current.next
        return count

    def search(self, data):
        """
        Return the first node holding `data`.
        :param data:
        :return:
        :raises ValueError: if the data is not in the list
        """
        current = self.head
        while current:
            if current.data == data:
                return current
            current = current.next
        raise ValueError("Data not in list")

    def delete(self, data_to_remove):
        """
        Unlink the first node holding data_to_remove.
        :param data_to_remove:
        :return:
        :raises ValueError: if the data is not in the list
        """
        current = self.head
        previous = None
        while current:
            if current.data == data_to_remove:
                break
            previous = current
            current = current.next
        if current is None:
            raise ValueError("Data is not in list")
        if previous is None:
            # Removing the head node.
            self.head = current.next
        else:
            previous.next = current.next

    def print_list(self):
        """
        Traverse the list, printing each node's data on one line.
        :return:
        """
        current = self.head
        while current is not None:
            print(current.data, end=' ')
            current = current.next
# linked_list = SinglyLinkedList()
# linked_list.head = Node('Hello')
# e1 = Node('Mi')
# e2 = Node('Si')
#
# linked_list.head.set_next(e1)
# e1.set_next(e2)
# print(linked_list.search("Mi").get_data())
# print(linked_list.delete("Hello"))
# print(linked_list.size())
# print(linked_list.print_list())
# Quick manual demo: builds the list 48 -> 2 -> 12 and prints it.
a = SinglyLinkedList()
a.insert(12)
a.insert(2)
a.insert(48)
a.print_list()
| true |