blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6f13e4fff998b65900494d717139194249b61bf6 | Python | webclinic017/2020-lfd | /LFD_Project2/src/tuning.py | UTF-8 | 3,304 | 2.828125 | 3 | [] | no_license | import DataGenerator
from hmmlearn.hmm import GaussianHMM
import numpy as np
from sklearn.metrics import mean_absolute_error
import pickle
from DataGenerator import make_features_for_tuning, create_all_features
import matplotlib.pyplot as plt
def validate_model(model, test_x, past_price):
    """Predict the next 10 days of gold prices from a fitted HMM.

    For each of the first 10 test samples the hidden state is decoded, and
    the expected one-day price change for that state (first column of
    transition matrix dotted with the state means) is added to the
    corresponding past price.  Returns a numpy array of 10 predictions.
    """
    states = model.predict(test_x)
    # Expected per-state price delta: first column of transmat . means.
    state_deltas = np.dot(model.transmat_, model.means_)[:, 0]
    predictions = [past_price[day] + state_deltas[states[day]] for day in range(10)]
    return np.array(predictions)
def clustering_for_features_selection(start_date, end_date):
    """Grid-search the feature-cluster count for an HMM gold-price model.

    For each candidate cluster count, features are built, a GaussianHMM is
    fitted, and a 10-day forecast is scored with MAE.  The best feature set
    and model seen so far are persisted, every result is appended to a log
    file, and a MAE-vs-clusters plot is written to disk.
    """
    all_features_df, gold_price = create_all_features(start_date, end_date, is_training=False)
    n_components = 3  # TODO tuning
    input_days = 3  # TODO tuning
    n_clusters_list = list(range(10, len(all_features_df.columns), 50))
    print(n_clusters_list)
    mae_results = []
    # Context manager so the results file is flushed/closed even on errors
    # (the original left it open for the life of the process).
    with open('features/clustering_features_selection_results.txt', 'w', encoding='utf-8') as results_file:
        for n_cluster in n_clusters_list:
            training_x, test_x, past_price, target_price, selected_features_name_list = make_features_for_tuning(
                all_features_df, gold_price, n_cluster, input_days)
            model = GaussianHMM(n_components)
            model.fit(training_x)
            predict = validate_model(model, test_x, past_price)
            res_mae = mean_absolute_error(target_price, predict)
            if not mae_results or min(mae_results) > res_mae:
                # New best MAE: save the selected feature names ...
                with open('features/clustering_selected_features.txt', 'w', encoding='utf-8') as f:
                    f.write('{}, {}\n'.format(n_cluster, res_mae))
                    f.write(', '.join(selected_features_name_list))
                # ... and pickle the fitted model (with-block closes the handle,
                # which the original leaked).
                # TODO: fix pickle file name
                filename = 'model_kmeans_clustering_best.pkl'
                with open(filename, 'wb') as model_file:
                    pickle.dump(model, model_file)
                print('saved {}'.format(filename))
            mae_results.append(res_mae)
            print('mae for {} clusters with {}: {}'.format(n_cluster, len(selected_features_name_list), res_mae))
            results_file.write('mae for {} clusters: {}\n'.format(n_cluster, res_mae))
    plt.plot(n_clusters_list, mae_results, 'b-')
    plt.grid(which='both')
    plt.xticks(list(range(10, max(n_clusters_list), 50)))
    plt.yticks(list(range(0, int(max(mae_results)), 5)))
    plt.ylabel('MAE')
    plt.xlabel('number of clusters')
    # BUG FIX: savefig must run BEFORE show() — show() finishes the figure,
    # so the original order wrote an empty PNG.
    plt.savefig('features/clustering_features_selection_results.png')
    plt.show()
if __name__ == "__main__":
    # Tuning window: roughly a decade of daily gold-price history.
    start_date = '2010-01-01'
    end_date = '2020-04-18'
    clustering_for_features_selection(start_date, end_date)
a5e9ffdf42f1672076d4941d391b602ea4ccab9f | Python | uvkrishnasai/algorithms-datastructures-python | /preparation/GraphBFS.py | UTF-8 | 539 | 3.53125 | 4 | [] | no_license | """
input = [
(1, 3), (3, 2), (2, 4), (4, 5), (8, 5), (5, 9),
(3, 6), (10, 6), (6, 4), (4, 7), (7, 9)
]
in_1, in_2 = [], []
for elem in input:
in_1.append(elem[0])
in_2.append(elem[1])
out_1 = set(in_1) - set(in_2)
in_4 = Counter(in_2)
out_2 = []
for k, v in in_4.items():
if v == 1:
out_2.append(k)
print(in_4)
print(out_1)
print(out_2)
"""
# Two-sum: print the pair of indices whose values add up to k.
nums = [2, 7, 11, 15]
k = 9
# Map each value to its index for O(1) complement lookups.
mape = dict()
for idx, value in enumerate(nums):
    mape[value] = idx
for idx, value in enumerate(nums):
    complement = k - value
    if complement in mape:
        print([idx, mape[complement]])
        break
| true |
4d2bcfa1108ffbe667d8ad209607bdb876c56dc6 | Python | shaunharker/2016-12-15-Workshop | /source/pyCHomP/Braids.py | UTF-8 | 4,663 | 3.171875 | 3 | [
"MIT"
] | permissive | ### Braids.py
### MIT LICENSE 2016 Shaun Harker
from CubicalComplex import *
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
class BraidDiagram:
    """A braid diagram: `n` strands sampled at `m + 1` waypoints.

    Exposes strand heights (via __call__), the end-to-start strand
    permutation `pi`, lap numbers for cubical domains, and plotting.
    """
    def __init__(self, braid_skeleton):
        """
        Inputs:
          braid_skeleton : a list of lists such that
            braid_skeleton[i][j] gives the value of strand i at position j
        Attributes built:
          n        : number of strands
          m        : number of waypoints
          permute_ : permutation pi such that x(i, m) == x(pi(i), 0)
          min_/max_: extreme strand heights (used as plotting bounds)
        """
        self.n = len(braid_skeleton)
        self.m = len(braid_skeleton[0]) - 1
        self.permute_ = {}
        # pi(i) = j whenever strand i ends at the height strand j starts at.
        for i in range(0, self.n):
            for j in range(0, self.n):
                if braid_skeleton[i][self.m] == braid_skeleton[j][0]:
                    self.permute_[i] = j
        self.braid_skeleton_ = braid_skeleton
        self.min_ = min([min(braid_skeleton[i]) for i in range(0, len(braid_skeleton))])
        self.max_ = max([max(braid_skeleton[i]) for i in range(0, len(braid_skeleton))])

    def __call__(self, i, j):
        """
        Return the height of strand i at position j
        """
        return self.braid_skeleton_[i][j]

    def pi(self, i):
        """
        pi is a permutation such that x(i,j+m) == x(pi(i),j)
        """
        return self.permute_[i]

    def lap(self, domain):
        """
        Compute the lap number for a domain
        """
        # Interval midpoints of the domain, wrapped so position m pairs back
        # with position 0 (the domain is m-dimensional).
        midpoints = [sum(domain.bounds()[j]) / 2.0 for j in (list(range(0, self.m)) + [0])]
        # Count strand segments (i, j) lying weakly below the midpoint at j
        # and weakly above it at j+1.
        return sum(self(i, j) <= midpoints[j] and self(i, j+1) >= midpoints[j+1] for j in range(0, self.m) for i in range(0, self.n))

    def draw(self, domain=None):
        """Plot every strand; optionally overlay `domain` as a dashed path."""
        x = np.arange(self.m+1)
        for i in range(0, self.n):
            plt.plot(x, [self(i, j) for j in range(0, self.m+1)])
        if domain:
            def f(x):
                # Map a (possibly infinite) interval to a representative
                # plotting height just outside the strand range.
                if x[0] == -float("inf") and x[1] == -float("inf"):
                    return self.min_ - 1.0
                if x[0] == float("inf") and x[1] == float("inf"):
                    return self.max_ + 1.0
                if x[0] == -float("inf"):
                    return x[1] - .5
                if x[1] == float("inf"):
                    return x[0] + .5
                return (x[0] + x[1]) / 2.0
            strand = [f(domain.bounds()[d]) for d in range(0, self.m)]
            # Close the loop back to position 0.
            strand = strand + [strand[0]]
            plt.plot(x, strand, '--', color='b',)
        plt.show()

    def __repr__(self):
        # NOTE(review): repr() triggering a matplotlib window is a side
        # effect; presumably intended for notebook use.
        self.draw()
        return "Braid Diagram"
def BraidComplex(braid_diagram):
    """
    Overview:
      Given a specification for a "braids" dynamical system,
      return the associated cubical complex and flow graph.
    Returns:
      (complex, adjacency) where adjacency maps a top cell to the set of
      top cells reachable in one step of the combinatorial flow.
    """
    # Unpack the input
    n = braid_diagram.n
    m = braid_diagram.m
    x = lambda i, j: braid_diagram(i, j)
    pi = lambda i: braid_diagram.pi(i)
    # Create the associated cubical complex: per position, thresholds are the
    # sorted strand heights padded with +/- infinity.
    thresholds = [[float("-inf")] + sorted([x(i, j) for i in range(0, n)]) + [float("inf")] for j in range(0, m)]
    complex = CubicalComplex(CubicalGrid(thresholds))
    # Construct the domains (top-dimensional cells) and their walls.
    # NOTE(review): `domains` is never used below.
    domains = [cell for cell in complex.cells() if cell.dimension() == m]
    walls = [cell for cell in complex.cells() if cell.dimension() == m-1]
    ## NOTE: DRY SMELL BEGIN
    # We need coboundary information
    coboundary = defaultdict(list)
    for b in complex:
        for a in [a for a in complex.boundary(b)]:
            coboundary[a].append(b)
    # We also need to know the top cells surrounding a vertex
    def star(cell):
        # All cells reachable by repeatedly taking coboundaries of `cell`.
        result = set()
        stack = [v for v in coboundary[cell]]
        while stack:
            v = stack.pop()
            result.add(v)
            for u in coboundary[v]:
                stack.append(u)
        return result
    ## NOTE: DRY SMELL END
    # Construct the edge set
    edges = defaultdict(set)
    for wall in walls:
        # A wall can have 1 adjacent domain if it is off at infinity
        if len(coboundary[wall]) == 1: continue
        # Otherwise, it has precisely 2 adjacent domains
        [u, v] = coboundary[wall]
        # Flow runs toward weakly smaller lap number; equal laps give a
        # double (bidirectional) edge.
        if braid_diagram.lap(u) <= braid_diagram.lap(v):
            edges[v].add(u)
        if braid_diagram.lap(u) >= braid_diagram.lap(v):
            edges[u].add(v)
    # Identify collapsed strands (fixed points of the permutation).
    collapsed_strands = [i for i in range(0, n) if pi(i) == i]
    collapsed_vertices = [CubicalCell([[x(i, j), x(i, j)] for j in range(0, m)]) for i in collapsed_strands]
    # Connect all cubes in the star of any collapsed strand
    for v in collapsed_vertices:
        surrounding_walls = [cell for cell in star(v) if cell.dimension() == m-1]
        for wall in surrounding_walls:
            if len(coboundary[wall]) == 1: continue
            [u, v] = coboundary[wall]
            edges[u].add(v)
            edges[v].add(u)
    return (complex, lambda v: edges[v])
| true |
53d5667dd0e8522848bd23e9dc128b246dc7667b | Python | varshinireddyt/Python | /Arrays/Sort Array By Increasing Frequency.py | UTF-8 | 775 | 4.15625 | 4 | [] | no_license | """
Leetcode 1636. Sort Array by Increasing Frequency
Given an array of integers nums, sort the array in increasing order based on the frequency of the values.
If multiple values have the same frequency, sort them in decreasing order.
Return the sorted array
Input: nums = [1,1,2,2,2,3]
Output: [3,1,1,2,2,2]
Explanation: '3' has a frequency of 1, '1' has a frequency of 2, and '2' has a frequency of 3.
Input: nums = [-1,1,-6,4,5,-6,1,4,1]
Output: [5,-1,4,4,-6,-6,1,1,1]
"""
def frequencySort(nums):
    """Sort `nums` by ascending frequency; ties broken by decreasing value.

    LeetCode 1636, e.g. [1,1,2,2,2,3] -> [3,1,1,2,2,2].  Single sort with a
    composite key replaces the original two-pass (reverse sort + stable
    re-sort) approach.
    """
    from collections import Counter
    count = Counter(nums)
    # Primary key: frequency ascending.  Secondary key: value descending
    # (via negation), matching the original tie-breaking.
    return sorted(nums, key=lambda x: (count[x], -x))
nums = [1,1,2,2,2,3]
print(frequencySort(nums)) | true |
fa94081cb8f8c2e54506fc1231d32439c90bcb21 | Python | Zadigo/my_python_codes | /exercises/google_mapsz/utils.py | UTF-8 | 378 | 2.5625 | 3 | [] | no_license | import os, re
def get_local_files(directory):
    """Look for a setup.{json,txt,py} file next to this module.

    NOTE(review): `setup_path` is assigned but never returned or stored, so
    the function currently has no observable effect — it presumably was
    meant to return the path.  Also note it scans the directory containing
    this module (os.path.dirname(__file__)), not the `directory` argument,
    which is only used to build the joined path.
    """
    # Filenames (last element of the first os.walk tuple) in the module dir.
    LOCAL_FILES = list(os.walk(os.path.dirname(__file__)))[0][-1]
    PATTERNS = [
        r'(setup\.(json|txt|py))',
    ]
    for LOCAL_FILE in LOCAL_FILES:
        var = re.search(PATTERNS[0], LOCAL_FILE)
        if var:
            setup_path=os.path.join(directory,var.group(0))

# get_local_files(os.path.dirname(__file__))
def f_strivore(MOD=10**9 + 7):
    """Count strings obtainable by inserting K lowercase letters into S.

    Appears to solve AtCoder's "Strivore" problem (judging by the name and
    structure): reads K and S from stdin and returns the count of distinct
    strings of length len(S)+K containing S as a subsequence, mod MOD.
    """
    K = int(input())
    S = input()
    length = len(S)
    class Combination(object):
        """Binomial coefficients modulo a prime (precomputed factorials)."""
        __slots__ = ['mod', 'fact', 'factinv']
        def __init__(self, max_val_arg: int = 10**6, mod: int = 10**9 + 7):
            fac, inv = [1], []
            fac_append, inv_append = fac.append, inv.append
            for i in range(1, max_val_arg + 1):
                fac_append(fac[-1] * i % mod)
                # Modular inverse via 3-argument pow (requires Python 3.8+).
                inv_append(pow(fac[-1], -1, mod))
            # NOTE(review): a second inverse pass is appended on top of the
            # per-factorial pow() loop above; this looks redundant/slow but
            # appears functional — worth confirming against the usual
            # factorial-inverse recipe.
            for i in range(max_val_arg, 0, -1):
                inv_append((inv[-1] * i) % mod)
            self.mod, self.fact, self.factinv = mod, fac, inv[::-1]
        def combination(self, n, r):
            # C(n, r) mod p, 0 outside the valid range.
            return (0 if n < 0 or r < 0 or n < r
                    else self.fact[n] * self.factinv[r] * self.factinv[n - r] % self.mod)
    comb = Combination(length + K).combination
    f = [1] * (K + 1)
    tmp = 1  # running power 25**n mod MOD
    for n in range(K + 1):
        # Stars-and-bars term times 25**n (standard Strivore-style DP).
        f[n] = (comb(length + n - 1, length - 1) * tmp) % MOD
        tmp = (tmp * 25) % MOD
    g = [1] * (K + 1)
    for n in range(1, K + 1):
        g[n] = (f[n] + 26 * g[n - 1]) % MOD
    return g[K]
print(f_strivore())
7489e7787d918c8609249ecda262b0fc9f01d93d | Python | jhonsonsamueltua/rpi-arduino-tobalobs | /sketchbook/tobalobs/rpi-ws.py | UTF-8 | 3,299 | 2.734375 | 3 | [] | no_license | from flask import Flask, jsonify
import serial
import time
import requests
API_GET_KONDISI_MENYIMPANG = 'http://66.70.190.240:8000/api/penyimpangan-kondisi-tambak'
if __name__ == '__main__':
    app = Flask(__name__)

    @app.route('/get-monitor')
    def monitor():
        """Read one pH/temperature/DO sample from the Arduino over serial,
        compare it against thresholds fetched from the web service, and
        return the readings plus a human-readable status as JSON."""
        ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)
        ser.flush()
        while True:
            pH = 0.0
            suhu = 0.0
            do = 0.0
            ket = ''
            # Default acceptable ranges; overridden by the API response below.
            phMin = 6
            phMax = 8
            suhuMin = 24
            suhuMax = 30
            doMin = 3
            doMax = 100
            cekMenyimpang = False
            try:
                if ser.in_waiting > 0:
                    data = ser.readline().decode('utf-8').rstrip()  # sample from arduino: "pH;temp;DO"
                    listData = data.split(';')
                    pH = float(listData[0])
                    suhu = float(listData[1])
                    do = float(listData[2])
                    # Fetch the configured deviation thresholds.
                    r = requests.get(url=API_GET_KONDISI_MENYIMPANG)
                    data = r.json()
                    if data['status'] == 'OK':
                        for kondisi in data['data']:
                            if kondisi['tipe'] == 'ph-min':
                                phMin = int(kondisi['nilai'])
                            elif kondisi['tipe'] == 'ph-max':
                                phMax = int(kondisi['nilai'])
                            elif kondisi['tipe'] == 'suhu-min':
                                suhuMin = int(kondisi['nilai'])
                            elif kondisi['tipe'] == 'suhu-max':
                                suhuMax = int(kondisi['nilai'])
                            elif kondisi['tipe'] == 'do-min':
                                doMin = int(kondisi['nilai'])
                            elif kondisi['tipe'] == 'do-max':
                                doMax = int(kondisi['nilai'])
                    # NOTE(review): in these ternaries the `else` branch drops
                    # any earlier `ket` text (precedence of `ket + X if c else Y`);
                    # kept as-is to preserve the original message format.
                    if pH < phMin or pH > phMax:
                        ket = "pH " + str(pH) + " bermasalah (pH < " + str(phMin) + "):" if pH < phMin else "pH " + str(pH) + " bermasalah (pH > " + str(phMax) + "):"
                        # BUG FIX: was `cekMenyimpang == True` (a no-op
                        # comparison), so a pH-only deviation still reported
                        # "Kondisi tambak normal".
                        cekMenyimpang = True
                    if suhu < suhuMin or suhu > suhuMax:
                        ket = ket + "Suhu " + str(suhu) + "C bermasalah (suhu < " + str(suhuMin) + "):" if suhu < suhuMin else "Suhu " + str(suhu) + "C bermasalah (suhu > " + str(suhuMax) + "C):"
                        cekMenyimpang = True
                    if do < doMin or do > doMax:
                        ket = ket + "DO " + str(do) + " bermasalah (DO < " + str(doMin) + "):" if do < doMin else "DO " + str(do) + " bermasalah (DO > " + str(doMax) + "):"
                        cekMenyimpang = True
                    if cekMenyimpang == False:
                        ket = 'Kondisi tambak normal'
                    break
            except Exception:
                # Serial/network hiccup or partial line: retry until a full
                # sample is read (was a bare `except:`).
                continue
        return jsonify(
            ph=pH,
            suhu=suhu,
            do=do,
            keterangan=ket
        )

    app.run(debug=True, port=80, host='0.0.0.0')
| true |
90216233109420cd864e8106f3eba38ab1c1ba57 | Python | Roarpalm/LOL-Teamfight-Tactics | /S3.5.py | UTF-8 | 8,716 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
#-*-coding:utf-8-*-
import itertools
from time import time
from tqdm import tqdm
start = time()
class Hero():
    '''Hero attributes used for TFT (Teamfight Tactics) synergy counting.'''
    def __init__(self, cost, name, origin, class_):
        # cost tier (1-5 gold)
        self.cost = cost
        # display name
        self.name = name
        # origin ("race") trait
        self.origin = origin
        # class ("job") trait; trailing underscore avoids the keyword
        self.class_ = class_
崔斯特 = Hero(1, '崔斯特', '未来战士', '法师')
凯特琳 = Hero(1, '凯特琳', '未来战士', '狙神')
墨菲特 = Hero(1, '墨菲特', '奥德赛', '斗士')
魔腾 = Hero(1, '魔腾', '战地机甲', '刺客')
嘉文四世 = Hero(1, '嘉文四世', '暗星', '圣盾使')
波比 = Hero(1, '波比', '星之守护者', '重装战士')
蕾欧娜 = Hero(1, '蕾欧娜', '源计划', '重装战士')
格雷福斯 = Hero(1, '格雷福斯', '太空海盗', '强袭枪手')
菲奥娜 = Hero(1, '菲奥娜', '源计划', '剑士')
吉格斯 = Hero(1, '吉格斯', '奥德赛', '爆破专家')
佐伊 = Hero(1, '佐伊', '星之守护者', '法师')
俄洛伊 = Hero(1, '俄洛伊', '战地机甲', '斗士')
霞 = Hero(1, '霞', '星神', '剑士')
安妮 = Hero(2, '安妮', '银河魔装机神', '法师')
赵信 = Hero(2, '赵信', '星神', '圣盾使')
布里茨 = Hero(2, '布里茨', '未来战士', '斗士')
莫德凯撒 = Hero(2, '莫德凯撒', '暗星', '重装战士')
克格莫 = Hero(2, '克格莫', '战地机甲', '强袭枪手')
慎 = Hero(2, '慎', '未来战士', '剑士')
阿狸 = Hero(2, '阿狸', '星之守护者', '法师')
诺提勒斯 = Hero(2, '诺提勒斯', '宇航员', '重装战士')
德莱厄斯 = Hero(2, '德莱厄斯', '太空海盗', '破法战士')
亚索 = Hero(2, '亚索', '奥德赛', '剑士')
卢锡安 = Hero(2, '卢锡安', '源计划', '强袭枪手')
劫 = Hero(2, '劫', '奥德赛', '刺客')
洛 = Hero(2, '洛', '星神', '圣盾使')
易 = Hero(3, '易', '奥德赛', '剑士')
艾希 = Hero(3, '艾希', '星神', '狙神')
萨科 = Hero(3, '萨科', '暗星', '刺客')
卡尔玛 = Hero(3, '卡尔玛', '暗星', '秘术师')
薇恩 = Hero(3, '薇恩', '源计划', '狙神')
兰博 = Hero(3, '兰博', '银河魔装机神', '爆破专家')
卡西奥佩娅 = Hero(3, '卡西奥佩娅', '战地机甲', '秘术师')
伊泽瑞尔 = Hero(3, '伊泽瑞尔', '未来战士', '强袭枪手')
杰斯 = Hero(3, '杰斯', '太空海盗', '重装战士')
辛德拉 = Hero(3, '辛德拉', '星之守护者', '法师')
蔚 = Hero(3, '蔚', '源计划', '斗士')
巴德 = Hero(3, '巴德', '宇航员', '秘术师')
妮蔻 = Hero(3, '妮蔻', '星之守护者', '圣盾使')
索拉卡 = Hero(4, '索拉卡', '星之守护者', '秘术师')
提莫 = Hero(4, '提莫', '宇航员', '狙神')
艾瑞莉娅 = Hero(4, '艾瑞莉娅', '源计划', '剑士')
孙悟空 = Hero(4, '孙悟空', '未来战士', '重装战士')
锐雯 = Hero(4, '锐雯', '未来战士', '剑士')
菲兹 = Hero(4, '菲兹', '银河魔装机神', '刺客')
维克托 = Hero(4, '维克托', '战地机甲', '法师')
纳尔 = Hero(4, '纳尔', '宇航员', '斗士')
烬 = Hero(4, '烬', '暗星', '狙神')
金克斯 = Hero(4, '金克斯', '奥德赛', '强袭枪手')
厄加特 = Hero(5, '厄加特', '战地机甲', '圣盾使')
迦娜 = Hero(5, '迦娜', '星之守护者', '大魔法使')
普朗克 = Hero(5, '普朗克', '太空海盗', '爆破专家')
泽拉斯 = Hero(5, '泽拉斯', '暗星', '法师')
璐璐 = Hero(5, '璐璐', '星神', '秘术师')
龙王 = Hero(5, '龙王', '奥德赛', '星舰龙神')
艾克 = Hero(5, '艾克', '源计划', '刺客')
锤石 = Hero(5, '锤石', '未来战士', '破法战士')
all_heroes = [
崔斯特,
凯特琳,
墨菲特,
魔腾,
嘉文四世,
波比,
蕾欧娜,
格雷福斯,
菲奥娜,
吉格斯,
佐伊,
俄洛伊,
霞,
安妮,
赵信,
布里茨,
莫德凯撒,
克格莫,
慎,
阿狸,
诺提勒斯,
德莱厄斯,
亚索,
卢锡安,
劫,
洛,
易,
艾希,
萨科,
卡尔玛,
薇恩,
兰博,
卡西奥佩娅,
伊泽瑞尔,
杰斯,
辛德拉,
蔚,
巴德,
妮蔻,
索拉卡,
提莫,
艾瑞莉娅,
锐雯,
孙悟空,
菲兹,
维克托,
纳尔,
烬,
金克斯,
厄加特,
迦娜,
普朗克,
泽拉斯,
璐璐,
龙王,
艾克,
锤石
]
def main(people, couple):
    """统计羁绊组合 — count how many trait ("synergy") bonuses a team activates.

    Args:
        people: population level; levels 4-6 restrict which hero costs are
            allowed on the board (see the filters below).
        couple: iterable of Hero-like objects with .cost/.name/.origin/.class_.

    Returns:
        (names, couples): the heroes' names and the number of activated
        trait bonuses, or ('', 0) if the team violates the cost filter.
    """
    from collections import Counter

    # Heroes needed to activate each origin trait once; every full multiple
    # counts again.  Thresholds are identical to the original if-chains.
    ORIGIN_UNIT = {
        '星之守护者': 3, '银河魔装机神': 3, '星神': 2, '奥德赛': 3,
        '未来战士': 2, '太空海盗': 2, '源计划': 3, '暗星': 2,
        '战地机甲': 2, '宇航员': 3,
    }
    # Heroes needed to activate each class trait once.
    CLASS_UNIT = {
        '剑士': 3, '爆破专家': 2, '刺客': 2, '斗士': 2, '法师': 2,
        '圣盾使': 2, '狙神': 2, '秘术师': 2, '破法战士': 2,
        '强袭枪手': 2, '重装战士': 2,
    }

    names = []
    origins = Counter()
    classes = Counter()
    for hero in couple:
        # Cost filters by population level (same as the original):
        # level 4 forbids cost >= 4 heroes; levels 5 and 6 forbid cost-5 heroes.
        if people == 4 and hero.cost >= 4:
            return '', 0
        if people in (5, 6) and hero.cost == 5:
            return '', 0
        names.append(hero.name)
        origins[hero.origin] += 1
        classes[hero.class_] += 1

    # Integer division gives the number of times each trait is activated.
    couples = sum(count // ORIGIN_UNIT[origin]
                  for origin, count in origins.items() if origin in ORIGIN_UNIT)
    couples += sum(count // CLASS_UNIT[cls]
                   for cls, count in classes.items() if cls in CLASS_UNIT)
    return names, couples
# Team size (population level) being enumerated.
people = 9
# Lazy iterator over every possible `people`-sized team drawn from all heroes.
all_couples = itertools.combinations(all_heroes, people)
def eight():
    '''Return the number of distinct teams: C(len(all_heroes), people).

    Uses exact integer arithmetic.  The original computed
    int(float_factorial / float_factorials), and float division of
    56!-sized values loses precision, so the tqdm total could be wrong.
    '''
    from math import factorial
    n = len(all_heroes)
    return factorial(n) // (factorial(n - people) * factorial(people))
def save_and_run():
    '''运行并写入文件 — enumerate every team, score it, and log results.

    Appends a line to lol<people>.txt whenever a team ties or beats the
    best bonus count seen so far; progress is shown with a tqdm bar sized
    by eight().
    '''
    n = 0  # best (maximum) bonus count seen so far
    with open(f'lol{people}.txt', 'a', encoding='utf-8') as f:
        with tqdm(total=eight(), ncols=85) as pbar:
            for x in all_couples:
                pbar.update(1)
                names, couple = main(people, x)
                # `>=` (not `>`) deliberately logs ties as well.
                if couple >= n:
                    n = couple
                    f.write(f'{str(names)}:{couple}\n')
print(f'用时{int((time()-start) // 60)}分{int((time()-start) % 60)}秒') | true |
d66a738ca87ff422bb43cbad3b6f3d6ce0f142b1 | Python | d80b2t/python | /HackerRank/Algorithms/Sorting/Intro.py | UTF-8 | 389 | 3.53125 | 4 | [] | no_license | '''
Sample Challenge::
This is a simple challenge to get things started. Given a sorted array () and a number (), can you print the index location of in the array?
'''
# SECURITY FIX: the original used int(eval(input())) — eval on stdin
# executes arbitrary expressions.  int(input()) parses the same integer
# input without that risk.
v = int(input())
n = int(input())
for a0 in range(n):
    a = list(map(int, input().strip().split(' ')))
    # Print the index of every occurrence of v in the row.
    for index, item in enumerate(a, start=0):
        if item == v:
            print(index)
| true |
ddfdb806ad779d84d285fae19fb67739e4927882 | Python | UA-RCL/RANC | /software/tealayers/tealayer1.0/tealayers/additivepooling.py | UTF-8 | 3,202 | 3.15625 | 3 | [
"MIT"
] | permissive | """Contains the code for a tea layer for use in TeaLearning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
class AdditivePooling(Layer):
    """A helper layer designed to format data for output during TeaLearning.
    If the data input to the layer has multiple spikes per classification, the
    spikes for each tick are summed up. Then, all neurons that correspond to a
    certain class are summed up so that the output is the number of spikes for
    each class. Neurons are assumed to be arranged such that each
    `num_classes` neurons represent a guess for each of the classes. For
    example, if the guesses correspond to number from 0 to 9, the nuerons are
    arranged as such:
    neuron_num: 0 1 2 3 4 5 6 7 8 9 10 11 12 ...
    guess:      0 1 2 3 4 5 6 7 8 9 0  1  2  ..."""
    def __init__(self,
                 num_classes,
                 use_additive_pooling_processing = False,
                 additive_pooling_processing_max = 128,
                 **kwargs):
        """Initializes a new `AdditivePooling` layer.
        Arguments:
            num_classes -- The number of classes to output.
            use_additive_pooling_processing -- if True, rescale per-class
                counts into [0, additive_pooling_processing_max] in call().
            additive_pooling_processing_max -- upper bound for the rescaling.
        """
        self.num_classes = num_classes
        self.num_inputs = None  # set in build() from the input shape
        self.use_additive_pooling_processing = use_additive_pooling_processing
        self.additive_pooling_processing_max = additive_pooling_processing_max
        super(AdditivePooling, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) >= 2
        # The number of neurons must be collapsable into the number of classes
        assert input_shape[-1] % self.num_classes == 0
        self.num_inputs = input_shape[-1]

    def call(self, x):
        # Sum up ticks if there are ticks (rank >= 3 means a tick axis at 1)
        if len(x.shape) >= 3:
            output = K.sum(x, axis=1)
        else:
            output = x
        # Reshape so each column groups the neurons voting for one class
        output = tf.reshape(
            output,
            [-1, int(self.num_inputs / self.num_classes), self.num_classes]
        )
        # Sum up neurons
        output = tf.reduce_sum(output, 1)
        if self.use_additive_pooling_processing:
            # Scaling the outputs between 0 - additive_pooling_processing_max.
            max_val = tf.constant(self.additive_pooling_processing_max, dtype=tf.float32)
            # Per-row max, broadcast back across the class axis via stack.
            max_output = tf.stack([tf.reduce_max(output, 1) for i in range(self.num_classes)], axis = 1)
            max_output = tf.divide(max_val, max_output)
            output = tf.multiply(output, max_output)
            # Converting any NaN's (0 / 0) to 0
            output = tf.where(tf.is_nan(output), tf.zeros_like(output), output)
        return output

    def compute_output_shape(self, input_shape):
        output_shape = list(input_shape)
        # Last dimension will be number of classes
        output_shape[-1] = self.num_classes
        # Ticks were summed, so delete tick dimension if exists
        if len(output_shape) >= 3:
            del output_shape[1]
        return tuple(output_shape)
| true |
2f42d0214018a7f1bc01d3dfc8be2da6230dbd48 | Python | sankamuk/PythonCheatsheet | /Advance/21/context_manager_02.py | UTF-8 | 445 | 3.953125 | 4 | [] | no_license |
class LoggingContext:
    """A toy logger usable as a context manager.

    NOTE(review): __exit__ always returns True, which SUPPRESSES every
    exception raised inside the `with` block (after printing its details).
    Confirm that swallowing exceptions is intentional.
    """
    def __enter__(self):
        print("Initializing logging context")
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Cleaning logging context")
        print("Exception details: {}, {}, {}".format(exc_type, exc_val, exc_tb))
        # Returning True tells Python not to propagate the exception.
        return True

    def info(self, mssg):
        # INFO-level message to stdout.
        print("INFO - {}".format(mssg))

    def error(self, mssg):
        # ERROR-level message to stdout.
        print("ERROR - {}".format(mssg))
| true |
c86f43ce872258bea4b4a86b9815b15bf752927d | Python | The-Anonymous-pro/projects | /week 1 assignment/Assingment.py | UTF-8 | 1,027 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# ## ASSIGNMENT
#
# **Tomiwa Emmanuel O. Am a python programmer and this script will be solving quadratic equations**. A quadratic equation form is: **(ax² + bx + c = 0)** which is solved using a quadratic formular: **(-b +- √(b²-4ac))/2a** where a, b, c are numbers and **a** is not equals to 0.
#
# In[2]:
# First we import math (real roots) and cmath (complex roots) modules.
# BUG FIX: the original comment promised cmath but only imported math, so
# math.sqrt raised ValueError whenever the discriminant was negative.
import math
import cmath
# Then we allow three(3) float inputs (a,b,c) for the quadratic equation
a= float(input("Enter a: "))
b= float(input("Enter b: "))
c= float(input("enter c: "))
# using the quadratic formula (-b +- sqrt(b^2 - 4ac)) / 2a; x is the
# discriminant (b^2 - 4ac)
x= b**2 - 4*a*c
# Real discriminants keep the original float output; negative ones now
# yield complex roots instead of crashing.
root = math.sqrt(x) if x >= 0 else cmath.sqrt(x)
first_sol = (-b - root) / (2*a)
second_sol = (-b + root) / (2*a)
print ( first_sol, second_sol )
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| true |
e7a36b5f4cd241336e83df244cb69d0ae9fc65b4 | Python | hemanta212/blogger-cli | /blogger_cli/commands/cmd_info.py | UTF-8 | 1,872 | 2.703125 | 3 | [
"MIT"
] | permissive | from itertools import zip_longest
import click
from blogger_cli import __version__
from blogger_cli.cli import pass_context
@click.command("info", short_help="Show blog's properties")
@click.argument("blog", required=False)
@click.option("--all", "show_all", is_flag=True)
@click.option(
    "-V", "--version", is_flag=True, help="Show version of blogger-cli and exit"
)
@click.option("-v", "--verbose", is_flag=True)
@pass_context
def cli(ctx, blog, show_all, version, verbose):
    """
    Get details about blogs and app itself\n
    Usage:\n
    bloggger info\n
    blogger info <blogname>
    """
    ctx.verbose = verbose
    blog_exists = ctx.blog_exists(blog)
    # -V/--version short-circuits everything else.
    if version:
        ctx.log(__version__)
        raise SystemExit(0)
    ctx.log("\nBlogger-cli version:", __version__)
    if blog and not blog_exists:
        ctx.log("Invalid blog name. No such blog", blog)
    elif not blog:
        # No blog argument: list every registered blog plus the config keys.
        ctx.log("\nRegistered Blogs:")
        for i in ctx.blog_list:
            default = ctx.default_blog
            # Tag the default blog in the listing.
            ctx.log("  ", i) if i != default else ctx.log("  ", i, "[default]")
        if len(ctx.blog_list) == 0:
            ctx.log("  ", "No blog registered yet!")
        ctx.log("\nBlog:configs [standard]")
        for key in ctx.config_keys:
            ctx.log("  ", key)
        if show_all:
            # --all additionally prints the advanced/optional config keys.
            ctx.log("\nOptional:configs [Advanced]")
            for key in ctx.optional_config:
                ctx.log("  ", key)
        ctx.log("\nTip: Use blogger info blogname for blog details\n")
    else:
        # A valid blog name: dump that blog's stored configuration.
        if blog == ctx.default_blog:
            ctx.log("\nBlog Name:", blog, "[Default]")
        else:
            ctx.log("\nBlog Name:", blog)
        ctx.log("Configurations:")
        blog_dict = ctx.config.read(blog)
        for k, v in sorted(blog_dict.items()):
            ctx.log("  ", k, "->", v)
e560cc583d65f1621b3b261396e131a1f3b0314b | Python | kimhyunkwang/algorithm-study-02 | /4주차/정소원/4주차_위험한 동굴.py | UTF-8 | 492 | 3.34375 | 3 | [] | no_license | from itertools import permutations
N = int(input())
# 방법 1: permutations 함수 모듈 이용하기
p = permutations([str(i+1) for i in range(N)], N)
for line in p:
print(' '.join(list(line)))
# 방법 2: dfs로 직접구현
def dfs(n, cur):
    """Print every permutation of 1..n in lexicographic order, one per line.

    `cur` holds the partial permutation and is mutated in place with
    append/pop backtracking.
    """
    if len(cur) == n:
        print(' '.join(str(v) for v in cur))
        return
    for candidate in range(1, n + 1):
        if candidate not in cur:
            cur.append(candidate)
            dfs(n, cur)
            cur.pop()
res = dfs(N, [])
| true |
ed8fd7183ea3553e04b8c4716b1f75a86952d69d | Python | minh1061998/D-ng-Quang-Minh-python-c4e27 | /Buoi 4/Bai2.py | UTF-8 | 630 | 3.328125 | 3 | [] | no_license | prices={
'banana':4,
'apple': 2,
'orange': 1.5,
'pear': 3
}
stock={
'banana': 6,
'apple': 0,
'orange': 32,
'pear': 15
}
for i in prices:
print(i)
print('price: ',prices[i])
print('stock: ',stock[i])
for x in stock:
print(x)
print('price: ',prices[x])
print('stock:', stock[x])
total = 0
for i in prices:
t=prices[i]*stock[i]
print('Total of: ',i,'is: ',t)
total = total +t
print('All: ',total)
# print('apple')
# print('price:',prices['apple'])
# print('stock:', stock['apple'])
# print('pear')
# print('price:',prices['pear'])
# print('stock:', stock['pear']) | true |
e20dcbfce13f0218eb308a590ae0ce07122a49af | Python | zambbo/naver-wordcloud | /coupang/coupang_extract_noun_frequency.py | UTF-8 | 1,500 | 2.8125 | 3 | [] | no_license | import pandas as pd
from konlpy.tag import Okt
import os
from collections import Counter
from datetime import date
def run():
    """Extract noun frequencies from scraped Coupang product reviews.

    Reads the review CSV, cleans and concatenates all review text, runs the
    Okt morphological analyzer over fixed-size chunks, counts nouns, and
    writes a frequency CSV stamped with today's date.
    """
    os.chdir('./coupang/')
    item_file_name = 'Coupang_보석십자수_2021_7_4.csv'
    coupang_df = pd.read_csv(item_file_name)
    review_s = coupang_df['review_list']
    # Concatenate all reviews into one cleaned-up string (strip the
    # list-literal artifacts left by the scraper).
    review = review_s.str.cat(sep=' ').replace('\\n','').replace(',','').replace('[','').replace(']','').replace('\'','')
    # print(type(review_s))
    # print(review_s[:500])
    okt = Okt()
    # Split into 1000-character chunks for the analyzer.
    # NOTE(review): zip(*[iter(review)]*1000) silently drops the final
    # partial chunk (< 1000 chars) — confirm that's acceptable.
    review_list = list(map(''.join,zip(*[iter(review)]*1000)))
    print('before noun')
    coupang_nouns = [okt.nouns(review) for review in review_list]
    print('middle noun')
    coupang_noun = []
    for noun in coupang_nouns:
        print('processing...')
        coupang_noun.extend(noun)
    print('after noun')
    print('before count')
    count = Counter(coupang_noun)
    print('after count')
    print(count)
    coupang_frequency_list = count.most_common()
    # Drop single-character nouns.
    # NOTE(review): popping from the list while enumerating it skips the
    # element after each removal, so some 1-char nouns can survive.
    for i,(key,value) in enumerate(coupang_frequency_list):
        if len(key) <2:
            coupang_frequency_list.pop(i)
    coupang_dict = dict()
    for noun,noun_count in coupang_frequency_list:
        coupang_dict[noun] = noun_count
    # One-row DataFrame: columns are nouns, the single row is the count.
    coupang_noun_df = pd.DataFrame(coupang_dict,index=['noun_count'])
    coupang_noun_df.to_csv(f'coupang-frequency-{date.today().isoformat()}.csv')
    print("finish saving")
if __name__ == '__main__':
    run()
5511b915d556c57e9b4c6d823cd92d220772d1b1 | Python | zixu4728/mypyutil | /testdb.py | UTF-8 | 1,370 | 2.703125 | 3 | [] | no_license | #!/bin/env python
import re
import sys
import json
import MySQLdb
import time
import random
# NOTE: this file is Python 2 (print statements, `except Error, e` syntax).
def select_mysql():
    # Fetch the row with id=11 from maijiainfo.student and print it.
    try:
        conn = MySQLdb.connect(host='127.0.0.1',user='scrapy',passwd='123456',charset='utf8')
        conn.select_db('maijiainfo')
        cur = conn.cursor()
        # Parameterized query — the driver escapes the id value.
        count = cur.execute('select * from student where id=%s', (11,))
        print 'there has %s rows record' % count
        results = cur.fetchall() # ((v1,v2,...),(v1,v2,...),)
        print results
        conn.commit()
        cur.close()
        conn.close()
    except MySQLdb.Error,e:
        # NOTE(review): on error the connection/cursor are never closed.
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])

# Suffixes appended (with a random number) to generated student names.
all_items = ['aa','bb','cc','dd']

def insert_mysql():
    # Insert one row per entry in all_items with randomized id/name/age.
    try:
        conn = MySQLdb.connect(host='127.0.0.1',user='scrapy',passwd='123456',charset='utf8')
        conn.select_db('maijiainfo')
        cur = conn.cursor()
        vals = []
        for i in all_items:
            # (id, name, age): timestamp-derived id, suffixed name, random age.
            para = (int(time.time())+random.randint(10,100),i+'_'+str(random.randint(10,100)), random.randint(10,100))
            vals.append(para)
        print vals
        # Batched parameterized insert.
        cur.executemany('insert into student (id,name,age) values (%s,%s,%s)', vals)
        conn.commit()
        cur.close()
        conn.close()
    except MySQLdb.Error,e:
        print "Mysql Error %d: %s" % (e.args[0], e.args[1])

select_mysql()
#insert_mysql()
| true |
a2155839cdd136a5f507821cb9ff0d1127227c83 | Python | shnehna/machine_study | /特征抽取/特征过程.py | UTF-8 | 558 | 3.203125 | 3 | [] | no_license | from sklearn.feature_extraction import DictVectorizer
def dictvec():
    """
    字典数据抽取 — dict feature-extraction demo: one-hot encodes the
    categorical 'city' field and passes 'temperature' through, then prints
    the feature names, the inverse transform, and the (sparse) matrix.
    :return: None
    """
    # Instantiate the vectorizer (sparse output by default).
    dicts = DictVectorizer()
    # Sample records, then fit_transform in one step.
    city_list = [
        {'city': '北京', 'temperature': 30},
        {'city': '上海', 'temperature': 60},
        {'city': '深圳', 'temperature': 90}
    ]
    transform = dicts.fit_transform(city_list)
    print(dicts.get_feature_names())
    print(dicts.inverse_transform(transform))
    print(transform)
if __name__ == '__main__':
    dictvec()
| true |
4be15d068ae4ad2a690f6180ab61985ae492b59f | Python | Aasthaengg/IBMdataset | /Python_codes/p03050/s947168664.py | UTF-8 | 169 | 2.96875 | 3 | [] | no_license | import math
# Competitive-programming snippet: for each i up to sqrt(N), if N - i is a
# positive multiple of i (and large enough per the first condition), add
# (N - i) // i to the answer.
# NOTE(review): verify the condition against the original problem statement;
# it resembles an AtCoder divisor-counting solution.
N, res = int(input()), 0
for i in range(1, int(math.sqrt(N) + 1)):
    if N >= i * (i + 1) + i and (N - i) % i == 0:
        res += (N - i) // i
print(res)
| true |
3f560a6f0d7c2dd75d64e5c4be80e1fdc4f57256 | Python | Aissen-Li/lintcode | /54.atoi.py | UTF-8 | 761 | 3.5625 | 4 | [] | no_license | class Solution:
"""
@param str: A string
@return: An integer
"""
def atoi(self, str):
if not str:
return 0
str = str.strip()
res = ''
if str[0] == '-' or str[0] == '+':
if str[1] != '-' and str[1] != '+':
res += str[0]
str = str[1:]
else:
return 0
for i in str:
if '0' <= i <= '9':
res += i
else:
break
if len(res) == 0:
return 0
if int(res) > 2147483647:
return 2147483647
if int(res) < -2147483648:
return -2147483648
return int(res)
"""101ms,没有太大意义,一点一点试出来""" | true |
a779c1cbf016f1777f8277f5a01bf4ea83f4c13c | Python | hari-bhandari/LinkedList.py | /BST.py | UTF-8 | 3,943 | 4 | 4 | [] | no_license | class BST:
"""Binary Search algorithm is a logarithmic search For more information regarding BSTs, see:
http://en.wikipedia.org/wiki/Binary_search_tree
"""
def __init__(self, value=None):
self.left = None
self.right = None
self.value = value
# def isEmpty(self):
# return self.head == None # If BST has no head that means BST is an empty tree
def insert(self, value: float):
"""adds new element to BST"""
current = self
while True:
if self.value is None:
self.value=value
if value < current.value:
if current.left is None:
current.left = BST(value)
break
else:
current = current.left
else:
if current.right is None:
current.right = BST(value)
break
else:
current = current.right
return self
def _len(self):
if self.left and self.right:
return 1 + self.left._len() + self.right._len()
if self.left:
return 1 + self.left._len()
if self.right:
return 1 + self.left._len()
else:
return 1
def len(self):
"""returns the length of BST"""
if self.value:
return self._len()
else:
return 0
def find(self, val):
"""Finds an element in BST"""
current = self
if current is not None: # This makes sure that our tree is not empty
while current is not None:
if current.value == val:
return True
elif val > current.value:
current = current.right
elif val < current.value:
current = current.left
else:
return False
    def addList(self, numbers: list):
        """Insert every element of *numbers* into the tree, in list order."""
        for number in numbers:
            self.insert(number)
def delete(self, value, parentNode=None):
currentNode = self
while currentNode is not None:
if value < currentNode.value:
parentNode = currentNode
currentNode = currentNode.left
elif value > currentNode.value:
parentNode = currentNode
currentNode = currentNode.right
else:
if currentNode.left is not None and currentNode.right is not None:
currentNode.value = currentNode.right.getMinValue()
currentNode.right.remove(currentNode.value, currentNode)
elif parentNode is None:
if currentNode.left is not None:
currentNode.value = currentNode.left.value
currentNode.right = currentNode.left.right
currentNode.left = currentNode.left.left
elif currentNode.right is not None:
currentNode.value = currentNode.right.value
currentNode.left = currentNode.right.left
currentNode.right = currentNode.right.right
else:
currentNode.value = None
elif parentNode.left == currentNode:
parentNode.left = currentNode.left if currentNode.left is not None else currentNode.right
elif parentNode.right == currentNode:
parentNode.right = currentNode.left if currentNode.left is not None else currentNode.right
break
return self.value
def getMinValue(self):
current = self
while current.left is not None:
current = current.left
return current.value
# Demo: build a tree from the values 0..9 and look one of them up below.
BST1 = BST()
BST1.addList([i for i in range(10)])
print(BST1.find(2)) | true |
b129b2be4b08700b5461b804fc687d85c88fd82b | Python | ujiuji1259/disease_normalizer | /src/japanese_disease_normalizer/preprocessor/abbr_preprocessor.py | UTF-8 | 3,632 | 3.015625 | 3 | [] | no_license | """Abbreviation Preprocessor
This module expands abbreviation by using abbreviation dictionary.
"""
import os
import re
import json
from pathlib import Path
from dataclasses import dataclass
import jaconv
from .base_preprocessor import BasePreprocessor
from .. import utils
BASE_URL = "http://aoi.naist.jp/norm/abb_dic.json"
@dataclass
class AbbrEntry:
    """One abbreviation-dictionary entry.

    Attributes:
        abbr (str): the abbreviation as it appears in text.
        name (str): the expanded disease name.
        freq (int): frequency of the expanded name in case reports.
    """
    abbr: str
    name: str
    freq: int
class AbbrPreprocessor(BasePreprocessor):
    """Abbreviation preprocessor.

    Expands Latin-letter abbreviations found in a disease name using a
    downloaded abbreviation dictionary.

    Attributes:
        abbr_dict (Dict[str, List[AbbrEntry]]): abbreviation -> entries.
    """
    def __init__(self):
        self.abbr_dict = self.load_abbr_dict()
    def preprocess(self, word):
        """Expand abbreviations inside *word*.

        Because an abbreviation can be ambiguous, every combination of
        expanded forms is returned; the downstream converter scores all of
        them and keeps the one with the maximum score.

        Args:
            word str: disease name
        Returns:
            List[str]: all possible names
        """
        # Normalise full-width ASCII/digits to half-width so the regex matches.
        word = jaconv.z2h(word, kana=False, ascii=True, digit=True)
        iters = re.finditer(r'([a-zA-Z][a-zA-Z\s]*)', word)
        pos = 0
        output_words = []
        for ite in iters:
            s_pos, e_pos = ite.span()
            abbr = ite.groups()[0].strip()
            # Keep any non-alphabetic text between matches verbatim.
            if pos != s_pos:
                output_words.append(word[pos:s_pos])
            s_word = [abbr]
            if abbr in self.abbr_dict:
                s_word += [w.name for w in self.abbr_dict[abbr]]
            # NOTE(review): the condition tests word.lower() but indexes with
            # abbr.lower() -- presumably both should use abbr.lower(); confirm.
            elif word.lower() in self.abbr_dict:
                s_word += [w.name for w in self.abbr_dict[abbr.lower()]]
            output_words.append(s_word)
            pos = e_pos
        output_words.append(word[pos:])
        def flatten_words(word_list):
            # Cartesian product over the alternative lists, preserving order.
            if len(word_list) == 0:
                return [[]]
            if isinstance(word_list[0], str):
                results = [[word_list[0]] + l for l in flatten_words(word_list[1:])]
            elif isinstance(word_list[0], list):
                results = [[w] + l for w in word_list[0] for l in flatten_words(word_list[1:])]
            return results
        results = flatten_words(output_words)
        results = [''.join(l) for l in results]
        #results = [jaconv.h2z(r, kana=True, digit=True, ascii=True) for r in results]
        return results
    def load_abbr_dict(self):
        """Load the abbreviation dictionary.

        Loads ~/.cache/norm/abb_dict.json when present; otherwise downloads
        it from BASE_URL first. The cache root can be overridden via the
        DEFAULT_CACHE_PATH environment variable.

        Returns:
            Dict[str, List[AbbrEntry]]: abbreviation dictionary that will be used in preprocess.
        """
        DEFAULT_CACHE_PATH = os.getenv("DEFAULT_CACHE_PATH", "~/.cache")
        DEFAULT_ABBR_PATH = Path(os.path.expanduser(
            os.path.join(DEFAULT_CACHE_PATH, "norm")
        ))
        DEFAULT_ABBR_PATH.mkdir(parents=True, exist_ok=True)
        if not (DEFAULT_ABBR_PATH / "abb_dict.json").exists():
            utils.download_fileobj(BASE_URL, DEFAULT_ABBR_PATH / "abb_dict.json")
        with open(DEFAULT_ABBR_PATH / "abb_dict.json", 'r') as f:
            abbr_dict = json.load(f)
        # JSON entries are [freq, name]; wrap each pair as an AbbrEntry.
        results = {key: [AbbrEntry(key, v[1], v[0]) for v in abbr_dict[key]] for key in abbr_dict.keys()}
        return results
| true |
3ae37130fa7e21abeb79e442f87e5902c7a5cbd6 | Python | JasonAHendry/mmp-pipelines | /simulate_fastq-creation.py | UTF-8 | 2,176 | 2.734375 | 3 | [] | no_license | """
MMPP: Mobile Malaria Project Pipelines
--------------------
Simulate
the generation of
.fastq files
from a MinION
--------------------
JHendry, 2019/03/28
"""
import getopt
import sys
import os
import numpy as np
import time
# Parse user inputs
# NOTE(review): the short-option string starts with ':' (a quirk getopt
# tolerates) and the long options lack trailing '=', so getopt treats the
# long forms as flags that take no value -- only the short forms work here.
try:
    opts, args = getopt.getopt(sys.argv[1:], ":s:t:w:r:", ["source", "target", "wait", "reset"])
except getopt.GetoptError:
    # NOTE(review): execution continues after this message, so 'opts' is
    # undefined and the loop below raises NameError -- consider sys.exit here.
    print("Option Error.")
for opt, value in opts:
    if opt in ("-s", "--source"):
        source_dir = value
    elif opt in ("-t", "--target"):
        target_dir = value
    elif opt in ("-w", "--wait"):
        mean_wait = float(value)
    elif opt in ("-r", "--reset"):
        # NOTE(review): bool(value) is True for ANY non-empty string,
        # including the literal string "False".
        reset = bool(value)
    else:
        print("Parameter %s not recognized." % opt)
        sys.exit(2)
print("User Inputs:")
print("  Source:", source_dir)
print("  Target:", target_dir)
print("  Wait Time:", mean_wait)
print("  Reset?:", reset)
# Run file moving: repeatedly wait an exponentially-distributed time, then
# move one file from source to target, until the source directory is empty.
n_source = len(os.listdir(source_dir))
n_target = len(os.listdir(target_dir))
while n_source > 0:
    print("--------------------------------------------------------------------------------")
    print("Number of files in source: %d" % n_source)
    print("Number of files in target: %d" % n_target)
    print("Drawing wait time...")
    wait_time = np.random.exponential(scale=mean_wait)
    print("...wait time (s): %.02f" % wait_time)
    print("Begining wait...")
    time.sleep(wait_time)
    print("Done waiting.")
    print("Moving file:")
    source_file = os.path.join(source_dir, os.listdir(source_dir)[0])
    cmd = "mv %s %s" % (source_file, target_dir)
    print("  %s" % cmd)
    os.system(cmd)
    print("Done.")
    n_target = len(os.listdir(target_dir))
    n_source = len(os.listdir(source_dir))
    print("--------------------------------------------------------------------------------")
# Optionally reset: move every .fastq file back from target to source.
if reset:
    cmd="mv %s %s" % (os.path.join(target_dir, "*.fastq"), source_dir)
    os.system(cmd)
    n_target = len(os.listdir(target_dir))
    n_source = len(os.listdir(source_dir))
print("Final State:")
print("Number of files in source: %d" % n_source)
print("Number of files in target: %d" % n_target)
| true |
b4251901dc049658d35a772e562a66245d7642f4 | Python | tybens/ProjectEuler | /pe20/pe20.py | UTF-8 | 211 | 3.59375 | 4 | [] | no_license | # PROBLEM 20 - (09/08)
def factorial(num):
    """Return num! (1 for num <= 1, matching the empty product)."""
    product = 1
    for factor in range(2, num + 1):
        product *= factor
    return product
# Project Euler 20: sum of the digits of 100!.
the_num = factorial(100)
res = 0
# Walk the decimal representation and accumulate the digit values.
for i in str(the_num):
    res+=int(i)
print(res)
| true |
9479824d6eb3f2a89bfac359804f924afe193ee0 | Python | ubante/poven | /projects/cached_results/something_checker.py | UTF-8 | 2,583 | 3.125 | 3 | [] | no_license | import time
import sys
from somethinglib import Numbre, Whale, Bird
"""
Check something
"""
def main():
    """Demo driver exercising the Bird cache (Python 2 script).

    Note: the sys.exit() calls mean everything after the first one never
    runs; the Numbre and Whale sections below it are kept as experiment
    notes from earlier runs.
    """
    print "Here we go."
    b = Bird("b")
    print b.__str__()
    # b.get_flock_travel_distance()
    b.compute_travel_distance()
    print b.__str__()
    # b.get_flock_travel_distance()
    # b.compute_travel_distance()
    # print b.__str__()
    # b.stat_cache()
    print
    sleepy_time = 1
    time.sleep(sleepy_time)
    k = Bird("k")
    print k.__str__()
    k.compute_travel_distance()
    # k.stat_cache()
    # Repeatedly sleep past the cache interval and recompute.
    for interval in range(0, 13):
        # k.get_flock_travel_distance()
        time.sleep(sleepy_time)
        k.compute_travel_distance()
        print k.__str__()
        # k.stat_cache()
    # b.stat_cache()
    print
    b.compute_travel_distance()
    print b.__str__()
    sys.exit()
    # ---- everything below this point is unreachable demo code ----
    print "\n\nPRIME TIME\n"
    num = Numbre()
    # prime_count = num.get_count()
    # print "The number of primes in the range of 2 to 100 is", prime_count
    #
    # print "\nSleeping for {} seconds".format(sleepy_time)
    # time.sleep(sleepy_time)
    # num.cache.stat()
    # prime_count = num.get_count()
    # print "The number of primes in the range of 2 to 100 is", prime_count
    for interval in range(0, 7):
        prime_count = num.get_count()
        print "The number of primes in the range of 2 to 100 is", prime_count
        print "\nSleeping for {} seconds".format(sleepy_time)
        time.sleep(sleepy_time)
        # num.cache.stat()
        print num.cache
        # print num.cache.__str__()
    #
    # print "\nSleeping for {} seconds".format(sleepy_time)
    # time.sleep(sleepy_time)
    # num.cache.stat()
    # prime_count = num.get_count()
    # print "The number of primes in the range of 2 to 100 is", prime_count
    sys.exit()
    print "This above has the Cache object be a member of the Numbre object so each object will " \
          "have its own cache. This makes sense if you reuse an object. But if you don't....\n"
    aWhale = Whale("Alfred")
    bWhale = Whale("Barnie")
    whales = [aWhale, bWhale]
    for whale in whales:
        print whale.__str__()
if __name__ == "__main__":
    main()
"""
Here we go.
Setting list in cache
The number of primes in the range of 2 to 100 is 25
Sleeping for 2 seconds
CACHE now:1467509590.64 expiration:1467509591.64 state:False
The number of primes in the range of 2 to 100 is 25
Sleeping for 2 seconds
CACHE now:1467509592.64 expiration:1467509591.64 state:True
the cache is expired
Setting list in cache
The number of primes in the range of 2 to 100 is 25
""" | true |
ea4cd9025065e8bcc2327ad429e2c6f4654c3b3e | Python | rnijhara/question-paper-generator | /app/subset_sum.py | UTF-8 | 1,800 | 3.0625 | 3 | [] | no_license | from typing import List
from app.question import Question
class SubsetSum:
    """Enumerate all subsets of questions whose marks sum to a target.

    Classic subset-sum: a boolean table ``dp[i][t]`` records whether some
    subset of questions[0..i] can reach total ``t``; the filled table is
    then walked backwards to reconstruct every qualifying subset.
    """
    def __init__(self):
        # Both fields are (re)built by get_all_subsets().
        self.dp = None
        self.subsets = None
    def _populate_subsets(self, questions: List[Question], i: int, total: int, subset: List[Question]):
        """Walk the filled DP table from (i, total), appending solutions to self.subsets."""
        # Base row: question 0 alone closes the remaining total.
        if i == 0 and total != 0 and self.dp[0][total]:
            subset.append(questions[i])
            self.subsets.append(subset.copy())
            subset.clear()
            return
        # Remaining total already zero: the collected subset is a solution.
        if i == 0 and total == 0:
            self.subsets.append(subset.copy())
            subset.clear()
            return
        # Branch 1: exclude question i (recurse on a copy of the partial subset).
        if self.dp[i - 1][total]:
            new_subset = subset.copy()
            self._populate_subsets(questions, i - 1, total, new_subset)
        # Branch 2: include question i.
        if total >= questions[i].marks and self.dp[i - 1][total - questions[i].marks]:
            subset.append(questions[i])
            self._populate_subsets(questions, i - 1, total - questions[i].marks, subset)
    def get_all_subsets(self, questions: List[Question], total: int) -> List[List[Question]]:
        """Return every subset of *questions* whose marks sum to *total*.

        Returns [] when no subset matches. The DP fill costs
        O(len(questions) * total) time and space; reconstruction is
        output-sensitive.
        """
        n = len(questions)
        if n == 0 or total < 0:
            return []
        self.dp = [[False for _ in range(total + 1)] for _ in range(n)]
        self.subsets = []
        # A total of 0 is always reachable via the empty subset.
        for i in range(n):
            self.dp[i][0] = True
        if questions[0].marks <= total:
            self.dp[0][questions[0].marks] = True
        for i in range(1, n):
            for j in range(total + 1):
                if questions[i].marks <= j:
                    self.dp[i][j] = self.dp[i - 1][j] or self.dp[i - 1][j - questions[i].marks]
                else:
                    self.dp[i][j] = self.dp[i - 1][j]
        if not self.dp[n - 1][total]:
            return []
        self._populate_subsets(questions, n - 1, total, [])
        return self.subsets
| true |
456666443ca8a3376c45445d43dcabbb871b207c | Python | mwaghela92/AB_testing | /Code/AB_testing.py | UTF-8 | 3,614 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 11:52:32 2019
@author: mayur
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.stats as st
# importing data into a dataframe
data = pd.read_csv('/Users/mayur/Documents/GitHub/A_B_testing/'
                   'AB_testing/Data/ab_data.csv')
data.dtypes
# converting timestamp to datetime()
data['timestamp'] = pd.to_datetime(data['timestamp'])
# finding results not aligned for campaign: control users who saw the new
# page, and treatment users who saw the old page
not_aligned_data_control = data[(data['group']=='control')
                                & (data['landing_page']== 'new_page')]
not_aligned_data_treatment = data[(data['group']=='treatment')
                                  & (data['landing_page']== 'old_page')]
# dropping data that is not aligned
# NOTE(review): the result of the first three lines is discarded -- the
# second merge starts again from 'data', so misaligned CONTROL rows are
# never actually removed; it should probably merge from data_aligned.
data_aligned = data.merge(not_aligned_data_control, how = 'left', indicator = True)
data_aligned = data_aligned[data_aligned['_merge'] == 'left_only']
data_aligned = data_aligned.drop('_merge', axis = 1)
data_aligned = data.merge(not_aligned_data_treatment, how = 'left', indicator = True)
data_aligned = data_aligned[data_aligned['_merge'] == 'left_only']
data_aligned = data_aligned.drop('_merge', axis = 1)
# segregating data into different dataframes
converted_control = data_aligned[(data_aligned['converted']==1) &
                                 (data_aligned['group']== 'control')].reset_index()
non_converted_control = data_aligned[(data_aligned['converted']==0) &
                                     (data_aligned['group']== 'control')].reset_index()
converted_treatment = data_aligned[(data_aligned['converted']==1) &
                                   (data_aligned['group']== 'treatment')].reset_index()
non_converted_treatment = data_aligned[(data_aligned['converted']==0) &
                                       (data_aligned['group']== 'treatment')].reset_index()
data_control = pd.concat([converted_control, non_converted_control], ignore_index = True)
data_treatment = pd.concat([converted_treatment, non_converted_treatment], ignore_index= True)
Y_values_aligned = [converted_control.user_id.count() + (
    non_converted_control.user_id.count()),
    converted_treatment.user_id.count() + non_converted_treatment.user_id.count()
    ]
Y_values_not_aligned = [not_aligned_data_control.user_id.count(),
                        not_aligned_data_treatment.user_id.count()
                        ]
# Plotting bar chart to see how many records were aligned and how many weren't
N = len(Y_values_aligned)
ind = np.arange(N)
width = 0.35
X_values = ['Aligned_control', 'Not_aligned_control', 'Aligned_treatment',
            'Not_alignmed_treatment']
plt.bar(ind,Y_values_aligned, width, label = 'Aligned' )
plt.bar(ind+width,Y_values_not_aligned, width, label = 'Not Aligned' )
plt.ylabel('Count of records')
plt.xticks(ind+width/2, ('Control', 'Treatment'))
plt.legend(loc = 'best')
plt.show()
# Finding different statistical values
total_control = converted_control.user_id.count() + non_converted_control.user_id.count()
total_treatment = converted_treatment.user_id.count() + non_converted_treatment.user_id.count()
percent_converted_control = converted_control.user_id.count()/total_control
percent_converted_treatment = converted_treatment.user_id.count()/total_treatment
p = percent_converted_control
pc = percent_converted_treatment
n = total_control
nc = total_treatment
# Two-proportion z-test on the conversion rates; two-sided p-value.
z_score = (p-pc)/math.sqrt((p*(1-p)/n) + (pc*(1-pc)/nc))
p_value = st.norm.sf(abs(z_score))*2
# NOTE(review): 'df' is never defined -- the next line raises NameError at
# runtime; it looks like leftover scratch code.
df.resample('D', on='Date_Time').mean()
C_C = converted_control.resample('d', on='timestamp').count()
| true |
cdf7ba68b4d74477c0eb70437019eed5f8fbdc6c | Python | PoolC/algospot | /BOARDCOVER/doodoori2.py | UTF-8 | 2,604 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
import sys
#import pdb
rl = lambda: sys.stdin.readline()
#f = open('input.dat', 'r')
#rl = lambda: f.readline()
#pdb.set_trace()
# The four L-tromino orientations, as (dx, dy) offsets from an anchor cell.
check_tiles = [
    [[0, 0], [0, 1], [1, 1]],
    [[0, 0], [0, 1], [-1, 1]],
    [[0, 0], [1, 0], [0, 1]],
    [[0, 0], [1, 0], [1, 1]],
]
class Board:
    """Grid board; counts the ways to cover all empty cells with L-trominoes."""
    board = []
    def __init__(self, max_x, max_y, lines):
        """Build an mx-by-my grid; '.' in *lines* marks an empty (0) cell."""
        self.board = []
        self.mx = max_x
        self.my = max_y
        for _ in range(self.mx):
            self.board.append([-1] * self.my)
        for row in range(self.my):
            text = lines[row]
            for col in range(self.mx):
                self.board[col][row] = 0 if text[col] == '.' else 1
    def checkBoardFull(self):
        """True when no empty (0) cell remains anywhere on the board."""
        return all(self.board[col][row] != 0
                   for row in range(self.my) for col in range(self.mx))
    def getEmptyPosition(self):
        """First empty cell in row-major scan, as [x, y]; False if none."""
        for row in range(self.my):
            for col in range(self.mx):
                if self.board[col][row] == 0:
                    return [col, row]
        return False
    def isTileAvailable(self, point, tile):
        """True when every cell of *tile* anchored at *point* is on-board and empty."""
        px, py = point
        for dx, dy in tile:
            cx, cy = px + dx, py + dy
            if not (0 <= cx < self.mx and 0 <= cy < self.my):
                return False
            if self.board[cx][cy] != 0:
                return False
        return True
    def setTile(self, point, tile, value):
        """Write *value* into the tile's cells; out-of-board cells are skipped."""
        px, py = point
        for dx, dy in tile:
            cx, cy = px + dx, py + dy
            if 0 <= cx < self.mx and 0 <= cy < self.my:
                self.board[cx][cy] = value
    def checkProcess(self):
        """Backtracking count of complete tromino covers of the empty cells."""
        spot = self.getEmptyPosition()
        if spot == False:
            return 1 if self.checkBoardFull() else 0
        total = 0
        for tile in check_tiles:
            if not self.isTileAvailable(spot, tile):
                continue
            self.setTile(spot, tile, 1)   # place the tile...
            total += self.checkProcess()  # ...count covers of the rest...
            self.setTile(spot, tile, 0)   # ...then take it back.
        return total
def main():
    """Read the test cases from stdin and print one cover count per case.

    Input: the first line is the number of cases; each case starts with
    "height width" followed by `height` rows ('.' = empty, '#' = filled).
    """
    n = int(rl())
    for i in range(n):
        my, mx = [int(x) for x in rl().split()]
        board_lines = []
        for yi in range(my):
            board_lines.append(rl())
        board = Board(mx, my, board_lines)
        # Parenthesized form prints identically under Python 2 and Python 3
        # (the original 'print x' statement was Python-2 only).
        print(board.checkProcess())
if __name__ == "__main__":
    main()
| true |
ad48f57a33a99d757165246536548aec3d4af40d | Python | justinembawomye/python-fun | /numbers.py | UTF-8 | 195 | 3.484375 | 3 | [] | no_license | import sys
numbers = [7,8, 9, 10, 11, 12, 13]
# Membership search: report whether the target value is present and exit
# with a shell-style status code (0 = found, 1 = not found).
if 90 in numbers:
    print("Found")
    sys.exit(0)
print("Not found")
sys.exit(1) | true |
4b4a13a2ce3c18faf3e53919578e055975d7efe4 | Python | phildue/cnn_object_detection | /src/python/utils/fileaccess/XmlParser.py | UTF-8 | 7,159 | 2.828125 | 3 | [] | no_license | import glob
import xml.etree.ElementTree as ET
import numpy as np
from utils.image import Image
from utils.image.imageprocessing import imwrite, imread
from utils.labels.ImgLabel import ImgLabel
from utils.labels.ObjectLabel import ObjectLabel
from utils.labels.Polygon import Polygon
from utils.labels.Pose import Pose
class XmlParser:
    """Read and write image datasets with PascalVOC-style XML labels.

    Samples are stored as zero-padded index pairs, e.g. 00000.jpg + 00000.xml.
    """
    def __init__(self, directory: str, color_format, start_idx=0, image_format='jpg'):
        self.color_format = color_format
        self.image_format = image_format
        self.directory = directory
        self.idx = start_idx  # index used to name the next written sample
    def write(self, images: [Image], labels: [ImgLabel]):
        """Write each image/label pair under an auto-incremented index."""
        for i, l in enumerate(labels):
            filename = '{}/{:05d}'.format(self.directory, self.idx)
            self.write_img(images[i], filename + '.' + self.image_format)
            self.write_label(l, filename)
            self.idx += 1
    def write_img(self, image: Image, path: str):
        """Write a single image to *path*."""
        imwrite(image, path)
    def write_label(self, label: ImgLabel, path: str):
        """Serialize one image label (name, bounding box, optional pose,
        polygon corners) to '<path>.xml'."""
        root = ET.Element('annotation')
        ET.SubElement(root, 'filename').text = path.replace('xml', self.image_format)
        # TODO extend this with color format and other informations about the dataset
        for obj in label.objects:
            obj_root = ET.SubElement(root, 'object')
            ET.SubElement(obj_root, 'name').text = '{0:s}'.format(obj.name)
            bnd_box = ET.SubElement(obj_root, 'bndbox')
            x1 = obj.poly.x_min
            x2 = obj.poly.x_max
            y1 = obj.poly.y_min
            y2 = obj.poly.y_max
            # Normalise so min <= max regardless of the polygon's orientation.
            xmin = min((x1, x2))
            xmax = max((x1, x2))
            ymin = min((y1, y2))
            ymax = max((y1, y2))
            ET.SubElement(bnd_box, 'xmin').text = '{0:d}'.format(int(xmin))
            ET.SubElement(bnd_box, 'xmax').text = '{0:d}'.format(int(xmax))
            ET.SubElement(bnd_box, 'ymin').text = '{0:d}'.format(int(ymin))
            ET.SubElement(bnd_box, 'ymax').text = '{0:d}'.format(int(ymax))
            if obj.pose is not None:
                # NOTE(review): this writes an 'up' tag but _parse_pose reads
                # 'down', so labels written here cannot be read back -- confirm
                # which tag name is correct.
                pose_root = ET.SubElement(obj_root, 'pose')
                ET.SubElement(pose_root, 'north').text = '{0:03f}'.format(obj.pose.north)
                ET.SubElement(pose_root, 'east').text = '{0:03f}'.format(obj.pose.east)
                ET.SubElement(pose_root, 'up').text = '{0:03f}'.format(obj.pose.up)
                ET.SubElement(pose_root, 'yaw').text = '{0:03f}'.format(obj.pose.yaw)
                ET.SubElement(pose_root, 'pitch').text = '{0:03f}'.format(obj.pose.pitch)
                ET.SubElement(pose_root, 'roll').text = '{0:03f}'.format(obj.pose.roll)
            corner_root = ET.SubElement(obj_root, 'corners')
            ET.SubElement(corner_root, 'top_left').text = '{},{}'.format(
                int(obj.poly.points[3, 0]),
                int(obj.poly.points[3, 1]))
            ET.SubElement(corner_root, 'top_right').text = '{},{}'.format(
                int(obj.poly.points[2, 0]),
                int(obj.poly.points[2, 1]))
            ET.SubElement(corner_root, 'bottom_left').text = '{},{}'.format(
                int(obj.poly.points[0, 0]),
                int(obj.poly.points[0, 1]))
            ET.SubElement(corner_root, 'bottom_right').text = '{},{}'.format(
                int(obj.poly.points[1, 0]),
                int(obj.poly.points[1, 1]))
        tree = ET.ElementTree(root)
        tree.write(path + '.xml')
    def read(self, n=0) -> ([Image], [ImgLabel]):
        """Read up to *n* samples (all when n == 0) from the directory."""
        files = sorted(glob.glob(self.directory + '/*.xml'))
        samples = []
        labels = []
        for i, file in enumerate(files):
            if n > 0 and 0 < n < i: break
            label = XmlParser.read_label(file)
            if label is None: continue
            image = imread(file.replace('xml', self.image_format), self.color_format)
            samples.append(image)
            labels.append(label)
        return samples, labels
    @staticmethod
    def _parse_gate_corners(gate_corners_xml: str) -> Polygon:
        """Parse a <corners>/<gate_corners> element into a 4-point Polygon.

        Point order in the resulting array: bottom_left, bottom_right,
        top_right, top_left (matching write_label's indices 0..3).
        """
        top_left = tuple([int(e) for e in gate_corners_xml.find('top_left').text.split(',')])
        top_right = tuple([int(e) for e in gate_corners_xml.find('top_right').text.split(',')])
        bottom_left = tuple([int(e) for e in gate_corners_xml.find('bottom_left').text.split(',')])
        bottom_right = tuple([int(e) for e in gate_corners_xml.find('bottom_right').text.split(',')])
        gate_corners = Polygon(np.array([[bottom_left[0], bottom_left[1]],
                                         [bottom_right[0], bottom_right[1]],
                                         [top_right[0], top_right[1]],
                                         [top_left[0], top_left[1]]]))
        return gate_corners
    @staticmethod
    def _parse_pose(pose_xml: str) -> Pose:
        """Parse a <pose> element into a Pose.

        NOTE(review): reads a 'down' tag while write_label emits 'up' --
        round-tripping a written label raises AttributeError here.
        """
        north = float(pose_xml.find('north').text)
        east = float(pose_xml.find('east').text)
        up = float(pose_xml.find('down').text)
        yaw = float(pose_xml.find('yaw').text)
        pitch = float(pose_xml.find('pitch').text)
        roll = float(pose_xml.find('roll').text)
        pose = Pose(north, east, up, roll, pitch, yaw)
        return pose
    @staticmethod
    def _parse_bndbox(bndbox_xml: str) -> Polygon:
        """Parse a <bndbox> element into an axis-aligned Polygon."""
        x1 = int(np.round(float(bndbox_xml.find('xmin').text)))
        y1 = int(np.round(float(bndbox_xml.find('ymin').text)))
        x2 = int(np.round(float(bndbox_xml.find('xmax').text)))
        y2 = int(np.round(float(bndbox_xml.find('ymax').text)))
        x_min = min((x1, x2))
        x_max = max((x1, x2))
        y_min = min((y1, y2))
        y_max = max((y1, y2))
        poly = Polygon.from_quad_t_minmax(np.array([[x_min, y_min, x_max, y_max]]))
        return poly
    @staticmethod
    def read_label(path: str) -> [ImgLabel]:
        """Parse one label file; returns an ImgLabel, or None on XML errors.

        Missing optional fields fall back gracefully: confidence defaults to
        1.0, pose to None, and corners to the plain bounding box.
        """
        with open(path, 'rb') as f:
            try:
                tree = ET.parse(f)
                objects = []
                for element in tree.findall('object'):
                    name = element.find('name').text
                    try:
                        confidence_field = element.find('conf').text
                        confidence = float(confidence_field)
                    except AttributeError:
                        confidence = 1.0
                    try:
                        pose_xml = element.find('pose')
                        pose = XmlParser._parse_pose(pose_xml)
                    except AttributeError:
                        pose = None
                    try:
                        gate_corners_xml = element.find('gate_corners')
                        if gate_corners_xml is None:
                            gate_corners_xml = element.find('corners')
                        poly = XmlParser._parse_gate_corners(gate_corners_xml)
                    except AttributeError:
                        box = element.find('bndbox')
                        poly = XmlParser._parse_bndbox(box)
                    label = ObjectLabel(name, confidence,poly, pose)
                    objects.append(label)
                return ImgLabel(objects)
            except ET.ParseError:
                print('Error parsing: ' + path)
| true |
b07648fde9ce631ffc18bb61a87eba8b1a5dc43b | Python | moduIo/Artificial-Intelligence | /HW2/dfsb.py | UTF-8 | 8,247 | 2.8125 | 3 | [] | no_license | # Tim Zhang
# 110746199
# CSE537 HW 2
#---------------------------------------------------
import sys
import time
import re
#---------------------------------------------------
# Transforms input file into graph representation
#---------------------------------------------------
def generateCSP():
    """Read the CSP instance named by sys.argv[1] into the module globals.

    File format: first line "N M K" (variables, constraints, colors), then
    one "u v" edge per line. Populates N, M, K, constraints (adjacency
    lists), assignment (all None), domains ([0..K-1] per variable) and
    appends every directed edge to arcs.
    """
    global N, M, K, constraints, assignment, domains, arcs
    # 'with' guarantees the input file is closed even if parsing raises
    # (the original leaked the file handle).
    with open(sys.argv[1]) as fin:
        lines = fin.readlines()
    parameters = re.findall(r'\d+', str(lines[0].split(' ')))
    N = int(parameters[0])
    M = int(parameters[1])
    K = int(parameters[2])
    constraints = [[] for x in range(N)]
    assignment = [None] * N
    domains = [[0] * K for x in range(N)]
    # Initialize domains to all be [0..K-1]
    for d in domains:
        for i in range(0, K):
            d[i] = i
    # Remove CSP parameters
    lines.pop(0)
    # Set constraints (each edge is stored in both directions)
    for l in lines:
        constraint = re.findall(r'\d+', str(l.split(' ')))
        constraints[int(constraint[0])].append(int(constraint[1]))
        constraints[int(constraint[1])].append(int(constraint[0]))
    # Explicitly transform constraints into arcs
    for i in range(0, N):
        for constraint in constraints[i]:
            arcs.append([i, constraint])
#---------------------------------------------------
# Outputs assignment to file.
#---------------------------------------------------
def writeAssignment(solution):
    """Write the solution to the output file named by sys.argv[2].

    Writes "No Answer" for the 'failure' sentinel, otherwise one color per
    line with no trailing newline.
    """
    # 'with' guarantees the file is flushed and closed (the original left
    # the handle open, so the output could stay unflushed).
    with open(sys.argv[2], "w") as fout:
        if solution == "failure":
            fout.write("No Answer")
        else:
            newline = ""
            for s in solution:
                fout.write(newline + str(s))
                newline = "\n"
#---------------------------------------------------
# DFSB implementation
#---------------------------------------------------
def dfsb(end):
    """Plain depth-first search with backtracking.

    Tries every color for the first unassigned variable and recurses while
    the partial assignment stays consistent. Returns the completed
    assignment, or the string 'failure' when the subtree is exhausted or
    the wall-clock deadline *end* (seconds since the epoch) has passed.
    NOTE: a timeout is indistinguishable from unsatisfiability for callers.
    """
    global K, constraints, assignment, searches
    # Timer for comparison
    if time.time() >= end:
        return 'failure'
    if isComplete(assignment):
        return assignment
    var = selectUnassignedVariable(assignment)
    searches += 1
    for color in range(0, K):
        assignment[var] = color
        if isConsistent(assignment, constraints):
            result = dfsb(end)
            if result != 'failure':
                return result
            # Remove {var = value} from assignment
            assignment[var] = None
        else:
            assignment[var] = None
    return 'failure'
#---------------------------------------------------
# DFSB++ implementation
#---------------------------------------------------
def improved_dfsb():
    """DFSB++: backtracking with AC3 pruning, MRV variable order and LCV
    value order. Returns the completed assignment or 'failure'."""
    global K, constraints, assignment, searches, arcs
    if isComplete(assignment):
        return assignment
    a = arcs[:]
    AC3(a) # Run AC3 in each iteration
    var = MRV_selectUnassignedVariable(assignment)
    # MRV reports a dead end (some variable has no remaining value).
    if var == 'failure':
        return 'failure'
    colors = LCV(var)
    searches += 1
    for color in colors:
        assignment[var] = color
        if isConsistent(assignment, constraints):
            result = improved_dfsb()
            if result != 'failure':
                return result
            # Remove {var = value} from assignment
            assignment[var] = None
        else:
            assignment[var] = None
    return 'failure'
#---------------------------------------------------
# Checks if an assignment is complete
#---------------------------------------------------
def isComplete(assignment):
    """True when every variable has been assigned a color (no None left)."""
    return all(color is not None for color in assignment)
#---------------------------------------------------
# Checks if an assignment is consistent
#---------------------------------------------------
def isConsistent(assignment, constraints):
    """Return True when no two constrained (adjacent) variables share a color.

    Unassigned variables (None) never cause a conflict. Iterates over
    len(constraints) instead of the module-level global N, so the function
    is self-contained and usable on any matching assignment/constraints
    pair; the dead per-neighbor None check was hoisted before the loop.
    """
    for var in range(len(constraints)):
        neighbours = constraints[var]
        if neighbours is None:  # defensive: no constraint list for this var
            continue
        for neighbour in neighbours:
            # A conflict requires both endpoints assigned the same color.
            if assignment[var] is not None and assignment[var] == assignment[neighbour]:
                return False
    return True
#---------------------------------------------------
# Returns the first unassigned variable
#---------------------------------------------------
def selectUnassignedVariable(assignment):
    """Index of the first unassigned (None) variable; None when complete."""
    return next((idx for idx, color in enumerate(assignment) if color is None), None)
#---------------------------------------------------
# Returns the first unassigned variable using the
# MRV heuristic
#---------------------------------------------------
def MRV_selectUnassignedVariable(assignment):
    """Index of the unassigned variable with the fewest remaining values.

    Returns the string 'failure' when some unassigned variable has no legal
    value left (dead end). Ties keep the later variable because of the <=
    comparison.
    """
    global K
    mrv = K # Current MRV
    var = 0 # Index of MRV variable
    for position, x in enumerate(assignment):
        # If the variable is unassigned
        if x is None:
            remaining = K - calculateIllegalValues(position)
            if remaining == 0:
                return 'failure'
            elif remaining <= mrv:
                mrv = remaining
                var = position
    return var
#---------------------------------------------------
# Returns the amount of illegal values remaining on
# the variable indexed by the argument
#---------------------------------------------------
def calculateIllegalValues(index):
    """Number of colors ruled out for variable *index* by its assigned neighbors.

    Side effect: also prunes those colors from domains[index], which the
    AC3 machinery relies on.
    """
    global constraints, assignment, domains
    illegalValues = set()
    for var in constraints[index]:
        if assignment[var] is not None:
            illegalValues.add(assignment[var])
            # Keep the domain in sync with what neighbors already took.
            if int(assignment[var]) in domains[index]:
                domains[index].remove(int(assignment[var]))
    return len(illegalValues)
#---------------------------------------------------
# Returns the values for the variable ordered by
# LCV heuristic by ordering the assignments by the
# number of remaining colors that the neighbors
# can still take on.
#---------------------------------------------------
def LCV(var):
    """Colors for *var* ordered least-constraining first.

    Each color is scored by how many options it would leave to var's
    unassigned neighbors; higher scores come first in the returned list.
    """
    global K, assignment, constraints
    colors = [0] * K
    lcv = [0] * K
    illegalValues = set()
    # If there are no constraints on the variable the order doesn't matter
    if constraints[var] is None:
        for i in range(0, K):
            lcv[i] = i
        return lcv
    for color in range(0, K):
        illegalValues.add(color)
        # Count the number colors all constrained neighbors can still be assigned
        for neighbor in constraints[var]:
            if assignment[neighbor] is None:
                if constraints[neighbor] is not None:
                    # Look at each neighbor of the neighbor
                    for n_neighbor in constraints[neighbor]:
                        # If the constrained neighbor has an assignment then the neighbor can not have the same assignment
                        if assignment[n_neighbor] is not None:
                            illegalValues.add(assignment[n_neighbor])
                # Add the amount of legal values for the neighbor
                colors[color] += K - len(illegalValues)
                illegalValues.clear()
                illegalValues.add(color)
    # Map the colors array onto LCV array (repeated argmax, marking used
    # entries with -1 so each color appears exactly once).
    for i in range(0, K):
        lcv[i] = colors.index(max(colors))
        colors[colors.index(max(colors))] = -1
    return lcv
#---------------------------------------------------
# Implements AC3 algorithm
#---------------------------------------------------
def AC3(arcs):
    """Enforce arc consistency by pruning domains along the given arc queue.

    NOTE(review): list.pop(0) is O(n) per dequeue; collections.deque would
    make the queue operations O(1).
    """
    global constraints, N
    # Remove inconsistent values from domains of all arcs
    while arcs:
        arc = arcs.pop(0)
        if removeInconsistentValues(arc):
            # For each neighbor that isn't the current neighbor
            for neighbor in constraints[arc[0]]:
                if neighbor != arc[1]:
                    # Add it's constraint to the current source to the queue
                    arcs.append([neighbor, arc[0]])
#---------------------------------------------------
# Implements removeInconsistentValues subroutine
# INPUT: arc[0] is x and arc[1] is y where (x, y)
# is the directed edge.
#---------------------------------------------------
def removeInconsistentValues(arc):
    """Drop values of arc[0]'s domain with no supporting value in arc[1]'s.

    Returns True when at least one value was removed; also bumps the
    module-level prunings counter.
    NOTE(review): the loop removes items from domains[arc[0]] while
    iterating over it, which can skip the element following each removal --
    confirm this is acceptable before relying on complete pruning.
    """
    global prunings, domains
    removed = False
    satisfiable = False
    for xcolor in domains[arc[0]]:
        # xcolor is supported if arc[1] still has any different color.
        for ycolor in domains[arc[1]]:
            if ycolor != xcolor:
                satisfiable = True
        if not satisfiable:
            domains[arc[0]].remove(xcolor)
            removed = True
            prunings += 1
        satisfiable = False
    return removed
#---------------------------------------------------
# Main
#---------------------------------------------------
# Module-level CSP state shared by all search routines above.
N = 0 # Number of variables
M = 0 # Number of constraings
K = 0 # Size of domain
searches = 0 # Number of search calls
prunings = 0 # Number of arc prunings
constraints = [] # List of constraints with the interpretation that the ith index is a list of constraints for the ith variable
assignment = [] # Assignment of colors to variables
domains = [] # Domains used in AC3
arcs = [] # Queue of arcs
generateCSP()
# NOTE(review): time.clock() was removed in Python 3.8; time.perf_counter()
# is the modern replacement.
start = time.clock() * 1000.0
# argv[3] selects the algorithm: 0 = plain DFSB (60 s deadline), else DFSB++.
if int(sys.argv[3]) == 0:
    solution = dfsb(time.time() + 60)
else:
    solution = improved_dfsb()
end = time.clock() * 1000.0
writeAssignment(solution)
print("Solution Time: " + str(end - start) + "ms")
print("Searches: " + str(searches))
print("Prunings: " + str(prunings)) | true |
a5f631ce886b15a3dec2671b8b4efc70361a48c0 | Python | GabrielSPereira/Python-Exercises | /Lista01/Exer15Lista01.py | UTF-8 | 313 | 4.1875 | 4 | [] | no_license | # Questão 15. Elabore um programa que permita a entrada de dois valores ( x, y ),
# (Exercise 15) swap the two entered values and then display the results.
x = int(input("Digite o valor de X\n"))
y = int(input("Digite o valor de Y\n"))
# Pythonic tuple assignment swaps in one step -- no temporary needed.
x, y = y, x
print("Valor de X agora é",x, "e o valor de Y agora é",y) | true |
a1389b7289ee0e419648175a6ce31461ffc1c346 | Python | maki-nage/rxsci | /tests/data/test_to_array.py | UTF-8 | 299 | 2.578125 | 3 | [
"MIT"
] | permissive | from array import array
import rx
import rxsci as rs
def test_to_array():
    """to_array('d') should emit a single array('d') holding every source item."""
    actual_result = []
    source = [1, 2, 3, 4]
    # Synchronous rx pipeline: the subscription runs to completion inline.
    rx.from_(source).pipe(
        rs.data.to_array('d')
    ).subscribe(
        on_next=actual_result.append
    )
    assert actual_result == [array('d', [1, 2, 3, 4])]
| true |
a173d6300f94d722e2c13c9369374adedc1867ba | Python | PankillerG/Public_Projects | /Programming/PycharmProjects/untitled/Algorithms/Contest_4/E.py | UTF-8 | 2,177 | 2.953125 | 3 | [] | no_license | def length(x, y, dist, xlow, ylow):
if str(x) + ' ' + str(y) not in place:
dist1 = 0
for i in cities:
dist1 = dist1 + abs(
x - int(i[:i.find(' ')])) + abs(
y - int(i[i.find(' ') + 1:]))
if dist == 0 or dist > dist1:
dist = dist1
xlow = x
ylow = y
return xlow, ylow, dist
def search(x0, x1, y0, y1):
for x in range(x0, x1):
for y in range(y0, y1):
if str(x) + ' ' + str(y) not in place:
return x, y
dist = 0
xlow = 0
ylow = 0
for i in cities:
xlow, ylow, dist = length(
int(i[:i.find(' ')]) + 1, int(
i[i.find(' ') + 1:]),
dist, xlow, ylow)
xlow, ylow, dist = length(
int(i[:i.find(' ')]), int(
i[i.find(' ') + 1:]) + 1,
dist, xlow, ylow)
xlow, ylow, dist = length(
int(i[:i.find(' ')]) - 1, int(
i[i.find(' ') + 1:]),
dist, xlow, ylow)
xlow, ylow, dist = length(
int(i[:i.find(' ')]), int(
i[i.find(' ') + 1:]) - 1,
dist, xlow, ylow)
return xlow, ylow
n = int(input())
cities = []
for i in range(n):
cities.append(input())
place = set(cities)
l = len(cities)
if l > 1 and l % 2 == 0:
cities = sorted(cities, key=lambda x: int(x[:x.find(' ')]))
x0 = int(cities[l // 2 - 1][:cities[l // 2 - 1].find(' ')])
x1 = int(cities[l // 2][:cities[l // 2].find(' ')])
cities = sorted(cities, key=lambda x: int(x[x.find(' ') + 1:]))
y0 = int(cities[l // 2 - 1][cities[l // 2 - 1].find(' ') + 1:])
y1 = int(cities[l // 2][cities[l // 2].find(' ') + 1:])
print(*search(x0, x1, y0, y1))
elif l == 1:
print(int(cities[0][:cities[0].find(' ')]),
int(cities[0][cities[0].find(' ') + 1:]) + 1)
else:
cities = sorted(cities, key=lambda x: int(x[:x.find(' ')]))
x0 = int(cities[l // 2][:cities[l // 2].find(' ')])
cities = sorted(cities, key=lambda x: int(x[x.find(' ') + 1:]))
y0 = int(cities[l // 2][cities[l // 2].find(' ') + 1:])
print(*search(x0, x0, y0, y0))
| true |
1e9d692520586a9d276c8f45b09dc0ef6c230ba6 | Python | SoapClancy/Python_Project_common_package | /Time_Processing/datetime_utils.py | UTF-8 | 9,773 | 2.640625 | 3 | [] | no_license | import time
from .format_convert_Func import datetime64_ndarray_to_datetime_tuple
from numpy import ndarray
import numpy as np
from typing import Iterable, Union, Callable
import pandas as pd
from pandas import DataFrame
from itertools import product
from datetime import datetime
import copy
from datetime import date
from Data_Preprocessing.TruncatedOrCircularToLinear_Class import CircularToLinear
# from Ploting.fast_plot_Func import *
def get_holiday_from_datetime64_ndarray(_datetime: Iterable[np.datetime64]):
"""
返回0表示不是holiday,返回1表示是holiday
"""
raise Exception('Not implemented yet')
# _datetime = datetime64_ndarray_to_datetime_tuple(_datetime)
# return
def datetime_one_hot_encoder(_datetime: Iterable[np.datetime64], *,
including_year=False,
including_month=True,
including_day=True,
including_weekday=True,
including_hour=True,
including_minute=True,
including_second=False,
including_holiday=False,
**kwargs) -> ndarray:
holiday = get_holiday_from_datetime64_ndarray(_datetime) if including_holiday else None
_datetime = datetime64_ndarray_to_datetime_tuple(_datetime)
# 从datetime.datetime中提取各种属性,编码方式:年,月,日,星期,小时,分钟,秒,是否是节假日
datetime_iterator = np.full((_datetime.__len__(), 8), np.nan)
for i, this_datetime_ in enumerate(_datetime):
datetime_iterator[i, 0] = this_datetime_.year if including_year else -1
datetime_iterator[i, 1] = this_datetime_.month if including_month else -1
datetime_iterator[i, 2] = this_datetime_.day if including_day else -1
datetime_iterator[i, 3] = this_datetime_.weekday() if including_weekday else -1
datetime_iterator[i, 4] = this_datetime_.hour if including_hour else -1
datetime_iterator[i, 5] = this_datetime_.minute if including_minute else -1
datetime_iterator[i, 6] = this_datetime_.second if including_second else -1
datetime_iterator[i, 7] = holiday[i] if including_holiday else -1 # 是否是节假日,1表示是,0表示不是
# 删除无效列(i.e., 某个时间feature)
del_col = datetime_iterator[-1, :] == -1 # 看任何一行都可以,不一定是-1行
datetime_iterator = datetime_iterator[:, ~del_col]
datetime_iterator = datetime_iterator.astype('int')
# 每个col提取unique值并排序
col_sorted_unique = []
for col in range(datetime_iterator.shape[1]):
col_sorted_unique.append(sorted(np.unique(datetime_iterator[:, col]), reverse=True))
# one hot 编码
one_hot_dims = [len(this_col_sorted_unique) for this_col_sorted_unique in col_sorted_unique]
one_hot_results = np.full((_datetime.__len__(), sum(one_hot_dims)), 0)
for i, this_datetime_iterator in enumerate(datetime_iterator):
for j in range(one_hot_dims.__len__()):
encoding_idx = np.where(this_datetime_iterator[j] == col_sorted_unique[j])[0]
if j == 0:
one_hot_results[i, encoding_idx] = 1
else:
one_hot_results[i, sum(one_hot_dims[:j]) + encoding_idx] = 1
return one_hot_results
class DatetimeOnehotORCircularEncoder:
__slots__ = ('encoding_df_template', '__mode_property')
def __init__(self, to_encoding_args=('month', 'day', 'weekday', 'holiday', 'hour', 'minute', 'summer_time'),
mode: str = "onehot"):
"""
设置哪些变量需要被encode,可选包括:
'month' 👉 12 bit,
'day' 👉 31 bit,
'weekday' 👉 7 bit,
'holiday' 👉 1 bit,
'hour' 👉 24 bit,
'minute' 👉 60 bit,
'second' 👉 60 bit,
'summer_time 👉 1 bit.
TODO:支持year。方法是让用户给定最小年和最大年,然后动态生成year对应的bit数
e.g., to_encoding_args=('month', 'day', 'weekday', 'holiday', 'hour', 'minute', 'second')
"""
self.__mode_property = mode
self.encoding_df_template = self._initialise_encoding_df(to_encoding_args)
if mode not in ("onehot", "circular"):
raise ValueError("'mode' should be either 'onehot' or 'circular'")
@property
def mode(self):
return self.__mode_property
def _initialise_encoding_df(self, to_encoding_args) -> DataFrame:
# 动态初始化encoding_df
columns = []
if self.mode == "onehot":
for this_to_encoding_args in to_encoding_args:
if this_to_encoding_args == 'month':
columns.extend(list(product(('month',), range(1, 13)))) # 从1开始
if this_to_encoding_args == 'day':
columns.extend(list(product(('day',), range(1, 32)))) # 从1开始
if this_to_encoding_args == 'weekday':
columns.extend(list(product(('weekday',), range(1, 8)))) # 从1开始,实际是isoweekday,1代表Monday
if this_to_encoding_args == 'holiday':
columns.extend(list(product(('holiday',), [1])))
if this_to_encoding_args == 'hour':
columns.extend(list(product(('hour',), range(24))))
if this_to_encoding_args == 'minute':
columns.extend(list(product(('minute',), range(60))))
if this_to_encoding_args == 'second':
columns.extend(list(product(('second',), range(60))))
if this_to_encoding_args == 'summer_time':
columns.extend(list(product(('summer_time',), [1])))
else:
for this_to_encoding_args in to_encoding_args:
if (this_to_encoding_args == 'holiday') or this_to_encoding_args == 'summer_time':
columns.extend(list(product((this_to_encoding_args,), [1])))
else:
columns.extend(list(product((this_to_encoding_args,), ['cos', 'sin'])))
encoding_df = pd.DataFrame(columns=pd.MultiIndex.from_tuples(columns))
return encoding_df
def _circular_func(self, date_time_dim: str) -> Callable:
if date_time_dim == 'month':
period = 12
elif date_time_dim == 'day':
period = 31
elif date_time_dim == 'weekday':
period = 7
elif date_time_dim == 'hour':
period = 24
elif date_time_dim in ('minute', 'second'):
period = 60
else:
raise NotImplementedError
return CircularToLinear(period=period).transform
def __call__(self, datetime_like: pd.DatetimeIndex,
tz=None,
country=None) -> DataFrame:
"""
输入typing中指定格式的包含datetime信息的对象,返回包含one hot encoder结果的DataFrame
所有的输入会被转成Tuple[datetime,...]然后loop
"""
# 初始化numpy数据
encoding_df = np.full((datetime_like.shape[0], self.encoding_df_template.shape[1]), 0, dtype=int)
# 把索引算出来
required_dim_index = dict()
for this_datetime_dim in self.encoding_df_template.columns.levels[0]:
if (self.mode == 'circular') and (this_datetime_dim not in ('summer_time', 'holiday')):
continue
if this_datetime_dim != 'weekday':
if (this_datetime_dim != 'holiday') and (this_datetime_dim != 'summer_time'):
required_dim_index.setdefault(this_datetime_dim, datetime_like.__getattribute__(this_datetime_dim))
elif this_datetime_dim == 'summer_time':
summer_time_results = np.array(list(map(lambda x: 1 if x.dst() else 0,
datetime_like)))
required_dim_index.setdefault(this_datetime_dim, summer_time_results)
else:
holiday_results = np.array(list(map(lambda x: country.is_holiday(x), datetime_like)))
required_dim_index.setdefault(this_datetime_dim, holiday_results)
else:
required_dim_index.setdefault(this_datetime_dim, datetime_like.__getattribute__(this_datetime_dim) + 1)
# 写入encoding_df
for i, this_dim_name in enumerate(self.encoding_df_template.columns):
if (self.mode == 'circular') and (this_dim_name[0] not in ('summer_time', 'holiday')):
encoding_df = encoding_df.astype('float')
func = self._circular_func(this_dim_name[0])
values = datetime_like.__getattribute__(this_dim_name[0]).values
encoding_df[:, i] = func(values)[this_dim_name[1]]
continue
# 取得这一列对应的boolean数组并转成int
this_dim = np.array(required_dim_index[this_dim_name[0]] == this_dim_name[1], dtype=int) # type: ndarray
encoding_df[:, i] = this_dim
# 写入pd.DataFrame
encoding_df = pd.DataFrame(encoding_df, columns=self.encoding_df_template.columns)
return encoding_df
def find_nearest_datetime_idx_in_datetime_iterable(datetime_iterable: Iterable[datetime],
datetime_to_find: datetime) -> int:
datetime_to_find = time.mktime(datetime_to_find.timetuple())
date_time_delta = np.array([(time.mktime(x.timetuple()) - datetime_to_find) for x in datetime_iterable])
return int(np.argmin(np.abs(date_time_delta)))
| true |
a5e1a9a965fea9a9c6221749a5cc7a1d519785bf | Python | MolecularAI/aizynthfinder | /aizynthfinder/context/collection.py | UTF-8 | 4,565 | 3.28125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """ Module containing a class that is the base class for all collection classes (stock, policies, scorers)
"""
from __future__ import annotations
import abc
from typing import TYPE_CHECKING
from aizynthfinder.utils.logging import logger
if TYPE_CHECKING:
from aizynthfinder.utils.type_utils import Any, List, StrDict, Union
class ContextCollection(abc.ABC):
"""
Abstract base class for a collection of items
that can be loaded and then (de-)selected.
One can obtain individual items with:
.. code-block::
an_item = collection["key"]
And delete items with
.. code-block::
del collection["key"]
"""
_single_selection = False
_collection_name = "collection"
def __init__(self) -> None:
self._items: StrDict = {}
self._selection: List[str] = []
self._logger = logger()
def __delitem__(self, key: str) -> None:
if key not in self._items:
raise KeyError(
f"{self._collection_name.capitalize()} with name {key} not loaded."
)
del self._items[key]
def __getitem__(self, key: str) -> Any:
if key not in self._items:
raise KeyError(
f"{self._collection_name.capitalize()} with name {key} not loaded."
)
return self._items[key]
def __len__(self) -> int:
return len(self._items)
@property
def items(self) -> List[str]:
"""The available item keys"""
return list(self._items.keys())
@property
def selection(self) -> Union[List[str], str, None]:
"""The keys of the selected item(s)"""
if self._single_selection:
return self._selection[0] if self._selection else None
return self._selection
@selection.setter
def selection(self, value: str) -> None:
self.select(value)
def deselect(self, key: str = None) -> None:
"""
Deselect one or all items
If no key is passed, all items will be deselected.
:param key: the key of the item to deselect, defaults to None
:raises KeyError: if the key is not among the selected ones
"""
if not key:
self._selection = []
return
if key not in self._selection:
raise KeyError(f"Cannot deselect {key} because it is not selected")
self._selection.remove(key)
@abc.abstractmethod
def load(self, *_: Any) -> None:
"""Load an item. Needs to be implemented by a sub-class"""
@abc.abstractmethod
def load_from_config(self, **config: Any) -> None:
"""Load items from a configuration. Needs to be implemented by a sub-class"""
def select(self, value: Union[str, List[str]], append: bool = False) -> None:
"""
Select one or more items.
If this is a single selection collection, only a single value is accepted.
If this is a multiple selection collection it will overwrite the selection completely,
unless ``append`` is True and a single key is given.
:param value: the key or keys of the item(s) to select
:param append: if True will append single keys to existing selection
:raises ValueError: if this a single collection and value is multiple keys
:raises KeyError: if at least one of the keys are not corresponding to a loaded item
"""
if self._single_selection and not isinstance(value, str) and len(value) > 1:
raise ValueError(f"Cannot select more than one {self._collection_name}")
keys = [value] if isinstance(value, str) else value
for key in keys:
if key not in self._items:
raise KeyError(
f"Invalid key specified {key} when selecting {self._collection_name}"
)
if self._single_selection:
self._selection = [keys[0]]
elif isinstance(value, str) and append:
self._selection.append(value)
else:
self._selection = list(keys)
self._logger.info(f"Selected as {self._collection_name}: {', '.join(keys)}")
def select_all(self) -> None:
"""Select all loaded items"""
if self.items:
self.select(self.items)
def select_first(self) -> None:
"""Select the first loaded item"""
if self.items:
self.select(self.items[0])
def select_last(self) -> None:
"""Select the last loaded item"""
if self.items:
self.select(self.items[-1])
| true |
b5588ab7a82dcc9f0dcefcdf82a8ad2ce4a9b5eb | Python | jstr045329/public_IB_data_ac | /cmdLineParser.py | UTF-8 | 3,557 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
"""ACTION: Write a bash script that sends 5-10 combinations of command line arguments in
different orders so we can test this."""
import argparse
DEF_ACTION = "store_true" # In most cases we want to store a value either way, with a default of False if an option is not passed in.
OPP_ACTION = "store_false" # Ok to store w/ default True if there's a reason to
def cmdLineParseObj():
y = argparse.ArgumentParser(description="Automated Trading system based on the ideas of Shawn Keller.")
y.add_argument("-p", "--port", action="store", type=int,
dest="port", default=7497, help="The TCP port to use")
y.add_argument("-d", "--date", action="store", type=str,
dest="dateFilt", default="", help="only return trades that happen on this date")
y.add_argument("-a", "--account", action="store", type=str, dest="account", default=None, help="Enter your account number")
y.add_argument("-ia", "--ignoreAccount", action = DEF_ACTION)
y.add_argument("-C", "--global-cancel", action=DEF_ACTION,
dest="global_cancel", default=False,
help="whether to trigger a globalCancel req")
y.add_argument("--forceTicker", default='None') # Follow with a ticker name
y.add_argument("--forceLongTermUp", action=DEF_ACTION)
y.add_argument("--forceShortTermUp", action=DEF_ACTION)
y.add_argument("--forceLongTermDown", action=DEF_ACTION)
y.add_argument("--forceShortTermDown", action=DEF_ACTION)
y.add_argument("--forceBubbleUpDaily", action=DEF_ACTION)
y.add_argument("--forceBubbleDownDaily", action=DEF_ACTION)
# Following force all 4 k/d variables:
y.add_argument("--forceCallTrigDaily", action=DEF_ACTION)
y.add_argument("--forcePutTrigDaily", action=DEF_ACTION)
y.add_argument("--forceCallTrig15", action=DEF_ACTION)
y.add_argument("--forcePutTrig15", action=DEF_ACTION)
# Following set k1 and d2 (or vice versa) but not all 4 k/d variables:
y.add_argument("--nudgeCallTrig15", action=DEF_ACTION)
y.add_argument("--nudgePutTrig15", action=DEF_ACTION)
y.add_argument("--nudgeCallTrigDaily", action=DEF_ACTION)
y.add_argument("--nudgePutTrigDaily", action=DEF_ACTION)
y.add_argument("--firstTickerOnly", action=DEF_ACTION)
y.add_argument("--first3TickersOnly", action=DEF_ACTION)
y.add_argument("--first10TickersOnly", action=DEF_ACTION)
y.add_argument("--plotEveryTimestep", action=DEF_ACTION)
y.add_argument("--plotName", default="plot")
y.add_argument("--testBuyCall", action=DEF_ACTION)
y.add_argument("--testBuyPut", action=DEF_ACTION)
y.add_argument("--testSellCall", action=DEF_ACTION)
y.add_argument("--testSellPut", action=DEF_ACTION)
y.add_argument("--disableTws", action=DEF_ACTION)
y.add_argument("--barSourceFile") # Follow with filename
y.add_argument("--useTestHarness") # Follow with test name. Will use the named test harness instead of MyTradingApp.
y.add_argument("--initOrderId", action="store", type=int,
dest="initOrderId", default=-1, help="Choose the first order ID")
return y
if __name__=="__main__":
"""Parser Test"""
dut = cmdLineParseObj()
print(dut.parse_args())
| true |
685a3eb7691a798059890b0fe0db1e648fd0f794 | Python | Cr1stalf/Python | /LR4/b.2.py | UTF-8 | 89 | 2.78125 | 3 | [] | no_license | a1, p = map(int, input().split())
A = [a1 + p * (i - 1) for i in range(1, 11)]
print(A) | true |
9bc92a916205e180f290b65587d08fb9dd65c0f8 | Python | vtsartas/ejpython | /ejpy/ejElenaPY/30_hoja-VII-1_metros_cubicos.py | UTF-8 | 1,179 | 4.3125 | 4 | [] | no_license | # Ejercicio 30 - Hoja VII (1) - Indicar el coste del agua de una piscina
# Creamos una función para calcular el importe
def impfinal(p,vol):
return (p*vol)
# Creamos una función para calcular el volumen de la piscina
def volum(anch,larg,prof):
return (anch*larg*prof)
otro="s"
# Pedimos el coste por m3, que será el mismo durante la ejecución del programa
precio=float(input("AVISO: usar el punto para los decimales:\nIntroduce el precio por metro cúbico:\n"))
while otro=="s":
# Pedimos las dimensiones de la piscina
ancho=float(input("Introduce (en metros) el ancho de la piscina:\n"))
largo=float(input("Introduce (en metros) el largo de la piscina:\n"))
profundidad=float(input("Introduce (en metros) la profundidad de la piscina:\n"))
print(f"\nLa piscina tiene {volum(ancho,largo,profundidad):.2f} metros cúbicos de agua.")
print(f"\nEl importe para esta piscina es de {impfinal(precio,volum(ancho,largo,profundidad)):.2f}.");
otro=input("\n¿Quieres introducir los datos de una nueva piscina? (s/n)\n")
print(f"\nRecuerda que el importe por m3 es de {precio:.2f}.\n")
| true |
cf8e0af9d6ae5484a0b60c732d1102f96c784187 | Python | vmsgiridhar/DSHackerRank | /Python_Prac/DS/Queue.py | UTF-8 | 406 | 3.5625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 9 11:51:52 2019
@author: C5232886
"""
class Queue:
def __init__(self):
self.data = []
def push(self, data):
self.data.append(data)
print(self.data)
def pop(self):
#FIFO
if len(self.data) != 0:
self.data.remove(self.data[0])
else:
print('Queue is empty!') | true |
f9241dcdb1ff7a579a161e3f786ec7696bd2c59f | Python | ncfgrill/Advent-of-Code | /2015/d13.py | UTF-8 | 1,378 | 3.265625 | 3 | [] | no_license | '''
AoC 2015 Day 13 Parts 1 and 2
'''
from itertools import permutations
graph = []
def create_graph():
seen = set()
with open('d13') as f:
i, s = -1, len(seen)
for l in f.readlines():
l = l.strip().split(' ')
h = int(l[3]) if l[2] == 'gain' else -(int(l[3]))
seen.add(l[0])
if len(seen) > s:
i += 1
s = len(seen)
graph.append([])
graph[i].append(h)
graph.append([0 for i in range(len(graph[0]))])
for j in range(len(graph)):
graph[j].insert(j, 0)
graph[j].append(0)
def tsp(s, n):
vert = [i for i in range(0, len(graph) - n) if i != s]
min_path, max_path, perm = 99999999, 0, permutations(vert)
for p in perm:
h, k = 0, s
for j in p:
h += graph[k][j] + graph[j][k]
k = j
h += graph[k][s] + graph[s][k]
min_path, max_path = min(min_path, h), max(max_path, h)
return (min_path, max_path)
def find_seating(n):
if n: create_graph()
min_h, max_h = 99999999, 0
for s in range(0, len(graph) - n):
h = tsp(s, n)
min_h, max_h = min(min_h, h[0]), max(max_h, h[1])
print('Optimal happiness:', max_h)
def main():
find_seating(1)
find_seating(0)
if __name__ == "__main__":
main()
| true |
05c5d1bab87d0cd348f937d79f68b125693dbcfe | Python | bitwalk123/PySide2_sample | /qt_label_image_base64.py | UTF-8 | 1,018 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# reference
# https://stackoverflow.com/questions/53252404/how-to-set-qlabel-using-base64-string-or-byte-array
import sys
from bz2 import decompress
from PySide2.QtCore import QByteArray
from PySide2.QtGui import QPixmap
from PySide2.QtWidgets import (
QApplication,
QLabel,
QVBoxLayout,
QWidget,
)
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
self.setWindowTitle('Label')
self.show()
def initUI(self):
vbox = QVBoxLayout()
self.setLayout(vbox)
lab = QLabel()
lab.setPixmap(self.get_pixmap())
vbox.addWidget(lab)
def get_pixmap(self):
base64data = b'iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAABhGlDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV9TtSIVByOIOGSoThb8Qhy1CkWoEGqFVh1MLv0QmjQkKS6OgmvBwY/FqoOLs64OroIg+AHi5uak6CIl/i8ttIjx4Lgf7+497t4BQrXIdLttFNANx0rGY1I6syKFXhFEH0SMoUNhtjkrywn4jq97BPh6F+VZ/uf+HN1a1mZAQCKeYablEK8TT206Jud9YpEVFI34nHjEogsSP3JdrfMb57zHAs8UrVRyjlgklvItrLYwK1g68SRxRNMNyhfSddY4b3HWi2XWuCd/YThrLC9xneYg4ljAImRIUFHGBopwEKXVIMVGkvZjPv4Bzy+TSyXXBhg55lGCDsXzg//B727t3MR4PSkcA9pfXPdjCAjtArWK634fu27tBAg+A1dG01+qAtOfpFeaWuQI6NkGLq6bmroHXO4A/U+mYimeFKQp5HLA+xl9UwbovQW6Vuu9NfZx+gCkqKvEDXBwCAznKXvN592drb39e6bR3w9kRXKh+gOhzgAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAAN1wAADdcBQiibeAAAAAd0SU1FB+UDFRQzCTyiK9QAAAowSURBVHja7Zt5dFTVHce/b5vJZDaSkIQEskBMyEIgJEqAsipqIlY5olTsafWc9nTR1uoptcLxYNxO63aqree0+o+tngqtSsWK7LJoQbRJE0MCkYRsZN9mMvPmzdvu7R9DPMZ5bwJmG2x//737+87Lu593f7/7u/e+MLgU+3ZqrEMRysGSRaBMiitOmJOzIDY3fiaXYLPzMZpKVU0lohTQWhQZpxWNfoQADh5/6nQ7otyYiN47wNn9GQ8ylD4CwM0wDAqKY7GgxAGWZca8uRQguujTPcGg3qZKtE5RyElVYfafePJ0U/QDKCkRHIm9OwHmthFl6So3svJs4/6jqkyI30d8waB+QVfJOVVlPuV09uC+rdWfRg0AZ1nasxTMlpHrrFwbSte4J/VhZJlQcZgMy5LeEZT1s4pMT+mEPfzR9trKKQXgLk/L0inTAIADAI4DbvluEmyx7LQMU1Wh1O/TApJIumSZNARFvUYK0KP/jj1zGBUgEw7gq28/NcOKNeVxUZfAdB2QRF2SRL1HlkijqtNKopGjnviYw5U/rlS/NgBHWXoVgMUj14uXOpFXZMeVYppGIfqIFJT0nmBAP6eqqPpga+3DRlre5B7zvnwRE8vhSjKeZ+CO42zuOC4TQCaA6z8ADAGYBbXryxeCBd9YY79WffA/AAD/BzDxSQk8Y0Os4AQTRQOMn5QOqwStjTJ62jUUZBZg47duw3WF1yPeOTM0fREN7d4GNA/VobLzMM4NVH9zAPR0KPj4mBdpKUl4+5E/IyclN0zDsTwy4wqQGVeAtfM24dxANXbVv4Tzg7VXdgh0tso4smcIeXMzcOp3Jww7b2TZCUX41YqXsSrzNlNNV7uMt17thb8pAxXX7sQDy/+Ae4q3w2GZER0ABvtUfHTQg4z0eOx9fC9Y5vJuzTIc7lr0EIpTrzX093apUGSC5bnLkOKci7zEJViWth52i2v6AVBKceqoF6DAaw+9ghhLjKGOUIL3q3fhiTcrUNtWYzD3Mvhe0TYkxKaE+fq6FIAB1l9zS/SFQGtTEEMDGlYtLUTR3KsNNQHVh6eP/wDvtD6N96r+jpX33ortb2wN08UKTmxa8OBocCQ0wuLjbMhMzIo+AI11EgDgwQ2/MNXsqnsJLZ56MACKSp1InCXgxb/uwIGaPWHaRbNWItmRPiq8NI0i/6q50ZcE5SBBX7eKGXExWLNgnaEmqIk42f7+qDqzoNgBQigef/234aHAsFg7947Rwx/A8gXXRN80eKFFBqUUy4sWmmrqe09BI8qotqRUAdYYFnWfX4Aoi7BbR682C2etwOaHH4bVKuDQc7vRv
KEJRRklYfcuTlmLLn8LRGUYouJFt78FhJKpA9DTIQMAbihZZ6rpGG40eMsMEpIFdLbKOPjZPmy4ZuMo/8zYVGTNzkBzxwVkJ+ciO9l4St2Qf++o6y37yuCTh6YuBLyDGgCgNGepqabb32rY7p4RWmbXttQY+tctWR7dhRAF4PPq4AUW+XMKzSEF+w3bHe7QAGy8cN7QPz8ja9IBjCsEAj4dmkaRMssRsfDxK17Ddqs19JtBn8fQn5OWidI1buT/qAjZ6enY/ci7YZq9dTuhEQ0OqxMOmwM60aYOQDAQSjYuV+TtMlEZNmy3XAQwLPoN/cmuOUhIYtHePgSiGye2k91vo8ffNj0hoGoUAGCzWCPriGzYLlhCy2JRChj6x1vmTjoATQm9FZs1MgCNGG/QslwIgG7ydi1cTJQDuBhutpjIp0VmcTlyuqbpunGO4GzRDYCSUAhYBH6M2YKajABEHAE8Z5n03aNxARjpgKwoEXUcY7ytTsjIBomZXzeFFxUAuIsxHJTlMQDwkQHwrEnyVKI7BDghBEBWI59CcSwfMYQ41gSALkc3AIsQ+rkYlCLqrHyscXLUQwAEXjD0S5oY3QBiHRcruSFfRJ3ZfK7IIQAuuzEgj9Qb3QBi7Bw4DvB4I48Au2AMQJZDScDtcBr6h4JRDoABYHPwUBWCrqEOU12cLckYgBQCkOCKN/QPBDqjGwAAxM8MJbhDtQdMNUn2NONFkjdUAGXPucrQ3zJ0JvoBJM4KHR1/8J8jpppU1zzDdp83VCEuylxs6G/1nLnEcTitAEIZvPrzelNNTkIJmK8slwkB+npVWGM4rC4IPwvo8bfCE+wbOw+ZzDBTGAICHG4e51v60NzbZDoL5CSMfsvd7TJ0laKoIAsCFz4NVnYeDj0gP7LuMF5PzHaFNk28Qxo8gzqsnH1qAYABsvNsIAR49u2nTWVl2Xd/UddTSlFb6QfPMXjy7kfDCyRK8MmF/aHbMwwsVhZ+0bgqvL3gfmRz67HQeSseu/EvKE5dM8UAAMzLtUGwMNh97AhE2XhzIz+pFLfm/QSEMPjk2DB8Xh2P/fTnWJq9Ikz7acdBdPlavrhOSBIgiioaOs8YjC43tty8Hds2bkduasHUhwAAWGNYLFzixLBXwc/+dK+prjznHjy09FX8euM2nH3tFO5f/8vwqVGTsPvsy6MBzw/tCzy365nomwW+SHQFNsyabcE/Dh7HnqrdprrslDxsXvF9JLqSDJfNr1Y9hn5xdE2RcZUNWbk2vLXvCHZ8+Lpx2az6caLtPTQN1lx2LRNmjrL0UWvQVWUzMCdz7N0ZTaM4umcIngENO574I25YWHbpmytEwRs1z+Bfbf801fR3q+jpUrAkfzFWL1gJq2DFQKALHcNNaPOcjbh6fGXDJ8ykAwAAXaOorxbR8FkAm28qxws/fBE8J0T8zef9VXir7veXOO9/PTMDMOFfiHA8g8KrHZg334aapuNYdF8Jrp6/ENvvqkBWcnjFd6hpB948/QKmyybtIym7k0N+kR0JKQS7DnyI500SWLp7/uT3kk4DgBFLnh0qlT+uM/4QKjuhCDkziyf1GVSV0mkDEJ8owBXHoam5F40958KTEMPivtLncefCLbhu3mbDr0PGa1KABKcNAABk59tBCMVv/vaUaT2/du4d2FT4gOnKcTzm86gN0wugIBbueB67Dh0z/DZoss3v1Z+ZVgAsC6wpj4fNzuLmrd9BQ2f9lHW+r0vpOPZo3Q4zPz9VD2J3sii/PQHnz0q489lNWFm4DGXFZchNy4dVsEBUvOgPdGIg0DVhfzMQIFp/G3PDlFWC0WTeIV1qa9XXnayoPhFJx+MbYppKIfp1MeDXOwN++m63rG+rq6gb82TligNACCD6NFnyk75gUG9SFFSpin6Mo/V7jlZAu9z78VdaRwcps/dS3ux4Aei4+C9zU9FRSdIVSST9skQbdY1UywF1f6/OHprIjl4WAIaht1PK7ARgnbiOUkgBogZEf
TAokWZVpp/JEv1QQfDdUxWNw9M10gwB+Pa2v2MvS1/PAO8AcExER33g36uuqPZEW6iZ5gBxX9th141zriMMuxdA/KV2VAN9/2RF3eCVklTHPFWw35RZtGy184DTxWuSRJq1IKlSVRwPUmn/dA7dibL/AnBLZx6+byUmAAAAAElFTkSuQmCC'
byte_array = QByteArray.fromBase64(base64data)
pixmap = QPixmap()
pixmap.loadFromData(byte_array)
return pixmap
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| true |
c54244422d19bcd73d4be42eff0b73e808fcc27a | Python | mitsuk-maksim/tg_mpei_course | /69. Sqrt(x).py | UTF-8 | 595 | 3.515625 | 4 | [] | no_license | #https://leetcode.com/problems/sqrtx/
class Solution:
def mySqrt(self, x: int) -> int:
return int(x**(1/2))
def main():
import sys
import io
def readlines():
for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):
yield line.strip('\n')
lines = readlines()
while True:
try:
line = next(lines)
x = int(line);
ret = Solution().mySqrt(x)
out = str(ret);
print(out)
except StopIteration:
break
if __name__ == '__main__':
main()
| true |
6d113abe685d18c9931c0fab52cd66cd8f1bcffe | Python | aczdev/votebot | /tornado.py | UTF-8 | 1,555 | 2.578125 | 3 | [
"MIT"
] | permissive | import psutil
from stem import Signal
from stem.control import Controller
import stem.process
from time import sleep
from urllib3.contrib.socks import SOCKSProxyManager
def kill_tor(tor_path='./tor'):
"""Finds a name of the TOR instance and kills it"""
for proc in psutil.process_iter():
# Get only the application's name from TOR_PATH
if proc.name() == tor_path.split('/')[-1]:
proc.kill()
class TorUnit(object):
def __init__(self, tor_path, listen_port, control_port):
self.listen_port = listen_port
self.control_port = control_port
self.tor_process = stem.process.launch_tor_with_config(
tor_cmd=tor_path,
config={
'SocksPort': str(listen_port),
'ControlPort': str(control_port),
},
)
def __del__(self):
self.tor_process.kill()
def new_tor_identity(self, sleep_duration=10):
"""Sends a NEWNYM signal to TOR controller to change current exit node"""
with Controller.from_port(port=self.control_port) as controller:
controller.authenticate()
controller.signal(Signal.NEWNYM)
sleep(sleep_duration)
def check_tor_ip(self):
"""Returns a string with IP address obtained from ifconfig.co webpage"""
http = SOCKSProxyManager('socks5://localhost:%d/' % self.listen_port)
# rstrip() to remove newline at the end
ip = http.request('GET', 'http://ifconfig.co/ip').data.rstrip()
return str(ip.decode("UTF-8"))
| true |
f57603d71f56eebb237b76a9debcdfa19490de98 | Python | adlev/Learning-Analytics-Fellows-Feedback-Analysis | /sentiment_analysis.py | UTF-8 | 13,052 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python
import csv, re, hashlib, numpy
from collections import defaultdict
from ngram import NGram
from math import log, fabs
#input file must be a csv of the form [stringfeedback, studentscore, possiblescore]
input_file = '/Users/adam/Desktop/SI110-Gradeswh.csv'
pos_file = '/Users/adam/Desktop/positive-words.txt'
neg_file = '/Users/adam/Desktop/negative-words.txt'
freq_file = '/Users/adam/Desktop/english-freq-all.txt'
### Some methods can be edited to alter the way data is split
#returns True if the line is a duplicate
def remove_copy(line):
    """Return True if this feedback line was already seen, else record it.

    The dedup key is the MD5 digest of the raw line; new digests are
    recorded in the module-level hash_dict as a side effect.
    NOTE(review): hashlib.md5() requires bytes on Python 3 -- this call
    relies on `line` being a Python 2 str; encode before hashing if porting.
    """
    h = hashlib.md5(line).hexdigest()
    # membership test on the dict itself is O(1); `.keys()` built an O(n)
    # list on every call in Python 2, making deduplication quadratic
    if h in hash_dict:
        return True
    hash_dict[h] = 1
    return False
#takes a list of feedback (unsplit), cleans it, and returns a dictionary of the number of times each word appears
def clean_word_counts(list):
    """Strip punctuation from each feedback string and tally word frequencies.

    Counting is case-insensitive (tokens are lowercased). Side effect: every
    cleaned string is appended to the module-level clean_lines list.
    """
    counts = defaultdict(int)
    for raw in list:
        stripped = re.sub(r'[^\w\s]', '', raw)  # keep only word chars and whitespace
        clean_lines.append(stripped)
        for token in stripped.split():
            counts[token.lower()] += 1
    return counts
#takes a list of words and returns a dict of the number of times each word appears
def word_count_from_list(list):
    """Return a dict mapping each word in `list` to how many times it appears."""
    count_dict = defaultdict(int)
    for word in list:
        # defaultdict(int) starts missing keys at 0, so the original
        # explicit membership check was redundant
        count_dict[word] += 1
    return count_dict
#returns an int of all values in a dict
def total_word_counts(dict):
    """Return the sum of all counts in a word-count dict (0 for an empty dict)."""
    # sum() over the values replaces the manual accumulation loop
    return sum(dict.values())
#calculates the probability that a word will appear in all of the feedback for a course
def word_probability(p_dict, d_int):
    """Divide every count in p_dict by d_int, returning word -> probability."""
    return {term: float(count) / d_int for term, count in p_dict.items()}
# compares a dict of feedback word frequencies with common english frequencies and uses log in order to make differences more salient
def log_compare(p_dict):
    """Return word -> log-ratio of feedback probability to general-English frequency.

    English frequencies come from the module-level freq_dict. A small
    epsilon (1e-6) is added to both numerator and denominator so words
    missing from either table do not cause log(0) or division by zero.
    """
    ratios = dict()
    for term in p_dict:
        feedback_p = float(p_dict.get(term, 0)) + 0.000001
        english_p = float(freq_dict.get(term, 0)) + 0.000001
        ratios[term] = log(feedback_p / english_p)
    return ratios
##### EDIT THIS METHOD to change upper and lower bounds. Un-comment the elif to add the c2 lower bound (you will also need to uncomment part of the main code
def cluster_string_by_score(fg,s):
    """Split feedback into score clusters and return their word counts.

    fg is a list of [feedback, score_fraction] pairs and s is the flat
    list of score fractions. Feedback scoring below the c1 percentile cut
    goes into the first cluster, everything else into the third; the
    middle (c2) cluster stays empty unless the elif below is un-commented.
    Returns a list of three word-count dicts built via clean_word_counts
    (which also appends each cleaned line to the global clean_lines).
    """
    #### EDIT THIS: c1 is upper bound, c2 is lower bound
    c1 = numpy.percentile(s, 50)  # median score used as the cluster boundary
    c2 = numpy.percentile(s, 50)
    print c1
    print c2
    cluster_dict_list = list()
    c1_list = list()
    c2_list = list()  # only populated when the elif below is un-commented
    c3_list = list()
    for pair in fg:
        if pair[1] < c1:
            c1_list.append(pair[0])
        #elif pair[1] ==c2:
        #    c2_list.append(pair[0])
        else:
            c3_list.append(pair[0])
    c1_dict = clean_word_counts(c1_list)
    c2_dict = clean_word_counts(c2_list)
    c3_dict = clean_word_counts(c3_list)
    cluster_dict_list.append(c1_dict)
    cluster_dict_list.append(c2_dict)
    cluster_dict_list.append(c3_dict)
    return cluster_dict_list
###### Put all feedback strings into single grade bins
def feedback_bins(fg):
    """Concatenate all feedback that earned the same percent grade.

    Each [feedback, fraction] pair is binned by the two digits right after
    the decimal point of its score (the appended '0' pads scores that have
    a single decimal digit, so "0.5" -> 50); a .00 fraction is treated as
    a perfect score of 100. Returns grade -> space-joined cleaned feedback.
    """
    bins = defaultdict()
    for pair in fg:
        # e.g. str(0.875) + '0' == "0.8750" -> digits "87"
        digits = re.search(r'(?<=\.)\d\d', str(pair[1]) + '0')
        grade = int(digits.group(0))
        if grade == 0:  # fraction .00 means a perfect score
            grade = 100
        cleaned = re.sub(r'[^\w\s]', '', pair[0])
        if grade in bins:
            bins[grade] = bins[grade] + ' ' + cleaned
        else:
            bins[grade] = cleaned
    return bins
### returns a string giving the sentiment score of a dictionary and the number of words.
def sentiment_score(dict):
    """Summarize the sentiment of a word-count dict.

    Words are matched against the module-level pos_dict / neg_dict opinion
    lexicons. Returns the string 'pos/neg: <ratio> words:<total>' where the
    ratio is positive occurrences over negative occurrences and the total
    counts every word occurrence in the input.
    """
    pos = 0
    neg = 0
    words = 0
    for key in dict.keys():
        words += dict[key]
        if key in pos_dict:
            pos += dict[key]
        elif key in neg_dict:
            neg += dict[key]
    # guard the ratio: with no negative words the old code raised
    # ZeroDivisionError; report infinity instead
    ratio = pos / float(neg) if neg else float('inf')
    return 'pos/neg: ' + str(ratio) + ' words:' + str(words)
### returns a list of sentiment scores that also includes the positive and negative numbers of words in a list
def sentiment_score_posneg_brokenout(dict):
    """Break a word-count dict into positive/negative components.

    Matches each word against the module-level pos_dict / neg_dict opinion
    lexicons and returns
    [pos_occurrences, neg_occurrences, pos_word_counts, neg_word_counts].
    NOTE(review): the word-count dicts are built from the *unique* matched
    keys, so every count inside them is 1 -- confirm whether per-word
    occurrence counts (dict[key]) were intended instead.
    """
    pos = 0
    neg = 0
    pos_list = list()
    neg_list = list()
    for key in dict.keys():
        if key in pos_dict:
            pos += dict[key]
            pos_list.append(key)
        elif key in neg_dict:
            neg += dict[key]
            neg_list.append(key)
    return [pos, neg, word_count_from_list(pos_list), word_count_from_list(neg_list)]
### returns list(type, ratio of pos to neg or total pos/neg depending on type, score, and total words)
def feedback_counts_with_score(fg):
    """Compute per-feedback sentiment counts.

    For each [feedback, score] pair in fg, returns a row of
    [pos_count, neg_count, score, total_words, pos_word_counts, neg_word_counts]
    (the last two via sentiment_score_posneg_brokenout).
    """
    full = list()
    for pair in fg:
        # clean_word_counts expects a list of feedback strings
        cleaned = clean_word_counts([pair[0]])
        # sum() over the counts replaces the manual accumulation loop
        word_count = sum(cleaned.values())
        s = sentiment_score_posneg_brokenout(cleaned)
        full.append([s[0], s[1], pair[1], word_count, s[2], s[3]])
    return full
### used to find top positive and negative words in a bin of feedback
def bin_sort_select(bin_list):
    """Keep the top words for every grade bin in each cluster dict.

    For each grade->word-count dict in bin_list, returns a dict mapping
    every grade key to (up to) its 5 most frequent words, dropping words
    that appear only once.
    """
    top_bin = list()
    for cdict in bin_list:
        # BUG FIX: build one result dict per cluster, *outside* the key
        # loop. The old code re-initialized b for every key, so only the
        # last grade's top words survived, and an empty first cluster
        # raised NameError on the undefined b.
        b = dict()
        for key in cdict:
            sort_list = sorted(cdict[key], key=cdict[key].get, reverse=True)[:5]
            d = defaultdict()
            for word in sort_list:
                if cdict[key][word] > 1:
                    d[word] = cdict[key][word]
            b[key] = d
        top_bin.append(b)
    return top_bin
### Start main code
# Load Course Data
# NOTE(review): 'rU' universal-newline mode (and the 'wb' csv writes further
# below) are Python 2 idioms; 'rU' was removed in Python 3.11 -- confirm the
# intended runtime before porting.
with open(input_file, 'rU') as csvfile:
    reader = csv.reader(csvfile, dialect='excel') # this is supposed to remove unwanted commas
    feedback_grade = list()
    skipfirst = True # skips first line
    hash_dict = dict()
    scores = list()
    for line in reader:
        if skipfirst == True:
            skipfirst = False
        else:
            # Skip rows flagged by remove_copy() (presumably duplicated or
            # copied feedback -- behavior defined elsewhere in this file).
            if remove_copy(line[0]): #feedback = line[0]
                continue
            this_line = list()
            if line[1] != '0' and line[2] != '0': #ignores all 0 scores and assignments worth 0, appends the grade to feedback_grade
                # Columns: line[0] = feedback text, line[1] = points earned,
                # line[2] = points possible; stores [text, fractional score].
                this_line.append(line[0])
                this_line.append(float(line[1])/float(line[2]))
                feedback_grade.append(this_line)
                scores.append(float(line[1])/float(line[2]))
#lists of lists that pair feedback with percent score on an assignment
clean_lines = list()
# word_percents accumulates, per lowercased word, the sum of fractional
# scores of every feedback line the word appears in.
word_percents = defaultdict(int)
for line in feedback_grade:
    linewords = line[0].split()
    for word in linewords:
        word_percents[word.lower()] += line[1]
#word, wordcount
#sorted_word_count = sorted(total_word_counts.keys(), key=lambda x: total_word_counts[x])
#for words in sorted_word_count:
#    print words+'\t'+str(word_counts[words])
### This area of the code is used to find relative word frequencies in all feedback compared to normal english
# Load at most the first 10000 rows of the tab-separated frequency file into
# freq_dict (term -> frequency string; first and fourth columns are unused).
freq_load = open(freq_file, 'rU')
lines_read = 0
freq_dict = dict()
for line in freq_load:
    if lines_read >= 10000:
        break
    [a, term, freq, d] = line.split('\t')
    freq_dict[term] = freq
    lines_read += 1
# Split the feedback into three score clusters (via cluster_string_by_score,
# defined elsewhere in this file) and compare log word probabilities of the
# lowest (index 0) and highest (index 2) clusters.
clustered_dicts = cluster_string_by_score(feedback_grade, scores)
c1_total = total_word_counts(clustered_dicts[0])
c2_total = total_word_counts(clustered_dicts[1])
c3_total = total_word_counts(clustered_dicts[2])
c1_sorted = log_compare(word_probability(clustered_dicts[0], c1_total))
#c2_sorted = log_compare(word_probability(clustered_dicts[1], c2_total))
c3_sorted = log_compare(word_probability(clustered_dicts[2], c3_total))
# c_diffs: for each word present in both clusters, high-cluster value minus
# low-cluster value.
c_diffs = defaultdict(int)
for key in c1_sorted:
    if key in c3_sorted:
        c_diffs[key] = c3_sorted[key] - c1_sorted[key]
#for key in c1_sorted:
    #if key in c3_sorted:
        #if (fabs(c3_sorted[key] - c1_sorted[key])) > 1.5:
            #print fabs(c3_sorted[key] - c1_sorted[key])
            #print 'under 92: ' + key+'\t'+str(c1_sorted[key])
            #if key in c2_sorted:
            #    print 'middle 50: ' + key+'\t'+str(c2_sorted[key])
            #print 'above 92: ' + key+'\t'+str(c3_sorted[key])
#separate by low medium and high
###### Load Sentiment Dictionaries
# sentiment_dict maps every lexicon word to +1 or -1; pos_dict / neg_dict
# hold the positive and negative halves separately for membership tests.
sentiment_dict = dict()
pos_dict = dict()
neg_dict = dict()
### Load positives
pos_sent = open(pos_file, 'rU')
for key in pos_sent:
    clean_key = key.strip()
    sentiment_dict[clean_key] = 1
    pos_dict[clean_key] = 1
### Load negatives
neg_sent = open(neg_file, 'rU')
for key in neg_sent:
    clean_key = key.strip()
    sentiment_dict[clean_key] = -1
    neg_dict[clean_key] = -1
#### This is used to find top used negative and positive words for a bin of feedback
binned = feedback_bins(feedback_grade)
binned_pos = list()
binned_neg = list()
for key in binned:
    # For each grade bin, split its concatenated feedback text into words
    # and count occurrences of positive/negative lexicon words.
    pos = defaultdict()
    neg = defaultdict()
    pos_words = list()
    neg_words = list()
    words = re.sub("[^\w]", " ", binned[key]).split()
    for word in words:
        if word in pos_dict:
            pos_words.append(word)
        elif word in neg_dict:
            neg_words.append(word)
    pos_counts = word_count_from_list(pos_words)
    neg_counts = word_count_from_list(neg_words)
    pos[key] = pos_counts
    neg[key] = neg_counts
    binned_pos.append(pos)
    binned_neg.append(neg)
# Reduce each bin to its top words (see bin_sort_select above).
positive_words = bin_sort_select(binned_pos)
negative_words = bin_sort_select(binned_neg)
# Aggregate sentiment per grade: sentiment_gd sums (pos - neg) occurrence
# totals, count_gd sums word counts, number_gd counts feedback items.
sentiment_gd = defaultdict(int)
count_gd = defaultdict(int)
number_gd = defaultdict(int)
feed = feedback_counts_with_score(feedback_grade)
for flist in feed:
    # flist = [pos_total, neg_total, fractional score, word_count, ...]
    score = flist[0] - flist[1]
    # Same grade extraction as feedback_bins(): the two digits after the
    # decimal point, with decimals '00' (a perfect score) mapped to 100.
    find_grade = re.search(r'(?<=\.)\d\d',(str(flist[2]) + '0'))
    grade = int(find_grade.group(0))
    if grade == 00:
        grade = 100
    if grade in sentiment_gd.keys():
        number_gd[grade] += 1
        sentiment_gd[grade] += score
        count_gd[grade] += flist[3]
    else:
        sentiment_gd[grade] = score
        count_gd[grade] = flist[3]
        number_gd[grade] = 1
# Per-grade averages of sentiment score and word count.
avg_sentiment_gd = defaultdict(int)
avg_count_gd = defaultdict(int)
for key in sentiment_gd.keys():
    avg_sentiment_gd[key] = (sentiment_gd[key] / float(number_gd[key]))
    avg_count_gd[key] = (count_gd[key] / float(number_gd[key]))
### OUT_FILES BELOW
# NOTE(review): opening csv outputs with mode 'wb' is the Python 2
# convention; on Python 3 csv files must be opened 'w' with newline=''.
# diff.csv: per-word log values in the low (< 92) and high (> 92) score
# clusters; the low column is left blank for words seen only in the high
# cluster.
with open('diff.csv', 'wb') as diff_csv:
    diff_write = csv.writer(diff_csv)
    diff_write.writerow(['word', 'Under 92', 'Above 92'])
    rows = list()
    for key in c1_sorted.keys():
        row = list()
        if key in c3_sorted.keys():
            row.append(key)
            row.append(c1_sorted[key])
            row.append(c3_sorted[key])
        else:
            row.append(key)
            row.append(c1_sorted[key])
        rows.append(row)
    # Words appearing only in the high cluster get an empty low column.
    for key in c3_sorted.keys():
        row = list()
        if key not in c1_sorted.keys():
            row.append(key)
            row.append('')
            row.append(c3_sorted[key])
            rows.append(row)
    for row in rows:
        diff_write.writerow(row)
# sentiment_grade_dict.csv: the per-grade averages computed above.
with open('sentiment_grade_dict.csv', 'wb') as sentiment_gd_out:
    sentiment_gd_writer = csv.writer(sentiment_gd_out)
    sentiment_gd_writer.writerow(['grade','sentiment score average','average words', 'n'])
    for key in sentiment_gd.keys():
        sentiment_gd_writer.writerow([key, avg_sentiment_gd[key], avg_count_gd[key], number_gd[key]])
# sentiment.csv: one raw row per feedback item from feedback_counts_with_score.
with open('sentiment.csv', 'wb') as sentiment_out:
    sentiment_writer = csv.writer(sentiment_out)
    sentiment_writer.writerow(['pos', 'neg', 'grade score', 'word count'])
    for row in feed:
        sentiment_writer.writerow(row)
# sentimentwords.csv: per grade bin, the top positive then top negative
# words formatted as 'word: count' strings in a single cell.
with open('sentimentwords.csv', 'wb') as sentiment_w_out:
    sentiment_word_writer = csv.writer(sentiment_w_out)
    sentiment_word_writer.writerow(['Grade', 'pos'])
    prow = list()
    for cdict in positive_words:
        for key in cdict:
            pentry = ''
            for wkey in cdict[key].keys():
                pentry += (wkey + ': ' + str(cdict[key][wkey]) + ' ')
            prow.append([key, pentry])
    for row in prow:
        sentiment_word_writer.writerow(row)
    sentiment_word_writer.writerow(['Grade', 'neg'])
    nrow = list()
    for cdict in negative_words:
        for key in cdict:
            nentry = ''
            for wkey in cdict[key].keys():
                nentry += (wkey + ': ' + str(cdict[key][wkey]) + ' ')
            nrow.append([key,nentry])
    for row in nrow:
        sentiment_word_writer.writerow(row)
| true |
201cd50c6ca7982e929b59a11821c487913ed6ce | Python | bakkurt/python_calismalarim | /email_parser.py | UTF-8 | 635 | 3.1875 | 3 | [] | no_license | #bu program, isim <e-posta>, isim <e-posta> biçimindeki belgeden
#e-posta adreslerini ayıklayıp yeni bir belgeye yapıştırır.
e_posta_giris = input("E-posta adreslerinin bulunduğu dosyanın adını giriniz: ")
dosya_oku = open(e_posta_giris, "r")
dosya_yaz = open("e-posta_yaz.txt","w")
metin = dosya_oku.readline()
eposta =""
for i in range (0, len(metin)):
if metin[i] == "<":
for j in range(i+1, len(metin)):
if metin[j] == ">":
break
else:
eposta += metin[j]
eposta += ", "
print(eposta)
dosya_yaz.write(eposta)
dosya_oku.close()
dosya_yaz.close()
| true |
0cb58e905976e95169d840a0339eed396d318070 | Python | noobgrow/pointing_game | /compute_score.py | UTF-8 | 3,992 | 2.65625 | 3 | [
"MIT"
] | permissive | import numpy as np
def compute_metric(records, metric='pointing', idx=None):
N, C = records.shape
if idx is None:
example_idx, class_idx = np.where(records != 0)
else:
idx = idx[:len(records), :]
example_idx, class_idx = np.where(idx)
if metric == 'pointing':
hits = np.zeros(C)
misses = np.zeros(C)
elif metric == 'average_precision':
sum_precs = np.zeros(C)
num_examples = np.zeros(C)
else:
assert(False)
count = 0
for i in range(len(example_idx)):
j = example_idx[i]
c = class_idx[i]
rec = records[j, c]
if metric == 'pointing':
if rec == 1:
hits[c] += 1
elif rec == -1:
misses[c] += 1
else:
count += 1
elif metric == 'average_precision':
sum_precs[c] += rec
num_examples[c] += 1
else:
assert(False)
print(count)
if metric == 'pointing':
acc = hits / (hits + misses)
avg_acc = np.mean(acc)
print('Avg Acc: %.4f' % avg_acc)
for c in range(len(acc)):
print(acc[c])
return acc, avg_acc
elif metric == 'average_precision':
class_mean_avg_prec = sum_precs / num_examples
mean_avg_prec = np.mean(class_mean_avg_prec)
print('Mean Avg Prec: %.4f' % mean_avg_prec)
for c in range(len(class_mean_avg_prec)):
print(class_mean_avg_prec[c])
return class_mean_avg_prec, mean_avg_prec
else:
assert(False)
def compute_metrics(out_path, metric='pointing', dataset='voc_2007'):
records = np.loadtxt(out_path)
print(f'Computing metrics from {out_path}')
print(f'Overall Performance on {dataset}')
compute_metric(records, metric=metric)
hard_idx = np.loadtxt(f'data/hard_{dataset}.txt', delimiter=',')
if 'coco' in dataset:
print('Resorting COCO hard indices.')
pointing_paths = np.loadtxt('data/coco_pointing_game_file_list.txt', delimiter='\n', dtype=str)
pytorch_paths = np.loadtxt('data/coco_val2014_pytorch_filelist.txt', delimiter='\n', dtype=str)
d = {path: i for i, path in enumerate(pytorch_paths)}
new_hard_idx = np.zeros_like(records)
for i in range(len(hard_idx)):
if d[pointing_paths[i]] < len(new_hard_idx):
try:
new_hard_idx[d[pointing_paths[i]]] = hard_idx[i]
except:
import pdb; pdb.set_trace();
hard_idx = new_hard_idx
#reverse_sorted_idx = np.loadtxt('data/coco_val2014_reverse_idx.txt', dtype=int, delimiter='\n')
#import pdb; pdb.set_trace()
#hard_idx = hard_idx[reverse_sorted_idx]
if records.shape != hard_idx.shape:
print('Different shapes between records %s and hard idx %s.' % (records.shape,
hard_idx.shape))
print(f'Difficult Performance on {dataset}')
compute_metric(records, metric=metric, idx=hard_idx)
if __name__ == '__main__':
import argparse
import sys
import traceback
try:
parser = argparse.ArgumentParser(description='Learn perturbation mask')
parser.add_argument('--out_path', type=str, default=None)
parser.add_argument('--dataset',
choices=['voc_2007', 'coco_2014', 'coco_2017'],
default='voc_2007',
help='name of dataset')
parser.add_argument('--metric',
type=str,
choices=['pointing', 'average_precision'],
default='pointing')
args = parser.parse_args()
except:
traceback.print_exc(file=sys.stdout)
sys.exit(1)
compute_metrics(out_path=args.out_path,
dataset=args.dataset,
metric=args.metric)
| true |
15d0f0b19dba80da041e5d08aa2170ebd77c7487 | Python | dhockaday/ismir2018 | /scattering_autoencoder/utils/utils_torch.py | UTF-8 | 2,471 | 2.53125 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
def apply_func_at_some_coords(v, func):
m = v.size(1)
if m > 1:
return torch.cat(
(v.narrow(1, 0, 1), func(v.narrow(1, 1, m - 1))), dim=1)
else:
return v
def pad1D(x, pad_left, pad_right, mode='constant', value=0):
return F.pad(x.unsqueeze(2),
(pad_left, pad_right, 0, 0),
mode=mode, value=value).squeeze(2)
class WeightedMSELoss(nn.Module):
def __init__(self, weight=None):
super(WeightedMSELoss, self).__init__()
self.register_buffer('weight', weight)
def forward(self, input, target):
if self.weight is None:
return F.mse_loss(input, target)
else:
return F.mse_loss(input * Variable(self.weight),
target * Variable(self.weight))
class ModulusStable(Function):
@staticmethod
def forward(ctx, input, p=2, dim=-1, keepdim=False):
ctx.p = p
ctx.dim = dim
ctx.keepdim = False if keepdim is None else keepdim
if dim is None:
norm = input.norm(p)
output = input.new((norm,))
else:
if keepdim is not None:
output = input.norm(p, dim, keepdim=keepdim)
else:
output = input.norm(p, dim)
ctx.save_for_backward(input, output)
return output
@staticmethod
def backward(ctx, grad_output):
input, output = ctx.saved_variables
if ctx.dim is not None and ctx.keepdim is False and input.dim() != 1:
grad_output = grad_output.unsqueeze(ctx.dim)
output = output.unsqueeze(ctx.dim)
if ctx.p == 2:
grad_input = input.mul(grad_output).div(output)
else:
input_pow = input.abs().pow(ctx.p - 2)
output_pow = output.pow(ctx.p - 1)
grad_input = input.mul(input_pow).mul(grad_output).div(output_pow)
# Special case at 0 where we return a subgradient containing 0
grad_input.masked_fill_(output == 0, 0)
return grad_input, None, None, None
def dictionary_to_tensor(h):
# compute the size of the array
any_key = list(h.keys())[0]
out = torch.zeros(
(len(h.keys()),) + h[any_key].size()).type(type(h[any_key]))
for i, k in enumerate(sorted(list(h.keys()))):
out[i] = h[k]
return out
| true |
820550332119c39b63ad67cf21ca7b845821c1b6 | Python | alexandre-mbm/bancadaruralista | /scripts/normaliza-nomes-parlamentares.py | UTF-8 | 721 | 2.9375 | 3 | [] | no_license | from compare import *
# Script que compara os nomes dos candidatos e gera uma lista csv para correções
# Utiliza o compare.py que precisa da difflib e do unidecode
# pip install difflib
# pip install unidecode
# Cria um objeto Matcher - específique o arquivo que vai servir de base para comparações e o campo para comparar
m = Matcher("senadocut.csv", "NOME_CANDIDATO")
votatoon = open('senado-votacoes.csv', "r")
votacoes = csv.DictReader(votatoon)
test_list = []
for v in votacoes:
test_list.append(v['NOME'])
# A função test e a função csv_comparisson recebem uma lista como paramêtro de entrada.
# Use o print_result para ver o resultado das comparações.
m.test(test_list, print_result=True)
| true |
7fb61851327e315401d18de12794d742320a0af2 | Python | jjones203/DecisionTrees | /dec_tree.py | UTF-8 | 16,398 | 3.234375 | 3 | [] | no_license | # Jessica Jones
# CS 429
# Project 1
import csv
import math
import numpy
import node
import mushroom
# info about mushroom dataset
target_attrib = mushroom.target_attrib
positive = mushroom.positive
negative = mushroom.negative
null_class = mushroom.unknown_class
unknown_val = mushroom.unknown_val
fields = mushroom.attributes
attribute_values = mushroom.attribute_values
# info for table of chi-squared critical values
chi_sq_path = './chi_squared_table.txt'
chi_sq_vals = ['dof', '50', '95', '99']
chi_squared_table = []
# print tree for debugging
def print_tree(root, level):
if root.parent:
parent_name = str(root.parent.split_attrib)
else:
parent_name = ''
if root.label:
print("#",root.count," Level-",level," Label-",root.label," Parent-",parent_name," ",str(root.attributes))
else:
print("#",root.count," Level-",level," Split attrib-",root.split_attrib," Parent-",parent_name," ",str(root.attributes))
for child in root.children:
print_tree(child, level+1)
# get depth of tree
def get_max_depth(root, level):
if root.label:
return level
else:
max_depth = 0
for child in root.children:
depth = get_max_depth(child, level+1)
if depth > max_depth:
max_depth = depth
return max_depth
# get number nodes in tree
def get_num_nodes(root):
if root == None:
return 0
else:
count = 1
for child in root.children:
count += get_num_nodes(child)
return count
# when classifying instance with missing value for an attribute,
# returns token value at leaf; method from Quinlan 1986
def get_leaf_token(node, instance, token):
# return token value after reaching leaf
if node.label:
if node.label == positive:
return token
else:
return -token
# if not at leaf, find attribute node splits on. Multiply token
# by percent of training examples that share instance's attribute value.
# Then recurse.
else:
split_attrib = node.split_attrib
instance_value = instance[split_attrib]
percent_match = len(search_examples(split_attrib, instance_value, node.examples))/len(node.examples)
token *= percent_match
child_match = [child for child in node.children if child.attributes[split_attrib] == instance_value][0]
return get_leaf_token(child_match, instance, token)
# takes list of instances, classifies each and
# returns list of classifications
def classify_instance_list(root, target_attrib, instance_list):
class_list = []
for instance in instance_list:
classification = classify_instance(root, instance)
class_list.append(classification)
return class_list
# classifies an instance using decision tree
def classify_instance(root, instance):
# if root has label, return label
if root.label:
return root.label
# else get root's split attribute
else:
split_attrib = root.split_attrib
# if value of attribute unknown, call other function to get probable class
# assume an instance does not have unknown values for > 1 attribute
if instance[split_attrib] == unknown_val:
return classify_unknown_attrib(root,instance)
# check which of root's children's value of split attribute matches instance's value
for child in root.children:
if child.attributes[split_attrib] == instance[split_attrib]:
# recurse
return classify_instance(child, instance)
# for instances with an unknown attribute value,
# return probable class of instance by passing token value through tree
# method from Quinlan 1986
def classify_unknown_attrib(root, instance):
# pos_token and neg_token sum values returned from leaves
pos_token = 0
neg_token = 0
num_root = len(root.examples)
# pass token to each child proportional to the number of
# training examples at root that went to each child
for child in root.children:
num_children = len(child.examples)
leaf_val = get_leaf_token(child, instance, num_children/num_root)
# if token value is positive, returned from leaf labeled +
if leaf_val > 0:
pos_token += leaf_val
else:
neg_token -= leaf_val
if pos_token > neg_token:
return positive
else:
return negative
# return list of examples for whom attrib == value
def search_examples(attrib, value, examples):
matches = [ex for ex in examples if ex[attrib] == value]
#print (len(matches))
return matches
# get the most common class from set of examples
def get_most_common_value(target_attrib, examples):
num_examples = len(examples)
num_positive = len(search_examples(target_attrib, positive, examples))
if num_positive > (num_examples - num_positive):
return positive
else:
return negative
# calculates classification error
def get_classification_error(prob_positive, prob_negative):
classification_error = 1 - max(prob_positive, prob_negative)
return classification_error
# calculates entropy
def get_entropy(prob_positive, prob_negative):
entropy = 0
if prob_positive:
entropy += - prob_positive * math.log(prob_positive,2)
if prob_negative:
entropy += - prob_negative * math.log(prob_negative,2)
return entropy
# calculate resulting impurity for splitting on given attribute
def eval_attrib(examples, target_attrib, attrib_to_eval, criterion):
num_examples = len(examples)
impurity = 0
unknowns = None
# if value of target_attrib is '?' for any example
if (search_examples(attrib_to_eval, unknown_val, examples)):
# get number of '?' in positive and negative classes
unknowns = search_examples(attrib_to_eval, unknown_val, examples)
num_unknowns = len(unknowns)
num_pos_unknowns = len(search_examples(target_attrib, positive, unknowns))
num_neg_unknowns = len(search_examples(target_attrib, negative, unknowns))
# for each valid value of target_attrib
# get number of positive & negative instance, adjusting for
# unknowns if necessary
for val in attribute_values[attrib_to_eval]:
examples_val = search_examples(attrib_to_eval, val, examples)
if unknowns:
raw_num_pos = len(search_examples(target_attrib, positive, examples_val))
raw_num_neg = len(search_examples(target_attrib, negative, examples_val))
adj_ratio = (raw_num_pos+raw_num_neg)/(num_examples-num_unknowns)
# from Quinlan: pi = pi + pu * (pi + ni)/(sum(pi + ni))
num_positive = raw_num_pos + num_pos_unknowns * adj_ratio
num_negative = raw_num_neg + num_neg_unknowns * adj_ratio
else:
num_positive = len(search_examples(target_attrib, positive, examples_val))
num_negative = len(search_examples(target_attrib, negative, examples_val))
# calculate prob(+) and prob(-); need to avoid / by 0 error
if (num_positive+num_negative):
prob_positive = num_positive/(num_positive+num_negative)
else:
prob_positive = 0
prob_negative = 1 - prob_positive
# calculate entropy or classification error per criterion
if criterion == 'entropy':
impurity_val = get_entropy(prob_positive, prob_negative)
else:
impurity_val = get_classification_error(prob_positive, prob_negative)
# calculate weighted sum of entropy/classification
impurity += impurity_val * (num_positive+num_negative)/num_examples
# return weighted sum
return impurity
# decide which of available attributes to split on
def get_best_attrib(examples, target_attrib, attributes, criterion):
start_impurity = 0
num_positive = len(search_examples(target_attrib, str(positive), examples))
prob_positive = num_positive/len(examples)
if (criterion == 'entropy'):
start_impurity = get_entropy(prob_positive, 1-prob_positive)
else:
start_impurity = get_classification_error(prob_positive, 1-prob_positive)
best_attrib = None
best_gain = 0
for attrib in attributes:
impurity = eval_attrib(examples, target_attrib, attrib, criterion)
gain = start_impurity - impurity
if gain > best_gain:
best_gain = gain
best_attrib = attrib
return best_attrib
# get chi-squared value for attribute chosen for split;
# formula and variable names from:
# http://isites.harvard.edu/fs/docs/icb.topic539621.files/lec7.pdf
def get_chi_squared(examples, target_attrib, attrib_for_split):
# p & n are number of +/- examples prior to splitting on node
p = len(search_examples(target_attrib, positive, examples))
n = len(search_examples(target_attrib, negative, examples))
devX = 0
for x in attribute_values[attrib_for_split]:
# Dx is subset of examples with split_attrib == x
Dx = search_examples(attrib_for_split, x, examples)
if len(Dx) == 0:
continue
# p_hat & n_hat are number of +/- examples in Dx if
# Dx has same distribution as all examples in node
p_hat = p/(p+n) * len(Dx)
n_hat = n/(p+n) * len(Dx)
# px and nx are actual number of +/- examples in Dx
px = len(search_examples(target_attrib, positive, Dx))
nx = len(search_examples(target_attrib, negative, Dx))
# devX is deviation from absence of pattern
devX += ((px - p_hat) ** 2)/p_hat + ((nx - n_hat) ** 2)/n_hat
return devX
# calculate threshold value for chi-squared test
def get_threshold(attrib_for_split, confidence):
# get degrees of freedome
deg = len(attribute_values[attrib_for_split])-1
# get critical values for attribute's degrees of freedom
critical_vals = [row for row in chi_squared_table if row['dof'] == str(deg)][0]
return float(critical_vals[str(confidence)])
# helper method labels node with most common class of its examples
def label_most_common(node, target_attrib):
if (get_most_common_value(target_attrib, node.examples)) == positive:
node.label = positive
node.attributes[target_attrib] = positive
else:
node.label = negative
node.attributes[target_attrib] = negative
return node
# ID3 from Mitchell
def make_id3_tree(examples, target_attrib, attributes, parent, inherited_attributes, criterion, confidence):
root = node.Node(parent)
# Node.attributes represents conjunction of attribute tests which are true
# for training examples at node
root.attributes = inherited_attributes.copy()
if parent:
parent.children.append(root)
root.examples = examples[:]
# if all examples are +, return single-node tree Root with label +
if len(search_examples(target_attrib, positive, examples)) == len(examples):
root.label = positive
root.attributes[target_attrib] = positive
# if all examples are -, return single-node tree Root with label -
elif len(search_examples(target_attrib, negative, examples)) == len(examples):
root.label = negative
root.attributes[target_attrib] = negative
# if set of attributes to be tested is empty, return single-node
# tree Root with label == most common value of target_attrib in examples
elif not attributes:
root = label_most_common(root, target_attrib)
else:
# else
# find attribute A w/highest info gain/accuracy
attrib_for_split = get_best_attrib(examples, target_attrib, attributes, criterion)
# perform chi-squared test on attribute A if confidence level entered
if confidence:
chi_sq = get_chi_squared(examples, target_attrib, attrib_for_split)
threshold = get_threshold(attrib_for_split, confidence)
if chi_sq < threshold:
root = label_most_common(root, target_attrib)
# if root not yet labeled, either passed chi-square test or wasn't tested
if not root.label:
# set decision attribute for root == A
root.split_attrib = attrib_for_split
# for each possible value vi of A
for value in attribute_values[attrib_for_split]:
# add a branch below root corresponding to A == vi
# let examples_vi be subset of examples with A == vi
examples_val = search_examples(attrib_for_split, value, examples)
# if |examples_vi| == 0, add leaf node w/most common value of target_attrib in examples
if not examples_val and not root.children:
child = node.Node(parent)
root.children.append(child)
label = get_most_common_value(target_attrib,examples)
child.label = label
child.attributes = root.attributes.copy()
child.attributes[target_attrib] = label
# else add subtree make_id3_tree(examples_vi, target_attrib, (attributes-A), criterion, root)
else:
attributes_child = attributes[:]
attributes_child.remove(attrib_for_split)
attribute_values_child = root.attributes.copy()
attribute_values_child[attrib_for_split] = value
make_id3_tree(examples_val, target_attrib, attributes_child, root, attribute_values_child, criterion, confidence)
return root
# populate lists of dictionaries from csv files
def get_file_data(filename, filefields, data):
with open(filename) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
data.append(dict(zip(filefields, row)))
csvfile.close()
# output list of classes to csv
def output_csv(filepath, class_list):
filename = filepath +'output.txt'
with open(filename, 'w') as csvfile:
writer = csv.writer(csvfile)
for item in class_list:
writer.writerow([item,])
csvfile.close()
# generate confusion matrix for instances w/known classes
def get_confusion_mtx(target_attrib, instances, predictions):
mtx = numpy.zeros((2,2))
num_instances = len(instances)
if num_instances != len(predictions):
print ('Unequal number of instances and predictions')
else:
for i in range(0,len(instances)):
# find if true pos/neg, false pos/neg
gt = instances[i][target_attrib]
pred = predictions[i]
# increment confusion mtx
if gt == pred:
if gt == positive:
mtx[0][0] += 1
else:
mtx[1][1] += 1
else:
if gt == positive:
mtx[0][1] += 1
else:
mtx[1][0] += 1
return mtx
def get_tree(training_data, confidence, entropy):
# attributes = attributes available to split on
attributes = fields[:]
attributes.remove(target_attrib)
if entropy:
criterion = 'entropy'
else:
criterion = 'class_error'
tree_root = make_id3_tree(training_data, target_attrib, attributes, None, {}, criterion, int(confidence))
return tree_root
def test_tree(tree_root, results_table, column, test_data):
predictions = classify_instance_list(tree_root, target_attrib, test_data)
conf_mtx = get_confusion_mtx(target_attrib, test_data, predictions)
print (conf_mtx.astype(int))
accuracy = numpy.sum(conf_mtx.diagonal())/numpy.sum(conf_mtx)
depth = get_max_depth(tree_root, 0)
num_nodes = get_num_nodes(tree_root)
results_table[1][column] = str(int(accuracy*100))+'%'
results_table[2][column] = str(num_nodes)
results_table[3][column] = str(depth)
def do_id3(train_file, test_file, validation_file, output_path):
training_data = []
get_file_data(train_file, fields, training_data)
get_file_data(chi_sq_path, chi_sq_vals, chi_squared_table)
test_data = []
get_file_data(test_file, fields, test_data)
validation_data = []
get_file_data(validation_file, fields, validation_data)
ce_results = [['0' for i in range(5)] for i in range(4)]
ent_results = [['0' for i in range(5)] for i in range(4)]
tab_labels = ['Confidence Level', 'Accuracy\t', 'Number Nodes\t', 'Tree Depth\t']
for i in range(4):
ce_results[i][0] = tab_labels[i]
ent_results[i][0] = tab_labels[i]
for i in range(1,4):
ce_results[0][i+1] = str(chi_sq_vals[i])
ent_results[0][i+1] = str(chi_sq_vals[i])
print ('Using Classification Error as Criterion:')
for i in range(1,5):
cl = ce_results[0][i]
print ('Confidence Level ',cl,'%')
tree_root = get_tree(training_data, cl, False)
test_tree(tree_root, ce_results, i, test_data)
print ('\nUsing Entropy as Criterion:')
for i in range(1,5):
cl = ent_results[0][i]
print ('Confidence Level ',cl,'%')
tree_root = get_tree(training_data, cl, True)
test_tree(tree_root, ent_results, i, test_data)
print('\n')
print ('Criterion = Classification Error')
for row in ce_results:
print ('\t'.join(row))
print ('\nCriterion = Entropy')
for row in ent_results:
print ('\t'.join(row))
# handle validation data
val_tree = get_tree(training_data, 0, True)
predictions = classify_instance_list(val_tree, target_attrib, validation_data)
if output_path:
output_csv(output_path, predictions)
| true |
3e083dba3e59492fb7341d53c62107d86bebcf88 | Python | Somg10/PythonBasic | /M1 Repetitive Printing - Python.py | UTF-8 | 414 | 3.984375 | 4 | [] | no_license | # Function to print given string 'x' times
def print_fun(string, x):
# Your code here
print(string*x)
#{
#Driver Code Starts.
# Driver Code
def main():
testcases = int(input())
# Loop for testcases
while(testcases > 0):
string = input()
x = int(input())
print_fun(string, x)
testcases -= 1
if __name__ == '__main__':
main()
#} Driver Code Ends
| true |
b041d4aa59d32b038af5e130b980c6508ffbbdd4 | Python | shoubhikraj/geodesic-interpolate | /geodesic_interpolate/__main__.py | UTF-8 | 4,585 | 3.203125 | 3 | [] | no_license | """Performing geodesic interpolation or smoothing.
Optimize reaction path using geometric information by minimizing path length with metrics defined by
redundant internal coordinates. Avoids the discontinuity and convergence problems of conventional
interpolation methods by incorporating internal coordinate structure while operating in Cartesian,
avoiding unfeasibility.
Xiaolei Zhu et al, Martinez Group, Stanford University
"""
import logging
import argparse
import numpy as np
from .fileio import read_xyz, write_xyz
from .interpolation import redistribute
from .geodesic import Geodesic
logger = logging.getLogger(__name__)
def main():
"""Main entry point of the geodesic interpolation package.
Parse command line arguments then activate the interpolators and smoothers."""
ps = argparse.ArgumentParser(description="Interpolates between two geometries",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ps.add_argument("filename", type=str, help="XYZ file containing geometries. If the number of images "
"is smaller than the desired number, interpolation points will be added. If the "
"number is greater, subsampling will be performed.")
ps.add_argument("--nimages", type=int, default=17, help="Number of images.")
ps.add_argument("--sweep", action="store_true", help="Sweep across the path optimizing one image at "
"a time, instead of moving all images at the same time. Default is to perform sweeping "
"updates if there are more than 30 atoms.")
ps.add_argument("--no-sweep", dest='sweep', action="store_false", help="Do not perform sweeping.")
ps.set_defaults(sweep=None)
ps.add_argument("--output", default="interpolated.xyz", type=str, help="Output filename. "
"Default is interp.xyz")
ps.add_argument("--tol", default=2e-3, type=float, help="Convergence tolerance")
ps.add_argument("--maxiter", default=15, type=int, help="Maximum number of minimization iterations")
ps.add_argument("--microiter", default=20, type=int, help="Maximum number of micro iterations for "
"sweeping algorithm.")
ps.add_argument("--scaling", default=1.7, type=float, help="Exponential parameter for morse potential")
ps.add_argument("--friction", default=1e-2, type=float, help="Size of friction term used to prevent "
"very large change of geometry.")
ps.add_argument("--dist-cutoff", dest='dist_cutoff', default=3, type=float, help="Cut-off value for the "
"distance between a pair of atoms to be included in the coordinate system.")
ps.add_argument("--logging", default="INFO", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
help="Logging level to adopt [ DEBUG | INFO | WARNING | ERROR ]")
ps.add_argument("--save-raw", dest='save_raw', default=None, type=str, help="When specified, save the "
"raw path after bisections be before smoothing.")
args = ps.parse_args()
# Setup logging based on designated logging level
logging.basicConfig(format="[%(module)-12s]%(message)s", level=args.logging)
# Read the initial geometries.
symbols, X = read_xyz(args.filename)
logger.info('Loaded %d geometries from %s', len(X), args.filename)
if len(X) < 2:
raise ValueError("Need at least two initial geometries.")
# First redistribute number of images. Perform interpolation if too few and subsampling if too many
# images are given
raw = redistribute(symbols, X, args.nimages, tol=args.tol * 5)
if args.save_raw is not None:
write_xyz(args.save_raw, symbols, raw)
# Perform smoothing by minimizing distance in Cartesian coordinates with redundant internal metric
# to find the appropriate geodesic curve on the hyperspace.
smoother = Geodesic(symbols, raw, args.scaling, threshold=args.dist_cutoff, friction=args.friction)
if args.sweep is None:
args.sweep = len(symbols) > 35
try:
if args.sweep:
smoother.sweep(tol=args.tol, max_iter=args.maxiter, micro_iter=args.microiter)
else:
smoother.smooth(tol=args.tol, max_iter=args.maxiter)
finally:
# Save the smoothed path to output file. try block is to ensure output is saved if one ^C the
# process, or there is an error
logging.info('Saving final path to file %s', args.output)
write_xyz(args.output, symbols, smoother.path)
if __name__ == "__main__":
main()
| true |
e42805b9ffe0e5918191b657c054b0125baed9c5 | Python | RezoApio/WDUTechies | /python/compass.py | UTF-8 | 2,729 | 3.5625 | 4 | [
"MIT"
] | permissive | __DEBUG__ = False
def log(text: str):
    """Debug helper: print *text* only while the module-level __DEBUG__ flag is on.

    Callers pass non-str values too (ints, lists); print() handles them all.
    """
    if not __DEBUG__:
        return
    print(text)
class Point:
    """An immutable-by-convention 2-D grid point with row (x) and column (y)."""

    def __init__(self, a, b):
        # a -> x coordinate, b -> y coordinate
        self.x = a
        self.y = b

    def __str__(self):
        return f"Point ({self.x},{self.y})"

    def move(self, dx: int, dy: int):
        """Return a NEW Point shifted by (dx, dy); self is left unchanged."""
        return Point(self.x + dx, self.y + dy)
def int_dist(a: "Point", b: "Point") -> int:
    """Return the Chebyshev (chessboard) distance between points *a* and *b*.

    A step may be horizontal, vertical or diagonal, so the minimum number of
    steps is max(|dx|, |dy|).  This replaces the original O(distance)
    recursion — which walked one diagonal step at a time until the points
    were axis-aligned and then added the straight-line remainder — with the
    mathematically equivalent O(1) closed form.  The per-step debug log()
    chatter of the recursive version is dropped along with the recursion.
    Annotations are quoted (forward references) so the function is importable
    even before/without the Point class being defined.
    """
    return max(abs(a.x - b.x), abs(a.y - b.y))
def navigation(seaside):
    """Total travel cost from the yacht 'Y' to the marks 'M', 'S' and 'C'.

    Scans the grid once, recording the Point of each marker, then sums the
    int_dist() cost from Y to each of S, M and C.
    """
    log(seaside)
    log(len(seaside))
    marks = {}
    for row_idx, row in enumerate(seaside):
        for col_idx, cell in enumerate(row):
            if cell in ('Y', 'C', 'M', 'S'):
                marks[cell] = Point(row_idx, col_idx)
    Y = marks['Y']
    C = marks['C']
    M = marks['M']
    S = marks['S']
    log("Y:" + str(Y))
    log("M:" + str(M))
    log("S:" + str(S))
    log("C:" + str(C))
    log("calcul ds")
    ds = int_dist(Y, S)
    log("ds:=" + str(ds))
    log("calcul dm")
    dm = int_dist(Y, M)
    log("dm:=" + str(dm))
    log("calcul dc")
    dc = int_dist(Y, C)
    log("dc:=" + str(dc))
    return dm + dc + ds
if __name__ == '__main__':
    # Self-check harness: prints one worked example, then verifies three
    # fixed boards against their known answers.
    print("Example:")
    print(navigation([['Y', 0, 0, 0, 'C'],
                      [ 0, 0, 0, 0, 0],
                      [ 0, 0, 0, 0, 0],
                      ['M', 0, 0, 0, 'S']]))
    #These "asserts" using only for self-checking and not necessary for auto-testing
    assert navigation([['Y', 0, 0, 0, 'C'],
                       [ 0, 0, 0, 0, 0],
                       [ 0, 0, 0, 0, 0],
                       ['M', 0, 0, 0, 'S']]) == 11
    assert navigation([[ 0, 0, 'C'],
                       [ 0, 'S', 0],
                       ['M','Y', 0]]) == 4
    assert navigation([[ 0, 0, 0, 0, 0, 0, 0],
                       [ 0, 0, 0, 'M', 0, 'S', 0],
                       [ 0, 0, 0, 0, 0, 0, 0],
                       [ 0, 0, 0, 'C', 0, 0, 0],
                       [ 0, 'Y',0, 0, 0, 0, 0],
                       [ 0, 0, 0, 0, 0, 0, 0]]) == 9
    print("Coding complete? Click 'Check' to earn cool rewards!")
| true |
3919aeb85d43f34c930893777589f4a122d74f2d | Python | BALPRES/BALPRES_BE | /website/models.py | UTF-8 | 2,661 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class OurCompanyContent(models.Model):
    """Landing-page "our company" section: headline, body text and two images."""

    title = models.CharField(max_length=500, default="", null=True)
    content = models.CharField(max_length=1000, default="", null=True)
    img_url_1 = models.CharField(max_length=500, default="http://i.imgur.com/0i6g2kx.png")
    img_url_2 = models.CharField(max_length=500, default="http://i.imgur.com/0i6g2kx.png")

    def set_default_models(self):
        """Seed the table with a single placeholder row."""
        OurCompanyContent(title="Our Company has created 1928...").save()

    def __str__(self):
        return self.title
class OurServicesContent(models.Model):
    """A single "our services" card: a FontAwesome icon plus title and text."""

    icon = models.CharField(max_length=50, default="", null=True)
    title = models.CharField(max_length=500, default="", null=True)
    content = models.CharField(max_length=1000, default="", null=True)

    def set_default_models(self):
        """Seed the table with the three stock service cards."""
        OurServicesContent(icon="fa fa-cog", title="Easy to Customize").save()
        OurServicesContent(icon="fa fa-dropbox", title="Ready to Use").save()
        OurServicesContent(icon="fa fa-desktop", title="Responsive Layout").save()

    def __str__(self):
        return self.title
class Recomendations(models.Model):
    """A testimonial/recommendation quote and the source it came from.

    NOTE(review): the class name keeps the original (misspelled) spelling —
    renaming would break existing migrations and references.
    """

    content = models.CharField(max_length=1000, default="", null=True)
    source = models.CharField(max_length=500, default="", null=True)

    def set_default_models(self):
        """Seed the table with two placeholder sources."""
        Recomendations(source="Fuente 1").save()
        Recomendations(source="Fuente 2").save()

    def __str__(self):
        return self.source
class OurPersonalContent(models.Model):
    """"Our personnel" section: a text blurb with an accompanying image."""

    content = models.CharField(max_length=1000, default="", null=True)
    img_url = models.CharField(max_length=500, default="http://i.imgur.com/0i6g2kx.png")

    def set_default_models(self):
        """Seed the table with one all-defaults row."""
        OurPersonalContent().save()

    def __str__(self):
        return self.img_url
class OurProductsContent(models.Model):
    """A product listing: name, free-form price range and description."""

    price_range = models.CharField(max_length=100, default="", null=True)
    product_name = models.CharField(max_length=100, default="", null=True)
    content = models.CharField(max_length=1000, default="", null=True)

    def set_default_models(self):
        """Seed the table with the three stock product categories."""
        OurProductsContent(product_name="Cabañas").save()
        OurProductsContent(product_name="Albercas").save()
        OurProductsContent(product_name="Áreas Recreativas").save()

    def __str__(self):
        return self.product_name
c2c25882cb51fe02d88d73d0a9af6ad51748291a | Python | mkoron/virtual-tea-party | /room.py | UTF-8 | 2,542 | 3.203125 | 3 | [] | no_license | """
Represents normal chat rooms and other states.
"""
import handler
from exceptions import EndSession
class Room(handler.CommandHandler):
    """
    A generic environment that may contain one or more users.
    It takes care of basic command handling and broadcasting.
    """

    def __init__(self, server):
        self.server = server    # the owning chat server
        self.sessions = []      # sessions currently inside this room

    def add(self, session):
        """Register *session* as being inside this room."""
        self.sessions.append(session)

    def remove(self, session):
        """Drop *session* from this room."""
        self.sessions.remove(session)

    def broadcast(self, line):
        """Push *line* to every session currently in the room."""
        for member in self.sessions:
            member.push(line)

    def do_logout(self, session, line):
        """Handle the 'logout' command by terminating the session."""
        raise EndSession
class LoginRoom(Room):
    """
    A room for a single person who has just connected.

    Only the 'login <nick>' command is useful here; anything else gets a
    reminder via unknown().
    """
    def add(self, session):
        Room.add(self, session)
        self.broadcast('Welcome to {}\r\n'.format(self.server.name).encode())
    def unknown(self, session, cmd):
        # Any command other than login/logout just prompts the user to log in.
        session.push(b'Please log in\nUse "login <nick>"\r\n')
    def do_login(self, session, line):
        name = line.strip()
        if not name:
            session.push(b'Please enter a name\r\n')
        elif name in self.server.users:
            # FIX: the original called .format() on a *bytes* literal; bytes
            # has no .format in Python 3, so this raised AttributeError.
            # Format the str, then encode for the wire.
            session.push('The name "{}" is taken.\r\n'.format(name).encode())
            session.push(b'Please try again.\r\n')
        else:
            # session.name stays a str; ChatRoom encodes it when broadcasting.
            session.name = name
            session.enter(self.server.main_room)
class ChatRoom(Room):
    """
    A room for multiple users who can chat with each others.
    """
    def add(self, session):
        # FIX: session.name is a str (remove() below already encodes it),
        # so it must be encoded before concatenating with bytes; the
        # original str + bytes raised TypeError.
        self.broadcast(session.name.encode() + b' has entered the room.\r\n')
        self.server.users[session.name] = session
        Room.add(self, session)
    def remove(self, session):
        Room.remove(self, session)
        self.broadcast(session.name.encode() + b' has left the room.\r\n')
    def do_say(self, session, line):
        # FIX: encode the str name. *line* is assumed to already be bytes
        # (it was concatenated with bytes in the original too) — confirm
        # against the session/handler that parses commands.
        self.broadcast(session.name.encode() + b': ' + line + b'\r\n')
    def do_look(self, session, line):
        session.push(b'The following are in this room:\r\n')
        for other in self.sessions:
            session.push(other.name.encode() + b'\r\n')  # FIX: str -> bytes
    def do_who(self, session, line):
        session.push(b'The following are logged in:\r\n')
        for name in self.server.users:
            session.push(name.encode() + b'\r\n')  # FIX: str -> bytes
class LogoutRoom(Room):
    """
    The sole purpose is to remove the user's name from the server.
    """

    def add(self, session):
        # pop() with a default swallows the never-registered case, exactly
        # like the original try/del/except KeyError.
        self.server.users.pop(session.name, None)
| true |
583baf3b9d10edd19a5d8ed95da5e0ab91aa0a8d | Python | biznixcn/algorithm_quiz | /3.py | UTF-8 | 559 | 3.9375 | 4 | [] | no_license | #!/usr/bin/python
#-*-coding:utf-8-*-
"""
Given two sorted integer arrays, write an algorithm to get back the intersection.
"""
array1 = [2,5,8,23,56,89,125,169,196]
array2 = [5,8,9,34,78,123,125]
# Two-pointer walk over both sorted arrays: advance whichever pointer sits
# on the smaller value; when both values agree, record the intersection
# element and advance both.  O(len(array1) + len(array2)).
flag1 = 0
flag2 = 0
result = []
while flag1 != len(array1) and flag2 != len(array2):
    if array1[flag1] > array2[flag2]:
        flag2+=1
    elif array1[flag1] < array2[flag2]:
        flag1+=1
    else:
        result.append(array1[flag1])
        flag1+=1
        flag2+=1
# NOTE: Python 2 print statements — this file is not Python 3 compatible.
print "array1:",array1
print "array2:",array2
print "result:",result
| true |
e41cd58e0829f047f9850b56f0ecf654407a56a8 | Python | egeorgiev699/exercicis-classroom | /Exercici5.py | UTF-8 | 337 | 3.421875 | 3 | [] | no_license | comida = input ("cuanto te ha costado la comida")
# 21% VAT on the meal price (`comida` is read from the user above).
IVA = (float(comida) * 0.21)
# 10% tip.
propina = (float(comida) * 0.1)
# Total to pay: meal + tip + VAT.
total = (float(comida) + (float(propina)) + (float(IVA)))
print ("precio comida = " + str(float( comida)))
print ("IVA = " + str(float(IVA)))
print ("propina = " + str(float(propina)))
print ("total = " + str(float(total)))
| true |
3c489bcfff2c24a6a893e30aaa10597bbc50c76d | Python | dsiegler2000/Coeus | /src/communication.py | UTF-8 | 27,082 | 2.703125 | 3 | [] | no_license | """
All code to communicate with an interface. Also includes main driver and logging setup currently.
"""
import datetime
import logging
import os
import sys
import threading
import traceback
from typing import List, Optional, Tuple
import chess
import chess.polyglot
import chess.pgn
from engine import CoeusEngine, EngineGoParams
logger = logging.getLogger(os.path.basename(__file__))
class UCIHandler:
    """
    Implements a handler for the UCI protocol with threading to independently handle input and computation.
    Follows the protocol outlined at http://wbec-ridderkerk.nl/html/UCIProtocol.html.

    Input is read synchronously in _listen(); "go" spawns a worker thread so
    the engine can search while the handler keeps listening (e.g. for "stop").
    """
    def __init__(self, engine: "CoeusEngine", ):
        self.engine = engine
        self.debug = False  # toggled by the UCI "debug on/off" command

    def start(self):
        """Enter the blocking UCI read loop."""
        logger.debug(f"Starting {self.__class__.__name__}")
        self._listen()

    def cleanup(self):
        """
        Cleanup method when the engine needs to quit.
        :return: None
        """
        pass  # For now, do nothing

    def _identify(self):
        """
        Identifies the engine in response to a "uci" command
        :return: None
        """
        UCIHandler.output(f"id name {self.engine.name}")
        UCIHandler.output(f"id author Dylan Siegler")
        UCIHandler.output(f"uciok")
        # No options can be set so no option command is sent

    def _go(self, split):
        """
        Handles to "go" command.
        :param split: The full split argument string (including the go)
        :return: None
        """
        # Go from the back picking off arguments, keep flags for the go mode/type
        use_time_control = False
        use_fixed_time = False
        use_depth = False
        use_nodes = False
        infinite = False
        params = EngineGoParams()
        for i, s in reversed(list(enumerate(split))):
            if s == "searchmoves":  # Search only the following moves (e.g. consider only e2e4 and d2d4)
                UCIHandler.output("info string searchmoves currently not supported!")
            elif s == "ponder":  # "Ponder" (search during opponent's move)
                UCIHandler.output(f"info string ponder currently not supported!")
            elif s == "wtime":
                params.wtime = int(split[i + 1])
                use_time_control = True
            elif s == "btime":
                params.btime = int(split[i + 1])
                use_time_control = True
            elif s == "winc":
                params.winc = int(split[i + 1])
                use_time_control = True
            elif s == "binc":
                params.binc = int(split[i + 1])
                use_time_control = True
            elif s == "movestogo":
                params.moves_to_go = int(split[i + 1])
                use_time_control = True
            elif s == "depth":
                params.target_depth = int(split[i + 1])
                use_depth = True
            elif s == "nodes":
                params.target_nodes = int(split[i + 1])
                use_nodes = True
            elif s == "mate":
                UCIHandler.output(f"info string mate currently not supported!")
            elif s == "movetime":
                # FIX: the fixed search time belongs on the go-parameters
                # object (ConsoleHandler sets params.fixed_time the same way);
                # the original wrote self.engine.fixed_time, so
                # params.fixed_time was never populated even though the
                # fixed-time mode flag below was set.
                params.fixed_time = int(split[i + 1])
                use_fixed_time = True
            elif s == "infinite":
                infinite = True
        valid_mode = params.set_mode(use_time_control, use_fixed_time, use_depth, use_nodes, infinite)
        if not valid_mode:
            UCIHandler.output(f"info string unsupported go command!")

        def on_engine_completed(pv_line: List[chess.Move]):
            # Report the best move; the second PV move doubles as the ponder hint.
            if len(pv_line) == 1:
                UCIHandler.output(f"bestmove {pv_line[0].uci()}")
            elif len(pv_line) > 1:
                UCIHandler.output(f"bestmove {pv_line[0].uci()} ponder {pv_line[1].uci()}")

        # Search on a worker thread so _listen() stays responsive (e.g. "stop").
        t = threading.Thread(target=self.engine.go, args=[params], kwargs={"on_completed": on_engine_completed},
                             daemon=False)
        t.start()

    def _listen(self):
        """
        Listens for all commands and properly handles them.
        :return: None
        """
        while True:
            try:
                line = input()
                logger.debug(f"UCI(received):{line}")
                split = line.split(" ")
                if line == "uci":  # Engine must identify itself
                    self._identify()
                elif line.startswith("debug"):  # Set debug mode
                    self.debug = split[1] == "on"
                elif line == "isready":  # Synchronization command, for now simply echo the readyok response
                    UCIHandler.output(f"readyok")
                elif line.startswith("setoption"):  # No options are currently supported
                    UCIHandler.output(f"info string {split[1]} is not a currently supported option!")
                elif line.startswith("register"):  # No registration needed
                    UCIHandler.output(f"info string this engine doesn't need registration!")
                elif line == "ucinewgame":  # No specific handling for new game
                    self.engine.new_game()
                    UCIHandler.output(f"info string ready for new game")
                elif line.startswith("position"):  # Handle the position parsing
                    if split[1] == "fen":
                        # "position fen <6 FEN fields> moves <moves...>":
                        # split[2:8] is the FEN, split[8] is "moves".
                        self.engine.set_position(split[9:], starting_fen=" ".join(split[2:8]))
                    elif split[1] == "startpos":
                        moves = split[3:]  # In long algebraic notation
                        self.engine.set_position(moves)
                elif line.startswith("go"):
                    self._go(split)
                elif line == "stop":
                    self.engine.stop()
                elif line.startswith("ponderhit"):
                    UCIHandler.output(f"info string ponderhit is currently not supported!")
                elif line == "quit":
                    self.cleanup()
                    break
            except IndexError as e:
                # A malformed command with too few arguments lands here.
                logger.warning(f"UCIHandler encountered an error")
                tb = traceback.format_exc()
                logger.warning(tb)
                UCIHandler.output(f"info string encountered a UCI exception {e}!")

    @staticmethod
    def output(line):
        """Send *line* to the GUI on stdout, mirroring it to the log."""
        print(line)
        logger.debug(f"UCI(sent):{line}")
class ConsoleHandler:
    """
    Implements a handler for playing in the console that uses the following simple commands:
    - undo/t: takeback move
    - pv: print PV line
    - pfen: print position FEN
    - fen: print FEN
    - s/search: search to depth 6 and print the result
    - q/quit: quit
    - any move string: move and if it is black's turn the engine will recommend a move to make (note that to accept the
    recommendation the user then has to type in that move and hit enter again)
    """

    def __init__(self, engine: "CoeusEngine", profile: bool = False):
        self.engine = engine
        self.profile = profile  # when True, replay profile_commands instead of reading stdin
        self.profile_commands = ["s", "q"]

    def start(self):
        """Run the interactive console loop until the game ends or the user quits."""
        print_next_loop = True
        post = True          # True / False / "noponderpost" — controls engine debug output
        force = False        # True: engine does NOT reply automatically after a player move
        ponder = False       # think on the opponent's time
        use_opening_book = True
        use_endgame_tablebase = True
        ponder_thread = None
        fixed_time = 30 * 1_000   # default search budget in ms
        depth = None              # fixed search depth (plies), exclusive with fixed_time
        time_control: Tuple[Optional[int], Optional[int]] = (None, None)  # t in x moves (informational only)
        i = 0
        ConsoleHandler.output("Welcome to Coeus Chess Engine - Console Mode")
        ConsoleHandler.output(f"Loaded engine from {self.engine.config_filepath}")
        ConsoleHandler.output(f"Loaded searcher from {self.engine.searcher.config_filepath}")
        ConsoleHandler.output(f"Loaded evaluator from {self.engine.evaluator.config_filepath}")
        with chess.polyglot.open_reader(self.engine.searcher.opening_book_filepath) as reader:
            ConsoleHandler.output(f"Loaded opening book from {self.engine.searcher.opening_book_filepath} "
                                  f"with {len(reader)} entries")
        ConsoleHandler.output("type `help` or `h` for help at any time")
        while not self.engine.board.outcome():
            if print_next_loop:
                ConsoleHandler.output("Current board state:")
                ConsoleHandler.output(self.engine.board)
                ConsoleHandler.output(f"FEN: {self.engine.board.fen()}")
            else:
                print_next_loop = True
            if self.profile:
                # FIX: was `>`, which indexed one past the end of
                # profile_commands (IndexError) when the list ran out.
                if i >= len(self.profile_commands):
                    break
                else:
                    input_str = self.profile_commands[i]
            else:
                input_str = input("> ")
            logger.debug(f"Console(received):{input_str}")
            i += 1
            split = input_str.split(" ")
            input_str = input_str.lower()
            if input_str == "help" or input_str == "h":  # Help
                ConsoleHandler.output("Commands:")
                ConsoleHandler.output("quit/q - quit")
                ConsoleHandler.output("undo/t - takeback the previous move")
                ConsoleHandler.output("pv [depth] - print the current principal variation line up to depth (default 6)")
                ConsoleHandler.output("pfen - print the current position FEN")
                ConsoleHandler.output("fen - print the current FEN")
                ConsoleHandler.output("openbook - print the entries in the current opening book for the board position")
                ConsoleHandler.output("useopenbook/noopenbook - whether to use the opening book (default true)")
                ConsoleHandler.output("useendgametablebase/noendgametablebase - whether to use the endgame tablebase\n"
                                      "                                         (default true)")
                ConsoleHandler.output("pgn filename - save the current game to a PGN file")
                ConsoleHandler.output("search/s - set the engine to think using the currently set parameters\n"
                                      "           (will NOT execute the move it found)")
                ConsoleHandler.output("probe/p move - probe the line for the given move in the transposition table ")
                ConsoleHandler.output("ponder/noponder - whether the engine will ponder, or think during the \n"
                                      "                  opponent's time, recommended nopost if pondering is enabled,\n"
                                      "                  and pondering will only happen if force is disabled\n"
                                      "                  (default noponder)")
                ConsoleHandler.output("stopponder - stop the engine from pondering if it currently is")
                ConsoleHandler.output("post/nopost/noponderpost - whether to print or not print the debug information\n"
                                      "                           of the engine, noponderpost means that pondering\n"
                                      "                           won't print the debug information but regular\n"
                                      "                           searching will")
                ConsoleHandler.output("force/noforce - whether the engine will automatically think after player move,\n"
                                      "noforce means that it will think, force means that it will (default noforce)")
                ConsoleHandler.output("newgame - tell the engine that a new game has begun")
                ConsoleHandler.output("setboard fen - sets the board to the specified FEN")
                ConsoleHandler.output("depth d - sets the engine to search to the specified depth (time ignored)")
                ConsoleHandler.output("time t - sets the engine to search to the specified time in seconds \n"
                                      "         (depth ignored), default is 30s")
                ConsoleHandler.output("settimecontrol t in x [moves] - sets the time control for both the player and \n"
                                      "the engine to t minutes in x moves")
                ConsoleHandler.output("view - view the current engine parameters and console settings")
                ConsoleHandler.output("")
                ConsoleHandler.output("Enter moves using e7e8q format")
                ConsoleHandler.output("Press return with an empty line to view the board")
                ConsoleHandler.output("Note that the engine thinking blocks input")
                print_next_loop = False
            elif input_str == "undo" or input_str == "t":  # Takeback
                try:
                    self.engine.board.pop()
                except IndexError:
                    ConsoleHandler.output("Cannot take back!", err=True)
            elif split[0].lower() == "pv":  # Print principal variation line
                try:
                    # FIX: use a local name here; the original rebound the
                    # engine's fixed-depth setting (`depth`) as a side effect
                    # of merely viewing the PV.
                    pv_depth = int(split[1]) if len(split) > 1 else 6
                    pv_line = [m.uci() for m in self.engine.board.generate_pv_line(pv_depth)]
                    if len(pv_line) == 0:
                        ConsoleHandler.output("Engine has no PV line, may have accessed opening book, "
                                              "not searched yet, or pondering is enabled!", err=True)
                    else:
                        ConsoleHandler.output("PV line: " + " ".join(pv_line))
                except ValueError:
                    ConsoleHandler.output(f"Invalid depth {split[1]}", err=True)
                print_next_loop = False
            elif input_str == "pfen":
                ConsoleHandler.output(self.engine.board.position_fen())
                print_next_loop = False
            elif input_str == "fen":
                ConsoleHandler.output(self.engine.board.fen())
                print_next_loop = False
            elif input_str == "openbook":
                with chess.polyglot.open_reader(self.engine.searcher.opening_book_filepath) as reader:
                    weight_sum = sum(e.weight for e in reader.find_all(self.engine.board))
                    for e in reader.find_all(self.engine.board):
                        pct = round(100 * e.weight / weight_sum, 2)
                        if pct > 1:
                            # FIX: route through ConsoleHandler.output (was a
                            # bare print) so it is logged like everything else.
                            ConsoleHandler.output(f"({pct: >5}%): {e.move.uci()} (weight {e.weight})")
                print_next_loop = False
            elif input_str == "useopenbook":
                use_opening_book = True
                ConsoleHandler.output("opening book enabled")
                print_next_loop = False
            elif input_str == "noopenbook":
                use_opening_book = False
                ConsoleHandler.output("opening book disabled")
                print_next_loop = False
            elif input_str == "useendgametablebase":
                use_endgame_tablebase = True
                ConsoleHandler.output("endgame tablebase enabled")
                print_next_loop = False
            elif input_str == "noendgametablebase":
                use_endgame_tablebase = False
                ConsoleHandler.output("endgame tablebase disabled")
                print_next_loop = False
            elif split[0].lower() == "pgn":
                if len(split) == 2:
                    filepath = split[1] + ("" if split[1].endswith(".pgn") else ".pgn")
                    self._save_to_pgn(filepath)
                else:
                    ConsoleHandler.output("Usage: pgn filepath!", err=True)
                print_next_loop = False
            elif input_str == "s" or input_str == "search":  # Search
                params = EngineGoParams()
                params.fixed_time = fixed_time
                params.target_depth = depth
                params.set_mode(False, bool(fixed_time), bool(depth), False, False)
                pv_line = self.engine.go(params)
                ConsoleHandler.output("PV line: " + " ".join([m.uci() for m in pv_line]))
            elif split[0] == "probe" or split[0] == "p":
                try:
                    move = chess.Move.from_uci(split[1].lower())
                    if move in self.engine.board.legal_moves:
                        # Temporarily play the move to read its line, then undo.
                        self.engine.board.push(move)
                        pv_line = [m.uci() for m in self.engine.board.generate_pv_line(depth=6)]
                        self.engine.board.pop()
                        if len(pv_line) == 0:
                            ConsoleHandler.output(f"Engine has no line for move {move.uci()}, may have accessed "
                                                  f"opening book, not searched that line yet, or pondering is enabled!",
                                                  err=True)
                        else:
                            ConsoleHandler.output(f"Line following move {move.uci()}: " + " ".join(pv_line))
                    else:
                        raise ValueError("Illegal move!")
                    print_next_loop = False
                except (ValueError, IndexError, AttributeError):
                    ConsoleHandler.output("Invalid move, usage: probe move", err=True)
            elif input_str == "q" or input_str == "quit":
                break
            elif input_str == "post":  # Post output
                self.engine.log_func = ConsoleHandler.output
                post = True
                ConsoleHandler.output("post enabled")
                print_next_loop = False
            elif input_str == "nopost":  # Don't post output
                self.engine.log_func = lambda s: None
                post = False
                ConsoleHandler.output("post disabled")
                print_next_loop = False
            elif input_str == "noponderpost":
                post = "noponderpost"
                self.engine.log_func = lambda s: None
                ConsoleHandler.output("post set to noponderpost")
                print_next_loop = False
            elif input_str == "force":  # Force inputs (engine WON'T move)
                force = True
                ConsoleHandler.output("force enabled")
                print_next_loop = False
            elif input_str == "noforce":  # No forcing inputs (engine WILL move)
                force = False
                ConsoleHandler.output("force disabled")
                print_next_loop = False
            elif input_str == "ponder":  # Ponder
                ponder = True
                post = "noponderpost"
                ConsoleHandler.output("ponder enabled (post set to noponderpost)")
                print_next_loop = False
            elif input_str == "noponder":  # Don't ponder
                ponder = False
                post = True
                self.engine.log_func = ConsoleHandler.output
                self.engine.stop()
                ponder_thread = self._kill_ponder_thread(ponder_thread, post)
                ConsoleHandler.output("ponder disabled (and post enabled)")
                print_next_loop = False
            elif input_str == "stopponder":
                self.engine.stop()
                if ponder_thread:
                    ponder_thread = self._kill_ponder_thread(ponder_thread, post)
                    ConsoleHandler.output("Stopped pondering")
                else:
                    ConsoleHandler.output("The engine was not pondering!", err=True)
                print_next_loop = False
            elif input_str == "newgame":  # Start new game
                if ponder:
                    ConsoleHandler.output("ponder must be disabled to start newgame!", err=True)
                else:
                    ConsoleHandler.output("New game created")
                    self.engine.new_game()
            elif split[0].lower() == "setboard":  # Set board
                try:
                    if ponder:
                        ConsoleHandler.output("ponder must be disabled to set board!", err=True)
                    else:
                        if len(split) > 1:
                            fen = " ".join(split[1:])
                            self.engine.board.set_fen(fen)
                        else:
                            ConsoleHandler.output("Usage: setboard fen", err=True)
                except ValueError:
                    ConsoleHandler.output(f"{split[1]} is not a valid FEN!", err=True)
            elif split[0].lower() == "time":  # Set time
                try:
                    if len(split) > 1:
                        t = int(split[1])
                        fixed_time = t * 1_000
                        depth = None
                        time_control = (None, None)
                        ConsoleHandler.output(f"time set to {t}s")
                    else:
                        ConsoleHandler.output("Usage: time t", err=True)
                except ValueError:
                    ConsoleHandler.output(f"{split[1]} is not a valid time!", err=True)
                print_next_loop = False
            elif split[0].lower() == "depth":  # Set depth
                try:
                    if len(split) > 1:
                        fixed_time = None
                        d = int(split[1])
                        depth = d
                        time_control = (None, None)
                        ConsoleHandler.output(f"depth set to {d}")
                    else:
                        ConsoleHandler.output("Usage: depth d", err=True)
                except ValueError:
                    ConsoleHandler.output(f"{split[1]} is not a valid depth!", err=True)
                print_next_loop = False
            elif split[0].lower() == "settimecontrol":  # settimecontrol t in x [moves]
                try:
                    t = int(split[1])
                    x = int(split[3])
                    fixed_time = None
                    depth = None
                    time_control = (t, x)
                except (ValueError, IndexError):
                    # FIX: report the malformed command instead of silently
                    # swallowing it (was `pass`).
                    ConsoleHandler.output("Usage: settimecontrol t in x [moves]", err=True)
            elif input_str == "view":  # View searching parameters
                if fixed_time:
                    ConsoleHandler.output(f"Fixed time mode: {fixed_time // 1_000}s time limit")
                if depth:
                    ConsoleHandler.output(f"Fixed depth mode: {depth} plies limit")
                if post == "noponderpost":
                    post_str = "noponderpost"
                else:
                    post_str = ("enabled" if post else "disabled")
                ConsoleHandler.output("             post: " + post_str)
                ConsoleHandler.output("            force: " + ("enabled" if force else "disabled"))
                ConsoleHandler.output("           ponder: " + ("enabled" if ponder else "disabled"))
                ConsoleHandler.output("     opening book: " + ("enabled" if use_opening_book else "disabled"))
                ConsoleHandler.output("endgame tablebase: " + ("enabled" if use_endgame_tablebase else "disabled"))
                print_next_loop = False
            elif input_str != "":
                # Anything else is treated as a move in UCI format.
                try:
                    move = chess.Move.from_uci(input_str.replace(" ", "").replace("-", ""))
                    if self.engine.board.is_legal(move):
                        self.engine.stop()
                        if ponder_thread:
                            ponder_thread = self._kill_ponder_thread(ponder_thread, post)
                        if post == "noponderpost":
                            self.engine.log_func = ConsoleHandler.output
                        self.engine.board.push(move)
                        ConsoleHandler.output(f"Received move {move.uci()}")
                        if not force:
                            ConsoleHandler.output(f"Engine thinking using specified parameters...")
                            # Play the engine's recommended move
                            params = EngineGoParams()
                            params.fixed_time = fixed_time
                            params.target_depth = depth
                            params.use_opening_book = use_opening_book
                            params.use_endgame_tablebase = use_endgame_tablebase
                            params.set_mode(False, bool(fixed_time), bool(depth), False, False)
                            pv_line = self.engine.go(params)
                            if len(pv_line) > 0:
                                pv_move = pv_line[0]
                                self.engine.board.push(pv_move)
                                # Print the result as a colored string
                                ConsoleHandler.output(f"\033[92mCoeus moved {pv_move.uci()}\033[0m")
                            if ponder:  # Pondering
                                if post == "noponderpost":
                                    self.engine.log_func = lambda s: None
                                ponder_params = EngineGoParams()
                                # FIX: configure the ponder params object (the
                                # original mistakenly called params.set_mode).
                                ponder_params.set_mode(False, False, False, False, False)
                                ponder_thread = threading.Thread(target=self.engine.go, args=[ponder_params],
                                                                 kwargs={"ponder": True}, daemon=False)
                                ponder_thread.start()
                    else:  # Invalid move
                        raise ValueError("Non-legal move!")
                except ValueError:
                    ConsoleHandler.output(f"{input_str}: Invalid move or unknown command, "
                                          f"type `help` or `h` for help!", err=True)
        outcome = self.engine.board.outcome()
        # FIX: outcome is None when the user quit before the game finished;
        # the original dereferenced outcome.winner unconditionally and crashed.
        if outcome is not None:
            if outcome.winner is not None:
                winner = "White" if outcome.winner == chess.WHITE else "Black"
                ConsoleHandler.output(f"{winner} won!")
            else:
                ConsoleHandler.output(f"Draw/stalemate due to {str(outcome.termination)}")
        self._kill_ponder_thread(ponder_thread, post)
        pgn = input("Save to PGN? [Y/n]: ").lower()
        if pgn == "y" or pgn == "yes":
            timestamp = datetime.datetime.now().strftime("%y-%m-%d %H:%M:%S")
            filepath = os.path.join(f"logs/{timestamp}.pgn")
            self._save_to_pgn(filepath)
        ConsoleHandler.output("Thank you for playing!")

    def _save_to_pgn(self, filepath):
        """Write the current game to *filepath* as PGN."""
        game = chess.pgn.Game.from_board(self.engine.board)
        with open(filepath, "w") as f:
            f.write(str(game))
        ConsoleHandler.output(f"Saved PGN to {filepath}")

    def _kill_ponder_thread(self, ponder_thread: Optional[threading.Thread], post) -> None:
        """Join a pondering thread (if any) and restore the engine's log sink.

        Always returns None so callers can use the result to clear their
        reference (ponder_thread = self._kill_ponder_thread(...)).
        """
        if ponder_thread:
            import time  # hoisted out of the wait loop below
            ponder_thread.join(timeout=0.5)
            while ponder_thread.is_alive():
                time.sleep(0.5)
                ConsoleHandler.output("Waiting to kill ponder thread...")
            if post == "noponderpost":
                self.engine.log_func = ConsoleHandler.output
        return None

    @staticmethod
    def output(line, err=False):
        """Print *line* (to stderr when err=True), mirroring it to the log."""
        logger.debug(line)
        print(line, file=sys.stderr if err else sys.stdout)
| true |
5a1dfafe93afb2a5d25136e57f6dc672f040ef5d | Python | ventura1981/CursoemVideo-Python | /Desafio_005.py | UTF-8 | 317 | 4.53125 | 5 | [] | no_license | # Faça um programa que leia um número inteiro e mostre na tela o seu sucessor e antecessor.
# Read an integer and print it, its successor and its predecessor,
# right-aligned in 10-character columns.
valor = int(input('Digite um número inteiro:'))
#sucessor = valor + 1
#antecessor = valor -1
#print('{:=20}')
print('Valor digitado: {:>10} \nSucessor: {:>10} \nAntecessor: {:>10}'.format(valor, valor+1, valor-1))
929f84a5b082410705f00852b91cba557328452a | Python | jiadaizhao/LeetCode | /0601-0700/0664-Strange Printer/0664-Strange Printer.py | UTF-8 | 539 | 3.171875 | 3 | [
"MIT"
] | permissive | class Solution:
def strangePrinter(self, s: str) -> int:
table = [[0] * len(s) for i in range(len(s))]
def dfs(s, l, r):
if l > r:
return 0
if table[l][r]:
return table[l][r]
count = dfs(s, l + 1, r) + 1
for i in range(l + 1, r + 1):
if (s[i] == s[l]):
count = min(count, dfs(s, l + 1, i - 1) + dfs(s, i, r))
table[l][r] = count
return count
return dfs(s, 0, len(s) - 1)
| true |
d6e7798d19eb89b3a125b2c97192a6fb85a68f8b | Python | nnim99/Introduction-to-Programming-Python- | /Lab4/Task 4/Task 3.py | UTF-8 | 466 | 3.28125 | 3 | [] | no_license | def func():
 # Library-fine calculator: reads how many days late the book is and a day
 # count, then charges a per-day rate that grows with the lateness bracket.
 late = int(input("Enter Number of Days a person is late on submitting the book=",))
 day = int(input("Enter number of days:"))
 if late <=5:
  # up to 5 days late: 0.5 per day
  days = day
  fine = days*0.5
  print ("The fine is:", fine)
 elif late > 5 and late <= 10 :
  # 6-10 days late: 1 per day
  days = day
  fine = days*1
  print ("The fine is:", fine)
 elif late > 10 and late <= 30 :
  # 11-30 days late: 5 per day
  days = day
  fine = days*5
  print ("The fine is:", fine)
 else:
  # more than 30 days late: no fine, the membership is revoked instead
  print ("Your membership is cancelled")
func()
| true |
2fcc13e76d0c1d842674e696743e42f603003bfa | Python | algo74/predictsim | /simulation/pyss/src/predictors/valopt/algos/sgd.py | UTF-8 | 719 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python3
# encoding: utf-8
#import numpy as np
class SGD(object):
    """Stochastic Gradient Descent learner with eta/n learning rate.

    ``model`` must expose ``predict(x)``, ``get_param_vector()``,
    ``set_param_vector(W)`` and a ``dim`` attribute; ``loss`` must expose
    ``grad_loss(x, y, w)`` returning a gradient of length ``model.dim``.
    """
    def __init__(self, model, loss, eta, verbose=False):
        self.model = model
        self.loss = loss
        self.eta = eta          # base learning rate
        self.verbose = verbose  # when True, print the gradient at each fit step
        self.n = 200            # step counter; effective rate decays as 1/n

    def predict(self, x):
        """Return the model's prediction for input *x*."""
        return self.model.predict(x)

    def fit(self, x, y, w=1):
        """Take one SGD step on example (x, y) with sample weight *w*."""
        W = self.model.get_param_vector()
        G = self.loss.grad_loss(x, y, w)
        if self.verbose:
            # FIX: this debug print was unconditional despite the verbose flag.
            print(G)
        # NOTE(review): the 1e-11 damping factor is hard-coded on top of eta
        # — presumably a stability hack; confirm before tuning eta.
        for i in range(0, self.model.dim):
            W[i] -= self.eta * 0.00000000001 * G[i] / float(self.n)
        self.n += 1
        self.model.set_param_vector(W)
| true |
0a74bb1a01b4ff31292c79e50b589df8bd76e8ab | Python | hyperskill/hs-test | /src/test/java/projects/python/coffee_machine/stage4/machine/coffee_machine.py | UTF-8 | 1,160 | 3.78125 | 4 | [] | no_license | water = 400
milk = 540   # ml of milk in the machine (water = 400 is set just above)
beans = 120  # grams of coffee beans
cups = 9     # disposable cups available
money = 550  # cash box, in dollars
def machine_status():
    """Print the machine's current inventory and cash (reads module globals)."""
    status = [
        "The coffee machine has:",
        f"{water} of water",
        f"{milk} of milk",
        f"{beans} of coffee beans",
        f"{cups} of disposable cups",
        f"{money} of money",
    ]
    print("\n".join(status))
# Show the starting inventory, run one user transaction, then show the result.
machine_status()
action = input("Write action (buy, fill, take):\n")
if action == 'buy':
    # 1 - espresso, 2 - latte; any other number falls through to cappuccino.
    typ = int(input("What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino:\n"))
    if typ == 1:
        # espresso: 250 ml water, 16 g beans, $4
        water -= 250
        beans -= 16
        money += 4
        cups -= 1
    elif typ == 2:
        # latte: 350 ml water, 75 ml milk, 20 g beans, $7
        water -= 350
        milk -= 75
        beans -= 20
        money += 7
        cups -= 1
    else:
        # cappuccino: 200 ml water, 100 ml milk, 12 g beans, $6
        water -= 200
        milk -= 100
        beans -= 12
        money += 6
        cups -= 1
elif action == 'fill':
    # Restock each resource by a user-supplied amount.
    water += int(input("Write how many ml of water do you want to add:\n"))
    milk += int(input("Write how many ml of milk do you want to add:\n"))
    beans += int(input("Write how many grams of coffee beans do you want to add:\n"))
    cups += int(input("Write how many disposable cups of coffee do you want to add:\n"))
elif action == 'take':
    # Empty the cash box.
    print(f"I gave you ${money}")
    money = 0
machine_status()
| true |
3859a700a8353d2fcec1427f80a26777b39734db | Python | Sanjayvaradha/Projects | /Face and Emotion detection.py | UTF-8 | 1,372 | 2.9375 | 3 | [] | no_license |
import cv2
# Haar-cascade classifiers; the three XML files must sit next to this script
# (or be full paths), otherwise detectMultiScale silently finds nothing.
face_detect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_detect = cv2.CascadeClassifier('haarcascade_eye.xml')
smile_detect = cv2.CascadeClassifier('haarcascade_smile.xml')
def detection(gray, frame):
    """Draw face (blue), eye (green) and smile (red) boxes onto *frame*.

    *gray* is the grayscale version of *frame* used for detection; eyes and
    smiles are searched only inside each detected face region. The frame is
    modified in place and also returned.
    """
    for (fx, fy, fw, fh) in face_detect.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(frame, (fx, fy), (fx + fw, fy + fh), (255, 0, 0), 2)
        face_gray = gray[fy:fy + fh, fx:fx + fw]
        face_color = frame[fy:fy + fh, fx:fx + fw]
        for (ex, ey, ew, eh) in eye_detect.detectMultiScale(face_gray, 1.1, 20):
            cv2.rectangle(face_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
        for (sx, sy, sw, sh) in smile_detect.detectMultiScale(face_gray, 1.7, 22):
            cv2.rectangle(face_color, (sx, sy), (sx + sw, sy + sh), (0, 0, 255), 2)
    return frame
video_capture = cv2.VideoCapture(0)  # open the default webcam
while True: # Read frames until the user quits.
    _, frame = video_capture.read() # We get the last frame.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Grayscale copy for the detectors.
    canvas = detection(gray, frame) # Frame annotated with face/eye/smile boxes.
    cv2.imshow('Video', canvas) # We display the outputs.
    if cv2.waitKey(1) & 0xFF == ord('q'): # Quit when the 'q' key is pressed.
        break # We stop the loop.
video_capture.release() # We turn the webcam off.
cv2.destroyAllWindows() # Close the display window.
| true |
32943b4c36adc522d51c567cd971342c82b63248 | Python | jmontara/Purchase-Guidance-from-Quickbooks-report | /functions/getshipments.py | UTF-8 | 8,940 | 2.921875 | 3 | [] | no_license | # filename: getshipments.py
import datetime
import cycletimes
def getshipments(items):
    """
    Pair purchase transactions with their receipts for every item.

    A "buy cycle" is either a Credit Card Charge (order and receipt at
    once) or a zero-quantity Purchase Order matched with the closest later
    Bill / Item Receipt.
    Inputs:
        items - list of item objects
    Outputs:
        buyShipmentsByItem
            - dict mapping item name -> list of cycletimes.Buy objects;
              items with no completed buy cycle are omitted.
            example: {item: [shipment1, shipment2, shipment3]}
    """
    print "\n\nentering getshipments()"
    buyShipmentsByItem = {}
    # QuickBooks transaction types that open / close a purchase cycle.
    buyStartTransactionTypes = ['Purchase Order', 'Credit Card Charge']
    buyEndTransactionTypes = ['Bill', 'Item Receipt']
    for item in items:
        startTransactions = []
        endTransactions = []
        buys = []
        # Split the item's transactions into cycle starts and cycle ends.
        for transaction in item.getXactions():
            type = transaction.getType()  # NOTE: shadows the builtin `type`
            qty = transaction.getQty()
            if type in buyStartTransactionTypes\
            and qty == '0':
                startTransactions.append(transaction)
            # A Credit Card Charge with a real quantity is both start and end.
            if type in buyStartTransactionTypes\
            and not qty == '0'\
            and type == 'Credit Card Charge':
                startTransactions.append(transaction)
            elif type in buyEndTransactionTypes\
            and not(qty == '0'):
                endTransactions.append(transaction)
        # look at most recent start transaction.
        def getDate(transaction):
            return transaction.getDate()
        sortedStartTransactions = sorted(startTransactions,
                                         key=getDate,
                                         reverse=True)
        sortedEndTransactions = sorted(endTransactions,
                                       key=getDate,
                                       reverse=True)
        for buy in sortedStartTransactions:
            # Append to buys where there is no start + end transaction in data
            if buy.getType() == 'Credit Card Charge':
                startTransaction = buy
                endTransaction = buy
                buys.append(cycletimes.Buy(startTransaction,endTransaction))
            # Append to buys where there is start + end transaction in data
            elif buy.getQty() == '0':
                buyDte = buy.getDate()
                # find the receipt with the smallest non-negative lead time
                leadTime = datetime.timedelta(999)
                for ship in sortedEndTransactions:
                    shipDte = ship.getDate()
                    thisLeadTime = shipDte - buyDte
                    zeroLeadTime = shipDte - shipDte
                    if zeroLeadTime <= thisLeadTime < leadTime:
                        leadTime = thisLeadTime
                        startTransaction = buy
                        endTransaction = ship
                # scrub transaction(s) entered with error: one known pair
                # (Logic Hydraulic Controls, 90-day lead) is excluded here.
                if leadTime.days >89:
                    continue
                # NOTE(review): if no receipt qualified above,
                # startTransaction/endTransaction still hold values from an
                # earlier iteration (or are unbound on the first) -- verify.
                buys.append(cycletimes.Buy(startTransaction,endTransaction))
        # only make entries if there are buys
        itemName = item.getItemName()
        if len(buys)>0:
            buyShipmentsByItem[itemName] = buys
    return buyShipmentsByItem
def getshipmentscustomer(items):
    """
    Pair each Sales Order with the closest later Invoice for the same customer.

    Only shipments that go directly to customers populate the result.
    Inputs:
        items - list of item objects
    Outputs:
        itemshipments
            - dict mapping item name -> list of cycletimes.Sell objects;
              items without sales are omitted.
            example: {item: [shipment1, shipment2, shipment3]}
    """
    print "entering getshipmentscustomer(items)"
    itemshipments = {}
    sellStartTransactionTypes = ['Sales Order']
    sellEndTransactionTypes = ['Invoice']
    for item in items:
        startTransactions = []
        endTransactions = []
        sells = []
        # Split the item's transactions into order starts and invoice ends.
        for transaction in item.getXactions():
            type = transaction.getType()  # NOTE: shadows the builtin `type`
            qty = transaction.getQty()
            if type in sellStartTransactionTypes\
            and qty == '0':
                startTransactions.append(transaction)
            elif type in sellEndTransactionTypes\
            and not(qty == '0'):
                endTransactions.append(transaction)
        # look at most recent start transaction.
        def getDate(transaction):
            return transaction.getDate()
        sortedStartTransactions = sorted(startTransactions,
                                         key=getDate,
                                         reverse=True)
        sortedEndTransactions = sorted(endTransactions,
                                       key=getDate,
                                       reverse=True)
        for sale in sortedStartTransactions:
            saleDte = sale.getDate()
            saleCustomer = sale.getName()
            # pick the invoice (same customer, same-or-later date) with the
            # smallest lead time
            leadTime = datetime.timedelta(9999)
            for ship in sortedEndTransactions:
                shipCustomer = ship.getName()
                shipDte = ship.getDate()
                thisLeadTime = shipDte - saleDte
                zeroLeadTime = shipDte - shipDte
                # The try/except doubles as a conditional: any failed assert
                # simply skips this candidate invoice.
                try:
                    assert shipCustomer == saleCustomer
                    assert zeroLeadTime <= thisLeadTime
                    assert thisLeadTime < leadTime
                    leadTime = thisLeadTime
                    startTransaction = sale
                    endTransaction = ship
                except:
                    pass
            # NOTE(review): if no invoice matched this sale,
            # startTransaction/endTransaction are stale from a previous sale
            # (or unbound on the very first one) -- verify.
            demandShipment = cycletimes.Sell(startTransaction,endTransaction)
            sells.append(demandShipment)
            try:
                # debug hook: surface one known errant shipment
                assert demandShipment.getDestination() == "Bristol-Myers Squibb - Basso"
                print "demandShipment.getDestination()", demandShipment.getDestination()
                print "demandShipment:", demandShipment
                print "\n"
            except:
                pass
        # only make entries if there are sells
        itemName = item.getItemName()
        if len(sells)>0:
            itemshipments[itemName] = sells
    return itemshipments
def addDemandShipments(itemShipments, items, toTest = True):
    """
    Propagate each sold item's shipments down its indented bill of materials.

    For every shipment of a sold item, a scaled "demand shipment" is built
    for each component in the item's iBOM and attached to that component.
    Inputs:
        itemshipments - dict,
            example, itemshipments[itemName] = list of sell shipments
        items - list of item objects
        toTest - currently unused (the early-exit code that read it is
            commented out)
    Outputs:
        itemDemandShipments
            example: {item: [shipment1, shipment2, shipment3]}
    """
    print "\n\nentering addDemandShipments()"
    itemDemandShipments = {}
    #Item name to object lookup dictionary
    itemName2Object = {}
    for item in items:
        itemName2Object[item.getItemName()] = item
    for itemSoldName in itemShipments.keys():
        itemSold = itemName2Object[itemSoldName]
        itemsInItemSoldIbom = itemSold.getIbom().getItems()
        for itemSoldShipment in itemShipments[itemSoldName]:
            itemSoldQty = itemSoldShipment.getQty()
            for itemInItemSoldIbom in itemsInItemSoldIbom:
                # iBOM row indexing used here: [0] qty-per-assembly,
                # [2] component name, [3] component description
                # ([1] unused -- TODO confirm its meaning).
                # The scaled quantity string is truncated to 4 characters.
                demandQty = str(float(itemSoldQty) * float(itemInItemSoldIbom[0]))[:4]
                demandItemName = itemInItemSoldIbom[2]
                demandItemDesc = itemInItemSoldIbom[3]
                # generate a new shipment taking dates from the itemSoldShipment
                # and quantities from above
                demandShipment = itemSoldShipment.getModifiedClone(demandQty, demandItemName,
                                                                   demandItemDesc)
                itemName2Object[demandItemName].addDemandShipment(demandShipment)
    for key in itemName2Object.keys():
        item = itemName2Object[key]
        # populate with all shipments, including those of parts not purchased
        itemDemandShipments[item.getItemName()] = item.getDemandShipments()
    # Alternative implementation:
    # limit population to look only at purchased items:
    # if item.isPurchased():
    #     itemDemandShipments[item.getName()] = item.getDemandShipments()
    return itemDemandShipments
if __name__ == "__main__":
    # Module is import-only; nothing runs when executed directly.
    pass
| true |
96150b623621ed984ca1c098fb1255d7eb5edc88 | Python | CoinArcade/LISA | /logic_module.py | UTF-8 | 11,160 | 2.796875 | 3 | [] | no_license | #############################################################################################################################################################
# LISA's Backend Functions
#############################################################################################################################################################
from speech_to_text import Voice_in as listen
from text_to_speech import Voice_out as speak
from time import sleep
from misc_methods import align,clean_slate,choice
import pickle
def age():
    """Announce LISA's age in days, counted from its fixed creation date."""
    from datetime import date
    birth_date = date(2020, 7, 13)  # project creation day
    days_old = (date.today() - birth_date).days
    speak('Well....i was created on July 13th ,2020.')
    speak('So that means.....I\'m '+str(days_old)+' days old .')
def greet() :
    """Greet the stored admin user by first name, based on the current hour.

    Reads the pickled User object from ``User_data.pkl``. The file handle is
    now closed deterministically via ``with`` (the old code leaked it).
    """
    from datetime import datetime as dt
    ct = dt.now().hour  # current hour, 0-23
    with open('User_data.pkl', 'rb') as fd:
        adm = pickle.load(fd)
    if ct < 12 :
        speak(' Good Morning,'+adm.f_name+'!')
    elif ct < 17 :
        speak(' Good Afternoon, '+adm.f_name+' ! ')
    else :
        speak('Good Evening, '+adm.f_name+' ! ')
def menu() :
    """Clear the screen and show LISA's capability inventory for a few seconds."""
    clean_slate()
    print('\n'*4+'''
            Some of the things I can do :)
                > Introduce myself
                > I can play your favourite music
                > Get you the current date or time
                > Do a Wikipedia search !
                > Recommend some movies
                \n\n\n
        <.> Just ask me " What can you do ? " , to see this Inventory again. <.>
    ''')
    sleep(7)  # give the user time to read before the next screen clear
def interact(cmd) :
    # TODO: conversational handling of ``cmd`` is not implemented yet.
    pass
def name() :
    """Introduce LISA by its full acronym."""
    clean_slate()
    print(align+':)'.center(130))
    speak(' I\'m technically called Logically Interactive Software Algorithm. But, you can call me LISA ! ')
def ver(v) :
    """Speak the current operating version ``v``."""
    speak(' My current operating version is '+str(v)+'.')
def creator() :
    """Credit LISA's author and creation date."""
    clean_slate()
    print(align+':)'.center(130))
    speak('I was created by Moh nish on July 13th, 2020.')
def techspex(v):
    """Describe what LISA is, then speak its version ``v``."""
    clean_slate()
    print(align+':)'.center(130))
    speak(" I'm a Virtual Voice Assistant built using Python 3. I can also function as a ChatBot.")
    speak(' My current operating version is '+str(v)+'.')
def play_song() :
    """Shuffle-play MP3s from the Songs folder with keyboard control.

    Keys while playing: SPACE pauses/resumes, X skips, ENTER stops.
    """
    import vlc
    import os
    import random
    import getch
    playing = True
    # Hard-coded music folder; every file in it is treated as a song.
    s_dir = 'C:/Users/mohni/Documents/Personal/Projects/LISA/Songs/'
    song_list = os.listdir(s_dir)
    n = None
    while playing :
        c = 0  # pause-toggle counter: even = playing glyph, odd = paused glyph
        # Pick a random index different from the previous one.
        # NOTE(review): randint's upper bound is inclusive, so
        # m == len(song_list) can occur; the resulting IndexError is
        # silently absorbed by the outer except -- consider randrange().
        m = random.randint(0,len(song_list))
        while m == n :
            m = random.randint(0,len(song_list))
        n = m
        try :
            song = song_list[n]
            s = vlc.MediaPlayer(s_dir+song)
            clean_slate()
            print(align+':)'.center(130))
            try :
                speak(f'PLaying : {song[:-4]} ',bk=False)
                s.play()
            except:
                print('error')
            # Key-polling loop for the current song.
            while True :
                clean_slate()
                if c%2 == 0 :
                    print('\n'*16+"|>".center(132))
                else :
                    print('\n'*16+'||'.center(132))
                print('\n'*15+'<..> Press SPACEBAR to pause and ENTER to stop playing <..>'.rjust(100))
                print('\n'+'<.> Press X to skip this song <.>'.rjust(85))
                try :
                    sleep(0.1)
                    x = getch.getch()
                    x = x.decode('utf-8')
                    x = x.lower()
                    if x == ' ' :
                        s.pause()
                        c += 1
                    elif x.lower() == 'x' :
                        c = 0
                        clean_slate()
                        print('\n'*16+">>".center(132))
                        s.stop()
                        speak('Skipping this song.')
                        break
                    elif x == '\r' :
                        clean_slate()
                        print('\n'*16+'::'.center(132))
                        playing = False
                        s.stop()
                        break
                except :
                    # getch/decode failures just re-poll
                    continue
        except :
            # bad index or unplayable file: pick another song
            continue
def today_date() :
    """Speak today's date, e.g. 'Monday . July 13 , 2020'."""
    from datetime import datetime
    spoken_date = datetime.now().strftime("%A . %B %d , %Y")
    speak('Today is : ' + spoken_date)
def today_time() :
    """Speak the current time, then display a live clock for ~10 seconds."""
    from datetime import datetime
    c = 0
    x = datetime.now()
    t = x.strftime(" %I:%M %p")
    speak('The current time is : '+t,bk=False)
    # Refresh the on-screen clock once per second, ten times.
    while c < 10 :
        clean_slate()
        y = datetime.now()
        ti = y.strftime(" %I:%M:%S %p")
        print(align+ti.center(133))
        sleep(1)
        c += 1
def greetx() :
    """Clear the screen and speak one of six random greetings."""
    from random import randint
    clean_slate()
    greetings = ('Hello !', 'Hi !', 'Hola !', 'Hey there !', 'Well hello there !', 'Hey !')
    speak(greetings[randint(0, len(greetings) - 1)])
def personal_mode() :
    """Switch LISA into chat ("personal") mode and show how to switch back."""
    clean_slate()
    # (removed dead local ``talking`` -- it was assigned but never read)
    speak('Alright then ! Switching to personal mode .')
    print(align+'\n'*15+'<.> Just say " Switch back to Virtual Assistant " to use LISA as an Virtual Assistant <.>'.center(134))
def wiki_search() :
    """Interactive Wikipedia loop: listen for a term, speak a 2-sentence summary.

    Falls back to typed input when the user says the spoken result was wrong.
    """
    from wikipedia import summary as wiki
    clean_slate()
    search = True
    while search == True :
        clean_slate()
        speak('What do you want to search for ?')
        query = listen(z=False)
        # Retry loop for a single search term.
        while True :
            clean_slate()
            try :
                data = wiki(query,sentences=2)
                clean_slate()
                print(align+query.center(130))
                speak(data)
                sleep(2)
                clean_slate()
            except :
                clean_slate()
                speak('Hmm. No data found.')
            a = choice('Was that what you were looking for ?')
            # NOTE(review): this is a substring test against ' yes yeah s yea yep yup ',
            # so short fragments like 'ye' also match -- verify intent.
            if a in ' yes yeah s yea yep yup ' :
                break
            else :
                # Speech recognition missed: let the user type the term.
                clean_slate()
                print(align+'Kindly type in your search term :\n '.center(134))
                query = input('\n'.center(120))
                continue
        clean_slate()
        speak('Do you want to search for anything else ?\n')
        a = choice('Wanna search another term ?')
        if a in ' yes yeah s yea yep yup okay alright ' :
            continue
        else :
            speak('Okay !')
            sleep(1)
            search = False
def location() :
    """Speak the user's city/state/country, geolocated from the public IP."""
    import requests
    import json
    # SECURITY NOTE(review): the ipstack access key is hard-coded here;
    # consider loading it from configuration/environment instead.
    send_url = "http://api.ipstack.com/check?access_key=4451c6bc43183503744b9f5b2f948aff"
    geo_req = requests.get(send_url)
    geo_json = json.loads(geo_req.text)
    latitude = geo_json['latitude']    # fetched but currently unused
    longitude = geo_json['longitude']  # fetched but currently unused
    country = geo_json['country_name']
    state = geo_json['region_name']
    city = geo_json['city']
    speak('You\'re currently in : '+city+' , '+state+' , '+country)
def movie_prediction():
    """Recommend 7 movies similar to one the user names.

    Uses cosine similarity over a bag-of-words built from each movie's
    title, certificate, genre, director and two lead stars.
    """
    # Importing required libraries and reading the dataset.
    # NOTE: np, load/dump and system are imported but unused here.
    import pandas as pd
    import numpy as np
    from sklearn.metrics.pairwise import cosine_similarity
    from sklearn.feature_extraction.text import CountVectorizer
    from pickle import load,dump
    from os import system
    df = pd.read_csv('Datasets/movies_data.csv')
    clean_slate()
    # Build one space-joined feature string per movie row.
    def get_imp_features(data) :
        important_features = []
        for i in range(0,data.shape[0]):
            important_features.append(data['Title'][i]+' '+data['Certificate'][i]+' '+data['Genre'][i]+' '+data['Director'][i]+' '+data['Star1'][i]+' '+data['Star2'][i])
        return important_features
    # Getting the title from the user.
    speak('Enter the most recent movie you watched: ')
    title = input(align+'Movie: '.rjust(60))
    # Find the dataset title closest to what the user typed (title-only
    # cosine similarity); used when the exact title is not present.
    def alt_title(df,title):
        cm1 = CountVectorizer().fit_transform([title]+list(df['Title']))
        cs1 = cosine_similarity(cm1)
        scores = list(enumerate(cs1[0]))
        sorted_scores = sorted(scores,key = lambda x : x[1], reverse = True)
        sorted_scores = sorted_scores[1:]
        t = df['Title'][sorted_scores[0][0]-1]
        return t
    # Vectorizing all the important features and creating a cosine-similarity matrix.
    df['imp_features'] = get_imp_features(df)
    cm = CountVectorizer().fit_transform(list(df['imp_features']))
    cs = cosine_similarity(cm)
    # Finding the movie id of the title the user entered.
    f= 0  # flag: 1 means we fell back to the closest-matching title
    try:
        mov_id = df[df.Title == title]['movie_id'].values[0]
    except:
        f=1
        title = alt_title(df,title)
        mov_id = df[df.Title == title]['movie_id'].values[0]
    # Pair every movie index with its similarity score and sort descending.
    scores = list(enumerate(cs[mov_id]))
    sorted_scores = sorted(scores,key = lambda x : x[1], reverse = True)
    if f == 0:
        # Drop the first entry (the movie itself) only for exact matches.
        sorted_scores = sorted_scores[1:]
    # Print the 7 most recommended movies.
    m = 0
    clean_slate()
    speak('The 7 most recommended movies are: ')
    print('\n'*10)
    for item in sorted_scores:
        movie_title = df[df.movie_id == item[0]]['Title'].values[0]
        sleep(0.5)
        print('\n',str(m+1).rjust(55),movie_title)
        m += 1
        if m > 6:
            break
    if f == 1:
        speak('Due to insufficient data, these results might not be 100 percent accurate.')
        sleep(5)
def doomsday_protocol():
    """Self-destruct: after password confirmation, erase LISA from disk.

    The user gets three password attempts. On success LISA's source files
    and data folders are deleted, a goodbye note is shown, and True is
    returned; on failure or cancellation the function returns False.
    NOTE(review): all deletion paths are hard-coded absolute Windows paths.
    """
    from misc_methods import getPass,file_load,align,User
    import os,time,shutil
    from colorama import init,Fore
    atv = True
    attempts = 3
    speak('Activating this protocol will erase me permanently from your computer. To confirm, please enter your password.')
    while atv:
        if attempts == 0:
            speak('Sorry you are out of attempts. PROTOCOL activation failed.')
            return False
        else:
            speak('You have '+str(attempts)+' attempts left.')
        clean_slate()
        print(align+'Password --> '.rjust(70),end='')
        password = getPass()
        # Reload the stored admin user to compare passwords.
        boss = User()
        boss = file_load(boss,'User_data.pkl')
        if password == boss.password :
            clean_slate()
            speak('Activating DOOMSDAY PROTOCOL in:')
            speak('3')
            time.sleep(0.1)
            speak('2')
            time.sleep(0.1)
            speak('1')
            time.sleep(1)
            print(align+Fore.RED+'DOOMSDAY PROTOCOL ACTIVATED'.rjust(80))
            speak('Goodbye '+boss.f_name)
            time.sleep(2)
            # Show the farewell note, then delete sources, data and folders.
            os.startfile('C:/Users/mohni/Documents/Personal/Projects/LISA/GOODBYE.txt')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/LISA.py')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/logic_module.py')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/misc_methods.py')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/speech_to_text.py')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/text_to_speech.py')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/Version_Log.txt')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/User_count.pkl')
            os.unlink('C:/Users/mohni/Documents/Personal/Projects/LISA/User_data.pkl')
            shutil.rmtree('C:/Users/mohni/Documents/Personal/Projects/LISA/Songs')
            shutil.rmtree('C:/Users/mohni/Documents/Personal/Projects/LISA/LISA_ResponseCache')
            shutil.rmtree('C:/Users/mohni/Documents/Personal/Projects/LISA/__pycache__')
            shutil.rmtree('C:/Users/mohni/Documents/Personal/Projects/LISA/Tones')
            atv = False
            return True
        else :
            speak('Wrong password! Wanna try again ?')
            ans = choice('Try again ?')
            # NOTE(review): substring test -- short fragments also match.
            if ans in 'yes yeah ye yup yep ok k alright':
                attempts -= 1
                continue
            else:
                return False
| true |
78e781e776593d4c47e5f8e9e99abc8b93d7621c | Python | scoutnolan/School | /GEN/ENG 101/Python/npa0002_prob#1.py | UTF-8 | 1,482 | 2.828125 | 3 | [] | no_license | # Nolan Anderson
# ENG 101 Exam #1
# 4/25/2019
wlist=['promise', 'superb', 'husky', 'torpid', 'field', 'ill', 'macho', 'want',\
'warm', 'high', 'callous', 'star', 'twist', 'high', 'worm', 'grate', 'lame',\
'previous', 'righteous', 'push', 'release', 'pass', 'striped', 'quick',\
'desert', 'crash', 'repeat', 'time', 'star', 'kittens', 'sky', 'shape', \
'excuse', 'dam', 'vague', 'brass', 'neat', 'frail', 'shop', 'frequent', 'slap',\
'dance', 'cast', 'thread', 'helpless', 'paddle', 'old', 'morning', 'pastoral', \
'thing', 'growth', 'poke', 'sail', 'ill', 'deceive', 'stale', 'giraffe', 'slap',\
'dam', 'reply', 'trucks', 'start', 'beg', 'peep', 'panoramic', 'wink', 'holiday',\
'adaptable', 'breezy', 'sun', 'crabby', 'apologise', 'aloof', 'taboo', \
'disastrous', 'dizzy', 'reaction', 'reading', 'wave', 'pull', 'noxious', 'bells', \
'request', 'disarm', 'cause', 'wing', 'telling', 'seal', 'habitual', 'gleaming', \
'picture', 'science', 'trouble', 'luxuriant', 'aware', 'lick', 'clover',\
'existence', 'road', 'profit', 'pen', 'alive', 'queue', 'wing']

# Collect words shorter than 5 characters, keeping first-seen order and
# dropping duplicates. A companion set gives O(1) duplicate checks instead
# of the original O(n) scan of new_list for every word.
new_list = []
seen = set()
for word in wlist:
    if len(word) < 5 and word not in seen:
        seen.add(word)
        new_list.append(word)
print(new_list)  # show the deduplicated short words
| true |
8227eb651fb655641c25e4d12ff801cd8943540a | Python | danieldugas/Vitrified-Code | /Python/Diffusion/sim3.py | UTF-8 | 7,541 | 2.578125 | 3 | [] | no_license | # Diffusion Simulation Through Porous Medium
# Python 2.7
# Daniel Dugas
# Currently
# Implements walls
import sys
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.patches import Rectangle
np.seterr(all = 'ignore') # NOTE: hides divide-by-zero warnings (e.g. 1/|V| below)
plt.ion() # plots don't hang execution
plt.close("all")
## Physical parameters
D = 0.0000001 # diffusion coefficient
## Simulation Parameters
num_steps = 20000
num_particles = 1000
max_dt = 1
## flags
enable_variable_dt = True
enable_velocity_culling = False
enable_velocity_alert = True
skew_initial_particle_distribution = False
## Define Experiment Boundaries
## Cells spill over boundary ( by 1 cellwidth )
## Walls exist on the edge of cells
##  _|_|_|_|_|_|_|_
##  _|_|_|_|_|_|_|_
##  _|_|_|_|_|_|_|_
##   | | | | | | |
# Generate Cells ( NX x NY )
# 2 ghost cells ( 1 on each side )
NX = 200+2
NY = 100+2
viewaxis = [-0.5,2.5,-0.5,1.5]
boundaxis= [0.,2.,0.,1.]
boundwdth = boundaxis[1]-boundaxis[0]
boundhght = boundaxis[3]-boundaxis[2]
cellwdth = boundwdth/(NX-2)
cellhght = boundhght/(NY-2)
# a cell position is that of its center
Cell = np.zeros((NX,NY))
CellPosX = np.tile(
    np.linspace(boundaxis[0]-0.5*cellwdth, boundaxis[1]+0.5*cellwdth, NX)
    , (NY,1) ).T
CellPosY = np.tile(
    np.linspace(boundaxis[2]-0.5*cellhght, boundaxis[3]+0.5*cellhght, NY)
    , (NX,1) )
# Generate Walls
# WallX[0,0] is the right-wall of Cell[0,0]
# WallY[0,0] is the top-wall of Cell[0,0]
WallX = np.zeros((NX,NY)) # vertical walls
WallY = np.zeros((NX,NY)) # horizontal walls
# Fill in Wall boundary cells and walls
Cell[ CellPosX<boundaxis[0] ] = 1
Cell[ CellPosX>boundaxis[1] ] = 1
Cell[ CellPosY<boundaxis[2] ] = 1
Cell[ CellPosY>boundaxis[3] ] = 1
WallX[ 0, : ] = 1
WallX[ -2, : ] = 1
WallY[ :, 0 ] = 1
WallY[ :,-2 ] = 1
## Create membrane: three vertical wall columns with periodic gaps (pores)
WallX[ 101, ::10 ] = 1
WallX[ 103, ::15 ] = 1
WallX[ 105, ::10 ] = 1
## Generate N Particles
N = num_particles
# positions: uniform in the central quarter of the domain
X = np.random.rand(N)/2 + 0.25
Y = np.random.rand(N)/2 + 0.25
# velocities (randomized then zeroed: particles start at rest)
VX = ( np.random.rand(N) - 0.5 ) * cellwdth
VY = ( np.random.rand(N) - 0.5 ) * cellhght
VX[:] = 0
VY[:] = 0
# species and mass
Species = (np.random.rand(N)>=0.5).astype(int) # water or protein
Mass = np.ones(N)
Mass[Species==1] = 1
Mass[Species==0] = 1
# skewing: shift species-1 particles up/right when the flag is on
if skew_initial_particle_distribution:
    X = X + 0.5*Species
    Y = Y + 0.5*Species
## Pre-simulation plot (first panel of a 4x4 snapshot grid)
fig1 = plt.figure()
nyplots = 4
nxplots = 4
plt.subplot(nyplots,nxplots,1)
plt.pcolor(CellPosX, CellPosY, -WallX, cmap=cm.gray)
plt.scatter(X, Y, s=20, c=Species, cmap=cm.Greys)
plt.axis(viewaxis)
#rectangle outlining the physical boundary
ca = plt.gca()
ca.add_patch(Rectangle((0,0),2,1,
    alpha=1, facecolor='none'))
subplot_counter = 1
## Pre-define arrays
time = np.zeros((num_steps+1,1))
alerts = np.zeros((num_steps))
InCells = np.zeros((num_particles,2))
DVX = VX * 0
DVY = VY * 0
# Probes
x0_probe = np.zeros((num_steps))
dt_probe = np.zeros((num_steps))
## Iteration (each step costs O(num_particles^2): the per-particle
## neighbour search below scans all particle cell indices)
for step in range(1,num_steps+1):
    # calculate largest allowable dt ( time to first encounter /2)
    # NOTE(review): while all velocities are zero this divides by zero;
    # np.seterr above silences the warning and dt falls back to max_dt.
    if enable_variable_dt:
        dt = min( cellwdth / abs(VX).max() , cellhght / abs(VY).max() )
        dt = min( dt, max_dt )
        dt_probe[step-1] = dt
    else:
        dt = 1
    # Alert excessive velocities (those that would cross a cell in one dt)
    max_vx = cellwdth/dt
    max_vy = cellhght/dt
    if enable_velocity_alert:
        alerts[step-1] = sum(
            ( abs(VX) > max_vx ).astype(int)
            + ( abs(VY) > max_vy ).astype(int) )
    # Cull excessive velocities (clamp to one cell per step)
    if enable_velocity_culling:
        VX[ abs(VX) > max_vx ] = max_vx * np.sign(VX[ abs(VX) > max_vx ])
        VY[ abs(VY) > max_vy ] = max_vy * np.sign(VY[ abs(VY) > max_vy ])
    ## Assign particles to cells (+1 offsets past the left/bottom ghost cell)
    InCells[:,0] = np.floor( (NX-2)*X/boundwdth ) + 1
    InCells[:,1] = np.floor( (NY-2)*Y/boundhght ) + 1
    InCells = InCells.astype(int)
    # calculate force felt by each particle
    for particle in range(num_particles):
        # find neighbouring particles in the 3x3 block of cells around it
        incell = InCells[particle]
        right = [ 1, 0]
        left = [-1, 0]
        up = [ 0, 1]
        down = [ 0,-1]
        NeighbourCells = np.vstack( (
            incell,
            incell + right,
            incell + left ,
            incell + up ,
            incell + down ,
            incell + right + up ,
            incell + right + down,
            incell + left + up ,
            incell + left + down ) )
        # flatten 2-D cell indices to 1-D so membership is one comparison
        InCells1D = InCells[:,0] + InCells[:,1] * NX
        NeighbourCells1D = NeighbourCells[:,0] + NX * NeighbourCells[:,1]
        Neighbours = np.where( (InCells1D[:,None] == NeighbourCells1D).any(axis=1) )[0]
        # remove particle from list of its neighbours
        Neighbours = np.delete(Neighbours, np.where(Neighbours==particle))
        # move to miniature 9-cell system
        MiniX = X[Neighbours]
        MiniY = Y[Neighbours]
        MiniN = len(Neighbours)
        # within mini system:
        # find distance between every particle R
        # stored as inverse of R
        DX = MiniX - X[particle]
        DY = MiniY - Y[particle]
        Rinv = ( DX**2 + DY**2 )**(-0.5)
        # TODO: check if distance too small? (Rinv is infinite for
        # coincident particles; errors are masked by np.seterr above)
        # find repulsive forces exerted by neighbours (~ D / R^2)
        F = D * Rinv**2
        DVX[particle] = (-DX * Rinv * F).sum()/Mass[particle] *dt
        DVY[particle] = (-DY * Rinv * F).sum()/Mass[particle] *dt
        # check wether particle will run out of its cell
        cellcenterx = CellPosX[ incell[0], incell[1] ]
        offcenterx = X[particle] - cellcenterx
        movedx = abs( VX[particle]*dt + offcenterx ) > cellwdth/2
        movedleft = np.sign(VX[particle]) < 0
        cellcentery = CellPosY[ incell[0], incell[1] ]
        offcentery = Y[particle] - cellcentery
        movedy = abs( VY[particle]*dt + offcentery ) > cellhght/2
        moveddown = np.sign(VY[particle]) < 0
        # bounce back: if a wall blocks the crossing, stop the particle and
        # schedule a velocity reversal through DVX/DVY
        wallinthewayx = WallX[ incell[0] - int(movedleft), incell[1] ] == 1
        if movedx and wallinthewayx :
            DVX[particle] = -VX[particle]
            VX[particle] = 0
        wallinthewayy = WallY[ incell[0], incell[1] - int(moveddown) ] == 1
        if movedy and wallinthewayy :
            DVY[particle] = -VY[particle]
            VY[particle] = 0
    # update position
    x0_probe[step-1] = X[0]
    X += VX *dt
    Y += VY *dt
    # update velocity
    VX += DVX
    VY += DVY
    # update time-vector
    time[step] = time[step-1] + dt
    # progress readout every 1% (Python 2 print statement)
    if np.mod(step*100,num_steps) == 0:
        print str(100*step/num_steps) + "%",
        sys.stdout.write('\r')
        sys.stdout.flush()
    # display results: snapshot every 10% into the next subplot
    if np.mod(step*10,num_steps) == 0:
        subplot_counter += 1
        plt.subplot(nyplots,nxplots,subplot_counter)
        plt.pcolor(CellPosX, CellPosY, -WallX, cmap=cm.gray)
        plt.scatter(X, Y, s=20, c=Species, cmap=cm.Greys)
        plt.axis(viewaxis)
        #rectangle outlining the physical boundary
        ca = plt.gca()
        ca.add_patch(Rectangle((0,0),2,1,
            alpha=1, facecolor='none'))
# trim last time value so probe arrays and time have matching lengths
time = time[:-1]
plt.figure()
plt.subplot(2,1,1)
plt.plot(time, x0_probe, 'ro')
plt.title('x position of single particle')
plt.subplot(2,1,2)
plt.plot(time, dt_probe)
plt.title('dt')
| true |
c0eaaa3592eb4487b859d7606e7005758d0ec20b | Python | LiXiaoRan/Data_handle_practice | /sanitizer_reptile.py | UTF-8 | 1,881 | 2.515625 | 3 | [] | no_license | import requests
import json
import pandas as pd
import numpy as np
# Fetch vehicle GPS records from the FeiFeng city-management API and report
# whether the response contained any rows. (Python 2: print statements.)
my_url = 'http://221.228.242.3:11090/api/FeiFengDataManagement/FFCityManager/GetVehicleData'
my_head = {
    'Content-Type': 'application/x-www-form-urlencoded'
}
# Paging and time-window parameters expected by the endpoint.
payload = {'Page': '1000', 'Size': '500', 'dtForm': '2016/7/2 8:00:16', 'dtEnd': '2018/7/22 10:00:16'}
r = requests.post(url=my_url, headers=my_head, data=payload)
rawData = r.text
data = json.loads(rawData)
df = pd.DataFrame()  # placeholder frame; the row-building code is still commented out below
dataList = data['data']  # assumes the response JSON has a 'data' key -- TODO confirm schema
print data
if dataList:
    print 'true'
else:
    print 'false'
# for dataitem in dataList:
# df1 = pd.DataFrame([dataitem['equipmentTime'], dataitem['carCode'], dataitem['gpsValid'], dataitem['gpsLatitude'],
# dataitem['gpsLongitude'], dataitem['gpsSpeed'], dataitem['gpsDirection'],
# dataitem['gpsAltitude'], dataitem['oilLevel'], dataitem['oilLevelUnit']],
# columns=['equipmentTime', 'carCode', 'gpsValid', 'gpsLatitude', 'gpsLongitude', 'gpsSpeed',
# 'gpsDirection', 'gpsAltitude', 'oilLevel', 'oilLevelUnit'])
# df.append(df1, ignore_index=True)
#
# df.to_csv('asd.csv')
# pd.DataFrame({
# 'equipmentTime': dataitem['equipmentTime'],
# 'carCode': dataitem['carCode'],
# 'gpsValid': dataitem['gpsValid'],
# 'gpsLatitude': dataitem['gpsLatitude'],
# 'gpsLongitude': dataitem['gpsLongitude'],
# 'gpsSpeed': dataitem['gpsSpeed'],
# 'gpsDirection': dataitem['gpsDirection'],
# 'gpsAltitude': dataitem['gpsAltitude'],
# 'oilLevel': dataitem['oilLevel'],
# 'oilLevelUnit': dataitem['oilLevelUnit']
# })
| true |
bd8731fe251c372ebf931ea28d6d3e92a432cd7b | Python | SifeiMexico/ejemplosTimbradoPython | /ejemplo_sellado_cadena_original.py | UTF-8 | 2,249 | 2.734375 | 3 | [] | no_license | import base64
#instalar con > pip install pycryptodome que es mas nuevo y mantiene soporte a diferencia de pycrypto
from Cryptodome.Hash import SHA256
from Cryptodome.Signature import PKCS1_v1_5
from Cryptodome.PublicKey import RSA
from Cryptodome.IO import PEM
from base64 import b64decode
import lxml.etree as ET # para generar cadenaOriginal
class CFDIUtils:
    """Helpers for CFDI 3.3 invoices: build the 'cadena original' and sign it."""
    def sellar(self,cadenaOriginal,llavePem,passw,mode='PEM'):
        """Sign *cadenaOriginal* with the private key (SHA-256 + PKCS#1 v1.5).

        ``mode`` selects how the key file is opened: 'PEM' as text, 'DER' as
        binary; any other value raises Exception (the message stays in
        Spanish because users may rely on it). Returns the signature encoded
        in base64, with no trailing newline.
        """
        digest = SHA256.new()
        digest.update(cadenaOriginal.encode('utf-8'))
        read=''
        if mode=='PEM':
            read='r'
        elif mode=='DER':
            read='rb'
        else:
            raise Exception('Modo no valido'+read)
        with open (llavePem, read) as llavePEM:
            private_key = RSA.importKey(llavePEM.read(),passw)
        signer = PKCS1_v1_5.new(private_key)
        sig = signer.sign(digest)
        # Finally, encode the raw signature in base64 (no newline added).
        return base64.b64encode(sig)
    def generaCadenaOriginal(self,xml_filename):
        """Apply SAT's cadenaoriginal_3_3.xslt to the CFDI XML; return the string."""
        dom = ET.parse(xml_filename)
        xslt = ET.parse("./sat/xslt/cadenaoriginal_3_3.xslt")
        transform = ET.XSLT(xslt)
        return str(transform(dom))
def pegarSello(xml_filename,sello):
    """Write *sello* into the root element's Sello attribute and save a copy.

    The result is always written to ./assets/file_new.xml (hard-coded path).
    """
    dom = ET.parse(xml_filename)
    xsi="http://www.w3.org/2001/XMLSchema-instance"  # currently unused
    dom.getroot().attrib['Sello']=sello
    dom.write('./assets/file_new.xml')
# Demo: build the cadena original, sign it with both a PEM and a DER key,
# stamp the XML, then wrap the raw key into an encrypted-PEM file and sign
# once more with it.
xmlPath="./assets/CFDI33_sellado.xml"
cfdiUtils= CFDIUtils()
cadenaOriginal=cfdiUtils.generaCadenaOriginal(xmlPath)
# With PEM, assuming the PEM file has no password.
print("con pem:")
sello=cfdiUtils.sellar(cadenaOriginal,"llave.pem",None)
print(sello)
print('Con key(DER):')
sello=cfdiUtils.sellar(cadenaOriginal,"CSD.key","12345678a",'DER')
print(sello)
print(cadenaOriginal)
pegarSello(xmlPath,sello)
# SECURITY NOTE(review): key passwords are hard-coded in this demo script.
pemKeyWithPassPhrase=PEM.encode(open('key.key','rb').read(),'ENCRYPTED PRIVATE KEY')
print(pemKeyWithPassPhrase)
open('ENCRYPTED_KEY.pem','w').write(pemKeyWithPassPhrase)
sello2=cfdiUtils.sellar(cadenaOriginal,"ENCRYPTED_KEY.pem","12345678a",'PEM')
print(sello2)
| true |
8c1a37e9d52630c485ddcbdeb312dd97bbc5cb77 | Python | Niteshkr123/KU-hackfest | /pir.py | UTF-8 | 688 | 2.578125 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
import pygame
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN) # PIR motion sensor on BCM pin 18
pygame.mixer.init()
try:
    time.sleep(2)  # let the PIR sensor settle before polling it
    while True:
        if GPIO.input(18):
            print("Motion Detected...")
            pygame.mixer.music.load("C:/Users/user/Downloads/KU HackFest/audio/someone ahead.mp3")
            pygame.mixer.music.play()
            # Block until the alert clip finishes playing.
            while pygame.mixer.music.get_busy():
                continue
        else:
            print("Motion not Detected...")
        #time.sleep(0.1) #loop delay, should be less than detection dela
except KeyboardInterrupt:
    # Ctrl-C is the normal way to stop this loop; exit quietly.
    pass
finally:
    # The old bare ``except:`` silently swallowed *every* error; now real
    # exceptions propagate after the GPIO pins are released.
    GPIO.cleanup()
| true |
2e16ff59a17f925dcee5836ef271905ecdd3fd90 | Python | Msadat97/fairness-vision | /lcifr/code/models/logistic_regression.py | UTF-8 | 555 | 2.90625 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
class LogisticRegression(nn.Module):
    """Single-layer logistic-regression model (binary classifier).

    The layer is placed on CUDA when available, otherwise on the CPU.
    """

    def __init__(self, input_dim):
        super().__init__()
        target = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.linear = nn.Linear(input_dim, 1).to(target)
        self.sigmoid = nn.Sigmoid().to(target)

    def forward(self, x):
        # Raw (pre-sigmoid) scores, squeezed to drop the trailing dim.
        return self.linear(x).squeeze()

    def predict(self, x):
        # Hard 0/1 labels: probability >= 0.5 maps to class 1.
        probabilities = self.sigmoid(self.linear(x))
        return (probabilities >= 0.5).float().squeeze()

    def logits(self, x):
        # NOTE: despite the name, this returns sigmoid *probabilities*.
        return self.sigmoid(self.linear(x))
| true |
51dcf4f630ac39e53d3bb6743f056f1f4c1503f5 | Python | NyarukouSAMA/py_geekbrains | /PythonOOP/FromTeacher/Lesson2/2.3/script.py | UTF-8 | 165 | 2.734375 | 3 | [] | no_license | import re
with open("index.html") as page:
    markup = page.read()
    # Extract every aria-label attribute value from the saved home-page markup.
    labels = re.findall("<a class=\"home-link home-link_black_yes\" aria-label=\"([^\"]+)\" href=", markup)
    print(labels)
| true |
fa3392c7ce7563d87a7b974210591f5ff1ebc0b8 | Python | nanthu0123/django-blog-app | /blog/views.py | UTF-8 | 4,999 | 3 | 3 | [] | no_license | '''
A view is a Python function or class that takes a web request and returns a web response.
Views are used to do things like fetch objects from the database,
modify those objects if needed, render forms, return HTML, and much more.
'''
from datetime import datetime
from django.shortcuts import render
from blog.models import BlogPost
def home(request):
    '''Render the home page with every blog post (or a "no data" message).'''
    # pylint:disable=no-member
    blog_post_all = BlogPost.objects.all()
    if blog_post_all.exists():
        # Templates iterate querysets directly; the old element-by-element
        # copy into a Python list was redundant.
        return render(request, 'home.html', {'blog_data': blog_post_all})
    # if no records in db table, send the message to the user
    return render(request, 'home.html', {'message': 'no blog data'})
def create_blog(request):
    """Show the blog-creation form on GET; validate and save the post on POST."""
    if request.method != 'POST':
        return render(request, 'createBlog.html')
    title = request.POST['title']
    description = request.POST['description']
    author = request.POST['author']
    reference_link = request.POST['referenceLink']
    # Reject duplicate titles so every post keeps a unique headline.
    # pylint:disable=no-member
    if BlogPost.objects.filter(Title=title).exists():
        return render(request, 'createBlog.html',
                      {'message': 'already has a blog post in same title'})
    BlogPost(Title=title, Description=description,
             Author=author, ReferenceLink=reference_link).save()
    return render(request, 'createBlog.html', {'message': 'blog has been posted'})
def search_blog(request):
    """Search blog posts whose title contains the submitted substring.

    GET renders the empty search form; POST filters posts with a
    case-insensitive substring match on Title.
    """
    if request.method == 'POST':
        # Substring typed by the user in the search form.
        blog_title = request.POST['BlogTitle']
        if blog_title:
            # Case-insensitive "contains" filter on the Title column.
            #pylint: disable=no-member
            blog_post_title_contained = BlogPost.objects.filter(
                Title__icontains=blog_title)
            if blog_post_title_contained.exists():
                # Possibly several matches: collect them all for the template.
                blog_data = []
                for data in blog_post_title_contained:
                    blog_data.append(data)
                return render(request, 'searchBlog.html',
                              {'blog_data': blog_data})
            # No record matched: report it to the user.
            return render(request, 'searchBlog.html', {'message': 'no blog data for this title'})
        # Blank search term: log it and fall through to the plain form.
        print('no input search data')
    return render(request, 'searchBlog.html')
def update_blog(request, _pk):
    """Edit the blog post with primary key _pk.

    GET shows the existing post in the edit form; POST writes the
    submitted fields back to the database.
    """
    if request.method == 'POST':
        # Form fields from the updateBlog template.
        title = request.POST['title']
        description = request.POST['description']
        author = request.POST['author']
        reference_link = request.POST['referenceLink']
        # NOTE(review): this overwrites CreatedDate with today's date on
        # every edit — confirm the intent was not to preserve the
        # original creation date.
        BlogPost.objects.filter(Id=_pk).update(Title=title, Description=description, Author=author,  # pylint:disable=no-member
                                               ReferenceLink=reference_link,
                                               CreatedDate=datetime.today().strftime('%Y-%m-%d'))
        return render(request, 'updateBlog.html', {'message': 'your blog has been updated'})
    # GET: fetch the post for this primary key to pre-fill the form.
    # pylint:disable=no-member
    blog_post_filter = BlogPost.objects.filter(
        Id=_pk)
    if blog_post_filter.exists():  # pylint:disable=no-else-return
        # Post exists: hand the queryset to the template.
        return render(request, 'updateBlog.html', {'blog_post_filter': blog_post_filter})
    else:
        print('no data for this primary key')
        return render(request, 'updateBlog.html')
def delete_blog(request, _pk):
    """Delete the blog post whose primary key is _pk when the request is a POST."""
    if request.method == 'POST':
        # The primary key comes from the URL; remove the matching row.
        BlogPost.objects.filter(Id=_pk).delete()  # pylint:disable=no-member
        return render(request, 'deleteBlog.html', {'message': 'blog post has been deleted'})
    return render(request, 'deleteBlog.html')
| true |
e61480b945808249281130941b50c3119cf1d2a2 | Python | hirekatsu/MyNLTKBOOK | /ch05_02.py | UTF-8 | 5,917 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import nltk, re, pprint
from nltk import word_tokenize
print("""
----------------------------------------------------------------------
2 Tagged Corpora
2.1 Representing Tagged Tokens
----------------------------------------------------------------------
""")
tagged_token = nltk.tag.str2tuple('fly/NN')
print(tagged_token)
print(tagged_token[0])
print(tagged_token[1])
print("-" * 40)
sent = """
The/AT grand/JJ jury/NN commented/VBD on/IN a/AT number/NN of/IN
other/AP topics/NNS ,/, AMONG/IN them/PPO the/AT Atlanta/NP and/CC
Fulton/NP-tl County/NN-tl purchasing/VBG departments/NNS which/WDT it/PPS
said/VBD ''/'' ARE/BER well/QL operated/VBN and/CC follow/VB generally/RB
accepted/VBN practices/NNS which/WDT inure/VB to/IN the/AT best/JJT
interest/NN of/IN both/ABX governments/NNS "/" ./.
"""
print([nltk.tag.str2tuple(t) for t in sent.split()])
print("-" * 40)
print("""
----------------------------------------------------------------------
2.2 Reading Tagged Corpora
----------------------------------------------------------------------
""")
print(nltk.corpus.brown.tagged_words())
print(nltk.corpus.brown.tagged_words(tagset='universal'))
print("-" * 40)
print(nltk.corpus.nps_chat.tagged_words())
print(nltk.corpus.conll2000.tagged_words())
print(nltk.corpus.treebank.tagged_words())
print("-" * 40)
print(nltk.corpus.brown.tagged_words(tagset='universal'))
print(nltk.corpus.treebank.tagged_words(tagset='universal'))
print("-" * 40)
print(nltk.corpus.sinica_treebank.tagged_words())
print(nltk.corpus.indian.tagged_words())
print(nltk.corpus.mac_morpho.tagged_words())
print(nltk.corpus.conll2002.tagged_words())
print(nltk.corpus.cess_cat.tagged_words())
print("-" * 40)
print("""
----------------------------------------------------------------------
2.3 A Universal Part-of-Speech Tagset
----------------------------------------------------------------------
""")
from nltk.corpus import brown
brown_news_tagged = brown.tagged_words(categories='news', tagset='universal')
tag_fd = nltk.FreqDist(tag for (word, tag) in brown_news_tagged)
print(tag_fd.most_common())
print("-" * 40)
tag_fd.plot(5, cumulative=True)
print("-" * 40)
print("""
----------------------------------------------------------------------
2.4 Nouns
----------------------------------------------------------------------
""")
word_tag_pairs = nltk.bigrams(brown_news_tagged)
noun_preceders = [a[1] for (a, b) in word_tag_pairs if b[1] == 'NOUN']
fdist = nltk.FreqDist(noun_preceders)
print([tag for (tag, _) in fdist.most_common()])
print("-" * 40)
print("""
----------------------------------------------------------------------
2.5 Verbs
----------------------------------------------------------------------
""")
wsj = nltk.corpus.treebank.tagged_words(tagset='universal')
word_tag_fd = nltk.FreqDist(wsj)
print([wt[0] for (wt, _) in word_tag_fd.most_common() if wt[1] == 'VERB'])
print("-" * 40)
cfd1 = nltk.ConditionalFreqDist(wsj)
print(cfd1['yield'].most_common())
print(cfd1['cut'].most_common())
print("-" * 40)
wsj = nltk.corpus.treebank.tagged_words()
cfd2 = nltk.ConditionalFreqDist((tag, word) for (word, tag) in wsj)
print(list(cfd2['VBN']))
print("-" * 40)
cfd3 = nltk.ConditionalFreqDist(wsj)
print([w for w in cfd3.conditions() if 'VBD' in cfd3[w] and 'VBN' in cfd3[w]])
idx1 = wsj.index(('kicked', 'VBD'))
print(wsj[idx1-4:idx1+1])
idx2 = wsj.index(('kicked', 'VBN'))
print(wsj[idx2-4:idx2+1])
print("-" * 40)
vbn_words = list(cfd2['VBN'])[:10]
for word in vbn_words:
idx = wsj.index((word, 'VBN'))
print(wsj[idx-1:idx], '->', word)
print("-" * 40)
print("""
----------------------------------------------------------------------
2.6 Adjectives and Adverbs
----------------------------------------------------------------------
""")
print("----- no code -----")
print("""
----------------------------------------------------------------------
2.7 Unsimplified Tags
----------------------------------------------------------------------
""")
def findtags(tag_prefix, tagged_text):
    """Map each tag starting with tag_prefix to its five most common words."""
    cfd = nltk.ConditionalFreqDist(
        (tag, word)
        for (word, tag) in tagged_text
        if tag.startswith(tag_prefix)
    )
    return {tag: cfd[tag].most_common(5) for tag in cfd.conditions()}
tagdict = findtags('NN', nltk.corpus.brown.tagged_words(categories='news'))
for tag in sorted(tagdict):
print(tag, tagdict[tag])
print("-" * 40)
print("""
----------------------------------------------------------------------
2.8 Exploring Tagged Corpora
----------------------------------------------------------------------
""")
brown_learned_text = brown.words(categories='learned')
print(sorted(set(b for (a, b) in nltk.bigrams(brown_learned_text) if a == 'often')))
print("-" * 40)
brown_lrnd_tagged = brown.tagged_words(categories='learned', tagset='universal')
tags = [b[1] for (a, b) in nltk.bigrams(brown_lrnd_tagged) if a[0] == 'often']
fd = nltk.FreqDist(tags)
fd.tabulate()
print("-" * 40)
from nltk.corpus import brown
def process(sentence):
    """Print every verb-TO-verb trigram found in a tagged sentence."""
    for (w1, t1), (w2, t2), (w3, t3) in nltk.trigrams(sentence):
        verb_to_verb = t1.startswith('V') and t2 == 'TO' and t3.startswith('V')
        if verb_to_verb:
            print(w1, w2, w3)
for tagged_sent in brown.tagged_sents():
process(tagged_sent)
print("-" * 40)
brown_news_tagged = brown.tagged_words(categories='news')
data = nltk.ConditionalFreqDist((word.lower(), tag) for (word, tag) in brown_news_tagged)
for word in sorted(data.conditions()):
if len(data[word]) > 3:
tags = [tag for (tag, _) in data[word].most_common()]
print(word, ' '.join(tags))
print("-" * 40)
nltk.app.concordance()
print("-" * 40)
| true |
4edec32061ed558321f26d25b3c683b4f9c04253 | Python | estefaniazuluaga/LabElectro1 | /LabElectro.py | UTF-8 | 2,747 | 3.203125 | 3 | [] | no_license | import math
import numpy as np
Nx = 330; # Número de cuadrículas en el eje x. Cada cuadrícula = 1mm
Ny = 3; # Número de cuadrículas en el eje x. Cada cuadrícula = 1mm
mpx = math.ceil(Nx/2);# % Mid-point of x
mpy = math.ceil(Ny/2); #% Mid point of y
N = 500;
V = np.zeros(Nx,Ny); # Potential (Voltage) matrix
T = 0; # % Top-wall potential
B = 0; #% Bottom-wall potential
L = 0; #% Left-wall potential
R = 0; #% Right-wall potential
V[1,:] = L;
V[Nx,:] = R;
V[:,1] = B;
V[:,Ny] = T;
V[1,1] = 0.5*(V(1,2)+V(2,1));
V[Nx,1] = 0.5*(V(Nx-1,1)+V(Nx,2));
V[1,Ny] = 0.5*(V(1,Ny-1)+V(2,Ny));
V[Nx,Ny] = 0.5*(V(Nx,Ny-1)+V(Nx-1,Ny));
length_plate = 150; #Length of plate in terms of number of grids
lp = math.floor(length_plate/2);
position_plate = 15; #Position of plate on x axis
pp1 = mpx+position_plate;
pp2 = mpx-position_plate;
for z in range(1, N): # Number of iterations
for i in range(2, Nx-1):
for j in range(2, Ny-1):
# % The next two lines are meant to force the matrix to hold the
# % potential values for all iterations
V[pp1,mpy-lp:mpy+lp] = 100;
V[pp2,mpy-lp:mpy+lp] = -100;
V(i,j)=0.25*(V(i+1,j)+V(i-1,j)+V(i,j+1)+V(i,j-1));
V = V1;
[Ex,Ey]=gradient(V);
Ex = -Ex;
Ey = -Ey;
E = sqrt(Ex**2+Ey**2);
x = [1, Nx]-mpx;
y = [1, Ny]-mpy;
figure(1)
contour_range_V = -101:0.5:101;
contour(x,y,V,contour_range_V,'linewidth',0.5);
axis([min(x) max(x) min(y) max(y)]);
colorbar('location','eastoutside','fontsize',14);
xlabel('x-axis in meters','fontsize',14);
ylabel('y-axis in meters','fontsize',14);
title('Electric Potential distribution, V(x,y) in volts','fontsize',14);
h1=gca;
set(h1,'fontsize',14);
fh1 = figure(1);
set(fh1, 'color', 'white')
figure(2)
contour_range_E = -20:0.05:20;
contour(x,y,E,contour_range_E,'linewidth',0.5);
axis([min(x) max(x) min(y) max(y)]);
colorbar('location','eastoutside','fontsize',14);
xlabel('x-axis in meters','fontsize',14);
ylabel('y-axis in meters','fontsize',14);
title('Electric field distribution, E (x,y) in V/m','fontsize',14);
h2=gca;
set(h2,'fontsize',14);
fh2 = figure(2);
set(fh2, 'color', 'white')
figure(3)
contour(x,y,E,'linewidth',0.5);
hold on, quiver(x,y,Ex,Ey,2)
title('Electric field Lines, E (x,y) in V/m','fontsize',14);
axis([min(x) max(x) min(y) max(y)]);
colorbar('location','eastoutside','fontsize',14);
xlabel('x-axis in meters','fontsize',14);
ylabel('y-axis in meters','fontsize',14);
h3=gca;
set(h3,'fontsize',14);
fh3 = figure(3);
set(fh3, 'color', 'white')
| true |
003b7ee464be269c03f8efadf16264f284ca3821 | Python | keith-pedersen/iitthesis | /regexRemove.py | UTF-8 | 2,388 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python3
# Copyright (C) 2018 by Keith Pedersen (Keith.David.Pedersen@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This file defines a python function which opens a file and removes
# any regular expressions matching the supplied patterns
import re
import sys
def regexRemove(patternList, requiredExtension = ""):
    """Strip every regex match in patternList from the file named in sys.argv[1].

    patternList may be a single pattern (str or bytes) or a list of them.
    The file path must contain requiredExtension, otherwise the program
    exits with a message. The file is rewritten in place.
    """
    if ((type(patternList) == str) or (type(patternList) == bytes)):
        patternList = [patternList,]
    if (type(patternList) != list):
        raise TypeError("patternList should be a string or a list of strings")
    # Normalize everything to str patterns: the old code substituted in the
    # bytes domain, which raised TypeError whenever a str pattern was passed
    # (re.sub cannot mix a str pattern with a bytes string).
    patterns = [p.decode() if isinstance(p, bytes) else p for p in patternList]
    lines = list()
    filePath = sys.argv[1] if (len(sys.argv) > 1) else "."
    if((filePath == ".") or (filePath.find(requiredExtension) < 0)):
        print("\n", "No *." + requiredExtension + " file supplied, exiting.")
        exit()
    # Open the file, read the lines, and remove matches of each pattern
    try:
        with open(filePath, "r") as file:
            lines = file.readlines()
        for i in range(len(lines)):  # by index so the list entry is replaced
            for pattern in patterns:
                # Replace every match with the empty string
                lines[i] = re.sub(pattern, "", lines[i])
        # Rewrite the file with the cleaned lines
        with open(filePath, "w") as file:
            file.writelines(lines)
    # Catch the exception if the file could not be opened
    except FileNotFoundError:
        print("\n", "File <" + filePath + "> could not be found, exiting.")
        exit()
| true |
6ef8c5eb0adf2fdc9e955d85c2bdb1e7833410c6 | Python | byronduenas/cpen442 | /assignment4/question1.py | UTF-8 | 448 | 3.21875 | 3 | [] | no_license | import hashlib
for x in xrange(0, 9999):
number = str(x).zfill(4)
hash1 = hashlib.sha1(number + "ug").hexdigest()
hash2 = hashlib.sha1("ug" + number).hexdigest()
if hash1 == "7FE36DBE8F148316349EC3435546DB4076FE195F".lower():
print number + "ug" + " is the password"
break
elif hash2 == "7FE36DBE8F148316349EC3435546DB4076FE195F".lower():
print "ug" + number + " is the password"
break
# ug0812 | true |
b28f0bc9de78816ecffc1dc41b321fa1bd0f6e73 | Python | LeilaelRico/semanaTec-herramientasC-arteProgra | /PingPong.py | UTF-8 | 5,656 | 3.328125 | 3 | [] | no_license | import turtle
"""
TODO: tune the ball's acceleration.
"""
def pong(name1, score_a, name2, score_b):
    """Run one game of Pong to 5 points between name1 (W/S) and name2 (arrow keys).

    NOTE(review): the incoming score_a/score_b arguments are immediately
    reset to 0 below, so their values are ignored — confirm intended.
    """
    # Game window (canvas) setup
    win = turtle.Screen()
    win.title("Pong")
    win.bgcolor("black")
    win.setup(width=800, height=600)
    win.tracer(0)
    # Briefly raise the window above all others so it grabs focus
    rootwindow = win.getcanvas().winfo_toplevel()
    rootwindow.call('wm', 'attributes', '.', '-topmost', '1')
    rootwindow.call('wm', 'attributes', '.', '-topmost', '0')
    score_a = 0
    score_b = 0
    square_size = 20
    linea_posicion = 0
    # Draw the dashed center line by stamping gray squares down x = 0
    turtle.color("gray")
    turtle.shape("square")
    turtle.shapesize(stretch_wid=3, stretch_len=1)
    turtle.penup()
    for i in range(6):
        turtle.setpos(linea_posicion, (250-(i*square_size*5)))
        turtle.stamp()
    # Left paddle
    barra_Izq = turtle.Turtle()
    barra_Izq.speed(0)
    barra_Izq.shape("square")
    barra_Izq.color("white")
    barra_Izq.shapesize(stretch_wid=5, stretch_len=1)
    barra_Izq.penup()
    barra_Izq.goto(-350, 0)
    # Right paddle
    barra_Der = turtle.Turtle()
    barra_Der.speed(0)
    barra_Der.shape("square")
    barra_Der.color("white")
    barra_Der.shapesize(stretch_wid=5, stretch_len=1)
    barra_Der.penup()
    barra_Der.goto(350, 0)
    # Ball
    bola = turtle.Turtle()
    bola.speed(0)
    bola.shape("circle")
    bola.color("white")
    bola.penup()
    bola.goto(0, 0)
    bola.dx = 0.9  # per-frame velocity components
    bola.dy = 0.9
    # Score display
    Dscore = turtle.Turtle()
    Dscore.color("white")
    Dscore.penup()
    Dscore.hideturtle()
    Dscore.goto(0, 250)
    Dscore.write("{}: 0 {}: 0"
                 .format(name1,
                         name2), align="center", font=("Arial", 24, "normal"))
    # On-screen instructions
    Tutorial = turtle.Turtle()
    Tutorial.color("gray")
    Tutorial.penup()
    Tutorial.hideturtle()
    Tutorial.goto(0, -250)
    Tutorial.write("Player A: 'w' 's' Player B: '↑' '↓'",
                   align="center", font=("Arial", 16, "normal"))
    esc = turtle.Turtle()
    esc.color("gray")
    esc.penup()
    esc.hideturtle()
    esc.goto(-350, -290)
    esc.write("Press esc to quit")

    # Paddle movement handlers: 40 px per key press
    def barra_Izq_up():
        y = barra_Izq.ycor()
        y += 40
        barra_Izq.sety(y)

    def barra_Izq_down():
        y = barra_Izq.ycor()
        y -= 40
        barra_Izq.sety(y)

    def barra_Der_up():
        y = barra_Der.ycor()
        y += 40
        barra_Der.sety(y)

    def barra_Der_down():
        y = barra_Der.ycor()
        y -= 40
        barra_Der.sety(y)

    # Keyboard bindings. The `or` chains only work because onkeypress
    # returns None (falsy), so both calls evaluate and both cases get bound.
    win.listen()
    win.onkeypress(barra_Izq_up, "w") or win.onkeypress(barra_Izq_up, "W")
    win.onkeypress(barra_Izq_down, "s") or win.onkeypress(barra_Izq_down, "S")
    win.onkeypress(barra_Der_up, "Up")
    win.onkeypress(barra_Der_down, "Down")

    # Game loop
    continua = True
    while continua:
        win.update()
        # Move the ball one step
        bola.setx(bola.xcor()+bola.dx)
        bola.sety(bola.ycor()+bola.dy)
        win.listen()
        win.onkey(salir, "Escape")  # `salir` is defined at module level
        # Bounce off the top/bottom canvas borders
        if bola.ycor() > 290:
            bola.sety(290)
            bola.dy *= -1
        elif bola.ycor() < -290:
            bola.sety(-290)
            bola.dy *= -1
        elif bola.xcor() > 390:
            # Ball passed the right edge: point for player A, re-serve from center
            bola.goto(0, 0)
            bola.dx *= -1
            score_a += 1
            Dscore.clear()
            Dscore.write("{}: {} {}: {}"
                         .format(name1, score_a, name2,
                                 score_b), align="center",
                         font=("Arial", 24, "normal"))
        elif bola.xcor() < -390:
            # Ball passed the left edge: point for player B
            bola.goto(0, 0)
            bola.dx *= -1
            score_b += 1
            Dscore.clear()
            Dscore.write("{}: {} {}: {}"
                         .format(name1, score_a, name2,
                                 score_b), align="center",
                         font=("Arial", 24, "normal"))
        # Paddle collisions (paddle half-height is 40 px)
        elif bola.xcor() > 340 and\
                (bola.ycor() < barra_Der.ycor() + 40 and
                 bola.ycor() > barra_Der.ycor() - 40):
            bola.setx(340)
            bola.dx *= -1
        elif bola.xcor() < -340 and\
                (bola.ycor() < barra_Izq.ycor() + 40 and
                 bola.ycor() > barra_Izq.ycor() - 40):
            bola.setx(-340)
            bola.dx *= -1
        elif score_a >= 5:
            # Player A reached 5 points: clear the board and announce the win
            turtle.clearscreen()
            WinA = turtle.Turtle()
            WinA.color("black")
            WinA.penup()
            WinA.hideturtle()
            WinA.goto(0, 0)
            WinA.write("{} WINS!".format(name1),
                       align="center", font=("Arial", 24, "normal"))
        elif score_b >= 5:
            # Player B reached 5 points
            turtle.clearscreen()
            WinB = turtle.Turtle()
            WinB.color("black")
            WinB.penup()
            WinB.hideturtle()
            WinB.goto(0, 0)
            WinB.write("{} WINS!".format(name2),
                       align="center", font=("Arial", 24, "normal"))
def crea_matriz(name1, score_a, name2, score_b):
    """Build a 2x2 results matrix: one [name, score] row per player."""
    return [[name1, score_a], [name2, score_b]]
def imprime_matriz(matriz):
    """Print the matrix row by row, values separated by single spaces."""
    for fila in matriz:
        for valor in fila:
            print(valor, end=" ")
        print()
def salir():
    """Close the turtle graphics window, ending the program."""
    turtle.bye()
def main():
    """Prompt for player names, print the initial score matrix, and start Pong.

    NOTE(review): `continua` is never set to False, so after each game a
    new round of prompts begins — confirm this endless loop is intended.
    """
    continua = True
    while continua:
        name1 = str(input("First player name: "))
        name2 = str(input("Second player name: "))
        score_a = 0
        score_b = 0
        m = crea_matriz(name1, score_a, name2, score_b)
        imprime_matriz(m)
        pong(name1, score_a, name2, score_b)
| true |
840d966c0eb2cd3cbbfd6f39bc008169a0088c92 | Python | WMQ777/CS420-Final-Project | /MLP.py | UTF-8 | 4,666 | 3 | 3 | [] | no_license | """ Multilayer Perceptron.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
mnist_train_data = np.fromfile("mnist_train_data",dtype=np.uint8)
mnist_train_label = np.fromfile("mnist_train_label",dtype=np.uint8)
mnist_test_data = np.fromfile("mnist_test_data",dtype=np.uint8)
mnist_test_label = np.fromfile("mnist_test_label",dtype=np.uint8)
mnist_train_data = mnist_train_data.reshape(60000,45,45)
mnist_train_data = mnist_train_data.astype(np.float32)
mnist_test_data = mnist_test_data.reshape(10000,45,45)
mnist_test_data = mnist_test_data.astype(np.float32)
mnist_train_label = mnist_train_label.astype(np.int32)
mnist_test_label = mnist_test_label.astype(np.int32)
mnist_train_data=mnist_train_data.flatten()
mnist_train_data=mnist_train_data.reshape(60000,45*45)
mnist_test_data=mnist_test_data.flatten()
mnist_test_data=mnist_test_data.reshape(10000,45*45)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_epochs = 15
training_steps = 10000
batch_size = 100
display_step = 100
# Network Parameters
n_hidden_1 = 516 # 1st layer number of neurons
n_hidden_2 = 516 # 2nd layer number of neurons
n_input = 45*45 # MNIST data input (img shape: 45*45)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
def next_batch(x, y, batch, batch_size):
    """Return the batch-th minibatch of (x, y), reshuffling at each epoch start."""
    batches_per_epoch = int(x.shape[0] / batch_size)
    batch = batch % batches_per_epoch
    if batch == 0:
        # New epoch: apply the same random permutation to examples and labels.
        index = [i for i in range(0, x.shape[0])]
        np.random.shuffle(index)
        x = x[index]
        y = y[index]
    start = batch * batch_size
    end = (batch + 1) * batch_size
    return x[start:end], y[start:end]
# Create model
def multilayer_perceptron(x):
    """Forward pass of a 2-hidden-layer MLP using the module-level weights/biases.

    NOTE(review): no activation function is applied between layers, so the
    stack is effectively linear — confirm whether ReLU was intended.
    """
    # Hidden fully connected layer 1 (n_hidden_1 = 516 units)
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Hidden fully connected layer 2 (n_hidden_2 = 516 units)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with one neuron per class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Construct model
logits = multilayer_perceptron(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=tf.cast(Y, dtype=tf.int32)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.cast(Y, dtype=tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
step_num=[]
loss_value=[]
accu_value=[]
with tf.Session() as sess:
sess.run(init)
for step in range(1, training_steps + 1):
batch_x, batch_y = next_batch(mnist_train_data, mnist_train_label, step, batch_size)
# Reshape data to get 45 seq of 45 elements
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
step_num.append(step)
loss_value.append(loss)
accu_value.append(acc)
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
print("Optimization Finished!")
# Calculate accuracy for mnist test images
test_data = mnist_test_data
test_label = mnist_test_label
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
# Plot the loss and accuracy of MLP
fig=plt.figure()
ax1=fig.add_subplot(1,2,1)
ax2=fig.add_subplot(1,2,2)
ax1.plot(step_num, loss_value, 'r')
ax2.plot(step_num, accu_value, 'b')
ax1.set_xlabel("step")
ax1.set_ylabel("loss")
ax2.set_xlabel("step")
ax2.set_ylabel("accuracy")
plt.show() | true |
737889734e6cd0b0e620ba492e28ed7400a20d38 | Python | adm6/lessons | /les9/t1.py | UTF-8 | 203 | 3.46875 | 3 | [] | no_license | import random
fib_list = [0, 1]
while (fib_list[-2] + fib_list[-1]) < 5000:
fib_list.append(fib_list[-1] + fib_list[-2])
for i in range(3):
print(fib_list[random.randint(0, len(fib_list)-1)]) | true |
8ea733c60123f00ff1240176acfb2b9d7723d0e9 | Python | minnonong/Codecademy_Python | /06.PygLatin/06_04.py | UTF-8 | 124 | 3.5625 | 4 | [] | no_license | # 06_04 Check Yourself!
original = raw_input("Input: ")
if len(original) > 0:
print original
else:
print "empty"
| true |
7932ee03410cdded508439d3f3a552a3774d8c8f | Python | BIGY2333/hhf | /SEnet.py | UTF-8 | 3,457 | 2.890625 | 3 | [] | no_license | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def get_data():
    """Download/load the MNIST dataset with one-hot labels (network/disk I/O)."""
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist
# 设置权重函数
# Weight-initialization helper: truncated normal with std 0.1
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)
# 设置阈值函数
# Bias-initialization helper: constant 0.1
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# 设置卷积层
# 2-D convolution with stride 1 and SAME padding (output keeps H and W)
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding = "SAME")
# 设置池化层
# 2x2 max pooling with stride 2 (halves H and W)
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")
def SE_block(x, ratio):
    """Squeeze-and-Excitation block: learn a per-channel gate and rescale x.

    x: NHWC feature map; ratio: channel-reduction factor of the bottleneck.
    Returns x scaled channel-wise by sigmoid gates in (0, 1).
    """
    shape = x.get_shape().as_list()
    channel_out = shape[3]
    # Integer division: tensor shapes must be ints. The original used `/`,
    # which is float division under Python 3 and breaks weight_variable.
    reduced = channel_out // ratio
    with tf.variable_scope("squeeze_and_excitation"):
        # Squeeze: global average pool to one value per channel
        squeeze = tf.nn.avg_pool(x, [1, shape[1], shape[2], 1],
                                 [1, shape[1], shape[2], 1], padding="SAME")
        # Excitation FC 1 (1x1 conv) with ReLU, reducing channels by `ratio`
        w_excitation1 = weight_variable([1, 1, channel_out, reduced])
        b_excitation1 = bias_variable([reduced])
        excitation1 = conv2d(squeeze, w_excitation1) + b_excitation1
        excitation1_output = tf.nn.relu(excitation1)
        # Excitation FC 2 restores the channel count; sigmoid yields gates in (0, 1)
        w_excitation2 = weight_variable([1, 1, reduced, channel_out])
        b_excitation2 = bias_variable([channel_out])
        excitation2 = conv2d(excitation1_output, w_excitation2) + b_excitation2
        excitation2_output = tf.nn.sigmoid(excitation2)
        # Scale: broadcast the gates over H and W and rescale the input map
        excitation_output = tf.reshape(excitation2_output, [-1, 1, 1, channel_out])
        h_output = excitation_output * x
    return h_output
def graph_build():
    """Build the TF1 graph: 2 conv+SE+pool stages, FC layers, loss and accuracy."""
    # 32x32 single-channel input images
    x_image = tf.placeholder(tf.float32, (None, 32, 32, 1), name='X')
    # Conv stage 1: 5x5 kernels, 1 -> 32 channels, followed by an SE block
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h1_conv = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h1_conv = SE_block(h1_conv, 4)
    # First pooling layer (32x32 -> 16x16)
    h1_pool = pool(h1_conv)
    # Conv stage 2: 5x5 kernels, 32 -> 64 channels, followed by an SE block
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h2_conv = tf.nn.relu(conv2d(h1_pool, W_conv2) + b_conv2)
    h2_conv = SE_block(h2_conv, 4)
    # Second pooling layer (16x16 -> 8x8)
    h2_pool = pool(h2_conv)
    # Fully connected layer 1: flatten 8*8*64 features to 256 units
    W_fc1 = weight_variable([8 * 8 * 64, 256])
    b_fc1 = bias_variable([256])
    h2_pool_flat = tf.reshape(h2_pool, shape=[-1, 8 * 8 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)
    # Dropout layer
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    W_fc2 = weight_variable([256, 10])
    b_fc2 = bias_variable([10])
    # NOTE(review): h_fc1_drop has 256 features, but it is reshaped to
    # 8*8*64 = 4096 here — this looks like a copy/paste error and will
    # fail or mix examples at runtime; confirm the intended wiring.
    h3_pool_flat = tf.reshape(h_fc1_drop, shape=[-1, 8 * 8 * 64])
    h_fc2 = tf.nn.relu(tf.matmul(h3_pool_flat, W_fc2) + b_fc2)
    # Fully connected layer 2: 10 -> 1 output
    W_fc3 = weight_variable([10, 1])
    b_fc3 = bias_variable([1])
    # NOTE(review): softmax over a single output unit is always 1.0, and
    # y_label is shaped [None, 1] for a 10-class problem — verify.
    y_predict = tf.nn.softmax(tf.matmul(h_fc2, W_fc3) + b_fc3)
    # Label placeholder
    y_label = tf.placeholder(tf.float32, [None, 1])
    mnist = get_data()
    # Cross-entropy loss
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # Adam optimizer training step
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # Accuracy computation
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
94dcd270edc78a4ff5e298595ae33a4a8bd5f9ae | Python | hungrygeek/RNA_clusterin_584 | /final/.svn/pristine/94/94dcd270edc78a4ff5e298595ae33a4a8bd5f9ae.svn-base | UTF-8 | 1,599 | 2.734375 | 3 | [] | no_license | import numpy as np
import random
#assign seqs to clusters based on distance matrix
def assignClusters(medoids, dMatrix):
    """Assign each point to its nearest medoid; medoids map to themselves."""
    nearest = np.argmin(dMatrix[:, medoids], axis=1)
    clusters = medoids[nearest]
    clusters[medoids] = medoids
    return clusters
#update the medoid based on the current cluster results
#update the medoid based on the current cluster results
def updateMedoids(kMedoidsCluster, dMatrix):
    # kMedoidsCluster: array of point indices that belong to one cluster.
    # Returns the global index of the member with the smallest summed
    # distance to the other members (the new medoid).
    # Mask everything EXCEPT the cluster-by-cluster sub-block; for numpy
    # masked arrays a mask value of 1 means "masked out".
    mask = np.ones(dMatrix.shape)
    mask[np.ix_(kMedoidsCluster,kMedoidsCluster)] = 0.
    clusterDis = np.ma.masked_array(data=dMatrix, mask=mask, fill_value=10e9)
    # Row sums ignore masked entries: each member's cost is its total
    # distance to its fellow cluster members. Rows outside the cluster are
    # fully masked, and argmin fills them with 10e9 so they can never win.
    costs = clusterDis.sum(axis=1)
    return costs.argmin(axis=0, fill_value=10e9)
#main method
#main method
def kMedoidsCluster(dMatrix, k,maxIter):
    # k-medoids clustering over a precomputed distance matrix dMatrix.
    # Returns, for each point, the index of the medoid it is assigned to.
    numPoints = dMatrix.shape[0]
    # Initialize k random medoids; retry until all k indices are distinct.
    currMedoids = np.array([-1]*k)
    while not len(np.unique(currMedoids)) == k:
        currMedoids = np.array([random.randint(0, numPoints - 1) for _ in range(k)])
    preMedoids = np.array([-1]*k)
    newMedoids = np.array([-1]*k)
    # Iterate until the medoids stop changing or maxIter is reached.
    # NOTE(review): if maxIter < 1 the loop body never runs and `clusters`
    # is unbound at the return — confirm callers always pass maxIter >= 1.
    counter=1
    while not ((preMedoids == currMedoids).all() or counter>maxIter):
        # Assign each point to the cluster with the closest medoid.
        clusters = assignClusters(currMedoids, dMatrix)
        # Update each cluster's medoid to its cost-minimizing member.
        for medoid in currMedoids:
            # NOTE: this local variable shadows the enclosing function name;
            # it holds the member indices of the current cluster.
            kMedoidsCluster = np.where(clusters == medoid)[0]
            newMedoids[currMedoids == medoid] = updateMedoids(kMedoidsCluster, dMatrix)
        preMedoids[:] = currMedoids[:]
        currMedoids[:] = newMedoids[:]
        counter=counter+1
    return clusters
| true |
d1a83f25b297c5472ab3d4280f853b3ee8fcfd6c | Python | TechInTech/dataStructure | /set_and_dictory/11.10/linkedbst.py | UTF-8 | 6,080 | 3.4375 | 3 | [] | no_license | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2019/2/24 11:26
# @Author : Despicable Me
# @Email :
# @File : linkedbst.py
# @Software: PyCharm
# @Explain :
from bstnode import BSTNode
from abstractcollection import AbstractCollection
from stack.linkedstack import LinkedStack
from list.linkedlist import LinkedList
from queue.linkedqueue import LinkedQueue
from math import log
class LinkedBST(AbstractCollection):
def __init__(self, sourceCollection = None):
self._root = None
AbstractCollection.__init__(self, sourceCollection)
def __contains__(self, item):
return self.find(item) != None
    def find(self, item):
        """Return the matching item from the tree, or None if absent.

        Standard BST search: descend left when item is smaller than the
        current node's data, right when larger.
        """
        def recurse(node):
            if node is None:
                return None
            elif item == node.data:
                return node.data
            elif item < node.data:
                return recurse(node.left)
            else:
                return recurse(node.right)
        return recurse(self._root)
def inorder(self):
"""中序遍历"""
lyst = list()
def recurse(node):
if node != None:
recurse(node.left)
lyst.append(node.data)
recurse(node.right)
recurse(self._root)
return iter(lyst)
def postorder(self):
"""后序遍历"""
lyst = list()
def recurse(node):
if node != None:
recurse(node.left)
recurse(node.right)
lyst.append(node.data)
recurse(self._root)
return iter(lyst)
def levelorder(self):
"""层级遍历"""
def recurse(queue):
if not queue.isEmpty():
node = queue.pop()
BSTlist.add(node.data)
if node.left != None:
queue.add(node.left)
if node.right != None:
queue.add(node.right)
recurse(queue)
BSTlist = LinkedList()
BSTqueue = LinkedQueue()
if not self.isEmpty():
BSTqueue.add(self._root)
recurse(BSTqueue)
return iter(BSTlist)
def preorder(self):
"""前序遍历(效率不高:线性运行时间和线性的内存使用)"""
lyst = list()
def recurse(node):
if node != None:
lyst.append(node.data)
recurse(node.left)
recurse(node.right)
recurse(self._root)
return iter(lyst)
# def __iter__(self):
# """前序遍历"""
# BSTstack = LinkedStack()
# node = self._root
# if node != None:
# BSTstack.push(node)
# while not BSTstack.isEmpty:
# newnode = BSTstack.pop()
# yield newnode.data
# if newnode.right != None:
# BSTstack.push(newnode.right)
# if newnode.left != none:
# BSTstack.push(newnode.left)
def __iter__(self):
"""前序遍历(推荐此方法)"""
if not self.isEmpty():
BSTstack = LinkedStack()
node = self._root
BSTstack.push(node)
while not BSTstack.isEmpty():
newnode = BSTstack.pop()
yield newnode.data
if newnode.right != None:
BSTstack.push(newnode.right)
if newnode.left != None:
BSTstack.push(newnode.left)
def __str__(self):
"""返回将树结构逆时针旋转90度之后的树结构"""
def recurse(node, level):
s = ""
if node != None:
s += recurse(node.right, level + 1)
s += "| " * level
s += str(node.data) + "\n"
s += recurse(node.left, level + 1)
return s
return recurse(self._root, 0)
def add(self, item):
"""在二叉搜索树中插入一项"""
def recurse(node):
if item < node.data:
if node.left == None:
node.left = BSTNode(item)
else:
recurse(node.left)
elif node.right == None:
node.right = BSTNode(item)
else:
recurse(node.right)
# If tree is empty, so new item goes at the root
if self.isEmpty():
self._root = BSTNode(item)
else:
recurse(self._root)
self._size += 1
def remove(self, item):
pass
def replace(self, item, newitem):
    """
    If item is in self, replaces it with newItem and
    returns True, or returns False."""
    node = self._root
    while node is not None:
        if node.data == item:
            node.data = newitem
            return True  # fixed: was the typo `retun True` (SyntaxError)
        elif node.data > item:
            # item sorts before node.data, so it can only live in the
            # LEFT subtree (the original descended right here).
            node = node.left
        else:
            node = node.right
    return False
def clear(self):
    """Reset the tree to the empty state."""
    self._root = None
    self._size = 0
def height(self):
    """Returns the height of the tree (the length of the longest path
    from the root to a leaf node).
    When len(t) < 2, t.height() == 0."""
    def recurse(node):
        # Counts nodes on the longest root-to-leaf path.
        if node is None:
            return 0
        return 1 + max(recurse(node.left), recurse(node.right))
    h = recurse(self._root)
    if not self.isEmpty():  # fixed: was `sef.isEmpty()` (NameError)
        h -= 1  # convert node count to edge count
    return h
def isBalance(self):
    # Balanced when height < 2*log2(n + 1) - 1 (AVL-style height bound).
    # NOTE(review): relies on `log` (presumably math.log) and len(self)
    # being defined elsewhere in this module/class — confirm the import.
    return self.height() < 2 * log(len(self) + 1, 2) - 1
def rebalance(self):
    """Rebuild the tree into balanced form by re-inserting the sorted
    items midpoint-first (only runs when the tree is unbalanced)."""
    def rebuild(data, first, last):
        # Insert the midpoint first so both halves stay balanced.
        if first <= last:
            mid = (first + last) // 2
            self.add(data[mid])
            rebuild(data, first, mid - 1)
            rebuild(data, mid + 1, last)
    if not self.isBalance():
        data = list(self.inorder())
        # fixed: removed leftover debug `print(data)`
        self.clear()
        rebuild(data, 0, len(data) - 1)
bf7824ec429d1d393c497c65e12427a9e021e9aa | Python | ken437/TOP500-machine-learning | /train_set_select_strats.py | UTF-8 | 2,067 | 2.875 | 3 | [] | no_license | """
training set size selection strategy that always
returns a size of 1
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def one_prev(test_pos):
    """Always train on just the single most recent dataset.

    @param test_pos: position of the test set
    @return: recommended train set size (in files)
    """
    return 1
"""
training set size selection strategy that always
returns a size of 2, unless the test set is
dataset #2, in which case it returns 1
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def two_prev(test_pos):
if test_pos == 2:
return 1
else:
return 2
"""
training set size selection strategy that always
returns a size of 3, unless the test set is before
dataset #4, in which case it uses all previous datasets
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def three_prev(test_pos):
if test_pos >= 4:
return 3
else:
return test_pos - 1
"""
training set size selection strategy that always
returns a size of 4, unless the test set is before
dataset #5, in which case it uses all previous datasets
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def four_prev(test_pos):
if test_pos >= 5:
return 4
else:
return test_pos - 1
"""
training set size selection strategy that
always includes all previous datasets
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def all_prev(test_pos):
return test_pos - 1
"""
training set size selection strategy that
includes the most recent half of the previous
datasets
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def half_prev(test_pos):
return test_pos // 2
"""
training set size selection strategy that
includes the most recent third of the previous
datasets
@param test_pos: position of the test set
@return: recommended train set size (in files)
"""
def third_prev(test_pos):
if test_pos == 2:
return 1
else:
return test_pos // 3
ALL_TRAIN_SET_SELECT_STRATS = [one_prev, two_prev, three_prev, four_prev, all_prev, half_prev, third_prev]
| true |
db6772a705619e5ca56375bcb9d6409a8a1a0a11 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_136/3261.py | UTF-8 | 307 | 3.453125 | 3 | [] | no_license |
def solve(C, F, X):
    """Minimum time (seconds) to accumulate X cookies.

    Production starts at 2 cookies/s; a farm costs C cookies and adds
    F cookies/s (Google Code Jam 2014, "Cookie Clicker Alpha").
    The computation was previously tangled into the stdin loop and ran
    on import; it is now a pure, testable function behind a main guard.
    """
    rate = 2
    time = 0
    while True:
        if X / rate < C / rate:
            # The goal is cheaper than the next farm: just finish.
            return time + X / rate
        time += C / rate  # wait until a farm is affordable
        # Buy only if finishing at the boosted rate beats finishing now
        # with the C cookies already banked.
        if X / (rate + F) < (X - C) / rate:
            rate += F
        else:
            return time + (X - C) / rate


def main():
    T = int(raw_input())
    for i in range(T):
        C, F, X = map(float, raw_input().split())
        print("Case #" + str(i + 1) + ": " + str(solve(C, F, X)))


if __name__ == "__main__":
    main()
| true |
69f0304d8bd1d85c5528fe6c9d3ddf80b885d8ab | Python | gaaalmeida/trab_benchmark | /bench/benchmark/score.py | UTF-8 | 367 | 3.1875 | 3 | [
"MIT"
def calcScore(t, w):
    # Weighted score: shrink the elapsed time by 1/w of itself and
    # express the result on a x100 scale.
    return 100 * (t - t / w)
def getScore(bm_time):
    """Turn the three benchmark timings into weighted scores plus a total.

    Weights (translated from the original Portuguese comments):
    write files -> 10, process data -> 7, read files/RAM -> 3.
    bm_time must hold the timings in that reversed order: [read, process, write].
    """
    weights = (3, 7, 10)
    scores = [calcScore(t, w) for t, w in zip(bm_time, weights)]
    scores.append(sum(scores))
    return scores
493f163e6a518bd68bf91c1e8dddac97ab095574 | Python | smautner/ubergauss | /ubergauss/optimization/blackboxTPE.py | UTF-8 | 3,608 | 2.578125 | 3 | [] | no_license |
import numpy as np
from sklearn.neighbors import KernelDensity
import multiprocessing as mp
from matplotlib.patches import Ellipse
from lmz import *
def mpmap(func, iterable, chunksize=1, poolsize=5):
    """Parallel map over `iterable` using a pool of `poolsize` processes."""
    workers = mp.Pool(poolsize)
    mapped = workers.map(func, iterable, chunksize=chunksize)
    # close + join waits for the workers to shut down cleanly.
    workers.close()
    workers.join()
    return list(mapped)
class TPE:
    '''
    Black-box minimizer in the spirit of a Tree-structured Parzen
    Estimator: pure random search for the first `n_init` evaluations,
    then new candidates are drawn from 1-D kernel density estimates
    fitted to the best parameter sets observed so far.

    we should discuss how the space is defined:
    space = [(mi,ma),...],{a:(mi,ma)}
    '''
    def __init__(self,f, space, n_init =10, kde_top = 5):
        # f: objective taking (positional_list, kwargs_dict), lower is better
        # space: ([(lo, hi), ...], {name: (lo, hi)}) search bounds
        # n_init: purely random samples before the KDE model kicks in
        # kde_top: how many of the best observations feed the KDE fit
        self.f = f
        self.space = space
        self.n_init = n_init
        self.params = []
        self.values = []
        self.kde_top = kde_top
    def register(self,params:list, values: list):
        """Record evaluated parameter sets and their objective values."""
        self.params+=params
        self.values+= values
    def suggest(self, n= 5):
        """Propose n parameter sets: random until n_init observed, then KDE samples."""
        if self.n_init > len(self.params):
            return [self.randomsample() for i in range(n)]
        else:
            self.TPEfit()
            return [self.TPEsample() for i in range(n)]
    def randomsample(self):
        # returns 1 random parameter set (uniform within each bound)
        return [np.random.uniform(a,b) for a,b in self.space[0]],\
            {k:np.random.uniform(a,b) for k,(a,b) in self.space[1].items()}
    def minimize(self,n_iter=20, n_jobs = 5):
        """Optimization loop: evaluate n_jobs suggestions per round in parallel."""
        while n_iter > 0:
            args = self.suggest(n_jobs)
            values = mpmap(self.f,args)
            self.register(args, values)
            n_iter -= n_jobs
    ################
    # TPE model
    ##################
    def TPEfit(self):
        """Fit one 1-D KDE per positional dimension on the kde_top best parameter sets."""
        assert len(self.params) > 1, 'do some random guessing first'
        #num_v = int(len(self.values)/2)# or self.kde_top
        num_v = self.kde_top
        # argsort ascending: the lowest (= best) objective values come first
        goodparams = [self.params[i] for i in np.argsort(self.values)[:num_v] ]
        self.tpemodels = [ self._fitkd([l[i] for l,_ in goodparams]) for i in range(len(self.space[0]))]
        #exp = [kd.bandwidth for kd in self.tpemodels]
        #for l,_ in goodparams: plt.Circle(l,exp[0])
    def _fitkd(self, values):
        # n**(-1./(d+4)) heuristic to determine bandwith; n= num_sampled, d = dimensions
        # NOTE(review): the std/5 choice below looks empirical — confirm.
        bandwidth = np.std(values)/5 #len(values)**(-1/5)
        print(f"{ bandwidth = }")
        kd = KernelDensity(kernel='gaussian', bandwidth = bandwidth)
        kd.fit(np.array(values).reshape(-1,1))
        return kd
    def TPEsample(self):
        """Draw one candidate: one sample from each per-dimension KDE (kwargs unsupported)."""
        return [kd.sample()[0,0] for kd in self.tpemodels],{}
    ################
    # BAYES model ..
    ##################
    # NOTE(review): the Bayes variant below is broken/dead code —
    # `bayes`, `Transpose` (from lmz?) and `bay` are unresolved here and
    # the zip/unpack arity in Bayesfit does not match; calling any of
    # these raises NameError. Kept verbatim pending a decision.
    def Bayesfit(self):
        assert len(self.params) > 1, 'do some random guessing first'
        self.baymodels = [ self._fitbayes(s,[(l[i],v) for s,v,(l,_) in zip(self.values,self.params)]) for i,s in enumerate(self.space[0])]
    def _fitbayes(self, space, xyL):
        X,y = Transpose(xyL)
        bayes().fit(space, X,y)
    def Bayessample(self):
        return [bay.sample() for kd in self.baymodels],{}
def myf(args):
    """Toy objective: L1 distance of the positional parameters from the origin."""
    x_coord, y_coord = args[0]
    return abs(x_coord) + abs(y_coord)
if __name__ == "__main__":
    # Smoke test: minimize |x| + |y| over [-100, 100]^2, plotting each
    # batch of suggested points (the sixel backend renders in-terminal).
    opti = TPE(myf,([(-100,100),(-100,100)],{}),n_init = 10)
    n_iter = 30
    n_jobs = 5
    import matplotlib
    matplotlib.use("module://matplotlib-sixel")
    import matplotlib.pyplot as plt
    while n_iter > 0:
        # Manual unrolling of opti.minimize() so each batch can be plotted.
        args = opti.suggest(n_jobs)
        values = mpmap(opti.f,args)
        opti.register(args, values)
        tmp = np.array([a for a,b in args])  # positional params only
        n_iter -= n_jobs
        plt.scatter(tmp[:,0], tmp[:,1])
        plt.show()
        plt.close()
        print(np.mean(values))
    #opti.minimize()
    print ( [p for p,d in opti.params] )
    print (opti.values)
e155abeccbdd69686b26b3e7f71cda0748299b41 | Python | maciek16180/masters_thesis | /SQuAD/layers/MaskedSoftmaxLayer.py | UTF-8 | 981 | 2.8125 | 3 | [] | no_license | import theano.tensor as T
from lasagne.layers import MergeLayer
class MaskedSoftmaxLayer(MergeLayer):
    '''
    Row-wise softmax over a 2D tensor where `mask` marks which entries
    of each row are real input, so rows may hold sequences of different
    lengths. Masked-out positions get zero probability.
    '''
    def __init__(self, incoming, mask, **kwargs):
        assert len(incoming.output_shape) == 2
        assert len(mask.output_shape) == 2
        super(MaskedSoftmaxLayer, self).__init__([incoming, mask], **kwargs)

    def get_output_for(self, inputs, **kwargs):
        assert len(inputs) == 2
        data, mask = inputs
        # Subtract the per-row max for numerical stability, zero out the
        # masked positions, then normalize each row to sum to one.
        row_max = data.max(axis=1).dimshuffle(0, 'x')
        masked_exp = T.exp(data - row_max) * mask
        row_sum = masked_exp.sum(axis=1).dimshuffle(0, 'x')
        return masked_exp / row_sum

    def get_output_shape_for(self, input_shapes, **kwargs):
        assert len(input_shapes) == 2
        return input_shapes[0]
| true |
b858787ea196b99bdf4e970883b3d830ed42789b | Python | fireairforce/leetCode-Record | /程序员面试经典/面试题 01.03. URL化.py | UTF-8 | 166 | 3.125 | 3 | [
"MIT"
class Solution:
    def replaceSpaces(self, S: str, length: int) -> str:
        """URL-ify: first `length` chars of S with each space as '%20'."""
        prefix = S[:length]
        return prefix.replace(' ', '%20')
# Swap every '1' with '9' (and vice versa) in the input line.
# str.maketrans flips both digits in a single pass; the original
# temporary-placeholder trick ('1'->'x', '9'->'1', 'x'->'9') corrupts
# any input that already contains 'x'.
s = input()
print(s.translate(str.maketrans("19", "91")))
class BankAccount:
    """Minimal bank account keeping a running balance."""

    def __init__(self):
        # Every account starts empty.
        self.balance = 0

    def greet(self, name):
        print("Welcome", name)

    def deposite(self, amount):
        self.balance = self.balance + amount

    def withdraw(self, amount):
        self.balance = self.balance - amount

    def print_blance(self):
        print("Current Balance is", self.balance)
def main():
    # Demo: create two accounts, greet their owners, deposit, and show
    # each balance — same output order as the original hand-written pair.
    for owner, amount in (("Kobi", 400), ("Roger", 20)):
        account = BankAccount()
        account.greet(owner)
        account.deposite(amount)
        account.print_blance()
# Run the demo only when this file is executed directly, not on import.
if __name__ == "__main__":
    main()
| true |
c6c6b68dab26b520395869232dde3d7a8d969f72 | Python | iyeranush/100daysOfCoding | /014_reverse_array_inplace.py | UTF-8 | 514 | 3.890625 | 4 | [] | no_license | # Time Complexity: O(n/2) = O(n)
# Time complexity: O(n/2) = O(n)
# Space complexity: constant — in-place swapping
def swap(a, b):
    """Return the two values in exchanged order.

    Python tuple packing makes the explicit temp variable unnecessary.
    """
    return b, a
def reverse_inplace(arr):
    """Reverse `arr` in place with O(1) extra space (two-pointer swap).

    The original guarded against swapping an index with itself, but for
    i in range(len(arr) // 2) that can never happen — the check was dead.
    """
    left, right = 0, len(arr) - 1
    while left < right:
        arr[left], arr[right] = arr[right], arr[left]
        left += 1
        right -= 1
def main():
    # Read space-separated integers, reverse them in place, echo them back.
    values = [int(token) for token in input().strip().split(' ')]
    reverse_inplace(values)
    print(' '.join(str(value) for value in values))
# Script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| true |
d92dfed6be7619757425cfc516990fa2fe7e0195 | Python | Wanbli83470/P11_ESTIVAL_THOMAS | /RENDU/test_P11_03_update.py | UTF-8 | 1,316 | 3.28125 | 3 | [] | no_license | import unittest #Test tools
import datetime #For get the date
from P11_01_codesource import update #My project
date_test = datetime.datetime.now()
"""standardize the date for the test"""
# strftime zero-pads month and day itself, replacing the original
# four-branch if/elif padding chain (which printed in every branch).
date_test = date_test.strftime("%Y-%m-%d")
print(date_test)
# Today's date as "YYYY-MM-DD", compared against the date the DB update recorded.
DATE_TEST = str(date_test)
class WidgetTestCase(unittest.TestCase):
    """Were the updates done? Verified by comparing today's date (from
    the datetime module) with the date recorded in the database."""
    def setUp(self):
        """Run the update and capture the date it reports."""
        print("\n\n---------- TEST DE LA MISE À JOUR BDD ----------\n\n")
        self.date_control = update()
    def test_date(self):
        """The SQL update date must equal today's date."""
        self.assertEqual(self.date_control, DATE_TEST)
        print("\n\n---------- TEST DE LA MISE À JOUR BDD OK ----------\n\n")
# Run the DB-update verification test when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
ccc8a9288566421dcd6f3af50f127318cec6196b | Python | mod-1/networking2 | /Alice.py | UTF-8 | 2,066 | 2.71875 | 3 | [] | no_license | from socket import *
import sys
import zlib
import time
class UDPClient:
    """Stop-and-wait (alternating-bit) UDP sender.

    Reads the whole message from stdin, splits it into 50-character
    chunks, frames each chunk as
    <2-digit checksum length><CRC32 checksum><seq_no><payload>,
    and retransmits on timeout until a valid ACK with the matching
    sequence number arrives.
    """
    def start(self, unreliNetPort, start):
        # unreliNetPort: UDP port of the (unreliable) relay on localhost.
        # start: wall-clock start time; elapsed time is printed at the end.
        servername = 'localhost'
        serverport = unreliNetPort
        clientsocket = socket(AF_INET, SOCK_DGRAM)
        clientsocket.settimeout(0.05)  # short timeout drives retransmission
        seq_no = '0'  # alternating-bit sequence number, kept as a string
        message = sys.stdin.read()
        size = len(message.encode())
        i = 0
        while size > 0:
            # Build the frame: sequence number + up to 50 chars of payload,
            # then prepend the CRC32 checksum and its 2-digit length.
            buffer = seq_no + message[i:i+50]
            checksum, length = self.gen_checksum(buffer.encode())
            buffer = length + checksum + buffer
            clientsocket.sendto(buffer.encode(), (servername, serverport))
            print('message sent: ' + buffer)
            try:
                modifiedMessage, addr = clientsocket.recvfrom(1024)
            except timeout:
                # ACK lost or late: resend the same frame.
                continue
            print('response received: '.encode() + modifiedMessage)
            if self.parse_response(modifiedMessage.decode(), seq_no):
                # Valid ACK: advance to the next chunk and flip the bit.
                i = i + 50
                size = size - 50
                seq_no = self.compliment_seqn(seq_no)
        clientsocket.close()
        end = time.time()
        print(end-start)
    def gen_checksum(self, bytes):
        """Return (crc32 as a decimal string, its zero-padded 2-digit length)."""
        checksum = zlib.crc32(bytes)
        length = len((str(checksum)).encode())
        if length < 10:
            length = '0' + str(length)
        else:
            length = str(length)
        return str(checksum), length
    def compliment_seqn(self, sqn):
        # Flip the alternating-bit sequence number: '0' <-> '1'.
        if sqn == '0':
            return '1'
        return '0'
    def parse_response(self, message, sqn):
        """Validate an ACK frame: recompute the CRC over the payload and
        require both the checksum and the trailing sequence number to
        match; returns True only for a good ACK."""
        if message[0:2].isdigit():
            length = int(message[0:2])
            checksum = message[2: 2 +length]
            message_actual = message[2 + length:]
            chksm = zlib.crc32(message_actual.encode())
            seq_no = message_actual[-1]
            if str(chksm) == checksum and seq_no == sqn:
                return True
        return False
if __name__ == '__main__':
    # Usage: python Alice.py <unreliNetPort>
    # `start` is captured before construction so the elapsed time printed
    # by start() covers the whole transfer.
    start = time.time()
    myclient = UDPClient()
    myclient.start(int(sys.argv[1]), start)
| true |
c543e8ae0a14ac7793b845af5007e3b4bfa9dbd9 | Python | cgdilley/AdventOfCode2018 | /day07_1/day07_1.py | UTF-8 | 2,966 | 3.125 | 3 | [] | no_license | import re
# Matches "Step X must be finished before step Y can begin." capturing (X, Y).
REGEX = re.compile(r"Step (.) .* step (.)")
# Duration of each step: 60s base + 1-based letter index (A=61 ... Z=86).
SECONDS = {val: index + 61 for index, val in enumerate("ABCDEFGHIJKLMNOPQRSTUVWXYZ")}
def main():
    """Parse the puzzle input, build the dependency graph, then run the
    timed build with 5 workers and print the completion order."""
    with open("../input/day7.txt", "r") as handle:
        edges = [read_instruction(raw) for raw in handle]
    graph = merge_instructions(edges)
    finished = execute_build(graph, 5)
    print("Result: %s" % "".join(step for step in finished))
def read_instruction(s):
    """Parse one edge line into a record: group(1) must precede group(2)."""
    parsed = REGEX.match(s)
    return {
        "self": parsed.group(2),
        "parent": parsed.group(1),
        "children": [],
    }
def merge_instructions(instructions):
    """Collapse per-edge records into one node record per step,
    accumulating each step's parents and children."""
    merged = dict()
    for edge in instructions:
        child, parent = edge["self"], edge["parent"]
        child_node = merged.setdefault(
            child, {"parents": [], "self": child, "children": []})
        child_node["parents"].append(parent)
        parent_node = merged.setdefault(
            parent, {"parents": [], "self": parent, "children": []})
        parent_node["children"].append(child)
    return merged
def execute_build(instructions, worker_count):
    """Simulate the timed build: each second, hand the alphabetically-first
    ready step to an idle worker. Returns steps in completion order."""
    workers = [[] for _ in range(worker_count)]  # per-worker timeline of task letters
    time = 0
    available = set(instructions.keys())  # steps not yet queued
    completed = []
    while len(completed) < len(instructions):
        while True:
            # Queue every ready step while an idle worker remains.
            top_choice = get_next_choice(instructions, available, completed)
            if top_choice is None:
                break
            queued = queue_instruction(workers, top_choice["self"], time)
            if queued:
                available.remove(top_choice["self"])
            else:
                break  # all workers busy this second
        time += 1
        for worker in workers:
            # A worker whose timeline ends exactly now just finished its task.
            if len(worker) == time and worker[-1] != "-":
                completed.append(worker[-1])
    duration = max([len(worker) for worker in workers])
    print("Total duration = %d" % duration)
    # for i in range(duration):
    #     s = ""
    #     for worker in workers:
    #         s += "-" if len(worker) <= i else worker[i]
    #     print(s)
    return completed
def queue_instruction(workers, next_instruction, time):
    """Assign next_instruction to the first idle worker; True on success,
    False when every worker is still busy at `time`."""
    for timeline in workers:
        if len(timeline) <= time:
            # Pad with idle markers up to `time`, then occupy this worker
            # for the task's full duration.
            timeline.extend(['-'] * (time - len(timeline)))
            timeline.extend([next_instruction] * SECONDS[next_instruction])
            return True
    return False
def get_next_choice(instructions, remaining, seen):
    """Alphabetically-first step in `remaining` whose parents are all in
    `seen`; None when nothing is ready."""
    best = None
    for name in remaining:
        candidate = instructions[name]
        if all(parent in seen for parent in candidate["parents"]):
            if best is None or candidate["self"] < best["self"]:
                best = candidate
    return best
main()
| true |
b66f19d654bee9eb71f63ae78ed2d625e1bbda73 | Python | OwnDie/python_ex | /ex15_2.py | UTF-8 | 347 | 2.546875 | 3 | [] | no_license | import re
def parse_sh_ip_int_br(filen_name):
    """Extract (interface, ip, status, protocol) tuples from a saved
    `show ip interface brief` capture file."""
    regex = '(\S+) +([\d.]+|unassigned) +\w+ +\w+ +(up|down|administratively down) +(up|down)'
    with open(filen_name, 'r') as src:
        output = src.read()
    return [found.groups() for found in re.finditer(regex, output)]
if __name__ == '__main__':
    # Ad-hoc check: parse the sample capture shipped next to this script.
    print(parse_sh_ip_int_br('sh_ip_int_br.txt'))
6daf9975c020d14dcf20c449cbc2349cdc6c673d | Python | infcnwangjie/opencv | /宝贵的测试经验/grow_test.py | UTF-8 | 2,907 | 2.875 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Initial seed selection
def originalSeed(gray, th):
    """Pick one seed pixel per connected bright region of `gray`.

    gray: single-channel image; th: binarization threshold.
    Returns a list of (row, col) seed coordinates.
    """
    ret, thresh = cv2.threshold(gray, th, 255, cv2.THRESH_BINARY)  # binary seed regions (different thresholds yield different seeds)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))  # 3x3 elliptical structuring element
    thresh_copy = thresh.copy()  # working copy, consumed region by region
    thresh_B = np.zeros(gray.shape, np.uint8)  # same size as thresh, all zeros
    seeds = []  # collected seed coordinates
    # Loop until every pixel in thresh_copy has been cleared
    while thresh_copy.any():
        Xa_copy, Ya_copy = np.where(thresh_copy > 0)  # coordinates of remaining 255 pixels
        thresh_B[Xa_copy[0], Ya_copy[0]] = 255  # mark the first one as the region starter
        # Connected-component growth: dilate thresh_B, then AND with thresh
        for i in range(200):
            dilation_B = cv2.dilate(thresh_B, kernel, iterations=1)
            thresh_B = cv2.bitwise_and(thresh, dilation_B)
        # Clear the recovered component from thresh_copy
        Xb, Yb = np.where(thresh_B > 0)
        thresh_copy[Xb, Yb] = 0
        # Erode until only one pixel of the component remains
        while str(thresh_B.tolist()).count("255") > 1:
            thresh_B = cv2.erode(thresh_B, kernel, iterations=1)  # erosion
        X_seed, Y_seed = np.where(thresh_B > 0)  # surviving seed coordinate
        if X_seed.size > 0 and Y_seed.size > 0:
            seeds.append((X_seed[0], Y_seed[0]))  # record the seed
        thresh_B[Xb, Yb] = 0  # reset thresh_B for the next region
    return seeds
#区域生长
def regionGrow(gray, seeds, thresh, p):
seedMark = np.zeros(gray.shape)
#八邻域
if p == 8:
connection = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
elif p == 4:
connection = [(-1, 0), (0, 1), (1, 0), (0, -1)]
#seeds内无元素时候生长停止
while len(seeds) != 0:
#栈顶元素出栈
pt = seeds.pop(0)
for i in range(p):
tmpX = pt[0] + connection[i][0]
tmpY = pt[1] + connection[i][1]
#检测边界点
if tmpX < 0 or tmpY < 0 or tmpX >= gray.shape[0] or tmpY >= gray.shape[1]:
continue
if abs(int(gray[tmpX, tmpY]) - int(gray[pt])) < thresh and seedMark[tmpX, tmpY] == 0:
seedMark[tmpX, tmpY] = 255
seeds.append((tmpX, tmpY))
return seedMark
path = "_rg.jpg"
img = cv2.imread(path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#hist = cv2.calcHist([gray], [0], None, [256], [0,256])#直方图
seeds = originalSeed(gray, th=253)
seedMark = regionGrow(gray, seeds, thresh=3, p=8)
#plt.plot(hist)
#plt.xlim([0, 256])
#plt.show()
cv2.imshow("seedMark", seedMark)
cv2.waitKey(0) | true |
# Count the three-digit numbers whose digits are distinct values from l1.
# itertools.permutations replaces the triple nested loop with its manual
# a!=b / b!=c / a!=c distinctness checks, yielding the same numbers in
# the same order.
from itertools import permutations

l1 = [1, 2, 3, 4]
l2 = [a * 100 + b * 10 + c for a, b, c in permutations(l1, 3)]
s = len(l2)
print(l2)
print("可以组成%d个无重复三位数" % s)