blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bcc19f7a10ba031740bbf674176f4414eaa094a1 | Python | jsbed/Design-III-Robot | /Robot/locators/contour/contours_finder.py | UTF-8 | 1,145 | 2.75 | 3 | [] | no_license | import cv2
import numpy
def find_extracted_shape_contour(img):
    """Return the contours of a shape in BGR image *img*.

    Converts to grayscale, then runs cv2.findContours with RETR_LIST and
    CHAIN_APPROX_SIMPLE.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # NOTE(review): indexing findContours(...)[1] matches the OpenCV 3.x
    # return layout (image, contours, hierarchy); OpenCV 2.x/4.x return the
    # contours at index [0] — confirm the pinned OpenCV version.
    contours = cv2.findContours(gray, cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[1]
    return contours
def find_cube_corners_contours(img):
    """Locate the corner points of the dominant polygonal contour (a cube
    face) in BGR image *img*; delegates to _extract_corners_from_contours."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # [1] assumes the OpenCV 3.x findContours return layout — see the note
    # in find_extracted_shape_contour.
    contours = cv2.findContours(gray, cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)[1]
    return _extract_corners_from_contours(contours)
def get_central_pixel_from_contour(contour):
    """Return the (x, y) centroid of *contour*, computed from its image
    moments.

    Note: raises ZeroDivisionError for a degenerate contour whose area
    moment m00 is zero.
    """
    moments = cv2.moments(contour)
    centroid_x = int(moments['m10'] / moments['m00'])
    centroid_y = int(moments['m01'] / moments['m00'])
    return centroid_x, centroid_y
def _extract_corners_from_contours(contours):
    """Return the approximated corner points of the largest qualifying
    contour, squeezed to an (N, 2) array.

    A contour qualifies when its area exceeds 100 and its polygonal
    approximation has fewer than 8 vertices. When no contour qualifies,
    this returns numpy.squeeze(None) — callers should be prepared for a
    non-array result in that case.
    """
    biggest = None
    max_area = 0
    for i in contours:
        area = cv2.contourArea(i)
        if area > 100:  # skip tiny/noise contours
            peri = cv2.arcLength(i, True)
            # Approximate the closed contour with 2% arc-length tolerance.
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            if area > max_area and len(approx) < 8:
                biggest = approx
                max_area = area
    return numpy.squeeze(biggest)
| true |
58ef1d126b08ec8ff06432e444b111e3b564cfe3 | Python | ArnabBasak/PythonRepository | /python programs/python alphabetic order.py | UTF-8 | 144 | 3.8125 | 4 | [] | no_license | # python program for alphabetic order
# Read a line of text, then print its whitespace-separated words in
# ascending alphabetical order, one per line.
my_string = input("enter any string")
for word in sorted(my_string.split()):
    print(word)
f6cbc2147e794e612c6242684dddecaf56b07b17 | Python | sestevez/HLT-Opt | /hltopt/utils.py | UTF-8 | 1,044 | 3.9375 | 4 | [
"MIT"
] | permissive | def pow2value(number):
    """Yield successive powers of two (2, 4, 8, ...) until one reaches or
    exceeds *number*.

    Yields nothing when number <= 1; the final yielded value is the first
    power of two >= number.
    """
    n = 1
    while n < number:
        n = 2 * n
        yield n
def pow2(exp):
    """Yield the powers of two 2**1, 2**2, ..., 2**exp in ascending order."""
    for exponent in range(1, exp + 1):
        yield 2 ** exponent
def convert_BtoI(vector):
    """Convert a boolean vector to a vector of 0/1 integers.

    Replaces the original ``i == True`` comparison (an equality
    anti-pattern) with plain truthiness, and the append loop with a
    comprehension. Identical for the documented boolean inputs.
    """
    return [1 if flag else 0 for flag in vector]
def convert_ItoB(vector):
    """Convert a vector of 0/1 integers to a boolean vector (an entry is
    True exactly when it equals 1)."""
    return [item == 1 for item in vector]
def index(vector, count):
    """Map a boolean *vector* to an integer index in [0, count].

    Interprets *vector* as a binary fraction — bit i contributes
    1 / 2**(i + 1) — then scales by *count* and truncates. Depends on the
    sibling helpers convert_BtoI and pow2.
    """
    value = 0
    newvector = convert_BtoI(vector)  # 0/1 representation of the bits
    l = pow2(len(vector))  # generator of 2**1 .. 2**len(vector)
    l = list(l)
    for i in range(len(vector)):
        value += newvector[i] * (1.0 / l[i])  # bit i weighs 1/2**(i+1)
    return int(value * count)
| true |
88d98bde1d93fbd7d2201959355f1fa9f914a28a | Python | harshitpoddar09/InterviewBit-Solutions | /Programming/Math/Base Conversion/Excel Column Title.py | UTF-8 | 245 | 3.3125 | 3 | [] | no_license | class Solution:
# @param A : integer
# @return a strings
def convertToTitle(self, A):
ans=''
while A:
A=A-1
i=A%26
ans=chr(i+65)+ans
A=A//26
return ans | true |
f62641462600e9f84ad2307c1013c2dd80f7ae5f | Python | hangelwen/Bayesian-Optimization-with-Gaussian-Processes | /examples/visualization.py | UTF-8 | 9,190 | 2.84375 | 3 | [] | no_license | # Python 2.7 users.
from __future__ import print_function
from __future__ import division
import numpy
import matplotlib.pyplot as plt
from math import log, fabs, sqrt, exp
from bayes_opt.bo.bayes_opt import bayes_opt
from bayes_opt.bo.GP import GP
from bayes_opt.support.objects import acquisition
# ------------------------------ // ------------------------------ // ------------------------------ #
# ------------------------------ // ------------------------------ // ------------------------------ #
def my_function1(x):
    """Smooth 1-D test objective: a mixture of four Gaussian bumps centered
    near x = 2, 5, 8 and 11."""
    value = (2.5 * exp(-(x - 2) ** 2)
             + 5 * exp(-(x - 5) ** 2)
             + 3 * exp(-(x / 2 - 4) ** 2)
             + 8 * exp(-2 * (x - 11) ** 2))
    return value
def my_function2(x):
    """Sharper 1-D test objective: four positive Gaussian bumps (one very
    narrow near x = 0.1) plus a negative dip near x = 9."""
    value = (exp(-(x - 2) ** 2)
             + 5 * exp(-(x - 5) ** 2)
             + 3 * exp(-(x / 2 - 4) ** 2)
             + 8 * exp(-10 * (x - 0.1) ** 2)
             - exp(-2 * (x - 9) ** 2))
    return value
# ------------------------------ // ------------------------------ // ------------------------------ #
# ------------------------------ // ------------------------------ // ------------------------------ #
def show_functions(grid, log_grid):
    """Plot my_function1 on *grid* (first figure), then my_function2 on the
    linear *grid* and on the normalized logarithmic *log_grid* (second
    figure, two panels)."""
    data1 = numpy.asarray([my_function1(x) for x in grid])
    data2 = numpy.asarray([my_function2(x) for x in numpy.arange(0.01, 13, 0.01)])

    # Figure 1: my_function1 on the linear grid.
    ax1 = plt.subplot(1, 1, 1)
    ax1.grid(True, color='k', linestyle='--', linewidth=1, alpha=0.5)
    p1, = ax1.plot(grid, data1)
    plt.show()

    # Figure 2: my_function2 on linear (top) and log-mapped (bottom) axes.
    ax2 = plt.subplot(2, 1, 1)
    ax2.grid(True, color='k', linestyle='--', linewidth=1, alpha=0.5)
    p2, = ax2.plot(grid, data2)
    ax3 = plt.subplot(2, 1, 2)
    ax3.grid(True, color='k', linestyle='--', linewidth=1, alpha=0.5)
    p3, = ax3.plot(log_grid, data2)
    plt.show()
# ------------------------------ // ------------------------------ // ------------------------------ #
def gp1(grid):
    """Fit Gaussian processes to samples of my_function1 and plot the
    predicted mean with 95% bounds: 4 fixed training points (top panel)
    versus 14 equispaced training points with different hyperparameters
    (bottom panel)."""
    x = numpy.asarray([3, 6, 8, 10]).reshape((4, 1))
    y = numpy.asarray([my_function1(x[0]) for x in x])
    gp = GP(kernel='squared_exp', theta=1, l=1)
    gp.fit(x, y)
    mean, var = gp.fast_predict(grid.reshape((len(grid), 1)))

    # ------------------------------ // ------------------------------ #
    ax1 = plt.subplot(2, 1, 1)
    ax1.grid(True, color='k', linestyle='--', linewidth=0.8, alpha=0.4)
    p1, = ax1.plot(x, y, 'b-', marker='o', color='k')
    # 95% bounds: mean +/- 2 standard deviations (fabs guards tiny negatives).
    p2, = ax1.plot(grid, [(mean[i] + 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p3, = ax1.plot(grid, [(mean[i] - 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p4, = ax1.plot(grid, numpy.asarray([my_function1(x) for x in grid]), 'm--')
    p5, = ax1.plot(grid, [mean[i] for i in range(len(mean))], 'y')
    p1.set_linestyle(' ')
    ax1.legend([p1, p2, p3, p4, p5],
               ['Data', 'Upper 95% bound', 'Lower 95% bound', 'True function', 'Predicted mean'], loc=2)

    # ------------------------------ // ------------------------------ #
    x = numpy.arange(0, 14, 1).reshape((14, 1))
    y = numpy.asarray([my_function1(x[0]) for x in x])
    gp = GP(kernel='squared_exp', theta=0.5, l=.9)
    gp.fit(x, y)
    mean, var = gp.fast_predict(grid.reshape((len(grid), 1)))

    # ------------------------------ // ------------------------------ #
    ax2 = plt.subplot(2, 1, 2)
    ax2.grid(True, color='k', linestyle='--', linewidth=.8, alpha=0.4)
    p12, = ax2.plot(x, y, 'b-', marker='o', color='k')
    p22, = ax2.plot(grid, [(mean[i] + 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p32, = ax2.plot(grid, [(mean[i] - 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p42, = ax2.plot(grid, numpy.asarray([my_function1(x) for x in grid]), 'm--')
    p52, = ax2.plot(grid, [mean[i] for i in range(len(mean))], 'y')
    p12.set_linestyle(' ')
    ax2.legend([p12, p22, p32, p42, p52],
               ['Data', 'Upper 95% bound', 'Lower 95% bound', 'True function', 'Predicted mean'], loc=2)
    plt.show()
# ------------------------------ // ------------------------------ // ------------------------------ #
def find_max(grid):
    """Run Bayesian optimization on my_function1 over x in (0, 13), plot the
    sampled points against the target function, and return the sampled x
    values."""
    bo = bayes_opt(my_function1, {'x': (0, 13)})
    ymax, xmax, y, x = bo.maximize(init_points=5, full_out=True)
    ax = plt.subplot(1, 1, 1)
    ax.grid(True, color='k', linestyle='--', linewidth=.8, alpha=0.4)
    p1, = ax.plot(x, y, 'b-', marker='o', color='k')
    p1.set_linestyle(' ')  # markers only, no connecting line
    p2, = ax.plot(grid, numpy.asarray([my_function1(x) for x in grid]), 'm-')
    ax.legend([p1, p2],
              ['Sampled points', 'Target function'], loc=2)
    plt.show()
    return x
# ------------------------------ // ------------------------------ // ------------------------------ #
def gp2(grid, sampled_x):
    """Fit a GP to the Bayesian-optimization samples of my_function1 and
    plot the posterior with 95% bounds (top panel) along with normalized
    UCB / EI / PoI acquisition functions (bottom panel)."""
    x = sampled_x
    y = numpy.asarray([my_function1(x) for x in x])
    gp = GP(kernel='squared_exp')
    gp.best_fit(x, y)
    mean, var = gp.fast_predict(grid.reshape((len(grid), 1)))
    ymax = y.max()
    ac = acquisition()
    ucb = ac.full_UCB(mean, var)
    ei = ac.full_EI(ymax, mean, var)
    poi = ac.full_PoI(ymax, mean, var)

    # ------------------------------ // ------------------------------ #
    ax1 = plt.subplot(2, 1, 1)
    ax1.grid(True, color='k', linestyle='--', linewidth=0.8, alpha=0.4)
    p1, = ax1.plot(x, y, 'b-', marker='o', color='k')
    p2, = ax1.plot(grid, [(mean[i] + 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p3, = ax1.plot(grid, [(mean[i] - 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p4, = ax1.plot(grid, numpy.asarray([my_function1(x) for x in grid]), 'm--')
    p5, = ax1.plot(grid, [mean[i] for i in range(len(mean))], 'y')
    p1.set_linestyle(' ')
    ax1.legend([p1, p2, p3, p4, p5],
               ['Data', 'Upper 95% bound', 'Lower 95% bound', 'True function', 'Predicted mean'], loc=3)
    ax2 = plt.subplot(2, 1, 2)
    ax2.grid(True, color='k', linestyle='--', linewidth=0.8, alpha=0.4)
    # Acquisitions normalized to [0, 1] for comparison on one axis.
    p21, = ax2.plot(grid, ucb/ucb.max(), 'r')
    p22, = ax2.plot(grid, ei/(ei.max() + 1e-6), 'orange')
    p23, = ax2.plot(grid, poi, 'green')
    ax2.legend([p21, p22, p23], ['Upper Confidence Bound', 'Expected Improvement', 'Probability of Improvement'], loc=3)
    plt.show()
# ------------------------------ // ------------------------------ // ------------------------------ #
def find_max_log(grid, log_grid):
    """Run Bayesian optimization on my_function2 over x in (0.01, 13) using
    log-scaled sampling, plot the (log-mapped) samples against the target,
    and return the sampled x values (in the original scale)."""
    bo = bayes_opt(my_function2, {'x': (0.01, 13)})
    ymax, xmax, y, x = bo.log_maximize(init_points=5, full_out=True)
    ax = plt.subplot(1, 1, 1)
    ax.grid(True, color='k', linestyle='--', linewidth=.8, alpha=0.4)
    # Map samples into [0, 1]: log10(x / xmin) / log10(xmax / xmin).
    p1, = ax.plot(numpy.log10(x/0.01) / log(13/0.01, 10), y, 'b-', marker='o', color='k')
    p1.set_linestyle(' ')
    p2, = ax.plot(log_grid, numpy.asarray([my_function2(x) for x in grid]), 'm-')
    ax.legend([p1, p2],
              ['Sampled points', 'Target function'], loc=2)
    plt.show()
    return x
# ------------------------------ // ------------------------------ // ------------------------------ #
def gp3_log(grid, log_grid, sampled_x):
    '''This is broken, something wrong with the GP and plots, fix it!

    Intended behavior: fit a GP to the log-sampled points of my_function2
    and plot the posterior with 95% bounds (top panel) plus normalized
    UCB / EI / PoI acquisitions (bottom panel) on the normalized log grid.
    '''
    x = sampled_x
    y = numpy.asarray([my_function2(x) for x in x])#numpy.asarray([my_function2(0.01 * (10 ** (x * log(13/0.01, 10)))) for x in x])
    gp = GP(kernel='squared_exp')
    gp.best_fit(x, y)
    mean, var = gp.fast_predict(grid.reshape((len(log_grid), 1)))
    ymax = y.max()
    ac = acquisition()
    ucb = ac.full_UCB(mean, var)
    ei = ac.full_EI(ymax, mean, var)
    poi = ac.full_PoI(ymax, mean, var)

    # ------------------------------ // ------------------------------ #
    ax1 = plt.subplot(2, 1, 1)
    ax1.grid(True, color='k', linestyle='--', linewidth=0.8, alpha=0.4)
    p1, = ax1.plot(numpy.log10(x/0.01) / log(13/0.01, 10), y, 'b-', marker='o', color='k')
    p2, = ax1.plot(log_grid, [(mean[i] + 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p3, = ax1.plot(log_grid, [(mean[i] - 2*sqrt(fabs(var[i]))) for i in range(len(mean))])
    p4, = ax1.plot(log_grid, numpy.asarray([my_function2(x) for x in grid]), 'm--')
    p5, = ax1.plot(log_grid, [mean[i] for i in range(len(mean))], 'y')
    p1.set_linestyle(' ')
    ax1.legend([p1, p2, p3, p4, p5],
               ['Data', 'Upper 95% bound', 'Lower 95% bound', 'True function', 'Predicted mean'], loc=3)
    ax2 = plt.subplot(2, 1, 2)
    ax2.grid(True, color='k', linestyle='--', linewidth=0.8, alpha=0.4)
    p21, = ax2.plot(log_grid, ucb/ucb.max(), 'r')
    p22, = ax2.plot(log_grid, ei/(ei.max() + 1e-6), 'orange')
    p23, = ax2.plot(log_grid, poi, 'green')
    ax2.legend([p21, p22, p23], ['Upper Confidence Bound', 'Expected Improvement', 'Probability of Improvement'], loc=3)
    plt.show()
# ------------------------------ // ------------------------------ // ------------------------------ #
# ------------------------------ // ------------------------------ // ------------------------------ #
if __name__ == "__main__":
grid = numpy.arange(0.01,13,0.01)
log_grid = numpy.log10(numpy.arange(0.01,13,0.01)/0.01)/log(13/0.01, 10)
# ------------------------------ // ------------------------------ #
show_functions(grid, log_grid)
gp1(grid)
sampled_x = find_max(grid)
gp2(grid, sampled_x)
sampled_x_log = find_max_log(grid, log_grid)
gp3_log(grid, log_grid, sampled_x_log)
| true |
e51ea2b3da54ea5f978433febe002f116c6ee290 | Python | markgreene74/bitesofpy | /bytes/129/exercise_129.py | UTF-8 | 1,920 | 3.3125 | 3 | [] | no_license | import requests
from collections import defaultdict
STOCK_DATA = "https://bit.ly/2MzKAQg"
# pre-work: load JSON data into program
# NOTE: runs at import time and requires network access. Judging from the
# usage below, `data` is a list of dicts with keys including 'cap',
# 'industry', 'symbol' and 'sector' — confirm against the downloaded JSON.
with requests.Session() as s:
    data = s.get(STOCK_DATA).json()
# your turn:
def _cap_str_to_mln_float(cap):
"""If cap = 'n/a' return 0, else:
- strip off leading '$',
- if 'M' in cap value, strip it off and return value as float,
- if 'B', strip it off and multiple by 1,000 and return
value as float"""
if cap == "n/a":
return 0
elif "M" in cap:
return float(cap.strip("$").strip("M"))
elif "B" in cap:
return float(cap.strip("$").strip("B")) * 1000
def get_industry_cap(industry):
    """Return the sum of all cap values (in millions) for *industry*,
    rounded to 2 decimals; cap strings are parsed via
    _cap_str_to_mln_float.

    Improvements: the commented-out duplicate implementation was removed,
    and the needless intermediate list inside sum() was replaced by a
    generator expression.
    """
    total = sum(
        _cap_str_to_mln_float(stock["cap"])
        for stock in data
        if stock["industry"] == industry
    )
    return round(total, 2)
def get_stock_symbol_with_highest_cap():
    """Return the stock symbol (e.g. PACD) with the highest total cap,
    using _cap_str_to_mln_float to parse the cap values."""
    caps_by_symbol = defaultdict(float)
    for stock in data:
        caps_by_symbol[stock["symbol"]] += _cap_str_to_mln_float(stock["cap"])
    ranked = sorted(caps_by_symbol.items(), key=lambda pair: pair[1])
    return ranked[-1][0]
def get_sectors_with_max_and_min_stocks():
    """Return a tuple (sector with max, sector with min), discarding n/a.

    NOTE(review): despite the name ("...stocks"), this ranks sectors by the
    SUM of their cap values, not by the number of stocks, and it discards
    rows whose *cap* is 'n/a' rather than rows whose *sector* is 'n/a' —
    confirm this is the intended metric before relying on it.
    """
    d = defaultdict(float)
    for i in data:
        if i["cap"] != "n/a":
            d[i["sector"]] += _cap_str_to_mln_float(i["cap"])
    d_sorted = sorted(d.items(), key=lambda x: x[1])
    return (d_sorted[-1][0], d_sorted[0][0])
| true |
8833632354eef0f3f4eee1f256abf4f16afac8c7 | Python | forswj/TestPython | /forspider/do_five.py | UTF-8 | 1,258 | 3.109375 | 3 | [] | no_license | # coding:utf-8
'''
Created on 2016年10月14日
@author: Administrator
'''
import requests
import os
import re
#目录
def do_mkdir(dirth):
if not os.path.isdir(dirth):
os.mkdir(dirth)
print("不存在该目录")
else:
print("已存在该目录,可存储图片")
# Fetch a URL's HTML.
def get_html(url):
    """Return the response body of *url* as text."""
    return requests.get(url).text
# Crawl and download the images referenced by a page.
def get_image(html, dirth):
    """Download every .jpg referenced by a src="..." attribute in *html*
    into directory *dirth*, naming files 1.jpg, 2.jpg, ... in download
    order.

    Bug fixes versus the original:
    - a failed requests.get previously fell through and reused the stale
      (or undefined) `res` from an earlier iteration; now it `continue`s.
    - the final count message printed successes + 1 (off by one); a
      separate success counter is now reported.
    """
    urls = re.findall(r' src="(.*?\.jpg)"', html)
    count = 0
    for url in urls:
        print("下载中:", url)
        try:
            res = requests.get(url)
            # Treat 4xx responses as failures and skip them.
            if str(res.status_code)[0] == "4":
                print("下载失败:", url)
                continue
        except Exception as e:
            print("下载失败,", url)
            continue  # `res` is not valid here — skip to the next URL
        count += 1
        filename = os.path.join(dirth, str(count) + ".jpg")
        with open(filename, 'wb') as f:
            f.write(res.content)
    print("下载结束,共下载%s张图片" % count)
if __name__ == '__main__':
    # Target directory for downloaded images (Windows path).
    dirth = r'F:\222222'
    do_mkdir(dirth)
    url = r'http://www.hm5988.com'
    html = get_html(url)    # fetch the page HTML
    get_image(html, dirth)  # download every .jpg it references
98013fc523203ebbb1857c8cbd1ea529d25dc253 | Python | jiaxinr/ProjectOfDataScience | /其他代码/tf-idf/keywords-tfidf.py | UTF-8 | 6,470 | 3 | 3 | [] | no_license | import datetime
import math
import os
import sys
import jieba
import re
def segment(sentence, cut_all=True):
    """Tokenize a Chinese sentence with jieba after stripping noise.

    Removes newlines, ASCII letters/digits, and the boilerplate labels
    '用户:' ("user:") and '点赞数:' ("like count:") before segmentation.
    """
    sentence = re.sub('[a-zA-Z0-9]', '', sentence.replace('\n', ''))  # filter out latin letters/digits
    sentence = sentence.replace('用户:', '')
    sentence = sentence.replace('点赞数:', '')
    return jieba.lcut(sentence, cut_all=cut_all)  # word segmentation
def getStopWords(path):
    """Read the stop-word file at *path* (UTF-8, one entry per line) and
    return the stripped lines as a list."""
    with open(path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]
def calTFIDF(inputdir):  # compute corpus-wide frequency and TF-IDF for every token
    """Scan the corpus under *inputdir*, compute each token's count, TF,
    IDF and TF-IDF, and write them to 'IDF.txt' (one line per token:
    'word count tf idf tf*idf')."""
    documents = MyDocuments(inputdir)
    stopwords = getStopWords(path='my_stopwords.txt')  # load the stop-word list
    # Tokens to skip: stop words plus Chinese punctuation marks.
    ignored = stopwords + ['', ' ', '', '。', ':', ',', ')', '(', '!', '?', '”', '“', '!', '?', '[', ']', '][', '.', '~', '·', '\"', ',']
    id_freq = {}   # total occurrences of each token across the corpus
    txt_freq = {}  # number of documents containing each token
    isInFile = {}  # per-document flag: token already seen in the current doc
    i = 0          # number of documents processed
    for doc in documents:
        # Entering a new document: reset every "seen in this document" flag.
        for key in isInFile:
            if isInFile[key]:
                isInFile[key] = False
        doc = (x for x in doc if x not in ignored)
        for x in doc:
            # First sighting in this document: bump its document frequency.
            if not (isInFile.get(x, False)):
                isInFile[x] = True
                txt_freq[x] = txt_freq.get(x, 0) + 1
            id_freq[x] = id_freq.get(x, 0) + 1
        if i % 1000 == 0:  # progress report every 1000 documents
            print('Documents processed: ', i, ', time: ',
                  datetime.datetime.now())
        i += 1

    # Compute inverse document frequency (log base 2) and persist.
    outputfile = "IDF.txt"
    with open(outputfile, 'w', encoding='utf-8') as f:
        total = sum(id_freq.values())  # total token count over the corpus
        for key, value in id_freq.items():
            tf = value / total  # corpus-level normalized term frequency
            idf = math.log(i / (txt_freq.get(key, 0) + 1), 2)  # +1 smooths against zero
            f.write(key + ' ' + str(value) + ' ' + str(tf) + ' ' + str(idf) + ' ' + str(tf * idf) + '\n')
def printTopK(tfidf, select_words, topK):
    """Print raw frequency, TF, IDF and TF-IDF for the first *topK*
    entries of *select_words*, looked up in the fitted *tfidf* object."""
    for i in range(0, topK):
        word = select_words[i]
        freq = tfidf.freq[word]
        tf_freq = tfidf.tf_freq[word]
        idf_freq = tfidf.idf_freq[word]
        tfidf_freq = tfidf.tfidf_freq[word]
        print(word + " " + "Freq = " + str(freq) + " " + "TF = " + str(tf_freq) + " "
              + "IDF = " + str(idf_freq) + " " + "TF-IDF = " + str(tfidf_freq))
class MyDocuments(object):
    """Memory-efficient corpus reader: lazily walk a directory tree and
    yield each UTF-8 text file segmented into tokens."""

    def __init__(self, dirname):
        self.dirname = dirname
        if not os.path.isdir(dirname):
            print(dirname, '- not a directory!')
            sys.exit()

    def __iter__(self):
        for dirfile in os.walk(self.dirname):
            # dirfile is (dirpath, dirnames, filenames); [2] is the files.
            for fname in dirfile[2]:
                try:
                    text = open(os.path.join(dirfile[0], fname),
                                'r', encoding='utf-8').read()
                    yield segment(text)
                except UnicodeDecodeError as e:
                    # Skip files that are not valid UTF-8 text.
                    pass
class TFIDFLoader(object):
    """Load per-word frequency / TF / IDF / TF-IDF statistics from the
    file written by calTFIDF (one 'word count tf idf tfidf' per line)."""

    def __init__(self, idf_path):
        self.idf_path = idf_path
        self.freq = {}        # word -> raw count
        self.tf_freq = {}     # word -> term frequency
        self.mean_tf = 0.0    # mean TF (currently never updated)
        self.idf_freq = {}    # word -> inverse document frequency
        self.mean_idf = 0.0   # mean IDF (currently never updated)
        self.tfidf_freq = {}  # word -> TF * IDF
        self.load_idf()

    def load_idf(self):
        """Parse self.idf_path line by line into the four dictionaries."""
        cnt = 0
        with open(self.idf_path, 'r', encoding='utf-8') as f:
            for line in f:
                try:
                    word, freq, tf, idf, tfidf = line.strip().split(' ')
                    cnt += 1
                except Exception as e:
                    # NOTE(review): on a malformed line this only skips the
                    # counter, but the assignments below still run with the
                    # values from the previous iteration (or raise NameError
                    # if the very first line is malformed) — confirm whether
                    # a `continue` was intended here.
                    pass
                self.freq[word] = int(freq)
                self.tf_freq[word] = float(tf)
                self.idf_freq[word] = float(idf)
                self.tfidf_freq[word] = float(tfidf)
        print('Vocabularies loaded: %d' % cnt)
        # self.mean_idf = sum(self.idf_freq.values()) / cnt
class TFIDF(object):
    """Keyword extractor backed by the corpus statistics of TFIDFLoader."""

    def __init__(self, idf_path):
        # Expose each of the loader's statistic tables directly.
        self.idf_loader = TFIDFLoader(idf_path)
        self.freq = self.idf_loader.freq
        self.tf_freq = self.idf_loader.tf_freq
        self.idf_freq = self.idf_loader.idf_freq
        self.tfidf_freq = self.idf_loader.tfidf_freq
        # self.mean_idf = self.idf_loader.mean_idf

    def extract_keywordsInSentence(self, sentence, topK=30):
        """Return the *topK* highest TF-IDF tokens of *sentence* (TF from
        the sentence itself, IDF from the corpus)."""
        seg_list = segment(sentence)  # tokenize
        freq = {}
        for w in seg_list:
            freq[w] = freq.get(w, 0.0) + 1.0  # term counts within the sentence
        if '' in freq:
            del freq['']
        total = sum(freq.values())  # total token count
        for k in freq:  # convert counts into TF-IDF scores
            # NOTE(review): idf_freq.get(k) is None for out-of-corpus words,
            # which would raise TypeError here — confirm inputs are in-vocab.
            freq[k] *= self.idf_freq.get(k) / total
        tags = sorted(freq, key=freq.__getitem__, reverse=True)  # descending score
        if topK:  # return only the topK entries when topK is truthy
            return tags[:topK]
        else:
            return tags

    def extract_keywordsInCorpus(self, topK=20):
        """Return (top-*topK* corpus-wide TF-IDF words, topK)."""
        tags = sorted(self.tfidf_freq, key=self.tfidf_freq.__getitem__, reverse=True)
        return tags[:topK], topK
if __name__ == '__main__':
    # Corpus directory (Windows path with Chinese folder names).
    inputdir = "D:\\NJU\大二上\数据科学基础\大作业\Coding\评论数据\微博评论分时段\微博评论分时段\评论至122"
    calTFIDF(inputdir)      # build IDF.txt from the corpus
    idffile = "IDF.txt"
    tfidf = TFIDF(idffile)  # reload the statistics from disk
    select_words, topK = tfidf.extract_keywordsInCorpus()  # top corpus keywords
    printTopK(tfidf, select_words, topK=10)
| true |
73a268c49c314a29ce7bf759bb1e729346584c4a | Python | Pandinosaurus/arsenal | /arsenal/maths/stats/permutation_test.py | UTF-8 | 2,980 | 3.390625 | 3 | [] | no_license | import numpy as np
def mc_perm_test(xs, ys, samples=10000, statistic=np.mean):
    """Unpaired Monte Carlo permutation test.

    Estimates the p-value as the fraction of random re-partitions of the
    pooled data whose effect size |statistic(a) - statistic(b)| is at
    least the observed one.

    Fix: the observed effect now uses `statistic` like the permuted
    effects do (it was previously hard-coded to np.mean, inconsistent
    with both the `statistic` parameter and mc_paired_perm_test).
    """
    def effect(a, b):
        return np.abs(statistic(a) - statistic(b))
    n = len(xs)
    observed = effect(xs, ys)            # observed effect size
    pooled = np.concatenate([xs, ys])
    hits = 0.0
    for _ in range(samples):
        np.random.shuffle(pooled)        # random re-partition of the pool
        hits += observed <= effect(pooled[:n], pooled[n:])
    return hits / samples
def mc_paired_perm_test(xs, ys, samples=10000, statistic=np.mean):
    """
    Paired permutation test

    >>> xs = np.array([1,2,3,4,5,6])
    >>> ys = np.array([2,3,4,5,6,7])

    >>> u = mc_perm_test(xs, ys, 1000)
    >>> p = mc_paired_perm_test(xs, ys, 1000)

    Under the unpaired test, we do not have a significant difference because the
    systems only differ in two positions.

    >>> assert u > .40

    Under the Paired test, we have more power!

    >>> assert p < .05

    """
    def effect(xs,ys): return np.abs(statistic(xs) - statistic(ys))
    assert len(xs) == len(ys)
    n, k = len(xs), 0
    diff = effect(xs,ys)      # observed difference
    for _ in range(samples):  # for each random sample
        swaps = np.random.randint(0,2,n).astype(bool)  # flip n coins
        # Swap paired elements where the coin came up heads; count samples
        # whose sign-flipped effect is at least the observed one.
        k += diff <= effect(np.select([swaps,~swaps],[xs,ys]),  # swap elements accordingly
                            np.select([~swaps,swaps],[xs,ys]))
    return k / samples  # fraction of random samples that achieved at least the observed difference
from itertools import product
def bf_paired_perm_test(xs, ys, statistic=np.mean):
    """Exact (brute-force) paired permutation test: enumerate all 2**n
    sign-flip assignments and return the exact p-value."""
    def effect(a, b):
        return np.abs(statistic(a) - statistic(b))
    assert len(xs) == len(ys)
    n = len(xs)
    observed = effect(xs, ys)
    weight = 2 ** -n  # each assignment is equally likely
    p = 0.0
    for bits in product([0, 1], repeat=n):
        mask = np.array(bits, dtype=bool)
        flipped = effect(np.select([mask, ~mask], [xs, ys]),
                         np.select([~mask, mask], [xs, ys]))
        p += weight * (flipped >= observed)
    return p
def verbose_paired_perm_test(xs, ys, nmc=10_000, threshold=0.05, fmt='%.4f'):
    """Run a paired permutation test and print a color-coded verdict.

    Green 'X > Y' / red 'X < Y' when significant at *threshold*, yellow
    'X ~ Y' otherwise; returns the estimated p-value. Let xs be the
    system you want to be greater.
    """
    from arsenal import colors
    p = mc_paired_perm_test(xs, ys, nmc)
    mx = np.mean(xs)
    my = np.mean(ys)
    if p <= threshold:
        # Significant: direction decided by the sample means.
        if mx > my:
            c = colors.green
            d = '>'
        else:
            c = colors.red
            d = '<'
    else:
        c = colors.yellow
        d = '~'
    #print('brute-force', bf_paired_perm_test(xs, ys))
    print('[paired perm] %s (p=%s)' % (c % 'X (%s) %s Y (%s)' % (fmt % mx,
                                                                 d,
                                                                 fmt % my),
                                       fmt % p))
    return p
if __name__ == '__main__':
    # Smoke test: exercise all three verdict branches (X > Y, X < Y, X ~ Y).
    def test():
        xs = np.array([1,2,3,4,5,6])
        verbose_paired_perm_test(xs+1, xs)
        verbose_paired_perm_test(xs, xs+1)
        verbose_paired_perm_test(xs, xs)

    test()
| true |
df63444dc4c919df68ae0715fef3d6a3592251a4 | Python | Mzleesir/lewin | /day13_面向对象/day1_面向对象_02属性.py | UTF-8 | 322 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2021/2/10 8:11 下午
# @Author : Lewin
# @FileName: day1_面向对象_02属性.py
# @Software: PyCharm
# 类和对象的特征数据称之为属性
class Point:
    """Represent a point in a plane (Cartesian) coordinate system."""

    # Class attribute shared by all instances: display name of the type.
    # Fix: in the original, the docstring was placed *after* this
    # attribute, making it a dead string expression instead of
    # Point.__doc__; it now comes first, where Python expects it.
    name = "点"


print(Point.name)
| true |
bf87455a4696e7de5fb0a1ad18a0cb20ef182f9f | Python | SKO7OPENDRA/gb-algorithm | /hw/hw_4/hw_4_1_v3.py | UTF-8 | 583 | 2.8125 | 3 | [] | no_license | import cProfile
import timeit
from random import random
def m_array():
    """Build an array of 15 random ints in [0, 100), print it, swap its
    first minimum and first maximum in place, report the swap, and return
    the array.

    Fix: the original initialized the list with ``arr = [] * N``, a
    misleading no-op (it is just ``[]``); a comprehension is used instead,
    and the min/max swap is done with tuple assignment.
    """
    N = 15
    arr = [int(random() * 100) for _ in range(N)]
    for value in arr:
        print(value, end=' ')
    print()
    # .index() returns the *first* occurrence of the extremum.
    imx = arr.index(max(arr))
    imn = arr.index(min(arr))
    arr[imn], arr[imx] = arr[imx], arr[imn]
    print('arr[%d]=%d arr[%d]=%d' % (imn + 1, arr[imn], imx + 1, arr[imx]))
    return arr


print(*m_array())
# python -m timeit -n 1000 -s "import hw_4_1_v3"
# 1000 loops, best of 5: 9.3 nsec per loop
| true |
0eed2ee16cd334a8719a59f861e702b74f20e3a0 | Python | inverseTrig/leet_code | /1537_get_the_maximum_score.py | UTF-8 | 848 | 3.21875 | 3 | [] | no_license | from typing import List
class Solution:
    """LeetCode 1537 — Get the Maximum Score.

    Both input arrays are strictly increasing; common values split them
    into parallel segments, and the best path greedily takes the larger
    segment sum between consecutive common values.
    """

    def maxSum(self, nums1: List[int], nums2: List[int]) -> int:
        """Return the maximum path score modulo 10**9 + 7."""
        common = sorted(set(nums1) & set(nums2))
        # Sentinel 0 marks "start of array" / "end of array".
        checkpoints = [0] + common + [0]
        total = 0
        for prev, cur in zip(checkpoints, checkpoints[1:]):
            lo1 = nums1.index(prev) if prev else 0
            hi1 = nums1.index(cur) if cur else None  # None -> slice to the end
            lo2 = nums2.index(prev) if prev else 0
            hi2 = nums2.index(cur) if cur else None
            total += max(sum(nums1[lo1:hi1]), sum(nums2[lo2:hi2]))
        return total % (10 ** 9 + 7)


sol = Solution()
print(sol.maxSum(nums1=[2, 4, 5, 8, 10], nums2=[4, 6, 8, 9]))
| true |
632e3d1ef8b441a7a957b9962cae133e872f8660 | Python | zeroTwozeroTwo/MyNoteBook | /Python/Example/05_高级数据类型/ex_02_del关键字.py | UTF-8 | 454 | 4.4375 | 4 | [] | no_license | name_list = ["张三", "李四", "王五"]
# The `del` keyword can remove an element from a list.
# Tip: in day-to-day development, prefer the list's own methods
# (remove/pop) for deleting data from a list.
del name_list[1]

# `del` fundamentally unbinds a name, removing the variable from memory.
name = "小明"
del name

# Note: once a variable has been removed with `del`, later code can no
# longer use it — the next line intentionally raises NameError to
# demonstrate this, so the final print never runs.
print(name)
print(name_list)
| true |
c861714375aff2d63beb0201b90153e1eb2f3fa5 | Python | joshsilverman/sk_analytics | /estimators/utils/imputer.py | UTF-8 | 1,886 | 2.90625 | 3 | [] | no_license | from IPython import embed
import numpy as np
from sklearn import preprocessing
import operator
class Imputer:
    """Impute missing values: per-column means for continuous features and
    per-category modes (learned from training data) for categorical ones.

    Fix: the original used dict.iteritems(), which only exists on
    Python 2; dict.items() works on both Python 2 and 3.
    """

    def __init__(self):
        # Cached mapping {category: most frequent value}, computed lazily.
        self._modes = None

    def impute_continuous(self, training_features_cont, features_cont):
        """Replace NaNs in *features_cont* with the per-column mean of
        *training_features_cont* (delegates to sklearn's mean imputer)."""
        sklearn_imputer = preprocessing.Imputer(missing_values='NaN', strategy='mean', axis=0)
        sklearn_imputer.fit(training_features_cont)
        return sklearn_imputer.transform(features_cont)

    def impute_categorical(self, training_features_cat, features_cat):
        """Return a copy of *features_cat* (a list of dicts) in which each
        None value is replaced by the training mode of its category."""
        modes = self.get_category_modes(training_features_cat)
        imputed_features_cat = []
        for features_dictionary in features_cat:
            imputed_features_cat.append({
                category: modes[category] if value is None else value
                for category, value in features_dictionary.items()
            })
        return imputed_features_cat

    def get_category_modes(self, training_features_cat):
        """Compute (once) and return {category: most frequent value} from
        the training dictionaries; subsequent calls reuse the cache."""
        if self._modes is None:
            counts_by_category = {}
            for features_dictionary in training_features_cat:
                for category, value in features_dictionary.items():
                    counts_by_category.setdefault(category, {})
                    counts_by_category[category].setdefault(value, 0)
                    counts_by_category[category][value] += 1
            self._modes = {}
            for category, counts in counts_by_category.items():
                # Pick the value with the highest count.
                self._modes[category] = max(counts.items(), key=operator.itemgetter(1))[0]
        return self._modes
| true |
18a65ada67939334e538f769e07f3a912c62176c | Python | jboegeholz/easypattern | /easy_pattern/easy_pattern.py | UTF-8 | 1,003 | 3.5 | 4 | [
"MIT"
] | permissive |
# Regex building blocks: common character-class tokens.
# NOTE(review): the '\d'-style literals rely on Python leaving unknown
# escapes intact; raw strings (r'\d') would silence DeprecationWarnings.
ANY_CHAR = '.'       # any character except newline
DIGIT = '\d'         # a decimal digit
NON_DIGIT = '\D'     # anything but a decimal digit
WHITESPACE = '\s'    # a whitespace character
NON_WHITESPACE = '\S'  # anything but whitespace
ALPHA = '[a-zA-Z]'   # an ASCII letter
ALPHANUM = '\w'      # a word character (letter, digit, underscore)
NON_ALPHANUM = '\W'  # anything but a word character
def zero_or_more(string):
    """Append the '*' quantifier (zero or more repetitions)."""
    suffix = '*'
    return string + suffix


def zero_or_one(string):
    """Append the '?' quantifier (zero or one repetition)."""
    suffix = '?'
    return string + suffix


def one_or_more(string):
    """Append the '+' quantifier (one or more repetitions)."""
    suffix = '+'
    return string + suffix


def exactly(number, string):
    """Append the '{n}' quantifier (exactly *number* repetitions)."""
    quantifier = '{' + str(number) + '}'
    return string + quantifier


def at_least(number, string):
    """Append the '{n,}' quantifier (at least *number* repetitions)."""
    quantifier = '{' + str(number) + ',}'
    return string + quantifier


def between(start, end, string):
    """Append the '{m,n}' quantifier (*start* to *end* repetitions)."""
    quantifier = '{' + str(start) + ',' + str(end) + '}'
    return string + quantifier
class Pattern:
    """Fluent builder for simple regular-expression strings.

    Each builder method appends to the internal pattern and returns
    ``self`` so calls can be chained, e.g.
    ``Pattern().starts_with('a').followed_by('b')``.
    """

    def __init__(self):
        # The regex text accumulated so far.
        self.pattern = ''

    def starts_with(self, start_str):
        """Append *start_str* as the beginning of the pattern."""
        self.pattern += start_str
        return self

    def followed_by(self, next_string):
        """Append *next_string* to the pattern."""
        self.pattern += next_string
        return self

    def not_followed_by(self, next_string):
        """Append a negated character class ``[^...]`` for *next_string*.

        Fix: now returns ``self`` like the other builder methods, so the
        fluent chain no longer breaks here (it previously returned None).
        """
        self.pattern += "[^" + next_string + "]"
        return self

    def __str__(self):
        return self.pattern

    def __repr__(self):
        return self.pattern
| true |
15fce279085f1704cdd011328541533190462605 | Python | 1040891015/PythonPractice | /matplotlib/test.py | UTF-8 | 100 | 2.796875 | 3 | [] | no_license | import numpy
# Demonstrate that seeding NumPy's global RNG makes draws reproducible:
# with seed 5 the same five values are printed on every run.
num = 0
numpy.random.seed(5)
while(num<5):
    print(numpy.random.random())
    num+=1
307eb5fbe782d4dfcfcd7c8134ba2b9201185893 | Python | tohfaakib/python_playground | /coroutines/example.py | UTF-8 | 345 | 3.171875 | 3 | [] | no_license | def grep(pattern):
    """Coroutine: print every line sent to it that contains *pattern*."""
    print("Searching for pattern: ", pattern)
    while True:
        line = (yield)  # suspend until a value is .send()-ed in
        if pattern in line:
            print(line)


search = grep('tin')
next(search)  # prime the coroutine: advance it to the first `yield`
search.send("How are you tintin?")  # printed: contains 'tin'
search.send("You like tantin?")     # printed: contains 'tin'
search.send("I don't know!")        # not printed
search.send("You like coroutine?")  # printed: contains 'tin'
search.close()  # raise GeneratorExit inside the coroutine
67ac17f261c1ecbf173847b2cb30f68089b8c85a | Python | ameli/imate | /examples/plot_traceinv_ill_conditioned_cheb.py | UTF-8 | 14,845 | 2.59375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #! /usr/bin/env python
# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the license found in the LICENSE.txt file in the root
# directory of this source tree.
# =======
# Imports
# =======
import sys
import numpy
# Package modules
from imate import InterpolateSchatten
from _utilities.data_utilities import generate_matrix
from _utilities.plot_utilities import * # noqa: F401, F403
from _utilities.plot_utilities import load_plot_settings, save_plot, plt, \
matplotlib, InsetPosition, mark_inset, NullFormatter, \
PercentFormatter, ScalarFormatter
# ====
# plot
# ====
def plot_fun_and_error(TI, test):
    """
    Plot the interpolation of the normalized trace function tau_p(t)
    (left panel) and its relative interpolation error (right panel) for
    one or more InterpolateSchatten objects *TI*, then save the figure as
    'traceinv_ill_conditioned_cheb'. When *test* is True, a coarse grid
    and a 'test_' filename prefix are used.
    """

    print('Plotting ... (may take a few minutes!)')

    # Load plot settings
    load_plot_settings()

    # If not a list, embed the object into a list
    if not isinstance(TI, list):
        TI = [TI]

    num_plots = len(TI)

    # Range to plot
    if test:
        eta_resolution = 20
    else:
        eta_resolution = 500
    # Symmetric grid around zero: negative and positive t on log spacing.
    eta = numpy.r_[-numpy.logspace(-9, -3.0001, eta_resolution)[::-1], 0,
                   numpy.logspace(-9, 3, eta_resolution)]
    zero_index = numpy.argmin(numpy.abs(eta))

    # Functions
    trace_exact = TI[0].eval(eta)
    trace_lowerbound = TI[0].bound(eta)
    trace_estimate = numpy.zeros((num_plots, eta.size))
    for j in range(num_plots):
        trace_estimate[j, :] = TI[j].interpolate(eta)

    # Tau (normalization by trace of B; here trace_B = 1)
    trace_B = 1
    tau_exact = trace_exact / trace_B
    tau_lowerbound = trace_lowerbound / trace_B
    tau_estimate = trace_estimate / trace_B

    # Plots trace
    textwidth = 9.0  # in inches
    # fig, ax = plt.subplots(nrows=1, ncols=2,
    #                        figsize=(textwidth, textwidth/2))
    fig, ax = plt.subplots(nrows=1, ncols=2,
                           figsize=(textwidth, textwidth/2.5))
    ax[0].plot(eta, tau_exact, color='black', label='Exact')
    ax[0].plot(eta[zero_index:], tau_lowerbound[zero_index:], '--',
               color='black', label=r'Lower bound (at $t \geq 0$)')
    ax[0].plot(eta[:zero_index], tau_lowerbound[:zero_index], '-.',
               color='black', label=r'Upper bound (at $t < 0$)')

    colors_list = ["#d62728",
                   "#2ca02c",
                   "#bcbd22",
                   "#ff7f0e",
                   "#1f77b4",
                   "#9467bd",
                   "#8c564b",
                   "#17becf",
                   "#7f7f7f",
                   "#e377c2"]

    # Draw interpolants in reverse so the first curve (j == 0) ends on top.
    for j in reversed(range(num_plots)):
        q = TI[j].q
        h = ax[0].plot(eta, tau_estimate[j, :],
                       label=r'Interpolation, $q=%d$' % q,
                       color=colors_list[j])
        if j == 0:
            h[0].set_zorder(20)

    ax[0].set_xscale('symlog', linthresh=1e-8)
    ax[0].set_yscale('log')
    ax[0].set_xlim([eta[0], eta[-1]])
    ax[0].set_ylim([1e-4, 1e3])
    ax[0].set_xlabel(r'$t$')
    ax[0].set_ylabel(r'$\tau_p(t)$')
    ax[0].set_title(r'(a) Interpolation of $\tau_p(t), p=-1$')
    ax[0].grid(True)
    ax[0].legend(fontsize='x-small', loc='upper left')
    ax[0].set_xticks(numpy.r_[-10**numpy.arange(-3, -7, -3, dtype=float), 0,
                              10**numpy.arange(-6, 4, 3, dtype=float)])
    ax[0].tick_params(axis='x', which='minor', bottom=False)

    # Inset plot
    ax2 = plt.axes([0, 0, 1, 1])
    # Manually set the position and relative size of the inset axes within ax1
    ip = InsetPosition(ax[0], [0.14, 0.25, 0.45, 0.35])
    ax2.set_axes_locator(ip)
    # Mark the region corresponding to the inset axes on ax1 and draw lines
    # in grey linking the two axes.
    # Avoid inset mark lines intersect the inset axes itself by setting anchor
    inset_color = 'oldlace'
    mark_inset(ax[0], ax2, loc1=1, loc2=4, facecolor=inset_color,
               edgecolor='0.5')
    ax2.plot(eta, tau_exact, color='black', label='Exact')
    ax2.plot(eta[zero_index:], tau_lowerbound[zero_index:], '--',
             color='black', label=r'Lower bound (at $t \geq 0$)')
    ax2.plot(eta[:zero_index], tau_lowerbound[:zero_index], '-.',
             color='black', label=r'Upper bound (at $t < 0$)')
    for j in reversed(range(num_plots)):
        ax2.plot(eta, tau_estimate[j, :], color=colors_list[j])
    ax2.set_xlim([1e-2, 1.15e-2])
    ax2.set_xticks([1e-2, 1.15e-2])
    ax2.set_ylim(0.0111, 0.0125)
    ax2.set_yticks([0.0111, 0.0125])
    ax2.xaxis.set_minor_formatter(NullFormatter())
    ax2.set_xticklabels(['0.01', '0.0115'])
    ax2.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax2.set_facecolor(inset_color)
    ax2.xaxis.set_tick_params(labelsize=8)
    ax2.yaxis.set_tick_params(labelsize=8)
    # plt.setp(ax2.get_yticklabels(), backgroundcolor='white')

    # Plot errors
    ax[1].semilogx(eta[zero_index:],
                   100*(1-tau_lowerbound[zero_index:]/tau_exact[zero_index:]),
                   '--', color='black', label=r'Lower bound (at $t \geq 0$)',
                   zorder=15)  # Relative error
    ax[1].semilogx(eta[:zero_index],
                   100*(1-tau_lowerbound[:zero_index]/tau_exact[:zero_index]),
                   '-.', color='black', label=r'Upper bound (at $t < 0$)',
                   zorder=15)  # Relative error
    for j in reversed(range(num_plots)):
        q = TI[j].q
        h = ax[1].semilogx(eta, 100*(1-tau_estimate[j, :]/tau_exact),
                           label=r'Interpolation, $q=%d$' % q,
                           color=colors_list[j])  # Relative error
        if j == 0:
            h[0].set_zorder(20)

    ax[1].set_xscale('symlog', linthresh=1e-8)
    ax[1].set_yscale('linear')
    ax[1].set_xlim([eta[0], eta[-1]])
    ax[1].set_ylim([-1, 2])
    ax[1].set_yticks(numpy.arange(-1, 2.1, 0.5))
    ax[1].set_xlabel(r'$t$')
    ax[1].set_ylabel(
        r'$1-\tau_{\mathrm{approx}}(t)/\tau_{\mathrm{exact}}(t)$')
    ax[1].set_title(r'(b) Relative error of interpolation, $p=-1$')
    ax[1].grid(True)
    ax[1].legend(fontsize='x-small', loc='upper left')
    ax[1].set_xticks(numpy.r_[-10**numpy.arange(-3, -7, -3, dtype=float), 0,
                              10**numpy.arange(-6, 4, 3, dtype=float)])
    ax[1].tick_params(axis='x', which='minor', bottom=False)
    ax[1].yaxis.set_major_formatter(PercentFormatter(decimals=1))

    if not test:
        plt.tight_layout()

    # Save Plot
    filename = 'traceinv_ill_conditioned_cheb'
    if test:
        filename = "test_" + filename
    save_plot(plt, filename, transparent_background=False)

    # If no display backend is enabled, do not plot in the interactive mode
    if (not test) and (matplotlib.get_backend() != 'agg'):
        plt.show()
# ===============
# plot error only
# ===============
def plot_error_only(TI, test):
    """
    Plot the relative error of the interpolated trace of the inverse
    versus the parameter ``t`` for each interpolation object.

    Parameters
    ----------
    TI : object or list of objects
        Interpolation object(s) exposing ``eval``, ``bound``,
        ``interpolate`` and the attribute ``q`` (number of interpolant
        points).
    test : bool
        When True, use a coarse resolution and skip layout tweaks so the
        function runs quickly under the test suite.

    The figure is saved as ``traceinv_ill_conditioned_cheb`` (prefixed
    with ``test_`` in test mode) and shown interactively when a display
    backend other than 'agg' is enabled.

    Note: the large commented-out first panel (the two-subplot variant
    kept from ``plot_func_and_error``) has been removed; see version
    control history if it is needed again.
    """
    print('Plotting ... (may take a few minutes!)')

    # Load plot settings
    load_plot_settings()

    # If not a list, embed the object into a list
    if not isinstance(TI, list):
        TI = [TI]

    num_plots = len(TI)

    # Range to plot: symmetric log-spaced points around zero, with an
    # explicit 0 sample so the bound curves can be split at t = 0.
    if test:
        eta_resolution = 20
    else:
        eta_resolution = 500
    eta = numpy.r_[-numpy.logspace(-9, -3.0001, eta_resolution)[::-1], 0,
                   numpy.logspace(-9, 3, eta_resolution)]
    zero_index = numpy.argmin(numpy.abs(eta))

    # Exact solution, analytic bound, and each interpolation
    trace_exact = TI[0].eval(eta)
    trace_lowerbound = TI[0].bound(eta)
    trace_estimate = numpy.zeros((num_plots, eta.size))
    for j in range(num_plots):
        trace_estimate[j, :] = TI[j].interpolate(eta)

    # Tau (normalization; trace of B is 1 here)
    trace_B = 1
    tau_exact = trace_exact / trace_B
    tau_lowerbound = trace_lowerbound / trace_B
    tau_estimate = trace_estimate / trace_B

    # Plots trace
    textwidth = 9.0  # in inches
    fig, ax = plt.subplots(figsize=(textwidth/1.946, textwidth/2.5))

    colors_list = ["#d62728",
                   "#2ca02c",
                   "#bcbd22",
                   "#ff7f0e",
                   "#1f77b4",
                   "#9467bd",
                   "#8c564b",
                   "#17becf",
                   "#7f7f7f",
                   "#e377c2"]

    # Relative error of the analytic bound, split at t = 0 (it is a
    # lower bound for t >= 0 and an upper bound for t < 0)
    ax.semilogx(eta[zero_index:],
                100*(1-tau_lowerbound[zero_index:]/tau_exact[zero_index:]),
                '--', color='black', label=r'Lower bound (at $t \geq 0$)',
                zorder=15)  # Relative error
    ax.semilogx(eta[:zero_index],
                100*(1-tau_lowerbound[:zero_index]/tau_exact[:zero_index]),
                '-.', color='black', label=r'Upper bound (at $t < 0$)',
                zorder=15)  # Relative error

    # Interpolation errors; reversed order so the finest set (j = 0) is
    # drawn last and raised above the others
    for j in reversed(range(num_plots)):
        q = TI[j].q
        h = ax.semilogx(eta, 100*(1-tau_estimate[j, :]/tau_exact),
                        label=r'Interpolation, $q=%d$' % q,
                        color=colors_list[j])  # Relative error
        if j == 0:
            h[0].set_zorder(20)
    ax.set_xscale('symlog', linthresh=1e-8)
    ax.set_yscale('linear')
    ax.set_xlim([eta[0], eta[-1]])
    ax.set_ylim([-1, 2])
    ax.set_yticks(numpy.arange(-1, 2.1, 0.5))
    ax.set_xlabel(r'$t$')
    ax.set_ylabel(
        r'$1-\tau_{\mathrm{approx}}(t)/\tau_{\mathrm{exact}}(t)$')
    ax.set_title(r'(b) Relative error of interpolation, $p=-1$')
    ax.grid(True)
    ax.legend(fontsize='x-small', loc='upper left')
    ax.set_xticks(numpy.r_[-10**numpy.arange(-3, -7, -3, dtype=float), 0,
                           10**numpy.arange(-6, 4, 3, dtype=float)])
    ax.tick_params(axis='x', which='minor', bottom=False)
    ax.yaxis.set_major_formatter(PercentFormatter(decimals=1))

    if not test:
        plt.tight_layout()

    # Save Plot
    filename = 'traceinv_ill_conditioned_cheb'
    if test:
        filename = "test_" + filename
    save_plot(plt, filename, transparent_background=False)

    # If no display backend is enabled, do not plot in the interactive mode
    if (not test) and (matplotlib.get_backend() != 'agg'):
        plt.show()
# ====
# main
# ====
def main(test=False):
    """
    Reproduce the ill-conditioned-matrix interpolation example
    (Figure 3 of Ameli & Shadden (2020), arXiv:2009.07385).

    Run with::

        python examples/Plot_imate_IllConditioned.py

    Set ``test=True`` for a small, fast problem size.
    """
    # Small diagonal shift that makes the nearly singular matrix invertible.
    shift = 1e-3

    # Generate a nearly singular matrix (small sizes under test).
    if test:
        num_rows, num_cols = 100, 50
    else:
        num_rows, num_cols = 1000, 500
    K = generate_matrix(num_rows, num_cols, shift)

    # Sets of interpolant points, from finest to coarsest.
    point_sets = [
        [1e-3, 4e-3, 1e-2, 4e-2, 1e-1, 1],
        [1e-3, 1e-2, 1e-1, 1],
        [1e-3, 1e-1],
    ]

    # Shared interpolation settings (Chebyshev rational functions, p = -1).
    options = {'method': 'cholesky', 'invert_cholesky': True}
    kind = 'CRF'
    scale = None
    p = -1

    # One interpolation object per set of interpolant points.
    TI = [InterpolateSchatten(K, p=p, ti=points, kind=kind,
                              scale=scale, options=options)
          for points in point_sets]

    # Plot the relative interpolation errors.
    plot_error_only(TI, test)
# ===========
# script main
# ===========
# Script entry point: exit with main()'s return code when run directly.
if __name__ == "__main__":
    sys.exit(main())
| true |
e065325308e23ae69ac2a19fed8f519a0fd34f88 | Python | spiritdan/dingdingAutoClockIn | /dingding_V2/test_timer.py | UTF-8 | 1,678 | 2.609375 | 3 | [] | no_license | import schedule
import time
import holiday as holiday, DingDing as Ding
import configparser
import check_holiday
# Read runtime settings (ADB directory, punch-in/out times) from dingding.cfg.
config = configparser.ConfigParser(allow_no_value=False)
config.read("dingding.cfg",encoding='utf-8')
directory = config.get("ADB","directory")
gowork_time=config.get("time","go_time")
offwork_time=config.get("time","off_time")
# Current local date, split into parts for the holiday lookup below.
now = int(time.time())
timeStruct = time.localtime(now)
year = timeStruct.tm_year
month = timeStruct.tm_mon
day = timeStruct.tm_mday
# Work-status codes: 0 = working day, 1 = rest day, 2 = public holiday.
date="{0}{1:02d}{2:02d}".format(year,month,day)
holiday_status=check_holiday.checkholiday(date)['work_status']
print(holiday_status)
def job_gowork():
    """Run the morning clock-in punch unless today is a day off."""
    print("开始上班打卡调度")
    if holiday_status != 0:
        # Rest day or public holiday: nothing to do.
        print('今天不干活')
        return
    worker = Ding.dingding(directory)
    screenshot = worker.goto_work()
    print(worker.filename)
def job_offwork():
    """Run the evening clock-out punch unless today is a day off."""
    print("开始下班打卡调度")
    if holiday_status != 0:
        # Rest day or public holiday: nothing to do.
        print('今天不干活')
        return
    worker = Ding.dingding(directory)
    screenshot = worker.after_work()
    print(worker.filename)
# Register the two daily punch jobs at the configured times.
schedule.every().day.at(gowork_time).do(job_gowork)
schedule.every().day.at(offwork_time).do(job_offwork)

# Scheduler loop. Sleep between polls so the process does not busy-spin
# at 100% CPU (the original tight loop had no delay at all).
while True:
    schedule.run_pending()
    time.sleep(1)

# Further `schedule` usage examples:
#   def run_threaded(job_func):
#       job_thread = threading.Thread(target=job_func)
#       job_thread.start()
#   schedule.every(10).seconds.do(run_threaded, job2)
#   schedule.every(10).minutes.do(job)
#   schedule.every().hour.do(job)
#   schedule.every().day.at("10:30").do(job)
#   schedule.every().monday.do(job)
#   schedule.every().wednesday.at("13:15").do(job)
50ebac5ae65233e4186c92333087b248f942eb35 | Python | vvthakral/Python_code_learn | /merge_sort.py | UTF-8 | 774 | 3.53125 | 4 | [] | no_license | def merge_sort(arr):
c = len(arr)//2
if c>=1:
l = arr[:c]
r = arr[c:]
l = merge_sort(l)
r = merge_sort(r)
else:
return arr
p_l,p_r = 0,0
arr = []
while True:
if p_l < len(l) and p_r < len(r):
if l[p_l]<r[p_r]:
arr.append(l[p_l])
p_l+=1
else:
arr.append(r[p_r])
p_r+=1
elif p_l == len(l) and p_r<len(r):
arr.extend(r[p_r:])
break
elif p_r == len(r) and p_l<len(l):
arr.extend(l[p_l:])
break
return arr
# Read a space-separated list of integers, sort it, and print the result.
a = [int(x) for x in input ('Enter the array :').split(' ')]
merge_sorted = merge_sort(a)
print(merge_sorted)
a2c5480439b2903c5127471273c0b6f4b2e238b0 | Python | cdcai/autism_surveillance | /src/stuff/metrics.py | UTF-8 | 4,642 | 2.828125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix
from sklearn.metrics import precision_score, recall_score
from scipy.stats import chi2
# Quick function for thresholding probabilities
def threshold(probs, cutoff=.5):
    """Binarize probabilities: 1 where ``probs >= cutoff``, else 0.

    Accepts any array-like (list, tuple, ndarray); the original compared
    the raw argument, which required an ndarray.  Returns a
    ``numpy.uint8`` array.  The cutoff itself maps to 1 (>= comparison).
    """
    return (np.asarray(probs) >= cutoff).astype(np.uint8)
# Calculates McNemar's chi-squared statistic
def mcnemar_test(true, pred, cc=False):
    """McNemar's chi-squared test on the discordant pairs of a 2x2 table.

    Parameters
    ----------
    true, pred : array-like of binary labels.
    cc : bool
        Apply the continuity correction (|b - c| - 1)^2 / (b + c).

    Returns
    -------
    pandas.DataFrame with one row: the discordant counts ``b`` and
    ``c``, the test statistic, and its chi-squared(df=1) p-value.
    """
    confmat = confusion_matrix(true, pred)
    b = int(confmat[0, 1])
    c = int(confmat[1, 0])
    if b + c == 0:
        # No discordant pairs: the statistic is undefined (0/0); report
        # no evidence against the null instead of dividing by zero.
        stat = 0.0
        p = 1.0
    else:
        if cc:
            stat = (abs(b - c) - 1)**2 / (b + c)
        else:
            stat = (b - c)**2 / (b + c)
        p = 1 - chi2(df=1).cdf(stat)
    outmat = np.array([b, c, stat, p]).reshape(-1, 1)
    out = pd.DataFrame(outmat.transpose(),
                       columns=['b', 'c', 'stat', 'pval'])
    return out
# Runs basic diagnostic stats on binary (only) predictions
# Runs basic diagnostic stats on binary (only) predictions
def binary_diagnostics(true, pred, accuracy=False, counts=True):
    """One-row DataFrame of confusion-matrix cells and derived rates
    (sens, spec, ppv, npv, f1) for binary labels; optionally appends
    accuracy and prevalence counts plus McNemar's p-value.

    NOTE(review): the rate divisions assume every confusion-matrix
    margin is non-zero (e.g. at least one positive in `true`); an
    all-negative input divides by zero — confirm callers guarantee this.
    """
    confmat = confusion_matrix(true, pred)
    tp = confmat[1, 1]
    fp = confmat[0, 1]
    tn = confmat[0, 0]
    fn = confmat[1, 0]
    sens = tp / (tp + fn)
    spec = tn / (tn + fp)
    ppv = tp / (tp + fp)
    npv = tn / (tn + fn)
    f1 = 2 * (sens * ppv) / (sens + ppv)
    outmat = np.array([tp, fp, tn, fn, sens,
                       spec, ppv, npv, f1]).reshape(-1, 1)
    out = pd.DataFrame(outmat.transpose(),
                       columns=['tp', 'fp', 'tn', 'fn', 'sens',
                                'spec', 'ppv', 'npv', 'f1'])
    if accuracy:
        out['acc'] = accuracy_score(true, pred)
    if counts:
        # Prevalence comparison: predicted minus true positive counts,
        # absolute and relative, plus McNemar's test p-value.
        true_prev = int(np.sum(true == 1))
        pred_prev = int(np.sum(pred == 1))
        abs_diff = (true_prev - pred_prev) * -1
        rel_diff = abs_diff / true_prev
        mcnemar = mcnemar_test(true, pred).pval
        count_outmat = np.array([true_prev, pred_prev,
                                 abs_diff, rel_diff, mcnemar]).reshape(-1, 1)
        count_out = pd.DataFrame(count_outmat.transpose(),
                                 columns=['true', 'pred', 'abs', 'rel',
                                          'mcnemar'])
        out = pd.concat([out, count_out], axis=1)
    return out
# Runs basic diagnostic stats on binary or multiclass predictions
# Runs basic diagnostic stats on binary or multiclass predictions
def diagnostics(true, pred, average='binary', accuracy=False, counts=False):
    """One-row DataFrame of sensitivity (recall), PPV (precision) and F1,
    averaged per sklearn's `average` parameter; optionally appends
    accuracy and positive-count comparisons.

    NOTE(review): the `counts` columns compare label == 1 counts, which
    only makes sense for binary labels — confirm multiclass callers keep
    counts=False.
    """
    sens = recall_score(true, pred, average=average)
    ppv = precision_score(true, pred, average=average)
    f1 = f1_score(true, pred, average=average)
    out = pd.DataFrame([sens, ppv, f1]).transpose()
    out.columns = ['sens', 'ppv', 'f1']
    if accuracy:
        out['acc'] = accuracy_score(true, pred)
    if counts:
        out['true'] = int(np.sum(true == 1))
        out['pred'] = int(np.sum(pred == 1))
        out['abs_diff'] = (out.true - out.pred) * -1
        out['rel_diff'] = out.abs_diff / out.true
    return out
# Finds the optimal threshold for a classifier based on a metric
# Finds the optimal threshold for a classifier based on a metric
def grid_metrics(targets,
                 guesses,
                 step=.01,
                 min=0.0,
                 max=1.0,
                 by='f1',
                 average='binary',
                 counts=True):
    """Sweep classification cutoffs over [min, max) in increments of
    ``step`` and return a DataFrame of diagnostics at each cutoff.

    ``guesses`` may be 1-D probabilities or a 2-column probability
    matrix (column 1 is used).  Fix over the original: row assignment
    now uses ``.loc`` instead of the chained ``scores.cutoff[i] = ...``,
    which raises SettingWithCopyWarning and silently fails under
    pandas copy-on-write.

    NOTE(review): the frame is pre-sized to int(1/step) rows, which only
    matches len(cutoffs) when min=0.0 and max=1.0 — confirm callers
    never change those defaults, or size by len(cutoffs).
    (The parameter names ``min``/``max`` shadow builtins but are part of
    the public keyword interface, so they are kept.)
    """
    cutoffs = np.arange(min, max, step)
    if len((guesses.shape)) == 2:
        guesses = guesses[:, 1]
    if average == 'binary':
        scores = pd.DataFrame(np.zeros(shape=(int(1/step), 15)),
                              columns=['cutoff', 'tp', 'fp', 'tn', 'fn',
                                       'sens', 'spec', 'ppv', 'npv', 'f1',
                                       'true', 'pred', 'abs', 'rel',
                                       'mcnemar'])
        for i, cutoff in enumerate(cutoffs):
            threshed = threshold(guesses, cutoff)
            stats = binary_diagnostics(targets, threshed)
            scores.iloc[i, 1:] = stats.values
            scores.loc[i, 'cutoff'] = cutoff
    else:
        scores = pd.DataFrame(np.zeros(shape=(int(1/step), 4)),
                              columns=['cutoff', 'sens', 'ppv', 'f1'])
        if counts:
            new = pd.DataFrame(np.zeros(shape=(int(1/step), 4)),
                               columns=['true', 'pred',
                                        'abs_diff', 'rel_diff'])
            scores = pd.concat([scores, new], axis=1)
        for i, cutoff in enumerate(cutoffs):
            threshed = threshold(guesses, cutoff)
            stats = diagnostics(targets,
                                threshed,
                                average=average,
                                counts=counts)
            scores.iloc[i, 1:] = stats.values
            scores.loc[i, 'cutoff'] = cutoff
    return scores
| true |
1032c4fd109a7421505dd910c16540ed384260e6 | Python | FredCox3/public-jss | /PSU 2015 Casper API Examples/Python/jss_DuplicateFromFilePSU.py | UTF-8 | 6,782 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/local/bin/python3
import sys
import requests
import argparse
import time
import json
import csv
import xml.etree.cElementTree as ET
jssURL = "https://jssurl.com:8443"
class ArgParser(object):
    """Parses the command line for the JSS duplicate-cleanup script:
    a required CSV file of serial numbers, optional -u/-p API
    credentials, and a -l/--logonly dry-run switch.

    Note: construction calls ``parse_args()``, so instantiating this
    class reads ``sys.argv`` and may exit with a usage error.
    """
    def __init__(self):
        parser = argparse.ArgumentParser(description="JSS Duplicate Cleanup.", epilog="You can export a list from your MySQL Server hosting the JSS Database. See GitHub Readme for more details.")
        # NOTE(review): mode 'rU' was removed in Python 3.11 — confirm
        # the target interpreter version before upgrading.
        parser.add_argument('file', type=argparse.FileType('rU'), help="Path to CSV file with serials.")
        parser.add_argument('-u', '--user', type=str, default=None, help="JSS API Username")
        parser.add_argument('-p', '--passw', type=str, default=None, help="JSS API Password")
        parser.add_argument('-l', '--logonly', help='Log Only. Do not delete object.', action="store_true", )
        args = parser.parse_args()
        # Expose the parsed values as plain attributes.
        self.logonly = args.logonly
        self.username = args.user
        self.password = args.passw
        self.file = args.file
        if self.logonly:
            print("[INFO] Log only flag set")
        else:
            pass
def main():
    """For each serial number in the input CSV: find its JSS computer
    records, and when exactly two exist, copy the older record's
    username/asset-tag/storage-bin onto the newer record and delete the
    older duplicate (skipped when --logonly is set).
    """
    args = ArgParser()
    jsonheaders = {'accept': 'application/json'}
    importfile = args.file
    rowReader = csv.DictReader(importfile)
    if args.username is None or args.password is None:
        print("[ERROR] Please supply username and password using -u and -p option")
        sys.exit()
    else:
        pass
    for row in rowReader:
        serialNum = (row["serial_number"])
        # Build the JSS Search URI
        searchUri = jssURL + "/JSSResource/computers/match/" + serialNum
        print("[INFO] Removing Duplicates for " + serialNum)
        # Send GET request
        searchResults = requests.get(searchUri, auth=(args.username, args.password), headers=jsonheaders)
        # Load the Search Results as a json file
        jsonData = searchResults.json()
        if len(jsonData['computers']) != 2:
            print("[INFO] No Duplicate Found for " + serialNum + ". My job here is done!")
            continue
        else:
            # Loop through entries to determine newest and get data from oldest
            # NOTE(review): `id` below shadows the builtin; kept as-is to
            # avoid touching behavior in a doc-only pass.
            jssid = ()
            id = []
            username = []
            report_date = []
            report_date_obj2 = []
            asset_tag = []
            bin_number = []
            for record in jsonData['computers']:
                jssid = (record['id'])
                # NOTE(review): the unary `+ jssid` works only because
                # jssid is numeric — confirm the API always returns ints.
                print("[INFO] JSSID: ", + jssid)
                idUri = jssURL + "/JSSResource/computers/id/" + str(jssid) + "/subset/General&Location&extension_attributes"
                loopCall = requests.get(idUri, auth=(args.username, args.password), headers=jsonheaders)
                # Append to respective variables.
                id.append(loopCall.json()['computer']['general']['id'])
                username.append(loopCall.json()['computer']['location']['username'])
                report_date = loopCall.json()['computer']['general']['report_date']
                asset_tag.append(loopCall.json()['computer']['general']['asset_tag'])
                # Create time as object, not string so we can do some logic on it later.
                report_date_obj = time.strptime(report_date, "%Y-%m-%d %H:%M:%S")
                report_date_obj2.append(report_date_obj)
                # Loop through the extension attributes until you find one with expected ID
                # then append that ID's value so we can add it to the new record.
                for c in loopCall.json()['computer']['extension_attributes']:
                    if c['id'] == 32:
                        bin_number.append(c['value'])
            # Expect the first record (smallest ID) to be the oldest check-in. If not, report.
            deleteURI = ()
            if report_date_obj2[0] < report_date_obj2[1]:
                print("[INFO] Old Computer:", id[0], asset_tag[0], username[0], bin_number[0])
                print("[INFO] New Computer:", id[1], asset_tag[1], username[1], bin_number[1])
                updateURI = jssURL + "/JSSResource/computers/id/" + str(id[1])
                deleteURI = jssURL + "/JSSResource/computers/id/" + str(id[0])
                # Build the XML Representation
                root = ET.Element("computer")
                general = ET.SubElement(root, 'general')
                ET.SubElement(general, 'asset_tag').text = asset_tag[0]
                site = ET.SubElement(general, 'site')
                ET.SubElement(site, 'id').text = "-1"
                ET.SubElement(site, 'name').text = "None"
                location = ET.SubElement(root, 'location')
                ET.SubElement(location,'username').text = username[0]
                ext_attrs = ET.SubElement(root, 'extension_attributes')
                ext_attr = ET.SubElement(ext_attrs, 'extension_attribute')
                ET.SubElement(ext_attr, 'id').text = "32"
                store_bin = ET.SubElement(ext_attr, 'name').text = "Storage Bin"
                ET.SubElement(ext_attr, 'value').text = bin_number[0]
                xmlData = ET.tostring(root)
                # Delete Old Duplicate Record first, otherwise PUT will fail.
                if args.logonly:
                    print("[INFO] LOG ONLY Flag Set. No changes made to ID:",id[0])
                else:
                    # Help out the impatient human.
                    print("[INFO] Deleting old record. This could take some time. Delete ID:", id[0])
                    print("[INFO] URL Requested for Delete: " + deleteURI)
                    delReq = requests.delete(deleteURI, auth=(args.username,args.password), timeout=240)
                    # Expect status code of 200. If something else, report it.
                    if int(delReq.status_code) == 200:
                        print("[INFO] Delete Successful! Status Returned:", delReq.status_code)
                    else:
                        print("[INFO] Delete Failed! Status Returned:", delReq.status_code)
                    # Put New Info Up not that duplicate has been removed.
                    print("[INFO] URL Requested for update: " + updateURI)
                    putReq = requests.put(updateURI, auth=(args.username, args.password), data=xmlData, timeout=240)
                    # Expect Status Return of 201. If something else, report it.
                    if int(putReq.status_code) != 201:
                        print("[ERROR] Error Occurred Updating! Status Returned:", putReq.status_code)
                    else:
                        print("[INFO] Update Successful! Status Returned:", putReq.status_code)
            else:
                print("[ERROR] Unexpected result. Manually inspect duplicates.")

if __name__ == "__main__": main()
4fb9449998b845152867ffd7bfd9b68aeba4e21c | Python | NIKHILDUGAR/leetcodemonthlyQs | /August2020/Day 12.py | UTF-8 | 220 | 2.9375 | 3 | [] | no_license | class Solution:
def getRow(self, rowIndex: int) -> List[int]:
l = [1]*(rowIndex + 1)
for i in range(1, rowIndex):
for j in range(i, 0, -1):
l[j] += l[j-1]
return l
| true |
5f1d6ca2c180f106189d576ee4ad4b7e0a717e7d | Python | alephist/edabit-coding-challenges | /python/test/test_is_average_whole_number.py | UTF-8 | 634 | 3.203125 | 3 | [] | no_license | import unittest
from typing import List, Tuple
from is_average_whole_number import is_avg_whole
# Table of (input list, expected result) pairs: True when the average of
# the numbers is a whole number.
test_values: Tuple[Tuple[List[int], bool]] = (
    ([3, 5, 9], False),
    ([1, 1, 1, 1], True),
    ([1, 2, 3, 4, 5], True),
    ([5, 2, 4], False),
    ([11, 22], False),
    ([4, 1, 7, 9, 2, 5, 7, 2, 4], False)
)
class IsAverageWholeNumberTestCase(unittest.TestCase):
    """Checks is_avg_whole against every (numbers, expected) pair."""

    def test_check_if_average_of_numbers_is_whole_number(self):
        for numbers, expected in test_values:
            # Each table row runs as its own sub-test so one failure
            # does not hide the rest.
            with self.subTest():
                self.assertEqual(is_avg_whole(numbers), expected)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| true |
215d6834c9d427e767d5881218a52e53ebd47e75 | Python | jieya907/animal_head | /main.py | UTF-8 | 1,963 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import jinja2
import os
import urllib2
import face_detect
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
# class HelloHandler(webapp2.RequestHandler):
# def get(self):
# template_vars = {'name': self.request.get('name')}
# if(True):
# name = "CSSI Seattle"
# else:
# name = "CSSI Chicago"
# template_vars = {"name": name}
# template = jinja_environment.get_template('templates/hello.html')
# self.response.out.write(template.render(template_vars))
# class MainHandler(webapp2.RequestHandler):
# def get(self):
# template = jinja_environment.get_template('templates/main.html')
# self.response.out.write(template.render())
# def post(self):
# self.response.out.write("Your answers have been submitted! You think you know Chicago?")
class IndexHandler(webapp2.RequestHandler):
    """Serves the index form (GET) and runs face detection on a
    submitted image URL (POST)."""
    def get(self):
        # Render the static form template.
        template = jinja_environment.get_template('templates/index.html')
        self.response.out.write(template.render())
    def post(self):
        # 'textline' is the form field holding the image URL.
        link = self.request.get('textline')
        face_detect.process_web(link)
        self.response.out.write("your link is " + link)
# WSGI route table. Only IndexHandler is defined; the original also
# routed /hello and /main to HelloHandler and MainHandler, which are
# commented out above and raised NameError at import time.
app = webapp2.WSGIApplication([
    ('/index', IndexHandler)
], debug=True)
| true |
e86dd9f64232ac107d3d19f689f82ccf3c3475b5 | Python | Pamtzo/GMM | /imagen.py | UTF-8 | 1,422 | 3.125 | 3 | [] | no_license | from PIL import Image
import os
# Write the CSV header row to both dataset files.
# NOTE(review): append mode ('a') adds a duplicate header on every run —
# presumably this script is meant to run once per dataset; confirm
# before re-running on existing files.
data=open("dataset256.txt",'a')
data.write("RED,GREEN,BLUE,NUMERO,ESTILO\n")
data.close()
data=open("dataset512.txt",'a')
data.write("RED,GREEN,BLUE,NUMERO,ESTILO\n")
data.close()
def datagen(directory, etiqueta, size):
    """Resize every image in ``directory`` to size x size, save a copy
    under ``etiqueta/<size>/``, and append one CSV row per RGB image
    (mean R, mean G, mean B, number of distinct colors, label) to
    ``dataset<size>.txt``.

    Fix over the original: the channel sums are now divided by the
    actual pixel count (size*size) instead of the hard-coded 40000
    (i.e. 200x200), which wrongly scaled the averages for the 256 and
    512 sizes actually used.
    """
    entries = os.listdir(directory)   # listed once, not on every iteration
    total_px = size * size            # pixels per resized image
    paso = 0
    for name in entries:
        print("Paso #{} de {}, {}".format(paso, len(entries), name))
        im = Image.open(directory + '/' + name)  # Can be many different formats.
        im = im.resize((size, size), Image.NEAREST)
        im.save(etiqueta + "/" + str(size) + "/" + name)
        pix = im.load()
        print(im.size)  # Get the width and hight of the image for iterating over
        # Only multi-channel (RGB/RGBA) images carry color statistics;
        # grayscale pixels are plain ints and are skipped.
        if type(pix[0, 0]) is tuple:
            colores = set()  # distinct colors; set membership is O(1)
            RED = 0
            GREEN = 0
            BLUE = 0
            for x in range(im.size[0]):
                for y in range(im.size[1]):
                    colores.add(pix[x, y])
                    RED += pix[x, y][0]
                    GREEN += pix[x, y][1]
                    BLUE += pix[x, y][2]
            paso += 1
            with open("dataset" + str(size) + ".txt", 'a') as data:
                data.write(str(RED / total_px) + "," + str(GREEN / total_px) +
                           "," + str(BLUE / total_px) + "," +
                           str(len(colores)) + "," + etiqueta + '\n')
# Build both labeled datasets at 512x512.
datagen("Dataset","americano",512)
datagen("Asiatico","asiatico",512)
604c1e332a798b4771421bef59dc175775767089 | Python | anushas123/spectral | /spectral-subtraction-master/ss1.py | UTF-8 | 1,875 | 2.921875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | # coding: utf-8
# spectral subtraction: one of noise reduction method
# This is a simple python script, is not advanced one.
#
# Usage:
# specify wav file names below,
# infile: input wav file including noise
# outfile: output wav file
# noisefile: noise only wav file, that is some (noise only) portion of input wav edited by manual (ex: Audacity)
#
# then, python3 ss1.py
# Check version:
#
# Python 3.6.4 win32 64bit
# Windows 10, 64bit
# numpy (1.14.0)
# scipy (1.0.0)
# librosa (0.6.0)
import numpy as np
# Fix: `import scipy` alone does not load the scipy.io.wavfile
# submodule, so scipy.io.wavfile.write below raised AttributeError.
import scipy.io.wavfile
import librosa

# edit following wav file name
infile = 'mantram_short.wav'
outfile = 'output_short.wav'
noisefile = 'noise_short.wav'

# load input file, and stft (Short-time Fourier transform)
print('load wav', infile)
w, sr = librosa.load(infile, sr=None, mono=True)  # keep native sr (sampling rate) and trans into mono
s = librosa.stft(w)        # Short-time Fourier transform
ss = np.abs(s)             # get magnitude
angle = np.angle(s)        # get phase
b = np.exp(1.0j * angle)   # use this phase information when Inverse Transform

# load noise only file, stft, and get mean magnitude spectrum over time
print('load wav', noisefile)
nw, nsr = librosa.load(noisefile, sr=None, mono=True)
ns = librosa.stft(nw)
nss = np.abs(ns)
mns = np.mean(nss, axis=1)  # get mean

# subtract noise spectral mean from input spectral, restore phase, and
# istft (Inverse Short-Time Fourier Transform) back to time domain
sa = ss - mns.reshape((mns.shape[0], 1))  # reshape for broadcast to subtract
sa0 = sa * b                              # apply phase information
y = librosa.istft(sa0)

# save as a signed 16-bit WAV file
scipy.io.wavfile.write(outfile, sr, (y * 32768).astype(np.int16))
print('write wav', outfile)
193e1f4098abe5a47d17eecf10d22cc96a023816 | Python | parthoza08/python | /ch-9_prq2.py | UTF-8 | 447 | 3.125 | 3 | [] | no_license | def game ():
pass
# Compare the player's score with the stored high score and update it.
usrscore = int(input("enter your score\n"))

# Treat a missing highscore file like an empty one instead of crashing
# with FileNotFoundError on the very first run.
try:
    with open("highscore.txt") as f:
        hiscore = f.read().strip()
except FileNotFoundError:
    hiscore = ""

if hiscore == "":
    # No score recorded yet: this one becomes the first high score.
    with open("highscore.txt", "w") as new:
        new.write(str(usrscore))
    print("congo got first highscore", usrscore)
elif usrscore > int(hiscore):
    # New record: overwrite the stored score.
    with open("highscore.txt", "w") as new:
        new.write(str(usrscore))
    print("you got highscore" , usrscore)
else:
    print("try next time")
| true |
6456b0e649d38e2156abc260fd8c4876abb27d78 | Python | omer-goder/python-skillshare-beginner | /dictionary/looping_through_a_dictionary(2).py | UTF-8 | 377 | 4.09375 | 4 | [] | no_license | ### 24/04/2020
### Author: Omer Goder
### Other ways to loop through a dictionary
# Example dictionary mapping people to their birthday months.
birthday_months = {
    'tony': 'november',
    'pat': 'june',
    'mary': 'may',
}

# Iterating a dict yields its keys directly.
for person in birthday_months:
    print(person.title())

# .values() yields the stored months.
for birth_month in birthday_months.values():
    print(birth_month)

# A dict view object, printed as dict_keys([...]).
print(birthday_months.keys())

# Duplicate literals collapse: a set keeps unique items only.
this_set = {"apple", "apple", "cherry"}
print(this_set)
| true |
d83f6caeb742f2b199f74fac44de3ddb36f495be | Python | MoisesFreitas1/Algoritmos-e-Estrutura-de-Dados | /Q9.py | UTF-8 | 306 | 3.5625 | 4 | [] | no_license | num1 = [0,0]
# Read two numbers into num1, then print their non-negative difference.
num1[0] = float(input("1o. número: "))
num1[1] = float(input("2o. número: "))
aux = 0
# Swap loop over both index pairs; net effect: num1 sorted ascending.
for i in range(0,2):
    for j in range(0,2):
        if num1[i]<num1[j]:
            aux = num1[i]
            num1[i] = num1[j]
            num1[j] = aux
# num1[1] now holds the larger value, so sub is the absolute difference.
sub = num1[1] - num1[0]
print("Subtracao: ",sub)
| true |
428870adc7cb3fe7fb28181ac3db9f0ae4124e49 | Python | Morvram/SingleTransferrableVote | /Single_Transferrable_Vote/VoterGenerationAlgorithm.py | UTF-8 | 4,404 | 3.546875 | 4 | [] | no_license | #Joshua Mandell
from District import *
def voterGenerationAlgorithm(districts, numVoters, candidatesPerDistrict, choiceVotesForCandidates): # first draft
    """Build District objects in-place: for each district name, create
    Candidate objects, create numVoters[i] Voter objects, distribute
    ranked-choice votes according to the per-candidate counts, and
    replace districts[i] with the resulting District.  Returns the
    (mutated) districts list.

    NOTE(review): the vote-distribution loop below reads
    choiceVotesForCandidates[i][k][0] for every choice round n; the
    surrounding comments suggest index [n] (the n-th choice counts) may
    have been intended — confirm before relying on later-choice data.
    """
    #districts = a list (1d) of district names
    #numVoters = a list (1d) of the number of voters in each district.
    #candidatesPerDistrict = a list (2d) of lists of candidates, of equal length.
    #choiceVotesForCandidate = a list (3d):
    #1st layer: one list corresponding to each district.
    #2nd layer one list corresponding to each candidate in that district.
    #3rd layer number of votes at first, second, third etc. choice.
    #For each district, create the district object using:
    i = 0
    while i < len(districts):
        name = districts[i]
        print("Moving through " + name)
        #1) The relevant name,
        #2) The candidate names in that district, (initialize the Candidates as objects)
        canNames = candidatesPerDistrict[i]
        candidates = []
        for c in canNames:
            candidates.append(Candidate(c))

        repsToElect = len(choiceVotesForCandidates[i][0])
        #3) OK this one's more complicated:
        #(1) Create a list voters[] of Voter objects of length equal to the total number of first-choice votes that exist in the district,
        # with empty lists as their contents
        # (2) For each Voter in that list, append a first choice candidate. If the first candidate has 754 first-choice votes,
        # the first 754
        #Voter objects will have that candidate inserted as their first choice.
        # An aside on this: basically, we'll be setting the first choice of the Voter in the i spot of the list of voters,
        # while i < choiceVotesForCandidate[currentDistrict][i][0].
        # 0 in this case should be the initial value for j, which corresponds to choice number -1.
        #Voter.votes[j] = candidatesPerDistrict[currentDistrict][k]
        #Where k is the index of the candidate we're currently on (i will have to reset each time we increment k)
        #YEAH OKAY THIS IS COMPLICATED BUT INTERESTING.
        #Do the same for second choice, third choice, etc etc until the end
        #1
        voters = []
        for n in range(0, numVoters[i]):
            voters.append(Voter([])) #voters[n] = Voter([])
        # follow above todo (2) -
        for n in range(0, len(choiceVotesForCandidates[i][0])):
            k = 0 #Candidate index in the candidates[] list
            j = 0 #Voter index in the voters[] list
            ij = 0 #counter up to number of first-choice votes for that candidate.
            while j < len(voters):
                if ij < choiceVotesForCandidates[i][k][0]: #i represents current district, k represents candidate index in candidates[] list
                    ij += 1
                    voters[j].votes.append(candidates[k])
                    j += 1
                else: #switch to the next candidate
                    ij = 0 #reset this because ij is what counts up to the number of first-choice voters for that candidate.
                    k += 1
                #We will eventually run out of voters (j will reach len(voterS))
        # repsToElect = the number of representatives that the district in question is electing.
        # determine how we will decide how many representatives will be elected in a district!
        #note above, line 27: repsToElect = len(choiceVotesForCandidates[currentDistrict][0])
        # the number of candidates to elect from a district should be equal to the number of choices each voter may cast in that election.
        # if there are going to be three candidates elected in a particular district race, each voter should choose a first, second, and third choice candidate.
        #Then initialize the District object to the current slot in the districts[] list
        districts[i] = District(repsToElect, candidates, voters, name)
        print("For District: " + name + ": we will elect " + str(repsToElect) + " representatives from the following: " + str(candidates) + ". There are " + str(len(voters)) + " voters.")
        i += 1
    #You should now have a list of Districts, each of which has Candidates (whose Voter pools are not yet filled)
    #and Voters (with their choices determined) within.
    return districts
0a84fda111a4bd6bd61c485a7426956b8e5554d0 | Python | Creditas/messengerbot | /messengerbot/quick_replies.py | UTF-8 | 1,413 | 2.921875 | 3 | [] | no_license | class QuickReplyItem(object):
def __init__(self, content_type, title=None, payload=None, image_url=None):
if content_type == 'text':
if not title and not payload:
raise ValueError('<Message> must be set')
if len(title) > 20:
raise ValueError('Quick reply title limit is 20 characters')
if len(payload) > 1000:
raise ValueError('Quick reply payload limit is 1000 characters')
self.content_type = content_type
self.title = title
self.payload = payload
self.image_url = image_url
def to_dict(self):
if self.content_type == 'location':
return {
'content_type': self.content_type,
'image_url': self.image_url
}
if self.content_type == 'text':
return {
'content_type': self.content_type,
'title': self.title,
'payload': self.payload,
'image_url': self.image_url
}
class QuickReplies(object):
    """Container for a list of QuickReplyItem objects."""

    def __init__(self, quick_replies):
        # The Send API expects a JSON array, so reject anything else early.
        if not isinstance(quick_replies, list):
            raise ValueError(
                'quick_replies should be a list of QuickReplyItems'
            )
        self._quick_replies = quick_replies

    def to_dict(self):
        """Serialize every contained item, preserving order."""
        serialized = []
        for reply in self._quick_replies:
            serialized.append(reply.to_dict())
        return serialized
| true |
5e595c282884dffcf84dc85895509c571d9369ff | Python | judyfun/python | /db-example/python2/Common.py | UTF-8 | 537 | 3.078125 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
import datetime
def formatDate(date, hour):
    """Strip the dashes from a 'YYYY-MM-DD' date and keep only the
    first two characters (the hour digits) of ``hour``.

    Returns the pair (compact_date, hour_prefix).
    """
    return date.replace('-', ''), hour[:2]
# 2019-11-12-18
# 2019-11-12-18
def last_hour2():
    """Print and return the timestamp one hour ago as 'YYYY-MM-DD-HH'."""
    stamp = (datetime.datetime.now()
             - datetime.timedelta(hours=1)).strftime("%Y-%m-%d-%H")
    print(stamp)
    return stamp
def last_hour():
    """Print and return the hour of day (0-23) one hour before now.

    Fixes over the original: the unused current-hour local is removed,
    and the computed value is now returned (it previously returned
    None, making the function print-only).
    """
    prev = (datetime.datetime.now() - datetime.timedelta(minutes=60)).hour
    print(prev)
    return prev
# Manual smoke test: print last hour's timestamp when run directly.
if __name__ == '__main__':
    last_hour2()
| true |
270daebf61050d6b58031d8722550711dffe61e8 | Python | LucasAVasco/datalogger-tensao-de-rede | /interface/main.py | UTF-8 | 752 | 3.046875 | 3 | [] | no_license | #!/usr/bin/python3.8
# coding=UTF-8
"""
Código principal.
Lee o arquivo de configuração 'config.txt' e os dados do diretório 'data/'
e gera os gráficos no diretório 'interface/'.
"""
# Modules
import modules.readFile as readFile
import modules.configClass as configClass
import modules.interface as interface
# Open config file
input_file = open('config.txt')
graphs = []  # one graph object per block found in config.txt
# Main loop: getNextLine returns '' at end of file
while True:
    line = readFile.getNextLine(input_file)
    if line == '':
        break
    # Configure a graph from the next config block and attach its data files
    graphs.append(configClass.graph())
    graphs[-1].readProperties(input_file)
    graphs[-1].addAllFiles()
# Build the interface in the browser and display it
interface.add_all_graphs(graphs)
interface.show()
# Close config file
input_file.close()
| true |
01f9504efacdecbe205251ed55087e20a55f85f8 | Python | naponmeka/timeseries_final_project | /dba.py | UTF-8 | 2,899 | 2.609375 | 3 | [] | no_license | from math import *
from scipy.optimize import basinhopping
import sys
import time
import statistics
import matplotlib.pyplot as plt
from computeAccuracy import *
start = time.time()
train_filename = 'Trace_ALL'
test_filename = 'Beef_TEST'
f = open('ClassificationClusteringDatasets/' + train_filename)
data_train = []
train_lower_b = []
train_upper_b = []
window_size = 10
r = 5
data_train_dict = {}
# Each line: class label followed by the series values.  Group the
# normalized series by class label and precompute Keogh envelopes.
# normalizeSeries / upper_keogh / lower_keogh come from computeAccuracy
# (star import) -- TODO confirm their contracts.
for line in f:
    floats = [float(x) for x in line.strip().split()]
    ts = floats[1:]
    ts = normalizeSeries(ts)
    if floats[0] in data_train_dict:
        data_train_dict[floats[0]].append(ts)
    else:
        data_train_dict[floats[0]] = [ts]
    # data_train.append(floats)
    train_upper_b.append(upper_keogh(ts))
    train_lower_b.append(lower_keogh(ts))
f.close()
def dba(D, I):
    """DTW Barycenter Averaging: iteratively refine an average sequence.

    :param D: the set of sequences to average
    :param I: number of refinement iterations
    :return: the averaged sequence
    """
    average = D[0]  # seed with the first sequence of the set
    for _ in range(I):
        average = dba_update(average, D)
    return average
def dba_update(T, D):
    """One DBA refinement step.

    :param T: current average sequence (length L)
    :param D: the set of sequences being averaged
    :return: refined average sequence of length L
    """
    L = len(T)
    # STEP 1: pool, per position of T, every value of every sequence that
    # DTW aligns to that position.
    pooled = [set() for _ in range(L)]
    for sequence in D:
        for i, aligned in enumerate(dtw_multiple_alignment(T, sequence)):
            pooled[i] |= aligned
    # STEP 2: each position becomes the mean of its pooled values (0 if none).
    return [statistics.mean(values) if values else 0 for values in pooled]
def dtw_multiple_alignment(s_ref, s):
    """Associate each position of s_ref with the values of s aligned to it by DTW.

    :param s_ref: reference sequence
    :param s: sequence to align against s_ref
    :return: list (len == len(s_ref)) of sets of values from s
    """
    # STEP 1 : Compute the accumulated cost matrix of DTW
    # DTWCostMatrix comes from computeAccuracy (star import); presumably
    # returns a cost structure indexed by (i, j) plus a path -- TODO confirm.
    cost, path = DTWCostMatrix(s_ref, s, 20)
    # STEP 2 : Store element associate to s_ref
    L = len(s_ref)
    alignment = [set() for _ in range(L)]
    i = len(s_ref) - 1
    j = len(s) - 1
    # NOTE(review): the loop condition requires i > 1 AND j > 1, so the
    # `if i == 1` / `elif j == 1` branches below can never execute and
    # positions 0 and 1 of s_ref are never populated.  The DBA backtracking
    # is usually written with `or` -- verify against the reference algorithm.
    while i > 1 and j > 1 :
        alignment[i] = alignment[i].union(set([s[j]]))
        if i == 1: j = j -1
        elif j ==1: i = i-1
        else:
            # Follow the cheapest predecessor (diagonal wins ties).
            score = min(cost[(i-1, j-1)], cost[(i, j-1)], cost[(i-1, j)])
            if score == cost[(i-1, j-1)]:
                i = i - 1
                j = j - 1
            elif score== cost[(i-1, j)]:
                i = i - 1
            else: j = j -1
    return alignment
the_mean = 0
sum_weight = 0
meanDistances = []
weights_to_mean = []
# Average each class with DBA (10 refinement iterations) and record the
# mean distance of the class members to their DBA average.
for key, one_class_data in data_train_dict.items():
    mean = dba(one_class_data, 10)
    distance = avgMeanErrorEuclideanDistance(mean, one_class_data)
    meanDistances.append(distance)
    weights_to_mean.append(len(one_class_data))
print("MEAN DIS")
print(meanDistances)
print("AVG MEAN DIS")
the_mean = 0
sum_weight = 0
# Weighted average of the per-class distances (weights = class sizes).
for idx,m in enumerate(meanDistances):
    the_mean += m * weights_to_mean[idx]
    sum_weight += weights_to_mean[idx]
the_mean = the_mean/sum_weight
print(the_mean) | true |
23f04b52842ec6a6b33274989a0849dcebbdce3b | Python | dyrroth-11/Information-Technology-Workshop-I | /Python Programming Assignments/Final Assignment/4.py | UTF-8 | 956 | 4.53125 | 5 | [] | no_license | #!/usr/bin/env python3
"""
Created on Fri Jun 19 10:22:12 2020
@author: Ashish Patel
"""
"""
Question: Write a python program using a given string “S” and width “W” to wrap the string “S”
into a wordof width “W”. Also, print the first and last character of each word in a
string of two characters.
Example
Input: S : ABCDEFGHIJKL
W : 4
Output: Output_1:ABCD Output_2: AD
EFGH EH
IJKL IL
Logic: Here we slice our string s into substing of size w by using makeSlice function which return
list of all those substring .Finally we output those substtring as we as their first and
last character.
"""
def makeSlice(s, w):
    """Split string s into consecutive chunks of width w (last may be shorter)."""
    chunks = []
    for start in range(0, len(s), w):
        chunks.append(s[start:start + w])
    return chunks
# Read the string and the slice width from the user.
s = str(input("Enter string S : "))
w = int(input("Enter width W : "))
listOfSlices = makeSlice(s,w)
# Output_1: the string wrapped into chunks of width w, one per line.
print("Output_1 :")
for i in listOfSlices:
    print(i)
# Output_2: first and last character of each chunk.
print("Output_2 :")
for i in listOfSlices:
    print(i[0],i[-1])
ea4d4463e92b3c45398002737319c0fc9e46322a | Python | cpt-r3tr0/mathematics | /Number Theory/GCDProduct.py | UTF-8 | 381 | 2.828125 | 3 | [] | no_license | #!/bin/python3
# Compute prod(gcd(i, j)) for 1 <= i <= N, 1 <= j <= M, modulo 1e9+7.
N, M = map(int, input().split())
lim = min(N,M) + 1
# Sieve of Eratosthenes over odd numbers only: mark[i >> 1] flags odd i composite.
mark = bytearray(lim >> 1)
primes = [2]
for i in range(3,lim,2):
    if mark[i>>1]: continue
    primes.append(i)
    for j in range(3*i,lim,2*i):
        mark[j>>1] = 1
mod = 10**9 + 7
prod = 1
# Each prime power q = p^k contributes one extra factor of p for every pair
# (i, j) with q | gcd(i, j), i.e. (N//q)*(M//q) times.
for p in primes:
    q = p
    while q < lim:
        prod = (prod * pow(p, (N//q)*(M//q), mod)) %mod
        q *= p
print(prod)
| true |
39e7089de5a8087be82547032fe96e20a7a0d0c5 | Python | enrikiko/Python | /9.Intermediate/Spyder/Artificial_Neural_Networks/4init.py | UTF-8 | 2,777 | 2.59375 | 3 | [] | no_license | from joke import save
from request import sendHttp
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Run metadata reported to the remote monitor before training starts.
startDate={ "Start" : time.time(),
            "Version" : "4init.py" }
sendHttp(startDate)
# Importing the dataset
dataset = pd. read_csv('Churn_Modelling.csv')
x = dataset.iloc[:, 3:13].values  # features: columns 3-12
y = dataset.iloc[:, 13].values  # target: column 13
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_x_1 = LabelEncoder()
labelencoder_x_2 = LabelEncoder()
x[:, 1] = labelencoder_x_1.fit_transform(x[:, 1]) # convert string column to numbers
x[:, 2] = labelencoder_x_2.fit_transform(x[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1], handle_unknown='ignore', dtype=np.integer) # encode dummy variables for column 1
x = onehotencoder.fit_transform(x).toarray()
x = x[:, 1:]  # drop one dummy column to avoid the dummy-variable trap
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
def build_classifier(neuronN, neuronP):
    """Build and compile the tunable ANN for GridSearchCV.

    :param neuronN: units in the first hidden layer
    :param neuronP: units in the second hidden layer
    :return: compiled Keras Sequential binary classifier
    """
    model = Sequential()
    # Two ReLU hidden layers over the 11 encoded input features.
    model.add(Dense(units=neuronN, kernel_initializer='uniform', activation='relu', input_dim=11))
    model.add(Dense(units=neuronP, kernel_initializer='uniform', activation='relu'))
    # Single sigmoid output unit for the binary target.
    model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Wrap the Keras builder so scikit-learn's grid search can drive it.
classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 300)
# Hyperparameter grid: layer sizes to try for both hidden layers.
parameters = {
    'neuronN' : [6, 7],
    'neuronP' : [6, 7]
}
sendHttp(parameters)
# 10-fold cross-validated grid search over the layer sizes.
grid_search = GridSearchCV(estimator = classifier,
                           param_grid = parameters,
                           scoring = 'accuracy',
                           cv = 10)
grid_search = grid_search.fit(x_train, y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
print(best_parameters)
print(best_accuracy)
# Report the outcome remotely and persist it locally.
final={ "best_parameters" : best_parameters,
        "best_accuracy" : best_accuracy,
        "Finish" : time.time() }
sendHttp(final)
save(str(parameters))
save(str(best_parameters))
save(str(best_accuracy))
#accurancies = cross_val_score(estimator = classifier, X = x_train, y = y_train, cv = 10, n_jobs = -1 )
#mean = accurancies.mean()
#variance = accurancies.std()
| true |
3ccc6e284b09e5bbd13b007f30ccd4c4b138bfb3 | Python | jorgemauricio/alermapweb | /algoritmos/generar_mapas.py | UTF-8 | 8,104 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#######################################
# Script que permite la generación de mapas
# meteorológicos extremos
# Author: Jorge Mauricio
# Email: jorge.ernesto.mauricio@gmail.com
# Date: Created on Thu Sep 28 08:38:15 2017
# Version: 1.0
#######################################
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from scipy.interpolate import griddata as gd
from time import gmtime, strftime
import time
import os
from time import gmtime, strftime
import ftplib
import shutil
import csv
import math
from api import claves
def main():
    """Entry point: download the latest forecast, then render every map."""
    print("Init")
    fecha_pronostico = descargarInfo()
    mapasExtremos(fecha_pronostico)
def descargarInfo():
    """Download the latest forecast files (d1.txt..d5.txt) over FTP.

    Picks the most recent date directory on the server, mirrors it under
    data/<date>/ locally, and returns the forecast date string.
    """
    # server credentials
    serverInfo = claves()
    # connect to the server
    ftp = ftplib.FTP(serverInfo.ip)
    # log in
    ftp.login(serverInfo.usr, serverInfo.pwd)
    # collect the directory listing to determine the latest date
    arregloArchivos = []
    arregloFechas = []
    ftp.dir(arregloArchivos.append)
    for archivo in arregloArchivos:
        arregloArchivo = archivo.split()
        # assumes a UNIX-style listing where the name is column 8 -- TODO confirm
        arregloFechas.append(arregloArchivo[8])
    FECHA_PRONOSTICO = arregloFechas[-1]
    rutaPronostico = "data/{}".format(FECHA_PRONOSTICO)
    ftp.cwd(FECHA_PRONOSTICO)
    # make sure the local target directory exists
    if not os.path.exists(rutaPronostico):
        os.mkdir(rutaPronostico)
    else:
        print("***** Carpeta ya existe")
    # download the five daily forecast files
    for i in range(1,6):
        rutaArchivoRemoto = "d{}.txt".format(i)
        rutaArchivoLocal = "{}/d{}.txt".format(rutaPronostico,i)
        lf = open(rutaArchivoLocal, "wb")
        ftp.retrbinary("RETR " + rutaArchivoRemoto, lf.write, 8*1024)
        lf.close()
    ftp.close()
    return FECHA_PRONOSTICO
def generarFechas(f):
    """
    Genera la lista de 5 fechas consecutivas (la fecha dada y los 4 días
    siguientes) en formato 'YYYY-MM-DD'.

    Replaces the hand-rolled calendar arithmetic: the original treated every
    year divisible by 4 as a leap year, mislabeling century years such as
    2100 and potentially emitting a nonexistent Feb 29.

    param: f: fecha inicial 'YYYY-MM-DD'
    :return: list of 5 date strings, zero-padded, starting with f
    """
    import datetime
    anio, mes, dia = (int(parte) for parte in f.split('-'))
    inicio = datetime.date(anio, mes, dia)
    # datetime handles month lengths and leap years (including the 100/400
    # rules) correctly; strftime zero-pads month and day like the original.
    return [(inicio + datetime.timedelta(days=i)).strftime('%Y-%m-%d')
            for i in range(5)]
def generarTexto(f, k, vMn, vMx):
    """
    Devuelve el título del mapa para la variable k y el rango [vMn, vMx].

    param: f: fecha
    param: k: nombre de la columna ('Rain', 'Tmax', 'Tmin' o 'Windpro')
    param: vMn: valor mínimo
    param: vMx: valor máximo
    :return: título formateado, o None si k no es una variable conocida
    """
    plantillas = {
        "Rain": "Precipitación acumulada en 24h {} a {} mm\n Pronóstico válido para: {}",
        "Tmax": "Temperatura máxima en 24h {} a {} ºC \n Pronóstico válido para: {}",
        "Tmin": "Temperatura mínima en 24h {} a {} ºC \n Pronóstico válido para: {}",
        "Windpro": "Viento promedio en 24h {} a {} km/h \n Pronóstico válido para: {}",
    }
    if k in plantillas:
        return plantillas[k].format(vMn, vMx, f)
def colorPuntoEnMapa(variable, rango):
    """
    Color del punto a dibujar en el mapa para una variable y un rango dados.

    :param variable: 'Rain', 'Tmax', 'Tmin' o 'Windpro'
    :param rango: rango 'min/max' tal como aparece en el dict de análisis
    :return: nombre de color matplotlib, o None si no hay coincidencia
    """
    colores = {
        "Rain": {
            "20/50": 'aqua',
            "50/70": 'powderblue',
            "70/150": 'darkblue',
            "150/300": 'tomato',
            "300/500": 'crimson',
        },
        "Tmax": {
            "30/35": 'coral',
            "35/40": 'orange',
            "40/45": 'crimson',
            "45/50": 'tomato',
            "50/60": 'maroon',
        },
        "Tmin": {
            "3/6": 'powderblue',
            "0/3": 'lightskyblue',
            "-3/0": 'dodgerblue',
            "-6/-3": 'steelblue',
            "-9/-6": 'darkblue',
        },
        "Windpro": {
            "62/74": 'gold',
            "75/88": 'sandybrown',
            "89/102": 'darksalmon',
            "103/117": 'darkorange',
            "118/150": 'maroon',
        },
    }
    return colores.get(variable, {}).get(rango)
def mapasExtremos(fp):
    """
    Render one PNG map per forecast day, variable and value range.

    :param fp: forecast date string ('YYYY-MM-DD') naming the data directory
    """
    # forecast date
    fechaPronostico = fp
    # fechaPronostico = strftime("%Y-%m-%d")
    # ********** path
    # path server
    # path = "/home/jorge/Documents/work/autoPronosticoSonora"
    # os.chdir(path)
    # path local
    # Bounding box (lat/long) of the mapped region.
    LONG_MAX = -86.1010
    LONG_MIN = -118.2360
    LAT_MAX = 33.5791
    LAT_MIN = 12.37
    # Working directory (hard-coded local path).
    path = "/Users/jorgemauricio/Documents/Research/alermapweb"
    os.chdir(path)
    # Analysis dict: variable name -> list of 'min/max' value ranges.
    d = {"Rain" : ['20/50', '50/70', '70/150', '150/300', '300/500'], "Tmax":['30/35', '35/40', '40/45', '45/50', '50/60'], "Tmin" : ['-9/-6','-6/-3','-3/0','0/3','3/6'], "Windpro" : ['62/74', '75/88', '89/102', '103/117', '118/150']}
    # Forecast dates covered by files d1.txt..d5.txt.
    arrayFechas = generarFechas(fechaPronostico)
    # One CSV per forecast day.
    for j in range(1, 6, 1):
        pathFile = '{}/data/{}/d{}.txt'.format(path,fechaPronostico,j)
        data = pd.read_table(pathFile, sep=',')
        for key, value in d.items():
            for i in value:
                # start processing this variable/range
                tiempoInicio = strftime("%Y-%m-%d %H:%M:%S")
                print("Empezar procesamiento {} {} tiempo: {}".format(key, i, tiempoInicio))
                # parse the 'min/max' range
                vMin, vMax = i.split('/')
                vMin = int(vMin)
                vMax = int(vMax)
                # column to filter on
                tituloTemporalColumna = key
                dataTemp = data.loc[(data[tituloTemporalColumna] >= vMin) & (data[tituloTemporalColumna] <= vMax)]
                # station coordinates within the range
                lons = np.array(dataTemp['Long'])
                lats = np.array(dataTemp['Lat'])
                #%% set up plot
                plt.clf()
                fig = plt.figure(figsize=(8,4))
                m = Basemap(projection='mill',llcrnrlat=LAT_MIN,urcrnrlat=LAT_MAX,llcrnrlon=LONG_MIN,urcrnrlon=LONG_MAX,resolution='h')
                # x coordinates
                xp = np.array(dataTemp['Long'])
                # y coordinates
                yp = np.array(dataTemp['Lat'])
                # state boundaries shapefile
                m.readshapefile('shapes/Estados', 'Estados')
                # optional raster background
                # m.bluemarble()
                # plot the points in the range's color
                colorTemporalDelPunto = colorPuntoEnMapa(key, i)
                m.scatter(xp, yp, latlon=True, s=3, marker='o', color=colorTemporalDelPunto, zorder=25, alpha=0.5)
                # map title
                tituloTemporalMapa = generarTexto(arrayFechas[j-1], key, vMin, vMax)
                plt.title(tituloTemporalMapa)
                tituloTemporalArchivo = "{}/data/{}/{}_{}_{}_{}.png".format(path,fechaPronostico,arrayFechas[j-1],key,vMin, vMax)
                # watermark annotation
                latitudAnotacion = (LAT_MAX + LAT_MIN) / 2
                longitudAnotacion = (LONG_MAX + LONG_MIN) / 2
                plt.annotate('@2018 INIFAP', xy=(longitudAnotacion,latitudAnotacion), xycoords='figure fraction', xytext=(0.45,0.45), color='g')
                # save the map
                plt.savefig(tituloTemporalArchivo, dpi=300)
                print('****** Genereate: {}'.format(tituloTemporalArchivo))
                # done with this variable/range
                tiempoFinal = strftime("%Y-%m-%d %H:%M:%S")
                print("Terminar procesamiento {} {} tiempo: {}".format(key, i, tiempoInicio))
# Script entry point.
if __name__ == '__main__':
    main()
| true |
633471013c72a55c11a4e2a699ea212ecde87da9 | Python | alfredez/Half-Duplex | /File.py | UTF-8 | 898 | 3.328125 | 3 | [] | no_license | #!/usr/bin/python
class File:
    """Parsed message file.

    Line 0 holds the DAB id, line 1 the message type, and lines 2-3 the
    latitude/longitude coordinates. Lines are cached verbatim (newlines
    included) after set_lines() is called.
    """

    def __init__(self, filename):
        self.filename = filename
        self.lines = []        # raw file lines, newlines preserved
        self.dab_id = 0
        self.message_type = 0

    def set_lines(self, path):
        """Read path+filename and cache its lines plus the header fields."""
        with open(str(path + self.filename), 'rt') as handle:
            self.lines = [raw_line for raw_line in handle]
        self.dab_id = int(self.lines[0])
        self.message_type = int(self.lines[1])

    def get_dab_id(self):
        return self.dab_id

    def get_message_type(self):
        return self.message_type

    def get_coordinates(self):
        """Return (latitude, longitude) parsed from lines 2 and 3."""
        return float(self.lines[2]), float(self.lines[3])

    def get_lines(self):
        return self.lines
| true |
b3a0380790ea947e2c3e87a3b474aff519f61857 | Python | Funcan/runonnode | /runonnodes.py | UTF-8 | 1,273 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
from noderange import expand
from nodeutils import NodeConnection
import sys
import paramiko
from paramiko import SSHException
import getpass
import argparse
def runonnodes(nodespec, cmd, dshbak=False, verbose=False, user=None):
    """Run cmd over SSH on every node in nodespec (Python 2 script).

    :param nodespec: node range expression, expanded by noderange.expand
    :param cmd: shell command to execute remotely
    :param dshbak: if True, prefix each output line with "node: " instead of
                   printing a banner per node
    :param verbose: passed through to NodeConnection.connect
    :param user: remote user name (None presumably means the current user --
                 TODO confirm NodeConnection's behavior)
    """
    nodes = expand(nodespec)
    if len(nodes) == 0:
        print "Need at least one node to run on"
        sys.exit(1)
    # Nodes are processed sequentially, one connection each.
    for node in nodes:
        nc = NodeConnection(node, user)
        nc.connect(verbose=verbose)
        nc.exec_command(cmd)
        if not dshbak:
            print "--------------- %s ---------------" % (node)
            nc.print_output()
        else:
            nc.print_output(str(node) + ": ")
def main():
    """Parse CLI arguments and fan the command out to the node range from -w."""
    parser = argparse.ArgumentParser(description="Run a command on a set of nodes")
    parser.add_argument('-t', action="store_true", default=False)  # dshbak-style "node: line" output
    parser.add_argument('-v', action="store_true", default=False)  # verbose connection logging
    parser.add_argument('-w', action="store", dest="where")  # node range specification
    parser.add_argument('-u', action="store", dest="user")  # remote user name
    parser.add_argument('command', nargs='*', action="store")
    args = parser.parse_args(sys.argv[1:])
    runonnodes(args.where, " ".join(args.command), dshbak=args.t, verbose=args.v, user=args.user)
# Script entry point.
if __name__ == "__main__":
    main()
| true |
72ade6dd938a3c0656e0bb46167212c6867267ef | Python | weishaodaren/ShamGod | /Basic/condition.py | UTF-8 | 610 | 3.71875 | 4 | [
"MIT"
] | permissive | username = input('请输入用户名:')
password = input('请输入密码:')
# Hard-coded credential check (prompts above are Chinese for username/password).
if username == 'admin' and password == '123456' :
    print('Success')
else:
    print('Fail')
# Piecewise function:
#   y = 3x - 5  for x > 1
#   y = x + 2   for -1 <= x <= 1
#   y = 5x + 3  for x < -1
x = float(input('x = '))
if x > 1:
    y = 3 * x - 5
elif x>= -1:
    y = x + 2
else:
    y = 5 * x + 3
print('%.2f = %.2f' % (x, y))
# Triangle: if the three sides satisfy the triangle inequality, print the
# perimeter (周长) and the area (面积, Heron's formula); otherwise 'No'.
a = float(input('a = '))
b = float(input('b = '))
c = float(input('c = '))
if a + b > c and a + c > b \
        and b + c > a :
    print('周长:%f' % (a + b + c))
    p = (a + b + c) / 2
    area = (p * (p - a) * (p - b) * (p - c)) ** 0.5
    print('面积: %f' % (area))
else:
    print('No')
254d58f4c2e635cf7d0f37ed669fa038fa628ab5 | Python | beevelop/corona-calculator | /models.py | UTF-8 | 5,279 | 3.171875 | 3 | [
"MIT"
] | permissive | import itertools
import pandas as pd
_STATUSES_TO_SHOW = ["Infected", "Dead", "Need Hospitalization"]
def get_predictions(
    cases_estimator, sir_model, num_diagnosed, area_population, max_days
):
    """Run the SIR simulation and return a long-format DataFrame for plotting.

    :param cases_estimator: object whose predict() scales diagnosed to true cases
    :param sir_model: object whose predict() returns per-status value lists
    :param num_diagnosed: diagnosed case count
    :param area_population: total population of the area
    :param max_days: number of days to simulate
    :return: DataFrame with columns Days / Forecast / Status
    """
    true_cases = cases_estimator.predict(num_diagnosed)
    # For now assume removed starts at 0. Doesn't have a huge effect on the model
    forecast = sir_model.predict(
        susceptible=area_population - true_cases,
        infected=true_cases,
        removed=0,
        time_steps=max_days,
    )
    n_points = max_days + 1
    # Long format (one row per day per status) keeps plotly express happy.
    days, values, labels = [], [], []
    for status in _STATUSES_TO_SHOW:
        days.extend(range(n_points))
        values.extend(forecast[status])
        labels.extend([status] * n_points)
    return pd.DataFrame({"Days": days, "Forecast": values, "Status": labels})
class TrueInfectedCasesModel:
    """
    Used to estimate total number of true infected persons based on either number of diagnosed cases or number of deaths.
    """

    def __init__(self, ascertainment_rate):
        """
        :param ascertainment_rate: Ratio of diagnosed to true number of infected persons.
        """
        self._ascertainment_rate = ascertainment_rate

    def predict(self, diagnosed_cases):
        """Scale diagnosed cases up to the estimated true case count."""
        return diagnosed_cases / self._ascertainment_rate
class SIRModel:
    """Discrete-time SIR-style epidemic model with deaths and hospital capacity."""

    def __init__(
        self,
        transmission_rate_per_contact,
        contact_rate,
        recovery_rate,
        normal_death_rate,
        critical_death_rate,
        hospitalization_rate,
        hospital_capacity,
    ):
        """
        :param transmission_rate_per_contact: Prob of contact between infected and susceptible leading to infection.
        :param contact_rate: Mean number of daily contacts between an infected individual and susceptible people.
        :param recovery_rate: Rate of recovery of infected individuals.
        :param normal_death_rate: Average death rate in normal conditions.
        :param critical_death_rate: Rate of mortality among severe or critical cases that can't get access
        to necessary medical facilities.
        :param hospitalization_rate: Proportion of illnesses who need are severely ill and need acute medical care.
        :param hospital_capacity: Max capacity of medical system in area.
        """
        # Daily infection pressure = per-contact transmission * contacts/day.
        self._infection_rate = transmission_rate_per_contact * contact_rate
        self._recovery_rate = recovery_rate
        # Death rate is amortized over the recovery period
        # since the chances of dying per day are mortality rate / number of days with infection
        self._normal_death_rate = normal_death_rate * recovery_rate
        # Death rate of sever cases with no access to medical care.
        self._critical_death_rate = critical_death_rate * recovery_rate
        self._hospitalization_rate = hospitalization_rate
        self._hospital_capacity = hospital_capacity

    def predict(self, susceptible, infected, removed, time_steps=100):
        """
        Run simulation.
        :param susceptible: Number of susceptible people in population.
        :param infected: Number of infected people in population.
        :param removed: Number of recovered people in population.
        :param time_steps: Time steps to run simulation for
        :return: List of values for S, I, R over time steps
        """
        population = susceptible + infected + removed
        # Time series, one entry per step, rounded to whole people.
        S = [int(susceptible)]
        I = [int(infected)]
        R = [int(removed)]
        D = [0]
        H = [round(self._hospitalization_rate * infected)]
        for t in range(time_steps):
            # There is an additional chance of dying if people are critically ill
            # and have no access to the medical system.
            if I[-1] > 0:
                underserved_critically_ill_proportion = (
                    max(0, H[-1] - self._hospital_capacity) / I[-1]
                )
            else:
                underserved_critically_ill_proportion = 0
            # Blend normal and critical mortality by the unserved share.
            weighted_death_rate = (
                self._normal_death_rate * (1 - underserved_critically_ill_proportion)
                + self._critical_death_rate * underserved_critically_ill_proportion
            )
            # Forecast
            s_t = S[-1] - self._infection_rate * I[-1] * S[-1] / population
            i_t = (
                I[-1]
                + self._infection_rate * I[-1] * S[-1] / population
                - (weighted_death_rate + self._recovery_rate) * I[-1]
            )
            r_t = R[-1] + self._recovery_rate * I[-1]
            d_t = D[-1] + weighted_death_rate * I[-1]
            h_t = self._hospitalization_rate * i_t
            S.append(round(s_t))
            I.append(round(i_t))
            R.append(round(r_t))
            D.append(round(d_t))
            H.append(round(h_t))
        return {
            "Susceptible": S,
            "Infected": I,
            "Recovered": R,
            "Dead": D,
            "Need Hospitalization": H,
        }
| true |
4d7ffb70bb211e764e13e3d47d1d7c2aaa612669 | Python | kabirecon/econometrics_with_python | /econometrics/chunk.py | UTF-8 | 622 | 3.15625 | 3 | [] | no_license | # this loads the first file fully into memory
# Load the whole input CSV into memory (one entry per line, newlines kept).
# NOTE(review): '\c' and '\d' happen not to be escape sequences, so this
# Windows path works, but a raw string r'G:\code\dta.csv' would be safer.
with open('G:\code\dta.csv', 'r') as f:
    csvfile = f.readlines()
linesPerFile = 1000000  # rows per output chunk (header counts as a row in chunk 1)
filename = 1  # output files are named 1.csv, 2.csv, ...
# Walk the rows in million-line strides; each slice becomes one output file
# instead of counting line-by-line and writing on the millionth one.
for i in range(0,len(csvfile),linesPerFile):
    with open(str(filename) + '.csv', 'w+') as f:
        if filename > 1: # this is the second or later file, we need to write the
            f.write(csvfile[0]) # header again if 2nd.... file
        f.writelines(csvfile[i:i+linesPerFile])
    filename += 1
| true |
f068bc50306950e55f23d8ec4f68e9f3a7524cd3 | Python | ctralie/GeometricBeatTracking | /TheoryValidation/CirculantGraphs.py | UTF-8 | 2,327 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import sys
sys.path.append("..")
from Laplacian import *
def getCirculantAdj(N, lags):
    """
    Build the adjacency matrix of a circulant graph on N nodes.

    Every node i gets a directed edge to its ring neighbours i+1 and i-1
    (mod N), plus one to i+lag (mod N) for each lag in `lags`.

    The original used Python-2-only `range(...) * k` and `range + range`
    (TypeError on Python 3); the explicit list() calls fix that without
    changing the result.

    :param N: number of nodes
    :param lags: extra circulant offsets (iterable of ints)
    :return: scipy.sparse CSR matrix of shape (N, N)
    """
    # Row indices: each node appears once per edge class
    # (successor + predecessor + one per lag).
    I = list(range(N)) * (len(lags) + 2)
    # Column indices for the ring edges.
    J = list(range(1, N + 1)) + list(range(-1, N - 1))
    J[N - 1] = 0    # wrap: successor of the last node is node 0
    J[N] = N - 1    # wrap: predecessor of node 0 is the last node
    # Extra circulant connections.
    for lag in lags:
        J = J + (np.mod(np.arange(N) + lag, N)).tolist()
    V = np.ones(len(I))
    return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
def getOneOnK(N, k):
    """
    Circulant graph on N nodes: ring edges plus chords from each node to the
    nodes at multiples of N/k around the ring.

    Floor division (//) replaces the Python-2-only `/`: on Python 3 true
    division would produce float lags and break the sparse index arrays,
    while on Python 2 both operators floor identically for ints.

    :param N: number of nodes (intended to be divisible by k)
    :param k: number of equal arcs
    :return: sparse CSR adjacency matrix
    """
    lags = [i * N // k for i in range(1, k)]
    return getCirculantAdj(N, lags)
def getCircleEigs(N):
    """
    Analytic Laplacian eigenvalues of the N-cycle graph.

    Each nonzero eigenvalue 2 - 2*cos(2*pi*i/N) appears with multiplicity 2
    (except the largest one when N is even). Floor division (//) replaces
    the Python-2-only `/` in the range bound; the values are unchanged.

    :param N: number of nodes
    :return: numpy array of N eigenvalues ordered by frequency index
    """
    lambdas = np.zeros(N)
    for i in range(1, N // 2 + 1):
        val = 2 - 2 * np.cos(2 * np.pi * i / N)
        i1 = i * 2 - 1
        i2 = i * 2
        lambdas[i1] = val
        if i2 < N:
            lambdas[i2] = val
    return lambdas
def getMoebiusEigs(N):
    """
    Analytic Laplacian eigenvalues of the Moebius-Kantor-style graph
    (cycle plus alternating-sign term): 3 - 2*cos(2*pi*i/N) - (-1)**i.

    Floor division (//) replaces the Python-2-only `/` in the range bound;
    the computed values are unchanged.

    :param N: number of nodes
    :return: tuple (eigenvalues by frequency index, eigenvalues sorted)
    """
    lambdas = np.zeros(N)
    for i in range(1, N // 2 + 1):
        val = 3 - 2 * np.cos(2 * np.pi * i / N) - (-1)**i
        i1 = i * 2 - 1
        i2 = i * 2
        lambdas[i1] = val
        if i2 < N:
            lambdas[i2] = val
    return (lambdas, np.sort(lambdas))
def get3WayEigs(N):
    """
    Analytic Laplacian eigenvalues for the cycle-plus-three-way-lag graph:
    4 - 2*cos(2*pi*i/N) - 2*cos(2*pi*i/3).

    Floor division (//) replaces the Python-2-only `/` in the range bound;
    the computed values are unchanged.

    :param N: number of nodes
    :return: tuple (eigenvalues by frequency index, eigenvalues sorted)
    """
    lambdas = np.zeros(N)
    for i in range(1, N // 2 + 1):
        val = 4 - 2 * np.cos(2 * np.pi * i / N) - 2 * np.cos(2 * np.pi * i / 3)
        i1 = i * 2 - 1
        i2 = i * 2
        lambdas[i1] = val
        if i2 < N:
            lambdas[i2] = val
    return (lambdas, np.sort(lambdas))
# Demo: build a circulant graph, compute its Laplacian spectrum numerically,
# and plot the adjacency matrix, the analytic eigenvalues and eigenvectors.
if __name__ == '__main__':
    N = 100
    A = getOneOnK(N, 2)
    #A = getCirculantAdj(N, [30, 60, 80])
    A = A.toarray()
    # getLaplacianEigsDense comes from the local Laplacian module (star
    # import); presumably returns (eigenvalues, eigenvectors, Laplacian)
    # -- TODO confirm its contract.
    (w, v, L) = getLaplacianEigsDense(A, A.shape[0])
    (lambdas, lambdassorted) = get3WayEigs(N)
    plt.figure(figsize=(15, 4))
    plt.subplot(132)
    plt.plot(lambdas)
    plt.title("Eigenvalues")
    plt.xlabel("Eigenvalue Number")
    plt.ylabel("Eigenvalue")
    # plt.subplot(224)
    # plt.scatter(w, lambdassorted)
    # plt.xlabel("Numerically Computed")
    # plt.ylabel("Analytic")
    # plt.axis('equal')
    # plt.title("Checking accuracy")
    plt.subplot(131)
    plt.imshow(A, interpolation = 'nearest', cmap = 'gray')
    plt.title("Adjacency Matrix")
    plt.subplot(133)
    plt.imshow(v, cmap = 'afmhot', aspect = 'auto', interpolation = 'nearest')
    plt.xlabel("k-th Smallest Eigenvector")
    plt.title("Eigenvectors")
    plt.savefig("Eigs.svg", bbox_inches = 'tight')
| true |
02770ec5d9b47f810ae2faa0839dc09d5c9b685b | Python | asmitbhantana/Insight-Workshop | /PythonProgrammingAssignmentsI/Data Types/q43.py | UTF-8 | 336 | 3.671875 | 4 | [] | no_license | """
43. Write a Python program to remove an item from a tuple.
"""
def remove_item(user_tuple: tuple, remove_item):
    """Return a copy of user_tuple with every element equal to remove_item removed.

    :param user_tuple: tuple to filter
    :param remove_item: value to drop from the tuple
    :return: new tuple without the removed value
    """
    # Compare with equality (!=), not identity (`is not`): identity only
    # happened to work for small interned ints and fails for general objects.
    return tuple(item for item in user_tuple if item != remove_item)
# Demo: remove the value 6 from the sample tuple.
if __name__ == '__main__':
    print(remove_item((1, 2, 3, 4, 5, 6), 6))
| true |
fcbe5b762ef621d5a75041fcf433cd1edded6367 | Python | wangfeng351/Python-learning | /语法基础/practise/day02.py | UTF-8 | 178 | 3.53125 | 4 | [] | no_license | str = '''Hello Python
I have a dream
that's I want to be a good programmer'''
a = "10"
b = "20"
# NOTE(review): a and b are strings, so this is a lexicographic comparison,
# not a numeric one (it happens to match numeric order for these values).
if a > b:
    print("no")
else:
    # Prints the module-level `str` defined above (which shadows the builtin).
    print(str)
print("yes") | true |
5322b98bf1e03fe35cf9dfc3d367194d26f83e4b | Python | robpedersendev/Computer-science-assessment | /uncover_spy-Robert_Pedersen/main.py3 | UTF-8 | 1,837 | 3.5 | 4 | [
"MIT"
] | permissive | def uncover_spy(n, trust):
    # The spy, if it exists:
    # Does not trust anyone else.
    # Is trusted by everyone else (he's good at his job).
    # Works alone; there are no other spies in the city-state.
    # trusted: person -> number of people who trust them;
    # not_trusted: everyone who trusts someone (cannot be the spy).
    trusted = {}
    not_trusted = set()  # a set: unordered, no duplicates
    # -1 is the sentinel for "no spy found"
    spy = -1
    # Tally every trust relation trust[i] == [a, b] meaning "a trusts b"
    for i in range(len(trust)):
        # Anyone who trusts someone is disqualified from being the spy
        not_trusted.add(trust[i][0])
        # Count how many people trust person trust[i][1]
        if trust[i][1] in trusted:
            trusted[trust[i][1]] += 1
        else:
            # First time this person is trusted: start their counter
            trusted[trust[i][1]] = 1
    # The spy candidate is whoever is trusted by all n-1 other people
    for key, value in trusted.items():
        # NOTE(review): leftover debug print -- should be removed before use
        print("trusted", trusted, "key", key, "value", value, "n", n)
        if value == n-1:
            spy = key
    # NOTE(review): this loop repeats the same membership test every
    # iteration, and when not_trusted is empty (e.g. n == 1, trust == [])
    # it never runs, so -1 is returned even if a spy exists -- verify
    # against the problem spec.
    for val in not_trusted:
        # The candidate is only the spy if they trust nobody themselves
        if spy not in not_trusted:
            return spy
    return -1
# Time complexity: O(n)
# Space complexity: O(n)
| true |
51b54d6946bd6608fb22fa513f2f279e921a3ad5 | Python | detcitty/100DaysOfCode | /python/unfinshed/dig_ng.py | UTF-8 | 959 | 3.875 | 4 | [] | no_license | # https://www.codewars.com/kata/566fc12495810954b1000030/train/python
'''
Take an integer n (n >= 0) and a digit d (0 <= d <= 9) as an integer.
Square all numbers k (0 <= k <= n) between 0 and n.
Count the numbers of digits d used in the writing of all the k**2.
Call nb_dig (or nbDig or ...) the function taking n and d as parameters and returning this count.
'''
import numpy as np
def nb_dig(n, d):
    """
    Count occurrences of digit d in the decimal writing of k**2
    for every k in 0..n.

    :param n: upper bound (inclusive), n >= 0
    :param d: digit to count, 0 <= d <= 9
    :return: total number of appearances of d (int)

    Replaces the numpy char-array machinery (plus its dead commented-out
    debugging code) with direct stdlib counting; this also avoids any risk
    of fixed-width integer overflow in np.square for large n. The return
    value is now a plain int, which compares equal to the previous
    numpy integer.
    """
    target = str(d)
    return sum(str(k * k).count(target) for k in range(n + 1))
# Smoke checks against the kata inputs (see the codewars link at the top
# of this file for the expected values).
test1 = nb_dig(5750, 0)
test2 = nb_dig(10, 0)
print(test2) | true |
27236a4da2d52a5f820be5ba150f1665d79fe906 | Python | unistra/eva | /mecc/apps/utils/docx.py | UTF-8 | 11,579 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | from django.db.models import Q
from docx import Document
from docx.shared import Cm
from bs4 import BeautifulSoup as bs
from mecc.apps.rules.models import Rule, Paragraph
from mecc.apps.training.models import Training, SpecificParagraph, AdditionalParagraph
def docx_gen(data):
"""
Microsoft Word docx document generation for the rules
"""
# ########## Some useful functions
def delete_paragraph(paragraph):
p = paragraph._element
p.getparent().remove(p)
p._p = p._element = None
def add_lines(nb_lines=1):
while nb_lines > 0:
doc.add_paragraph()
nb_lines -= 1
def clean_up(text):
"""
Paragraphs are stored as HTML paragraphs (with tags like <p></p>)
This function parses paragraphs and returns a list that contains
style elements and the text to write
Ex : ['p','some text to write', 'i','some text to write in italic']
"""
text = text.replace('\r\n', '').replace('\t', '')
soup = bs(text, 'lxml')
paragraphs = soup.body
to_write = []
for paragraph in paragraphs.descendants:
if paragraph.name:
to_write.append(paragraph.name)
else:
to_write.append(paragraph)
if "ul" in to_write:
to_write.remove("ul")
return to_write
# ########## write_ functions (used to write things in the document)
def write_doc_subtitle(year, institute):
doc.add_paragraph(year, style='Subtitle')
doc.add_paragraph(institute, style='Subtitle')
add_lines(5)
def write_degree_label(label):
doc.add_paragraph(label.upper(), style=style_title1)
add_lines()
def write_rules_type_label(rules_type):
if rules_type is "standard":
doc.add_paragraph(
"Règles standards applicables à TOUS les diplômes "+\
element['degree_type'].short_label.upper(),
style=style_title2
)
elif rules_type is "ccct":
doc.add_paragraph(
"Règles standards applicables aux diplômes "+\
element['degree_type'].short_label.upper()+\
" en régime CC/CT",
style=style_title2
)
elif rules_type is "eci":
doc.add_paragraph(
"Règles standards applicables aux diplômes "+\
element['degree_type'].short_label.upper()+\
" en régime ECI ",
style=style_title2
)
else:
pass
add_lines()
def write_rules_paragraphs(paragraphs):
char_styles = []
for paragraph in paragraphs:
if isinstance(paragraph, Paragraph):
to_write = clean_up(paragraph.text_standard)
elif isinstance(paragraph, SpecificParagraph):
to_write = clean_up(paragraph.text_specific_paragraph)
else:
to_write = clean_up(paragraph.text_additional_paragraph)
for element in to_write:
if element in ['i', 'em', 'strong', 'u', 'li']:
char_styles.append(element)
elif element is not 'p':
paragraph = doc.add_paragraph()
run = paragraph.add_run(element)
for style in char_styles:
if style == 'i' or style == 'em':
run.italic = True
if style == 'strong':
run.bold = True
if style == 'u':
run.underline = True
if style == 'li':
paragraph.style = style_list_bullet
char_styles.clear()
add_lines()
def write_degree_rules(rules):
for rule in rules:
doc.add_paragraph(rule.label, style=style_title3)
rule_paragraphs = Paragraph.objects.filter(rule=rule).order_by('display_order')
write_rules_paragraphs(rule_paragraphs)
def write_training_header(training, reference):
    """Write a training's heading: label, regime/session line with the
    requested reference (APOGEE or ROF) and the responsible person(s)."""
    doc.add_paragraph(training.label, style=style_title4)
    info_line = doc.add_paragraph()
    info_line.paragraph_format.tab_stops.add_tab_stop(Cm(6))
    regime_text = "%s - %s\t\t" % (
        training.get_MECC_type_display(), training.get_session_type_display())
    info_line.add_run(regime_text).bold = True
    if reference == "with_si":
        info_line.add_run("Référence APOGEE : ").bold = True
        info_line.add_run(training.ref_si_scol)
    if reference == "with_rof":
        info_line.add_run("Référence ROF : ").bold = True
        info_line.add_run(training.ref_cpa_rof)
    info_line.style = style_title5
    resp_line = doc.add_paragraph()
    resp_line.add_run("Responsable(s) : ").bold = True
    resp_line.add_run(', '.join(training.get_respform_names))
def write_training_specifics_additionals(rules, specifics, additionals):
    """For each rule, write its heading plus any derogation
    ("Dérogations") and additional ("Alinéa additionnel") paragraphs
    attached to it."""
    for rule in rules:
        doc.add_paragraph(rule.label, style=style_title3)
        rule_specifics = specifics.filter(rule_gen_id=rule.id)
        if rule_specifics:
            title_run = doc.add_paragraph().add_run("Dérogations :")
            title_run.bold = title_run.italic = True
            write_rules_paragraphs(rule_specifics)
        rule_additionals = additionals.filter(rule_gen_id=rule.id)
        if rule_additionals:
            title_run = doc.add_paragraph().add_run("Alinéa additionnel :")
            title_run.bold = title_run.italic = True
            write_rules_paragraphs(rule_additionals)
# ########## Treatment depends on the document model
model = data.pop(0)
reference = data.pop(0)
# ########## The generated document is based on a template
# ########## The template contains all the styles we will use
doc = Document('mecc/static/docx/eva-template.docx')
# ########## Template clean up : we only want the title section
doc_paragraphs = doc.paragraphs[2:]
for paragraph in doc_paragraphs:
delete_paragraph(paragraph)
add_lines(2)
# ########## Subtitle section (university year and cmp label)
year = data.pop(0)
code_year = year.code_year
label_year = year.label_year
institute = data.pop(0)
write_doc_subtitle(label_year, institute)
# ########## Initialization of the titles styles
styles = doc.styles
for style in styles:
if 'Heading 1' in style.name:
style_title1 = style
if 'Heading 2' in style.name:
style_title2 = style
if 'Heading 3' in style.name:
style_title3 = style
if 'Heading 4' in style.name:
style_title4 = style
if 'Heading 5' in style.name:
style_title5 = style
if 'List Bullet' in style.name:
style_list_bullet = style
if model is 'a':
for element in data:
degree_rules = Rule.objects.filter(
code_year=code_year,
degree_type=element['degree_type']
).order_by('display_order')
standard_rules = degree_rules.filter(is_ccct=1, is_eci=1)
ccct_rules = degree_rules.filter(is_ccct=1, is_eci=0)
eci_rules = degree_rules.filter(is_ccct=0, is_eci=1)
write_degree_label(element['degree_type'].short_label)
write_rules_type_label("standard")
write_degree_rules(standard_rules)
for mecc_type in element['mecc_types']:
if mecc_type is 'C':
write_rules_type_label("ccct")
write_degree_rules(ccct_rules)
elif mecc_type is 'E':
write_rules_type_label("eci")
write_degree_rules(eci_rules)
degree_specifics = SpecificParagraph.objects.filter(
training__in=element['trainings']
)
degree_additionals = AdditionalParagraph.objects.filter(
training__in=element['trainings']
)
if degree_specifics or degree_additionals:
doc.add_paragraph(
"Dérogations et alinéas additionnels",
style=style_title2
)
add_lines()
for training in element['trainings']:
training_specifics = degree_specifics.filter(
training=training
)
training_additionals = degree_additionals.filter(
training=training
)
rules_with_specific_or_additional = degree_rules.filter(
Q(id__in=[specific.rule_gen_id for specific in training_specifics]) \
| \
Q(id__in=[additional.rule_gen_id for additional in training_additionals])
).order_by('display_order')
write_training_header(training, reference)
write_training_specifics_additionals(
rules_with_specific_or_additional,
training_specifics,
training_additionals
)
doc.add_page_break()
else:
for element in data:
rules = Rule.objects.filter(
code_year=code_year,
degree_type=element['degree_type']
).order_by('display_order')
write_degree_label(element['degree_type'].short_label)
for training in element['trainings']:
write_training_header(training, reference)
doc.add_paragraph(
"Règles applicables à la formation",
style=style_title2
)
add_lines()
if training.MECC_type == 'C':
training_rules = rules.exclude(is_eci=0)
if training.MECC_type == 'E':
training_rules = rules.exclude(is_ccct=0)
training_specifics = SpecificParagraph.objects.filter(training=training)
training_additionals = AdditionalParagraph.objects.filter(training=training)
for rule in training_rules:
doc.add_paragraph(rule.label, style=style_title3)
paragraphs = Paragraph.objects.filter(rule=rule).order_by('display_order')
to_write = []
for paragraph in paragraphs:
if paragraph.id in [specific.paragraph_gen_id for specific in training_specifics]:
to_write.append(training_specifics.get(
paragraph_gen_id=paragraph.id
))
else:
to_write.append(paragraph)
write_rules_paragraphs(to_write)
if rule.id in [additional.rule_gen_id for additional in training_additionals]:
to_write = training_additionals.filter(
rule_gen_id=rule.id
)
write_rules_paragraphs(to_write)
doc.add_page_break()
return doc
| true |
6a9a17f190aa454509f539897ed61c6f97039dbb | Python | strawsyz/straw | /my_cv/famous_netorks/Inceptionv3_pytorch/models.py | UTF-8 | 11,813 | 2.578125 | 3 | [
"MIT"
] | permissive | import torch
from torch import nn
from torch.nn import functional as F
# 参考torchvision中的实现
class Inception3(nn.Module):
    """Inception v3 classification network (mirrors the torchvision
    implementation; the shape comments in ``forward`` assume 299x299 RGB
    inputs)."""

    def __init__(self, n_classes=1000, aux_logits=True, transform_input=False):
        """
        :param n_classes: number of output classes
        :param aux_logits: whether to attach the auxiliary classifier head
        :param transform_input: whether inputs must be re-normalized
            (see ``forward``)
        """
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # stem convolutions: 299x299x3 down to 71x71x192
        self.Conv2d_1a_3x3 = ConvBNReLU(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = ConvBNReLU(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = ConvBNReLU(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = ConvBNReLU(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = ConvBNReLU(80, 192, kernel_size=3)
        # 35x35 inception blocks
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        # reduction to 17x17, then the factorized-7x7 blocks
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            # auxiliary classifier fed from the 17x17x768 feature map
            self.AuxLogits = InceptionAux(768, n_classes)
        # reduction to 8x8, then the expanded-filter-bank blocks
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, n_classes)

        # weight initialization: truncated normal for conv/linear layers
        # (per-module ``stddev`` attribute if set, else 0.1), constant
        # 1 / 0 for batch-norm scale / shift
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.numel()))
                values = values.view(m.weight.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the network; in training mode with ``aux_logits`` enabled
        the auxiliary logits are returned as a second output."""
        if self.transform_input:
            # Re-normalization: this inception v3 was trained with
            # mean/std [0.5, 0.5, 0.5] / [0.5, 0.5, 0.5], while earlier
            # CNN models normalize with mean [0.485, 0.456, 0.406] and
            # std [0.229, 0.224, 0.225]; these lines convert the latter
            # normalization into the former.
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        # 299 x 299 x 3
        out = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        out = self.Conv2d_2a_3x3(out)
        # 147 x 147 x 32
        out = self.Conv2d_2b_3x3(out)
        # 147 x 147 x 64
        out = F.max_pool2d(out, kernel_size=3, stride=2)
        # 73 x 73 x 64
        out = self.Conv2d_3b_1x1(out)
        # 73 x 73 x 80
        out = self.Conv2d_4a_3x3(out)
        # 71 x 71 x 192
        out = F.max_pool2d(out, kernel_size=3, stride=2)
        # 35 x 35 x 192
        out = self.Mixed_5b(out)
        # 35 x 35 x 256
        out = self.Mixed_5c(out)
        # 35 x 35 x 288
        out = self.Mixed_5d(out)
        # 35 x 35 x 288
        out = self.Mixed_6a(out)
        # 17 x 17 x 768
        out = self.Mixed_6b(out)
        # 17 x 17 x 768
        out = self.Mixed_6c(out)
        # 17 x 17 x 768
        out = self.Mixed_6d(out)
        # 17 x 17 x 768
        out = self.Mixed_6e(out)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            # output of the auxiliary classifier head
            aux = self.AuxLogits(out)
        # 17 x 17 x 768
        out = self.Mixed_7a(out)
        # 8 x 8 x 1280
        out = self.Mixed_7b(out)
        # 8 x 8 x 2048
        out = self.Mixed_7c(out)
        # 8 x 8 x 2048
        out = F.avg_pool2d(out, kernel_size=8)
        # 1 x 1 x 2048
        out = F.dropout(out, training=self.training)
        # 1 x 1 x 2048
        out = out.view(out.size(0), -1)
        # 2048
        out = self.fc(out)
        # 1000 (num_classes)
        if self.training and self.aux_logits:
            # also return the auxiliary logits
            return out, aux
        else:
            return out
class ConvBNReLU(nn.Sequential):
    """Conv2d (no bias) -> BatchNorm2d(eps=0.001) -> ReLU building block.

    The submodule names ("Conv", "BN", "ReLU") are kept stable so
    ``state_dict`` keys stay compatible.
    """

    def __init__(self, in_n, out_n, **kwargs):
        super(ConvBNReLU, self).__init__()
        # bias is redundant before batch norm, hence bias=False
        conv = nn.Conv2d(in_n, out_n, bias=False, **kwargs)
        self.add_module("Conv", conv)
        self.add_module("BN", nn.BatchNorm2d(out_n, eps=0.001))
        self.add_module("ReLU", nn.ReLU(inplace=True))
class InceptionA(nn.Module):
    """Inception block with 1x1, 5x5, double-3x3 and pooled-1x1 branches.

    Output channel count is 64 + 64 + 96 + pool_features.
    """

    def __init__(self, in_n, pool_features):
        """
        :param in_n: number of input channels
        :param pool_features: output channels of the pooling branch
        """
        super(InceptionA, self).__init__()
        # branch 1: plain 1x1 convolution
        self.branch1x1 = ConvBNReLU(in_n, 64, kernel_size=1)
        # branch 2: 1x1 reduction then a padded 5x5 convolution
        self.branch5x5_1 = ConvBNReLU(in_n, 48, kernel_size=1)
        self.branch5x5_2 = ConvBNReLU(48, 64, kernel_size=5, padding=2)
        # branch 3: 1x1 reduction then two padded 3x3 convolutions
        self.branch3x3_1 = ConvBNReLU(in_n, 64, kernel_size=1)
        self.branch3x3_2 = ConvBNReLU(64, 96, kernel_size=3, padding=1)
        self.branch3x3_3 = ConvBNReLU(96, 96, kernel_size=3, padding=1)
        # branch 4: average pooling followed by a 1x1 convolution
        self.branch_pool = ConvBNReLU(in_n, pool_features, kernel_size=1)

    def forward(self, x):
        branch1 = self.branch1x1(x)
        branch2 = self.branch5x5_2(self.branch5x5_1(x))
        branch3 = self.branch3x3_3(self.branch3x3_2(self.branch3x3_1(x)))
        branch4 = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        )
        # concatenate the four branches along the channel dimension
        return torch.cat([branch1, branch2, branch3, branch4], 1)
class InceptionB(nn.Module):
    """Inception block that halves the spatial resolution (stride 2).

    Output channels: 384 + 96 + in_channels (pooling branch keeps its
    input channels).
    """

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        # single strided 3x3 convolution
        self.branch3x3 = ConvBNReLU(in_channels, 384, kernel_size=3, stride=2)
        # 1x1 reduction, a padded 3x3, then a strided 3x3
        self.branch3x3db_1 = ConvBNReLU(in_channels, 64, kernel_size=1)
        self.branch3x3db_2 = ConvBNReLU(64, 96, kernel_size=3, padding=1)
        self.branch3x3db_3 = ConvBNReLU(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        single = self.branch3x3(x)
        double = self.branch3x3db_3(self.branch3x3db_2(self.branch3x3db_1(x)))
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([single, double, pooled], 1)
class InceptionC(nn.Module):
    """Inception block factorizing 7x7 convolutions into 1x7 / 7x1 pairs.

    Output channel count is always 4 * 192 = 768; ``channels_7x7``
    controls the intermediate width of the factorized branches.
    """

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        # branch 1: plain 1x1 convolution
        self.branch1x1 = ConvBNReLU(in_channels, 192, kernel_size=1)

        # c7: intermediate channel width of the factorized-7x7 branches
        c7 = channels_7x7
        # branch 2: 1x1 reduction, then one 1x7 / 7x1 pair
        self.branch7x7_1 = ConvBNReLU(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = ConvBNReLU(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = ConvBNReLU(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        # branch 3: 1x1 reduction, then two alternating 7x1 / 1x7 pairs
        self.branch7x7db_1 = ConvBNReLU(in_channels, c7, kernel_size=1)
        self.branch7x7db_2 = ConvBNReLU(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7db_3 = ConvBNReLU(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7db_4 = ConvBNReLU(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7db_5 = ConvBNReLU(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        # branch 4: average pooling followed by a 1x1 convolution
        self.branch_pool = ConvBNReLU(in_channels, 192, kernel_size=1)

    def forward(self, x):
        out1x1 = self.branch1x1(x)

        out7x7 = self.branch7x7_1(x)
        out7x7 = self.branch7x7_2(out7x7)
        out7x7 = self.branch7x7_3(out7x7)

        out7x7_db = self.branch7x7db_1(x)
        out7x7_db = self.branch7x7db_2(out7x7_db)
        out7x7_db = self.branch7x7db_3(out7x7_db)
        out7x7_db = self.branch7x7db_4(out7x7_db)
        out7x7_db = self.branch7x7db_5(out7x7_db)

        # pooling branch keeps the spatial size (stride 1, padding 1)
        out_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        out_pool = self.branch_pool(out_pool)

        out = [out1x1, out7x7, out7x7_db, out_pool]
        return torch.cat(out, 1)
class InceptionD(nn.Module):
    """Inception block that halves the spatial resolution (stride 2).

    Output channels: 320 + 192 + in_channels (pooling branch keeps its
    input channels).
    """

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        # short branch: 1x1 reduction then a strided 3x3
        self.branch3x3_1 = ConvBNReLU(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = ConvBNReLU(192, 320, kernel_size=3, stride=2)
        # long branch: 1x1 reduction, 1x7 / 7x1 pair, then a strided 3x3
        self.branch7x7x3_1 = ConvBNReLU(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = ConvBNReLU(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = ConvBNReLU(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = ConvBNReLU(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        short_branch = self.branch3x3_2(self.branch3x3_1(x))

        long_branch = self.branch7x7x3_1(x)
        long_branch = self.branch7x7x3_2(long_branch)
        long_branch = self.branch7x7x3_3(long_branch)
        long_branch = self.branch7x7x3_4(long_branch)

        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([short_branch, long_branch, pooled], 1)
class InceptionE(nn.Module):
    """Inception block whose 3x3 convolutions are expanded into parallel
    1x3 and 3x1 convolutions that are concatenated.

    Output channel count is 320 + 2*384 + 2*384 + 192 = 2048.
    """

    def __init__(self, in_n):
        super(InceptionE, self).__init__()
        # branch 1: plain 1x1 convolution
        self.branch1x1 = ConvBNReLU(in_n, 320, kernel_size=1)

        # branch 2: 1x1 reduction, then parallel 1x3 / 3x1 convolutions
        self.branch3x3_1 = ConvBNReLU(in_n, 384, kernel_size=1)
        self.branch3x3_2a = ConvBNReLU(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = ConvBNReLU(384, 384, kernel_size=(3, 1), padding=(1, 0))

        # branch 3: 1x1 reduction and a 3x3, then parallel 1x3 / 3x1
        self.branch3x3_db_1 = ConvBNReLU(in_n, 448, kernel_size=1)
        self.branch3x3_db_2 = ConvBNReLU(448, 384, kernel_size=3, padding=1)
        self.branch3x3_db_3a = ConvBNReLU(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_db_3b = ConvBNReLU(384, 384, kernel_size=(3, 1), padding=(1, 0))

        # branch 4: average pooling followed by a 1x1 convolution
        self.branch_pool = ConvBNReLU(in_n, 192, kernel_size=1)

    def forward(self, x):
        out1x1 = self.branch1x1(x)

        out3x3 = self.branch3x3_1(x)
        # split into the two asymmetric convolutions and re-concatenate
        out3x3 = [
            self.branch3x3_2a(out3x3),
            self.branch3x3_2b(out3x3)
        ]
        out3x3 = torch.cat(out3x3, 1)

        out3x3_db = self.branch3x3_db_1(x)
        out3x3_db = self.branch3x3_db_2(out3x3_db)
        out3x3_db = [
            self.branch3x3_db_3a(out3x3_db),
            self.branch3x3_db_3b(out3x3_db)
        ]
        out3x3_db = torch.cat(out3x3_db, 1)

        # pooling branch keeps the spatial size (stride 1, padding 1)
        out_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        out_pool = self.branch_pool(out_pool)

        out = [out1x1, out3x3, out3x3_db, out_pool]
        return torch.cat(out, 1)
class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to an intermediate feature map."""

    def __init__(self, in_n, n_classes):
        super(InceptionAux, self).__init__()
        self.conv_1 = ConvBNReLU(in_n, 128, kernel_size=1)
        self.conv_2 = ConvBNReLU(128, 768, kernel_size=5)
        # custom init stddevs picked up by the weight-init loop in Inception3
        self.conv_2.stddev = 0.01
        self.fc = nn.Linear(768, n_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        # down-sample, convolve, flatten, classify
        pooled = F.avg_pool2d(x, kernel_size=5, stride=3)
        features = self.conv_2(self.conv_1(pooled))
        flattened = features.view(features.size(0), -1)
        return self.fc(flattened)
if __name__ == '__main__':
    # Smoke test: build the network and run one forward pass.
    model = Inception3()
    print(model)
    input = torch.rand(4, 3, 299, 299)
    # BUG FIX: run the (expensive) forward pass once and reuse the result
    # instead of calling model(input) twice as before.  In the default
    # training mode with aux_logits the model returns (output, aux).
    output, aux = model(input)
    print("size of the output is {}".format(output.size()))
    print("size of the aux is {}".format(aux.size()))
| true |
14b2e7528f3fd85be391b1f2531143f621dd95c6 | Python | CFM-MSG/Code_JFSE | /losses.py | UTF-8 | 5,860 | 3.03125 | 3 | [] | no_license | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Domain Adaptation Loss Functions.
The following domain adaptation loss functions are defined:
- Maximum Mean Discrepancy (MMD).
Relevant paper:
Gretton, Arthur, et al.,
"A kernel two-sample test."
The Journal of Machine Learning Research, 2012
- Correlation Loss on a batch.
"""
from functools import partial
import tensorflow as tf
import utils
slim = tf.contrib.slim
################################################################################
# SIMILARITY LOSS
################################################################################
def maximum_mean_discrepancy(x, y, kernel=utils.gaussian_kernel_matrix):
    """Computes the Maximum Mean Discrepancy (MMD) between two samples:
    E{ K(x, x) } + E{ K(y, y) } - 2 E{ K(x, y) }.

    Args:
      x: a tensor of shape [num_samples, num_features]
      y: a tensor of shape [num_samples, num_features]
      kernel: a function computing the pairwise kernel matrix of two samples.

    Returns:
      a scalar tensor with the (clamped-to-non-negative) MMD value.
    """
    with tf.name_scope('MaximumMeanDiscrepancy'):
        # \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) }
        cost = tf.reduce_mean(kernel(x, x))
        cost += tf.reduce_mean(kernel(y, y))
        cost -= 2 * tf.reduce_mean(kernel(x, y))

        # We do not allow the loss to become negative.
        cost = tf.where(cost > 0, cost, 0, name='value')
    return cost
def mmd_loss(source_samples, target_samples, weight, scope=None):
    """Adds a similarity loss term, the MMD between two representations.

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: scalar the MMD value is multiplied by.
      scope: optional prefix for the summary tag.

    Returns:
      a scalar tensor representing the weighted MMD loss value.
    """
    # bandwidth parameters handed to utils.gaussian_kernel_matrix
    sigmas = [
        1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100,
        1e3, 1e4, 1e5, 1e6
    ]
    gaussian_kernel = partial(
        utils.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))

    loss_value = maximum_mean_discrepancy(
        source_samples, target_samples, kernel=gaussian_kernel)
    # floor the loss at 1e-4 before weighting
    loss_value = tf.maximum(1e-4, loss_value) * weight

    # the summary below only runs once the finiteness assert has passed
    assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
    with tf.control_dependencies([assert_op]):
        tag = 'MMD Loss'
        if scope:
            tag = scope + tag
        tf.summary.scalar(tag, loss_value)

    return loss_value
def correlation_loss(source_samples, target_samples, weight, scope=None):
    """Adds a similarity loss term, the correlation between two representations.

    Args:
      source_samples: a tensor of shape [num_samples, num_features]
      target_samples: a tensor of shape [num_samples, num_features]
      weight: a scalar weight for the loss.
      scope: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the correlation loss value.
    """
    with tf.name_scope('corr_loss'):
        # center each representation, then L2-normalize every sample row
        source_samples -= tf.reduce_mean(source_samples, 0)
        target_samples -= tf.reduce_mean(target_samples, 0)

        source_samples = tf.nn.l2_normalize(source_samples, 1)
        target_samples = tf.nn.l2_normalize(target_samples, 1)

        # feature-by-feature second-moment matrices of the normalized samples
        source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
        target_cov = tf.matmul(tf.transpose(target_samples), target_samples)

        corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight

    # guard against non-finite losses; summary/collection registration
    # below only runs once the assert has passed
    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
        tag = 'Correlation Loss'
        if scope:
            tag = scope + tag
        tf.summary.scalar(tag, corr_loss)
        tf.losses.add_loss(corr_loss)

    return corr_loss
def Deep_CORAL_loss(source_samples, target_samples, weight, scope=None):
    """Adds a Deep CORAL loss between two representations.

    The loss is the element-wise mean of the squared difference between
    the second-moment matrices of the centered source and target samples,
    scaled by ``weight / (4 * d * d)`` where ``d`` is the feature count.

    Args:
      source_samples: a tensor of shape [num_samples, num_features]
      target_samples: a tensor of shape [num_samples, num_features]
      weight: a scalar weight for the loss.
      scope: unused; kept for signature consistency with the other losses.

    Returns:
      a scalar tensor representing the CORAL loss value.
    """
    with tf.name_scope('Deep_CORAL_loss'):
        # d: number of features
        d=source_samples.get_shape().as_list()[1]
        # center the samples (the sign is irrelevant, the products below
        # square it away)
        source_samples = tf.reduce_mean(source_samples, 0)-source_samples
        source_s=tf.transpose(source_samples) @ source_samples
        target_samples = tf.reduce_mean(target_samples, 0)-target_samples
        target_s=tf.transpose(target_samples) @ target_samples
        # NOTE(review): reduce_mean divides by d*d on top of the 1/(4*d*d)
        # factor; the CORAL formulation uses a summed squared Frobenius
        # norm -- TODO confirm this extra scaling is intended.
        corr_loss = tf.reduce_mean(tf.multiply((source_s-target_s), (source_s-target_s)))* weight
        corr_loss = corr_loss/(4*d*d)
        tf.losses.add_loss(corr_loss)
    return corr_loss
def dann_loss(source_samples, target_samples, weight, scope=None):
    """Adds the domain adversarial (DANN) loss.

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the loss.
      scope: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the correlation loss value.
    """
    with tf.variable_scope('dann'):
        batch_size = tf.shape(source_samples)[0]
        samples = tf.concat(axis=0, values=[source_samples, target_samples])
        samples = slim.flatten(samples)

        # domain labels: 0 for the source half, 1 for the target half
        domain_selection_mask = tf.concat(
            axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))])

        # Perform the gradient reversal and be careful with the shape.
        # NOTE(review): ``grl_ops`` is not imported anywhere in this module,
        # so calling dann_loss raises NameError -- presumably this is the
        # gradient-reversal op module from the original domain-separation
        # code; TODO confirm and restore the import.
        grl = grl_ops.gradient_reversal(samples)
        grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

        # small domain-classifier head: 100-unit layer + logistic output
        grl = slim.fully_connected(grl, 100, scope='fc1')
        logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2')

        domain_predictions = tf.sigmoid(logits)

    domain_loss = tf.losses.log_loss(
        domain_selection_mask, domain_predictions, weights=weight)

    domain_accuracy = utils.accuracy(
        tf.round(domain_predictions), domain_selection_mask)

    # summaries below only run once the finiteness assert has passed
    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
        tag_loss = 'losses/domain_loss'
        tag_accuracy = 'losses/domain_accuracy'
        if scope:
            tag_loss = scope + tag_loss
            tag_accuracy = scope + tag_accuracy

        tf.summary.scalar(tag_loss, domain_loss)
        tf.summary.scalar(tag_accuracy, domain_accuracy)

    return domain_loss
| true |
52869bfb0138a91d4f1d6f3f1d6ab8036e8cbcf2 | Python | CurtinCen/a-new-test-project | /src/test_script.py | UTF-8 | 230 | 2.546875 | 3 | [] | no_license | import math
import random

# Number of road segments; one output line is written per segment.
road_num = 686105
fname = 'result.txt'
with open(fname, 'w') as fout:
    # Emit one "1" line per road with a single bulk write instead of
    # ~686k individual write() calls; the file content is unchanged.
    # (The commented-out randomized "i:r" format of the original was
    # dead code and has been dropped.)
    fout.write("1\n" * road_num)
| true |
2724092318d3e3f66a6cbf97ce26cf0e4d0b848e | Python | Aasthaengg/IBMdataset | /Python_codes/p03254/s155367407.py | UTF-8 | 243 | 2.703125 | 3 | [] | no_license | n, x = map(int, input().split())
# Greedy: serve demands in ascending order so that as many children as
# possible get exactly what they asked for.
A = sorted(list(map(int, input().split())))
# When the total demand is smaller than x, the leftover sweets must all be
# dumped on the last child, who therefore is NOT satisfied.  Starting the
# counter at -1 makes the loop below (which then increments n times without
# ever reaching x <= 0) print n - 1 overall.
if sum(A) < x:
    cnt = -1
else:
    cnt = 0
for a in A:
    x -= a
    if x >= 0:
        # this child received exactly a sweets
        cnt += 1
    if x <= 0:
        # sweets exhausted: remaining children get nothing
        print(cnt)
        exit()
print(cnt)
8c0dd9960eff76af733bcc3c9424001d0f3dd032 | Python | LawrenceGao0224/LeetCode | / Merge_Intervals.py | UTF-8 | 629 | 3.3125 | 3 | [] | no_license | # 56. Merge Intervals
class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge all overlapping intervals and return the merged list.

        Sorts the intervals by start, then sweeps once, extending the
        last merged interval whenever the next one overlaps or touches it.
        """
        intervals.sort()
        merged = [intervals[0][:]]
        for start, end in intervals[1:]:
            if start <= merged[-1][1]:
                # overlaps (or is contained in) the last merged interval:
                # extend its right edge as needed
                merged[-1][1] = max(merged[-1][1], end)
            else:
                # disjoint: start a new merged interval
                merged.append([start, end])
        return merged
b06172f684c812229ac8eecbee3385a3b01ff643 | Python | nauman-zahoor/imdb-data-dashboard | /wrangling_scripts/wrangle_data.py | UTF-8 | 3,513 | 3.4375 | 3 | [] | no_license | import pandas as pd
import plotly.graph_objs as go
# Use this file to read in your data and prepare the plotly visualizations. The path to the data files are in
# `data/file_name.csv`
def return_figures():
    """Creates four plotly visualizations

    Args:
        None

    Returns:
        list (dict): list containing the four plotly visualizations
    """
    # read the data file and keep/rename only the columns used for plotting
    data = pd.read_csv('./data/imdb_data.csv')
    cols = ['primaryTitle', 'startYear', 'runtimeMinutes', 'genres', 'averageRating']
    data = data[cols]
    data.columns = ['movie', 'year', 'runtime', 'generes', 'rating']

    # top 20 and bottom 20 rated movies
    # BUG FIX: ``.loc[:20]`` slices labels *inclusively* and therefore
    # returned 21 rows; ``head(20)`` yields exactly the intended 20.
    data_top = data.sort_values('rating', ascending=False) \
                   .reset_index(drop=True).head(20)[['movie', 'rating']]
    data_bottom = data.sort_values('rating', ascending=True) \
                      .reset_index(drop=True).head(20)[['movie', 'rating']]

    # first chart plots top 20 movies
    graph_one = []
    graph_one.append(
        go.Scatter(
            x=data_top.movie.tolist(),
            y=data_top.rating.tolist(),
            mode='markers'
        )
    )

    layout_one = dict(title="IMDB's Highest Rated Top 20 Movies",
                      xaxis=dict(title='Movie Title'),
                      yaxis=dict(title='IMDB Rating'),
                      )

    # second chart plots bottom 20 movies
    graph_two = []
    graph_two.append(
        go.Scatter(
            x=data_bottom.movie.tolist(),
            y=data_bottom.rating.tolist(),
            mode='markers'
        )
    )

    layout_two = dict(title="IMDB's Lowest Rated Bottom 20 Movies",
                      xaxis=dict(title='Movie title',),
                      yaxis=dict(title='IMDB Rating'),
                      )

    # mean rating per release year, sorted best-first and worst-first;
    # reset_index() turns the groupby series back into a frame with
    # 'year' and 'rating' columns (replaces the manual
    # DataFrame/loc/index juggling of the original)
    top = data.groupby('year')['rating'].mean() \
              .sort_values(ascending=False).reset_index()
    bottom = data.groupby('year')['rating'].mean() \
                 .sort_values(ascending=True).reset_index()

    top20 = top[:20]
    bottom20 = bottom[:20]

    # third chart plots the release years of the best-rated movies
    # against their average rating
    graph_three = []
    graph_three.append(
        go.Scatter(
            x=top20.year.tolist(),
            y=top20.rating.tolist(),
            mode='markers'
        )
    )

    layout_three = dict(title="IMDB's Highest Rated Movie's average rating <br> vs Year they came out ",
                        xaxis=dict(title='Year of Release'),
                        yaxis=dict(title='Average Rating')
                        )

    # fourth chart plots the release years of the worst-rated movies
    # against their average rating
    graph_four = []
    graph_four.append(
        go.Scatter(
            x=bottom20.year.tolist(),
            y=bottom20.rating.tolist(),
            mode='markers'
        )
    )

    layout_four = dict(title="IMDB's Lowest Rated Movie's average rating <br> vs Year they came out ",
                       xaxis=dict(title='Year of Release'),
                       yaxis=dict(title='Average rating'),
                       )

    # append all charts to the figures list
    figures = []
    figures.append(dict(data=graph_one, layout=layout_one))
    figures.append(dict(data=graph_two, layout=layout_two))
    figures.append(dict(data=graph_three, layout=layout_three))
    figures.append(dict(data=graph_four, layout=layout_four))

    return figures
e4af571b01b525ed0d098c3178dafaa71bf32d64 | Python | 20c/confu | /src/confu/schema/inet.py | UTF-8 | 8,225 | 3.078125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | """
Attributes that deal with networking specific values such as emails, urls and ip addresses
These can be imported directly from `confu.schema`
## Requirements
- `ipaddress` for ip address validation
"""
from __future__ import annotations
import ipaddress
import re
from typing import Any
from urllib.parse import urlparse
from confu.exceptions import SoftDependencyError, ValidationError
from confu.schema.core import Str
class Email(Str):
    """
    Describes an email address
    """

    def validate(self, value: str | None, path: list[str], **kwargs: Any) -> str | None:
        """Validate *value* as an email address and return it unchanged."""
        validated = super().validate(value, path, **kwargs)

        # pass through values the schema explicitly allows
        if (validated == "" and self.blank) or (
            validated is None and self.default_is_none
        ):
            return validated

        # TODO: any reason to get more sophisticated than this?
        if re.match(r"[^@\s]+@[^@\s]+", validated) is None:
            raise ValidationError(self, path, validated, "email address expected")

        return validated
class Url(Str):
    """
    Describes a URL
    """

    def __init__(self, name: str = "", **kwargs: Any) -> None:
        super().__init__(name=name, **kwargs)
        # optional list of allowed url schemes (empty list = allow any)
        self.schemes = kwargs.get("schemes", [])

    def validate(self, value: str | None, path: list[str], **kwargs: Any) -> str | None:
        """
        Currently only validates by running urlparse against it
        and checking that a scheme and netloc is set - and if a list of allowed
        schemes is provide that the scheme is valid against that list

        TODO: may want something more sophisticated than that - could look
        at django's url validator
        """
        validated = super().validate(value, path, **kwargs)

        # pass through values the schema explicitly allows
        if (validated == "" and self.blank) or (
            validated is None and self.default_is_none
        ):
            return validated

        try:
            parsed = urlparse(validated)
        except ValueError:
            raise ValidationError(self, path, validated, "url expected")

        if not parsed.scheme:
            raise ValidationError(self, path, validated, "no url scheme specified")
        if not parsed.netloc:
            raise ValidationError(self, path, validated, "no url netloc specified")
        if self.schemes and parsed.scheme not in self.schemes:
            raise ValidationError(
                self, path, validated, f"invalid url scheme: {parsed.scheme}"
            )

        return validated
class IpAddress(Str):
    """
    Describes a IPv4 or IPv6 address
    """

    def __init__(
        self, name: str = "", protocol: int | None = None, **kwargs: Any
    ) -> None:
        """
        Initialize attribute

        **Keyword Arguments**

        - name (`str`): describes the attribute name, if not specified
          explicitly will be set through the schema that instantiates
          the attribute.
        - protocol (`int`): ip version, can be 4, 6 or None - if it is none
          the attribute can hold either a v4 or a v6 IP address.
        - default (`mixed`): the default value of this attribute. Once a default
          value is set, schema validation will no longer raise a
          validation error if the attribute is missing from the
          configuration.
        - choices (`list`): if specified on values in this list may be set
          for this attribute
        - help (`str`): help description
        - cli (`bool=True`): enable CLI support for this attribute
        - deprecated (`str`): version id of when this attribute will be deprecated
        - added (`str`): version id of when this attribute was added to the schema
        - removed (`str`): version id of when this attribute will be removed
        """
        super().__init__(name=name, **kwargs)

        # NOTE(review): vestigial guard -- ``ipaddress`` is imported
        # unconditionally at module top, so this branch can never trigger;
        # presumably left over from an optional-dependency import pattern.
        if not ipaddress:
            raise SoftDependencyError("ipaddress")

        if protocol not in [None, 4, 6]:
            raise ValueError("IpAddress protocol needs to be either 4, 6 or None")

        # 4, 6 or None (None accepts either protocol)
        self.protocol = protocol

    def validate_v4(
        self, value: str, path: list[str], **kwargs: Any
    ) -> bool | ipaddress.IPv4Address:
        # Returns the parsed IPv4Address, or False when *value* is not valid v4.
        try:
            return ipaddress.IPv4Address(value)
        except ipaddress.AddressValueError:
            return False

    def validate_v6(
        self, value: str, path: list[str], **kwargs: Any
    ) -> bool | ipaddress.IPv6Address:
        # Returns the parsed IPv6Address, or False when *value* is not valid v6.
        try:
            return ipaddress.IPv6Address(value)
        except ipaddress.AddressValueError:
            return False

    def validate(self, value: str | None, path: list[str], **kwargs: Any) -> Any:
        # Base-class validation first.
        value = super().validate(value, path, **kwargs)

        # pass through values the schema explicitly allows
        if value is None and self.default_is_none:
            return value

        if self.blank and value == "":
            return value

        # coerce to string before parsing
        value = f"{value}"

        value_v4 = self.validate_v4(value, path, **kwargs)
        value_v6 = self.validate_v6(value, path, **kwargs)

        if self.protocol == 4 and not value_v4:
            raise ValidationError(self, path, value, "invalid ip (v4)")
        elif self.protocol == 6 and not value_v6:
            raise ValidationError(self, path, value, "invalid ip (v6)")
        elif self.protocol is None and not value_v4 and not value_v6:
            raise ValidationError(self, path, value, "invalid ip (v4 or v6)")

        # NOTE: returns an ``ipaddress`` address object, not the input string.
        return value_v4 or value_v6
class IpNetwork(Str):
    """
    Describes a IPv4 or IPv6 IP prefix
    """

    def __init__(
        self, name: str = "", protocol: int | None = None, **kwargs: Any
    ) -> None:
        """
        Initialize attribute

        **Keyword Arguments**

        - name (`str`): describes the attribute name, if not specified
          explicitly will be set through the schema that instantiates
          the attribute.
        - protocol (`int`): ip version, can be 4, 6 or None - if it is none
          the attribute can hold either a v4 or a v6 IP address.
        - default (`mixed`): the default value of this attribute. Once a default
          value is set, schema validation will no longer raise a
          validation error if the attribute is missing from the
          configuration.
        - choices (`list`): if specified on values in this list may be set
          for this attribute
        - help (`str`): help description
        - cli (`bool=True`): enable CLI support for this attribute
        - deprecated (`str`): version id of when this attribute will be deprecated
        - added (`str`): version id of when this attribute was added to the schema
        - removed (`str`): version id of when this attribute will be removed
        """
        super().__init__(name=name, **kwargs)
        if protocol not in [None, 4, 6]:
            # BUG FIX: the message previously said "IpAddress"
            # (copy-paste from the IpAddress class above)
            raise ValueError("IpNetwork protocol needs to be either 4, 6 or None")
        # 4, 6 or None (None accepts either protocol)
        self.protocol = protocol

    def validate_v4(
        self, value: str, path: list[str], **kwargs: Any
    ) -> bool | ipaddress.IPv4Network:
        """Return the parsed IPv4Network, or False if *value* is not one."""
        try:
            return ipaddress.IPv4Network(value)
        # BUG FIX: catch ValueError instead of only AddressValueError.
        # IPv4Network raises a plain ValueError for prefixes with host
        # bits set (e.g. "10.0.0.1/24") and NetmaskValueError for bad
        # prefix lengths (e.g. "10.0.0.0/33"); both previously escaped
        # as raw exceptions instead of yielding a ValidationError from
        # validate() below.  AddressValueError is a ValueError subclass,
        # so existing behavior for plain address errors is unchanged.
        except ValueError:
            return False

    def validate_v6(
        self, value: str, path: list[str], **kwargs: Any
    ) -> bool | ipaddress.IPv6Network:
        """Return the parsed IPv6Network, or False if *value* is not one."""
        try:
            return ipaddress.IPv6Network(value)
        # see validate_v4: ValueError also covers host-bits/netmask errors
        except ValueError:
            return False

    def validate(
        self, value: str, path: list[str], **kwargs: Any
    ) -> ipaddress.IPv4Network | ipaddress.IPv6Network | str:
        """Validate *value* as an IPv4/IPv6 network according to
        ``self.protocol`` and return the parsed network object."""
        value = super().validate(value, path, **kwargs)

        # pass through values the schema explicitly allows
        if value is None and self.default_is_none:
            return value

        if self.blank and value == "":
            return value

        # coerce to string before parsing
        value = f"{value}"

        value_v4 = self.validate_v4(value, path, **kwargs)
        value_v6 = self.validate_v6(value, path, **kwargs)

        if self.protocol == 4 and not value_v4:
            raise ValidationError(self, path, value, "invalid network (v4)")
        elif self.protocol == 6 and not value_v6:
            raise ValidationError(self, path, value, "invalid network (v6)")
        elif self.protocol is None and not value_v4 and not value_v6:
            raise ValidationError(self, path, value, "invalid network (v4 or v6)")

        # NOTE: returns an ``ipaddress`` network object, not the input string.
        return value_v4 or value_v6
| true |
e5889d814a222edc7b97b6c493b22255ada4c8bc | Python | Soumick-Pyne/Quant101 | /Getting Data/yahoofin_intro.py | UTF-8 | 307 | 2.671875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 15:07:24 2020
@author: User
"""
from yahoofinancials import YahooFinancials
# Ticker symbol whose price history we want (Microsoft).
ticker = "MSFT"
# Create the YahooFinancials wrapper for the ticker.
yahoo_financials = YahooFinancials(ticker)
# Daily OHLC price history for the window (start date, end date, interval).
data = yahoo_financials.get_historical_price_data("2018-04-24","2020-04-24","daily")
| true |
ae60871ad12e71138a7a67bc236080bdf397e1d7 | Python | shubhamrocks888/python_oops | /class_vs_static_method.py | UTF-8 | 3,410 | 4.71875 | 5 | [] | no_license | ## class method vs static method in Python
'''Class Method'''
A class method receives the class as implicit first argument, just like an instance method receives the instance.
Syntax:
class C(object):
@classmethod
def fun(cls, arg1, arg2, ...):
....
fun: function that needs to be converted into a class method
returns: a class method for function.
##NOTE: if self is used in this function, it will give you an error (a class method receives cls, not self).
1. A class method is a method which is bound to the class and not the object of the class.
2. They have the access to the state of the class as it takes a class parameter that points
to the class and not the object instance.
3. It can modify a class state that would apply across all the instances of the class.
For example it can modify a class variable that will be applicable to all the instances.
'''Static Method'''
A static method does not receive an implicit first argument.
Syntax:
class C(object):
@staticmethod
def fun(arg1, arg2, ...):
...
returns: a static method for function fun.
##NOTE: if self or cls is used in this function,it will give you an error.
1. A static method is also a method which is bound to the class and not the object of the class.
2. A static method can’t access or modify class state.
3. It is present in a class because it makes sense for the method to be present in class.
'''Class method vs Static Method'''
1. A class method takes cls as first parameter while a static method needs no specific parameters.
2. A class method can access or modify class state while a static method can’t access or modify it.
3. In general, static methods know nothing about class state. They are utility type methods that take
some parameters and work upon those parameters. On the other hand class methods must have class as parameter.
4. We use @classmethod decorator in python to create a class method and we use @staticmethod decorator
to create a static method in python.
'''When to use what?'''
1. We generally use class method to create factory methods. Factory methods return class object
( similar to a constructor ) for different use cases.
2. We generally use static methods to create utility functions.
'''How to define a class method and a static method?'''
# Python program to demonstrate
# use of class method and static method.
from datetime import date
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
# a class method to create a Person object by birth year.
@classmethod
def fromBirthYear(cls, name, year):
return cls(name, date.today().year - year)
# a static method to check if a Person is adult or not.
@staticmethod
def isAdult(age):
return age > 18
person1 = Person('mayank', 21)
person2 = Person.fromBirthYear('mayank', 1996)
print (person1.age)
print (person2.age)
# print the result
print (Person.isAdult(22))
#Output:
21
21
True
## @staticmethod
def isAdult(self):
return self.age > 18
'''TypeError: isAdult() missing 1 required positional argument: 'self' '''
#NOTE: 1. That's why no self or cls is used in static method function
## 2. Similarly,no self is used in class method function
| true |
8b26a1720e11a851c8b9f1021b4fc93d0d65e82e | Python | sreejith-mq/prashantassignment | /training/training1.py | UTF-8 | 3,753 | 2.890625 | 3 | [] | no_license | import cv2
import numpy as np
import os
from random import shuffle
from tqdm import tqdm
import tflearn
import tensorflow as tf
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import matplotlib.pyplot as plt
TRAIN_DIR = '/home/student/Desktop/algonlty/trainmquo' #training directory
TEST_DIR = '/home/student/Desktop/algonlty/testmquo' #testing directory
# Learning rate: embedded in the run name and used by the regression layer.
LR = 1e-3
# Run/checkpoint identifier combining the learning rate and architecture tag.
MODEL_NAME = 'signxunsign-{}-{}.model'.format(LR, '2conv-basic')
def label_img(img):
    """Map a training-image file name to its one-hot class label.

    The class is encoded as the file-name prefix before the first dot:
    ``sign`` -> [1, 0], ``unsign`` -> [0, 1].  Any other prefix yields
    None (the original's implicit fall-through).
    """
    prefix = img.split('.')[0]
    one_hot = {'sign': [1, 0], 'unsign': [0, 1]}
    return one_hot.get(prefix)
#preparing training data
def create_train_data():
    """Load every image in TRAIN_DIR as grayscale and pair it with its label.

    Returns (and caches in 'train_data.npy') a shuffled list of
    [image_array, label_array] pairs.
    """
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR,img)
        # NOTE: `img` is rebound from the file name to the pixel array here.
        img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
        training_data.append([np.array(img),np.array(label)]) # preparing training data as 2d list where 1st numpy array of image and 2nd is label in 0/1
    shuffle(training_data) # shuffle training for leveraging more randomness
    np.save('train_data.npy', training_data)
    return training_data
#preparing test data
def process_test_data():
    """Load every image in TEST_DIR as grayscale, keyed by its numeric id.

    Returns (and caches in 'test_data.npy') a shuffled list of
    [image_array, image_number] pairs; test images carry no label.
    """
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR, img)
        # The file-name prefix before the first dot is the image id.
        img_num = img.split('.')[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        testing_data.append([np.array(img), img_num]) # preparing training data as 2d list where 1st numpy array of image and 2nd is image number
    shuffle(testing_data) # shuffle testing for levaraging more randomness
    np.save('test_data.npy', testing_data)
    return testing_data
train_data = create_train_data()
test_data=process_test_data()
#defining CNN: five conv/max-pool stages followed by a dense head
tf.reset_default_graph()
# Input: 67x829 single-channel (grayscale) images.
convnet = input_data(shape=[None, 67, 829, 1], name='input')
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
convnet = fully_connected(convnet, 1024, activation='relu')
# Dropout regularization (0.8 as passed to tflearn.dropout).
convnet = dropout(convnet, 0.8)
# Two-way softmax output: [sign, unsign].
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
# Restore previously trained weights if a tflearn checkpoint exists.
# BUG FIX: the original format string had two '{}' placeholders but only one
# argument, so str.format raised IndexError before the check could even run.
if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model loaded!')
#preparing training and validation sets
# NOTE(review): both slices are prefixes of train_data, so with 81 samples
# train (81) and test (51) overlap; the validation set was probably meant
# to be train_data[-30:] -- confirm before trusting the validation metric.
train = train_data[:81] # 61x2, i[0]=67x829
test = train_data[:-30] # 51x2
#loading training and validations sets
X = np.asarray([i[0] for i in train]) # 61x67x829
X=np.reshape(X,(81,67,829,1))
Y = np.asarray([i[1] for i in train]) # 61x2
test_x = np.asarray([i[0] for i in test])
test_x=np.reshape(test_x,(51,67,829,1)) # preparing validation images
test_y = np.asarray([i[1] for i in test]) # preparing validation labels
#training model
model.fit({'input': X}, {'targets': Y}, n_epoch=20, validation_set=({'input': test_x}, {'targets': test_y}),snapshot_step=500, show_metric=True,batch_size=3, run_id=MODEL_NAME)
| true |
a4d4b5114e4ddcd2f71b4e1526bc6734a188caf5 | Python | firemeadow/DL_Final | /stock_gan.py | UTF-8 | 2,922 | 2.546875 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
import matplotlib as plt
from gen import Generator
from disc import Discriminator
from steig import *
def discriminator_loss(real_output, fake_output, loss):
    """Discriminator objective: real samples should score 1, fakes 0.

    *loss* is a criterion such as nn.BCEWithLogitsLoss; the two terms
    are summed into a single scalar.
    """
    ones = torch.ones(np.shape(real_output))
    zeros = torch.zeros(np.shape(fake_output))
    return loss(real_output, ones) + loss(fake_output, zeros)
def generator_loss(fake_output, loss):
    """Generator objective: fool the discriminator into scoring fakes as 1."""
    target = torch.ones(np.shape(fake_output))
    return loss(fake_output, target)
def train_step(data, label, gen_model, disc_model):
    """Run one forward pass for both networks and return their losses.

    Positive targets are weighted by 0.99 in the BCE-with-logits
    criterion.  The models are returned unchanged so the caller can
    keep threading them through the loop.
    """
    criterion = nn.BCEWithLogitsLoss(pos_weight=torch.ones((1,)) * 0.99)
    generated = gen_model(data).view(1, 1, 1)
    target = label.view(1, 1, 1)
    # Score the fake first, then the real sample (same order as before).
    fake_score = disc_model(generated)
    real_score = disc_model(target)
    g_loss = generator_loss(fake_score, criterion)
    d_loss = discriminator_loss(real_score, fake_score, criterion)
    return g_loss, d_loss, gen_model, disc_model
def train(eigen_portfolio, momentums, num_attr, LEARNING_RATE, NUM_EPOCHS, BATCH_SIZE, NUM_BATCHES):
    """Adversarial training loop over the eigen-portfolio series.

    Returns the recorded generator/discriminator losses plus the two
    trained models.  Fixes vs. the original:
      * it returned the undefined names `generator`/`discriminator`
        (NameError as soon as training finished) -- now returns the
        actual `gen_model`/`disc_model`;
      * `gen_loss.numpy()` raises RuntimeError on a tensor that requires
        grad -- losses are now read with .detach().item();
      * `epoch is not NUM_EPOCHS-1` compared ints by identity -- now `!=`.
    """
    gen_model = Generator(num_attr)
    gen_opt = optim.SGD(gen_model.parameters(), lr=LEARNING_RATE)
    disc_model = Discriminator()
    disc_opt = optim.SGD(disc_model.parameters(), lr=LEARNING_RATE)
    gens = []
    discs = []
    for epoch in range(NUM_EPOCHS):
        for batch, mom in zip(eigen_portfolio, momentums):
            gen_opt.zero_grad()  # zero the gradient buffers
            disc_opt.zero_grad()
            gen_loss, disc_loss, gen_model, disc_model = train_step(batch, mom, gen_model, disc_model)
            # Retain the graph on every epoch but the last, as before.
            if epoch != NUM_EPOCHS - 1:
                gen_loss.backward(retain_graph=True)
                disc_loss.backward(retain_graph=True)
            else:
                gen_loss.backward()
                disc_loss.backward()
            gen_opt.step()
            disc_opt.step()
        # Record the last batch's losses for this epoch.
        gen = gen_loss.detach().item()
        disc = disc_loss.detach().item()
        print(gen)
        print(disc)
        gens.append(gen)
        discs.append(disc)
    return gens, discs, gen_model, disc_model
if __name__ == '__main__':
    # Build the eigen-portfolio feature series and momentum targets from
    # the raw data file (genEig/loadData come from the steig module).
    eigen_portfolio, _, momentums = genEig(loadData('data.txt'), 256, 2, 5, False, saveState = "lucasTest")
    print(np.shape(momentums))
    num_days = np.shape(eigen_portfolio)[0]
    num_attr = np.shape(eigen_portfolio)[1]
    # Convert to tensors: one row of num_attr features per day.
    eigen_portfolio = torch.tensor(np.reshape(eigen_portfolio, (num_days, num_attr)))
    momentums = torch.tensor(momentums)
    LEARNING_RATE = 0.01
    NUM_EPOCHS = 100
    BATCH_SIZE = 128
    NUM_BATCHES = num_days - BATCH_SIZE
    gens, discs, generator, discriminator = train(eigen_portfolio, momentums, num_attr, LEARNING_RATE, NUM_EPOCHS, BATCH_SIZE, NUM_BATCHES)
| true |
c83accc05097718a0ea4b6317433d85f51428dea | Python | wapor/euler | /42_coded_triangle_numbers.py | UTF-8 | 1,076 | 4.09375 | 4 | [] | no_license | #!/usr/bin/python
# The nth term of the sequence of triangle numbers is given by, t(n) = n(n+1)/2;
# so the first ten triangle numbers are:
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
# By converting each letter in a word to a number corresponding to its
# alphabetical position and adding these values we form a word value. For
# example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value
# is a triangle number then we shall call the word a triangle word.
# Using words.txt (right click and 'Save Link/Target As...'), a 16K text file
# containing nearly two-thousand common English words, how many are triangle
# words?
def word_value(word):
    """Sum of the 1-based alphabet positions of the letters in *word* (A=1)."""
    return sum(ord(char) - ord('A') + 1 for char in word)


def triangle_numbers(limit=100):
    """Set of the triangle numbers t(n) = n(n+1)/2 for n in 1..limit-1."""
    return {i * (i + 1) // 2 for i in range(1, limit)}


def count_triangle_words(words):
    """Count how many words have a triangular word value."""
    triangles = triangle_numbers()
    return sum(1 for word in words if word_value(word) in triangles)


if __name__ == '__main__':
    # Ported from Python 2 (print statement, true division) to Python 3;
    # // keeps the triangle numbers integral.
    filename = 'words.txt'
    with open(filename) as handle:
        words = handle.read().replace('"', '').split(',')
    # Sanity check: the file is expected to contain upper-case words only.
    for letter in ''.join(words):
        assert ord('A') <= ord(letter) <= ord('Z')
    print('Number of triangular words:', count_triangle_words(words))
| true |
3a6603bae888bed4314c550a8a169f9d7848d6e7 | Python | afsneto/Sispot | /le_arquivo_zw_lis_rev04.py | UTF-8 | 16,314 | 3.03125 | 3 | [] | no_license | """
Arquivo para leitura de saída do line constants do ATP quando solicitada Z_w da LT
revisão 04 - capturar modos para até 4 circuitos (há diferenças nas saídas .lis para T_i)
apresenta opções de escolha para o usuario após leitura do arquivo .lis
captura valores de resistência, indutância e capacitância da LT
arquivo funciona caso sejam imprimidos apenas matriz de susceptância em mho/km e
matriz de impedâncias em ohm/km
"""
import math
import cmath
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
# n = number of reduced conductors (phases) found in the .lis file; it is
# also the number of propagation modes.
n = 0
diretorio = 0
# Keep prompting until the user supplies a readable .lis file path.
while diretorio == 0:
    try:
        leitura = input('Digite o diretório do arquivo com seu nome e sua extensão \n'
                        'por exemplo: C:/Users/Matheus/Desktop/meuarquivo.lis: \n')
        arquivo = open(leitura, 'r', encoding='utf8')
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # catching OSError would cover the open() failures.
        print('Há algo errado com o diretorio ou arquivo informados, verifique.')
    else:
        # NOTE(review): missing parentheses -- `.close` is only referenced,
        # not called, so this probe file handle is left open.
        arquivo.close
        diretorio = 1
# Determine the number of circuits and, consequently, of modes: scan the
# conductor table and keep the largest phase number found in the
# fixed-width column [7:14] until the '-------' terminator row.
with open(leitura, 'r', encoding='utf8') as arquivo:
    texto = arquivo.readlines()
for linha in texto:
    if 'Line conductor table after sorting and initial processing.' in linha:
        j = 0
        while texto[texto.index(linha) + 3 + j][7:14] != '-------':
            if int(texto[texto.index(linha) + 3 + j][7:14]) > n:
                n = int(texto[texto.index(linha) + 3 + j][7:14])
                j = j + 1
            else:
                j = j + 1
print(f'No arquivo indicado, há {n} condutores reduzidos (fases), portanto, haverá {n} modos de propagação.')
# Result containers.  range(1, 1) is empty, so each expression below builds
# n (or n x n) empty lists that are appended to per scanned frequency; the
# defaultdicts are keyed by frequency and hold n x n matrices.
freq = []
zc = [[numero for numero in range(1, 1)] for valor in range(0, n)]
resistencia = defaultdict(lambda: [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)])
indutancia = defaultdict(lambda: [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)])
capacitancia = defaultdict(lambda: [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)])
mod_zc = [[numero for numero in range(1, 1)] for valor in range(0, n)]
angle_zc = [[numero for numero in range(1, 1)] for valor in range(0, n)]
vel = [[numero for numero in range(1, 1)] for valor in range(0, n)]
alfa = [[numero for numero in range(1, 1)] for valor in range(0, n)]
t_i = defaultdict(lambda: [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)])
mod_t_i = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
angle_t_i = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
r_fase = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
l_fase = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
c_fase = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
# cont indexes freq while parsing (incremented once per frequency block).
cont = 0
# Parse every "Modal parameters" block: phase R/L/C (lower triangle),
# per-mode characteristic impedance, velocity, attenuation, and the modal
# transformation matrix T_i.  All fields are fixed-width columns.
for linha in texto:
    if 'Modal parameters at frequency FREQ = ' in linha:
        # Store the scan frequency.
        freq.append(float(linha[38:52]))
        for i in range(n):
            # Store the phase-domain parameters (lower triangle only).
            for k in range(i+1):
                resistencia[freq[cont]][i][k] = float(texto[texto.index(linha) - (n-i)*3][(6 + 14*k):(18 + 14*k)])
                # Inductances in mH (reactance divided by 2*pi*f).
                indutancia[freq[cont]][i][k] = 1000*1/(2*math.pi*freq[cont])*float(texto[texto.index(linha) - ((n-i)*3 - 1)][(6 + 14*k):(18 + 14*k)])
                # Capacitances in nF (susceptance divided by 2*pi*f).
                capacitancia[freq[cont]][i][k] = 10**9*1/(2*math.pi*freq[cont])*float(texto[texto.index(linha) - (n*3 + 3 + (n-i)*2)][(5 + 14*k):(18 + 14*k)])
        for j in range(n):
            # Store the characteristic impedance in ohm (real + imag parts).
            zc[j].append(float(texto[texto.index(linha)+3+j][49:61]) +
                float(texto[texto.index(linha) + 3 + j][62:75])*1j)
            # zc_real[j].append(float(texto[texto.index(linha)+3+j][49:61]))
            # zc_imag[j].append(float(texto[texto.index(linha) + 3 + j][62:75]))
            # Store the modal velocity in km/s.
            vel[j].append(float(texto[texto.index(linha) + 3 + j][91:103]))
            # Store the attenuation in np/km.
            alfa[j].append(float(texto[texto.index(linha) + 3 + j][119:131]))
        # Store the modal transformation matrices; the .lis layout differs
        # when n > 6 because the printer wraps T_i after six columns.
        if n <= 6:
            for i in range(n):
                for k in range(n):
                    t_i[freq[cont]][i][k] = float(texto[texto.index(linha) + 5 + n + i][(0 + 22 * k):(22 + 22 * k)]) + \
                                            float(texto[texto.index(linha) + 6 + 2 * n + i][(0 + 22 * k):(22 + 22 * k)]) * 1j
        else:
            for i in range(n):
                for k in range(n):
                    if k < 6:
                        t_i[freq[cont]][i][k] = float(texto[texto.index(linha) + 5 + n + 2*i][(0 + 22 * k):(22 + 22 * k)]) + \
                                                float(texto[texto.index(linha) + 6 + 3 * n + 2*i][(0 + 22 * k):(22 + 22 * k)]) * 1j
                    else:
                        t_i[freq[cont]][i][k] = float(texto[texto.index(linha) + 6 + n + 2*i][(0 + 22 * (k-6)):(22 + 22 * (k-6))]) + \
                                                float(texto[texto.index(linha) + 7 + 3 * n + 2*i][(0 + 22 * (k-6)):(22 + 22 * (k-6))]) * 1j
        cont = cont + 1
# Interactive menu: loop until the user picks option 6 (quit).  Each option
# consumes the data parsed above; per-option scratch lists are reset to
# empty after plotting so a re-selection does not duplicate data.
entrada = 99
while entrada != 6:
    try:
        entrada = int(input('O arquivo .lis informado foi lido. O que deseja fazer?\n'
                            '1 - Imprimir as matrizes de transformação T_i e sua inversa para uma frequência especificada \n'
                            '2 - Plotar as velocidades e atenuações modais \n'
                            '3 - Plotar as impedâncias características modais \n'
                            '4 - Plotar os elementos da matriz T_i \n'
                            '5 - Plotar parâmetros R, L e C de fase \n'
                            '6 - Sair \n'))
        if entrada == 6:
            break
        elif entrada == 1:
            # Option 1: print T_i and its inverse for a chosen frequency index.
            indice_freq = 1
            while indice_freq != -1:
                try:
                    print(f'O vetor que possui todas as frequências escaneadas é dado por: \n {freq}')
                    indice_freq = int(input('Informe o índice do vetor de frequências a ser verificado em T_i \n'
                                            f'Os índices variam de 0 para a primeira posição até {len(freq)-1}'
                                            f' para a última (-1 para sair): \n'))
                    if indice_freq == -1:
                        break
                    else:
                        print(f'Para o índice escolhido ({indice_freq}), a matriz de transformação modal associada'
                              f' refere-se à frequência {freq[indice_freq]} Hz e é dada por: \n')
                        # Print the chosen T_i matrix row by row.
                        t_i_num = np.array(t_i[freq[indice_freq]])
                        for linha in range(len(t_i[freq[indice_freq]])):
                            print(f'linha {linha + 1}: {t_i_num[linha]}')
                        print('A matriz inversa de T_i é dada por: ')
                        # Print the inverse of the chosen T_i matrix.
                        inv_t_i = np.linalg.inv(np.array(t_i[freq[indice_freq]]))
                        for linha in range(len(t_i[freq[indice_freq]])):
                            print(f'linha {linha + 1}: {inv_t_i[linha]}')
                        print('\n')
                except (ValueError, IndexError):
                    print(f'Entre com um valor inteiro entre 0 e {len(freq)-1}.')
        elif entrada == 2:
            plt.figure()
            # Plot the modal velocities (top subplot).
            plt.subplot(2, 1, 1)
            for i in range(len(vel)):
                plt.plot(freq, vel[i], label=f'Modo {i+1}')
            plt.legend(loc=0)
            plt.ylabel('km/s')
            plt.xlabel('Hz')
            plt.xscale('log')
            plt.grid(True)
            plt.title('Velocidades e Atenuações Modais')
            # Plot the modal attenuations (bottom subplot).
            plt.subplot(2, 1, 2)
            for i in range(len(alfa)):
                plt.plot(freq, alfa[i], label=f'Modo {i+1}')
            plt.legend(loc=0)
            plt.ylabel('np/km')
            plt.xlabel('Hz')
            plt.xscale('log')
            plt.yscale('log')
            plt.grid(True)
            plt.show()
        elif entrada == 3:
            # Plot the modal characteristic impedances (magnitude and angle).
            for i in range(n):
                for j in zc[i]:
                    mod_zc[i].append(abs(j))
                    angle_zc[i].append(cmath.phase(j)*180/math.pi)
            plt.figure()
            # Magnitude in ohm (top subplot).
            plt.subplot(2, 1, 1)
            for i in range(len(mod_zc)):
                plt.plot(freq, mod_zc[i], label=f'Modo {i+1}')
            plt.legend(loc=0)
            plt.ylabel('ohm')
            plt.xlabel('Hz')
            plt.xscale('log')
            plt.grid(True)
            plt.title('Módulos e ângulos das impedâncias características modais')
            # Angle in degrees (bottom subplot).
            plt.subplot(2, 1, 2)
            for i in range(len(angle_zc)):
                plt.plot(freq, angle_zc[i], label=f'Modo {i+1}')
            plt.legend(loc=0)
            plt.ylabel('graus')
            plt.xlabel('Hz')
            plt.xscale('log')
            plt.grid(True)
            plt.show()
            mod_zc = [[numero for numero in range(1, 1)] for valor in range(0, n)]
            angle_zc = [[numero for numero in range(1, 1)] for valor in range(0, n)]
        elif entrada == 4:
            # Plot the eigenvectors (T_i matrix elements).
            for i in range(n):
                for j in range(n):
                    for f in freq:
                        mod_t_i[i][j].append(abs(t_i[f][i][j]))
                        angle_t_i[i][j].append(cmath.phase(t_i[f][i][j])*180/math.pi)
            # Figures always hold 3 subplots, one T_i row per subplot;
            # the number of figures equals the number of circuits.
            # Magnitudes of T_i.
            for i in range(len(mod_t_i)):
                if i == 0 or i % 3 == 0:
                    plt.figure()
                    plt.subplot(1, 3, 1)
                elif i == 1 or i % 3 == 1:
                    plt.subplot(1, 3, 2)
                    plt.title('Módulos dos elementos de T_i')
                elif i == 2 or i % 3 == 2:
                    plt.subplot(1, 3, 3)
                for j in range(len(mod_t_i)):
                    plt.plot(freq, mod_t_i[i][j], label=f'T_i({i+1},{j+1}')
                plt.legend(loc=0)
                plt.xlabel('Hz')
                plt.xscale('log')
                plt.grid(True)
            # Angles of T_i.
            for i in range(len(angle_t_i)):
                if i == 0 or i % 3 == 0:
                    plt.figure()
                    plt.subplot(1, 3, 1)
                elif i == 1 or i % 3 == 1:
                    plt.subplot(1, 3, 2)
                    plt.title('Ângulos dos elementos de T_i')
                elif i == 2 or i % 3 == 2:
                    plt.subplot(1, 3, 3)
                for j in range(len(angle_t_i)):
                    plt.plot(freq, angle_t_i[i][j], label=f'T_i({i+1},{j+1}')
                plt.legend(loc=0)
                plt.xlabel('Hz')
                plt.ylabel('graus')
                plt.xscale('log')
                plt.ylim([-190, 190])
                plt.grid(True)
            plt.show()
            mod_t_i = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
            angle_t_i = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
        elif entrada == 5:
            # Phase-domain parameters: mirror the lower triangle so the
            # full symmetric R/L/C matrices can be plotted.
            for i in range(n):
                for j in range(n):
                    for f in freq:
                        if j > i:
                            r_fase[i][j].append(resistencia[f][j][i])
                            l_fase[i][j].append(indutancia[f][j][i])
                            c_fase[i][j].append(capacitancia[f][j][i])
                        else:
                            r_fase[i][j].append(resistencia[f][i][j])
                            l_fase[i][j].append(indutancia[f][i][j])
                            c_fase[i][j].append(capacitancia[f][i][j])
            # Figures always hold 3 subplots, one matrix row per subplot;
            # the number of figures equals the number of circuits.
            # Resistances.
            for i in range(len(r_fase)):
                if i == 0 or i % 3 == 0:
                    plt.figure()
                    plt.subplot(1, 3, 1)
                elif i == 1 or i % 3 == 1:
                    plt.subplot(1, 3, 2)
                    plt.title('Resistências de fase')
                elif i == 2 or i % 3 == 2:
                    plt.subplot(1, 3, 3)
                for j in range(len(r_fase)):
                    plt.plot(freq, r_fase[i][j], label=f'R({i + 1},{j + 1}')
                plt.legend(loc=0)
                plt.xlabel('Hz')
                plt.ylabel('ohm/km')
                plt.xscale('log')
                plt.grid(True)
            # Inductances.
            for i in range(len(l_fase)):
                if i == 0 or i % 3 == 0:
                    plt.figure()
                    plt.subplot(1, 3, 1)
                elif i == 1 or i % 3 == 1:
                    plt.subplot(1, 3, 2)
                    plt.title('Indutâncias de fase')
                elif i == 2 or i % 3 == 2:
                    plt.subplot(1, 3, 3)
                for j in range(len(l_fase)):
                    plt.plot(freq, l_fase[i][j], label=f'L({i + 1},{j + 1}')
                plt.legend(loc=0)
                plt.xlabel('Hz')
                plt.ylabel('mH/km')
                plt.xscale('log')
                plt.grid(True)
            # Capacitances.
            for i in range(len(c_fase)):
                if i == 0 or i % 3 == 0:
                    plt.figure()
                    plt.subplot(1, 3, 1)
                elif i == 1 or i % 3 == 1:
                    plt.subplot(1, 3, 2)
                    plt.title('Capacitâncias de fase')
                elif i == 2 or i % 3 == 2:
                    plt.subplot(1, 3, 3)
                for j in range(len(c_fase)):
                    plt.plot(freq, c_fase[i][j], label=f'C({i + 1},{j + 1}')
                plt.legend(loc=0)
                plt.xlabel('Hz')
                plt.ylabel('nF/km')
                plt.xscale('log')
                plt.grid(True)
            plt.show()
            r_fase = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
            l_fase = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
            c_fase = [[[numero for numero in range(1, 1)] for linha in range(0, n)] for coluna in range(0, n)]
        else:
            print('Digite um número entre 1 e 5, conforme índice acima, ou digite 6 para sair.')
    except ValueError:
        print('Digite um número entre 1 e 5, conforme índice acima, ou digite 6 para sair.')
| true |
7b81288582f1f0ed2cf1756761aaff77115c180c | Python | millionszhang/spider | /总结/正则匹配.py | UTF-8 | 560 | 3.046875 | 3 | [] | no_license | import re
#匹配.com 或 .cn 的url 网址
pattern = "[a-zA-Z]+://[^\s]*[.com|.cn]"
string = "<a href='http://www.baidu.com'>"
result = re.search(pattern,string)
print(result)
#匹配电话号码
pattern = "\d{4}-\d{7}|\d{3}-\d{8}"
string = "021-6728263653682382265236"
result = re.search(pattern,string)
print(result)
#匹配电子邮件地址
pattern = "\w+([.+-]\w+)*@\w+([.-]\w+)*\.\w+([.-]\w+)*"
string = "<a href ='www.baidu.com'>百度首页</a><br><a href='mailto;c-e+o@iqi-anyue.com.cn'>电子邮件地址</a>"
result = re.search(pattern,string)
print(result)
# | true |
f868adae2949c7b40bd900824f2edbad0ac0d832 | Python | TheWatcherI/TheWatcherI | /BetterROT13.py | UTF-8 | 1,830 | 3.546875 | 4 | [] | no_license | import string , sys
class ROT13:
	"""ROT13 transcoder.

	The input text is lower-cased; each ASCII letter is shifted 13
	positions with wrap-around, every other character passes through
	unchanged.  The intermediate attributes and pipeline methods keep
	the original public surface (get_index / new_Charcters /
	get_newText / Decoder / Encoder).
	"""
	def __init__(self, Text:str) -> None:
		self.alphabet_string = list(string.ascii_lowercase)
		self.Text:str = Text
		self.new_Text = [""] * len(self.Text)
		self.index_list = []       # (position, alphabet index) for letters
		self.Not_chars = []        # (position, char) for non-letters
		self.new_charcters = []    # (position, rotated alphabet index)
		self.Full = []
		self.new_String:str = ""

	def get_index(self) -> None:
		"""Split the lower-cased text into letter indices and pass-through chars."""
		self.Text = self.Text.lower()
		for pos, char in enumerate(self.Text):
			if char in self.alphabet_string:
				# list.index replaces the original O(26) manual scan.
				self.index_list.append((pos, self.alphabet_string.index(char)))
			else:
				self.Not_chars.append((pos, char))

	def new_Charcters(self) -> None:
		"""Rotate every recorded letter index by 13, wrapping at 26."""
		for pos, index in self.index_list:
			# (index + 13) % 26 is equivalent to the original two-branch
			# +13 / -13 logic.
			self.new_charcters.append((pos, (index + 13) % 26))
		self.Full = self.new_charcters + self.Not_chars

	def get_newText(self) -> None:
		"""Reassemble the output string from the positioned characters.

		BUG FIX: the original guarded the int branch with a `Found` flag
		that was re-initialised every iteration, making it dead logic.
		"""
		for pos, item in self.Full:
			if isinstance(item, int):
				self.new_Text[pos] = self.alphabet_string[item]
			else:
				self.new_Text[pos] = item
		self.new_String = "".join(self.new_Text)

	def Decoder(self) -> str:
		"""Return the ROT13 transcription (ROT13 is its own inverse)."""
		self.get_index()
		self.new_Charcters()
		self.get_newText()
		return self.new_String

	def Encoder(self) -> str:
		"""Alias of Decoder: encoding and decoding are the same operation."""
		self.get_index()
		self.new_Charcters()
		self.get_newText()
		return self.new_String
if __name__ == "__main__" :
	# Expect exactly one CLI argument: the text to transcode.
	# BUG FIX: the original tested `sys.argv[1] == str`, comparing the
	# argument against the *type* str -- always False, so the success
	# branch was unreachable and every valid call printed the error.
	# sys.argv entries are always strings, so no type check is needed.
	if len(sys.argv) == 2:
		rot13 = ROT13(sys.argv[1])
		print(rot13.Decoder())
	else :
		print("You have an Error Try Again")
| true |
709b7a52e8deff71aaf12b4529926ff6e166e56a | Python | linannn/LeetCode_Solution | /714.买卖股票的最佳时机含手续费.py | UTF-8 | 1,063 | 3.375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=714 lang=python3
#
# [714] 买卖股票的最佳时机含手续费
#
# @lc code=start
from typing import List
class Solution:
    def maxProfit(self, prices: List[int], fee: int) -> int:
        """Max profit with unlimited transactions and a per-trade fee.

        Rolling two-state DP over the days: ``cash`` is the best profit
        while holding no stock, ``hold`` the best profit while holding
        one share; the fee is charged at buy time.
        """
        if len(prices) < 2:
            return 0
        cash = 0
        hold = -prices[0] - fee
        for price in prices[1:]:
            # Sell today, or keep doing nothing.
            cash = max(cash, hold + price)
            # Buy today (paying the fee), or keep holding.
            hold = max(hold, cash - price - fee)
        return cash
# 作者:liweiwei1419
# 链接:https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee/solution/dong-tai-gui-hua-by-liweiwei1419-6/
# 来源:力扣(LeetCode)
# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
# @lc code=end
| true |
2bf5456c68732aec55db5e7dd8aa93e1879e0f90 | Python | Jyouya/ExtraMacros | /Macro.py | UTF-8 | 630 | 3.046875 | 3 | [] | no_license |
class Macro:
    """A macro entry with an on-screen grid position and optional color."""

    def __init__(self, modifier=None, key=None, command=None, text=None, x=None, y=None, color=None):
        self.modifier = modifier
        self.key = key
        self.command = command
        self.text = text
        self.x = x
        self.y = y
        self.color = color

    def move(self, x, y):
        """Place the macro at grid cell (x, y), 0-based."""
        self.x, self.y = x, y

    def todict(self):
        """Serialize to the 1-based string form expected by the macro file."""
        serialized = {
            "command": self.command,
            "text": self.text,
            "x": str(self.x + 1),
            "y": str(self.y + 1),
        }
        if self.color:
            # '#RRGGBB' -> opaque 'FFRRGGBB'.
            serialized["color"] = "FF" + self.color[1:]
        return serialized
| true |
e1cbd7815615664eda7f7a7490dabf23b3171b2e | Python | EllieHachem/Some-projects | /Teaching-Others-day-1continuation-of-first-day/main.py | UTF-8 | 215 | 3.078125 | 3 | [] | no_license |
#print function is simple
#day 1 ali hachem python teaching
print("type anything inside quotation then hit run")
#just type print and, between parentheses, quotation marks with the words inside -- that is it
| true |
6d6630f303f6d2829b0149a60db8e28d4f8f1a39 | Python | SamG97/AnthropometricsToday | /backend/restAPI/NearestNeigbour.py | UTF-8 | 1,906 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
from DataBaseScript import getAllMeasurements
def discardNone(a):
    """Return the rows of *a* that contain no None entries.

    The original compared with `a[i][j] == None`; identity (`is None`)
    is the correct test (PEP 8) and avoids surprises from objects that
    override __eq__.  Row order and row identity are preserved.
    """
    return [row for row in a if all(item is not None for item in row)]
# Load the raw measurements and flatten each student's (name, value)
# pairs into a dict keyed by measurement name.
studentList = getAllMeasurements()
students = [
    {studentList.getall()[j][i][0]: studentList.getall()[j][i][1] for i in
     range(len(studentList.getall()[j]))}
    for j in range(len(studentList.getall()))]
# Feature matrix: one row of three head measurements per student, keeping
# only the rows with no missing value.
alldists = np.array(discardNone([[students[i]['Face_iobreadth'],
                                 students[i]['Face_breadth'],
                                 students[i]['Head_length']] for i in
                                range(len(students))]))
# Inverse covariance of the features (Mahalanobis-style metric); falls back
# to the identity when the covariance matrix cannot be inverted.
elipseMatrix = np.cov(alldists, rowvar=False)
try:
    elipseMatrix = np.linalg.inv(elipseMatrix)
except:
    # NOTE(review): bare except -- np.linalg.LinAlgError is the expected
    # failure here; anything else is silently masked.
    elipseMatrix = np.identity(len(elipseMatrix))
def calcDist(a, b):
    """Mahalanobis-style distance between vectors *a* and *b*.

    Uses the module-level ``elipseMatrix`` (inverse covariance, or the
    identity fallback) as the metric.
    """
    diff = b - a
    return np.sqrt(diff @ elipseMatrix @ diff)
def sigmoid(x):
    """Logistic function, written as e^x / (1 + e^x)."""
    ex = np.exp(x)
    return ex / (1 + ex)
def getIndex(dists):
    """Sample an index with probability proportional to its weight in *dists*."""
    # `total` replaces the original accumulator that shadowed the builtin
    # `sum`; Python's sum() folds left-to-right from 0, identically.
    total = sum(dists)
    probs = [weight / total for weight in dists]
    return np.random.choice(range(len(dists)), 1, p=list(probs))[0]
def calcNearestNeigbour(node, points):
    """Pick a neighbour index for *node*, sampled by distance weight.

    Rows of *points* containing None are dropped before the distances
    are computed; returns None for an empty *points*.
    NOTE(review): getIndex favours LARGER distances -- confirm that this
    inversion (farther points more likely) is intended for a
    "nearest neighbour".
    """
    if len(points) == 0:
        return None
    target = np.array(node)
    candidates = discardNone(points)
    weights = [calcDist(np.array(candidate), target) for candidate in candidates]
    return getIndex(weights)
if __name__ == '__main__':
    # tests: smoke-check the metric, the sampler and the full pipeline.
    print(elipseMatrix)
    print(calcDist(np.array([0, 0, 0]), np.array([1, 2, 3])))
    print(getIndex([100, 300, 500, 2000, 0.1, 2000]))
    print(calcNearestNeigbour([2, 4, 5],
                              [[1, 4, 5], [2, 3, 2], [2, 2, 2], [4, 2, 3],
                               [1, 1, 1]]))
| true |
8999a672393dc3fb3c97555a1d2beac6051edd43 | Python | simone-campagna/dtcalc | /dt.py | UTF-8 | 9,633 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
import time
class MetaDT(type):
    """Metaclass that equips every DT subclass with +/-infinity singletons
    and the lower-cased label sets used when parsing them from strings.
    """
    def __new__(cls, class_name, class_bases, class_dict):
        t = super().__new__(cls, class_name, class_bases, class_dict)
        t.P_INF = t(t.P_INF_VALUE)
        t.M_INF = t(t.M_INF_VALUE)
        # BUG FIX: _string2int() lower-cases its input before testing
        # membership, so these sets must hold lower-cased labels.  The
        # original stored them in display case ('+TInf', ...), which made
        # every infinity label fail to parse.
        t.P_INF_LABELS = {label.lower() for label in t.P_INF_ADDITIONAL_LABELS}
        t.P_INF_LABELS.add(t.P_INF_LABEL.lower())
        t.M_INF_LABELS = {label.lower() for label in t.M_INF_ADDITIONAL_LABELS}
        t.M_INF_LABELS.add(t.M_INF_LABEL.lower())
        return t
class DT(int, metaclass=MetaDT):
    """Base class for time-like integer values with +/-infinity sentinels.

    The MetaDT metaclass materialises P_INF/M_INF instances and the
    label sets from the *_VALUE / *_LABEL class attributes below.
    """
    P_INF_VALUE = 2**64
    P_INF_LABEL = '+Inf'
    P_INF_ADDITIONAL_LABELS = {'Inf'}
    P_INF_LABELS = {P_INF_LABEL.lower(), 'Inf'.lower()}

    M_INF_VALUE = -2**64
    M_INF_LABEL = '-Inf'
    # BUG FIX: was `{}` -- an empty *dict*; an empty set is what the
    # metaclass iterates over (set({}) happened to work, but the intent
    # is a set).
    M_INF_ADDITIONAL_LABELS = set()
    M_INF_LABELS = {M_INF_LABEL.lower()}

    @classmethod
    def fromstring(cls, init):
        """Build an instance from its string representation."""
        return cls(cls._string2int(init))

    @classmethod
    def _string2int(cls, init):
        """Parse *init* into an int; subclasses must override.

        BUG FIX: the original `raise NotImplemented()` tried to *call*
        the NotImplemented sentinel, which itself raises a confusing
        TypeError; NotImplementedError is the exception meant for
        abstract hooks.
        """
        raise NotImplementedError()

    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__, str(self))
class DateTime(DT):
DEFAULT_TIME_FORMAT = "%Y%m%d %H:%M:%S"
TIME_FORMAT = DEFAULT_TIME_FORMAT
ALTERNATE_TIME_FORMATS = (
DEFAULT_TIME_FORMAT,
'%Y%m%d',
)
TIME_CONVERTER = time.localtime
#TIME_CONVERTER = time.gmtime
QUANTUM = 0
P_INF_LABEL = '+TInf'
P_INF_ADDITIONAL_LABELS = {'TInf', '+Inf', 'Inf'}
M_INF_LABEL = '-TInf'
M_INF_ADDITIONAL_LABELS = {'-Inf'}
def __new__(cls, init=None):
if init is None:
init = time.time()
elif isinstance(init, str):
init = cls._string2int(init)
t_seconds = init
if cls.QUANTUM > 0:
t_seconds = (int(t_seconds + time_quantum - 1) // time_quantum ) * time_quantum
return DT.__new__(cls, t_seconds)
@classmethod
def set_time_format(cls, time_format):
DateTime.TIME_FORMAT = time_format
@classmethod
def quantized(cls, time_quantum, t):
if time_quantum > 0:
qt = (int(t + time_quantum - 1) // time_quantum ) * time_quantum
return cls(qt)
else:
return cls(t)
@classmethod
def _string2int(cls, init):
l_init = init.lower()
if l_init in cls.P_INF_LABELS:
init = cls.P_INF
elif l_init in cls.M_INF_LABELS:
init = cls.M_INF
else:
if cls.TIME_FORMAT != cls.DEFAULT_TIME_FORMAT:
time_formats = (cls.TIME_FORMAT, ) + cls.ALTERNATE_TIME_FORMATS
else:
time_formats = cls.ALTERNATE_TIME_FORMATS
for time_format in time_formats:
try:
init = time.mktime(time.strptime(init, time_format))
break
except ValueError:
pass
else:
raise ValueError("cannot convert string {0} to a DateTime object".format(init))
return init
def __str__(self):
return repr(self.tostring(self.TIME_FORMAT))
def tostring(self, format):
if self == 0:
return ''
elif self <= self.M_INF_VALUE:
return self.M_INF_LABEL
elif self >= self.P_INF_VALUE:
return self.P_INF_LABEL
else:
return time.strftime(format, self.TIME_CONVERTER(self))
def __add__(self, other):
if self == self.M_INF_VALUE:
if other == Duration.P_INF_VALUE:
raise OverflowError("invalid operation {0!r} + {1!r}".format(self, other))
else:
return self
elif self == self.P_INF_VALUE:
if other == Duration.M_INF_VALUE:
raise OverflowError("invalid operation {0!r} + {1!r}".format(self, other))
else:
return self
if isinstance(other, DateTime):
raise TypeError("invalid operands: {0} + {1}".format(self.__class__.__name__, other.__class__.__name__))
elif isinstance(other, Duration):
if other >= Duration.P_INF_VALUE:
#print(">>> + {0} + {1} ===> {2}".format(self, other, self.P_INF))
return self.P_INF
elif other <= Duration.M_INF_VALUE:
#print(">>> - {0} + {1} ===> {2}".format(self, other, self.P_INF))
return self.M_INF
else:
return self.__class__(int(self) + other)
elif isinstance(other, int):
return self.__class__(int(self) + other)
else:
raise TypeError("invalid operands: {0} + {1}".format(self.__class__.__name__, other.__class__.__name__))
    def __sub__(self, other):
        """DateTime - DateTime -> Duration; DateTime - Duration -> DateTime.

        Non-Duration, non-DateTime operands are coerced to Duration first.
        Infinite operands saturate; inf - inf (same sign) raises OverflowError.
        """
        if isinstance(other, DateTime):
            # DateTime - DateTime yields a Duration.
            if self == self.P_INF_VALUE:
                if other == self.P_INF_VALUE:
                    raise OverflowError("invalid operation {0!r} + {1!r}".format(self, other))
                else:
                    return Duration.P_INF
            if self == self.M_INF_VALUE:
                if other == self.M_INF_VALUE:
                    raise OverflowError("invalid operation {0!r} + {1!r}".format(self, other))
                else:
                    return Duration.M_INF
            else:
                return Duration(int(self) - int(other))
        else:
            # Coerce any other operand (int, str, ...) to a Duration.
            if not isinstance(other, Duration):
                other = Duration(other)
            if self == self.P_INF_VALUE:
                if other == Duration.M_INF_VALUE:
                    raise OverflowError("invalid operation {0!r} + {1!r}".format(self, other))
                else:
                    return self
            elif self == self.M_INF_VALUE:
                if other == Duration.P_INF_VALUE:
                    raise OverflowError("invalid operation {0!r} + {1!r}".format(self, other))
                else:
                    return self
            else:
                return DateTime(int(self) - int(other))
    def date(self):
        # Reduce this timestamp to day resolution by wrapping it in Date.
        return Date(self)
#class DateTime(DateTimeType):
# TIME_QUANTUM = 0
# def date(self):
# return Date(self)
#class Time(DateTime):
# pass
class Date(DateTime):
    """Day-resolution DateTime: formats as YYYYMMDD, quantized to whole days."""
    DEFAULT_TIME_FORMAT = "%Y%m%d"
    TIME_FORMAT = DEFAULT_TIME_FORMAT
    ALTERNATE_TIME_FORMATS = (
        DEFAULT_TIME_FORMAT,
    )
    # One day in seconds: Date values snap to day boundaries.
    TIME_QUANTUM = 86400
    def datetime(self):
        # Promote back to a full-resolution DateTime.
        return DateTime(self)
class Duration(DT):
    """Length of time in seconds, parsed/rendered as '[D+][HH:][MM:]SS'.

    Supports +/- infinity sentinels via the labels below; None constructs -1.
    """
    P_INF_LABEL = '+DInf'
    P_INF_ADDITIONAL_LABELS = {'DInf', '+Inf', 'Inf'}
    M_INF_LABEL = '-DInf'
    M_INF_ADDITIONAL_LABELS = {'-Inf'}
    def __new__(cls, init=None):
        """Build a Duration from None (-1), a number, or a string."""
        if init is None:
            init = -1
        elif isinstance(init, str):
            if init == cls.M_INF_LABEL:
                init = cls.M_INF
            elif init == cls.P_INF_LABEL:
                init = cls.P_INF
            else:
                init = int(cls._string2int(init))
        return DT.__new__(cls, init)
    @classmethod
    def _string2int(cls, init):
        """Parse 'D+HH:MM:SS' (days and hours optional) into total seconds."""
        l_init = init.lower()
        if l_init in cls.M_INF_LABELS:
            init = cls.M_INF
        elif l_init in cls.P_INF_LABELS:
            init = cls.P_INF
        else:
            # Optional leading 'days+' prefix.
            l = init.split('+', 1)
            if len(l) == 2:
                n_days = int(l[0].strip())
                s_rest = l[1].strip()
            else:
                n_days = 0
                s_rest = l[0].strip()
            # Remaining colon-separated fields: SS, MM:SS, or HH:MM:SS.
            l = s_rest.split(':', 2)
            if len(l) == 3:
                n_hours = int(l[0].strip())
                n_minutes = int(l[1].strip())
                n_seconds = int(l[2].strip())
            elif len(l) == 2:
                n_hours = 0
                n_minutes = int(l[0].strip())
                n_seconds = int(l[1].strip())
            elif len(l) == 1:
                n_hours = 0
                n_minutes = 0
                n_seconds = int(l[0].strip())
            init = n_seconds + 60 * (n_minutes + 60 * (n_hours + 24 * n_days))
        return init
    @classmethod
    def fromstring(cls, init):
        # Alternate constructor from the textual form.
        return cls(cls._string2int(init))
    def __str__(self):
        return repr(self.tostring())
    def tostring(self, days=True, microseconds=False):
        """Render as '[D+]HH:MM:SS' ('0' for zero, labels for infinities).

        With days=False the day count is folded into the hour field.
        """
        if self == 0:
            return '0'
        elif self <= self.M_INF_VALUE:
            return self.M_INF_LABEL
        elif self >= self.P_INF_VALUE:
            return self.P_INF_LABEL
        else:
            i_self = int(self)
            # NOTE(review): n_microseconds is a float here, but '{0:6d}'
            # below requires an int -- confirm (only hit when
            # microseconds=True and the value has a fractional part).
            n_microseconds = (self - i_self) * 1000000
            n_days, i_rest = divmod(i_self, 86400)
            n_hours, i_rest = divmod(i_rest, 3600)
            n_minutes, n_seconds = divmod(i_rest, 60)
            if microseconds and n_microseconds:
                ms = '.{0:6d}'.format(n_microseconds)
            else:
                ms = ''
            if n_days and not days:
                n_hours += n_days * 24
                n_days = 0
            # Omit the hour field entirely for sub-hour durations.
            if n_days or n_hours:
                l_hms = [n_hours, n_minutes, n_seconds]
            else:
                l_hms = [n_minutes, n_seconds]
            hms = ':'.join('{0:02d}'.format(n) for n in l_hms)
            if n_days:
                fmt = "{0}+{1}{2}"
            else:
                fmt = "{1}{2}"
            return fmt.format(n_days, hms, ms)
def create_dt(s):
    """Best-effort conversion of *s* to a Date, DateTime or Duration.

    Numbers become Durations; existing DT instances pass through; strings are
    tried against Date, then DateTime, then Duration, returning the first
    successful parse. Unparseable values are returned unchanged.
    """
    if isinstance(s, (int, float)):
        return Duration(s)
    elif isinstance(s, DT):
        return s
    else:
        for dtClass in Date, DateTime, Duration:
            try:
                #print("DBG: CREATE_DT(s={0}, type={1}, {2}) ->".format(s, type(s), dtClass))
                t = dtClass(s)
            except ValueError:
                pass
            else:
                return t
        else:
            # Nothing parsed: hand the original value back to the caller.
            return s
| true |
7f4d9c588810194d0e05b8795f285f138162d3cc | Python | pgasidlo/adventofcode2019 | /day10/main.py | UTF-8 | 2,362 | 2.890625 | 3 | [] | no_license | import sys
import copy
from fractions import gcd
from numpy import arctan
import math
import pprint
pp = pprint.PrettyPrinter(indent=4)
# am: asteroid map as a list of rows (each a list of '#'/'.' characters);
# ams: map width in columns, taken from the first line's length.
# NOTE(review): the file handle from open() is never closed.
am = []
ams = None
for aml in open("input.txt"):
    aml = aml.rstrip()
    if (ams is None):
        ams = len(aml)
    am.append(list(aml))
def graph(am):
    # Debug helper: print the asteroid map, one row per line (Python 2 print).
    for aml in am:
        print "".join(aml)
def calculate(am, x, y):
    """Group all asteroids visible from station (x, y) by direction.

    Returns a dict keyed by the reduced direction vector 'dx|dy', each value
    holding the clockwise-from-up angle 'fi' (degrees) and the list 'as' of
    asteroids lying along that ray. Returns {} when (x, y) is not an asteroid.
    """
    if (am[y][x] != '#'):
        return {}
    # Work on a copy so the caller's map is untouched; mark the station.
    am = copy.deepcopy(am)
    am[y][x] = 'X';
    r = {}
    for cx in range(ams):
        for cy in range(ams):
            if (am[cy][cx] == '#'):
                dx = cx - x
                dy = cy - y
                # Reduce (dx, dy) by their gcd so collinear asteroids share a key.
                g = gcd(abs(dx),abs(dy))
#                print 'x = {} -> {} = {}, y = {} -> {} = {}, g = {}'.format(x,cx,dx,y,cy,dy,g)
                dx = dx / g
                dy = dy / g
                # Axis-aligned directions get fixed angles (0 = straight up).
                if (dy == 0):
                    if (dx < 0):
                        fi = 270.00
                    else:
                        fi = 90.00
                elif (dx == 0):
                    if (dy < 0):
                        fi = 0.00
                    else:
                        fi = 180.00
                else:
                    # General case: arctan plus per-quadrant correction so fi
                    # grows clockwise starting from the upward direction.
                    fi = arctan(1.0 * dx/dy) * 180.0 / math.pi
                    if (dx < 0):
                        if (dy < 0):
                            fi = 360.00 - fi
                            pass
                        else:
                            fi = 180.00 - fi
                            pass
                    else:
                        if (dy < 0):
                            fi = -fi
                            pass
                        else:
                            fi = 180.00 - fi
                            pass
                # NOTE(review): this is the distance from the map origin, not
                # from the station (x, y) -- looks wrong for part-2 ordering;
                # confirm against the expected vaporization order.
                d = math.sqrt(cx * cx + cy * cy)
                key = '{}|{}'.format(dx,dy)
                if not key in r:
                    r[key] = { 'fi': fi, 'dx': dx, 'dy': dy, 'as': [] }
                r[key]['as'].append({'x': cx, 'y': cy, 'd': d});
    return r
# Part 1: try every grid cell as the station and keep the one that sees the
# most distinct directions (= visible asteroids).
best = None
for x in range(ams):
    for y in range(ams):
        r = calculate(am, x, y)
        c = len(r.keys())
#        print 'x = {}, y = {}, c = {}'.format(x,y,c)
        if (best is None or best['c'] < c):
            best = { 'x': x, 'y': y, 'c': c, 'r': r }
#pp.pprint(best)
print "part1 = ", best['c']
def vaporize(am, best):
    """Sweep the laser clockwise over the direction groups, popping one
    asteroid per direction per revolution, and report the 200th vaporized.

    Python 2 only: relies on dict.values() returning a sortable list.
    """
    r = best['r'].values()
    # Directions ordered by sweep angle.
    r.sort(key=lambda v:v['fi'])
    # NOTE(review): reverse=True plus pop(0) below vaporizes the FARTHEST
    # asteroid on each ray first; the puzzle's laser hits the nearest first.
    # Confirm intended ordering (see also the 'd' note in calculate()).
    for v in r:
        v['as'].sort(key=lambda v:v['d'], reverse=True)
#    pp.pprint(r)
    # fis is computed but never used.
    fis = map(lambda v:v['fi'], r)
    c = 0
    i = 0
    n = len(r)
    while True:
        if (len(r[i]['as'])):
            a = r[i]['as'].pop(0)
            c = c + 1
            if (c == 200):
                print "part2: c =", c, "x =", a['x'], "y =", a['y']
            f = True
        i = i + 1
        if (i == n):
            # f tracks whether this full revolution vaporized anything;
            # NOTE(review): f is unbound here if the very first sweep pops
            # nothing (cannot happen with non-empty input, but fragile).
            if (not f):
                break
            i = 0
            f = False
vaporize(am, best)
| true |
75124312e3d7fef2dd16433d10ab689b5f91ce74 | Python | shehryarbajwa/Algorithms--Datastructures | /recursion/*interview-question-is_palindrome_recursively.py | UTF-8 | 3,610 | 4.90625 | 5 | [] | no_license | # A palindrome is a word that is the reverse of itself—that is, it is the same word when read forwards and backwards.
# For example:
# "madam" is a palindrome
# "abba" is a palindrome
# "cat" is not
# "a" is a trivial case of a palindrome
# The goal of this exercise is to use recursion to write a function is_palindrome that takes a string as input and checks whether that string is a palindrome.
def is_palindrome(input_string):
    """Return True if *input_string* reads the same forwards and backwards.

    Recursive definition: a string of length 0 or 1 is trivially a
    palindrome; a longer string is one iff its outer characters match and
    the interior substring is itself a palindrome.

    The comparison is case-sensitive, so e.g. 'Udacity' -> False.

    Fixes over the previous version: it returned True as soon as the outer
    characters matched (so 'abca' was wrongly accepted), it discarded the
    recursive result (falling through and returning None), and it compared
    characters with `is not` instead of `!=`.
    """
    # Base case: empty or single-character strings are palindromes.
    if len(input_string) <= 1:
        return True
    # Mismatched outer characters settle the question immediately.
    if input_string[0] != input_string[-1]:
        return False
    # Outer characters match: the interior decides the answer.
    return is_palindrome(input_string[1:-1])
print(is_palindrome('Udacity'))
# So we need to think this through with what a palindrome is. Each starting letter is equal to the last letter in case
# the length of the palindrome is even
# In case the length of the palindrome is odd, the final length of the input will be 1
# Example of odd palindrome
# madam
# ada
# d
# Since the length of the final string is 1, we call it a palindrome
# Example of odd palindrome
# racecar
# aceca
# cec
# e
# Since the length of the final string is 1, we call it a palindrome
# Example of even palindrome
# abba
# bb
# Since element at 1 and -1 are the same, we call it True
# Example of even palindrome
# abccba
# bccb
# cc
# Since the element at 1 and -1 are the same, we call it True
def _is_palindrome(input):
if len(input) <= 1:
return True
else:
first_character = input[0]
last_character = input[-1]
sub_input = input[1:-1]
return (first_character == last_character) and _is_palindrome(sub_input)
#Lets take two examples one even and odd and run it via this algorithm
# Example 1:
# madam
# Since len is > 1, we continue
# First_character is m
# Last character is m
# sub_input is ada
# we return first_character = last_character which is True and then again run the _is_palindrome function
# Now first_character is a
# Last character is a
# sub_input = d
# first_character and last_character are True but we run _is_palindrome again
# Now the length is <= 1, so we return True
# We returned True and True
# If it is False and True, it will return False
# Example 2:
# abba
# Since len is > 1, we continue
# First_character is a
# Last character is a
# sub_input is bb
# we return first_character = last_character which is True and then again run the _is_palindrome function
# Now first_character is b
# Last character is b
# sub_input = "" (the empty string)
# first_character and last_character are True but we run _is_palindrome again
# Now the length is <= 1, so we return True
# We returned True and True
# In this case it is True and True which leads to True
# If it is False and True, it will return False
# Example 3:
# Not a palindrome
# abcdba
# Since len is > 1, we continue
# First_character is a
# Last character is a
# sub_input is bcdb
# return True and _is_palindrome again
# first_character is now b
# last_character is b again
# sub_input = cd
# run is_palindrome again
# first_character is c
# last_character is d
# sub_string is None
# return False and return the value without proceeding further
# and means both conditions are True to be True
# FYI [] still has length property accessible on it and returns 0
#It is most optimal if recursion has one base case
print(_is_palindrome('abba'))
print(_is_palindrome('boby')) | true |
5ab5b22a14a517f105271e63716d21841839fd19 | Python | Mycroft0121/pyvolve | /src/matrix_builder.py | UTF-8 | 26,315 | 2.859375 | 3 | [] | no_license | #! /usr/bin/env python
##############################################################################
## pyvolve: Python platform for simulating evolutionary sequences.
##
## Written by Stephanie J. Spielman (stephanie.spielman@gmail.com)
##############################################################################
'''
This module will generate the Markov chain's instantaneous rate matrix, Q.
'''
import numpy as np
from copy import deepcopy
from genetics import *
from state_freqs import *
ZERO = 1e-8
MOLECULES = Genetics()
class MatrixBuilder(object):
    '''
    Parent class for model instantaneous matrix creation.

    Child class include the following:
        1. *aminoAcid_Matrix*
            - Empirical amino acid models (currently, JTT, WAG, and LG and SCG05).
        2. *nucleotide_Matrix*
            - Nucleotide models (GTR and nested)
        3. *mechCodon_Matrix*
            - So-called mechanistic codon models, which include GY-style and MG-style models (dN/dS models)
        4. *ECM_Matrix*
            - ECM (Kosiol2007) empirical codon model
        5. *mutSel_Matrix*
            - Mutation-selection model (Halpern and Bruno 1998), extended for either codon or nucleotides
    '''
    def __init__(self, param_dict, scale_matrix = 'yang'):
        '''
        Construction requires a single positional argument, **param_dict**. This argument should be a dictionary containing parameters about the substitution process in order to construct the matrix.

        Optional keyword arguments:
            1. **scale_matrix** = <'yang', 'neutral', 'False/None'>. This argument determines how rate matrices should be scaled. By default, all matrices are scaled according to Ziheng Yang's approach, in which the mean substitution rate is equal to 1. However, for codon models (GY94, MG94), this scaling approach effectively causes sites under purifying selection to evolve at the same rate as sites under positive selection, which may not be desired. Thus, the 'neutral' scaling option will allow for codon matrices to be scaled such that the mean rate of *neutral* subsitution is 1. You may also opt out of scaling by providing either False or None to this argument, although this is not recommended.
        '''
        self.params = param_dict
        self.scale_matrix = scale_matrix
        # Normalize string schemes to lower case before validating.
        if type(self.scale_matrix) is str:
            self.scale_matrix = self.scale_matrix.lower()
        assert( self.scale_matrix == 'yang' or self.scale_matrix == 'neutral' or self.scale_matrix is False or self.scale_matrix is None ), "You have specified an incorrect matrix scaling scheme. Either 'Yang', 'neutral', or False/None are accepted (case-insensitive)."
    def _sanity_params(self):
        '''
        Sanity-check that all necessary parameters have been supplied to construct the matrix.
        Overridden by every child class; this stub only announces misuse.
        '''
        print "Parent class method, not called."
    def _sanity_params_state_freqs(self):
        '''
        Sanity-check specifically state_freqs key/value in the params dictionary.
        If state_freqs not provided, then set to equal.
        '''
        if 'state_freqs' not in self.params:
            self.params['state_freqs'] = np.repeat(1./self._size, self._size)
        if len(self.params['state_freqs']) != self._size:
            raise AssertionError("state_freqs key in your params dict does not contain the correct number of values for your specified model.")
    def _sanity_params_mutation_rates(self, symmetric = True):
        '''
        Sanity-check specifically mu key/value in params dictionary.
        NOTE: mutation-selection models may take asymmetric mutation rates. However, this function assumes that rates are symmetric.
        Thus, if A->C is present but C->A is not, the latter will take on the A->C value.
        Any missing mutation rates are given a value of 1.

        NOTE(review): the *symmetric* parameter is currently unused.
        '''
        if 'mu' in self.params:
            # Single float provided
            if type(self.params['mu']) is float:
                new_mu = {'AC':1., 'CA':1., 'AG':1., 'GA':1., 'AT':1., 'TA':1., 'CG':1., 'GC':1., 'CT':1., 'TC':1., 'GT':1., 'TG':1.}
                for key in new_mu:
                    new_mu[key] *= self.params['mu']
                self.params['mu'] = new_mu
            # Dictionary of mu's provided. Make sure dictionary is full.
            elif type(self.params['mu']) is dict:
                for key in ['AC', 'AG', 'AT', 'CG', 'CT', 'GT']:
                    rev_key = str(key[1] + key[0])
                    # Neither key pair. Add both
                    if key not in self.params['mu'] and rev_key not in self.params['mu']:
                        self.params['mu'][key] = 1.
                        self.params['mu'][rev_key] = 1.
                    # If one key but not the other, fill in missing one symmetrically.
                    elif key not in self.params['mu'] and rev_key in self.params['mu']:
                        self.params['mu'][key] = self.params['mu'][rev_key]
                    elif key in self.params['mu'] and rev_key not in self.params['mu']:
                        self.params['mu'][rev_key] = self.params['mu'][key]
            else:
                raise AssertionError("You must provide EITHER a single mutation or a dictionary of mutation rates for nucleotide pairs to the key 'mu' in the 'params' dictionary.")
        # Nothing specified, so simply use equal mutation rates to construct matrix
        else:
            self.params['mu'] = {'AC':1., 'CA':1., 'AG':1., 'GA':1., 'AT':1., 'TA':1., 'CG':1., 'GC':1., 'CT':1., 'TC':1., 'GT':1., 'TG':1.}
        # Apply kappa as needed.
        if 'kappa' in self.params:
            temp_mu = deepcopy( self.params['mu'] )
            self.params['mu'] = {'AC': temp_mu['AC'], 'AG': temp_mu['AG'] * float(self.params['kappa']), 'AT': temp_mu['AT'], 'CG': temp_mu['CG'], 'CT': temp_mu['CT']*float(self.params['kappa']), 'GT': temp_mu['GT'], 'CA': temp_mu['CA'], 'GA': temp_mu['GA'] * float(self.params['kappa']), 'TA': temp_mu['TA'], 'GC': temp_mu['GC'], 'TC': temp_mu['TC']*float(self.params['kappa']), 'TG': temp_mu['TG']}
    def _build_matrix( self, params ):
        '''
        Generate an instantaneous rate matrix.
        Off-diagonal entries come from the child's _calc_instantaneous_prob;
        each diagonal entry is set so its row sums to zero.
        '''
        matrix = np.zeros( [self._size, self._size] ) # For nucleotides, self._size = 4; amino acids, self._size = 20; codons, self._size = 61.
        for s in range(self._size):
            for t in range(self._size):
                # Non-diagonal
                rate = self._calc_instantaneous_prob( s, t, params )
                matrix[s][t] = rate
            # Fill in the diagonal position so the row sums to 0, but ensure it doesn't became -0
            matrix[s][s]= -1. * np.sum( matrix[s] )
            if matrix[s][s] == -0.:
                matrix[s][s] = 0.
            #assert ( abs(np.sum(matrix[s])) < ZERO ), "Row in instantaneous matrix does not sum to 0."
        return matrix
    def _compute_yang_scaling_factor(self, matrix, params):
        '''
        Compute scaling factor. Note that we have arguments here since this function is used *both* with attributes and for temporary neutral matrix/params.
        '''
        scaling_factor = 0.
        # Sum of diagonal rates weighted by the stationary frequencies.
        for i in range(self._size):
            scaling_factor += ( matrix[i][i] * params['state_freqs'][i] )
        return scaling_factor
    def _compute_neutral_scaling_factor(self):
        '''
        Compute scaling factor you'd get if w=1, so mean neutral substitution rate is 1.
        Avoids confounding time issue with selection strength.
        '''
        neutral_params = self._create_neutral_params()
        neutral_matrix = self._build_matrix( neutral_params )
        scaling_factor = self._compute_yang_scaling_factor(neutral_matrix, neutral_params)
        return scaling_factor
    def __call__(self):
        '''
        Generate, scale, return instantaneous rate matrix.
        '''
        # Construct matrix
        self.inst_matrix = self._build_matrix( self.params )
        # Scale matrix as needed.
        if self.scale_matrix:
            if self.scale_matrix == 'yang':
                scaling_factor = self._compute_yang_scaling_factor(self.inst_matrix, self.params)
            elif self.scale_matrix == 'neutral':
                scaling_factor = self._compute_neutral_scaling_factor()
            else:
                raise AssertionError("You should never be getting here!! Please email stephanie.spielman@gmail.com and report error 'scaling arrival.'")
            self.inst_matrix /= -1.*scaling_factor
        return self.inst_matrix
    def _is_TI(self, source, target):
        '''
        Determine if a given nucleotide change is a transition or a tranversion. Used in child classes nucleotide_Matrix, mechCodon_Matrix, ECM_Matrix, mutSel_Matrix .
        Returns True for transition, False for transversion.
        Arguments "source" and "target" are the actual nucleotides (not indices).
        '''
        ti_pyrim = source in MOLECULES.pyrims and target in MOLECULES.pyrims
        ti_purine = source in MOLECULES.purines and target in MOLECULES.purines
        if ti_pyrim or ti_purine:
            return True
        else:
            return False
    def _is_syn(self, source, target):
        '''
        Determine if a given codon change is synonymous or nonsynonymous. Used in child classes mechCodon_Matrix, ECM_Matrix .
        Returns True for synonymous, False for nonsynonymous.
        Arguments arguments "source" and "target" are codon indices (0-60, alphabetical).
        '''
        source_codon = MOLECULES.codons[source]
        target_codon = MOLECULES.codons[target]
        if ( MOLECULES.codon_dict[source_codon] == MOLECULES.codon_dict[target_codon] ):
            return True
        else:
            return False
    def _get_nucleotide_diff(self, source, target):
        '''
        Get the nucleotide difference(s) between two codons. Used in child classes ECM_Matrix, mechCodon_Matrix, mutSel_Matrix .
        Returns a string representing the nucleotide differences between source and target codon.
        For instance, if source is AAA and target is ATA, the string AT would be returned. If source is AAA and target is ACC, then ACAC would be returned.
        Input arguments source and target are codon indices (0-60, alphabetical).
        '''
        source_codon = MOLECULES.codons[source]
        target_codon = MOLECULES.codons[target]
        return "".join( [source_codon[i]+target_codon[i] for i in range(len(source_codon)) if source_codon[i] != target_codon[i]] )
    def _calc_instantaneous_prob(self, source, target, params):
        '''
        Calculate a given element in the instantaneous rate matrix.
        Returns the substitution probability from source to target, for a given model.
        Arguments "source" and "target" are *indices* for the relevant aminos (0-19) /nucs (0-3) /codons (0-60).
        PARENT CLASS FUNCTION. NOT IMPLEMENTED.
        '''
        return 0
class aminoAcid_Matrix(MatrixBuilder):
    '''
    Child class of MatrixBuilder. This class implements functions relevant to constructing amino acid model instantaneous matrices.
    Note that all empirical amino acid replacement matrices are in the file empirical_matrices.py.
    '''
    def __init__(self, *args, **kwargs):
        super(aminoAcid_Matrix, self).__init__(*args, **kwargs)
        # 20 amino acid states.
        self._size = 20
        self._code = MOLECULES.amino_acids
        self._sanity_params()
        self._init_empirical_matrix()
    def _sanity_params(self):
        '''
        Sanity-check that all necessary parameters have been supplied to construct the matrix.
        Required aminoAcid_Matrix params keys:
            1. state_freqs
            2. aa_model (but this is checked earlier here)
        '''
        self._sanity_params_state_freqs()
        assert( 'aa_model' in self.params ), "You must specify an amino acid model (key 'aa_model') in the params dictionary."
    def _init_empirical_matrix(self):
        '''
        Function to load the appropriate replacement matrix from empirical_matrices.py
        '''
        import empirical_matrices as em
        aa_model = self.params['aa_model'].lower() # I have everything coded in lower case
        # NOTE(review): bare except plus eval() on the model name -- works,
        # but getattr(em, aa_model + "_matrix") with AttributeError would be
        # both safer and clearer.
        try:
            self.emp_matrix = eval("em."+aa_model+"_matrix")
        except:
            raise AssertionError("\n\nCouldn't figure out your empirical matrix specification. Note that we currently only support the JTT, WAG, or LG empirical amino acid models.")
    def _calc_instantaneous_prob( self, source, target, params ):
        '''
        Returns the substitution probability (s_ij * p_j, where s_ij is replacement matrix entry and p_j is target amino frequency) from source to target for amino acid empirical models.
        Arguments "source" and "target" are indices for the relevant aminos (0-19).
        * Third argument not used here! *
        '''
        return self.emp_matrix[source][target] * self.params['state_freqs'][target]
    def _compute_neutral_scaling_factor(self):
        ''' No selection component to aminoAcid empirical matrices. '''
        # -1 makes the 'neutral' scaling in __call__ a no-op (divide by 1).
        return -1.
class nucleotide_Matrix(MatrixBuilder):
    '''
    Child class of MatrixBuilder. This class implements functions relevant to constructing nucleotide model instantaneous matrices.
    All models computed here are essentially nested versions of GTR.
    '''
    def __init__(self, *args, **kwargs):
        super(nucleotide_Matrix, self).__init__(*args, **kwargs)
        # 4 nucleotide states.
        self._size = 4
        self._code = MOLECULES.nucleotides
        self._sanity_params()
    def _sanity_params(self):
        '''
        Sanity-check that all necessary parameters have been supplied to construct the matrix.
        Required nucleotide_Matrix params keys:
            1. state_freqs
            2. mu
        '''
        self._sanity_params_state_freqs()
        self._sanity_params_mutation_rates()
    def _calc_instantaneous_prob(self, source, target, params):
        '''
        Returns the substitution probability (\mu_ij * p_j, where \mu_ij are nucleotide mutation rates and p_j is target nucleotide frequency) from source to target for nucleotide models.
        Arguments "source" and "target" are indices for the relevant nucleotide (0-3).
        * Third argument not used here! *
        '''
        source_nuc = self._code[source]
        target_nuc = self._code[target]
        if source_nuc == target_nuc:
            return 0.
        else:
            # Mutation rates are keyed by the alphabetically sorted pair.
            return self.params['state_freqs'][target] * self.params['mu']["".join(sorted(source_nuc + target_nuc))]
    def _compute_neutral_scaling_factor(self):
        ''' No selection component to nucleotide matrices. '''
        # -1 makes the 'neutral' scaling in __call__ a no-op (divide by 1).
        return -1.
class mechCodon_Matrix(MatrixBuilder):
    '''
    Child class of MatrixBuilder. This class implements functions relevant to "mechanistic" (dN/dS) codon models.
    Models include both GY-style or MG-style varieties, although users should *always specify codon frequencies* to class instance!
    Both dS and dN variation are allowed, as are GTR mutational parameters (not strictly HKY85).
    '''
    def __init__(self, params, type = "GY94", scale_matrix = "yang"):
        # *type* (shadows the builtin; kept for interface compatibility)
        # selects GY94 (target codon frequencies) or MG94 (target nucleotide
        # frequencies).
        self.model_type = type
        assert(self.model_type == 'GY94' or self.model_type == 'MG94'), "\n\nFor mechanistic codon models, you must specify a model_type as GY94 (uses target *codon* frequencies) or MG94 (uses target *nucleotide* frequencies.) I RECOMMEND MG94!!"
        super(mechCodon_Matrix, self).__init__(params, scale_matrix)
        # 61 sense codons (stop codons excluded).
        self._size = 61
        self._code = MOLECULES.codons
        self._sanity_params()
        if self.model_type == "MG94":
            # Derive target nucleotide frequencies from the codon frequencies.
            self._nuc_freqs = CustomFrequencies(by = 'codon', freq_dict = dict(zip(self._code, self.params['state_freqs'])))(type = 'nuc')
    def _sanity_params(self):
        '''
        Sanity-check that all necessary parameters have been supplied to construct the matrix.
        Required codon_Matrix params keys:
            1. state_freqs
            2. mu
            3. beta, alpha
        Additionally, grabs nucleotide frequencies if needed for MG94 simulation.
        '''
        self._sanity_params_state_freqs()
        self._sanity_params_mutation_rates()
        # 'omega' is accepted as an alias for 'beta' (dN).
        if 'omega' in self.params:
            self.params['beta'] = self.params['omega']
        if 'beta' not in self.params:
            raise AssertionError("You must provide a dN value (using either the key 'beta' or 'omega') in params dictionary to run this model!")
        if 'alpha' not in self.params:
            self.params['alpha'] = 1.
    def _calc_prob(self, target_codon, target_nuc, nuc_pair, factor):
        '''
        Calculate instantaneous probability of (non)synonymous change for mechanistic codon models.
        Argument *factor* is either dN or dS.
        NOTE: can leave self.params here as state_freqs still won't change for neutral scaling factor.
        '''
        prob = self.params['mu'][nuc_pair] * factor
        if self.model_type == 'GY94':
            prob *= self.params['state_freqs'][target_codon]
        else:
            prob *= self._nuc_freqs[ MOLECULES.nucleotides.index(target_nuc) ]
        return prob
    def _calc_instantaneous_prob(self, source, target, params):
        '''
        Returns the substitution probability from source to target for mechanistic codon models.
        Arguments "source" and "target" are indices for the relevant codons (0-60).
        Third argument can be specified as non-self when we are computing neutral scaling factor.
        '''
        nuc_diff = self._get_nucleotide_diff(source, target)
        # Only single-nucleotide changes are instantaneous (diff string
        # length 2 = one source/target nucleotide pair).
        if len(nuc_diff) != 2:
            return 0.
        else:
            nuc_pair = "".join(sorted(nuc_diff[0] + nuc_diff[1]))
            if self._is_syn(source, target):
                return self._calc_prob(target, nuc_diff[1], nuc_pair, params['alpha'])
            else:
                return self._calc_prob(target, nuc_diff[1], nuc_pair, params['beta'])
    def _create_neutral_params(self):
        '''
        Return self.params except with alpha, beta equal to 1.
        '''
        return {'state_freqs': self.params['state_freqs'], 'mu': self.params['mu'], 'beta':1., 'alpha':1.}
class mutSel_Matrix(MatrixBuilder):
    '''
    Child class of MatrixBuilder. This class implements functions relevant to constructing mutation-selection balance model instantaneous matrices, according to the HalpernBruno 1998 model.
    Here, this model is extended such that it can be used for either nucleotide or codon. This class will automatically detect which one you want based on your state frequencies.
    '''
    def __init__(self, *args, **kwargs):
        super(mutSel_Matrix, self).__init__(*args, **kwargs)
        self._sanity_params()
    def _sanity_params(self):
        '''
        Sanity-check that all necessary parameters have been supplied to construct the matrix.
        Required codon_Matrix params keys:
            1. state_freqs
            2. mu
        '''
        # Infer codon vs nucleotide mode from the frequency vector's shape.
        if self.params['state_freqs'].shape == (61,):
            self._model_class = 'codon'
            self._size = 61
            self._code = MOLECULES.codons
        elif self.params['state_freqs'].shape == (4,):
            self._model_class = 'nuc'
            self._size = 4
            self._code = MOLECULES.nucleotides
        else:
            raise AssertionError("\n\nMutSel models need either codon or nucleotide frequencies.")
        self._sanity_params_state_freqs()
        self._sanity_params_mutation_rates()
    def _calc_instantaneous_prob(self, source, target, params):
        '''
        Calculate the substitution probability from source to target for mutation-selection-balance models.
        Arguments "source" and "target" are indices for the relevant codons (0-60) or nucleotide (0-3).
        Third argument can be specified as non-self when we are computing neutral scaling factor.
        '''
        nuc_diff = self._get_nucleotide_diff(source, target)
        if len(nuc_diff) != 2:
            return 0.
        else:
            pi_i = params['state_freqs'][source] # source frequency
            pi_j = params['state_freqs'][target] # target frequency
            mu_ij = params["mu"][nuc_diff] # source -> target mutation rate
            mu_ji = params["mu"][nuc_diff[1] + nuc_diff[0]] # target -> source mutation rate
            # Frequencies at/below the ZERO tolerance kill the rate; equal
            # frequencies reduce the Halpern-Bruno expression to plain mu.
            if pi_i <= ZERO or pi_j <= ZERO:
                inst_prob = 0.
            elif abs(pi_i - pi_j) <= ZERO:
                inst_prob = mu_ij
            else:
                # Halpern-Bruno fixation factor: ln(r) / (1 - 1/r) * mu.
                pi_mu = (pi_j*mu_ji)/(pi_i*mu_ij)
                inst_prob = np.log(pi_mu)/(1. - 1./pi_mu) * mu_ij
            return inst_prob
    def _create_neutral_params(self):
        '''
        Return self.params except without selection (equal state_freqs!).
        '''
        return {'state_freqs': np.repeat(1./self._size, self._size), 'mu': self.params['mu']}
class ECM_Matrix(MatrixBuilder):
    '''
    Child class of MatrixBuilder. This class implements functions relevant to constructing a matrix specifically for the ECM (described in Kosiol2007) model.
    We support both restricted (instantaneous single changes only) and unrestricted (instantaneous single, double, or triple) versions of this model (see paper for details).

    !!! NOTE: The ECM model supports omega (dN/dS) and kappa (TI/TV) ratios in their calculations, and therefore I have included these parameters here. HOWEVER, I do NOT recommend their use.
    '''
    def __init__(self, params, type = 'restricted', scale_matrix = 'yang'):
        # *type* (shadows the builtin; kept for interface compatibility)
        # selects the restricted or unrestricted ECM variant.
        if type.lower() == 'restricted' or type.lower() == 'rest':
            self.restricted = True
        elif type.lower() == 'unrestricted' or type.lower() == 'unrest':
            self.restricted = False
        else:
            raise AssertionError("For an ECM model, you must specify whether you want restricted (single nuc instantaneous changes only) or unrestricted (1-3 instantaneous nuc changes) Second argument to Model initialization should be 'rest', 'restricted', 'unrest', 'unrestricted' (case insensitive).")
        super(ECM_Matrix, self).__init__(params, scale_matrix)
        # 61 sense codons.
        self._size = 61
        self._code = MOLECULES.codons
        self._sanity_params()
        self._init_empirical_matrix()
    def _sanity_params(self):
        '''
        Sanity checks for parameters dictionary.
        Missing alpha/beta/kappa parameters default to 1 (i.e. no effect).
        '''
        self._sanity_params_state_freqs()
        if 'omega' in self.params:
            self.params['beta'] = self.params['omega']
        if 'beta' not in self.params:
            self.params['beta'] = 1.
        if 'alpha' not in self.params:
            self.params['alpha'] = 1.
        if 'k_ti' not in self.params:
            self.params['k_ti'] = 1.
        if 'k_tv' not in self.params:
            self.params['k_tv'] = 1.
    def _init_empirical_matrix(self):
        '''
        Function to load the appropriate replacement matrix from empirical_matrices.py
        '''
        import empirical_matrices as em
        if self.restricted:
            self.emp_matrix = em.ecmrest_matrix
        else:
            self.emp_matrix = em.ecmunrest_matrix
    def _set_kappa_param(self, nuc_diff):
        '''
        Calculations for the "kappa" parameter(s) for the ECM model. See the 2007 paper for details.
        Counts transitions/transversions over each source/target nucleotide
        pair in *nuc_diff* and returns k_ti^num_ti * k_tv^num_tv.
        '''
        num_ti = 0
        num_tv = 0
        # nuc_diff interleaves (source, target) nucleotide pairs.
        for i in range(0, len(nuc_diff), 2):
            source_nuc = nuc_diff[i]
            target_nuc = nuc_diff[i+1]
            if self._is_TI(source_nuc, target_nuc):
                num_ti += 1
            else:
                num_tv += 1
        return self.params['k_ti']**num_ti * self.params['k_tv']**num_tv
    def _calc_instantaneous_prob(self, source, target, params):
        '''
        Returns the substitution probability from source to target for ECM models.
        Arguments "source" and "target" are indices for the relevant codons (0-60).
        Third argument can be specified as non-self when we are computing neutral scaling factor.
        NOTE(review): _set_kappa_param reads self.params rather than the
        *params* argument, so kappa also applies when computing the neutral
        scaling factor -- confirm intended.
        '''
        nuc_diff = self._get_nucleotide_diff(source, target)
        # Restricted variant permits only single-nucleotide changes.
        if len(nuc_diff) == 0 or (self.restricted and len(nuc_diff) != 2):
            return 0.
        else:
            kappa_param = self._set_kappa_param(nuc_diff)
            if self._is_syn(source, target):
                return self.emp_matrix[source][target] * params['state_freqs'][target] * params['alpha'] * kappa_param
            else:
                return self.emp_matrix[source][target] * params['state_freqs'][target] * params['beta'] * kappa_param
    def _create_neutral_params(self):
        '''
        Return self.params except with alpha, beta equal to 1.
        '''
        return {'state_freqs': self.params['state_freqs'], 'k_ti': self.params['k_ti'], 'k_tv': self.params['k_tv'], 'alpha':1., 'beta':1.}
| true |
def toLowerCase(str):
    """Return a copy of the input string with every cased character lower-cased.

    :type str: str
    :rtype: str
    """
    lowered = str.lower()
    return lowered
print(toLowerCase("HeLLo")) | true |
669d04302e5e8971ed620d7aff7291f2c19ab74d | Python | ckallum/Daily-Coding-Problem | /solutions/#182.py | UTF-8 | 740 | 3.28125 | 3 | [
"MIT"
def minimally_connected(graph):
    """Return True if *graph* (adjacency dict of sets) is minimally connected,
    i.e. removing any single edge disconnects its two endpoints.

    The graph is mutated while testing each edge but is always restored
    before returning, so the caller's structure is left intact.

    Fixes over the previous version: it iterated a set while removing from
    and re-adding to it (CPython forbids mutating a set during iteration),
    and it returned False with the tested edge still removed, permanently
    corrupting the caller's graph.
    """
    for v in graph:
        # Iterate over a snapshot so mutating graph[v] below is safe.
        for n in list(graph[v]):
            # Temporarily delete the undirected edge v <-> n ...
            graph[v].remove(n)
            graph[n].remove(v)
            redundant = is_connected(graph, v, n)
            # ... and restore it before acting on the result.
            graph[n].add(v)
            graph[v].add(n)
            if redundant:
                # v and n stayed connected without this edge: not minimal.
                return False
    return True
def is_connected(graph, current, target, seen=None):
    """Depth-first search: True if *target* is reachable from *current*
    in *graph* (adjacency dict) without revisiting nodes listed in *seen*.

    Uses ``seen=None`` instead of the previous mutable default ``seen=[]``
    (a classic pitfall: defaults are evaluated once and shared across
    calls). Behavior is unchanged, as *seen* is never mutated in place.
    """
    if seen is None:
        seen = []
    if current == target:
        return True
    return any(is_connected(graph, n, target, seen + [current]) for n in graph[current] if n not in seen)
def main():
    """Smoke-test minimally_connected on a 2-node tree and a 3-node cycle."""
    graph = {"a": {"b"}, "b": {"a"}}
    graph2 = {"a": {"b", "c"}, "b": {"a", "c"}, "c": {"b", "a"}}
    # A single edge is minimal; the triangle has a redundant edge.
    assert minimally_connected(graph)
    assert not minimally_connected(graph2)
if __name__ == '__main__':
main()
| true |
def openOrSenior(data):
    """Classify each [age, handicap] pair: 'Senior' when age >= 55 and
    handicap > 7, otherwise 'Open'. Returns the labels in input order.
    """
    return [
        "Senior" if member[0] >= 55 and member[1] > 7 else "Open"
        for member in data
    ]
ea621e47dea7a47d0bf2a8ea65d6ced9e0a39b99 | Python | Jictyvoo/TEC502-2018.1--English-Dictionary-Game | /src/Server/models/value/Room.py | UTF-8 | 1,841 | 3.03125 | 3 | [
"MIT"
] | permissive | class Room:
def __init__(self, multicast_ip=None, name="Pseudo-Room", coordinator_player_address=None, limit_players=2,
password="", coordinator_name="Pseudo-Name"):
self.__room_ip = multicast_ip
self.__name = name
self.__coordinator_player_address = coordinator_player_address
self.__limit_players = int(limit_players)
self.__current_players = {coordinator_player_address: True}
self.__current_players_names = {coordinator_name: True}
self.__password = password
    def get_room_ip(self):
        # Multicast IP address assigned to this room (None until set).
        return self.__room_ip
    def set_room_ip(self, room_ip):
        # Assign the multicast IP address used by this room.
        self.__room_ip = room_ip
    def get_name(self):
        # Human-readable room name chosen at creation time.
        return self.__name
    def get_coordinator_player_address(self):
        # Network address of the player coordinating the room.
        return self.__coordinator_player_address
    def set_coordinator_player_address(self, coordinator_player):
        # Hand the coordinator role over to another player's address.
        self.__coordinator_player_address = coordinator_player
    def get_limit_players(self):
        # Maximum number of players allowed in the room (int).
        return self.__limit_players
def remove_player(self, player_address):
if player_address in self.__current_players.keys():
self.__current_players[player_address] = False
def add_player(self, player_address, player_name):
self.__current_players[player_address] = True
if player_name:
self.__current_players_names[player_name] = True
def get_amount_players(self):
count = 0
for key in self.__current_players.keys():
if self.__current_players[key]:
count += 1
return count
    def get_current_players_names(self):
        # Mapping of display name -> flag.  Note: remove_player only flips
        # address flags, so names are never deactivated here.
        return self.__current_players_names
    def authenticate_connection(self, password):
        # Plain-text password comparison guarding entry to the room.
        return self.__password == password
    def is_full(self):
        # Full when the count of active players has reached the limit.
        return self.get_amount_players() >= self.__limit_players
| true |
a8e65c7576a6a75de37be4a5cfd0fc9a21689909 | Python | chacham/learn-algorithm | /leetcode.com/jump-game/solve.py | UTF-8 | 260 | 2.640625 | 3 | [] | no_license | class Solution:
def canJump(self, nums: 'List[int]') -> 'bool':
maxFromHere = nums[0]
for num in nums:
if maxFromHere < 0:
return False
maxFromHere = max(num - 1, maxFromHere - 1)
return True
| true |
d1760324bbf45bfdf9239121b1c0219935075742 | Python | ObadiahBoda/Obadiah-Boda-Projects | /Misc Coding projects/Sudoku/Sudoku.py | UTF-8 | 1,796 | 2.84375 | 3 | [] | no_license | # draw grid
board = [[0, 7, 0, 0, 2, 0, 0, 0, 5],
[0, 6, 0, 0, 0, 0, 7, 0, 0],
[5, 0, 0, 0, 4, 0, 0, 0, 0],
[0, 1, 3, 0, 0, 0, 0, 0, 0],
[6, 0, 0, 0, 5, 0, 0, 0, 2],
[2, 5, 0, 0, 3, 6, 1, 9, 0],
[0, 4, 0, 0, 0, 0, 0, 0, 6],
[7, 2, 0, 4, 9, 0, 0, 5, 1],
[8, 0, 0, 0, 0, 0, 0, 0, 3]]
def print_board(bo):
    """Pretty-print a 9x9 sudoku grid with 3x3 block separators."""
    for row_idx, row in enumerate(bo):
        # Horizontal rule between every band of three rows.
        if row_idx % 3 == 0 and row_idx != 0:
            print("- - - - - - - - - - - -")
        for col_idx, cell in enumerate(row):
            # Vertical rule between every stack of three columns.
            if col_idx % 3 == 0 and col_idx != 0:
                print(" | ", end="")
            print(str(cell) + " ", end="")
        print("")
def find_empty(bo):
    """Return (row, col) of the first empty cell (value 0), or False if full."""
    for row_idx, row in enumerate(bo):
        for col_idx, cell in enumerate(row):
            if cell == 0:
                return (row_idx, col_idx)
    # Keep the original sentinel: the solver treats False as "solved".
    return False
def valid(pos, bo, num):
    """Return True if `num` may be placed at pos=(row, col) on board `bo`.

    A placement is valid when no *other* cell in the same row, column or
    3x3 box already holds `num` (the cell at `pos` itself is excluded).
    """
    row, col = pos
    # Row: any other cell in this row already holding num?
    if any(bo[row][c] == num and c != col for c in range(len(bo[0]))):
        return False
    # Column: same test down the column (board assumed square).
    if any(bo[r][col] == num and r != row for r in range(len(bo[0]))):
        return False
    # 3x3 box containing pos.
    box_row, box_col = row // 3, col // 3
    for r in range(box_row * 3, box_row * 3 + 3):
        for c in range(box_col * 3, box_col * 3 + 3):
            if bo[r][c] == num and (r, c) != pos:
                return False
    return True
def solve(bo):
    """Solve the sudoku board in place by backtracking.

    Returns True when the board has been completely and validly filled,
    False when the current partial board admits no solution.
    """
    find = find_empty(bo)
    if not find:
        # No empty cell left: the board is solved.
        return True
    else:
        row, column = find
    # Try each candidate digit 1..9 in the first empty cell.
    for i in range(1,10):
        if valid((row,column),bo , i):
            bo[row][column] = i
            # Recurse; if the rest of the board completes, we are done.
            if solve(bo):
                return True
            # Dead end: undo the placement and try the next digit.
            bo[row][column] = 0
    return False
#print_board(board)
#solve(board)
#print("")
#print_board(board) | true |
4a38c32329c142bf29ee71b3ba51e0c0ad27d213 | Python | BlackNaygas/Info-Schule | /Teilbarkeit (11.052020)/Für Schnelle (11.05.2020).py | UTF-8 | 164 | 3.515625 | 4 | [] | no_license | zahl = int(input("Bitte Zahl eingeben:"))
if (zahl % 3 == 0 and zahl % 7 == 0):
print("Die Zahl", zahl, "ist durch 3 und 7 teilbar.")
else:
print("Nix da") | true |
53f2a641953ca8100501a66753e87377ab9bf26e | Python | sjblim/population-based-rnn-optimisation | /libs/losses.py | UTF-8 | 1,092 | 2.984375 | 3 | [] | no_license | """
losses.py
Created by limsi on 03/04/2019
"""
from enum import IntEnum
import tensorflow as tf
class LossTypes(IntEnum):
    """Supported training loss kinds."""
    # Fixed: the original read `MSE = 1,` -- the stray trailing comma made
    # the assigned value a 1-tuple, which IntEnum happens to unpack back
    # to 1, but the plain int is what was intended.
    MSE = 1
    BINARY = 2
class LossFunctionHelper:
    """Maps LossTypes members to display names, loss identifier strings
    and matching TensorFlow output activations."""
    # Canonical display name for each supported loss.
    _loss_name_map = {LossTypes.BINARY: 'binary',
                      LossTypes.MSE: "mse"}
    @classmethod
    def get_valid_losses(cls):
        """Return the set of supported LossTypes members."""
        return set(cls._loss_name_map.keys())
    @classmethod
    def get_name(cls, loss_type):
        """Return the display name for loss_type (KeyError if unknown)."""
        return cls._loss_name_map[loss_type]
    @staticmethod
    def get_output_layer_by_loss(loss_type):
        """Return the output activation matching the loss: sigmoid for
        binary cross-entropy, identity (linear) for MSE."""
        if loss_type == LossTypes.BINARY:
            return tf.nn.sigmoid
        elif loss_type == LossTypes.MSE:
            return lambda x: x # linear
        else:
            raise ValueError("Unrecognised loss type: {}".format(loss_type))
    @staticmethod
    def process_outputs_by_loss(outputs, loss_type):
        """Hook for post-processing model outputs; currently a no-op."""
        return outputs
    @staticmethod
    def get_loss_function(loss_type):
        """Return the string identifier for loss_type.  Note: raises
        KeyError for unsupported types, whereas get_output_layer_by_loss
        raises ValueError."""
        return {
            LossTypes.BINARY: 'binary_crossentropy',
            LossTypes.MSE: 'mse'
        }[loss_type]
| true |
526e284ec314f21d5b9d10c92975316d2c5c7ece | Python | EricCacciavillani/Python_Stats_Class | /Stats_Exam_Python/stats_conf_no_sigma.py | UTF-8 | 851 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 28 11:41:13 2017
@author: ericcacciavillani
"""
import math
#------------------------------------
given_val = 90
sample_size = 30
critical_val = 2.575
#------------------------------------
point_estimate = given_val/ sample_size;
q_of_p = 1 - point_estimate;
margin_of_error = critical_val * math.sqrt(point_estimate * q_of_p / sample_size)
point_estimate = round (point_estimate,5)
margin_of_error = round (margin_of_error,5)
upper_limit = point_estimate + margin_of_error
lower_limit = point_estimate - margin_of_error
print("\nPoint Estimate: ", point_estimate)
print("Critical Val Z:",critical_val)
print("Margin of Error: ", margin_of_error)
print("\nCheck the limits manually after rounding!!!")
print("\nUpper Limit", upper_limit)
print("Lower Limit", lower_limit) | true |
3ffc7ef299dea4988b5cc4a643eadc26842a189c | Python | Carryours/algorithm004-03 | /Week 01/id_118/LeetCode_189_118.py | UTF-8 | 1,911 | 4.28125 | 4 | [] | no_license | from typing import List
class Solution1:
    """Brute-force rotation: shift the whole array right one step, k times."""
    def rotate(self, nums: List[int], k: int) -> None:
        """Rotate `nums` right by k positions, in place.

        Does not return anything.  Time complexity: O(n * k).
        """
        for _ in range(k):
            # Carry the last element to the front, rippling every other
            # element one slot to the right as the sweep advances.
            carry = nums[-1]
            for idx in range(len(nums)):
                nums[idx], carry = carry, nums[idx]
        print(nums)  # for testing
class Solution2:
    """Pythonic rotation using list slicing and tuple assignment.

    Time complexity: O(n) -- each slice copies part of the list.
    Fix: the original computed `k %= len(nums)` *before* checking for an
    empty list, so rotating [] raised ZeroDivisionError even though the
    code's own comments claimed empty lists were handled.
    """
    def rotate(self, nums: List[int], k: int) -> None:
        """Rotate `nums` right by k positions, in place."""
        # Guard first: an empty (or single-element) list needs no work,
        # and len(nums) == 0 would make the modulo below divide by zero.
        if len(nums) <= 1:
            return
        # k modulo n: rotating by n is a no-op, n+1 acts like 1, etc.
        k %= len(nums)
        if k == 0:
            return
        nums[:k], nums[k:] = nums[-k:], nums[:-k]
        print(nums)
if __name__ == '__main__':
# Create two test cases
sol1 = Solution1()
sol1.rotate([1, 2, 3, 4, 5, 6, 7], 3)
sol2 = Solution2()
sol2.rotate([1, 2, 3, 4, 5, 6, 7], 3)
| true |
052564dda05c5f0b5a2f640412d1cd713d615820 | Python | Auto4C/sqlite4dummy-project | /sqlite4dummy/tests/sqlite3_in_python/syntax/advance/test_INSDATE.py | UTF-8 | 4,043 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
INSERT AND UPDATE (INSDATE)简介
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
尝试进行insert, 如果数据主键冲突, 那么则update除主键以外的其他数据。
问题: 当被insert的不是所有的列时, 只会更新insert的相关列, 其他列的值会被保留。
如果被update的值跟constrain有冲突, 则会抛出异常。
与之相似的是UPSERT:
尝试进行update, 如果数据不存在, 那么则insert一条进去。
问题: 被update的可能仅仅只有几列, 有可能用户的原意是只修改其中几列。但由于如果
被update的行没有被找到, 那么被insert的也仅仅是几列。所以当其他列有NOT NULL限制
时, 会抛出异常。
"""
from __future__ import print_function
from pprint import pprint as ppt
import unittest
import sqlite3
import time
class Unittest(unittest.TestCase):
def test_UPSERT_implementation(self):
"""测试Insdate在sqlite中的正确实现。
"""
connect = sqlite3.connect(":memory:")
cursor = connect.cursor()
# http://www.w3schools.com/sql/sql_create_table.asp
create_table_sql = \
"""
CREATE TABLE employee
(
_id INTEGER PRIMARY KEY NOT NULL,
role TEXT,
name TEXT
)
"""
cursor.execute(create_table_sql)
data = [(1, "coder", "John"), (2, "sales", "Mike")]
cursor.executemany("INSERT INTO employee VALUES (?,?,?)", data)
upsert_sql = \
"""
REPLACE INTO employee
(_id, role, name)
VALUES
(
?,
?,
(SELECT name FROM Employee WHERE _id = ?)
);
"""
cursor.execute(upsert_sql, (2, "manager", 2))
print(cursor.execute("SELECT * FROM employee WHERE _id = 2").fetchone())
def test_UPSERT_performance(self):
"""测试Insert and Update的两种实现的性能比较。
结论: 利用REPLACE INTO employee实现。
"""
connect = sqlite3.connect(":memory:")
cursor = connect.cursor()
# http://www.w3schools.com/sql/sql_create_table.asp
create_table_sql = \
"""
CREATE TABLE employee
(
_id INTEGER PRIMARY KEY NOT NULL,
role TEXT,
name TEXT,
age INTEGER,
height REAL
)
"""
cursor.execute(create_table_sql)
data = [(1, "coder", "John", 30, 1.67), (2, "sales", "Mike", 45, 1.77)]
cursor.executemany("INSERT INTO employee VALUES (?,?,?,?,?)", data)
insert_sql = "INSERT INTO employee (_id, role) VALUES (?,?)"
update_sql = "UPDATE employee SET _id = ?, role = ? WHERE _id = ?"
insdate_sql= \
"""
REPLACE INTO employee
(_id, role, name, age, height)
VALUES
(
?,
?,
(SELECT name FROM Employee WHERE _id = ?),
(SELECT age FROM Employee WHERE _id = ?),
(SELECT height FROM Employee WHERE _id = ?)
);
"""
# Performance test
st = time.clock()
for i in range(1000):
try:
cursor.execute(insert_sql, (2, "manager"))
except sqlite3.IntegrityError:
cursor.execute(update_sql, (2, "manager", 2))
print("insert and update takes %.6f..." % (time.clock() - st))
print(cursor.execute("SELECT * FROM employee WHERE _id = 2").fetchone())
st = time.clock()
for i in range(1000):
cursor.execute(insdate_sql, (2, "CFO", 2, 2, 2))
print("upsert takes %.6f..." % (time.clock() - st))
print(cursor.execute("SELECT * FROM employee WHERE _id = 2").fetchone())
if __name__ == "__main__":
unittest.main() | true |
17697ce37db41dc0cedd59fbed39b8d1009e45da | Python | oyl1998/MLL | /Linear_Regression.py | UTF-8 | 1,348 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
Name: Linear_Regression.py
Auth: long_ouyang
Time: 2020/10/5 15:00
'''
import torch
x_data = torch.Tensor([ [1.0], [2.0], [3.0] ])
y_data = torch.Tensor([ [2.0], [4.0], [6.0] ])
class LinearModel(torch.nn.Module):
    """Single-feature linear regression model: y = w * x + b."""

    def __init__(self):
        super(LinearModel, self).__init__()
        # One input feature mapped to one output value.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # nn.Module.__call__ dispatches to this method.
        return self.linear(x)
model = LinearModel()
criterion = torch.nn.MSELoss(size_average=False)  # mean squared error loss
# 优化器 在进行梯度更新的时候用learningrate进行优化
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
'''
torch.optim.Adagrad()
torch.optim.Adam()
torch.optim.Adamax()
torch.optim.ASGD()
torch.optim.LBFGS()
torch.optim.RMSprop()
torch.optim.Rprop()
torch.optim.SGD()
'''
for epoch in range(1000):
y_pred = model(x_data)
loss = criterion(y_pred, y_data)
print(epoch, loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())
x_test = torch.Tensor([[4.0]])
y_test = model(x_test)
print('y_pred=', y_test.data)
| true |
ba03f0487b0871914d6d6a3a525243e16534ece1 | Python | pcsfilho/ross_biprob | /src/biprob_gazebo/src/biprob_gazebo/biprob.py | UTF-8 | 3,374 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python
"""
Author: Paulo Cezar dos Santos Filho
version: 2.0
Class name: Biprob
"""
import random
from threading import Thread
import math
import rospy
import time
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
"""
Esta classe
"""
class Biprob:
"""
Classe Cliente ROS para controle do Biprob no Gazebo
"""
rospy.loginfo("Biprob")
def __init__(self,ns="/biprob/"):
self.ns=ns
self.joints=None
self.angles=None
"""
A funcao Subscriber() diz ao ROS que deseja-se receber mensagens
do topico /biprob/joint_states. Com o tipo de mensagens sensor_msgs.msg.
Esta eh uma mensagem que contem os dados que descrevem o estado de um conjunto
de articulacoes controladas. Este procedimento invoca uma chamada para o
no mestre ROS, que mantem um registro sobre quem eh a publicacao e quem
esta subscrevendo. As mensagens sao passadas para uma funcao de retorno,
aqui chamada _cb_joints, como argumento. O ultimo parametro para a funcao
Subscriber() eh o tamanho da fila de mensagens. Se as mensagens estao chegando
mais rapido do que eles estao sendo processados, entao, este eh o numero de
mensagens que serao bufferizadas.
"""
self._sub_joints=rospy.Subscriber(ns+"joint_states",JointState,self._cb_joints,queue_size=1)
rospy.loginfo("Povoando juntas...")
while not rospy.is_shutdown():
if self.joints is not None: break
rospy.sleep(0.2)
rospy.loginfo("Povoando juntas...")
rospy.loginfo("Juntas Povoadas")
rospy.loginfo("Criando Editores de comando de juntas")
self._pub_joints={}
"""
A funcao Publisher() declara que o no esta publicando para o topico
/biprob/joint_name_position_controller/command uma mensagem do tipo
std_msgs.msg.Float64.
"""
for i in self.joints:
publisher=rospy.Publisher(self.ns+i+"_position_controller/command",Float64)
self._pub_joints[i]=publisher
rospy.sleep(1)
"""
A funcao Publisher() declara que o no esta publicando para o topico
/biprob/cmd_vel uma mensagem do tipo geometry_msgs/Twist.msg.
"""
self._pub_cmd_vel=rospy.Publisher(ns+"cmd_vel",Twist)
#Funcoes para manipulacao das juntas
"""
Esta funcao
"""
def set_walk_velocity(self,x,y,t):
msg=Twist()
msg.linear.x=x
msg.linear.y=y
msg.angular.z=t
self._pub_cmd_vel.publish(msg)
"""
Esta funcao
"""
def _cb_joints(self,msg):
if self.joints is None:
self.joints=msg.name
self.angles=msg.position
"""
Esta funcao retorna o angula de uma junta
"""
def get_angles(self):
if self.joints is None: return None
if self.angles is None: return None
return dict(zip(self.joints,self.angles))
"""
Esta funcao seta um angulo para a junta recebida em rad.
"""
def set_angles(self,angles):
for j,v in angles.items():
if j not in self.joints:
rospy.logerror(j)
continue
self._pub_joints[j].publish(v)
| true |
d619ea223435a35bdf4bd24bf2863eda882a0700 | Python | johnnynode/python-spider | /contents/code/selenium/5.py | UTF-8 | 706 | 2.75 | 3 | [
"MIT"
] | permissive | from selenium import webdriver
from selenium.webdriver import ActionChains
import time
#创建浏览器对象
driver = webdriver.Chrome()
#加载指定url地址
url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'
driver.get(url)
# 切换Frame窗口
driver.switch_to.frame('iframeResult')
#获取两个div节点对象
source = driver.find_element_by_css_selector("#draggable")
target = driver.find_element_by_css_selector("#droppable")
#创建一个动作链对象
actions = ActionChains(driver)
#将一个拖拽操作添加到动作链队列中
actions.drag_and_drop(source,target)
time.sleep(3)
#执行所有存储的操作(顺序被触发)
actions.perform()
#driver.close() | true |
a7a3b9fd6347f37c33763be8c4566c846fc189b4 | Python | italo-batista/problems-solving | /storm/dp_LIS.py | UTF-8 | 285 | 2.953125 | 3 | [
"MIT"
] | permissive | # coding: utf-8
sequence = map(int, raw_input().split())
N = len(sequence)
dp = [1] * (N+1)
ans = 0
for i in xrange(N):
dp[i] = 1
for j in xrange(i):
if (sequence[j] < sequence[i] and dp[i] <= dp[j]):
dp[i] = dp[j] + 1
if (ans < dp[i]):
ans = dp[i]
print ans
| true |
12f7e624da1eb36a5266d5c6c3505d1e3cf3becb | Python | syunnashun/self_taught | /3.intro_charange.py | UTF-8 | 511 | 3.65625 | 4 | [] | no_license | # 第3章のチャレンジ
# http://tinyurl.com/zx7o2v9
print('ジオブレイク70S', 'ジオブレイク80S', 'Fレーザー7S')
x = 7
if x < 10:
print('伸びしろですね')
elif 10 <= x <= 25:
print('もっとできるはずさ')
else:
print('慢心ではなく自信を持て!')
print(2020 % 825)
print(2020 // 825)
barth_year = 2000
age = 2020 - barth_year
if age >= 20:
print('もうお酒が飲めるのだ')
else:
print('まだおこちゃまでしょ?') | true |
49f3338d1eabc0944b8148020a580752bdb6c009 | Python | Leputa/Leetcode | /python/725.Split Linked List in Parts.py | UTF-8 | 1,053 | 3.375 | 3 | [] | no_license | import math
# Definition for singly-linked list.
class ListNode:
    # Minimal singly-linked-list node: a payload and a next pointer.
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def splitListToParts(self, root, k):
        """
        Split the linked list starting at `root` into `k` consecutive
        parts of near-equal length; earlier parts receive the extra
        nodes when the length is not divisible by k.

        :type root: ListNode (or None)
        :type k: int
        :rtype: List[List[int]] -- each part is returned as a plain list
                of node *values* (not as ListNode heads), matching the
                original implementation's behavior.
        """
        # Count the nodes in the list.
        length = 0
        node = root
        while node is not None:
            length += 1
            node = node.next
        # divmod replaces the original loop of float ceil divisions:
        # the first `extra` parts get one node more than the rest, which
        # is exactly the distribution the old while-loop produced.
        base, extra = divmod(length, k)
        sizes = [base + 1] * extra + [base] * (k - extra)
        # Walk the list once, slicing off each part's values.
        node = root
        ans = []
        for size in sizes:
            part = []
            for _ in range(size):
                part.append(node.val)
                node = node.next
            ans.append(part)
        return ans
root=ListNode(1)
for i in range(10,1,-1):
node=ListNode(i)
node.next=root.next
root.next=node
print(Solution().splitListToParts(root,3))
| true |
958d9dd1bedf0dcd3c1023875b1406869c42ec9f | Python | jeongmin-seo/two-stream-pytorch | /data_loader/3d_loader.py | UTF-8 | 5,231 | 2.75 | 3 | [] | no_license | import random
import os
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torch
class OneCubeDataset(Dataset):
def __init__(self, dic, in_channel, root_dir, mode, transform=None):
# Generate a 16 Frame clip
self.keys = list(dic.keys())
self.values = list(dic.values())
self.dic = dic
self.root_dir = root_dir
self.transform = transform
self.mode = mode
self.in_channel = in_channel
self.img_rows = 112
self.img_cols = 112
self.n_label = 51
def reset_idx(self, _idx, _n_frame):
if _idx > _n_frame:
return self.reset_idx(_idx - _n_frame, _n_frame)
else:
return _idx
def stack_frame(self, keys, _n_frame, _step):
video_path = os.path.join(self.root_dir, keys.split('-')[0])
cube = torch.FloatTensor(3, self.in_channel,self.img_rows, self.img_cols)
for j in range(self.in_channel):
idx = self.reset_idx(j * _step + 1, _n_frame)
frame_idx = "image_%05d.jpg" % idx
image = os.path.join(video_path, frame_idx)
img = (Image.open(image))
X = self.transform(img)
cube[:, j, :, :] = X
img.close()
return cube
def get_step_size(self, _nb_frame):
if _nb_frame <= self.in_channel:
return 1
else:
return int(_nb_frame/self.in_channel)
def __len__(self):
return len(self.keys)
def __getitem__(self, idx):
# print ('mode:',self.mode,'calling Dataset:__getitem__ @ idx=%d'%idx)
cur_key = self.keys[idx]
nb_frame = self.dic[cur_key][0]
label = self.dic[cur_key][1]
step = self.get_step_size(nb_frame)
data = self.stack_frame(cur_key, nb_frame, step)
sample = (data, label)
return sample
class CubeDataLoader:
def __init__(self, BATCH_SIZE, num_workers, in_channel, path, txt_path, split_num):
self.BATCH_SIZE = BATCH_SIZE
self.num_workers = num_workers
self.in_channel = in_channel
self.data_path = path
self.text_path = txt_path
self.split_num = split_num
# split the training and testing videos
self.train_video, self.test_video = self.load_train_test_list()
@staticmethod
def read_text_file(file_path):
tmp = {}
f = open(file_path, 'r')
for line in f.readlines():
line = line.replace('\n', '')
split_line = line.split(" ")
tmp[split_line[0]] = [int(split_line[1]), int(split_line[2])] # split[0] is video name and split[1] and [2] are frame num and class label
return tmp
def load_train_test_list(self):
train_file_path = os.path.join(self.text_path, "train_split%d.txt" % self.split_num)
test_file_path = os.path.join(self.text_path, "test_split%d.txt" % self.split_num)
train_video = self.read_text_file(train_file_path)
test_video = self.read_text_file(test_file_path)
return train_video, test_video
def run(self):
train_loader = self.train()
val_loader = self.val()
return train_loader, val_loader, self.test_video
def train(self):
training_set = OneCubeDataset(dic=self.train_video,
in_channel=self.in_channel,
root_dir=self.data_path,
mode='train',
transform=transforms.Compose([
transforms.Resize([224, 224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5,], std=[0.5,])
]))
print('==> Training data :', len(training_set), ' videos', training_set[1][0][0].size())
train_loader = DataLoader(
dataset=training_set,
batch_size=self.BATCH_SIZE,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True
)
return train_loader
def val(self):
validation_set = OneCubeDataset(dic=self.test_video,
in_channel=self.in_channel,
root_dir=self.data_path,
mode='val',
transform=transforms.Compose([
transforms.Resize([224,224]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5,], std=[0.5,])
]))
print('==> Validation data :', len(validation_set), ' frames', validation_set[1][1].size())
# print validation_set[1]
val_loader = DataLoader(
dataset=validation_set,
batch_size=self.BATCH_SIZE,
shuffle=False,
num_workers=self.num_workers)
return val_loader
# A A
# (‘ㅅ‘=)
# J.M.Seo
| true |
73a3e1e7275b3276d4f6ec3d1b67cd92213656bd | Python | JohnDoe1996/bysj | /python/camera.py | UTF-8 | 2,954 | 2.984375 | 3 | [] | no_license | import cv2
import os
class Frame:
@staticmethod
def BGR2RGB(frame):
newFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # 颜色由BGR转为RGB
return newFrame
@staticmethod
def BGR2Gray(frame):
newFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # 颜色由BGR转为灰度
return newFrame
@staticmethod
def RGB2Gray(frame):
newFrame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) # 颜色由RGB转为灰度
return newFrame
@staticmethod
def cuter(frame, x, y, width, height, size_x, size_y): # 图片裁剪
newFrame = cv2.resize(
frame[y:(y + height), x:(x + width)],
(size_x, size_y)
)
return newFrame
@staticmethod
def line(frame, pos_list, color_tuple ): # 连线
posHull = cv2.convexHull(pos_list)
cv2.drawContours(frame, [posHull], -1, color_tuple, 1)
return frame
@staticmethod
def readCache(): # 读取缓存文件
newFrame = cv2.imread("cache/frame.jpg")
return newFrame
@staticmethod
def saveCache(frame): # 写入缓存文件
cv2.imwrite("cache/frame.jpg", frame)
@staticmethod
def flipHorizintal(frame):
newFrame = cv2.flip(frame, 1)
return newFrame
@staticmethod
def flipVertical(frame):
newFrame = cv2.flip(frame, 0)
return newFrame
class Camera(Frame):
frame = None
def __init__(self):
self.delCacheFile()
self.cap = cv2.VideoCapture(0) # 创建摄像头句柄
@staticmethod
def delCacheFile():
if os.path.exists("cache/frame.jpg"):
os.remove("cache/frame.jpg")
if os.path.exists("cache/people.json"):
os.remove("cache/people.json")
def getFrame(self):
success, frame = self.cap.read() # 读取当前帧
self.frame = frame
return frame
def flipHorizintal(self):
self.frame = super().flipHorizintal(self.frame)
return self.frame
def flipVertical(self):
self.frame = super().flipVertical(self.frame)
return self.frame
def getRGBFrame(self):
RGBFrame = self.BGR2RGB(self.frame)
return RGBFrame
def getGrayFrame(self):
GrayFrame = self.BGR2Gray(self.frame)
return GrayFrame
def getFaceFrame(self, x, y, width, height):
Frame_128x128 = self.cuter(self.getRGBFrame(), x, y, width, height, 128, 128)
return Frame_128x128
def getFaceFrameAndLine(self, x, y, width, height,
left_eye_pos, right_eye_pos, mouth_pos):
lineFrame = self.getFaceFrame(x, y, width, height)
self.line(lineFrame, left_eye_pos, (0, 255, 0))
self.line(lineFrame, right_eye_pos,(0, 255, 0))
self.line(lineFrame,mouth_pos,(0, 0, 255))
return lineFrame
def __del__(self):
self.delCacheFile()
self.cap.release()
| true |
f0874e47b7854ea1799657c96f9b3b58b96b816b | Python | ankssri/Utilities | /DTMetricJsonUtility.py | UTF-8 | 1,508 | 2.8125 | 3 | [] | no_license |
import json
import requests
#DT Tenant
tenant = "https://<your tenant>.live.dynatrace.com"
#DT API Token
ApiTokenValue = "<Your DT API Token>"
#Step 1 GET json directly from DT server. This example is for only 1 metric
querystring = {'resolution':'10m','Api-Token':ApiTokenValue, 'metricSelector':'builtin:host.cpu.usage'}
ResponsefromDT = requests.get(tenant+"/api/v2/metrics/query", params=querystring)
print('1. JSON API Download Success-----------------')
#print(ResponsefromDT.json())
print('-----------------')
#Step 2 Save data to file on drive
with open('CPUHealth.python', 'w') as json_file:
json.dump(ResponsefromDT.json(), json_file)
print('2. Save Success ------------------')
#Step 3 load json file from local directory
with open('C:\Ankur\Scripts\CPUMetric.json') as f:
data_DICT = json.load(f)
# Step 4 Output: dt metrics format
print('3 Print file -----------------')
print(data_DICT)
print('4 Print name & values of CPU metric -----------------')
print(data_DICT["result"][0]["metricId"])
print(data_DICT["result"][0]["data"][0]["values"])
print('Check if any time Host CPU usage is greater than 75%-----------------')
for x in data_DICT['result'][0]['data'][0]['values']:
print(x)
if x is None:
y = 0
else:
y = float(x) #converting string to float
if y > 75: #if CPU is more than 75 then print and exit
print('done')
break
else:
print('All Good')
| true |
ff48d69784668a1165f36ace08e150d16424502d | Python | msh0576/RL_WCPS | /DeepWNCS/pytorch-gym-master/base/paraenv2.py | UTF-8 | 1,830 | 2.9375 | 3 | [] | no_license | # Qin Yongliang
# start multiple environment in parallel
from llll import PythonInstance
slave_code = '''
from llll import sbc
sbc = sbc()
import numpy
import gym
# import everything relevant here
envname = sbc.recv()
env = gym.make(envname)
print('environment instantiated:', envname)
while True:
obj = sbc.recv() # tuple pair of funcname and arg
f = obj[0]
arg = obj[1]
if f == 'reset':
obs = env.reset()
sbc.send(obs)
elif f == 'step':
ordi = env.step(arg)
sbc.send(ordi)
else:
print('unsupported:',f,arg)
break
'''
class RemoteEnv:
def __init__(self,envname):
self.pi = PythonInstance(slave_code)
self.pi.send(envname)
def reset(self,):
self.pi.send(('reset', None))
def future():
return self.pi.recv()
return future
def step(self,action):
self.pi.send(('step', action))
def future():
return self.pi.recv()
return future
if __name__ == '__main__':
import numpy,gym
envname = 'CartPole-v1'
local_env = gym.make(envname)
# create 16 envs in parallel
remote_envs = [RemoteEnv(envname) for i in range(16)]
# step 20 steps in parallel on 16 envs
# reset all envs and obtain first observation
futures = []
for e in remote_envs:
future = e.reset()
futures.append(future)
for future in futures:
obs = future()
print(obs)
# for 20 steps:
for j in range(20):
# step all 16 envs simultaneously
futures = []
for e in remote_envs:
future = e.step(local_env.action_space.sample())
futures.append(future)
# collect results simultaneously
for future in futures:
o,r,d,i = future()
print(o)
| true |
9c068eb08e7e947950978ba519795ff18b9165f1 | Python | philippreiners/Masterarbeit2 | /Validierung_Python/create_emi_station_file.py | UTF-8 | 1,193 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 10:30:05 2018
@author: Pille
"""
path_in = 'D:/Uni/Masterarbeit/Emissivity_stations/Changed2/'
path_out = 'D:/Uni/Masterarbeit/Emissivity_stations/Changed2/station_emis/'
items = os.listdir(path_in)
newlist = []
for names in items:
if names.endswith(".txt"):
newlist.append(names)
firstfile = pd.read_csv(path_in + '/' + newlist[0])
stations = firstfile['Station']
for station in stations:
output = pd.DataFrame()
for i in range(len(newlist)):
data = pd.read_csv(path_in + '/' + newlist[i])
month = data.loc[data['Station'] == station]
month['Month'] = i+1
output = output.append(month)
output.to_csv(path_out + '/' + station + '_emi.csv')
path_in = 'D:/Uni/Masterarbeit/Emissivity_stations/Changed2/station_emis/'
items = os.listdir(path_in)
newlist = []
for names in items:
if names.endswith(".csv"):
newlist.append(names)
for i in range(len(newlist)):
data = pd.read_csv(path_in + '/' + newlist[i])
emi29 = data['[3600x7200] Emis_29 MODIS_MONTHLY_0.05DEG_CMG_LST (8-bit unsigned integer)']
plt.plot(emi29) | true |
e09f1abf437a62d3e03202aaa46a3ab07baa5195 | Python | InfiniteCuriosity/Pandas-for-Everyone | /2. Pandas Data Structures.py | UTF-8 | 2,774 | 3.921875 | 4 | [] | no_license | #### Pandas Data Structures ####
import pandas as pd
s = pd.Series(['banana', 42])
print(s)
# manually assign index values to row names to a series by passing in a Python list
# 2.2.1 Creating a series
s1 = pd.Series(['Wes McKinney', 'Creator of Pandas'],index = ['Person', 'Who'])
print(s1)
# 2.2.2 Create a data frame
scientists = pd.DataFrame({
'Name': ['Rosalind Frankin', 'William Gosset'],
'Occupation': ['Chemist', 'Statistician'],
'Born': ['1920-07-25', '1876-06-13'],
'Died': ['1958-04-16', '1937-10-16'],
'Ages': [37, 61]
})
print(scientists)
# 2.3 The Series
first_row = scientists[scientists.values == 'William Gosset']
print(first_row)
age37 = scientists[scientists.values == 37]
print(age37)
first_row.index
first_row.values
# 2.3.1 The Series is ndarray-like
# get the 'Age' column
scientists
ages = scientists['Ages']
print(ages)
print(ages.mean())
print(ages.max())
print(ages.std())
# 2.3.2 Boolean Subsetting: Series
scientists1 = pd.read_csv('/Users/russellconte/Documents/Pandas for Everyone/pandas_for_everyone-master/data/scientists.csv')
scientists1
Ages = scientists1['Age']
print(Ages)
print(Ages.mean())
print(Ages[Ages>Ages.mean()])
type(Ages[Ages>Ages.mean()])
print(Ages)
rev_ages = Ages.sort_index(ascending = False) # prints the result in descending order
print(rev_ages)
# 2.4 The Data Frame
# Boolean vectors will subset rows:
scientists1[scientists1['Age'] >60]
first_half = scientists1[:4]
last_half = scientists1[4:]
print(first_half)
print(last_half)
# 2.5 Making changes to a searies and to a data frame
# 2.5.1 adding additional columns
print(scientists1['Born'].dtype)
print(scientists1['Died'].dtype)
born_datetime = pd.to_datetime(scientists1['Born'], format = '%Y-%m-%d')
print(born_datetime)
scientists1
died_datetime = pd.to_datetime(scientists1['Died'], format = '%Y-%m-%d')
print(died_datetime)
# Add new columns with the born and died datetime values
pd.set_option('display.max_columns', None) # allows to show all columns!
scientists1['born_dt'], scientists1['died_dt'] = (born_datetime, died_datetime)
print(scientists1)
scientists1['age_days_dt'] = (scientists1['died_dt'] - scientists1['born_dt'])
print(scientists1)
#2.5.3 dropping value
scientists2 = scientists1.drop(['Age'], axis = 1)
print(scientists2)
# 2.6 Exporting and importing data
# 2.6.1 pickle
names = scientists1['Name']
print(names)
names.to_pickle('/Users/russellconte/Documents/scientist_name_series.pickle')
scientist_names_from_pickle = pd.read_pickle('/Users/russellconte/Documents/scientist_name_series.pickle')
print(scientist_names_from_pickle) # Yay it worked!! Double Yay!!
# 2.6.2 CSV files - my very old and familar friend!!
names.to_csv('/Users/russellconte/Documents/scientist_name_series.csv')
print(names.to_csv) | true |
3ecf1235ff528ba4b15e43eb278f500e906b311e | Python | victoriaogomes/Unicorn.io | /semantic_analyzer/semantic_analyzer.py | UTF-8 | 438 | 2.9375 | 3 | [] | no_license | from semantic_analyzer import visitor as vt
class SemanticAnalyzer:
    """Walks an AST with a Visitor to perform semantic analysis.

    The visitor shares the analyzer's symbol table and token list and is
    driven via the visitor pattern (statement.accept(visitor)).
    """
    def __init__(self, symbol_table, ast, tokens_list):
        self.symbol_table = symbol_table
        self.ast = ast
        self.tokens_list = tokens_list
        # The visitor carries the analysis state.
        self.visitor = vt.Visitor(self.symbol_table, tokens_list)
    def analyze(self):
        # Dispatch each top-level statement into the visitor.
        for statement in self.ast:
            statement.accept(self.visitor)
        # Return the (possibly updated) token list held by the visitor.
        return self.visitor.tokens_list
| true |
acb48b8d2099488cf00814348e6b755ae22bb819 | Python | SaiNithin96/pythonfile | /datahiding.py | UTF-8 | 872 | 3.828125 | 4 | [] | no_license | # Datahiding or Encapsulation --- Restricting the acces to variables or methods..
# class Student:
# a=56 #(Public variables) # Those which can be accessed in or outside of class with out any restrictions.
# __b=76 #Private Varibales # Those cannot be accessed outside of the class.
# def add(self): #public
# return self.a+self.__b
# def __sub(self): #private method
# return self.__b-self.a
# obj=Student()
# print(obj.add())
# print(obj.a)
# # print(obj.__b)
# # print(obj.__sub())
# # accessing ptivate varibales
# print(obj._Student__b)
# print(obj._Student__sub())
# #Method Overriding
# class First:
# a=5
# b=6
# def add(self):
# return self.a+self.b
# class Second(First):
# a=67
# b=67
# def add(self):
# return self.a-self.b
# obj=Second()
# print(obj.add())
# Both Method Overriding and Overloading Comes under POLYMORPHISM
| true |
7af398f25c5b4c4c969d9db84456b2ee5c68487b | Python | yotamshadmon/puzzle | /src/puzzle.py | UTF-8 | 2,941 | 3.21875 | 3 | [] | no_license | import random
class pos:
def __init__(self, i, j):
self.i = i
self.j = j
class puzzle:
def __init__(self, n, m, level):
self.n = n
self.m = m
self.board = [[j*n+i+1 for i in range(0, n)] for j in range(0, m)]
self.board[m-1][n-1] = 0
self.blank = pos(n-1, m-1)
self.shuffle(level)
def validDirs(self, lastMove):
dirs = []
if self.blank.i > 0:
dirs.append('L')
if self.blank.i < self.n-1:
dirs.append('R')
if self.blank.j > 0:
dirs.append('U')
if self.blank.j < self.m-1:
dirs.append('D')
if lastMove != None:
dirs.remove(self.opositeDir(lastMove))
return dirs
def move(self, dir):
validMove = 0
if dir == 'L':
if self.blank.i-1 >= 0:
self.board[self.blank.j][self.blank.i], self.board[self.blank.j][self.blank.i-1] = self.board[self.blank.j][self.blank.i-1], self.board[self.blank.j][self.blank.i]
self.blank = pos(self.blank.i-1,self.blank.j);
validMove = 1
if dir == 'R':
if self.blank.i+1 < self.n:
self.board[self.blank.j][self.blank.i], self.board[self.blank.j][self.blank.i+1] = self.board[self.blank.j][self.blank.i+1], self.board[self.blank.j][self.blank.i]
self.blank = pos(self.blank.i+1,self.blank.j);
validMove = 1
if dir == 'U':
if self.blank.j-1 >= 0:
self.board[self.blank.j][self.blank.i], self.board[self.blank.j-1][self.blank.i] = self.board[self.blank.j-1][self.blank.i], self.board[self.blank.j][self.blank.i]
self.blank = pos(self.blank.i,self.blank.j-1);
validMove = 1
if dir == 'D':
if self.blank.j+1 < self.m:
self.board[self.blank.j][self.blank.i], self.board[self.blank.j+1][self.blank.i] = self.board[self.blank.j+1][self.blank.i], self.board[self.blank.j][self.blank.i]
self.blank = pos(self.blank.i,self.blank.j+1);
validMove = 1
return validMove
def opositeDir(self, dir):
if dir == 'U':
return 'D'
if dir == 'D':
return 'U'
if dir == 'R':
return 'L'
if dir == 'L':
return 'R'
def shuffle(self, level):
moveCount = 0
dir = None
while moveCount < level:
dirs = self.validDirs(dir)
dir = dirs[random.randint(0, len(dirs) - 1)]
moveCount += self.move(dir)
def printBoard(self):
for row in self.board:
for col in row:
if col == 0:
print(' ', '\t', end='')
else:
print(col,'\t', end='')
print('\n', end='')
print('\n', end='')
def checkBoard(self):
ret = True
self.board[self.blank.j][self.blank.i] = self.m * self.n
for j in range(0, self.m):
for i in range(0, self.n):
if self.board[j][i] != j*self.n+i+1:
ret = False
self.board[self.blank.j][self.blank.i] = 0
return ret
def play(self):
self.printBoard()
dir = None
while dir != 'Q':
dir = input('direction? ').upper()
self.move(dir)
self.printBoard()
if self.checkBoard() == True:
print('SUCCESS')
break
def main():
    """Create a 4x4 board scrambled by three random moves and play it."""
    game = puzzle(4, 4, 3)
    game.play()
# Start the interactive game when executed as a script.
if __name__ == "__main__":
    main()
| true |
4bd60c0c52ddfbd7e40df588968ead8781626917 | Python | xiaogengchen/di_ai_mu | /05/str_unicode.py | UTF-8 | 608 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
普通字符串与unicode字符串之间的转换
'''
# import
__author__='XiaoGengchen'
__version__='1.0'
def strToUnicode(plainString=''):
    '''Decode a plain (byte) string into a unicode string using UTF-8, and print it (Python 2).'''
    # str -> unicode conversion uses decode(); Python 2 only -- py3 str has no decode()
    temp_str = plainString.decode("utf-8")
    print temp_str,type(temp_str)
def unicodeToStr(unicodeString=u'') :
    '''Encode a unicode string into a plain byte string using encode(), and print it (Python 2).'''
    # NOTE(review): encodes with GBK while strToUnicode decodes UTF-8 --
    # presumably targeting a GBK console; confirm the intended codec.
    utf8String = unicodeString.encode("gbk")
    print utf8String,type(utf8String)
# Demo: run both conversions on a mixed Chinese/ASCII sample string.
if __name__=="__main__":
    unicodeToStr(u"你好world")
    strToUnicode("你好world")
| true |