blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
51798a388b3fe2392c3e3bcaf15594836ab91f62
|
Python
|
aliutkus/beads-presentation
|
/src/beads.py
|
UTF-8
| 12,302
| 3.25
| 3
|
[] |
no_license
|
import numpy as np
from scipy.special import erf
from scipy.stats import norm
import numbers
from copy import copy
from itertools import product
import matplotlib.pyplot as plt
from matplotlib import patches
def cgauss(x, mu, sigma):
    """Density of a circularly-symmetric complex Gaussian.

    Evaluates 1/(pi*sigma) * exp(-|x - mu|^2 / sigma), stripping any
    residual imaginary part so the returned values are real.
    """
    squared_dist = np.abs(x - mu) ** 2
    return np.real(np.exp(-squared_dist / sigma) / (np.pi * sigma))
def vec(z):
    """Return a (2, ...) real array stacking the real and imaginary parts of z."""
    arr = np.array(z)
    return np.stack((arr.real, arr.imag))
def w(n):
    """Primitive n-th root of unity, exp(2*pi*i / n)."""
    return np.exp(2j * np.pi / n)
class Distribution(object):
    """Base class for the probabilistic models.

    Only handles the scalar `weight` attached to each model, which makes
    mixtures and weighted products convenient to express.
    """

    def __init__(self):
        # every distribution starts with unit weight
        self.weight = 1

    def pdf(self, z):
        # density evaluation; subclasses override this
        pass

    def __rmul__(self, other):
        # `None * dist` acts as a no-op so that products can start from None
        if other is None:
            return self
        if not isinstance(other, numbers.Number):
            raise ArithmeticError('Cannot left multiply a distribution '
                                  'by anything else than a number, for '
                                  'the purpose of assigning a weight.')
        # multiplying by a scalar returns a reweighted copy
        scaled = copy(self)
        scaled.weight *= other
        return scaled

    def contour(self, canvas, ax=None, nlines=20, **kwargs):
        """Draw nlines equally spaced iso-density lines on the canvas grid."""
        if ax is None:
            fig, ax = canvas.fig()
        density = self.pdf(canvas.Z)
        ax.contour(canvas.X, canvas.Y, density,
                   levels=np.linspace(density.min(), density.max(), nlines),
                   **kwargs)
        plt.show()

    def draw(self, num):
        # sampling; subclasses override this
        pass
class Bead(Distribution):
    """Bead:
    A Bead object is a simple complex isotropic Gaussian."""

    def __init__(self, mu, sigma):
        """mu: complex mean; sigma: total variance E|z - mu|^2.

        sigma may be None for display-only beads (see plot)."""
        super().__init__()
        self.mu = mu
        self.sigma = sigma
        # Beads this one was built from, when created via fromBeads
        self.references = []

    def pdf(self, z):
        # weighted circular complex Gaussian density at z
        return self.weight * cgauss(z, self.mu, self.sigma)

    def __copy__(self):
        result = Bead(self.mu, self.sigma)
        result.weight = self.weight
        return result

    def draw(self, num):
        """Draw num samples from the isotropic complex Gaussian.

        Bug fix: a misplaced parenthesis used to scale only the imaginary
        part. Each of the real and imaginary parts now gets variance
        sigma/2, so that E|z - mu|^2 = sigma, consistent with the density
        cgauss(z, mu, sigma) = exp(-|z - mu|^2 / sigma) / (pi * sigma).
        """
        noise = np.random.randn(num) + 1j * np.random.randn(num)
        return noise * np.sqrt(self.sigma / 2) + self.mu

    def plot(self, canvas, ax, color, **kwargs):
        """Draw the bead as a translucent disc of radius sqrt(sigma),
        plus a marker at its center."""
        if self.sigma is not None:
            Canvas.circle(ax, self.mu, np.sqrt(self.sigma), color=color,
                          linewidth=3, fill=True, alpha=0.3, **kwargs)
        ax.plot(np.real(self.mu), np.imag(self.mu), 'o',
                markersize=20, color=color)
        plt.show()

    @staticmethod
    def fromBeads(references):
        """Bead of the sum of independent Beads: means and variances add.

        The source Beads are remembered in `references` (used by GMM.post)."""
        mean = sum([ref.mu for ref in references])
        sigma = sum([ref.sigma for ref in references])
        result = Bead(mean, sigma)
        result.references = references
        return result
class Donut(Distribution):
    """Donut:
    The Donut class implements the ideal distribution that the BEADS
    model approximates: a ring of radius b around mu with Gaussian
    radial spread."""

    def __init__(self, mu, b, sigma):
        # mu: complex center; b: ring radius; sigma: radial spread
        # (halved on storage — NOTE(review): pdf and draw then use
        # self.sigma directly; confirm the factor-2 convention)
        super().__init__()
        self.mu = mu
        self.b = b
        self.sigma = sigma/2

    def pdf(self, z):
        """Weighted, normalized donut density at z."""
        radius = np.abs(z-self.mu)
        # radial profile: Gaussian with mean b and scale self.sigma
        F = norm(self.b, self.sigma)
        # the normalizing constant for the donut distribution is
        # \int_{r,\theta}f\left(r\mid b,\sigma\right)drrd\theta
        # =\pi b(1-erf(-\frac{b}{\sigma\sqrt{2}}))
        # +\sqrt{2\pi}\exp(-\frac{b^{2}}{2\sigma2})$
        Z = (np.pi * self.b * (1-erf(-self.b / self.sigma / np.sqrt(2)))
             + np.sqrt(2 * np.pi)*np.exp(-self.b**2/2/self.sigma**2))
        return self.weight / Z * F.pdf(radius)

    def __copy__(self):
        # Bug fix: __init__ halves sigma, so the stored value must be
        # doubled on the way back in; the old code halved it twice.
        result = Donut(self.mu, self.b, 2 * self.sigma)
        result.weight = self.weight
        return result

    def draw(self, num):
        """Sample num points: uniform phase, Gaussian radius around b.

        Bug fix: samples are now centered on self.mu, as required by the
        pdf (which measures |z - mu|); previously mu was ignored.
        """
        phases = np.random.rand(num)*2*np.pi
        # NOTE(review): pdf uses self.sigma as the radial std while draw
        # uses sqrt(self.sigma) — confirm which convention is intended
        radius = np.random.randn(num)*np.sqrt(self.sigma)+self.b
        return self.mu + radius * np.exp(1j*phases)
class GMM(Distribution):
    """GMM:
    A Gaussian Mixture Model is a collection of Bead objects. The objects are
    in arbitrary numbers and positions, with arbitrary weights."""

    def __init__(self):
        super().__init__()
        # list of Bead components
        self.components = []
        # the GMM factors this mixture is a product of (see __mul__)
        self.product_of = []

    def total_weight(self):
        """Global weight times the summed component weights."""
        return self.weight * sum([comp.weight for comp in self.components])

    def __iadd__(self, other):
        """Append a Bead, or merge another GMM with weight renormalization."""
        if not (isinstance(other, Bead) or isinstance(other, GMM)):
            raise ArithmeticError('can only add a GMM or a Bead to a GMM')
        if isinstance(other, Bead):
            # if we want to add a Bead, we simply append it to the components
            self.components += [other]
            return self
        # more complicated case: we add one GMM to another. In this case,
        # we need to weight the components of each according to each GMM
        # global weight, and doing so, we make new copies of the Bead objects.
        # the multiplication creates new copies
        self.components = [self.weight * comp for comp in self.components]
        other_components = [other.weight * comp for comp in other.components]
        self.components += other_components
        total_weight = sum([comp.weight for comp in self.components])
        for comp in self.components:
            comp.weight /= total_weight
        self.weight = total_weight
        return self

    def __add__(self, other):
        result = copy(self)
        result += other
        return result

    def pdf(self, z):
        """Mixture density: sum of the (already weighted) component pdfs."""
        result = np.zeros(z.shape)
        for component in self.components:
            result += component.pdf(z)
        return result

    def __copy__(self):
        # shallow copy: the component Bead objects themselves are shared
        result = GMM()
        result.weight = self.weight
        result.components = [comp for comp in self.components]
        return result

    def draw(self, num):
        """Draw num samples: pick a component per sample, then sample it."""
        # get the weights (bug fix: a plain Python list has no .sum())
        weights = np.array([comp.weight for comp in self.components])
        weights /= weights.sum()
        # draw the random selection of the components according to weights
        select = np.random.multinomial(1, weights, num)
        # build the result (bug fix: the samples are complex; a float
        # array would silently drop the imaginary parts)
        result = np.empty((num,), dtype=complex)
        for i, comp in enumerate(self.components):
            # bug fix: np.nonzero returns a tuple of index arrays; take
            # the first axis so len() counts samples, not dimensions
            indices = np.nonzero(select[:, i])[0]
            result[indices] = comp.draw(len(indices))
        return result

    def __mul__(self, other):
        """Pointwise product of two mixtures, itself a GMM.

        Each pair of components multiplies into a new Bead (means and
        variances summed via Bead.fromBeads) which remembers its factors
        in `references`. `gmm * None` is a no-op so products can fold.
        """
        if other is None:
            return self
        if not isinstance(other, (GMM, Beads)):
            raise ArithmeticError('Can only multiply GMM with GMM')
        if other in self.product_of:
            raise ArithmeticError('Cannot include twice the same GMM in a'
                                  'product. Another one is needed.')
        result = GMM()
        # we want the product not to be nested: all operands need to be
        # simple GMM and not already part of the product
        def flatten(a, b, attr):
            attr_a = getattr(a, attr)
            attr_b = getattr(b, attr)
            res = attr_a + attr_b
            if not len(attr_a):
                res += [a]
            if not len(attr_b):
                res += [b]
            return res
        result.product_of = flatten(self, other, "product_of")
        # now incorporate all couples in the product components
        for a, b in product(self.components, other.components):
            references = flatten(a, b, "references")
            result += Bead.fromBeads(references)
        return result

    def post(self, mix, x):
        """Posterior of this source given the product `mix` observed at x.

        If self is not a factor of mix, it is independent of the
        observation and a plain copy is returned.
        """
        # if self is independent from mix, just return a copy
        if self not in mix.product_of:
            return copy(self)
        result = GMM()
        # We handle here the general case where the sources are general GMMs
        total_weight = 0
        for xcomp in mix.components:
            scomp = [c for c in self.components if c in xcomp.references]
            if len(scomp) != 1:
                raise IndexError('One mix component featured no unique'
                                 'Bead object from the source as'
                                 'reference')
            scomp = scomp[0]
            sigmas = scomp.sigma
            sigmax = xcomp.sigma
            # gain of this source within the mix component
            G = sigmas/sigmax
            # posterior weight: prior weight times the mix likelihood at x
            pi_post = scomp.weight * xcomp.pdf(x)
            result += pi_post * Bead(scomp.mu+G*(x-xcomp.mu),
                                     sigmas*(1-G))
            total_weight += pi_post
        for comp in result.components:
            comp.weight /= total_weight
        return result

    def plot(self, canvas, ax, color):
        """Plot every component on the given axes."""
        for comp in self.components:
            comp.plot(canvas, ax, color)

    @staticmethod
    def product(factors):
        """Multiply the elements of a list of GMMs.

        Starts from None, which Distribution.__rmul__ / GMM.__mul__
        treat as the multiplicative identity."""
        result = None
        for factor in factors:
            result *= factor
        return result
class Beads(GMM):
    """A GMM whose components are n Beads evenly spaced on the circle of
    radius b around mu (the BEADS approximation of a Donut)."""

    def __init__(self, mu, b, sigma, weights):
        """weights: either an int n (n beads with uniform weights) or a
        sequence of per-bead weights; weights are normalized to sum to 1.

        Bug fix: a weight *sequence* used to crash on `weights.sum()`;
        it is now coerced to a float ndarray (a fresh copy, so the
        caller's array is not mutated by the in-place normalization).
        """
        super().__init__()
        if isinstance(weights, int):
            weights = np.ones(weights)
        else:
            weights = np.array(weights, dtype=float)
        weights /= weights.sum()
        n = len(weights)
        # bead centers: mu + b * w(n)**k, the n-th roots of unity scaled by b
        omega = mu + b * w(n) ** np.arange(n)
        for (center_c, pi_c) in zip(omega, weights):
            self += pi_c * Bead(center_c, sigma)
class Canvas:
    """Meshgrid over a rectangle of the complex plane, plus small
    matplotlib helpers to draw distributions on it."""
    def __init__(self, minx, maxx, Nx, miny, maxy, Ny):
        # grid resolution (number of samples along each axis)
        self.Nx = Nx
        self.Ny = Ny
        # bounding box of the canvas in the complex plane
        self.minx = minx
        self.maxx = maxx
        self.miny = miny
        self.maxy = maxy
        # create a meshgrid
        X = np.linspace(minx, maxx, Nx)
        Y = np.linspace(miny, maxy, Ny)
        self.X, self.Y = np.meshgrid(X, Y)
        # complex coordinate of every grid point
        self.Z = self.X + 1j*self.Y
    def fig(self, subplots=1):
        """Create a figure with `subplots` axes, cleared to canvas bounds."""
        (fig, ax) = plt.subplots(1, subplots)
        self.clear(ax)
        fig.show()
        return (fig, ax)
    def ax(self, title='Figure'):
        """Create a fresh single axes.

        NOTE(review): `title` is currently unused — confirm whether it
        should be applied to the new axes."""
        fig, ax = self.fig()
        return ax
    def clear(self, ax):
        """Reset one axes (or an ndarray of axes) to the canvas bounds."""
        if not isinstance(ax, np.ndarray):
            ax = [ax]
        for a in ax:
            a.clear()
            a.set_xlim([self.minx, self.maxx])
            a.set_ylim([self.miny, self.maxy])
            a.set_xlabel('Real part', fontsize=13)
            a.set_ylabel('Imaginary part', fontsize=13)
            a.grid(True)
    """def plot(self, distributions, ax, colors=None, **kwargs):
        if not isinstance(distributions, list):
            distributions = [distributions]
        if colors is not None:
            for (dist, color) in zip(distributions, colors):
                dist.plot(self, ax, color, **kwargs)
        else:
            for dist in distributions:
                dist.plot(self, ax, **kwargs)"""
    @staticmethod
    def circle(ax, center, radius, color, **kwargs):
        """Draw a filled circle of `radius` at complex `center` on ax."""
        ax.add_artist(patches.Circle((np.real(center), np.imag(center)),
                                     radius, facecolor=color,
                                     edgecolor=color, **kwargs))
        plt.show()
    """def circles(self, ax, centers, radius, colors, **kwargs):
        if ax is None:
            ax = self.ax()
        if isinstance(radius, numbers.Number):
            radius = [radius]
        if isinstance(centers, numbers.Number):
            centers = [centers] * len(radius)
        for (center, rad, color) in zip(centers, radius, colors):
            self.circle(ax, center, rad, color, **kwargs)"""
    @staticmethod
    def arrow(ax, start, delta, **kwargs):
        """Draw an arrow from complex `start` by complex offset `delta`;
        returns the matplotlib artist."""
        h = ax.arrow(np.real(start), np.imag(start), np.real(delta),
                     np.imag(delta), head_width=1,
                     head_length=1, length_includes_head=True, **kwargs)
        plt.show()
        return h
    """@staticmethod
    def arrows(ax, starts, deltas, colors, **kwargs):
        h = []
        for start, delta, color in zip(starts, deltas, colors):
            h += [Canvas.arrow(ax, start, delta, color=color(200), **kwargs)]
        return h"""
    @staticmethod
    def text(ax, pos, text):
        """Place `text` at complex position `pos`."""
        ax.text(np.real(pos), np.imag(pos), text, fontsize=13)
    @staticmethod
    def connect(fig, fn):
        """Register fn as a mouse button-press callback on fig."""
        return fig.canvas.mpl_connect('button_press_event', fn)
| true
|
e04c7e1f8192a52f14b1f56fcc7d90443cf86ca2
|
Python
|
Rambaldelli/SVM-GOR-Secondary-Structure-Prediction-Comparison
|
/BlindSet_save.py
|
UTF-8
| 1,939
| 2.546875
| 3
|
[] |
no_license
|
import json
import glob
from numpy import argmax
# Build blindSet.json: for every dssp file of the blind test set, record
# the 3-state secondary structure, the sequence, a one-hot profile and
# the per-residue SVM class labels.
dic = {}
with open('blindSet.json', 'w') as D:
    path = 'blindT/dssp/blind_test_dssp/*.dssp'
    files = glob.glob(path)
    for file in files:
        # protein id: 4th path component, up to the ':' separator
        # NOTE(review): assumes '/' separators and this exact directory
        # depth — would break on Windows or a different layout
        prot_id = file.split('/')[3]
        prot_id = prot_id.split(':')[0]
        # fix: context manager so the dssp file handle is always closed
        # (was opened with open() and never closed); also renamed the
        # loop-local `id` which shadowed the builtin
        with open(file, 'r') as f:
            F = f.readlines()
        dic[prot_id] = {}
        dic[prot_id]['str'] = ''
        dic[prot_id]['seq'] = ''
        for lin in F:
            # dssp: collapse the 8-state code (column 16) to H/E/-
            if (lin[16] == 'H' or lin[16] == 'G' or lin[16] == 'I'):
                dic[prot_id]['str'] = dic[prot_id]['str'] + 'H'
            elif (lin[16] == 'B' or lin[16] == 'E'):
                dic[prot_id]['str'] = dic[prot_id]['str'] + 'E'
            else:
                dic[prot_id]['str'] = dic[prot_id]['str'] + '-'
            # fasta(sequence): lowercase residue letters are mapped to 'C'
            # NOTE(review): presumably dssp marks disulfide-bonded
            # cysteines with lowercase letters — confirm
            if lin[13].islower():
                dic[prot_id]['seq'] = dic[prot_id]['seq'] + 'C'
            else:
                dic[prot_id]['seq'] = dic[prot_id]['seq'] + lin[13]
        # profile (if pssm missing create profiles one hot)
        # define input string
        data = dic[prot_id]['seq']
        # define universe of possible input values
        alphabet = 'ARNDCQEGHILKMFPSTWYVX'
        # define a mapping of chars to integers
        char_to_int = dict((c, i) for i, c in enumerate(alphabet))
        int_to_char = dict((i, c) for i, c in enumerate(alphabet))
        # integer encode input data (KeyError on residues outside the
        # 21-letter alphabet — unchanged from the original behavior)
        integer_encoded = [char_to_int[char] for char in data]
        # one hot encode
        onehot_encoded = list()
        for value in integer_encoded:
            letter = [0 for _ in range(len(alphabet))]
            letter[value] = 1
            onehot_encoded.append(letter)
        dic[prot_id]['prof'] = onehot_encoded
        # SVM class: H -> 1, E -> 2, coil -> 3
        dic[prot_id]['SVMclass'] = []
        for i in dic[prot_id]['str']:
            if i == 'H':
                dic[prot_id]['SVMclass'].append(1)
            elif i == 'E':
                dic[prot_id]['SVMclass'].append(2)
            else:
                dic[prot_id]['SVMclass'].append(3)
    json.dump(dic, D)
| true
|
82795d57dc9450901460beaafea0c02694a1f88a
|
Python
|
kartiktodi/PolySpider
|
/src/PolySpider/util/CategoryUtil.py
|
UTF-8
| 8,661
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
'''
使用方法:
由于不同的应用商店对应的分类不同,抓取到的应用进行分类整理也不能完全按照分类名来新建分类
为了统一分类,采取91助手的分类作为默认分类方式,并为每一个分类定义一个ID
建立一个字典,key为应用市场中抓取的分类名,value为对应的分类ID
当所有市场中的分类名称都已经在字典中定义以后,就可以直接通过抓取分类名来将应用对应到我们的分类列表中来。
'''
'''
应用名称 ID(后两位为00,预留,如果有子分类的话,可以利用,比如游戏有自分类的话可以是4601,4602这样)
new category 根据GooglePlay分类规则进行分类
个性化 1000
交通运输 1100
体育 1200
健康与健身 1300
动态壁纸 1400
动漫 1500
医药 1600
商务 1700
图书与工具书 1800
天气 1900
娱乐 2000
媒体与视频 2100
小部件 2200
工具 2300
摄影 2400
效率 2500
教育 2600
新闻杂志 2700
旅游与本地出行 2800
生活时尚 2900
社交 3000
财务 3100
购物 3200
软件与演示 3300
通讯 3400
音乐与音频 3500
游戏 3600
其他 3700
'''
CATEGORY_ID = {
## Common
'未分类' : '0',
'其他' : '1000',
## App_star
'阅读资讯' : '2700',
'电子书' : '1800',
'输入法' : '2300',
'休闲益智' : '3600',
'动作竞技' : '3600',
'体育竞速' : '3600',
'健康美食' : '1300',
'系统工具' : '2300',
'主题桌面' : '1000',
'音乐视频' : '3500,2100',
'社交' : '3000',
'办公' : '2500',
'交通地图' : '1100',
## 小米商城
'影音视听':'3500,2100',
'图书与阅读':'1800',
'效率办公':'2500',
'生活':'2900',
'摄影摄像':'2400',
'体育运动':'1200',
'娱乐消遣':'2000',
'实用工具':'2300',
'聊天与社交':'3000',
'学习与教育':'2600',
'时尚与购物':'3200',
'旅行与交通':'1100,2800',
'医疗与健康':'1300',
'新闻':'2700',
'理财':'3100',
'策略':'3600',
'竞速':'3600',
'棋牌':'3600',
'音乐游戏':'3600',
'飞行模式':'3600',
'动作冒险':'3600',
'角色扮演':'3600',
'体育运动':'3600',
'益智解密':'3600',
'重力感应':'3600',
#安卓市场
'工具':'2300',
'对战格斗':'3600',
'其他游戏':'3600',
'便捷生活':'2900',
'网购支付':'3100',
'资讯':'2700',
'赛车竞速':'3600',
'拍摄美化':'2400',
'模拟经营':'3600',
'聊天通讯':'3400',
'动态壁纸':'1400',
'出版-生活情感':'1800',
'社交网络':'3000',
'网络模拟':'2300',
'策略游戏':'3600',
'站外应用':'2300',
'淘宝店铺':'3200',
'效率':'2500',
'射击游戏':'3600',
'其他软件':'3700',
'金融理财':'3100',
'新闻资讯':'2700',
'通讯':'3400',
'辅助工具':'2300',
'棋牌桌游':'3600',
'影音':'2100,3500',
'经营':'3600',
'书籍阅读':'1800',
'浏览器':'3300',
'系统安全':'2300',
'通信':'3400',
'益智游戏':'3600',
'学习办公':'2500',
'阅读':'1800',
'卡片棋牌':'3600',
'主题插件':'1000',
'出版-文史小说':'1800',
'出行':'2800',
'虚拟养成':'3600',
'健康':'1300',
'生活实用':'2900',
'影音图像':'3500,2100',
'体育':'1200',
'休闲':'3600',
'壁纸美化':'1400',
'拍照':'2400',
'通话通讯':'3400',
'角色冒险':'3600',
'动作格斗':'3600',
'个性化':'1000',
'原创-言情':'1800',
'角色':'3600',
'原创-都市':'1800',
'购物':'3200',
'安全':'2300',
'网游':'3600',
'射击':'3600',
'图书阅读':'1800',
'教育':'2600',
'购物娱乐':'3200,2000',
'飞行射击':'3600',
'原创-玄幻':'1800',
'原创-历史':'1800',
'经营策略':'3600',
'经营养成':'3600',
'影音播放':'2100,3500',
'益智':'3600',
'手机网游':'3600',
'网络社区':'3000',
'地图导航':'2800',
'理财购物':'3100,3200',
'原创-仙侠':'1800',
'原创-竞技':'1800',
'出版-养生保健':'1800',
'儿童':'2600',
'原创-屌丝':'1800',
'原创-穿越':'1800',
'游戏':'3600',
'原创-惊悚':'1800',
'原创-军事':'1800',
'原创-网络':'1800',
'原创-科幻':'1800',
'出版-时尚娱乐':'1800',
'出版-经管励志':'1800',
'原创-同人':'1800',
'休闲益智':'3600',
'动作射击':'3600',
'体育竞技':'3600',
'网络游戏':'3600',
'棋牌游戏':'3600',
'策略塔防':'3600',
'卡牌策略':'3600',
'动漫':'3600',
'生活时尚':'2900',
'回合战斗':'3600',
'媒体与视频':'2100',
'精选游戏':'3600',
'交通运输':'1100',
'音乐与音频':'3500',
'射击冒险':'3600',
'健康与健身':'1300',
'词典':'2300',
'娱乐':'2000',
'必备软件':'3300',
'益智和解谜':'3600',
'有声读物':'1800',
'休闲其它':'3600',
'天气':'1900',
'新闻杂志':'2700',
'最新游戏':'3600',
'软件与演示':'3300',
'图书与工具书':'1800',
'摄影':'2400',
'纸牌和赌博':'3600',
'财务':'3100',
'中文游戏':'3600',
'医药':'1600',
'旅游与本地出行':'2800',
'商务':'1700',
'即时动作':'3600',
'网站应用':'3300',
'街机和动作':'3600',
'即时动作':'3600'
}
CATEGORY_NAME = {
'0' :'未分类',
'1000':'个性化',
'1100':'交通运输',
'1200':'体育',
'1300':'健康与健身',
'1400':'动态壁纸',
'1500':'动漫',
'1600':'医药',
'1700':'商务',
'1800':'图书与工具书',
'1900':'天气',
'2000':'娱乐',
'2100':'媒体与视频',
'2200':'小部件',
'2300':'工具',
'2400':'摄影',
'2500':'效率',
'2600':'教育',
'2700':'新闻杂志',
'2800':'旅游与本地出行',
'2900':'生活时尚',
'3000':'社交',
'3100':'财务',
'3200':'购物',
'3300':'软件与演示',
'3400':'通讯',
'3500':'音乐与音频',
'3600':'游戏',
'3700':'其他',
'交通运输':'1100',
'健康与健身':'1300',
'动态壁纸':'1400',
'动漫':'1500',
'医疗':'1600',
'商务':'1700',
'图书与工具书':'1800',
'天气':'1900',
'小部件':'2200',
'摄影':'2400',
'教育':'2600',
'新闻杂志':'2700',
'旅游与本地出行':'2800',
'生活时尚':'2900',
'财务':'3100',
'软件与演示':'3300',
'音乐与音频':'3500'
}
def get_category_id_by_name(category_name,item):
    '''
    * 根据抓取来的应用名来获取对应在分类系统中的适合的id
    * 如果没有找到对应项,则说明抓取到的该分类属于新分类,记录在`un_record_category.txt`文件中,等待人工进行分类确认
    * input: category_name (`item` is unused; kept for API compatibility)
    * output: category_id ("0" for unknown / unrecorded categories)
    '''
    if not CATEGORY_ID.get(category_name):
        flag = True
        category_map = {}
        if not os.path.isfile('un_record_category.txt'):
            # bug fix: `file(...)` is a Python-2-only builtin; create the
            # file with open() instead
            open("un_record_category.txt", "w").close()
        # fix: context manager so the read handle is closed (was a bare
        # `for line in open(...)`)
        with open('un_record_category.txt', 'r') as f:
            for line in f:
                if line.strip() == category_name:
                    # already recorded: nothing to write back
                    flag = False
                    break
                category_map[line.strip()] = 1
        category_map[category_name] = 1
        if flag:
            # rewrite the whole file including the new category name
            with open('un_record_category.txt','w') as f:
                for key in category_map.keys():
                    f.write(key + "\n")
        return "0"
    return CATEGORY_ID.get(category_name)
def get_category_name_by_id(category_id):
    '''
    * 根据category_id来获取对应在分类系统中的名称,如果没有找到,则输出'无',不过应该不会出现这种情况
    * input: category_id
    * output: category_name ("无" when the id is unknown)
    '''
    # single lookup with a default instead of the previous get-then-get
    # double lookup (equivalent here: no value in CATEGORY_NAME is falsy)
    return CATEGORY_NAME.get(category_id, "无")
| true
|
589a0036fdef27c6c2eff42184aa328014033f10
|
Python
|
mahasen-s/deepQuantum
|
/exact/exact_temp.py
|
UTF-8
| 1,320
| 2.796875
| 3
|
[] |
no_license
|
import numpy as np
from scipy import sparse
from scipy.sparse import linalg
def exact_TFI(N, h):
    """Exact ground state of an N-spin Ising model in a transverse field.

    Builds H = h * sum_i X_i + sum_{k<i} Z_i Z_k as a sparse matrix on
    the 2^N-dimensional space, prints it densely, and returns the lowest
    eigenvalue and its eigenvector (smallest-algebraic via eigsh).
    """
    # sparse constructor used throughout
    sp_fun = sparse.csr_matrix

    # single-site operators
    sx = sp_fun(np.array([[0, 1], [1, 0]]))
    sz = sp_fun(np.array([[1, 0], [0, -1]]))
    eye = sparse.identity(2)
    zer = sp_fun(np.array([[0, 0], [0, 0]]))

    # start from the all-zero operator on the full space
    H = sp_fun(1)
    for _ in range(N):
        H = sparse.kron(H, zer)

    # accumulate the field and coupling terms
    for i in range(N):
        # operators acting as sx (resp. sz) on site i, identity elsewhere
        sig_x = sp_fun(1)
        sig_z = sp_fun(1)
        for j in range(N):
            if i == j:
                sig_x = sparse.kron(sig_x, sx)
                sig_z = sparse.kron(sig_z, sz)
            else:
                sig_x = sparse.kron(sig_x, eye)
                sig_z = sparse.kron(sig_z, eye)
        H += h * sig_x
        # couple site i to every earlier site k
        for k in range(i):
            sig_z2 = sp_fun(1)
            for j in range(N):
                if j == k:
                    sig_z2 = sparse.kron(sig_z2, sz)
                else:
                    sig_z2 = sparse.kron(sig_z2, eye)
            # elementwise product of the two diagonal operators = Z_i Z_k
            sig_z2 = sp_fun.multiply(sig_z, sig_z2)
            H += sig_z2

    print(H.toarray())
    # lowest ('SA' = smallest algebraic) eigenpair
    evals, evecs = linalg.eigsh(H, 1, which='SA')
    return evals, evecs
# smoke test: 3 spins in a weak transverse field h = 0.11
evals, evecs = exact_TFI(3,0.11)
print(evals)
print(evecs)
| true
|
ea96feea19ccf0387021024dc490946763cd9b0f
|
Python
|
nikolayvoronchikhin/pydrill
|
/pydrill/client/result.py
|
UTF-8
| 502
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
class ResultQuery(object):
    """
    Holds the payload returned from Drill for one query.

    Iterating over an instance yields the result rows.
    """
    # TODO: create better docs.
    def __init__(self, response, data, duration):
        # raw HTTP response and wall-clock duration of the query
        self.response = response
        self.duration = duration
        # parsed payload, with rows/columns pulled out for convenience
        self.data = data
        self.rows = data.get('rows', [])
        self.columns = data.get('columns', [])

    def __iter__(self):
        return iter(self.rows)
| true
|
2fd25662d26d1f7aa3e70d89a023f947e565d892
|
Python
|
bhatnagaranshika02/Data-Structures-Python
|
/Stack/PranthesisOptimized.py
|
UTF-8
| 176
| 3.40625
| 3
|
[] |
no_license
|
def is_balanced(text):
    """Return True if text consists solely of balanced (), [], {} pairs.

    Repeatedly deletes adjacent matching pairs until the string stops
    shrinking; a fully balanced string reduces to "". Any non-bracket
    character therefore makes the result False.
    """
    prev_len = -1
    while len(text) != prev_len:
        prev_len = len(text)
        text = text.replace('()', '')
        text = text.replace('[]', '')
        text = text.replace('{}', '')
    return len(text) == 0


if __name__ == "__main__":
    # read one line of brackets and report whether it is balanced
    # (decomposed into a function so the check is importable/testable;
    # previously input() ran at import time)
    if is_balanced(input()):
        print("Yes")
    else:
        print("No")
| true
|
29235e38ddeaf47169ad7ce3eefbc6be6629f053
|
Python
|
AK-1121/code_extraction
|
/python/python_12208.py
|
UTF-8
| 127
| 2.5625
| 3
|
[] |
no_license
|
# Accessing capture groups during substitution
# NOTE(review): stand-alone snippet — assumes `import re` and a string
# `line` are already in scope. Replaces every decimal literal in `line`
# with its value rounded to the nearest integer.
re.sub(r'\d+\.\d*', lambda match: str(int(round(float(match.group(0))))), line)
| true
|
1b34412c775e1e20781baa182f15d7a8cb524747
|
Python
|
BorisVV/try-your-luck-app
|
/games_classes.py
|
UTF-8
| 4,288
| 3.71875
| 4
|
[] |
no_license
|
from game_class import Game
import random
# "Power Ball": 70,
# "Power Play": 27,
# "Gopher Five": 48,
# "North Star": 32,
# "Lotto America": 53,
# "Star Ball": 11,
# "Mega Millions": 71,
# "Mega Ball": 26,
# "Lucky For Life": 49,
# "Lucky Ball": 19,
#For each of the games we'll use the quick pick where the computer
#selects the number. '''
class NorthStar(Game):
    '''
    North Star has 31 numbers and the computer will select five for the quick pick and the drawings.
    '''
    def calculate(self):
        """Simulate quick picks vs drawings (7 drawings per week).

        self.numb_of_qpick selects the purchase frequency:
        1 = a fresh quick pick for every drawing,
        2 = one quick pick per week (valid for 7 drawings),
        anything else = one quick pick per two weeks (14 drawings).
        """
        self.numbers1 = 31 + 1 #When using range for loops the last number is not counted.
        self.counter = 1
        insideLoopCounter = 0
        # One quick pick for every single drawing.
        if self.numb_of_qpick == 1:
            self.numb_of_weeks *= 7
            while self.counter <= self.numb_of_weeks:
                Game.userQPickFive(self)
                # Change the number in the perimeter between 1 and 5.
                Game.computerDrawingFives(self, 3)
                self.counter += 1
        # User decides to buy once every week and the ticket is good for 7 drawings.
        elif self.numb_of_qpick == 2:
            self.numb_of_qpick = 7
            while self.counter <= self.numb_of_weeks:
                insideLoopCounter = 1
                Game.userQPickFive(self)
                # Every week, we need to update the quick pick drawing for the user.
                while insideLoopCounter <= self.numb_of_qpick: # number of times North Star drawings.
                    Game.computerDrawingFives(self, 3)
                    insideLoopCounter += 1
                self.counter += 1
        # User selected one quick pick that is good for two weeks, or fourteen drawings.
        else:
            self.numb_of_qpick = 14
            while self.counter <= self.numb_of_weeks:
                insideLoopCounter = 1
                Game.userQPickFive(self)
                while insideLoopCounter <= self.numb_of_qpick:
                    Game.computerDrawingFives(self, 3)
                    insideLoopCounter += 1
                # one ticket covers two weeks, so the week counter jumps by 2
                self.counter += 2
    def get_name(self):
        # presumably set by the Game base class constructor — TODO confirm
        return self.name
    def get_numb_qpicks(self):
        # NOTE(review): prints instead of returning, unlike
        # GopherFive.get_numb_qpicks — confirm callers expect console output
        print(self.numb_of_qpick)
class GopherFive(Game):
    ''' This game plays 3 times a week. e.g. if user enters 2 for the numb of qpicks,
    wich is 1 week, that means that the ticket is valid to play for 3 drawings. The same numbers will be matched against 3 drawing and then, the user's numbers will
    be drawn again, repeating the sequence until the number of weeks are done..
    '''
    def calculate(self):
        """Simulate Gopher Five drawings (3 per week).

        self.numb_of_qpick: 1 = one quick pick per drawing,
        2 = one per week (3 drawings), else one per two weeks.
        """
        self.numbers1 = 48 + 1
        self.counter = 1
        insideLoopCounter = 0 #This is for every ticket, loop three times.
        # User buys a ticket 3 times a week, every week.
        if self.numb_of_qpick == 1: #One quick pick for every drawing.
            self.numb_of_weeks *= 3 #where 3 is the number that gopher five plays
            # everyweek times the total number of week in a the years entered.
            while self.counter <= self.numb_of_weeks:
                Game.userQPickFive(self)
                # Change the number in the perimeter between 1 and 5.
                Game.computerDrawingFives(self, 2)
                self.counter += 1
        # User buys a ticket that is good for 3 drawing (full week).
        elif self.numb_of_qpick == 2: #User buys quick pick once everyweek.
            self.numb_of_qpick = 3
            while self.counter <= self.numb_of_weeks:
                insideLoopCounter = 1
                Game.userQPickFive(self)
                while insideLoopCounter <= self.numb_of_qpick:
                    Game.computerDrawingFives(self, 2)
                    insideLoopCounter += 1
                self.counter += 1
        else:
            self.numb_of_qpick = 6
            while self.counter <= self.numb_of_weeks:
                insideLoopCounter = 1
                Game.userQPickFive(self)
                while insideLoopCounter <= self.numb_of_qpick:
                    Game.computerDrawingFives(self, 2)
                    # NOTE(review): stepping by 2 runs only 3 of the 6
                    # drawings per ticket — inconsistent with NorthStar's
                    # bi-weekly branch (which steps by 1); confirm intent
                    insideLoopCounter += 2
                self.counter += 2
    def get_name(self):
        # presumably set by the Game base class constructor — TODO confirm
        return self.name
    def get_numb_qpicks(self):
        return self.numb_of_qpick
| true
|
1f67820253e9959c44ee8ce839a9ec3d8bfbf1d2
|
Python
|
rajeshkumarkarra/qml
|
/implementations/tutorial_rotoselect.py
|
UTF-8
| 16,801
| 3.609375
| 4
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
r"""
.. _rotoselect:
Quantum circuit structure learning
==================================
"""
##############################################################################
# This example shows how to learn a good selection of rotation
# gates so as to minimize a cost
# function using the Rotoselect algorithm of `Ostaszewski et al.
# (2019) <https://arxiv.org/abs/1905.09692>`__. We apply this algorithm to minimize a Hamiltonian for a
# variational quantum eigensolver (VQE) problem,
# and improve upon an initial circuit structure ansatz.
#
# Background
# ----------
#
# In quantum machine learning and optimization problems,
# one wishes to minimize a cost function with respect to some parameters in the circuit. It is desirable
# to keep the circuit as shallow as possible to reduce the effects of noise, but an arbitrary
# choice of gates is generally suboptimal for performing the optimization.
# Therefore, it would be useful to employ an
# algorithm which learns a good circuit structure at fixed depth to minimize the cost function.
#
# Furthermore, PennyLane's optimizers perform automatic differentiation of quantum nodes by evaluating phase-shifted
# expectation values using the quantum circuit itself.
# The output of these calculations, the gradient, is used in optimization methods to minimize
# the cost function. However,
# there exists a technique to discover the optimal parameters of a quantum circuit through phase-shifted evaluations,
# without the need for calculating the gradient as an intermediate step (i.e., a gradient-free optimization).
# It could be desirable, in some cases, to
# take advantage of this.
#
#
# The Rotoselect algorithm addresses the above two points: it allows one to jump directly to the
# optimal value for a single parameter
# with respect to fixed values for the other parameters, skipping gradient descent, and tries various
# rotation gates along the way.
# The algorithm works by updating the parameters :math:`\boldsymbol{\theta}=\theta_1,\dots,\theta_D` and gate choices
# :math:`\boldsymbol{R}=R_1,\dots,R_D`
# one at a time according to a *closed-form expression* for the optimal value of the :math:`d^{\text{th}}` parameter
# :math:`\theta^{*}_d` when the other parameters and gate choices are fixed:
#
# .. math::
#
# \theta^{*}_d &= \underset{\theta_d}{\text{argmin}} \langle H \rangle_{\theta_d} \\
# &= -\frac{\pi}{2} - \text{arctan}\left(\frac{2\langle H \rangle_{\theta_d = 0} -
# \langle H \rangle_{\theta_d=\pi/2} - \langle H \rangle_{\theta_d=-\pi/2}}{\langle
# H \rangle_{\theta_d=\pi/2} -
# \langle H \rangle_{\theta_d=-\pi/2}}\right)
#
# The calculation makes use of 3 separate evaluations
# of the expectation value :math:`\langle H \rangle_{\theta_d}` using the quantum circuit. Although
# :math:`\langle H \rangle` is really a function of all parameters and gate choices
# (:math:`\boldsymbol{\theta}`, :math:`\boldsymbol{R}`), we
# are fixing every parameter and gate choice apart from :math:`\theta_d` in this expression so we write it as
# :math:`\langle H \rangle = \langle H \rangle_{\theta_d}`.
# For each parameter in the quantum circuit, the algorithm proceeds by evaluating :math:`\theta^{*}_d`
# for each choice of
# gate :math:`R_d \in \{R_x,R_y,R_z\}` and selecting the gate which yields the minimum value of
# :math:`\langle H \rangle`.
#
# Thus, one might expect the number of circuit evaluations required to be 9 for each parameter (3 for each gate
# choice). However, since all 3 rotation gates yield identity when :math:`\theta_d=0`,
#
# .. math:: R_x(0) = R_y(0) = R_z(0) = 1,
#
# the value of :math:`\langle H \rangle_{\theta_d=0}` in the expression for :math:`\theta_d^{*}` above
# is the same for each of the gate choices, and this 3-fold
# degeneracy reduces the number of evaluations required to 7.
#
# One cycle of the Rotoselect algorithm involves
# iterating through every parameter and performing the calculation above.
# This cycle is repeated for a fixed number of steps or until convergence. In this way, one could learn both
# the optimal parameters and gate choices for the task of minimizing
# a given cost function. Next, we present an example of this algorithm
# applied to a VQE Hamiltonian.
#
# Example VQE Problem
# -------------------
#
# We focus on a 2-qubit VQE circuit for simplicity. Here, the Hamiltonian
# is
#
# .. math::
# H = 0.5Y_2 + 0.8Z_1 - 0.2X_1
#
# where the subscript denotes the qubit upon which the Pauli operator acts. The
# expectation value of this quantity acts as the cost function for our
# optimization.
#
# Rotosolve
# ---------
# As a precursor to implementing Rotoselect we can analyze a version of the algorithm
# which does not optimize the choice of gates and only optimizes the parameters for a given circuit ansatz,
# called Rotosolve. Later, we will build on this example
# to implement Rotoselect and vary the circuit structure.
#
# Imports
# ~~~~~~~
# To get started, we import PennyLane and the PennyLane-wrapped version of NumPy. We also
# create a 2-qubit device using the ``default.qubit`` plugin and set the ``analytic`` keyword to ``True``
# in order to obtain exact values for any expectation values calculated. In contrast to real
# devices, simulators have the capability of doing these calculations without sampling.
import pennylane as qml
from pennylane import numpy as np
# two-qubit exact simulator; analytic=True returns exact expectation
# values rather than sampled estimates
n_wires = 2
dev = qml.device("default.qubit", analytic=True, wires=2)
##############################################################################
# Creating a fixed quantum circuit
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. figure:: ../implementations/rotoselect/original_ansatz.png
# :scale: 65%
# :align: center
# :alt: original_ansatz
#
# |
#
# Next, we set up a circuit with a fixed ansatz structure---which will later be subject to change---and encode
# the Hamiltonian into a cost function. The structure is shown in the figure above.
def ansatz(params):
    """Fixed two-parameter ansatz: RX on wire 0, RY on wire 1, then CNOT."""
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=1)
    qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit(params):
    # expectation values <Z_1> and <Y_2> of the ansatz state
    ansatz(params)
    return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit2(params):
    # expectation value <X_1>, the remaining term of the Hamiltonian
    ansatz(params)
    return qml.expval(qml.PauliX(0))
def cost(params):
    """Energy <H> = 0.5 <Y_2> + 0.8 <Z_1> - 0.2 <X_1> for the ansatz state."""
    exp_z1, exp_y2 = circuit(params)
    exp_x1 = circuit2(params)
    # same term order as the Hamiltonian definition above
    return 0.5 * exp_y2 + 0.8 * exp_z1 - 0.2 * exp_x1
##############################################################################
# Helper methods for the algorithm
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# We define methods to evaluate
# the expression in the previous section. These will serve as the basis for
# our optimization algorithm.
# calculation as described above
def opt_theta(d, params, cost):
    """Closed-form Rotosolve update of params[d], in place.

    Evaluates the cost at theta_d in {0, pi/2, -pi/2} and jumps directly
    to the optimal angle, stored back into params[d] within (-pi, pi].
    """
    evaluations = []
    for theta in (0.0, np.pi / 2.0, -np.pi / 2.0):
        params[d] = theta
        evaluations.append(cost(params))
    M_0, M_0_plus, M_0_minus = evaluations
    # np.arctan2 returns a value in (-pi, pi]
    shift = np.arctan2(2.0 * M_0 - M_0_plus - M_0_minus,
                       M_0_plus - M_0_minus)
    params[d] = -np.pi / 2.0 - shift
    # restrict output to lie in (-pi, pi], a convention
    # consistent with the Rotosolve paper
    if params[d] <= -np.pi:
        params[d] += 2 * np.pi
# one cycle of rotosolve
def rotosolve_cycle(cost, params):
    """Apply the closed-form update once to every parameter, in order,
    and return the (mutated) parameter list."""
    for index, _ in enumerate(params):
        opt_theta(index, params, cost)
    return params
##############################################################################
# Optimization and comparison with gradient descent
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# We set up some initial parameters for the :math:`R_x` and :math:`R_y`
# gates in the ansatz circuit structure and perform an optimization using the
# Rotosolve algorithm.
# initial angles for the RX and RY gates of the ansatz
init_params = [0.3, 0.25]
params_rsol = init_params.copy()
n_steps = 30
costs_rotosolve = []
# record the cost, then apply one full Rotosolve cycle, n_steps times
for i in range(n_steps):
    costs_rotosolve.append(cost(params_rsol))
    params_rsol = rotosolve_cycle(cost, params_rsol)
##############################################################################
# We then compare the results of Rotosolve to an optimization
# performed with gradient descent and plot
# the cost functions at each step (or cycle in the case of Rotosolve).
# This comparison is fair since the number of circuit
# evaluations involved in a cycle of Rotosolve is similar to those required to calculate
# the gradient of the circuit and step in this direction. Evidently, the Rotosolve algorithm
# converges on the minimum after the first cycle for this simple circuit.
# gradient-descent baseline, starting from the same initial parameters
params_gd = init_params.copy()
opt = qml.GradientDescentOptimizer(stepsize=0.5)
costs_gd = []
for i in range(n_steps):
    costs_gd.append(cost(params_gd))
    params_gd = opt.step(cost, params_gd)
# plot cost function optimization using the 2 techniques
import matplotlib.pyplot as plt
steps = np.arange(0, n_steps)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 3))
# left panel: gradient-descent cost per step
plt.subplot(1, 2, 1)
plt.plot(steps, costs_gd, "o-")
plt.title("grad. desc.")
plt.xlabel("steps")
plt.ylabel("cost")
# right panel: Rotosolve cost per cycle
plt.subplot(1, 2, 2)
plt.plot(steps, costs_rotosolve, "o-")
plt.title("rotosolve")
plt.xlabel("cycles")
plt.ylabel("cost")
plt.tight_layout()
plt.show()
##############################################################################
# Cost function surface for circuit ansatz
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Now, we plot the cost function surface for later comparison with the surface generated
# by learning the circuit structure.
from matplotlib import cm
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(6, 4))
# Fix: fig.gca(projection="3d") was deprecated in Matplotlib 3.4 and removed
# in 3.6; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection="3d")
X = np.linspace(-4.0, 4.0, 40)
Y = np.linspace(-4.0, 4.0, 40)
xx, yy = np.meshgrid(X, Y)
# Evaluate the cost on the grid; rows index theta_2 (Y), columns theta_1 (X).
Z = np.array([[cost([x, y]) for x in X] for y in Y]).reshape(len(Y), len(X))
surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel(r"$\theta_1$")
ax.set_ylabel(r"$\theta_2$")
ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower"))
plt.show()
##############################################################################
# It is apparent that, based on the circuit structure
# chosen above, the cost function does not depend on the angle parameter :math:`\theta_2`
# for the rotation gate :math:`R_y`. As we will show in the following sections, this independence is not true
# for alternative gate choices.
#
# Rotoselect
# ----------
#
# .. figure:: ../implementations/rotoselect/rotoselect_structure.png
# :scale: 65%
# :align: center
# :alt: rotoselect_structure
#
# |
#
# We now implement the Rotoselect algorithm to learn a good selection of gates to minimize
# our cost function. The structure is similar to the original ansatz, but the generators of rotation are
# selected from the set of Pauli gates :math:`P_d \in \{X,Y,Z\}` as shown in the figure above. For example,
# :math:`U(\theta,Z) = R_z(\theta)`.
#
# Creating a quantum circuit with variable gates
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First, we set up a quantum circuit with a similar structure to the one above, but
# instead of fixed rotation gates :math:`R_x` and :math:`R_y`, we allow the gates to be specified with the
# ``generators`` keyword, which is a list of the generators of rotation that will be used for the gates in the circuit.
# For example, ``generators=['X', 'Y']`` reproduces the original circuit ansatz used in the Rotosolve example
# above.
# A helper method ``RGen`` returns the correct unitary gate according to the
# rotation specified by an element of ``generators``.
def RGen(param, generator, wires):
    """Apply the single-qubit rotation selected by ``generator``.

    Args:
        param: rotation angle.
        generator: one of ``"X"``, ``"Y"`` or ``"Z"``, selecting
            :math:`R_x`, :math:`R_y` or :math:`R_z` respectively.
        wires: the wire(s) the rotation acts on.

    Raises:
        ValueError: for any other ``generator`` string. The original
            silently applied no gate at all, which hides typos in the
            generator list.
    """
    if generator == "X":
        qml.RX(param, wires=wires)
    elif generator == "Y":
        qml.RY(param, wires=wires)
    elif generator == "Z":
        qml.RZ(param, wires=wires)
    else:
        raise ValueError(
            "generator must be 'X', 'Y' or 'Z', got {!r}".format(generator))
def ansatz_rsel(params, generators):
    """Two variable-generator rotations followed by an entangling CNOT."""
    RGen(params[0], generators[0], wires=0)
    RGen(params[1], generators[1], wires=1)
    qml.CNOT(wires=[0, 1])
@qml.qnode(dev)
def circuit_rsel(params, generators=None): # generators will be passed as a keyword arg
    """QNode returning <Z> on wire 0 and <Y> on wire 1."""
    ansatz_rsel(params, generators)
    return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(dev)
def circuit_rsel2(params, generators=None): # generators will be passed as a keyword arg
    """QNode returning <X> on wire 0.

    NOTE(review): presumably kept as a separate QNode because <X> and <Z>
    on the same wire cannot be measured in one circuit -- confirm.
    """
    ansatz_rsel(params, generators)
    return qml.expval(qml.PauliX(0))
def cost_rsel(params, generators):
    # Weighted sum of the three expectations: 0.5<Y_2> + 0.8<Z_1> - 0.2<X_1>.
    Z_1, Y_2 = circuit_rsel(params, generators=generators)
    X_1 = circuit_rsel2(params, generators=generators)
    return 0.5 * Y_2 + 0.8 * Z_1 - 0.2 * X_1
##############################################################################
# Helper methods
# ~~~~~~~~~~~~~~
# We define helper methods in a similar fashion to Rotosolve. In this case,
# we must iterate through the possible gate choices in addition to optimizing each parameter.
def rotosolve(d, params, generators, cost, M_0):  # M_0 only calculated once
    """Closed-form minimization of parameter ``d`` for a sinusoidal cost.

    Samples the cost at +/- pi/2 for parameter ``d``, solves for the angle
    minimizing the sinusoidal restriction of the cost, writes that angle
    back into ``params[d]`` (wrapped into (-pi, pi]) and returns the cost
    at the updated parameters. ``M_0`` is the cost at ``params[d] == 0``.
    """
    # Sample the cost at theta_d = +pi/2 and theta_d = -pi/2.
    params[d] = 0.5 * np.pi
    m_plus = cost(params, generators)
    params[d] = -0.5 * np.pi
    m_minus = cost(params, generators)
    # arctan2 returns the phase in (-pi, pi].
    phase = np.arctan2(2.0 * M_0 - m_plus - m_minus, m_plus - m_minus)
    theta_opt = -0.5 * np.pi - phase
    # Wrap the optimal angle back into (-pi, pi].
    if theta_opt <= -np.pi:
        theta_opt += 2.0 * np.pi
    params[d] = theta_opt
    return cost(params, generators)
def optimal_theta_and_gen_helper(d, params, generators, cost):
    """Return the best ``(angle, generator)`` pair for position ``d``.

    Tries each Pauli generator, runs ``rotosolve`` for it, and keeps the
    candidate with the lowest cost (ties favor the generator tried last,
    matching the original ``<=`` comparison).

    Fix: the original initialized ``params_opt_cost`` only via the
    ``generator == "X"`` short-circuit on the first iteration, which would
    raise ``UnboundLocalError`` if the gate list ever changed; the
    accumulators are now initialized explicitly.
    """
    params[d] = 0.0
    M_0 = cost(params, generators)  # M_0 independent of generator selection
    params_opt_d = None
    params_opt_cost = float("inf")
    generators_opt_d = None
    for generator in ["X", "Y", "Z"]:
        generators[d] = generator
        params_cost = rotosolve(d, params, generators, cost, M_0)
        if params_cost <= params_opt_cost:
            params_opt_d = params[d]
            params_opt_cost = params_cost
            generators_opt_d = generator
    return params_opt_d, generators_opt_d
def rotoselect_cycle(cost, params, generators):
    """Run one Rotoselect sweep: optimize every (angle, generator) slot once."""
    for idx in range(len(params)):
        params[idx], generators[idx] = optimal_theta_and_gen_helper(
            idx, params, generators, cost)
    return params, generators
##############################################################################
# Optimizing the circuit structure
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# We perform the optimization and print the optimal generators for the rotation gates. The minimum value of the
# cost function obtained by optimizing using Rotoselect is less than the minimum value of the cost function obtained by
# gradient descent or Rotosolve, which were performed on the original circuit structure ansatz.
# In other words, Rotoselect performs better without
# increasing the depth of the circuit by selecting better gates for the task of minimizing the cost function.
costs_rsel = []
params_rsel = init_params.copy()
init_generators = ["X", "Y"]
# NOTE(review): this aliases init_generators rather than copying it, so the
# "initial" list is mutated in place by rotoselect_cycle -- use .copy() if
# the initial generators are needed later.
generators = init_generators
for _ in range(n_steps):
    costs_rsel.append(cost_rsel(params_rsel, generators))
    params_rsel, generators = rotoselect_cycle(cost_rsel, params_rsel, generators)
print("Optimal generators are: {}".format(generators))
# plot cost function vs. steps comparison
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 3))
plt.subplot(1, 2, 1)
plt.plot(steps, costs_gd, "o-")
plt.title("grad. desc. on original ansatz")
plt.xlabel("steps")
plt.ylabel("cost")
plt.subplot(1, 2, 2)
plt.plot(steps, costs_rsel, "o-")
plt.title("rotoselect")
plt.xlabel("cycles")
plt.ylabel("cost")
plt.yticks(np.arange(-1.25, 0.80, 0.25))
plt.tight_layout()
plt.show()
##############################################################################
# Cost function surface for learned circuit structure
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. figure:: ../implementations/rotoselect/learned_structure.png
# :scale: 65%
# :align: center
# :alt: learned_structure
#
# |
#
# Finally, we plot the cost function surface for the newly discovered optimized
# circuit structure shown in the figure above. It is apparent from the minima in the plot that
# the new circuit structure is better suited for the problem.
fig = plt.figure(figsize=(6, 4))
# Fix: fig.gca(projection="3d") was deprecated in Matplotlib 3.4 and removed
# in 3.6; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection="3d")
X = np.linspace(-4.0, 4.0, 40)
Y = np.linspace(-4.0, 4.0, 40)
xx, yy = np.meshgrid(X, Y)
# plot cost for fixed optimal generators
Z = np.array([[cost_rsel([x, y], generators=generators) for x in X] for y in Y]).reshape(
    len(Y), len(X)
)
surf = ax.plot_surface(xx, yy, Z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel(r"$\theta_1$")
ax.set_ylabel(r"$\theta_2$")
ax.zaxis.set_major_locator(MaxNLocator(nbins=5, prune="lower"))
plt.show()
##############################################################################
# References
# ----------
#
# 1. Mateusz Ostaszewski, Edward Grant, Marcello Bendetti. "Quantum circuit structure learning."
# `arxiv:1905.09692 <https://arxiv.org/abs/1905.09692>`__, 2019.
| true
|
4c56e24d81da12a8a9720cd40819e1d4c0be5424
|
Python
|
rrriiikkk/FormationFlying
|
/formation_flying/negotiations/japanese.py
|
UTF-8
| 7,240
| 2.90625
| 3
|
[] |
no_license
|
'''
# =============================================================================
# This file contains the function to do a Japanese auction.
# =============================================================================
'''
def do_Japanese(flight):
    """Advance one step of a Japanese (ascending-clock) auction for *flight*.

    Managers open an auction at a starting price of 50 and raise it by 10
    each round; auctioneers (bidders) stay in while the current price is at
    or below their private maximum (potential fuel savings, scaled by 1.25
    when both parties are alliance members). When exactly one bidder
    remains, a formation is started or joined.

    Driven by ``flight.negotiation_state``: state 0 opens the auction,
    state 1 lets bidders pick an auction, and thereafter even states are
    the manager's price-update phase while odd states are the bidders'
    response phase.
    """
    if not flight.departure_time:
        raise Exception(
            "The object passed to the japanese protocol has no departure time, therefore it seems that it is not a flight.")
    # Only flights not currently flying in a committed formation negotiate.
    # NOTE(review): states 0/2 presumably mean "no formation"/"dissolved" --
    # confirm against the formation_state constants used elsewhere.
    if flight.formation_state == 0 or flight.formation_state == 2:
        if flight.negotiation_state == 0:
            if flight.manager == 1 and flight.auctioneer == 0:
                #each auction is opened with a bidding value of 50. This is done to speed up the auction
                flight.auctionvalue = 50
                flight.negotiation_state += 1
        #auctioneers wil search for the best potential auction and take part of that bidding if the starting value of 50 is below their private maximum bidding value
        elif flight.negotiation_state == 1:
            if flight.manager == 0 and flight.auctioneer == 1:
                formation_targets = flight.find_greedy_candidate() # function works also for Japanese protocol
                if not formation_targets == []:
                    potential_winning_manager = []
                    alliancemember = []
                    for manager in formation_targets:
                        #check if auction is open and calculate own max private bidding value
                        if manager.accepting_bids == True and not manager.auctionvalue == None:
                            potential_winning_manager.append(flight.calculate_potential_fuelsavings(manager))
                            alliancemember.append(manager.Alliance)
                    if not potential_winning_manager == [] and max(potential_winning_manager) > 0:
                        #increase own private max bidding value by 25% if auctioneer AND mananager are part of the alliance
                        if flight.Alliance == 1:
                            for i in range(len(alliancemember)):
                                if alliancemember[i] == 1:
                                    potential_winning_manager[i] = 1.25 * potential_winning_manager[i]
                        # Commit to the auction with the highest expected savings.
                        flight.potential_managers = formation_targets[potential_winning_manager.index(max(potential_winning_manager))]
                        flight.maxbid = max(potential_winning_manager)
                        bidexp = 0 #dummy value, only used as it is requested by the make_bid function
                        flight.negotiation_state += 1
                        if not flight.potential_managers.auctionvalue == None and flight.maxbid >= flight.potential_managers.auctionvalue:
                            flight.make_bid(flight.potential_managers, flight.potential_managers.auctionvalue, bidexp)
                        else:
                            flight.regenerate_manager_auctioneer()
                    #if there is no auction that is benefitial fot the auctioneer:
                    else:
                        flight.regenerate_manager_auctioneer()
                else:
                    flight.regenerate_manager_auctioneer()
            elif flight.manager == 1 and flight.auctioneer == 0:
                flight.negotiation_state += 1
        elif flight.negotiation_state % 2 == 0:
            #manager will make sure all auctioneers know how many auctioneers are taking part of the auction
            #if one auctioneer is left a formation is started
            if flight.manager == 1 and flight.auctioneer == 0:
                if len(flight.received_bids) > 1:
                    # Open auction, so each bidder should know how many other bidders there are
                    for bid in flight.received_bids:
                        flight.other_bids(bid.get("bidding_agent"), flight.auctionvalue)
                    flight.received_bids = [] #this list will be filled again in next step
                    flight.auctionvalue += 10 #each bidding round the bid value will be increased by 10, seemed reasonable
                    flight.negotiation_state += 1
                elif len(flight.received_bids) == 1:
                    # Exactly one bidder left: the clock auction is over.
                    winning_agent = flight.received_bids[0].get("bidding_agent")
                    #this is a ducttape fix that will only allow a manager or auctioneer to take part in 1 formation
                    #without this, at the destination a loop will be created at which formations leave each other but in the next step will rejoin each other...
                    flight.numberformation += 1
                    winning_agent.numberformation += 1
                    if flight.numberformation == 2 or winning_agent.numberformation == 2:
                        flight.regenerate_manager_auctioneer()
                        winning_agent.regenerate_manager_auctioneer()
                    # if winning_agent == flight.unique_id: #duckttape fix
                    #     flight.regenerate_manager_auctioneer()
                    bid_value = flight.auctionvalue
                    #very long if statement, somehow it is otherwise possible to let 2 formations join each other...
                    if len(flight.agents_in_my_formation) > 0 and not winning_agent.formation_state == 1 and not winning_agent.formation_state == 2 and len(winning_agent.agents_in_my_formation) == 0 and not winning_agent in flight.agents_in_my_formation:
                        flight.add_to_formation(winning_agent, bid_value, discard_received_bids=True)
                        print('large formation!!!')
                    elif len(flight.agents_in_my_formation) == 0 and len(winning_agent.agents_in_my_formation) == 0 and flight.manager == 1:
                        flight.start_formation(winning_agent, bid_value, discard_received_bids=True)
                    flight.regenerate_manager_auctioneer()
                elif len(flight.received_bids) == 0:
                    # Nobody bid at the current price: close the auction.
                    flight.regenerate_manager_auctioneer()
            else:
                flight.negotiation_state += 1
        elif flight.negotiation_state % 2 == 1:
            #in the previous step all auctioneers that are still in the bidding have been sent to all auctioneers, so they
            #'know' with how many they are still in the auction.
            #as in the previous step (that also is the next step) is looked at the amount of auctioneers in the auction,
            #and that a formation is started when only 1 auctioneer is left, it was decided to do nothing with that info in this step
            if flight.manager == 0 and flight.auctioneer == 1:
                # Stay in the auction only while the price is affordable.
                if flight.maxbid >= flight.potential_managers.auctionvalue:
                    bidexp = 2 #dummy
                    flight.make_bid(flight.potential_managers, flight.potential_managers.auctionvalue, bidexp)
                    flight.negotiation_state += 1
                else:
                    flight.regenerate_manager_auctioneer()
            else:
                flight.negotiation_state += 1
| true
|
8efe7e8282737c335870664f59353010ec522595
|
Python
|
insigh/open-cv-study
|
/windows/Binary Image/Big Image Binary.py
|
UTF-8
| 1,005
| 2.53125
| 3
|
[] |
no_license
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
def big_image_binary_demo(image, tile_h=256, tile_w=256, out_path=".\\big.jpg"):
    """Binarize a large BGR image tile-by-tile using per-tile Otsu thresholds.

    Computing Otsu's threshold separately per tile adapts the binarization
    to local lighting, which a single global threshold cannot do.

    Args:
        image: BGR input image (as returned by ``cv.imread``).
        tile_h, tile_w: tile size in pixels (generalized from the original
            hard-coded 256x256; defaults preserve the old behavior).
        out_path: file the binarized image is written to (generalized from
            the hard-coded ".\\big.jpg").
    """
    print(image.shape)
    h, w = image.shape[:2]
    gray = cv.cvtColor(src=image, code=cv.COLOR_BGR2GRAY)
    for row in range(0, h, tile_h):
        for col in range(0, w, tile_w):
            roi = gray[row:row+tile_h, col:col+tile_w]
            # With THRESH_OTSU the threshold is chosen automatically and
            # the `thresh=0` argument is ignored.
            ret, dst = cv.threshold(src=roi, thresh=0, maxval=255, type=cv.THRESH_OTSU)
            gray[row:row+tile_h, col:col+tile_w] = dst
            print(np.std(dst), np.mean(dst))
    cv.imwrite(filename=out_path, img=gray)
print("=========hello python!==========")
# Fix: the original literal mixed "\\" with invalid escape sequences
# (\z, \D, \d, \p, \8), which Python flags with DeprecationWarning and will
# eventually reject. A raw string yields the byte-identical path value.
src = cv.imread(filename=r"C:\Users\zcj\Desktop\docum\photos\8776db91659bf1b9abada9bbc9d9f15d0b085642.jpg")
cv.namedWindow(winname="input image", flags=cv.WINDOW_AUTOSIZE)
cv.imshow(winname="input image", mat=src)
print("=========Functions start here!==========")
big_image_binary_demo(image=src)
print("=========Functions end here!==========")
cv.waitKey(0)
cv.destroyAllWindows()
| true
|
33d7ac65eda31a9e1cf29ebf471d1f685aaaf344
|
Python
|
cprovencher/dcos-e2e
|
/src/dcos_e2e_cli/common/options.py
|
UTF-8
| 14,259
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
Click options which are common across CLI tools.
"""
import re
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import click
import yaml
from .utils import DEFAULT_SUPERUSER_PASSWORD, DEFAULT_SUPERUSER_USERNAME
from .validators import (
validate_path_is_directory,
validate_path_pair,
validate_paths_are_directories,
)
def _validate_cluster_id(
    ctx: click.core.Context,
    param: Union[click.core.Option, click.core.Parameter],
    value: Optional[Union[int, bool, str]],
) -> str:
    """
    Validate that a value is a valid cluster ID.

    Returns the ID as a string; raises ``click.BadParameter`` otherwise.
    """
    # The callback signature is fixed by Click; ctx and param are unused.
    del ctx, param
    cluster_id = str(value)
    # Same pattern Docker accepts for container names: one leading
    # alphanumeric character, then alphanumerics, '_', '.' and '-'.
    if not re.fullmatch('^[a-zA-Z0-9][a-zA-Z0-9_.-]*$', cluster_id):
        message = (
            'Invalid cluster id "{value}", only [a-zA-Z0-9][a-zA-Z0-9_.-] '
            'are allowed and the cluster ID cannot be empty.'
        ).format(value=value)
        raise click.BadParameter(message)
    return cluster_id
def _validate_environment_variable(
    ctx: click.core.Context,
    param: Union[click.core.Option, click.core.Parameter],
    value: Any,
) -> Dict[str, str]:
    """
    Validate that environment variables are set as expected.

    Each entry in ``value`` must look like ``KEY=VALUE``; the parsed pairs
    are returned as a dictionary.
    """
    # The callback signature is fixed by Click; ctx and param are unused.
    del ctx, param
    env: Dict[str, str] = {}
    for definition in value:
        key, separator, val = definition.partition('=')
        if not separator:
            message = (
                '"{definition}" does not match the format "<KEY>=<VALUE>".'
            ).format(definition=definition)
            raise click.BadParameter(message=message)
        env[key] = val
    return env
def _validate_dcos_configuration(
    ctx: click.core.Context,
    param: Union[click.core.Option, click.core.Parameter],
    value: Union[int, bool, str],
) -> Dict[str, Any]:
    """
    Validate that a given value is a file containing a YAML map.

    Returns the parsed configuration as a dictionary ({} when no file is
    given); raises ``click.BadParameter`` for unparseable or non-mapping
    content.
    """
    # We "use" variables to satisfy linting tools.
    for _ in (ctx, param):
        pass
    if value is None:
        return {}
    content = Path(str(value)).read_text()
    try:
        # Security fix: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from the file; safe_load only builds
        # plain data types.
        # TypeError added: dict() raises it (not ValueError) for scalar
        # YAML such as "3", which previously escaped as a crash.
        return dict(yaml.safe_load(content) or {})
    except (ValueError, TypeError):
        message = '"{content}" is not a valid DC/OS configuration'.format(
            content=content,
        )
    except yaml.YAMLError:
        message = '"{content}" is not valid YAML'.format(content=content)
    raise click.BadParameter(message=message)
def masters_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for the number of master nodes (``--masters``,
    default 1).
    """
    function = click.option(
        '--masters',
        type=click.INT,
        default=1,
        show_default=True,
        help='The number of master nodes.',
    )(command) # type: Callable[..., None]
    return function
def agents_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for the number of agent nodes (``--agents``,
    default 1).
    """
    function = click.option(
        '--agents',
        type=click.INT,
        default=1,
        show_default=True,
        help='The number of agent nodes.',
    )(command) # type: Callable[..., None]
    return function
def public_agents_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for the number of public agent nodes
    (``--public-agents``, default 1). The original docstring said
    "agents", copy-pasted from ``agents_option``.
    """
    function = click.option(
        '--public-agents',
        type=click.INT,
        default=1,
        show_default=True,
        help='The number of public agent nodes.',
    )(command) # type: Callable[..., None]
    return function
def environment_variables_option(command: Callable[..., None],
                                 ) -> Callable[..., None]:
    """
    An option decorator for setting environment variables
    (repeatable ``--env KEY=VALUE``).
    """
    function = click.option(
        '--env',
        type=str,
        callback=_validate_environment_variable,
        multiple=True,
        help='Set environment variables in the format "<KEY>=<VALUE>"',
    )(command) # type: Callable[..., None]
    return function
def superuser_username_option(command: Callable[..., None],
                              ) -> Callable[..., None]:
    """
    An option decorator for a superuser username
    (``--superuser-username``).
    """
    function = click.option(
        '--superuser-username',
        type=str,
        default=DEFAULT_SUPERUSER_USERNAME,
        help=(
            'The superuser username is needed only on DC/OS Enterprise '
            'clusters. '
            'By default, on a DC/OS Enterprise cluster, `admin` is used.'
        ),
    )(command) # type: Callable[..., None]
    return function
def superuser_password_option(command: Callable[..., None],
                              ) -> Callable[..., None]:
    """
    An option decorator for a superuser password
    (``--superuser-password``).

    NOTE(review): the help text mirrors the username option and claims the
    default is `admin` -- confirm DEFAULT_SUPERUSER_PASSWORD actually is
    "admin" before trusting the help output.
    """
    function = click.option(
        '--superuser-password',
        type=str,
        default=DEFAULT_SUPERUSER_PASSWORD,
        help=(
            'The superuser password is needed only on DC/OS Enterprise '
            'clusters. '
            'By default, on a DC/OS Enterprise cluster, `admin` is used.'
        ),
    )(command) # type: Callable[..., None]
    return function
def extra_config_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for supplying extra DC/OS configuration options
    (``--extra-config PATH``), parsed by ``_validate_dcos_configuration``.
    """
    function = click.option(
        '--extra-config',
        type=click.Path(exists=True),
        callback=_validate_dcos_configuration,
        help=(
            'The path to a file including DC/OS configuration YAML. '
            # Fix: help text read "will be added to add to a default".
            'The contents of this file will be added to a default '
            'configuration.'
        ),
    )(command) # type: Callable[..., None]
    return function
def workspace_dir_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for the workspace directory
    (``--workspace-dir``, must be an existing directory).
    """
    help_text = (
        'Creating a cluster can use approximately 2 GB of temporary storage. '
        'Set this option to use a custom "workspace" for this temporary '
        'storage. '
        'See '
        'https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir '
        'for details on the temporary directory location if this option is '
        'not set.'
    )
    function = click.option(
        '--workspace-dir',
        type=click.Path(exists=True),
        callback=validate_path_is_directory,
        help=help_text,
    )(command) # type: Callable[..., None]
    return function
def variant_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for a DC/OS variant
    (``--variant``: auto / oss / enterprise, default auto).
    """
    function = click.option(
        '--variant',
        type=click.Choice(['auto', 'oss', 'enterprise']),
        default='auto',
        help=(
            'Choose the DC/OS variant. '
            'If the variant does not match the variant of the given '
            'installer, an error will occur. '
            'Using "auto" finds the variant from the installer. '
            'Finding the variant from the installer takes some time and so '
            'using another option is a performance optimization.'
        ),
    )(command) # type: Callable[..., None]
    return function
def license_key_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for passing a license key file
    (``--license-key``, falls back to $DCOS_LICENSE_KEY_PATH).
    """
    function = click.option(
        '--license-key',
        type=click.Path(exists=True),
        envvar='DCOS_LICENSE_KEY_PATH',
        help=(
            'This is ignored if using open source DC/OS. '
            'If using DC/OS Enterprise, this defaults to the value of the '
            '`DCOS_LICENSE_KEY_PATH` environment variable.'
        ),
    )(command) # type: Callable[..., None]
    return function
def security_mode_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for the DC/OS Enterprise security mode
    (``--security-mode``: disabled / permissive / strict).
    """
    function = click.option(
        '--security-mode',
        type=click.Choice(['disabled', 'permissive', 'strict']),
        help=(
            'The security mode to use for a DC/OS Enterprise cluster. '
            'This overrides any security mode set in ``--extra-config``.'
        ),
    )(command) # type: Callable[..., None]
    return function
def copy_to_master_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A decorator for setting files to copy to master nodes before installing
    DC/OS (repeatable ``--copy-to-master LOCAL:REMOTE``).
    """
    function = click.option(
        '--copy-to-master',
        type=str,
        callback=validate_path_pair,
        multiple=True,
        help=(
            'Files to copy to master nodes before installing DC/OS. '
            'This option can be given multiple times. '
            'Each option should be in the format '
            '/absolute/local/path:/remote/path.'
        ),
    )(command) # type: Callable[..., None]
    return function
def dcos_login_uname_option(command: Callable[..., None],
                            ) -> Callable[..., None]:
    """
    A decorator for choosing the username to set the ``DCOS_LOGIN_UNAME``
    environment variable to (``--dcos-login-uname``).
    """
    function = click.option(
        '--dcos-login-uname',
        type=str,
        default=DEFAULT_SUPERUSER_USERNAME,
        help=(
            'The username to set the ``DCOS_LOGIN_UNAME`` environment '
            'variable to.'
        ),
    )(command) # type: Callable[..., None]
    return function
def dcos_login_pw_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A decorator for choosing the password to set the ``DCOS_LOGIN_PW``
    environment variable to (``--dcos-login-pw``).
    """
    function = click.option(
        '--dcos-login-pw',
        type=str,
        default=DEFAULT_SUPERUSER_PASSWORD,
        help=(
            'The password to set the ``DCOS_LOGIN_PW`` environment variable '
            'to.'
        ),
    )(command) # type: Callable[..., None]
    return function
def sync_dir_run_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A decorator for choosing a DC/OS checkout to sync before running
    commands (repeatable ``--sync-dir PATH``).
    """
    function = click.option(
        '--sync-dir',
        type=click.Path(exists=True),
        multiple=True,
        help=(
            'The path to a DC/OS checkout. '
            'Part of this checkout will be synced to all master nodes before '
            'the command is run. '
            'The bootstrap directory is synced if the checkout directory '
            # Fix: the next two fragments lacked trailing spaces, so the
            # rendered help read "variant.Integration" and "synced.Use".
            'variant matches the cluster variant. '
            'Integration tests are also synced. '
            'Use this option multiple times on a DC/OS Enterprise cluster to '
            'sync both DC/OS Enterprise and DC/OS Open Source tests.'
        ),
        callback=validate_paths_are_directories,
    )(command) # type: Callable[..., None]
    return function
def verbosity_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A decorator for setting the verbosity of logging
    (counting ``-v``/``--verbose`` flag).
    """
    function = click.option(
        '-v',
        '--verbose',
        help=(
            'Use verbose output. '
            'Use this option multiple times for more verbose output.'
        ),
        count=True,
    )(command) # type: Callable[..., None]
    return function
def test_env_run_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A decorator for choosing whether to run commands in a test environment
    (``--test-env`` / ``-te`` flag).
    """
    function = click.option(
        '--test-env',
        '-te',
        is_flag=True,
        help=(
            'With this flag set, environment variables are set and the '
            'command is run in the integration test directory. '
            'This means that "pytest" will run the integration tests.'
        ),
    )(command) # type: Callable[..., None]
    return function
def cluster_id_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    A Click option for choosing a new cluster ID
    (``-c``/``--cluster-id``, validated by ``_validate_cluster_id``).
    """
    function = click.option(
        '-c',
        '--cluster-id',
        type=str,
        default='default',
        callback=_validate_cluster_id,
        help=(
            'A unique identifier for the cluster. '
            'Use the value "default" to use this cluster for other '
            'commands without specifying --cluster-id.'
        ),
    )(command) # type: Callable[..., None]
    return function
def existing_cluster_id_option(command: Callable[..., None],
                               ) -> Callable[..., None]:
    """
    An option decorator for an existing cluster ID
    (``-c``/``--cluster-id``, no validation callback).
    """
    function = click.option(
        '-c',
        '--cluster-id',
        type=str,
        default='default',
        show_default=True,
        help='The ID of the cluster to use.',
    )(command) # type: Callable[..., None]
    return function
def genconf_dir_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    An option decorator for a custom "genconf" directory
    (``--genconf-dir``, must be an existing directory).
    """
    function = click.option(
        '--genconf-dir',
        type=click.Path(exists=True),
        callback=validate_path_is_directory,
        help=(
            'Path to a directory that contains additional files for the DC/OS '
            'installer. '
            'All files from this directory will be copied to the "genconf" '
            'directory before running the DC/OS installer.'
        ),
    )(command) # type: Callable[..., None]
    return function
def enable_selinux_enforcing_option(command: Callable[..., None],
                                    ) -> Callable[..., None]:
    """
    An option decorator for setting the SELinux mode to "enforcing"
    (``--enable-selinux-enforcing`` flag).
    """
    function = click.option(
        '--enable-selinux-enforcing',
        is_flag=True,
        help=(
            'With this flag set, SELinux is set to enforcing before DC/OS is '
            'installed on the cluster.'
        ),
    )(command) # type: Callable[..., None]
    return function
| true
|
a6fe7696f63e1f6500232c405fbe6d728b15d855
|
Python
|
lucasebs/TIC
|
/calc.py
|
UTF-8
| 685
| 3.375
| 3
|
[] |
no_license
|
import numpy as np
from text import Get_words
from print_entropy import Print_entropy
# NOTE(review): raw_input exists only in Python 2 (this script is Python 2);
# under Python 3 it must become input().
text = raw_input("Texto qualquer para conferencia de Entropia: ")
words, wordset = Get_words(text)
# Absolute frequency of each distinct word in the input text.
freq={word: words.count(word) for word in wordset}
word_count_information = []
entropy = 0
for word in wordset:
    # Shannon self-information I(w) = log2(1/p(w)); entropy accumulates
    # the expectation sum(p * I).
    probability = freq[word] / float(1.0 * len(words))
    self_information = np.log2(1.0/probability)
    entropy += (probability * self_information)
    word_count_information.append([word, freq[word], self_information])
# Sort from most to least surprising (highest self-information first).
sorted_word_count_information = list(sorted(word_count_information, key=lambda k:k[2], reverse=True))
Print_entropy(sorted_word_count_information, entropy)
| true
|
3cf13d705e15b7d0d4969f3d4725152d1acc9379
|
Python
|
liuyuzhou/ai_pre_sourcecode
|
/chapter2/slice_1.py
|
UTF-8
| 158
| 3.359375
| 3
|
[] |
no_license
|
import numpy as np
# Create an ndarray with the values 0..9.
ar_np = np.arange(10)
# Slice from index 2 up to (excluding) index 7 with step 2 -> [2 4 6].
s = slice(2, 7, 2)
print(ar_np[s])
| true
|
310788779ea095f2932ae43e6f2bcb08e3df5ae1
|
Python
|
yemikudaisi/Micro-GIS
|
/geometry/scale.py
|
UTF-8
| 462
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
class Scale(object):
    """A cartographic scale, renderable as a representative fraction."""

    def __init__(self, scale, denominator):
        """Store the scale ratio.

        Args:
            scale (float): numerator of the ratio.
            denominator (float): denominator of the ratio.

        Raises:
            TypeError: if either argument is not a float. (The original
                used ``assert``, which is silently stripped under ``-O``.)
        """
        if not isinstance(scale, float):
            raise TypeError('scale must be a float')
        if not isinstance(denominator, float):
            raise TypeError('denominator must be a float')
        self.numerator = scale
        self.denominator = denominator

    @property
    def representativeFraction(self):
        """Return the scale normalized to a ``"1:<x>"`` string.

        Both terms are divided by the numerator so the fraction is
        reported with a unit numerator, the denominator rounded to two
        decimals.
        """
        # numerator/numerator is always 1 for a nonzero numerator; the
        # division is kept so a zero numerator still raises, as before.
        numerator = int(round(self.numerator / self.numerator))
        denominator = round(self.denominator / self.numerator, 2)
        # Explicit arguments instead of the original `.format(**locals())`.
        return "{0}:{1}".format(numerator, denominator)
| true
|
f97c24ab02d6efd06d60b5865a8f51414d76038d
|
Python
|
quhuohuo/python
|
/lvlist/teacherPython/test2.py
|
UTF-8
| 175
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/python
def P(n,x):
    # Legendre polynomial P_n(x) via Bonnet's recursion:
    #   n*P_n(x) = (2n-1)*x*P_{n-1}(x) - (n-1)*P_{n-2}(x),
    # with base cases P_0 = 1, P_1 = x.
    if n == 0:
        return 1
    elif n == 1:
        return x
    # NOTE(review): this file is Python 2 (`print` statement below), so the
    # final `/ n` is *integer* division for integer inputs -- confirm floats
    # are intended before porting to Python 3, where results would differ.
    return ((2 * n - 1)*x*P(n - 1,x) - (n - 1)*P(n - 2,x)) / n
print P(2,3)
| true
|
f44d8b0c0fe6ca9f5a59c5f1decbc049d64c27ca
|
Python
|
ximitiejiang/PythonCodingSkill
|
/test/test_tbd.py
|
UTF-8
| 1,785
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 11:10:14 2019
@author: ubuntu
"""
import torch.nn as nn
class Registry(object):
    """Decorator-style registry mapping class names to class objects."""

    def __init__(self):
        print('this is init')
        self.module_dict = {}

    def __call__(self, class_type):
        # Used as `@registry_instance`: record the class under its
        # __name__ and hand the class back unchanged.
        print('this is call')
        self.module_dict[class_type.__name__] = class_type
        return class_type
# def __init__(self, name):
# self._name = name
# self._module_dict = dict()
# def _register_module(self, module_class):
# if not issubclass(module_class, nn.Module):
# raise TypeError(
# 'module must be a child of nn.Module, but got {}'.format(
# type(module_class)))
# module_name = module_class.__name__
# if module_name in self._module_dict:
# raise KeyError('{} is already registered in {}'.format(
# module_name, self.name))
# self._module_dict[module_name] = module_class
# def register_module(self, cls): # 装饰器函数:传入一个类cls,返回一个类cls
# self._register_module(cls)
# return cls
# def register_module(self, class_type):
# module_name = class_type.__name__
# self._module_dict[module_name] = class_type
# return class_type
#backbones = Registry() # 创建一个Registry对象
#@backbones.register_module # 挂一个装饰器:用对象的方法作为装饰器,传入的是一个类名,比如ResNet
registry = Registry()
@registry
class ResNet(nn.Module):
    """Toy module used to exercise the Registry decorator."""
    def __init__(self):
        super().__init__()
    # Bug fix: the method was misspelled `forwrad`, so nn.Module.__call__
    # would never have dispatched to it.
    def forward(self):
        pass
print(registry.module_dict)
#model = ResNet()
print(registry.module_dict)
| true
|
6169d44e3f3e069f589a901c06af0bbe19c06f36
|
Python
|
martinabr/pydgilib
|
/tests/test_logger_data.py
|
UTF-8
| 5,464
| 2.796875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""This module holds the automated tests for LoggerData."""
from pydgilib_extra import (
InterfaceData, LoggerData, INTERFACE_POWER, INTERFACE_SPI, INTERFACE_GPIO)
def test_init_logger_data():
    """Test instantiations.

    Covers the default constructor, construction from a list of
    interfaces, and construction from dicts holding empty values, plain
    (timestamps, values) pairs, and InterfaceData objects.
    """
    # Simple instantiation
    data = LoggerData()
    assert tuple(data[INTERFACE_POWER]) == ()
    assert tuple(data[INTERFACE_GPIO]) == ()
    # Instantiation from list of interfaces
    data = LoggerData([INTERFACE_GPIO, INTERFACE_POWER])
    assert tuple(data[INTERFACE_POWER]) == ()
    assert tuple(data[INTERFACE_GPIO]) == ()
    # Instantiation from dictionary with empty values
    data = LoggerData(
        {INTERFACE_POWER: InterfaceData(), INTERFACE_GPIO: InterfaceData()}
    )
    assert tuple(data[INTERFACE_POWER]) == ()
    assert tuple(data[INTERFACE_GPIO]) == ()
    # Instantiation from dictionary
    data = LoggerData(
        {INTERFACE_POWER: ([], []), INTERFACE_GPIO: ([], [])})
    assert tuple(data[INTERFACE_POWER]) == ()
    assert tuple(data[INTERFACE_GPIO]) == ()
    # Instantiation from dictionary with data ((timestamps, values) pairs)
    data = LoggerData(
        {INTERFACE_POWER: ([1], [2]), INTERFACE_GPIO: ([3], [4])})
    assert tuple(data[INTERFACE_POWER]) == ((1, 2),)
    assert tuple(data[INTERFACE_GPIO]) == ((3, 4),)
    # Instantiation from dictionary with InterfaceData
    data = LoggerData({
        INTERFACE_POWER: InterfaceData(([1], [2])),
        INTERFACE_GPIO: InterfaceData(([3], [4]))})
    assert tuple(data[INTERFACE_POWER]) == ((1, 2),)
    assert tuple(data[INTERFACE_GPIO]) == ((3, 4),)
def test__getattr__():
    """Tests for __getattr__ function.

    LoggerData supports both dict-style lookup by interface constant and
    attribute-style lookup by interface name (e.g. ``data.gpio``).
    """
    data = LoggerData({
        INTERFACE_POWER: ([1], [2]),
        INTERFACE_GPIO: ([], []),
        4: ([3, 4], [5, 6])})
    # Getting via dict
    assert tuple(data[INTERFACE_POWER]) == ((1, 2),)
    # Getting via attribute
    assert tuple(data.gpio) == ()
    # assert data["gpio"] == ([3], [4]) # Not in syntax
def test__setattr__():
    """Tests for __setattr__ function.

    Interfaces can likewise be assigned via dict-style or attribute-style
    access.
    """
    data = LoggerData({INTERFACE_POWER: ([1], [2])})
    # Setting as dict
    data[INTERFACE_GPIO] = InterfaceData(([3], [4]))
    assert tuple(data[INTERFACE_POWER]) == ((1, 2),)
    assert tuple(data[INTERFACE_GPIO]) == ((3, 4),)
    # Setting as attribute
    data.spi = InterfaceData(([5], [6]))
    assert tuple(data[INTERFACE_SPI]) == ((5, 6),)
def test__iadd__():
    """Tests for __iadd__ function.

    In-place addition appends samples per interface, accepting both plain
    dicts and other LoggerData objects, and creates interfaces that did
    not yet exist.
    """
    # Add dict for existing interface
    data = LoggerData({INTERFACE_POWER: ([1], [2])})
    data += {INTERFACE_POWER: ([2], [3])}
    assert tuple(data[INTERFACE_POWER]) == ((1, 2), (2, 3))
    # Add LoggerData for existing interface
    data = LoggerData({INTERFACE_POWER: ([1], [2])})
    data += LoggerData({INTERFACE_POWER: ([2], [3])})
    assert tuple(data[INTERFACE_POWER]) == ((1, 2), (2, 3))
    # Add dict and LoggerData with new interfaces
    data = LoggerData({INTERFACE_POWER: ([1], [2])})
    data += {INTERFACE_GPIO: ([2], [3])}
    data += LoggerData({4: ([3], [4])})
    assert tuple(data[INTERFACE_POWER]) == ((1, 2),)
    assert tuple(data[INTERFACE_GPIO]) == ((2, 3),)
    assert tuple(data[4]) == ((3, 4),)
    # Add dict and LoggerData for new and existing interfaces.
    # Interface 4 shows the (timestamps, values) pairing: ([3, 4], [5, 6])
    # iterates as (3, 5) and (4, 6).
    data = LoggerData({
        INTERFACE_POWER: ([1], [2]),
        4: ([3, 4], [5, 6])})
    data += {
        INTERFACE_POWER: ([2], [3]), INTERFACE_GPIO: ([2], [3])}
    data += LoggerData({INTERFACE_POWER: ([3], [4]),
                        INTERFACE_GPIO: ([1], [2])})
    assert tuple(data[INTERFACE_POWER]) == ((1, 2), (2, 3), (3, 4))
    assert tuple(data[INTERFACE_GPIO]) == ((2, 3), (1, 2))
    assert tuple(data[4]) == ((3, 5), (4, 6))
def test__add__():
    """Binary addition returns a deep-copied LoggerData."""
    first = LoggerData({INTERFACE_POWER: ([1], [2])})
    second = LoggerData({INTERFACE_POWER: ([2], [3])})
    combined = first + second
    assert tuple(combined[INTERFACE_POWER]) == ((1, 2), (2, 3))
    # Mutating an operand must not leak into the result (deep copy).
    first[INTERFACE_POWER] = ([4], [5])
    assert tuple(combined[INTERFACE_POWER]) == ((1, 2), (2, 3))
    # Dropping both operands (releasing their references) leaves the
    # result intact.
    del first
    del second
    assert tuple(combined[INTERFACE_POWER]) == ((1, 2), (2, 3))
    # Adding an empty dict still deep-copies the left operand.
    original = LoggerData({INTERFACE_POWER: ([1], [2])})
    combined = original + {}
    del original
    assert tuple(combined[INTERFACE_POWER]) == ((1, 2),)
def test_extend():
    """extend() appends InterfaceData samples to a single interface."""
    logdata = LoggerData({INTERFACE_POWER: ([1], [2])})
    extra = InterfaceData([[2], [3]])
    logdata.extend(INTERFACE_POWER, extra)
    assert tuple(logdata[INTERFACE_POWER]) == ((1, 2), (2, 3))
def test_length():
    """length() reports sample counts per interface or for all at once."""
    logdata = LoggerData({
        INTERFACE_POWER: ([1], [2]),
        INTERFACE_GPIO: ([], []),
        4: ([3, 4], [5, 6])})
    # Single-interface queries.
    assert logdata.length(INTERFACE_POWER) == 1
    assert logdata.length(INTERFACE_GPIO) == 0
    assert logdata.length(4) == 2
    # The no-argument form returns a mapping of every interface's length.
    lengths = logdata.length()
    assert lengths[INTERFACE_POWER] == 1
    assert lengths[INTERFACE_GPIO] == 0
    assert lengths[4] == 2
| true
|
b792965305b952795e0aa222c805441a2ba4652e
|
Python
|
zqfd9981/e-book-reader
|
/小说阅读器/typechange/typetobyte.py
|
UTF-8
| 2,467
| 3.09375
| 3
|
[] |
no_license
|
"""
类型转化函数,将'int'等类型封装转化为byte
"""
import socket
import enum
from struct import pack, unpack
from typechange.message_type import MessageType
from binascii import unhexlify
# Wire-format type tags: maps a Python type name to the one-byte tag that
# prefixes every serialized value (tag 0 is reserved for None; see
# _serialize_any and the _serialize_by_type dispatch table).
VAR_TYPE_INVERSE = {
    'int': 1,
    'float': 2,
    'str': 3,
    'list': 4,
    'dict': 5,
    'bool': 6,
    'bytearray': 7
}
def long_to_bytes(val, endianness='big'):
    """Convert a non-negative integer to its minimal byte representation.

    Args:
        val: non-negative integer to encode.
        endianness: 'big' (default) or 'little' byte order.

    Returns:
        bytes of minimal length; zero encodes as a single b'\\x00' byte.
    """
    # int.to_bytes replaces the original hex-format/unhexlify round-trip;
    # `or 1` keeps one byte of output for val == 0, matching the old
    # special case.
    nbytes = (val.bit_length() + 7) // 8 or 1
    return val.to_bytes(nbytes, endianness)
def _serialize_int(value):
    """Serialize an int as: 1-byte type tag, 4-byte big-endian length, payload.

    The parameter was renamed from `int`, which shadowed the builtin.
    """
    body = long_to_bytes(value)
    return bytes([VAR_TYPE_INVERSE['int']]) + pack('!L', len(body)) + body
def _serialize_bool(value):
    """Serialize a bool as: type tag, 4-byte length (always 1), b'\\x01'/b'\\x00'."""
    # Dropped the unused `body = value` assignment from the original.
    return bytes([VAR_TYPE_INVERSE['bool']]) + pack('!L', 1) + bytes([1 if value else 0])
def _serialize_float(value):
    """Serialize a float as: type tag, 4-byte length, IEEE-754 single payload."""
    payload = pack('f', value)
    return bytes([VAR_TYPE_INVERSE['float']]) + pack('!L', len(payload)) + payload
def _serialize_str(text):
    """Serialize a str as: type tag, 4-byte length, UTF-8 encoded payload."""
    payload = text.encode()
    return bytes([VAR_TYPE_INVERSE['str']]) + pack('!L', len(payload)) + payload
def _serialize_bytes(body):
    """Serialize raw bytes as: type tag, 4-byte length, payload unchanged."""
    header = bytes([VAR_TYPE_INVERSE['bytearray']])
    return header + pack('!L', len(body)) + body
def _serialize_list(items):
    """Serialize a list as: type tag, 4-byte body length, then each element
    serialized back-to-back with _serialize_any.

    Iterates the sequence directly instead of the original's
    range(len(...)) indexing; the parameter was renamed from `list`,
    which shadowed the builtin.
    """
    body = bytearray()
    for item in items:
        body += _serialize_any(item)
    return bytes([VAR_TYPE_INVERSE['list']]) + pack('!L', len(body)) + body
def _serialize_dict(mapping):
    """Serialize a dict as: type tag, 4-byte body length, then per entry a
    1-byte key length, the UTF-8 key, and the _serialize_any-encoded value.

    The parameter was renamed from `dict`, which shadowed the builtin.

    NOTE(review): the length byte stores the key's *character* count while
    the key is written UTF-8 encoded -- these differ for non-ASCII keys, and
    a single byte caps keys at 255; confirm keys are short ASCII.
    """
    body = bytearray()
    for item_key, value in mapping.items():
        item_body = _serialize_any(value)
        body += bytes([len(item_key)])
        body += item_key.encode()
        body += item_body
    return bytes([VAR_TYPE_INVERSE['dict']]) + pack('!L', len(body)) + body
# Dispatch table indexed by the VAR_TYPE_INVERSE tag; slot 0 is unused
# because tag 0 marks a None payload, handled directly in _serialize_any.
_serialize_by_type = [None, _serialize_int, _serialize_float, _serialize_str, _serialize_list, _serialize_dict,
                      _serialize_bool, _serialize_bytes]
def _serialize_any(obj):
    """Serialize any supported value; None encodes as the single byte 0x00.

    Raises:
        TypeError: if obj's type has no registered serializer (the original
        leaked a bare KeyError with no explanation).
    """
    if obj is None:
        return bytearray([0])
    try:
        type_byte = VAR_TYPE_INVERSE[type(obj).__name__]  # dispatch on type name
    except KeyError:
        raise TypeError('cannot serialize values of type %r' % type(obj).__name__) from None
    return _serialize_by_type[type_byte](obj)
def serialize_message(message_type, parameters=None):
    """Encode a message as one message-type byte followed by the serialized
    payload (None payloads encode as a single 0x00 byte)."""
    header = bytes([message_type.value])
    payload = _serialize_any(parameters)
    return header + payload
| true
|
fa83e9e29d5407a4de66b6008e89685bdcc310e0
|
Python
|
shaoguangleo/FastImaging-Python
|
/src/fastimgproto/skymodel/helpers.py
|
UTF-8
| 1,352
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
Basic classes used to help structure data related to skymodel, skyregions, etc.
"""
import astropy.units as u
import attr.validators
from astropy.coordinates import Angle, SkyCoord
from attr import attrib, attrs
@attrs
class SkyRegion(object):
    """
    Defines a circular region of the sky.

    Attributes:
        centre (astropy.coordinates.SkyCoord): Centre of the region.
        radius (astropy.coordinates.Angle): Angular radius of the region.
    """
    centre = attrib(validator=attr.validators.instance_of(SkyCoord))
    radius = attrib(validator=attr.validators.instance_of(Angle))
@attrs
class PositionError(object):
    """
    Represent positional uncertainty.

    (Mainly used for representing entries in the SUMSS catalog.)

    Attributes:
        ra (astropy.coordinates.Angle): Uncertainty in right ascension.
        dec (astropy.coordinates.Angle): Uncertainty in declination.
    """
    ra = attrib(validator=attr.validators.instance_of(Angle))
    dec = attrib(validator=attr.validators.instance_of(Angle))
@attrs
class SkySource(object):
    """
    Basic point source w/ flux modelled at a single frequency

    Attributes:
        position (astropy.coordinates.SkyCoord): Sky-coordinates of source.
        flux (astropy.units.Quantity): Source flux at measured frequency.
        frequency (astropy.units.Quantity): Measurement frequency.
        variable (bool): 'Known variable' flag.
    """
    position = attrib(validator=attr.validators.instance_of(SkyCoord))
    # Normalise units on construction: flux to Jansky, frequency to GHz.
    # NOTE(review): `convert=` was renamed `converter=` in attrs 17.4 and
    # removed in attrs 19.2 -- confirm the pinned attrs version supports it.
    flux = attrib(convert=lambda x: x.to(u.Jy))
    frequency = attrib(default=2.5 * u.GHz, convert=lambda x: x.to(u.GHz))
    variable = attrib(default=False)
| true
|
8e551171e49774f11e372787baef37da5f4082b3
|
Python
|
markWJJ/text_classifier_rl
|
/actor.py
|
UTF-8
| 3,458
| 2.734375
| 3
|
[] |
no_license
|
import tensorflow as tf
import tflearn
import numpy as np
from tensorflow.contrib.rnn import LSTMCell
class ActorNetwork(object):
    """
    action network
    use the state
    sample the action

    Builds a single-step LSTM policy head (TF1 graph mode) whose output is a
    two-class distribution [P(action 0), P(action 1)], trained with a
    REINFORCE-style loss: mean(-log pi * reward).
    """

    def __init__(self, sess, dim, optimizer, learning_rate, embeddings):
        # Global step drives the exponential learning-rate decay below
        # (x0.95 every 10000 steps).
        self.global_step = tf.Variable(0, trainable=False, name="ActorStep")
        self.sess = sess
        self.dim = dim
        self.learning_rate = tf.train.exponential_decay(learning_rate, self.global_step, 10000, 0.95, staircase=True)
        self.init = tf.random_uniform_initializer(-0.05, 0.05, dtype=tf.float32)
        # NOTE(review): only 'adam' is handled; any other optimizer string
        # leaves self.optimizer undefined -- confirm callers always pass 'adam'.
        if optimizer == 'adam':
            self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.wordvector = embeddings
        # Variable count before network construction, so the slice below
        # captures exactly this actor's own parameters.
        self.num_other_variables = len(tf.trainable_variables())
        self.input_state, self.input_word, self.scaled_out, self.out_state = self.create_actor_network()
        self.network_params = tf.trainable_variables()[self.num_other_variables:]
        # NOTE(review): action_gradient is declared but never used in this
        # class -- confirm whether it is dead code.
        self.action_gradient = tf.placeholder(tf.float32, [None, 2])
        # Policy-gradient loss: -log pi(a|s) weighted by the reward signal.
        self.log_scaled_out = tf.log(self.scaled_out)
        self.reward = tf.placeholder(tf.float32, [None, 2])
        self.loss = tf.reduce_mean(-self.log_scaled_out * self.reward)
        self.optimize = self.optimizer.minimize(self.loss)

    def create_actor_network(self):
        """Build the LSTM policy head.

        Returns:
            (input_state placeholders (c, h), input_word placeholder,
             action-probability tensor, next LSTM state tensors (c, h)).
        """
        intpu_state_c = tf.placeholder(tf.float32, shape = [None, self.dim], name="cell_state")
        intpu_state_h = tf.placeholder(tf.float32, shape = [None, self.dim], name="cell_state")
        input_word = tf.placeholder(tf.int32, shape=[None,])
        input_w = tf.nn.embedding_lookup(self.wordvector, input_word)
        cell = LSTMCell(self.dim, initializer=self.init)
        with tf.variable_scope('Actor/LSTM'):
            out, state1 = cell(input_w, (intpu_state_c, intpu_state_h))
        # The fully_connected layers are used only for their weight/bias
        # variables (t1.W, t2.W, t3.W, t1.b) in the manual matmul below.
        t1 = tflearn.fully_connected(state1.c, 1, name='Actor/FullyConnectedC')
        t2 = tflearn.fully_connected(state1.h, 1, name='Actor/FullyConnectedH')
        t3 = tflearn.fully_connected(input_w, 1, name='Actor/FullyConnectedWord')
        scaled_out = tflearn.activation(\
            tf.matmul(intpu_state_c, t1.W) + tf.matmul(intpu_state_h, t2.W) \
            + tf.matmul(input_w, t3.W) + t1.b,\
            activation = 'sigmoid')
        # Clip away exact 0/1 so tf.log in the loss stays finite, then emit
        # the two-class distribution [1 - p, p].
        s_out = tf.clip_by_value(scaled_out, 1e-5, 1 - 1e-5)
        scaled_out = tf.concat([1.0 - s_out, s_out], axis=1)
        input_state = (intpu_state_c, intpu_state_h)
        out_state = (state1.c, state1.h)
        return input_state, input_word, scaled_out, out_state

    def train(self, input_state, input_word, reward):
        """Run one optimisation step of the policy-gradient loss."""
        self.sess.run(self.optimize, feed_dict={
            self.input_state: input_state,
            self.input_word: input_word,
            self.reward: reward})

    def predict_target(self, input_state, input_word):
        """Return (action probabilities, next LSTM state) for a batch."""
        return self.sess.run([self.scaled_out, self.out_state], feed_dict={
            self.input_state: input_state,
            self.input_word: input_word})

    def lower_LSTM_state(self, state, inputs):
        """
        state : (state_c, state_h)

        NOTE(review): self.lower_cell_state1 / lower_cell_state /
        lower_cell_input are never defined in this class (and the unpacked
        state_c/state_h are unused), so calling this raises AttributeError --
        confirm whether this method is dead code.
        """
        state_c, state_h = state
        return self.sess.run(self.lower_cell_state1, feed_dict={
            self.lower_cell_state: state,
            self.lower_cell_input: inputs})
| true
|
5ec3599b8870a676d9bf4595883e12111cda7fa9
|
Python
|
harpninja/boids
|
/main.py
|
UTF-8
| 9,074
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
import maya.cmds as cmds
import random
import math
import imp
# Load the vector class by absolute path (Maya's bundled Python lacks a
# normal package path for this project).
v = imp.load_source('v', '/lhome/kristina/Documents/code/maya/boids/vector_class.py') # maya python 2.7 weirdness
# Extents of the simulation volume that boids wrap around (see
# Particle.borders below).
width = 100
height = 100
depth = 100 # was 150
class Particle(v.vec3):
    '''
    Class defining a single particle (a "boid") with classic flocking
    behaviour: separation, alignment and cohesion steering forces, plus the
    Maya scene plumbing (geometry, shader, keyframes) to visualise it.
    '''

    def __init__(self, name):
        self.name = name
        # NOTE(review): passing v.vec3 (not Particle) to super() resolves
        # __init__ on vec3's *parent*, skipping vec3's own initialiser --
        # confirm vec3 needs no explicit init here.
        super(v.vec3, self).__init__() # Complies with Python 2.7 conventions in Maya.
        # Python 3+ is super().__init__(0.0, 0.0, 0.0)
        # Random start position inside the box and a random initial velocity;
        # acceleration accumulates the steering forces each frame.
        self.position = v.vec3(random.uniform(0, width), random.uniform(0, height), random.uniform(0, depth))
        self.velocity = v.vec3(math.cos(random.uniform(0.4, 1)), math.sin(random.uniform(0.4, 1)), math.tan(random.uniform(0.4, 1)))
        self.acceleration = v.vec3(0.0, 0.0, 0.0)
        self.size = 1
        # Steering limits: speed cap and per-rule force cap.
        self.max_steering_speed = 0.4 * 6
        self.max_steering_force = 0.8 * 2 # was 6
        # Interaction radii for separation vs. alignment/cohesion.
        self.desired_separation = math.pow(self.size, 2) + 6 # was + 3
        self.neighbour_distance = width/2
        self.add_geometry()
        # self.point_boid()
        self.add_shader()

    def __repr__(self):
        return self.name

    def add_shader(self):
        '''
        Create a random coloured shader.
        Apply shader to object geometry.
        '''
        name_shader = 'aiStandardSurface' + self.name
        # Random colour biased toward blue/green (low red component).
        red = random.uniform(0.0, 0.1)
        green = random.uniform(0.0, 1.0)
        blue = random.uniform(0.3, 1.0)
        cmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=name_shader+'SG')
        cmds.shadingNode('aiStandardSurface', asShader=True, name=name_shader)
        cmds.setAttr(name_shader+'.baseColor', red, green, blue, type='double3')
        cmds.setAttr(name_shader+'.base', 0.85)
        cmds.setAttr(name_shader+'.specular', 1)
        cmds.setAttr(name_shader+'.emission', 1)
        cmds.setAttr(name_shader+'.emissionColor', red, green, blue, type='double3')
        cmds.setAttr(name_shader+'.specularColor', red, green, blue, type='double3')
        cmds.connectAttr(name_shader+'.outColor', name_shader+'SG.surfaceShader')
        cmds.sets(self.name, edit=True, forceElement=name_shader+'SG')

    def add_geometry(self):
        '''
        Create boid geometry (currently a NURBS sphere; alternatives kept
        commented for experimentation).
        '''
        # cmds.polyPlatonicSolid(n=self.name, solidType=0) # dodecahedron
        #cmds.polyCone(n=self.name, sx=24, sy=1, sz=0, ax=[0, -1, 0], rcp=0, cuv=3, ch=1, radius=self.size/2)
        cmds.sphere(n=self.name, radius=self.size)
        # cmds.polyCube(n=self.name)

    def set_key(self, frame):
        '''
        Set keyframe for boid at frame number.
        '''
        cmds.select(self.name)
        cmds.setKeyframe(self.name, t=frame)

    def point_boid(self):
        '''
        Point boid in the direction it is travelling in Maya scene.
        '''
        cmds.select(self.name)
        # Direction cosines of the velocity give the rotation angles.
        degrees_tuple = self.velocity.cosine_direction()
        cmds.rotate(degrees_tuple[0], degrees_tuple[1], degrees_tuple[2], absolute=True, componentSpace=True)

    def move_boid(self):
        '''
        Move boid in Maya scene (relative translation by current position).
        '''
        cmds.select(self.name)
        cmds.move(self.position.x, self.position.y, self.position.z, relative=True)

    def move_boid_absolute(self):
        '''
        Move boid in Maya scene (absolute placement at current position).
        '''
        cmds.select(self.name)
        cmds.move(self.position.x, self.position.y, self.position.z, absolute=True)

    def update(self):
        '''
        Update velocity, position and lifespan for this particle.
        Acceleration is reset each frame so the forces do not accumulate.
        '''
        self.velocity = self.velocity + self.acceleration
        self.position = self.position + self.velocity
        self.acceleration = self.acceleration * 0
        self.move_boid()
        self.point_boid()

    def apply_force(self, force):
        '''
        Add force vector to acceleration vector
        @param {float} force
        '''
        self.acceleration = self.acceleration + force

    def flock(self, others):
        '''
        Apply flocking behaviours: separation, alignment and cohesion,
        each weighted by 0.5 before being accumulated.
        '''
        separation_force = self.separate(others)
        separation_force = separation_force * 0.5
        self.apply_force(separation_force)
        alignment_force = self.align(others)
        alignment_force = alignment_force * 0.5
        self.apply_force(alignment_force)
        cohesion_force = self.cohesion(others)
        cohesion_force = cohesion_force * 0.5
        self.apply_force(cohesion_force)

    def seek(self, target):
        '''
        Steer particle towards target.
        Called by cohesion().
        '''
        desired = target - self.position # point from position to target
        desired = desired.unit_vector()
        desired = desired * self.max_steering_speed
        steer = desired - self.velocity
        steer.limit(self.max_steering_force)
        return steer

    def separate(self, others):
        '''
        Separate self from others.
        Separation is the average of all the vectors pointing away from any close others.

        NOTE(review): `sum` shadows the builtin here (and in align/cohesion),
        and when no neighbour is close this returns self.velocity unchanged
        as the "separation force" -- align() returns a zero vector in the
        analogous case; confirm the asymmetry is intentional.
        '''
        sum = v.vec3(0, 0, 0)
        count = 0
        steer = self.velocity
        for other in others:
            d = self.position.distance(other.position)
            if ((d > 0) and (d < self.desired_separation)):
                # calculate vector pointing away from other
                difference = self.position - other.position
                difference = difference.unit_vector()
                difference = difference / d # weight by distance. More flee from closer things.
                sum = sum + difference # average of all vectors pointing away from close particles.
                count += 1
        if count > 0:
            sum = sum / count
            sum = sum.unit_vector()
            sum = sum * self.max_steering_speed # go this way!
            steer = sum - self.velocity # steering = desired - velocity
            steer.limit(self.max_steering_force)
        return steer

    def align(self, others):
        '''
        Align self with others: steer toward the average velocity of all
        neighbours within neighbour_distance.
        '''
        sum = v.vec3(0, 0, 0)
        count = 0
        for other in others:
            d = self.position.distance(other.position)
            if ((d > 0) and (d < self.neighbour_distance)):
                sum = sum + other.velocity
                count += 1
        if count > 0:
            sum = sum / count
            sum = sum.unit_vector()
            sum = sum * self.max_steering_speed # go this way!
            steer = sum - self.velocity # steering = desired - velocity
            steer.limit(self.max_steering_force)
            return steer
        else:
            return v.vec3(0, 0, 0) # if no close boids then steering force is zero

    def cohesion(self, others):
        '''
        Cohesion of self with others: seek the centroid of all neighbours
        within neighbour_distance.
        '''
        sum = v.vec3(0, 0, 0)
        count = 0
        for other in others:
            d = self.position.distance(other.position)
            if ((d > 0) and (d < self.neighbour_distance)):
                sum = sum + other.position # sum location of others
                count += 1
        if count > 0:
            sum = sum / count
            return self.seek(sum)
        else:
            return v.vec3(0, 0, 0) # if no close boids then cohesion force is zero

    def borders(self):
        '''
        Move particle to wrap around borders of drawing area
        (toroidal topology, padded by desired_separation).
        '''
        if self.position.x < -self.desired_separation:
            self.position.x = width + self.desired_separation
        if self.position.y < -self.desired_separation:
            self.position.y = height + self.desired_separation
        if self.position.z < -self.desired_separation:
            self.position.z = depth + self.desired_separation
        if self.position.x > width + self.desired_separation:
            self.position.x = -self.desired_separation
        if self.position.y > height + self.desired_separation:
            self.position.y = -self.desired_separation
        if self.position.z > depth + self.desired_separation:
            self.position.z = -self.desired_separation
        self.move_boid_absolute()
        self.point_boid()

    def borders1(self):
        '''
        Move particle stay within borders of drawing area
        (bounce by reflecting velocity instead of wrapping).
        Not used.
        '''
        if self.position.x > width or self.position.x < 0:
            self.velocity.x = self.velocity.x * -1
        if self.position.y > height or self.position.y < 0:
            self.velocity.y = self.velocity.y * -1
        if self.position.z > depth or self.position.z < 0:
            self.velocity.z = self.velocity.z * -1
        self.move_boid_absolute()
        self.point_boid()
# Build the flock, then simulate it and keyframe every boid frame by frame.
boids = [Particle('cube' + str(index)) for index in range(2400)]

FRAMES = 420
for frame in range(1, FRAMES):
    print('frame = ', frame)
    for boid in boids:
        boid.borders()
        boid.flock(boids)
        boid.update()
        boid.set_key(frame)
| true
|
26394d522a287ace3a9741e27d7d487fa638a969
|
Python
|
LokeshKD/MachineLearning
|
/CaseStudy/dataProcessing/integerFeature.py
|
UTF-8
| 389
| 2.78125
| 3
|
[] |
no_license
|
import tensorflow as tf
def add_int_features(dataset_row, feature_dict):
    """Add the integer Feature objects (Store, Dept, IsHoliday, Size) from
    dataset_row to feature_dict as tf.train Int64List Features."""
    for column in ('Store', 'Dept', 'IsHoliday', 'Size'):
        int64_values = tf.train.Int64List(value=[dataset_row[column]])
        feature_dict[column] = tf.train.Feature(int64_list=int64_values)
| true
|
fee457acacb1d4721bd7f7beb84e60273f6ba4d9
|
Python
|
shubh4197/Python
|
/PycharmProjects/day3/program8.py
|
UTF-8
| 380
| 2.609375
| 3
|
[] |
no_license
|
# Read and show the first three lines of demo.txt.
with open('demo.txt', 'r') as fo:
    data = fo.readline()
    data1 = fo.readline()
    data2 = fo.readline()
print(data)
print(data1)
print(data2)

# Split the image into one chunk file per newline-delimited "line".
# NOTE(review): iterating a JPEG by b'\n' yields arbitrarily sized chunks --
# confirm this chunking is intentional rather than fixed-size reads.
with open('RA1511004010460 Shubham Das.jpg', 'rb') as fo:
    for i, line in enumerate(fo):
        print(line)
        # Context managers guarantee every handle is closed even if a
        # write fails (the original leaked fo1 on error and used a manual
        # counter instead of enumerate).
        with open('chunks/demo' + str(i) + '.jpg', 'wb') as fo1:
            fo1.write(line)
| true
|
35f24a0a9d55f7468b65e803659e7ae2d1cfbe85
|
Python
|
aplocher/rpi-bbmon
|
/WebMonitor/file_writer.py
|
UTF-8
| 213
| 3.484375
| 3
|
[] |
no_license
|
class FileWriter:
    """Minimal helper that overwrites a file with a single line of text."""

    def __init__(self, filename):
        # Target path; the file is (re)created on every write() call.
        self._filename = filename

    def write(self, text):
        """Write `text` plus a trailing newline, truncating previous content."""
        # `with` guarantees the handle is closed even if write() raises;
        # the original also shadowed the `file` builtin with its local.
        with open(self._filename, "w") as handle:
            handle.write(text + "\n")
| true
|
91e8de234eaed083d081eb26bd60dedf25b413f3
|
Python
|
AbiramiRavichandran/DataStructures
|
/Tree/LeafTraversalOfTwoBST.py
|
UTF-8
| 1,410
| 3.828125
| 4
|
[] |
no_license
|
class Node:
    """Binary-tree node holding a payload and optional children."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

    def is_leaf(self):
        """True when the node has no children."""
        return self.left is None and self.right is None
def has_same_leaf_traversal(t1, t2):
    """Return True when both trees yield the same leaf-value sequence.

    Both trees are walked lazily, one leaf at a time, so the comparison can
    stop at the first mismatch without flattening either tree.

    NOTE(review): leaves are visited in breadth-first order, which can differ
    from the usual left-to-right (in-order) leaf traversal -- confirm the
    intended ordering.

    Args:
        t1, t2: root Nodes, or None for an empty tree.

    Returns:
        bool: True iff the leaf sequences are identical (two empty trees
        compare equal; the original crashed on that case).
    """
    from collections import deque  # O(1) popleft; list.pop(0) is O(n)

    def next_leaf(queue):
        # Pop nodes breadth-first, enqueueing children, until a leaf
        # (or a None root) surfaces.
        node = queue.popleft()
        while node is not None and not node.is_leaf():
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
            node = queue.popleft()
        return node

    q1 = deque([t1])
    q2 = deque([t2])
    while q1 or q2:
        if not q1 or not q2:
            return False  # one traversal ran out of leaves first
        leaf1 = next_leaf(q1)
        leaf2 = next_leaf(q2)
        if leaf1 is None and leaf2 is None:
            continue  # both trees empty here: matching emptiness is equal
        if leaf1 is None or leaf2 is None:
            return False
        if leaf1.data != leaf2.data:
            return False
    return True
if __name__ == '__main__':
    # Two trees with different shapes but the same leaf sequence (4, 6, 7).
    first = Node(1)
    first.left = Node(2)
    first.right = Node(3)
    first.left.left = Node(4)
    first.right.left = Node(6)
    first.right.right = Node(7)

    second = Node(0)
    second.left = Node(1)
    second.right = Node(5)
    second.left.right = Node(4)
    second.right.left = Node(6)
    second.right.right = Node(7)

    print(has_same_leaf_traversal(first, second))
| true
|
448e30095ea4fa023a3091d5956d47fe8b81a18a
|
Python
|
LitRidl/checker-content
|
/cont11lab/problems/20/solution.py
|
UTF-8
| 772
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
u'''
перевести все мерные расстояние из миль(mi) в киллометры(km). Например 1000mi -> 1609km
28147326av 100mi 205mi\nami in 0mi 5MI man 1000mi
'''
from __future__ import print_function
from sys import stdin, stdout
from string import *
def cs(w):
    """Convert a token like '100mi' to kilometres, e.g. '100mi' -> '161km'."""
    miles = int(w[:-2], base=10)
    kilometres = int(round(1.609 * miles))
    return str(kilometres) + 'km'
def work_ok(w):
    """True when w is an optionally signed run of digits followed by 'mi'
    (case-sensitive)."""
    token = w[1:] if w[0] in '-+' else w
    if len(token) <= 2 or not token.endswith('mi'):
        return False
    return all(ch in digits for ch in token[:-2])
# For each stdin line, keep only valid '<digits>mi' tokens and echo them
# converted to kilometres; lines with no such token produce no output.
for line in stdin:
    candidates = [token for token in line.strip().split() if token.strip()]
    converted = [cs(token) for token in candidates if work_ok(token)]
    if converted:
        print(' '.join(converted))
| true
|
24301f4022d78ff3e78643c1776a0820c01c8777
|
Python
|
MsSusie/PythonProjects
|
/fillingInTheGaps.py
|
UTF-8
| 2,161
| 3.53125
| 4
|
[] |
no_license
|
# python 3
# program that finds all files with a given prefix eg. spam001.txt, spam002.txt, spam004.txt etc and
# locates any gaps in the numbering eg. missing spam003.txt when the file jumps to spam004.txt
# program should rename all the later files to close the gaps
import re, os, glob, shutil
from pathlib import Path
# Interactive loop: ask for a directory, then scan it for files named
# <prefix><number><ext> and try to rename later files to close numbering gaps.
# NOTE(review): the original file's indentation was lost; the nesting below
# is a best-effort reconstruction -- verify against the upstream source.
while True:
    userPathInput = input("Where is the file path that you want to check? ")
    # os.chdir returns None, so filePath is always None; the chdir side
    # effect is what matters (glob below is relative to the new cwd).
    filePath = os.chdir(userPathInput)
    p = Path(userPathInput)
    pathExists = os.path.exists(p)
    if not pathExists:
        print("That file path does not exist. Try again.")
        print('')
        continue
    else:
        for folders, subfolders, files in sorted(os.walk(p)): # traverse each file in folder and reads filename
            for file in files:
                # NOTE(review): `.` in the extension group is an unescaped
                # regex wildcard -- probably meant r'\.\w+'.
                fileRegex = re.compile(r'(\w{1,})(\d{1,})(.\w+)')
                fileMO = fileRegex.search(file)
                if fileMO == None:
                    print("Not a match.")
                else:
                    print(fileMO.group())
                    prefix = fileMO.group(1) # if file 'eggs001.txt' then prefix = eggs00
                    numberInFile = int(fileMO.group(2)) # if file 'eggs001.txt' then numberInFile = 1
                    suffix = fileMO.group(3) # extension of file
                    counter = numberInFile # use this to check against the file number pulled from fileMO.group(2) to check for gaps, starts at whatever the smallest file number is
                    for i in range(len(glob.glob('*[0-9].txt'))):
                        if numberInFile != counter: # finds the gap in the numbering by comparing file number against counter
                            renamedFile = prefix + str(counter) + suffix # rename the file by putting together the pieces
                            if not os.path.exists(file):
                                print(renamedFile) # test to see if it renamed correctly
                                shutil.move(os.path.abspath(file), os.path.abspath(renamedFile))
                        counter += 1
        break
| true
|
c6a2f52900752d7ce222943ef9fda075a0c5f780
|
Python
|
june2413/python
|
/EMPService.py
|
UTF-8
| 1,035
| 3.75
| 4
|
[] |
no_license
|
from EMP import EMP
from datetime import datetime
class EMPService:
    """Console helpers for creating EMP records and computing tenure."""

    # Declared as static methods so they can be used without creating an
    # EMPService instance.
    @staticmethod
    def readEmp():
        """Prompt on stdin for employee number, first/last name, email,
        phone and hire date, and return a populated EMP."""
        empno = input("사원번호를 입력하여 주십시오 : ")
        fname = input("이름을 입력하여 주십시오 : ")
        lname = input("성을 입력하여 주십시오 : ")
        email = input("이메일을 입력하여 주십시오 : ")
        phone = input("전화번호를 입력하여 주십시오 : ")
        hdate = input("입사일을 입력하여 주십시오 : ")
        return EMP(empno, fname, lname, email, phone, hdate)

    @staticmethod
    def computeDuty(emp: EMP):
        """Print the number of days worked since emp.hdate ('YYYY-MM-DD')."""
        hdate = emp.hdate.split('-')
        now = datetime.now()
        hire = datetime(int(hdate[0]), int(hdate[1]), int(hdate[2]))  # year, month, day
        days = now - hire
        # str(timedelta) looks like '123 days, 1:02:03'; split()[0] keeps '123'.
        # NOTE(review): for tenure under one day there is no 'days' prefix and
        # this prints the clock time instead -- confirm that case cannot occur.
        print(str(days).split()[0], '일')
| true
|
348a94152a810a80eb68776ab5ceaf0b228f5afb
|
Python
|
olive1618/csss17_multilayer_networks
|
/MultiTensor_Pkg/AUC.py
|
UTF-8
| 1,303
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
def calculate_AUC(M, Pos, Neg):
    """Compute the AUC for (unweighted) link prediction.

    Args:
        M: list of (score, A_ij) 2-tuples SORTED by score ascending, where
           A_ij >= 1 marks an observed edge and A_ij < 1 a non-edge.
        Pos: number of positive entries (graph edges).
        Neg: number of negative entries (graph non-edges).

    Returns:
        float AUC in [0, 1].
    """
    y = 0.
    bad = 0.
    # Scanning scores from small to large: a non-edge seen after y edges
    # means those y edges were ranked below it -> y misordered pairs.
    # BUG FIX: the original unpacked three values (`for m, a, n in M`) from
    # the documented 2-tuples, which fails at runtime; the weighted variant
    # below already used the correct 2-tuple unpacking.
    for m, a in M:
        if a >= 1.:
            y += 1
        else:
            bad += y
    return 1. - (bad / (Pos * Neg))
def calculate_AUC_weighted(M, max_w):
    """AUC for weighted adjacency entries A_ij.

    Ideally an observed edge with A_ij = 4 receives a larger score than any
    observed edge with A_ij < 4.

    Args:
        M: list of (score, A_ij) 2-tuples SORTED by score ascending; A_ij is
           an integer weight in [0, max_w].
        max_w: maximum edge weight, needed to size the per-weight counters.

    Returns:
        float AUC in [0, 1].
    """
    # seen[k] counts entries with A_ij == k processed so far.
    seen = np.zeros(max_w + 1)
    penalty = 0.
    for score, weight in M:
        seen[weight] += 1
        # Every strictly-heavier entry already encountered was ranked below
        # this lighter one -> one misordered pair each.
        penalty += seen[weight + 1:].sum()
    # Normalisation: total number of (lighter, heavier) pairs.
    norm = 0.
    for k in range(max_w + 1):
        heavier = seen[k + 1:].sum()
        norm += heavier * seen[k]
    return 1. - (penalty / float(norm))
| true
|
fd5900793e66d605b11225e795b65ffc67c10232
|
Python
|
yogii1981/Fullspeedpythoneducative1
|
/test3.py
|
UTF-8
| 505
| 4.59375
| 5
|
[] |
no_license
|
# Given an inRange(x,y) function, write a method that determine whether a pair (x,y) falls in
# the range ( x < 1/3 < y)/ Essentially you will be implementing the body aof a function that takes two numbers
# x and y and returns True if x <1/3 < y ; otherwise it returns False.
# Read the two numbers to test against the fixed pivot 1/3.
x = int(input("Enter a value:"))
y = int(input("Enter a value"))
def inrange(x, y):
    """Return (and print) whether the pair satisfies x < 1/3 < y."""
    # Evaluate the chained comparison once instead of twice, and drop the
    # redundant `True if ... else False` around an already-boolean result.
    result = x < 1 / 3 < y
    print(result)
    return result
print(inrange(x, y))  # report whether x < 1/3 < y for the values read above
| true
|
57054a5a1c7b1ec450f2ccb0daf2f0bbe6ac48b9
|
Python
|
phny/python-lab
|
/random_walk.py
|
UTF-8
| 1,027
| 3.8125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3.5
from random import choice
import matplotlib.pyplot as plt
class RandomWalk():
    """Generates the points of a random walk on the integer plane."""

    def __init__(self, num_points=5000):
        """Start a walk of `num_points` points at the origin."""
        self.num_points = num_points
        # Every walk begins at (0, 0).
        self.x_values = [0]
        self.y_values = [0]

    def fill_walk(self):
        """Append random steps until the walk holds num_points points."""

        def random_step():
            # Signed step of 0..4 units: one direction draw followed by one
            # distance draw (the same two choice() calls per axis as before).
            return choice([1, -1]) * choice([0, 1, 2, 3, 4])

        while len(self.x_values) < self.num_points:
            dx = random_step()
            dy = random_step()
            # A zero step on both axes would stall the walk -- redraw instead.
            if dx == 0 and dy == 0:
                continue
            self.x_values.append(self.x_values[-1] + dx)
            self.y_values.append(self.y_values[-1] + dy)
if __name__ == '__main__':
    # Build one walk and display it as a scatter plot.
    walk = RandomWalk()
    walk.fill_walk()
    plt.scatter(walk.x_values, walk.y_values, s=15)
    plt.show()
| true
|
f6cf889773ce23c267394868bae65f3bb61e4ec2
|
Python
|
HanQQ/KNN-Test
|
/knn.py
|
UTF-8
| 3,353
| 2.84375
| 3
|
[] |
no_license
|
#-*-coding:utf-8-*-
__author__ = 'Qiao'
from numpy import *
import operator
class knn:
    """k-nearest-neighbour classifier over a 3-feature tab-separated dataset.

    NOTE(review): this is Python 2 code (print statements, raw_input,
    dict.iteritems) and contains several apparent defects flagged inline --
    it does not look runnable as written. Indentation below is reconstructed.
    """

    # Initialisation:
    def __init__(self, Filename, Inx, K, Filetest):
        # Training data set:
        self.filetrain = Filename
        # Sample to classify:
        self.inX = Inx
        self.k = K
        # Test data set:
        self.filetest = Filetest

    # Parse the training file and return (matrix, labels, line count):
    def file2matrix(self, filetrain):
        try:
            fr = open(self.filetrain)
        except:
            filetrain = raw_input("错误.输入training数据集")
            fr = open(filetrain)
        numberOfLines = len(fr.readlines())
        dataSet = zeros((numberOfLines, 3))
        labels = []
        index = 0
        # NOTE(review): fr.readlines() above already consumed the file, so
        # this loop iterates an empty list and dataSet/labels stay empty --
        # the file needs fr.seek(0) or a reopen first.
        for line in fr.readlines():
            line = line.strip()
            listFromLine = line.split('\t')
            dataSet[index, :] = listFromLine[0:3]
            labels.append(int(listFromLine[-1]))
            index += 1
        return dataSet, labels, numberOfLines

    # Rescale every feature column to [0, 1]:
    def autoNorm(self, dataSet):
        minVals = dataSet.min(0)
        maxVals = dataSet.max(0)
        ranges = maxVals - minVals
        # NOTE(review): numpy's zeros() requires a shape argument, so this
        # call raises TypeError (and its result is overwritten below anyway).
        normDataSet = zeros()
        m = dataSet.shape[0]
        normDataSet = dataSet - tile(minVals, (m, 1))
        normDataSet = normDataSet / tile(ranges, (m, 1))
        return normDataSet, ranges, minVals

    # Classify self.inX against the normalised training data:
    def classify0(self, normdataSet, labels):
        # Compute distances to every training sample and rank them:
        dataSetSize = normdataSet.shape[0]
        # NOTE(review): autoNorm is a bound method, so passing self again
        # double-passes it -- this call raises TypeError.
        norminx = self.autoNorm(self, self.inX)
        diffMat = tile(norminx, (dataSetSize, 1)) - normdataSet
        sqDiffMat = diffMat ** 2
        sqDistances = sqDiffMat.sum(axis=1)
        distances = sqDistances ** 0.5
        sortedDistIndicies = distances.argsort()
        # Tally the labels of the k nearest samples:
        classCount = {}
        try:
            k = int(self.k)
        except:
            k = raw_input("错误。输入k值")
            k = int(k)
        # NOTE(review): the validated local `k` is ignored; the loop uses
        # self.k, which may still be a string.
        for i in range(self.k):
            voteIlabel = labels[sortedDistIndicies[i]]
            classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
        # Pick the majority label among the k neighbours:
        sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
        return sortedClassCount[0][0]

    # Hold-out evaluation of the classifier:
    def datingClassTest(self):
        hoRatio = 0.50
        # NOTE(review): file2matrix returns three values; unpacking two here
        # raises ValueError.
        datingDataMat, datingLabels = self.file2matrix(self.filetest)
        normMat, ranges, minVals = self.autoNorm(datingDataMat)
        m = normMat.shape[0]
        numTestVecs = int(m * hoRatio)
        errorCount = 0.0
        for i in range(numTestVecs):
            # NOTE(review): classify0 is defined with two parameters but is
            # called with four here -- the signatures need reconciling.
            classifierResult = self.classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
            print "分类结果为: %d, 正确结果为: %d" % (classifierResult, datingLabels[i])
            if (classifierResult != datingLabels[i]): errorCount += 1.0
        print "错误率为: %f" % (errorCount / float(numTestVecs))
        print errorCount
# Interactive driver (Python 2: raw_input / print statements).
print "k均值聚类学习"
Filename = raw_input("输入training数据集")
Filetest = raw_input("输入test数据集")
# A dataset could be split into train/test at a fixed ratio with graphlib.
K = raw_input("输入k值")
Inx = raw_input("输入待分类数据")
KNN = knn(Filename, Inx, K, Filetest)
# NOTE(review): knn defines no start() method, so this raises
# AttributeError -- datingClassTest() was probably intended.
KNN.start()
| true
|
bcd04a88f6af6cece9c278b4bfab0676c59f2c39
|
Python
|
Shuravin/python_practice
|
/W3Resources/Strings/15.py
|
UTF-8
| 503
| 4.59375
| 5
|
[] |
no_license
|
# 15. Write a Python function to create the HTML string with tags around the word(s). Go to the editor
# Sample function and result :
# add_tags('i', 'Python') -> '<i>Python</i>'
# add_tags('b', 'Python Tutorial') -> '<b>Python Tutorial </b>'
# Gather the sentence and the tag name (without angle brackets) to wrap it in.
# NOTE(review): the prompt contains the typo "whant" -> "want".
s = input("Type your sentence: ")
tag = input(
    "In which tag do you whant to wrap your sentence? Type without brackets (eg a, b, p): ")
def add_tags(attr="p", string="Python"):
    """Wrap `string` in the HTML tag `attr` and print the result.

    Example: add_tags('i', 'Python') -> '<i>Python</i>'

    Returns:
        str: the wrapped markup, matching the exercise's expected signature
        (the original only printed and implicitly returned None).
    """
    markup = "<" + attr + ">" + string + "</" + attr + ">"
    print(markup)
    return markup
add_tags(tag, s)  # print the user's sentence wrapped in the chosen tag
| true
|
c141f0d56d04e6c960e6de44e20ae8f7f46a8be0
|
Python
|
NoraXie/LeetCode_Archiver
|
/LeetCode_Archiver/pipelines.py
|
UTF-8
| 1,178
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import time
from LeetCode_Archiver.LocalFile import LocalFile
from LeetCode_Archiver.Statistic import Statistic
class QuestionDataPipeline(object):
    """Scrapy pipeline that accumulates question items and, when the spider
    closes, renders local files and statistical figures from them."""

    def open_spider(self, spider):
        # Collected question dicts and the set of languages seen across them.
        self.data_set = []
        self.language_set = set()

    def process_item(self, item, spider):
        """Normalise one scraped item; keep it only if it has submissions."""
        data = dict(item)
        data["id"] = int(data["id"])
        # Skip questions without any submission (idiomatic emptiness test
        # instead of the original len(...) == 0).
        if not data["submission_list"]:
            return
        self.data_set.append(data)
        # set.update replaces the original's redundant per-key membership
        # check before add().
        self.language_set.update(data["submission_list"].keys())

    def close_spider(self, spider):
        """Generate the output artefacts from everything collected."""
        print("Generating local files")
        LocalFile(self.data_set, self.language_set)
        print("Generating statistical figures")
        Statistic(self.data_set)
        print("Done!")
| true
|
84c97d77825d990da42eee6bf3c1a49e71c9dc62
|
Python
|
FalseG0d/gecko-dev
|
/third_party/rust/jsparagus/jsparagus/actions.py
|
UTF-8
| 10,213
| 3.0625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
from .ordered import OrderedFrozenSet
from .grammar import InitNt, Nt
class Action:
    """Base class for parser-table actions.

    `read` and `write` list the trait names an action consumes and mutates;
    together with __slots__ contents they drive the structural
    equality/hash machinery below, so identical actions can be shared.
    """
    __slots__ = [
        "read",   # Set of trait names which are consumed by this action.
        "write",  # Set of trait names which are mutated by this action.
        "_hash",  # Cached hash.
    ]

    def __init__(self, read, write):
        assert isinstance(read, list)
        assert isinstance(write, list)
        self.read = read
        self.write = write
        # Hash is computed lazily on first use (see __hash__).
        self._hash = None

    def is_inconsistent(self):
        """Returns True whether this action is inconsistent. An action can be
        inconsistent if the parameters it is given cannot be evaluated given
        its current location in the parse table. Such as CheckNotOnNewLine.
        """
        return False

    def is_condition(self):
        "Unordered condition, which accept or not to reach the next state."
        return False

    def condition(self):
        "Return the conditional action."
        raise TypeError("Action::condition_flag not implemented")

    def update_stack(self):
        """Change the parser stack, and resume at a different location. If this function
        is defined, then the function reduce_with should be implemented."""
        return False

    def reduce_with(self):
        "Returns the non-terminal with which this action is reducing with."
        assert self.update_stack()
        raise TypeError("Action::reduce_to not implemented.")

    def shifted_action(self, shifted_term):
        "Returns the same action shifted by a given amount."
        return self

    def maybe_add(self, other):
        """Implement the fact of concatenating actions into a new action which can have
        a single state instead of multiple states which are following each others."""
        # Flatten existing Seq wrappers so the result stays a flat sequence.
        actions = []
        if isinstance(self, Seq):
            actions.extend(list(self.actions))
        else:
            actions.append(self)
        if isinstance(other, Seq):
            actions.extend(list(other.actions))
        else:
            actions.append(other)
        # Conditions are unordered and stack updates must come last, so a
        # merge containing either (except a trailing stack update) is refused.
        if any([a.is_condition() for a in actions]):
            return None
        if any([a.update_stack() for a in actions[:-1]]):
            return None
        return Seq(actions)

    def __eq__(self, other):
        # Structural equality: same concrete class, same read/write traits
        # (order-insensitive), and same values in every subclass slot.
        if self.__class__ != other.__class__:
            return False
        if sorted(self.read) != sorted(other.read):
            return False
        if sorted(self.write) != sorted(other.write):
            return False
        for s in self.__slots__:
            if getattr(self, s) != getattr(other, s):
                return False
        return True

    def __hash__(self):
        # Cached: actions are effectively immutable once constructed.
        if self._hash is not None:
            return self._hash

        def hashed_content():
            # Mirrors the fields compared in __eq__ so equal actions hash
            # equally; slot values are hashed via their repr.
            yield self.__class__
            yield "rd"
            for alias in self.read:
                yield alias
            yield "wd"
            for alias in self.write:
                yield alias
            for s in self.__slots__:
                yield repr(getattr(self, s))
        self._hash = hash(tuple(hashed_content()))
        return self._hash

    def __lt__(self, other):
        # NOTE(review): ordering by hash gives an arbitrary but
        # process-stable total order; with string-hash randomisation it can
        # differ between runs -- confirm callers only need determinism
        # within one run.
        return hash(self) < hash(other)

    def __repr__(self):
        return str(self)
class Reduce(Action):
    """Define a reduce operation which pops N elements of he stack and pushes one
    non-terminal. The replay attribute of a reduce action corresponds to the
    number of stack elements which would have to be popped and pushed again
    using the parser table after reducing this operation. """
    __slots__ = 'nt', 'replay', 'pop'

    def __init__(self, nt, pop, replay = 0):
        # InitNt goals get a synthetic "Start_<goal>" alias in the write set.
        name = nt.name
        if isinstance(name, InitNt):
            name = "Start_" + name.goal.name
        super().__init__([], ["nt_" + name])
        self.nt = nt          # Non-terminal which is reduced
        self.pop = pop        # Number of stack elements which should be replayed.
        self.replay = replay  # List of terms to shift back

    def __str__(self):
        return "Reduce({}, {}, {})".format(self.nt, self.pop, self.replay)

    def update_stack(self):
        # A reduce always rewrites the parser stack.
        return True

    def reduce_with(self):
        return self

    def shifted_action(self, shifted_term):
        # Shifting past one more term defers one extra element to replay.
        return Reduce(self.nt, self.pop, replay = self.replay + 1)
class Lookahead(Action):
    """Lookahead assertion accepting or rejecting terminal sequences."""
    __slots__ = 'terms', 'accept'

    def __init__(self, terms, accept):
        assert isinstance(terms, (OrderedFrozenSet, frozenset))
        assert all(not isinstance(t, Nt) for t in terms)
        assert isinstance(accept, bool)
        super().__init__([], [])
        self.terms = terms    # Terminals the assertion is about.
        self.accept = accept  # Accept (True) or reject (False) on match.

    def is_inconsistent(self):
        # A lookahead restriction cannot be encoded in code; it has to be
        # solved using fix_with_lookahead.
        return True

    def is_condition(self):
        return True

    def condition(self):
        return self

    def __str__(self):
        return f"Lookahead({self.terms}, {self.accept})"

    def shifted_action(self, shifted_term):
        # Non-terminals are never restricted by a lookahead.
        if isinstance(shifted_term, Nt):
            return True
        # Shifting resolves the assertion to a plain boolean: True exactly
        # when membership in `terms` matches the accept polarity.
        return (shifted_term in self.terms) == self.accept
class CheckNotOnNewLine(Action):
    """Check whether the terminal at the given stack offset is on a new line.

    If it is, parsing produces an error; otherwise the rule is shifted.
    """
    __slots__ = 'offset',

    def __init__(self, offset=0):
        # assert offset >= -1 and "Smaller offsets are not supported on all backends."
        super().__init__([], [])
        self.offset = offset

    def is_inconsistent(self):
        # Only stacked terminals can be inspected. An offset >= 0 refers to
        # a terminal which is not yet shifted, so the action stays
        # inconsistent until the terminal reaches the stack.
        return self.offset >= 0

    def is_condition(self):
        return True

    def condition(self):
        return self

    def shifted_action(self, shifted_term):
        if isinstance(shifted_term, Nt):
            return True
        return CheckNotOnNewLine(self.offset - 1)

    def __str__(self):
        return f"CheckNotOnNewLine({self.offset})"
class FilterFlag(Action):
    """Continue to the next state only when the top of the flag stack holds
    the expected value for the given flag."""
    __slots__ = 'flag', 'value'

    def __init__(self, flag, value):
        super().__init__(["flag_" + flag], [])
        self.flag = flag
        self.value = value

    def is_condition(self):
        return True

    def condition(self):
        return self

    def __str__(self):
        return f"FilterFlag({self.flag}, {self.value})"
class PushFlag(Action):
    """Push a value onto the stack dedicated to the flag.

    The flag stack lives next to the default state machine and is popped by
    PopFlag, as if it were another reduce action. This is particularly useful
    to raise the parse table from LR(0) to LR(k) without as much state
    duplication.
    """
    __slots__ = 'flag', 'value'

    def __init__(self, flag, value):
        super().__init__([], ["flag_" + flag])
        self.flag = flag
        self.value = value

    def __str__(self):
        return f"PushFlag({self.flag}, {self.value})"
class PopFlag(Action):
    """Pop a value off the dedicated flag stack."""
    __slots__ = 'flag',

    def __init__(self, flag):
        # The flag is both read (its value) and written (it is removed).
        super().__init__(["flag_" + flag], ["flag_" + flag])
        self.flag = flag

    def __str__(self):
        return f"PopFlag({self.flag})"
class FunCall(Action):
    """Call a method whose arguments are read from the parser stack.

    `offset` is added to each argument offset, and `set_to` names the
    temporary variable which receives the call result.
    """
    __slots__ = 'method', 'offset', 'args', 'set_to'

    def __init__(self, method, alias_read, alias_write, args, set_to=None, offset=0):
        super().__init__(alias_read, alias_write)
        self.method = method    # Method to be called.
        self.offset = offset    # Offset added to each argument offset.
        self.args = args        # Tuple of argument offsets.
        self.set_to = set_to    # Variable name set with the result.

    def __str__(self):
        return f"FunCall({self.method}, {self.offset}, {self.args}, {self.set_to})"

    def shifted_action(self, shifted_term):
        # Shifting moves every stack-relative argument one slot further.
        return FunCall(self.method, self.read, self.write, self.args,
                       self.set_to, offset=self.offset + 1)
class Seq(Action):
    """Aggregate multiple actions into one statement.

    Only the first aggregated action may be a condition, and only the last
    one may update the parser stack; everything in between must be a plain,
    non-mutating action.
    """
    __slots__ = 'actions',

    def __init__(self, actions):
        assert isinstance(actions, list)
        reads = [alias for action in actions for alias in action.read]
        writes = [alias for action in actions for alias in action.write]
        super().__init__(reads, writes)
        # Keep an immutable, ordered sequence of the aggregated actions.
        self.actions = tuple(actions)
        assert all([not a.is_condition() for a in actions[1:]])
        assert all([not a.update_stack() for a in actions[:-1]])

    def __str__(self):
        return "Seq({})".format(repr(self.actions))

    def is_condition(self):
        # A sequence is conditional iff its first action is.
        return self.actions[0].is_condition()

    def condition(self):
        return self.actions[0]

    def update_stack(self):
        # Only the final action is allowed to touch the parser stack.
        return self.actions[-1].update_stack()

    def reduce_with(self):
        return self.actions[-1].reduce_with()

    def shifted_action(self, shift):
        return Seq([a.shifted_action(shift) for a in self.actions])
| true
|
9f184d4510aa25b6751e1a9ad0c0533c00ca5d42
|
Python
|
Rossel/Solve_250_Coding_Challenges
|
/chal111.py
|
UTF-8
| 111
| 3.484375
| 3
|
[] |
no_license
|
# Challenge 111: report when the message is at least 50 characters long.
x = "The days of Python 2 are almost over. Python 3 is the king now."
message_length = len(x)
if message_length >= 50:
    print("True!")
| true
|
696126e59b5036483badcc22bd49c7aa97bd2a34
|
Python
|
JetBrains-Research/similar-python-dependencies
|
/src/preprocessing/preprocess.py
|
UTF-8
| 4,357
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
from collections import Counter, defaultdict
from operator import itemgetter
import os
from typing import Dict, List, Optional
import numpy as np
import requirements
def process_sources() -> None:
    """
    Parse every stored version of the requirements files and save the
    processed result into a single file (processed/requirements_history.txt).

    Each output line has the form "owner_name/date;dep:1,dep:1,..." (the
    ":1" suffix is a leftover from topic modelling).

    :return: None
    """
    dataset = []
    # Get the list of all the projects ("owner/name" per line).
    with open("sources/requirements_history/download.txt") as fin:
        for line in fin:
            data = line.rstrip().split("/")
            dataset.append((data[0], data[1]))
    dataset = sorted(dataset, key=itemgetter(0, 1))
    # Iterate over all the versions from the old towards the new.
    with open("processed/requirements_history.txt", "w+") as fout:
        for date in ["2011-11-22", "2012-11-21", "2013-11-21", "2014-11-21", "2015-11-21",
                     "2016-11-20", "2017-11-20", "2018-11-20", "2019-11-20", "2020-11-19"]:
            # Iterate over all the projects.
            for repo in dataset:
                path = f"sources/requirements_history/{date}/{repo[0]}_{repo[1]}.txt"
                if os.path.exists(path):
                    with open(path) as fin_req:
                        try:
                            reqs_list = []
                            # Parse the `requirements.txt` and collect all
                            # dependency names.
                            reqs = requirements.parse(fin_req)
                            for req in reqs:
                                reqs_list.append(req.name)
                            if len(reqs_list) != 0:
                                fout.write(
                                    f"{repo[0]}_{repo[1]}/{date};{','.join([req + ':1' for req in reqs_list])}\n")
                        except Exception:
                            # Bug fix: this was a bare `except:`, which also
                            # swallowed KeyboardInterrupt/SystemExit. Skip
                            # only genuinely unparsable requirements files.
                            continue
def read_dependencies() -> Dict[str, List[str]]:
    """
    Read processed/requirements_history.txt and map each repo to the list of
    its dependencies, lowercased and with "-" normalized to "_".

    :return: dictionary {repo: [dependencies], ...}
    """
    dependencies = {}
    with open("processed/requirements_history.txt") as fin:
        for line in fin:
            fields = line.rstrip().split(";")
            dependencies[fields[0]] = [
                entry.split(":")[0].lower().replace("-", "_")
                for entry in fields[1].split(",")
            ]
    return dependencies
def read_idfs() -> Dict[str, float]:
    """Read models/idfs.txt into a {library: idf weight} dictionary."""
    idfs = {}
    with open("models/idfs.txt") as fin:
        for line in fin:
            fields = line.rstrip().split(";")
            idfs[fields[0]] = float(fields[1])
    return idfs
def read_repo_list() -> np.ndarray:
    """Read models/repos_list.txt into an array of repo identifiers."""
    with open("models/repos_list.txt") as fin:
        names = [line.rstrip() for line in fin]
    return np.array(names)
def read_library_names() -> List[str]:
    """Read the known library names, one per line, stripped of whitespace.

    Bug fix: the original opened the file inside a comprehension and never
    closed it; use a context manager so the handle is always released.
    """
    with open("models/libraries_embeddings_dependencies.txt") as fin:
        return [line.strip() for line in fin]
def read_library_embeddings() -> np.ndarray:
    """Load the precomputed library embedding matrix from disk.

    Row i corresponds to the i-th name from read_library_names() --
    presumably; verify against the model-building code.
    """
    return np.load('models/libraries_embeddings.npy')
def years_requirements() -> None:
    """
    Save, for each year, its dependencies ordered by popularity (count desc,
    then name desc) to dynamics/years.txt.

    :return: None.
    """
    deps_by_repo = read_dependencies()
    # Group all dependencies by the year encoded in "repo/date" keys.
    per_year = defaultdict(list)
    for repo_id, deps in deps_by_repo.items():
        per_year[repo_id.split("/")[1]].extend(deps)
    # Rank each year's dependencies by frequency (ties broken by name).
    ranked = {}
    for year, deps in per_year.items():
        counted = Counter(deps).items()
        ranked[year] = [name for name, _ in
                        sorted(counted, key=itemgetter(1, 0), reverse=True)]
    with open("dynamics/years.txt", "w+") as fout:
        for year, names in ranked.items():
            fout.write(f"{year};{','.join(names)}\n")
def project_to_library_embedding(dependencies: List[str]) -> Optional[np.ndarray]:
    """Project a dependency list into the library embedding space.

    The result is the mean of the embeddings of all known dependencies,
    normalized to unit length; returns None when no dependency is known.
    """
    known_libraries = read_library_names()
    library_embeddings = read_library_embeddings()
    # Performance fix: list.index() made this O(len(known) * len(deps)).
    # Build a name -> row map once; setdefault keeps the *first* occurrence,
    # matching list.index() semantics in case of duplicate names.
    index_of = {}
    for i, name in enumerate(known_libraries):
        index_of.setdefault(name, i)
    inds = [index_of[dep] for dep in dependencies if dep in index_of]
    if len(inds) == 0:
        return None
    embedding = library_embeddings[inds].mean(axis=0)
    return embedding / np.linalg.norm(embedding)
| true
|
b3b0b8bb76c3c958628e771b2bfec4cb3c5499a9
|
Python
|
blodyx/AdventOfCode2020
|
/3_extra.py
|
UTF-8
| 342
| 2.6875
| 3
|
[] |
no_license
|
# Advent of Code 2020, day 3 variant: count trees ('#') hit while sliding
# down a grid that repeats horizontally, for a range of slopes.
# NOTE(review): this is Python 2 code (print statement on the last line).
lines = open("input/3.input", "r").readlines()
tests = []
maxx = len(lines)
# Exclude the trailing newline from the usable row width.
maxy = len(lines[0]) - 1
# NOTE(review): slopes iterate 10..18; AoC's official slopes are much
# smaller (1..7) -- confirm the intended range.
for cx in range(10, 19):
    for cy in range(10, 19):
        tree = x = y = 0
        while x < maxx:
            if lines[x][y] == '#':
                tree += 1
            x += cx
            y += cy
            # Wrap around: the map repeats horizontally.
            if y >= maxy:
                y = y - maxy
        tests.append([cx, cy, tree])
print tests
| true
|
f1b8a4d0f66502a2ca94cc20b1cf9e0043ee2ca4
|
Python
|
LoneElk/Infrared-IPS
|
/test/test_building_model.py
|
UTF-8
| 12,022
| 2.5625
| 3
|
[] |
no_license
|
__author__ = 'jim'
import unittest
import sys
import json
import logging
import globals.building_model as bm
from globals.global_constants import *
def load_map(file_name):
    """Load a GeoJSON map for the specified room.

    Returns the parsed dict, or an empty dict when the file is missing or
    malformed; errors are logged rather than raised.
    """
    map_data = {}
    try:
        with open(file_name, 'r') as fs:
            map_data = json.load(fs)
    except IOError as e:
        logging.error("I/O error({0}): {1}".format(e.errno, e.strerror))
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors, keep the logging.
        logging.error("Unexpected error: %s - %s", str(sys.exc_info()[0]), str(sys.exc_info()[1]))
    return map_data
class SurfaceUnitTests(unittest.TestCase):
    """Unit tests for the Surface hierarchy (Surface, Floor, Wall)."""

    def test_surface_attributes(self):
        # The base Surface keeps whatever type string it is given.
        self.surface = bm.Surface('test', [], False)
        self.assertEquals(self.surface.surface_type, 'test')
        self.assertItemsEqual(self.surface.linearring, [])
        # Floor/Wall subclasses pin their own surface type.
        self.surface = bm.Floor([])
        self.assertEquals(self.surface.surface_type, SurfaceType.floor)
        self.assertItemsEqual(self.surface.linearring, [])
        self.surface = bm.Wall([])
        self.assertEquals(self.surface.surface_type, SurfaceType.wall)
        self.assertItemsEqual(self.surface.linearring, [])
        self.assertTrue(self.surface.polygon is None)

        # Single polygon, no interior ring.
        poly = [[[0, 0], [0, 1], [1, 1], [1, 0]]]
        self.surface = bm.Wall([poly])
        self.assertEquals(self.surface.surface_type, SurfaceType.wall)
        self.assertItemsEqual(self.surface.linearring, [poly])

        # Multiple polygons: the expected exterior ring below shows they are
        # combined into a single polygon.
        poly2 = [[[0, 0], [0, 2], [2, 2]]]
        self.surface = bm.Wall([poly, poly2])
        self.assertEquals(self.surface.surface_type, SurfaceType.wall)
        self.assertItemsEqual(self.surface.linearring, [poly, poly2])
        self.assertItemsEqual(self.surface.polygon.exterior.coords[:],
                              [(0.0, 0.0), (0.0, 1.0), (0.0, 2.0), (2.0, 2.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)])
class ContainerTests(unittest.TestCase):
    """Tests for Container/Room/Level construction from GeoJSON collections."""

    def setUp(self):
        # First fixture ("hall"): Polygon floor plus a MultiPolygon wall
        # made of three disjoint wall sections.
        self.container_geojson_obj = {"type": "FeatureCollection",
                                      "features": [
                                          {"geometry": {"type": "Polygon",
                                                        "coordinates": [
                                                            [[539, 2], [282, 1],
                                                             [266, 120], [252, 94],
                                                             [125, 96], [117, 13],
                                                             [0, 14], [1, 301],
                                                             [124, 300], [127, 231],
                                                             [540, 233]]]},
                                           "type": "Feature",
                                           "properties": {"geomType": "floor",
                                                          "accessible": True,
                                                          "colour": [79, 233, 252],
                                                          "name": "hall",
                                                          "level": 0},
                                           "bbox": [0, 0, 542, 303]}, {
                                              "geometry": {"type": "MultiPolygon",
                                                           "coordinates": [
                                                               [[
                                                                   [0, 0],
                                                                   [0, 35],
                                                                   [1, 35],
                                                                   [3, 118],
                                                                   [18, 118],
                                                                   [18, 34],
                                                                   [97, 33],
                                                                   [101, 117],
                                                                   [297, 114],
                                                                   [304, 18],
                                                                   [515, 22],
                                                                   [516, 113],
                                                                   [542, 121],
                                                                   [542, 0],
                                                                   [0, 0]
                                                               ]],
                                                               [[
                                                                   [100, 303],
                                                                   [125, 303],
                                                                   [125, 236],
                                                                   [394, 232],
                                                                   [394, 207],
                                                                   [101, 207],
                                                                   [99, 277],
                                                                   [21, 276],
                                                                   [21, 209],
                                                                   [4, 209],
                                                                   [2, 293],
                                                                   [101, 299],
                                                                   [100, 303]
                                                               ]],
                                                               [[
                                                                   [542, 233],
                                                                   [542, 194],
                                                                   [483, 209],
                                                                   [483, 232],
                                                                   [542, 233]
                                                               ]]
                                                           ]},
                                              "type": "Feature",
                                              "properties": {"geomType": "wall",
                                                             "accessible": False,
                                                             "colour": [0, 0, 0],
                                                             "name": "walls",
                                                             "level": 0},
                                              "bbox": [0, 0, 542, 303]}],
                                      "properties": {
                                          "srcImage": "/Users/jim/Dropbox/Documents/Msc/Thesis/"
                                                      "A4/Infrared-IPS/location_rules_engine/"
                                                      "utilities/floor_plan/floorplan1.jpg",
                                          "scale": 100, "originalBBox": [617, 16, 1159, 319],
                                          "name": "hall", "imageSize": [303, 542, 3],
                                          'position': {'y': 16, 'x': 721, 'referenceFrame': 'level', 'heading': 0}}}
        # Second fixture ("livingRoom"): simple Polygon floor and wall.
        self.container_geojson_obj2 = {"type": "FeatureCollection", "features": [
            {"geometry": {"type": "Polygon", "coordinates": [[[1, 2], [2, 450], [525, 451], [526, 4]]]},
             "type": "Feature",
             "properties": {"geomType": "floor", "accessible": True, "colour": [186, 187, 248], "name": "livingRoom",
                            "level": 0}, "bbox": [0, 0, 531, 452]}, {"geometry": {"type": "Polygon", "coordinates": [
                [[0, 0], [0, 450], [517, 450], [530, 437], [530, 11], [517, 0], [379, 0], [379, 22], [505, 25],
                 [505, 425], [21, 422], [21, 26], [290, 22], [290, 0], [0, 0]]]}, "type": "Feature",
             "properties": {"geomType": "wall",
                            "accessible": False,
                            "colour": [0, 0, 0],
                            "name": "walls", "level": 0},
             "bbox": [0, 0, 542, 303]}], "properties": {
            "srcImage": "/Users/jim/Dropbox/Documents/Msc/Thesis/A4/Infrared-IPS/location_rules_engine/utilities/floor_plan/floorplan1.jpg",
            "scale": 100, "originalBBox": [721, 226, 1252, 678], "name": "livingRoom",
            "imageSize": [452, 531, 3],
            'position': {'y': 16, 'x': 721, 'referenceFrame': 'level', 'heading': 0}
        }}
        # Full floor plan stored on disk.
        # NOTE(review): machine-specific absolute path; tests depending on it
        # only pass on the original author's machine.
        self.floor_plan_geojson_file = '/Users/jim/Dropbox/Documents/Msc/Thesis/A4/Infrared-IPS/configuration/floorplan1.json'

    def test_container_attributes(self):
        # MultiPolygon wall fixture.
        self.container = bm.Container(ContainerType.room, self.container_geojson_obj)
        self.assertEqual(self.container.name, self.container_geojson_obj['properties']['name'])
        self.assertEquals(self.container.container_type, ContainerType.room)
        self.assertEquals(self.container.container_geojson_obj, self.container_geojson_obj)
        self.assertItemsEqual(self.container.floor_linearring,
                              [self.container_geojson_obj['features'][0]['geometry']['coordinates']])
        self.assertItemsEqual(self.container.wall_linearring,
                              self.container_geojson_obj['features'][1]['geometry']['coordinates'])
        self.assertItemsEqual(self.container.floor.polygon.exterior.coords[:],
                              [(539.0, 2.0), (282.0, 1.0), (266.0, 120.0), (252.0, 94.0), (125.0, 96.0),
                               (117.0, 13.0), (0.0, 14.0), (1.0, 301.0),
                               (124.0, 300.0), (127.0, 231.0), (540.0, 233.0), (539.0, 2.0)])

        # Simple Polygon fixture.
        self.container = bm.Container(ContainerType.room, self.container_geojson_obj2)
        self.assertEqual(self.container.name, self.container_geojson_obj2['properties']['name'])
        self.assertItemsEqual(self.container.floor_linearring,
                              [self.container_geojson_obj2['features'][0]['geometry']['coordinates']])
        self.assertItemsEqual(self.container.wall_linearring,
                              [self.container_geojson_obj2['features'][1]['geometry']['coordinates']])
        self.assertItemsEqual(self.container.floor.polygon.exterior.coords[:],
                              [(1.0, 2.0), (2.0, 450.0), (525.0, 451.0), (526.0, 4.0), (1.0, 2.0)])

        # Floor plan loaded from disk (smoke test: construction only).
        self.container = bm.Container(ContainerType.room, load_map(self.floor_plan_geojson_file))
        # print self.container.floor.polygon.exterior.coords[:]
        # print self.container.walls.polygon.exterior.coords[:]
        # print self.container.usable_area.polygon

    def test_room_attributes(self):
        self.room = bm.Room(self.container_geojson_obj)
        self.assertEqual(self.room.name, self.container_geojson_obj['properties']['name'])
        self.assertEquals(self.room.container_type, ContainerType.room)
        self.assertEquals(self.room.container_geojson_obj, self.container_geojson_obj)
        self.room = bm.Room(load_map(self.floor_plan_geojson_file))

    def test_level_attributes(self):
        # Smoke test: Level construction from both fixtures.
        self.level = bm.Level(self.container_geojson_obj)
        self.level = bm.Level(load_map(self.floor_plan_geojson_file))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| true
|
203eb553422fa0c063e352723b4c5ebc45f68e5a
|
Python
|
mattmar2410/spec_analysis
|
/spec_analysis/spec_analysis/fix_spe.py
|
UTF-8
| 877
| 2.609375
| 3
|
[] |
no_license
|
import os
from copy import deepcopy
import numpy as np
def fix_spe(fname, fname_new=None):
    '''
    Write a copy of a GammaVision .spe spectrum whose zeroed energy
    calibration (under $MCA_CAL) is replaced by the identity calibration
    "1.0 1.0 1.0".

    fname: the spectrum file that needs to be fixed
    fname_new: output file name; defaults to "<fname>_fixed<ext>"
    Returns the output file name.
    '''
    if fname_new is None:
        root, ext = os.path.splitext(fname)
        fname_new = '{}_fixed{}'.format(root, ext)
    with open(fname, 'r') as infile:
        original = [line.rstrip('\n') for line in infile]
    patched = deepcopy(original)
    for idx, line in enumerate(original):
        if not line.startswith('$MCA_CAL'):
            continue
        # The calibration coefficients live two lines below the marker.
        coeffs = [float(tok) for tok in original[idx + 2].split()]
        if np.isclose(sum(coeffs), 0.):
            patched[idx + 2] = '1.0 1.0 1.0'
            print('Fixed line:', idx + 2)
    with open(fname_new, 'w') as outfile:
        outfile.write('\n'.join(patched) + '\n')
    return fname_new
| true
|
f67efe29b21082a949d708bf9381ad412bf30872
|
Python
|
marcvifi10/Curso-Python
|
/Python 1/1 - Introducion/10 - Salida de datos/Salida_de_datos.py
|
UTF-8
| 180
| 4.125
| 4
|
[] |
no_license
|
# Output examples: three equivalent ways to format the same greeting.
nombre = "Marc"
edad = 22
# Comma-separated arguments (print inserts the spaces).
print("Hola",nombre,"tienes",edad,"años.")
# str.format with positional placeholders.
print("Hola {} tienes {} años.".format(nombre,edad))
# f-string interpolation.
print(f"Hola {nombre} tienes {edad} años.")
| true
|
e6f9cf4f0f688084342266687a8b185983d6631b
|
Python
|
FKomendah/PYTHON_TIZI
|
/second_assignment/dirb_look_alike_with_exception.py
|
UTF-8
| 582
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Python 2 script: minimal dirb-style URL brute forcer driven by a wordlist.
import requests as req
import sys

def statuschecker(url, path):
    '''Return the HTTP status code for GET url + path.'''
    r = req.get(url + path)
    code = r.status_code
    return code

# NOTE(review): the wordlist handle is opened here and never closed.
path_file = open("/usr/share/dirb/wordlists/common.txt", "r")
try:
    if len(sys.argv) < 2:
        print "Usage: %s http://target.url" % sys.argv[0]
        sys.exit()
    else:
        url = sys.argv[1]
        # Probe every wordlist entry against the target URL.
        for path in path_file.readlines():
            scode = statuschecker(url, path)
            print "[+] Checking URL: %s:%s. Status:%s" % (url, path, scode)
except Exception as err:
    # Any request failure (DNS, connection, bad scheme) lands here.
    print "Check that URL"
    print (err)
| true
|
70c62bbd1b1d115dcbcdb3639aa88e4cada0b88b
|
Python
|
Jannis12324/meme_generator
|
/QuoteEngine/DocxIngestor.py
|
UTF-8
| 921
| 3.09375
| 3
|
[] |
no_license
|
"""Ingests docx files and returns a list of QuoteModule Objects."""
from .IngestorInterface import IngestorInterface
from typing import List
from .QuoteModel import QuoteModel
import docx
class DocxIngestor(IngestorInterface):
    """Ingestor that extracts quotes from .docx documents."""

    allowed_extensions = ["docx"]

    @classmethod
    def parse(cls, path: str) -> List[QuoteModel]:
        """Parse a docx file and return its quotes as QuoteModel objects.

        Each non-empty paragraph is expected in the form `"body" - author`.
        """
        if not cls.can_ingest(path):
            raise Exception('Cannot Ingest Exception')

        document = docx.Document(path)
        quotes = []
        for paragraph in document.paragraphs:
            text = paragraph.text
            if text == "":
                continue
            parts = text.split('-')
            quotes.append(QuoteModel(str(parts[0]).strip(' "'),
                                     str(parts[1]).strip(' "')))
        return quotes
| true
|
ed5186557c1a376d46524c4dc43accf789724f21
|
Python
|
wenshanj/Database-Development-for-Spotify
|
/simple_query_2.py
|
UTF-8
| 633
| 3.109375
| 3
|
[] |
no_license
|
import psycopg2 as pg2
from prettytable import PrettyTable
# Open an autocommit connection to the local "spotify" database and keep a
# module-level cursor for the query helpers below.
con = pg2.connect(database = 'spotify', user = 'isdb')
con.autocommit = True
cur = con.cursor()
def US5(playlist_id):
    """Print the total number of songs in the given playlist (user story 5).

    :param playlist_id: id of the playlist to look up in the Playlists table.
    """
    print ("US5: As a listener, I want to see the total number of songs in a given playlist")
    # Bug fix: the input line previously hard-coded "playlist_id = 9"
    # regardless of the argument; report the actual parameter instead.
    print ("Input: playlist_id = {}".format(playlist_id))
    tmpl = '''
        SELECT playlist_id, num_songs
        FROM Playlists
        WHERE playlist_id = %s;
    '''
    # mogrify + execute keeps the query parameterized (no SQL injection).
    cmd = cur.mogrify(tmpl, (playlist_id,))
    cur.execute(cmd)
    rows = cur.fetchall()
    table = PrettyTable(["playlist_id", "num_songs"])
    for row in rows:
        table.add_row(row)
    print(table)
# Demo: run user story 5 for playlist 9.
US5(9)
| true
|
492806f512380ad866832db38cdb64dec74a71a6
|
Python
|
ReinaKousaka/core
|
/src/algorithm/scripts/count_overlap_btw_clusters.py
|
UTF-8
| 1,984
| 2.734375
| 3
|
[] |
no_license
|
from src.algorithm.Activity.count_overlap_activity import CountOverlapActivity
import argparse
import time
from src.algorithm.count_overlap.parser.parse_config import parse_from_file
from src.shared.utils import get_project_root
# Default config location and the two default clustering method names.
DEFAULT_PATH = str(get_project_root()) + "/src/algorithm/count_overlap/config/count_overlap.yaml"
DEFAULT_METHOD_1 = "Label Propagation"
DEFAULT_METHOD_2 = "Max Modularity"
def count_overlap(cluster_num_1: int, cluster_num_2: int, base_user: str,
                  cluster_type_1: str, cluster_type_2: str, path=DEFAULT_PATH):
    """Count the overlap between two clusters for the given base user."""
    activity = CountOverlapActivity(parse_from_file(path))
    activity.count_overlap_between_cluster(cluster_num_1, cluster_num_2,
                                           base_user, cluster_type_1,
                                           cluster_type_2)
if __name__ == "__main__":
    # Count the overlap between two clusters of a user's social graph.
    # Bug fix: the original registered BOTH type flags under the same long
    # option string "--clustering type", which makes argparse raise
    # ArgumentError at startup; the long options also contained spaces and
    # were unusable from a shell. All long options below are distinct and
    # valid; the `dest` names (and thus args.*) are unchanged.
    parser = argparse.ArgumentParser(description='count overlap between clusters')
    parser.add_argument('-c1', '--cluster-num-1', dest='cluster_num_1', required=True,
                        help='the first cluster to compare', type=int)
    parser.add_argument('-c2', '--cluster-num-2', dest='cluster_num_2', required=True,
                        help='the second cluster to compare', type=int)
    parser.add_argument('-u', '--user', dest='base_user', required=True,
                        help='The target user', type=str)
    parser.add_argument('-t1', '--cluster-type-1', dest='cluster_type_1', required=False,
                        default=DEFAULT_METHOD_1, help='Label Propagation / Max Modularity', type=str)
    parser.add_argument('-t2', '--cluster-type-2', dest='cluster_type_2', required=False,
                        default=DEFAULT_METHOD_2, help='Label Propagation / Max Modularity', type=str)
    parser.add_argument('-p', '--path', dest='path', required=False,
                        default=DEFAULT_PATH, help='The path of the config file', type=str)
    args = parser.parse_args()
    count_overlap(args.cluster_num_1, args.cluster_num_2, args.base_user,
                  args.cluster_type_1, args.cluster_type_2, args.path)
| true
|
a0f64ab6723d3a8ff4e25e00c4e7b50d578b5fc3
|
Python
|
jfmacedo91/curso-em-video
|
/python/ex102.py
|
UTF-8
| 543
| 4.375
| 4
|
[] |
no_license
|
def fatorial(num, show=False):
    """
    Compute the factorial of a number.
    :param num: the number whose factorial is computed.
    :param show: (optional) also print the multiplication chain.
    :return: the factorial of num.
    """
    total = 1
    for factor in range(num, 0, -1):
        total *= factor
        if show:
            # Print "n x n-1 x ... x 1 = " while the product is built.
            end_mark = ' = ' if factor == 1 else ' x '
            print(factor, end=end_mark)
    return total
# Demo: banner followed by the factorial of 7 with the chain shown.
print(f'\033[33m{" Exercício 102 ":-^51}\033[m')
print(fatorial(7, True))
| true
|
f12f67f33c0388e36300dc50a8b06ac4e841cca3
|
Python
|
jmoradian/directedStudy
|
/dataPrep/createFeatureMatrix/Featurize.py
|
UTF-8
| 946
| 2.734375
| 3
|
[] |
no_license
|
import string, calendar, textblob
from dateutil import parser
# Column indices of a raw tweet row as produced upstream.
# NOTE(review): layout inferred from usage below -- confirm against the
# data-collection script.
FAVORITE_INDEX = 0
DATE_INDEX = 1
TEXT_INDEX = 3
LISTED_INDEX = 5
VERIFIED_INDEX = 6
FRIEND_INDEX = 7
def getTimeStamp(timeStr):
    """Convert a date/time string into a UTC epoch timestamp."""
    # dateutil's parser copes with the free-form Twitter date format.
    parsed = parser.parse(timeStr)
    return calendar.timegm(parsed.timetuple())
# uses textblob library to analyze the tweet sentiment
# - textblob is trained on movie review data
def getTweetSentiment(tweet):
    # Drop characters outside string.printable before analysis.
    # NOTE(review): this relies on Python 2 filter() returning a str; under
    # Python 3 it returns an iterator and TextBlob would reject it -- verify
    # the interpreter version.
    printable = set(string.printable)
    tweetText = filter(lambda char_: char_ in printable, tweet[TEXT_INDEX])
    sentiment = float(textblob.TextBlob(tweetText).sentiment.polarity) # may need to rid text of special characters
    return sentiment
def preProcessTweet(tweet):
    """Convert a raw tweet row (list) into numeric features, in place."""
    # Replace raw text with its sentiment score.
    tweet[TEXT_INDEX] = getTweetSentiment(tweet)
    # Encode verified flag as 0/1.
    tweet[VERIFIED_INDEX] = 0 if tweet[VERIFIED_INDEX] == "False" else 1
    # Replace the date string with an epoch timestamp.
    tweet[DATE_INDEX] = getTimeStamp(tweet[DATE_INDEX])
    # Delete unused columns in DESCENDING index order (7, 5, 0) so earlier
    # deletions do not shift the positions of the later ones.
    del tweet[FRIEND_INDEX]
    del tweet[LISTED_INDEX]
    del tweet[FAVORITE_INDEX]
| true
|
811d2537ca49dbeabcab9ee7c604f4235cef5d6c
|
Python
|
shen-huang/selfteaching-python-camp
|
/exercises/1901040047/1001S02E05_string.py
|
UTF-8
| 1,489
| 3.65625
| 4
|
[] |
no_license
|
t = '''
"The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"
'''
# Replace every "better" with "worse" in the paragraph above.
t = t.replace("better", "worse")
print(t)

# Swap the case of every letter.
t = t.swapcase()
print(t)

# Strip punctuation marks, then split the text into a list of words.
for token in (',', '.', '!', '*', '--'):
    t = t.replace(token, '')
t = t.split()

# Keep words that do not contain 'EA' past position 0.
# NOTE(review): find(x) < 1 also keeps words *starting* with 'EA';
# preserved as in the original exercise.
x = 'EA'
y = [word for word in t if word.find(x) < 1]
print(y)

# Sort the surviving words alphabetically.
y.sort()
print(y)
| true
|
24bd91dbcb6cf81db97e05d97f7416b6e6d1ab84
|
Python
|
EachenKuang/LeetCode
|
/code/459#Repeated Substring Pattern.py
|
UTF-8
| 636
| 3.65625
| 4
|
[] |
no_license
|
# https://leetcode.com/problems/repeated-substring-pattern/description/
class Solution(object):
    # A string s consists of a repeated substring iff s occurs inside
    # (s + s) with its first and last characters removed: a match there
    # implies a period strictly shorter than len(s).
    def repeatedSubstringPattern(self, s):
        """
        :type s: str
        :rtype: bool
        """
        doubled = s + s
        return s in doubled[1:-1]
| true
|
b502ccc25a35b4297690f608243ee427e4405825
|
Python
|
924235317/leetcode
|
/179_largest_number.py
|
UTF-8
| 473
| 3.5625
| 4
|
[] |
no_license
|
from functools import cmp_to_key
def largestNumber(nums):
    """Arrange nums to form the largest possible number, as a string."""
    def order(a, b):
        # a should precede b exactly when a+b concatenates to the
        # larger string.
        combined_ab, combined_ba = a + b, b + a
        if combined_ab > combined_ba:
            return 1
        if combined_ab < combined_ba:
            return -1
        return 0

    digits = sorted(map(str, nums), key=cmp_to_key(order), reverse=True)
    # int() round-trip collapses an all-zero result like "00" to "0".
    return str(int("".join(digits)))
if __name__ == "__main__":
    # Quick demo: [10, 2] -> "210".
    nums = [10, 2]
    print(largestNumber(nums))
| true
|
83cc1c83482a81874da6a730706c15b6f39403bc
|
Python
|
mattswoon/peanut-butter
|
/peanut_butter/indexer.py
|
UTF-8
| 1,262
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
import enum
from typing import Dict, List

import attr
import numpy as np
@attr.s
class Region:
    """A geographic region identified by its code string."""
    # Identifier code for the region.
    code = attr.ib(type=str)
class Status(enum.Enum):
    """Epidemic compartment of a population (SIR model)."""
    susceptible = enum.auto()
    infected = enum.auto()
    recovered = enum.auto()
# Number of Status members; keep in sync with the Status enum above.
NUM_STATUSES = 3
@attr.s
class Indexer:
    """Assigns each region a dense index and packs/unpacks S/I/R state vectors.

    NOTE(review): the `Dict` annotations below require `Dict` to be imported
    from `typing` at the top of the file.
    """
    # All regions covered by the model.
    regions = attr.ib(type=List[Region])

    def __attrs_post_init__(self):
        # Sort for a deterministic region -> index assignment.
        self.regions = sorted(self.regions)
        idx = 0
        fwd = {}
        bwd = {}
        for r in self.regions:
            fwd[r] = idx
            bwd[idx] = r
            idx += 1
        self._index = fwd    # Region -> dense index
        self._region = bwd   # dense index -> Region
        self._n = idx        # number of regions

    def vector(self, map: Dict[Region, float]):
        """Build a dense vector from {Region: value}; absent regions are 0."""
        v = np.zeros(self._n)
        for r, val in map.items():
            v[self._index[r]] = val
        return v

    def matrix(self, entries: List[Dict[str, float]]):
        # TODO: not implemented yet.
        pass

    def unpack(self, y):
        """
        Unpacks the vector into the S, I and R variables in that order
        """
        n = self._n
        # Split at n, 2n, ... giving NUM_STATUSES equal-length pieces.
        return np.split(y, [(i+1)*n for i in range(NUM_STATUSES-1)])

    def pack(self, S, I, R):
        """
        Packs variables S, I and R into a vector by stacking them
        in that order
        """
        return np.concatenate((S, I, R), axis=0)
| true
|
88da2053bc2c93da551bd414c1fd629e9cb90863
|
Python
|
qmnguyenw/python_py4e
|
/geeksforgeeks/python/easy/6_17.py
|
UTF-8
| 19,882
| 4.5
| 4
|
[] |
no_license
|
Tic Tac Toe GUI In Python using PyGame
This article will guide you and give you a basic idea of designing a game Tic
Tac Toe using _pygame_ library of Python. Pygame is a cross-platform set of
Python modules designed for writing video games. It includes computer graphics
and sound libraries designed to be used with the Python programming language.
Let’s break the task in five parts:
1. Importing the required libraries and setting up the required global variables.
2. Designing the game display function, that will set a platform for other components to be displayed on the screen.
3. Main algorithm of win and draw
4. Getting the user input and displaying the “X” or “O” at the proper position where the user has clicked his mouse.
5. Running an infinite loop, and including the defined methods in it.
**Note:** The required PNG files can be downloaded below –

modified_cover.png

X_modified.png

o_modified.png
### Importing the required libraries and setting up the required global
variables
We are going to use the pygame, time, and the sys library of Python.
**time** library is used to keep track of time and sleep() method that we
are going to use inside our code. Have a look at the code below.
__
__
__
__
__
__
__
# importing the required libraries
import pygame as pg
import sys
import time
from pygame.locals import *
# declaring the global variables
# for storing the 'x' or 'o'
# value as character
XO = 'x'
# storing the winner's value at
# any instant of code
winner = None
# to check if the game is a draw
draw = None
# to set width of the game window
width = 400
# to set height of the game window
height = 400
# to set background color of the
# game window
white = (255, 255, 255)
# color of the straightlines on that
# white game board, dividing board
# into 9 parts
line_color = (0, 0, 0)
# setting up a 3 * 3 board in canvas
board = [[None]*3, [None]*3, [None]*3]
---
__
__
### Designing the game display
This is the trickier part, that makes the utmost importance in game
development. We can use the **display.set_mode()** method to set up our
display window. This takes three arguments, first one being a tuple having
(width, height) of the display that we want it to be, the other two arguments
are depth and fps respectively. **display.set_caption()** , sets a caption on
the name tag of our display. **pg.image.load()** is a useful method to load
the background images to customize the display. This method takes the file
name as an argument along with the extension. There is a small problem with
image.load(), it loads the image as a Python object in its native size,
which may not be optimized along with the display. So we use another method in
pygame known as **pg.transform.scale()**. This method takes two arguments, one
being the name of the image object and the other is a tuple having (width,
height), that we want our image to scale to.
Finally we head to the first function, **game_initiating_window()**. On the
very first line there is a **screen.blit()** call. Here `screen` is our
display surface and `blit` is the method that enables pygame to draw one
image on top of another. Our image object is displayed over
the screen, which was set white initially. **pg.display.update()** is another
important function in game development. It updates the display of our window
when called. Pygame also enables us to draw geometric objects like line,
circle, etc. In this project we have used **pg.draw.line()** method that takes
five arguments, namely – _(display, line color, starting point, ending point,
width)_. This involves a little bit of coordinate geometry to draw the lines
properly.
This is not sufficient. At each update of the display we need to know the game
status, whether it is a win or a loss. **draw_status()** helps us display
another 100px window at the bottom of the main window, which updates the status
at each click of the user.
__
__
__
__
__
__
__
# initializing the pygame window
pg.init()
# setting fps manually
fps = 30
# this is used to track time
CLOCK = pg.time.Clock()
# this method is used to build the
# infrastructure of the display
screen = pg.display.set_mode((width, height + 100), 0, 32)
# setting up a nametag for the
# game window
pg.display.set_caption("My Tic Tac Toe")
# loading the images as python object
initiating_window = pg.image.load("modified_cover.png")
x_img = pg.image.load("X_modified.png")
y_img = pg.image.load("o_modified.png")
# resizing images
initiating_window = pg.transform.scale(initiating_window, (width, height
+ 100))
x_img = pg.transform.scale(x_img, (80, 80))
o_img = pg.transform.scale(y_img, (80, 80))
def game_initiating_window():
    """Show the splash image for 3 seconds, then draw the empty 3x3 grid."""
    screen.blit(initiating_window, (0, 0))
    pg.display.update()
    time.sleep(3)
    screen.fill(white)
    # two vertical grid lines at width/3 and 2*width/3
    pg.draw.line(screen, line_color, (width / 3, 0), (width / 3, height), 7)
    pg.draw.line(screen, line_color, (width / 3 * 2, 0), (width / 3 * 2, height), 7)
    # two horizontal grid lines at height/3 and 2*height/3
    pg.draw.line(screen, line_color, (0, height / 3), (width, height / 3), 7)
    pg.draw.line(screen, line_color, (0, height / 3 * 2), (width, height / 3 * 2), 7)
    draw_status()
def draw_status():
    """Render the current game status in the strip below the board.

    Reads globals winner, XO and draw.
    """
    global draw
    if winner is None:
        message = XO.upper() + "'s Turn"
    else:
        message = winner.upper() + " won !"
    # a draw message overrides everything else
    if draw:
        message = "Game Draw !"
    font = pg.font.Font(None, 30)
    # white text on the black status strip
    text = font.render(message, 1, (255, 255, 255))
    # black 100 px strip below the board
    # NOTE(review): the rectangle uses hard-coded 400/500 rather than
    # width/height — confirm if the window size ever changes
    screen.fill((0, 0, 0), (0, 400, 500, 100))
    text_rect = text.get_rect(center=(width / 2, 500 - 50))
    screen.blit(text, text_rect)
    pg.display.update()
---
__
__
### Main algorithm
The main algorithm has a straightforward approach. A user can win row-wise,
column-wise, or diagonally. So by using a multidimensional array, we can set
up the conditions easily.
__
__
__
__
__
__
__
def check_win():
    """Scan the board for a winner or a draw after a move.

    Sets globals winner / draw and strikes a red line through any
    winning triple, then refreshes the status strip.
    """
    global board, winner, draw
    # winning rows: strike a horizontal line through the row's centre
    for row in range(0, 3):
        if((board[row][0] == board[row][1] == board[row][2]) and (board[row][0] is not None)):
            winner = board[row][0]
            pg.draw.line(screen, (250, 0, 0),
                         (0, (row + 1)*height / 3 - height / 6),
                         (width, (row + 1)*height / 3 - height / 6), 4)
            break
    # winning columns: strike a vertical line through the column's centre
    for col in range(0, 3):
        if((board[0][col] == board[1][col] == board[2][col]) and (board[0][col] is not None)):
            winner = board[0][col]
            pg.draw.line(screen, (250, 0, 0),
                         ((col + 1)* width / 3 - width / 6, 0),
                         ((col + 1)* width / 3 - width / 6, height), 4)
            break
    # main diagonal (top-left to bottom-right)
    # NOTE(review): strike endpoints are hard-coded for a 400x400 board
    if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] is not None):
        winner = board[0][0]
        pg.draw.line(screen, (250, 70, 70), (50, 50), (350, 350), 4)
    # anti-diagonal (top-right to bottom-left)
    if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] is not None):
        winner = board[0][2]
        pg.draw.line(screen, (250, 70, 70), (350, 50), (50, 350), 4)
    # full board with no winner -> draw
    if(all([all(row) for row in board]) and winner is None):
        draw = True
    draw_status()
---
__
__
### Getting the user input and displaying the “X” or “O”
This part deals with a visualization of the board and a little bit of
coordinate geometry. **drawXO()** takes two arguments row and col. First of
all, we have to set up the correct geometrical position to put the image of X
and image of O that we have stored as two python objects “x_img” and “y_img”
respectively. Have a look at the code for a proper understanding.
**user_click()** is a function we have designed to get the input from a user
mouse click. Imagine, you have clicked on one of the nine parts (boxes divided
by the lines we have drawn horizontally and vertically), this function will
define the coordinate of the position where you have clicked.
**pg.mouse.get_pos()** gets the x-coordinate and y-coordinate of the mouse
click of the user and returns them as a tuple. Depending upon the (x, y) we can define
the exact row and the exact column where the user has clicked. Finally, when
we have the row and col, we pass these two as arguments to the function
**drawXO(row, col)** to draw the image of ‘X’ or the image of ‘O’ at the
desired position of the user on the game screen.
__
__
__
__
__
__
__
def drawXO(row, col):
    """Record the current player's mark at (row, col) and blit its image.

    row/col are 1-based.  Naming caveat: posx is derived from the row and
    is actually the vertical offset, posy from the column is the
    horizontal offset; blit() receives (posy, posx) as (left, top), so
    the placement is consistent despite the swapped names.  The row
    offsets use width/3 while the column offsets use height/3 — this
    only works because width == height; confirm before resizing.
    """
    global board, XO
    # vertical offset: 30 px into the clicked row's band
    if row == 1:
        posx = 30
    if row == 2:
        posx = width / 3 + 30
    if row == 3:
        posx = width / 3 * 2 + 30
    # horizontal offset: 30 px into the clicked column's band
    if col == 1:
        posy = 30
    if col == 2:
        posy = height / 3 + 30
    if col == 3:
        posy = height / 3 * 2 + 30
    # store the mark, then hand the turn to the other player
    board[row-1][col-1] = XO
    if(XO == 'x'):
        screen.blit(x_img, (posy, posx))
        XO = 'o'
    else:
        screen.blit(o_img, (posy, posx))
        XO = 'x'
    pg.display.update()
def user_click():
    """Translate a mouse click into a board cell and play the move."""
    x, y = pg.mouse.get_pos()
    # column band (1-3) from the x coordinate, None if outside the board
    if(x < width / 3):
        col = 1
    elif (x < width / 3 * 2):
        col = 2
    elif(x < width):
        col = 3
    else:
        col = None
    # row band (1-3) from the y coordinate, None if outside the board
    # (e.g. a click on the status strip below it)
    if(y < height / 3):
        row = 1
    elif (y < height / 3 * 2):
        row = 2
    elif(y < height):
        row = 3
    else:
        row = None
    # only play on an empty cell inside the board
    if(row and col and board[row-1][col-1] is None):
        global XO
        drawXO(row, col)
        check_win()
---
__
__
### Running an infinite loop
This is the final important step to run our game infinitely until the user
clicks **exit**. Before running an infinite loop, we need to set up a function
that can reset all the global values and parameters to initial values for a
fresh start of the game.
**reset_game()** is used for this purpose. It resets the board value to 3 * 3
None value again and initializes global parameters.
In the game development, every action by the player is an **event**. Whether
he clicks on the window or clicks on the exit/close icon. To get these events
as an object, pygame has a built-in method used as **pg.event.get()**. If the
event type is “QUIT”, we use the sys library of Python to exit the game. But
if the mouse is pressed, the **event.get()** will return “MOUSEBUTTONDOWN” and
our call to **user_click()** happens to know the exact coordinate of the board
where the user has clicked.
Throughout the code we have used the .sleep() method to pause the game for
short periods and make it user-friendly and smooth.
__
__
__
__
__
__
__
def reset_game():
    """Pause briefly, then reset all global state for a fresh round."""
    global board, winner, XO, draw
    time.sleep(3)
    XO = 'x'
    draw = False
    # redraw splash + grid; note winner is cleared only afterwards
    game_initiating_window()
    winner = None
    board = [[None]*3, [None]*3, [None]*3]
game_initiating_window()

# Main event loop: dispatch quit and mouse events, then refresh the
# display at the configured frame rate.
while True:
    for event in pg.event.get():
        if event.type == QUIT:
            pg.quit()
            sys.exit()
        # BUG FIX: compare with == rather than 'is' — event types are
        # plain ints, and identity comparison only happens to work for
        # CPython's cached small integers
        elif event.type == MOUSEBUTTONDOWN:
            user_click()
            if winner or draw:
                reset_game()
    pg.display.update()
    CLOCK.tick(fps)
---
__
__
**The complete code:**
__
__
__
__
__
__
__
# importing the required libraries
import pygame as pg
import sys
import time
from pygame.locals import *
# global game state
# current player's mark ('x' or 'o')
XO = 'x'

# mark of the winner, or None while the game is still running
winner = None

# becomes True when the board fills with no winner
draw = None

# board dimensions in pixels (the status strip adds another 100 px)
width = 400
height = 400

# background colour of the board
white = (255, 255, 255)

# colour of the grid lines dividing the board into 9 cells
line_color = (0, 0, 0)

# 3x3 board; None marks an empty cell
board = [[None]*3, [None]*3, [None]*3]
# initialize the pygame window
pg.init()

# frames-per-second cap for the main loop
fps = 30

# clock used to enforce the fps cap
CLOCK = pg.time.Clock()

# create the display surface; the extra 100 px strip at the bottom
# holds the status message
screen = pg.display.set_mode((width, height + 100), 0, 32)

# window title
pg.display.set_caption("My Tic Tac Toe")

# load the splash screen and the two mark images as surfaces
initiating_window = pg.image.load("modified_cover.png")
x_img = pg.image.load("X_modified.png")
y_img = pg.image.load("o_modified.png")

# scale the splash image to the full window and the marks to cell size
initiating_window = pg.transform.scale(initiating_window, (width, height + 100))
x_img = pg.transform.scale(x_img, (80, 80))
o_img = pg.transform.scale(y_img, (80, 80))
def game_initiating_window():
    """Show the splash image for 3 seconds, then draw the empty 3x3 grid."""
    screen.blit(initiating_window, (0, 0))
    pg.display.update()
    time.sleep(3)
    screen.fill(white)
    # grid: one vertical and one horizontal line at each third, 7 px wide
    for k in (1, 2):
        vx = width / 3 * k
        hy = height / 3 * k
        pg.draw.line(screen, line_color, (vx, 0), (vx, height), 7)
        pg.draw.line(screen, line_color, (0, hy), (width, hy), 7)
    draw_status()
def draw_status():
    """Render the current game status in the strip below the board."""
    global draw
    # draw beats winner beats turn announcement
    if draw:
        message = "Game Draw !"
    elif winner is None:
        message = XO.upper() + "'s Turn"
    else:
        message = winner.upper() + " won !"
    # white text on a freshly blacked-out 100 px strip below the board
    font = pg.font.Font(None, 30)
    text = font.render(message, 1, (255, 255, 255))
    screen.fill((0, 0, 0), (0, 400, 500, 100))
    text_rect = text.get_rect(center=(width / 2, 500 - 50))
    screen.blit(text, text_rect)
    pg.display.update()
def check_win():
    """Scan the board for a winner or a draw after a move.

    Sets globals winner / draw and strikes a red line through any
    winning triple, then refreshes the status strip.
    """
    global board, winner, draw
    # winning rows: strike a horizontal line through the row's centre
    for row in range(0, 3):
        if((board[row][0] == board[row][1] == board[row][2]) and (board[row][0] is not None)):
            winner = board[row][0]
            pg.draw.line(screen, (250, 0, 0),
                         (0, (row + 1)*height / 3 - height / 6),
                         (width, (row + 1)*height / 3 - height / 6), 4)
            break
    # winning columns: strike a vertical line through the column's centre
    for col in range(0, 3):
        if((board[0][col] == board[1][col] == board[2][col]) and (board[0][col] is not None)):
            winner = board[0][col]
            pg.draw.line(screen, (250, 0, 0),
                         ((col + 1)* width / 3 - width / 6, 0),
                         ((col + 1)* width / 3 - width / 6, height), 4)
            break
    # main diagonal (top-left to bottom-right)
    # NOTE(review): strike endpoints are hard-coded for a 400x400 board
    if (board[0][0] == board[1][1] == board[2][2]) and (board[0][0] is not None):
        winner = board[0][0]
        pg.draw.line(screen, (250, 70, 70), (50, 50), (350, 350), 4)
    # anti-diagonal (top-right to bottom-left)
    if (board[0][2] == board[1][1] == board[2][0]) and (board[0][2] is not None):
        winner = board[0][2]
        pg.draw.line(screen, (250, 70, 70), (350, 50), (50, 350), 4)
    # full board with no winner -> draw
    if(all([all(row) for row in board]) and winner is None):
        draw = True
    draw_status()
def drawXO(row, col):
    """Record the current player's mark at (row, col) and blit its image.

    row/col are 1-based.  Naming caveat: posx is derived from the row and
    is actually the vertical offset, posy from the column is the
    horizontal offset; blit() receives (posy, posx) as (left, top), so
    the placement is consistent despite the swapped names.  The row
    offsets use width/3 while the column offsets use height/3 — this
    only works because width == height; confirm before resizing.
    """
    global board, XO
    # vertical offset: 30 px into the clicked row's band
    if row == 1:
        posx = 30
    if row == 2:
        posx = width / 3 + 30
    if row == 3:
        posx = width / 3 * 2 + 30
    # horizontal offset: 30 px into the clicked column's band
    if col == 1:
        posy = 30
    if col == 2:
        posy = height / 3 + 30
    if col == 3:
        posy = height / 3 * 2 + 30
    # store the mark, then hand the turn to the other player
    board[row-1][col-1] = XO
    if(XO == 'x'):
        screen.blit(x_img, (posy, posx))
        XO = 'o'
    else:
        screen.blit(o_img, (posy, posx))
        XO = 'x'
    pg.display.update()
def user_click():
    """Translate a mouse click into a board cell and play the move."""
    x, y = pg.mouse.get_pos()

    # locate the column band (1-3); None when the click is off the board
    col = None
    for band, bound in enumerate((width / 3, width / 3 * 2, width), start=1):
        if x < bound:
            col = band
            break

    # locate the row band (1-3); None covers the status strip below
    row = None
    for band, bound in enumerate((height / 3, height / 3 * 2, height), start=1):
        if y < bound:
            row = band
            break

    # only play on an empty cell inside the board
    if row and col and board[row - 1][col - 1] is None:
        global XO
        drawXO(row, col)
        check_win()
def reset_game():
    """Pause briefly, then reset all global state for a fresh round."""
    global board, winner, XO, draw
    time.sleep(3)
    XO = 'x'
    draw = False
    # redraw splash + grid; winner is cleared only afterwards
    game_initiating_window()
    winner = None
    board = [[None] * 3 for _ in range(3)]
game_initiating_window()

# Main event loop: dispatch quit and mouse events, then refresh the
# display at the configured frame rate.
while True:
    for event in pg.event.get():
        if event.type == QUIT:
            pg.quit()
            sys.exit()
        # BUG FIX: compare with == rather than 'is' — event types are
        # plain ints, and identity comparison only happens to work for
        # CPython's cached small integers
        elif event.type == MOUSEBUTTONDOWN:
            user_click()
            if winner or draw:
                reset_game()
    pg.display.update()
    CLOCK.tick(fps)
---
__
__
**Output:**
https://media.geeksforgeeks.org/wp-content/uploads/20200507213942/python-tic-
tac-toe-pygame.webm
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| true
|
849d86ee6a349a19d1ef5025e59c45f593bdb2ea
|
Python
|
kangminsu1/Computer_Vision
|
/Midterm/Q_3.py
|
UTF-8
| 1,637
| 3.703125
| 4
|
[] |
no_license
|
class N:
    """Singly linked node with an auxiliary random pointer.

    head is the payload value, new_head the next node in the chain,
    and random an arbitrary extra reference to another node (or None).
    """

    def __init__(self, head, new_head=None, random=None):
        self.head = head
        self.new_head = new_head
        self.random = random
def finding(node):
    """Print the chain as value[random-value] -> ... terminated by 'null'.

    Each node prints its head followed by the head of its random target
    in brackets, or [x] when the random pointer is unset.
    """
    # iterative walk instead of recursion; output is identical
    while node is not None:
        print(node.head, end='')
        if node.random:
            print("[%d]" % (node.random.head), end=' -> ')
        else:
            print("[x]", end=' -> ')
        node = node.new_head
    print("null")
def Random_pointer_update(node, dictionary):
if dictionary.get(node) is None:
return
dictionary.get(node).random = dictionary.get(node.random)
Random_pointer_update(node.new_head, dictionary)
def Recursive(node, dictionary):
    """Deep-copy the next-chain, recording original -> clone in dictionary.

    Random pointers are left unset here; Random_pointer_update fixes
    them up afterwards using the same dictionary.
    """
    if node is None:
        return None
    clone = N(node.head)
    dictionary[node] = clone
    clone.new_head = Recursive(node.new_head, dictionary)
    return clone
def Clone_linked_list(node):
    """Return a deep copy of the list, including its random pointers."""
    mapping = {}
    # first pass copies the next-chain, second pass wires the randoms
    Recursive(node, mapping)
    Random_pointer_update(node, mapping)
    return mapping[node]
if __name__ == '__main__':
    # build the chain 1 -> 2 -> 3 -> 4 -> 5
    node = N(1)
    node.new_head = N(2)
    node.new_head.new_head = N(3)
    node.new_head.new_head.new_head = N(4)
    node.new_head.new_head.new_head.new_head = N(5)
    # random pointers: each node points at its successor
    # (the last node's random stays None)
    node.random = node.new_head
    node.new_head.random = node.new_head.new_head
    node.new_head.new_head.random = node.new_head.new_head.new_head
    node.new_head.new_head.new_head.random = node.new_head.new_head.new_head.new_head
    # print("Linked lists:")
    # finding(node)
    clone = Clone_linked_list(node)
    print("\nCloned Linked List:")
    finding(clone)
| true
|
b2e7a6f4a2df69448b8231f572d8fbb3701bb1ff
|
Python
|
NannikEU/eu_python_basics
|
/exc_423.py
|
UTF-8
| 1,852
| 3.40625
| 3
|
[] |
no_license
|
# 423
import math
import random
def a():
    """Sum of the first column of the global n x n matrix A."""
    return sum(A[i][0] for i in range(n))
def b():
    """Sum of both diagonals of A; the centre is counted once for odd n."""
    result = 0
    # main diagonal
    for i in range(n):
        result += A[i][i]
    # anti-diagonal
    for i in range(n):
        result += A[i][n - i - 1]
    # for odd n the centre cell was added twice; remove one copy
    if n % 2 == 1:
        result -= A[math.floor(n / 2)][math.floor(n / 2)]
    return result
def c():
    """Return the largest element in the first and last rows of A.

    Reads the module-level globals A (n x n list of ints) and n.
    """
    # BUG FIX: the original scanned the first row with range(2, n),
    # silently skipping A[0][1]; scan from column 1 instead.
    # (local renamed from 'max' to avoid shadowing the builtin)
    max_val = A[0][0]
    for j in range(1, n):
        if A[0][j] > max_val:
            max_val = A[0][j]
    for j in range(n):
        if A[n - 1][j] > max_val:
            max_val = A[n - 1][j]
    return max_val
def d():
    """Minimum over three lines near the anti-diagonal of A.

    NOTE(review): for i == 0 the expressions A[i - 1][...] and
    A[i][n - i - 2] index with -1 and silently wrap to the last
    row/column — presumably unintended; confirm the intended
    neighbourhood before relying on this result.
    """
    if n < 3:
        print("n must be > 3")
        return 0
    # 'min' shadows the builtin — kept as-is here, code unchanged
    min = A[0][n - 1]
    # anti-diagonal itself
    for i in range(n - 2, -1, -1):
        if A[i][n - i - 1] < min:
            min = A[i][n - i - 1]
    # cells one column left of the anti-diagonal
    for i in range(n - 1, -1, -1):
        if A[i][n - i - 2] < min:
            min = A[i][n - i - 2]
    # cells one row above the anti-diagonal
    for i in range(n - 2, -1, -1):
        if A[i - 1][n - i - 1] < min:
            min = A[i - 1][n - i - 1]
    return min
def e():
    """Sum the anti-diagonal starting at row m (read interactively).

    Walks up-right from (m, 0), adding each in-bounds cell of A.
    Returns 0 when m exceeds 2*n.
    """
    result = 0
    m = int(input("print m: "))
    if m > 2 * n:
        print("m must be <= {}".format(2 * n))
        return 0
    i = m
    j = 0
    # step up one row and right one column until leaving the top edge
    while i >= 0:
        if i < len(A):
            if j < len(A[i]):
                result += A[i][j]
        i -= 1
        j += 1
    return result
def f():
    """True when the main-diagonal maximum exceeds the anti-diagonal minimum."""
    diag_max = builtins_max = A[0][0]
    for i in range(1, n):
        if A[i][i] > diag_max:
            diag_max = A[i][i]
    anti_min = A[0][n - 1]
    for i in range(1, n):
        if A[i][n - i - 1] < anti_min:
            anti_min = A[i][n - i - 1]
    return diag_max > anti_min
# read the matrix size and fill an n x n matrix with random digits 0-9
n = int(input("n: "))
A = []
for i in range(n):
    row = []
    for j in range(n):
        row.append(random.randint(0, 9))
    A.append(row)

# show the generated matrix
print("")
for row in A:
    print(row)

# run every exercise part on the same matrix
print("\na:", a())
print("b:", b())
print("c:", c())
print("d:", d())
print("e:", e())
print("f:", f())
| true
|
c229eda1c818c0dcc98a36a7279e3186f31a4f42
|
Python
|
gungui98/object-detect
|
/shrink_images.py
|
UTF-8
| 435
| 2.625
| 3
|
[] |
no_license
|
import glob
import cv2
# room-category subfolders to process and where the shrunk copies go
FOLDERS = ['livingrooms/', 'bedrooms/', 'kitchen/', 'bathrooms/']
ROOT_FOLDER = 'data/'
DESTINATION_FOLDER = 'preprocess/'

# downscale every jpg in each source folder to 25% of its size and
# write it under preprocess/<folder>/<same file name>
for folder in FOLDERS:
    for file in glob.glob(ROOT_FOLDER + folder + '*.jpg'):
        file_name = file.replace(ROOT_FOLDER + folder, '')
        image = cv2.imread(file)
        small = cv2.resize(image, (0, 0), fx=0.25, fy=0.25)
        cv2.imwrite(DESTINATION_FOLDER + folder + file_name, small)
| true
|
d134596f4bbf3f38682b70687c3d559bb559a913
|
Python
|
NikhilCBhat/grow-organic
|
/data_collection/water_plants.py
|
UTF-8
| 1,789
| 3.390625
| 3
|
[] |
no_license
|
import sys
sys.path.append('.')
import time
from time import sleep
from data_collection.valve import setup_valve, open_valve, close_valve
from data_collection.pump import setup_pump, run_pump_forward, run_pump_backward, stop_pump
from data_collection.moisture import is_water_safe
def water_plant(plant_id, water_duration=60):
    """plant_id = '1', '2', '3' or '0' or None to loop through all plants"""
    # (valve GPIO pin, pump input 1, pump input 2) per plant
    plant_id_to_pins = {
        '1': (26, 23, 24),
        '2': (13, 23, 24),
        '3': (14, 23, 24)
    }
    # ids_to_water = [plant_id] if plant_id != 0 else plant_id_to_pins.keys()
    ids_to_water = [plant_id] if (plant_id is not None) and (plant_id != 0) else plant_id_to_pins.keys()
    for p_id in ids_to_water:
        print(f"Water plant {p_id}")
        valve_pin, pump_in1, pump_in2 = plant_id_to_pins[p_id]
        # setup
        setup_valve(valve_pin)
        setup_pump(pump_in1, pump_in2)
        # water until the duration elapses or the moisture check trips
        start_time = time.time()
        while time.time() - start_time < water_duration and is_water_safe():
            run_pump_forward(pump_in1, pump_in2, 0.5)
            open_valve(valve_pin, 0.5)
        # NOTE(review): this prints on every loop exit, including a
        # normal timeout — presumably meant only for the wet-soil case
        print("Soil too wet")
        # stop watering
        close_valve(valve_pin, 1)
        stop_pump(pump_in1, pump_in2, 1)
        sleep(10)
def aerate_water(aerate_duration=2):
    """Run plant 1's pump in reverse briefly to aerate the water line."""
    # hard-coded to the pins used for plant '1'
    valve_pin, pump_in1, pump_in2 = 26, 23, 24
    # setup
    setup_valve(valve_pin)
    setup_pump(pump_in1, pump_in2)
    # do aeration
    open_valve(valve_pin, 0.5)
    run_pump_backward(pump_in1, pump_in2, 0.5)
    sleep(aerate_duration)
    # stop aerating
    stop_pump(pump_in1, pump_in2, 1)
    close_valve(valve_pin, 1)
    sleep(10)


if __name__ == "__main__":
    aerate_water(2)
    # water_plant(1)
    water_plant(2)
    # water_plant(3)
| true
|
f04fc2dd703cf2308caba986c4966c320a5c5084
|
Python
|
amaozhao/algorithms
|
/algorithms/arrays/two_sum.py
|
UTF-8
| 509
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
"""
给定一个整型数组, 返回这样2个元素的索引: 这2个元素相加的结果为给定的值.
你可以假定这个结果只有一种情况, 但是每个元素只能使用一次.
例如:
给定 nums = [2, 7, 11, 15], target = 9,
因 nums[0] + nums[1] = 2 + 7 = 9,
返回 [0, 1].
"""
def two_sum(array, target):
    """Return indices (i, j), i < j, with array[i] + array[j] == target.

    Returns None when no such pair exists.  Single pass: for each value
    we remember which index would complete the sum.
    """
    needed = {}
    for idx, value in enumerate(array):
        if value in needed:
            return needed[value], idx
        needed[target - value] = idx
    return None
| true
|
0f4776e4b12700024e106bd7e1a31cd1300ae489
|
Python
|
joesdesk/themecrafter
|
/themecrafter/interface/html.py
|
UTF-8
| 3,351
| 3.15625
| 3
|
[] |
no_license
|
# Module to visualize the comments through html.
from math import floor
from bs4 import BeautifulSoup
from .htmlrender import doc2tr
from .html_styling import Doc2HtmlStyler
class HTMLInterface:
    '''The interface for viewing the XML documents.'''

    def __init__(self, xmlstring):
        '''Takes an XML string and turns it into a soup so that elements
        can be easily extracted and rendered.
        '''
        soup = BeautifulSoup(xmlstring, "xml")
        # Cache the documents as a list of soup tags so that only the
        # selected documents are rendered, saving time.
        self.docs = soup.corpus.contents
        # HTML styling/highlighting backend
        self.renderer = Doc2HtmlStyler()
        # Pagination state
        self.n_docs_per_page = 10
        self.n_pages = 0
        self.curr_page = 0
        # Optional subset of document ids to navigate (None = all docs)
        self.sel_doc_ids = None
        self.set_doc_sel()

    def set_doc_sel(self, doc_ids=None):
        '''Changes the list of documents to navigate through.'''
        self.sel_doc_ids = doc_ids
        if doc_ids is None:
            total_docs = len(self.docs)
        else:
            total_docs = len(doc_ids)
        self.n_pages = (total_docs // self.n_docs_per_page) + 1

    def doc_range(self, page_num):
        '''Returns the ids of the documents on the given page.'''
        start_doc_id = self.n_docs_per_page * page_num
        end_doc_id = start_doc_id + self.n_docs_per_page
        if self.sel_doc_ids is None:
            total_docs = len(self.docs)
            sel_docs = range(total_docs)
            return list(sel_docs[start_doc_id:end_doc_id])
        total_docs = len(self.sel_doc_ids)
        end_doc_id = min(end_doc_id, total_docs)
        if start_doc_id > end_doc_id:
            return []
        return self.sel_doc_ids[start_doc_id:end_doc_id]

    def render(self, page_num):
        '''Renders the documents of one page as a single HTML string.'''
        # clamp the requested page into [0, n_pages - 1]
        page_num = max(0, page_num)
        page_num = min(page_num, self.n_pages - 1)
        self.curr_page = page_num

        doc_ids = self.doc_range(page_num)

        page = r'<html><table style="width:100%">'
        for i in doc_ids:
            doc_elem = self.docs[i]
            self.renderer.highlight(doc_elem)
            page += r'<tr style="align:center">'
            page += r'<td></td>'
            #page += r'<td style="text-align:right; vertical-align:top; background-color:blue; width:100%">300</td>'
            # 1-based document number column
            page += r'<td style="vertical-align:top; width:50">{:d}</td>'.format(i+1)
            page += doc2tr(doc_elem)
            page += r'<td></td>'
            page += r'</tr>'
        # BUG FIX: the original emitted '<table>' here, opening a second
        # table instead of closing the first one
        page += r'</table></html>'
        return page

    def render_first(self):
        return self.render(0)

    def render_prev(self):
        return self.render(self.curr_page - 1)

    def render_next(self):
        return self.render(self.curr_page + 1)

    def render_last(self):
        return self.render(self.n_pages - 1)
| true
|
04fee2416e4a10cde49fc3267f7b371f0b16fa87
|
Python
|
AlbertoCastelo/Neuro-Evolution-BNN
|
/neat/dataset/regression_example.py
|
UTF-8
| 5,653
| 2.78125
| 3
|
[] |
no_license
|
import torch
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
import numpy as np
from neat.dataset.abstract import NeatTestingDataset
class RegressionExample1Dataset(NeatTestingDataset):
    '''
    Dataset with 1 input variable and 1 output.

    Train inputs are drawn uniformly from [0, 0.7]; test inputs span
    [-0.5, 1.5] so models are evaluated outside the training range.
    Inputs and outputs are standardised with scalers fit on train data.
    '''
    TRAIN_SIZE = 5000
    TEST_SIZE = 5000

    def __init__(self, train_percentage, dataset_type='train', random_state=42, noise=0.0, label_noise=0.0,
                 is_debug=False):
        self.is_debug = is_debug
        self.dataset_type = dataset_type
        # scalers are fit on the training split only (see generate_data)
        self.input_scaler = StandardScaler()
        self.output_scaler = StandardScaler()
        if dataset_type not in ['train', 'validation', 'test']:
            raise ValueError(f'Dataset Type {dataset_type} is not valid')
        super().__init__(train_percentage=train_percentage, dataset_type=dataset_type, random_state=random_state,
                         noise=noise, label_noise=label_noise)

    def generate_data(self):
        """Build and normalise the x/y tensors for the configured split."""
        range_ = [0.0, 0.7]
        noise = [0.0, 0.02]  # (mean, std) of the Gaussian noise
        x_train = np.random.uniform(range_[0], range_[1], self.TRAIN_SIZE)
        x_train, y_train = self._get_x_y(x=x_train, noise=noise)
        self.input_scaler.fit(x_train)
        self.output_scaler.fit(y_train)
        if self.dataset_type == 'train':
            x = x_train
            y = y_train
        if self.dataset_type == 'test':
            # noise-free grid extending beyond the training range
            x_test = np.linspace(-0.5, 1.5, self.TEST_SIZE)
            noise = [0.00, 0.00]
            x, y = self._get_x_y(x=x_test, noise=noise)
        self.x_original = x
        self.y_original = y
        # self.x = x
        # self.y = y.unsqueeze(y, dim=1)
        self.x = self.input_scaler.transform(x)
        self.y = self.output_scaler.transform(y).reshape((-1, 1))
        if self.is_debug:
            # NOTE(review): debug mode slices the *unnormalised* arrays,
            # overwriting the scaled self.x/self.y set above — confirm
            self.x = x[:512]
            self.y = y[:512]
        self.x = torch.tensor(self.x).float()
        self.y = torch.tensor(self.y).float()
        self._generate_train_test_sets()

    def _get_x_y(self, x, noise):
        """Return (x, y) column vectors for the noisy sinusoidal target."""
        dataset_size = x.shape[0]
        # y = 0.1 + 0.5 * (x + np.random.normal(noise[0], noise[1], dataset_size)) + \
        #     0.3 * np.square(x + np.random.normal(noise[0], noise[1], dataset_size))
        y = x + 0.3 * np.sin(2 * np.pi * (x + np.random.normal(noise[0], noise[1], dataset_size))) + \
            0.3 * np.sin(4 * np.pi * (x + np.random.normal(noise[0], noise[1], dataset_size))) + \
            np.random.normal(noise[0], noise[1], dataset_size)
        return x.reshape((-1, 1)), y.reshape((-1, 1))

    def unnormalize_output(self, y_pred: torch.Tensor) -> torch.Tensor:
        """Map normalised predictions back to the original output scale."""
        y_pred = y_pred.numpy().reshape((-1, 1))
        y_pred_unnormalized = self.output_scaler.inverse_transform(y_pred).reshape(-1)
        return torch.Tensor(y_pred_unnormalized)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        x = self.x[idx]
        y = self.y[idx]
        return x, y
class RegressionExample2Dataset(NeatTestingDataset):
    TRAIN_SIZE = 500
    TEST_SIZE = 500
    '''
    Dataset with 2 input variables and 1 output.

    Same sinusoidal target family as the 1-D dataset, but each sine
    term depends on a separate input dimension.
    '''

    def __init__(self, train_percentage, dataset_type='train', is_debug=False):
        self.is_debug = is_debug
        # scalers fit on the training split only (see generate_data)
        self.input_scaler = StandardScaler()
        self.output_scaler = StandardScaler()
        super().__init__(train_percentage=train_percentage, dataset_type=dataset_type)

    def generate_data(self):
        """Build and normalise the x/y tensors for the configured split."""
        range_ = [0.0, 0.7]
        noise = [0.0, 0.02]  # (mean, std) of the Gaussian noise
        x_1_train = np.random.uniform(range_[0], range_[1], self.TRAIN_SIZE)
        x_2_train = np.random.uniform(range_[0], range_[1], self.TRAIN_SIZE)
        x_train, y_train = self._get_x_y(x_1=x_1_train, x_2=x_2_train, noise=noise)
        self.input_scaler.fit(x_train)
        self.output_scaler.fit(y_train)
        if self.dataset_type == 'train':
            x = x_train
            y = y_train
        if self.dataset_type == 'test':
            # noise-free grid extending beyond the training range
            x_1_test = np.linspace(-0.5, 1.5, self.TEST_SIZE)
            x_2_test = np.linspace(-0.5, 1.5, self.TEST_SIZE)
            noise = [0.00, 0.00]
            x, y = self._get_x_y(x_1=x_1_test, x_2=x_2_test, noise=noise)
        self.x_original = x
        self.y_original = y
        # self.x = x
        # self.y = y.reshape((-1, 1))
        self.x = self.input_scaler.transform(x)
        self.y = self.output_scaler.transform(y).reshape((-1, 1))
        if self.is_debug:
            self.x = self.x[:512]
            self.y = self.y[:512]
        self.x = torch.tensor(self.x).float()
        self.y = torch.tensor(self.y).float()
        # manual train/test split (unlike the 1-D class, which delegates
        # to _generate_train_test_sets)
        data_limit = self._get_data_limit()
        self.x_train = self.x[:data_limit]
        self.y_train = self.y[:data_limit]
        self.x_test = self.x[data_limit:]
        self.y_test = self.y[data_limit:]

    def _get_x_y(self, x_1, x_2, noise):
        """Return stacked (N, 2) inputs and (N, 1) sinusoidal targets."""
        x = np.array(list(zip(x_1, x_2)))
        dataset_size = x.shape[0]
        y = x_1 + 0.3 * np.sin(2 * np.pi * (x_1 + np.random.normal(noise[0], noise[1], dataset_size))) + \
            0.3 * np.sin(4 * np.pi * (x_2 + np.random.normal(noise[0], noise[1], dataset_size))) + \
            np.random.normal(noise[0], noise[1], dataset_size)
        return x, y.reshape(-1, 1)

    def unnormalize_output(self, y_pred: torch.Tensor) -> torch.Tensor:
        """Map normalised predictions back to the original output scale."""
        y_pred = y_pred.numpy().reshape((-1, 1))
        y_pred_unnormalized = self.output_scaler.inverse_transform(y_pred).reshape(-1)
        return torch.Tensor(y_pred_unnormalized)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        x = self.x[idx]
        y = self.y[idx]
        return x, y
| true
|
50c4efde030006daeab9693571f8c1d5324c5b80
|
Python
|
1576dkm/Spark_Jobs
|
/Abs/p1.py
|
UTF-8
| 408
| 2.59375
| 3
|
[] |
no_license
|
from operator import add
from pyspark import SparkContext
# Spark job: per-country element-wise sum of semicolon-separated values.
sc = SparkContext("local[*]", "example")
# NOTE(review): the Windows path relies on un-escaped backslashes
# ('\j', '\D' are kept literally); a raw string r"..." would be safer.
rdd = sc.textFile("C:\\Users\janjanam.sudheer\Desktop\data.csv")
# split the csv line -> (country, [v1; v2; ...]) -> drop the header row
# -> parse ints -> sum the value lists element-wise per country
rdd1 = rdd.map(lambda x : x.split(',')).map(lambda x: (x[0],x[1].split(';'))).filter(lambda line: "Country" not in line).map(lambda z : (z[0],list(map(lambda y : int(y),z[1])))).reduceByKey(lambda a, b: list(map(add,a,b)))
print(rdd1.take(10))
| true
|
c6d6b7f340d07353c7da767b83732d87361ae593
|
Python
|
kar655/Kaggle-cactus-identification
|
/build_data.py
|
UTF-8
| 2,879
| 3.125
| 3
|
[] |
no_license
|
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
labels = pd.read_csv("train.csv")
class Cactus():
IMG_SIZE = 32
cactuscount = 0
notcactuscount = 0
# 0: not-cac 1: cac
test_data_amount = [0, 0] # trying to get 1000 cac and 1000 not-cac
training_data = []
test_data = []
def make_dataset(self):
for _, row in tqdm(labels.iterrows()):
has_cac = row['has_cactus']
try:
path = os.path.join("train", row['id'])
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) / 255
# first 1000 imgs of cac and not-cac goes to test dataset
if(self.test_data_amount[has_cac] < 1000):
self.test_data_amount[has_cac] += 1
self.test_data.append([
np.array(img),
int(has_cac)])
else:
self.training_data.append([
np.array(img),
int(has_cac)])
if has_cac:
self.cactuscount += 1
else:
self.notcactuscount += 1
except Exception as e:
print(str(e))
print("Cactus / Total = ",
round(self.cactuscount / (self.cactuscount + self.notcactuscount), 4))
print(len(self.training_data))
# count cac
def count(self, arr):
sum = 0
for _, is_cac in arr:
sum += is_cac
# adding either 0 or 2
return sum
def save(self):
np.random.shuffle(self.training_data)
np.random.shuffle(self.test_data)
np.save("training_data.npy", self.training_data)
np.save("testing_data.npy", self.test_data)
with open("info.txt", "a") as f:
f.write(f"Time: {time.time()}\n")
f.write(f"Whole dataset\n")
f.write(f"Img size: {cac.IMG_SIZE}\n")
f.write(f"Cactuses: {self.cactuscount}\n")
f.write(f"Notcactuses: {self.notcactuscount}\n")
f.write(f"Total: {self.cactuscount + self.notcactuscount}\n\n")
f.write(f"Training dataset\n")
f.write(f"Cactuses: {self.count(self.training_data)}\n")
f.write(f"Training data samples: {len(self.training_data)}\n")
f.write(f"Percentage: {self.count(self.training_data) / len(self.training_data)}\n")
f.write(f"Testing dataset\n")
f.write(f"Cactuses: {self.count(self.test_data)}\n")
f.write(f"Test data samples: {len(self.test_data)}\n")
f.write(f"Percentage: {self.count(self.test_data) / len(self.test_data)}\n")
f.write("\n")
cac = Cactus()
cac.make_dataset()
cac.save()
| true
|
890735457d3b5c643b871623ee8af1edcddef0f7
|
Python
|
bledem/webvision
|
/cnn/vocab.py
|
UTF-8
| 2,578
| 2.921875
| 3
|
[] |
no_license
|
# Create a vocabulary wrapper
import nltk
import pickle
from collections import Counter
import json
import argparse
import os
class Vocabulary(object):
    """Bidirectional word <-> index mapping.

    Looking up an unseen word falls back to the index of '<unk>'
    (raising KeyError if '<unk>' was never added).
    """

    def __init__(self):
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0  # next index to assign

    def add_word(self, word):
        # only assign a new index the first time a word is seen
        if word in self.word2idx:
            return
        self.word2idx[word] = self.idx
        self.idx2word[self.idx] = word
        self.idx += 1

    def __call__(self, word):
        try:
            return self.word2idx[word]
        except KeyError:
            return self.word2idx['<unk>']

    def __len__(self):
        return len(self.word2idx)
def from_json(path):
    """Collect raw description and title strings from a JSON metadata file.

    The file is expected to hold a list of entries, each with
    'descriptions' and 'title' lists of {'raw': ...} items.
    Returns the flat list of all raw strings (descriptions first).
    """
    # BUG FIX: use a context manager instead of a bare open() so the
    # file handle is closed promptly rather than leaked
    with open(path, 'r') as f:
        dataset = json.load(f)
    captions = []
    for d in dataset:
        captions += [str(x['raw']) for x in d['descriptions']]
        captions += [str(x['raw']) for x in d['title']]
    return captions
def build_vocab(data_path, threshold):
    """Build a simple vocabulary wrapper.

    Tokenises every caption found in the JSON files under data_path
    and keeps the words occurring at least `threshold` times.
    """
    counter = Counter()
    for json_file in sorted(os.listdir(data_path)):
        print("Processing metadata of {}".format(json_file))
        full_path = os.path.join(data_path, json_file)
        captions = from_json(full_path)
        for i, caption in enumerate(captions):
            # NOTE(review): str.decode() only exists on Python 2 byte
            # strings; on Python 3 this line raises AttributeError
            tokens = nltk.tokenize.word_tokenize(
                caption.lower().decode('utf-8'))
            counter.update(tokens)
            if i % 1000 == 0:
                print("[%d/%d] tokenized the captions." % (i, len(captions)))

    # Discard if the occurrence of the word is less than the threshold.
    words = [word for word, cnt in counter.items() if cnt >= threshold]

    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary()
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')

    # Add the surviving words to the vocabulary.
    for i, word in enumerate(words):
        vocab.add_word(word)
    return vocab
def main(data_path, data_name):
    """Build the vocabulary and pickle it under ./vocab/."""
    vocab = build_vocab(data_path, threshold=4)
    with open('./vocab/%s_vocab.pkl' % data_name, 'wb') as f:
        pickle.dump(vocab, f, pickle.HIGHEST_PROTOCOL)
    print("Saved vocabulary file to ", './vocab/%s_vocab.pkl' % data_name)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='/w/31/faghri/vsepp_data/')
    parser.add_argument('--data_name', default='webvision')
    opt = parser.parse_args()
    main(opt.data_path, opt.data_name)
| true
|
b78980d67c88fb9400fc79bc00e1aa0d0a2b96d3
|
Python
|
an-kumar/rntn
|
/stepper.py
|
UTF-8
| 9,974
| 2.84375
| 3
|
[] |
no_license
|
'''
Ankit Kumar
ankitk@stanford.edu
learning steps for the rntn model
uses adagrad as in the paper
'''
from rntn import *
import cPickle as pkl
from data import *
def softmax_crossentropy_cost(tree):
    """Cross-entropy cost over every node's softmax against its label.

    Side effect: stores each node's output-layer gradient (softmax
    minus the one-hot label) under rntnparams['dtop'].
    """
    total = 0.
    for node in tree.nodes:
        probs = node.rntnparams['softmax']
        total -= np.log(probs[node.label])
        grad = np.copy(probs)
        grad[node.label] -= 1.
        node.rntnparams['dtop'] = grad
    return total
def unsupervised_softmax_cost(tree):
    """Mean negative log-likelihood over each node's word indices.

    Side effect: stores each node's output-layer gradient under
    rntnparams['dtop'], spreading the target mass 1/len(wIndices)
    across the node's word indices.
    """
    total = 0.
    for node in tree.nodes:
        idx = node.rntnparams['wIndices']
        probs = node.rntnparams['softmax']
        total -= np.mean(np.log(probs[idx]))
        grad = np.copy(probs)
        grad[idx] -= 1. / len(idx)
        node.rntnparams['dtop'] = grad
    return total
class stepper(object):
    '''
    Optimizer wrapper for the RNTN: runs forward/backward over a minibatch,
    adds L2 regularization, and updates the model with either AdaGrad or
    SGD-with-momentum. Also provides a numerical gradient check.

    Python 2 code (print statements); model is a dict-like of parameter
    arrays with 'update' and 'regularize' key lists.
    '''
    # NOTE(review): `reg` is a mutable default argument. It is only ever read
    # (never mutated) in this class, so the shared-default pitfall does not
    # bite here -- but it is fragile if anyone ever mutates self.reg.
    def __init__(self, lr=1e-2, reg={'Ws': 1e-4, 'L':1e-4, 'W':1e-3, 'V':1e-3, 'bs':1e-4, 'b':1e-3}, learning_algo='adagrad', momentum=.9):
        self.lr = lr
        self.start_lr = lr # for SGD decreasing
        self.reg = reg
        # accumulated per-parameter state: squared grads (adagrad) or last
        # update (sgd momentum); lazily initialized on the first step
        self.historical_gparams = None
        self.learning_algo = learning_algo
        self.momentum = momentum
    def step(self,model, minibatch, costfunction, epoch=0):
        '''
        performs a single step given the model and the minibatch
        minibatch is an iterable of PTBtrees
        returns the cost on the minibatch
        '''
        # iterate through batch and go forward,backward,cost
        cost, gparams = self.cost_fbprop(model, minibatch, costfunction)
        # # get regularization cost and gparams
        # reg_cost, reg_gparams = self.cost_regularize(model)
        # # add the two gparam dicts and scale by minibatch size (so that larger minibatches doesn't greatly change anything)
        # minibatch_scale = 1/float(len(minibatch))
        # for k in reg_gparams:
        #     # only add the reg gparams to gparams; non reg gparams stay as they were
        #     gparams[k] += reg_gparams[k]
        #     gparams[k] * minibatch_scale
        # now update the model
        if self.learning_algo == 'adagrad':
            # if reset_adagrad is not None:
            #     if epoch > 0 and (epoch % reset_adagrad == 0):
            #         print "resetting adagrad"
            #         self.historical_gparams = None
            self.update_model_adagrad(model, gparams)
        elif self.learning_algo == 'sgd':
            self.lr = self.start_lr * (.98**epoch) # trick to scale the lr down
            self.update_model_sgd(model,gparams)
        # return the cost (scaled by minibatch size)
        return cost
    def update_model_sgd(self, model, gparams):
        '''
        sgd with momentum
        '''
        # first we get current updates:
        upds = {}
        for p in gparams:
            upds[p] = -1*self.lr * gparams[p]
        # now add momentum term
        if self.historical_gparams is None:
            # NOTE(review): this aliases the upds dict (and its arrays) as the
            # momentum state on the very first step -- verify intended.
            self.historical_gparams = upds
        else:
            for p in upds:
                upds[p] += self.momentum * self.historical_gparams[p]
                self.historical_gparams[p] = upds[p]
        # apply the (negative-gradient + momentum) updates in place
        for p in upds:
            model[p] += upds[p]
    def update_model_adagrad(self, model, gparams):
        '''
        updates a model using adagrad learning algorithm
        '''
        # we're gonna use AdaGrad because that's what is listed in the paper
        # note that I have never really used AdaGrad before so I'm going off the original paper + this helpful blog: http://xcorr.net/2014/01/23/adagrad-eliminating-learning-rates-in-stochastic-gradient-descent/
        epsilon = 1e-6
        if self.historical_gparams is None:
            # first step: use the raw gradients, seed the squared-grad cache
            adj_gparams = gparams
            self.historical_gparams = {} # set the historical gparams
            for key in gparams:
                self.historical_gparams[key] = (gparams[key] ** 2)
        else:
            adj_gparams = {}
            for key in gparams:
                grad = gparams[key]
                # add square of current gradient to historical gradient
                self.historical_gparams[key] += (grad**2)
                # calculate adjusted gradient
                if np.isinf(self.historical_gparams[key]).sum() > 0 or np.isnan(self.historical_gparams[key]).sum()> 0:
                    print key
                    raise Exception ("fuck.")
                adj_gparams[key] = grad / (epsilon + np.sqrt(self.historical_gparams[key]))
        # now one by one update the params
        for key in adj_gparams:
            model[key] -= self.lr * adj_gparams[key]
    def cost_regularize(self, model):
        '''
        computes the cost and fills a dict of gparams for the regularization
        '''
        # L2 penalty over the parameters listed in model['regularize']
        cost = 0.
        gparams = {}
        for p in model['regularize']:
            gparams[p] = model[p] * self.reg[p]
            cost += np.sum(model[p]**2) * (self.reg[p]) * .5 # .5 so that the gradient is just reg*model[p], otherwise would be 2x
        return cost, gparams
    def cost_fbprop(self,model,minibatch,costfunction):
        '''
        total cost and gparams
        '''
        # data term (scaled by 1/|minibatch|) plus unscaled L2 term
        cost, gparams = self.cost_fbprop_softmax(model, minibatch, costfunction)
        reg_cost,reg_gparams = self.cost_regularize(model)
        # add the two gparam dicts and scale by minibatch size (so that larger minibatches doesn't greatly change anything)
        minibatch_scale = 1/float(len(minibatch))
        for k in gparams:
            gparams[k] *= minibatch_scale
        for k in reg_gparams:
            # only add the reg gparams to gparams; non reg gparams stay as they were
            gparams[k] += reg_gparams[k]
        cost = cost*minibatch_scale + reg_cost
        # cost *= minibatch_scale
        return cost, gparams
    def cost_fbprop_softmax(self, model, minibatch,costfunction):
        '''
        iterates through the batch and goes forward, backward, and computes cost
        returns cost and an aggregated gparam dict
        '''
        cost = 0.
        # one zero-initialized accumulator per updatable parameter
        gparams = {k:np.zeros(model[k].shape) for k in model['update']}
        # below is commented because in practice it was slower.
        batch_fprop(model, minibatch)
        for tree in minibatch:
            # fprop, bprop
            # fprop(model, tree)
            cost += costfunction(tree) # this should add node.rntnparams['dh']
            local_gparams = bprop(model, tree)
            # accumulate the dicts
            for k in local_gparams:
                gparams[k] += local_gparams[k]
            # cost += self.compute_cost(tree)
        return cost, gparams
    def compute_cost(self, tree):
        ''' computes the cost of the tree, assumed to already be fpropd. note that this does NOT include regularization cost.'''
        cost = 0.
        for node in tree.nodes:
            # debug aid: a zero probability would make the log blow up
            if node.rntnparams['softmax'][node.label] == 0:
                print node.rntnparams
            cost -= np.log(node.rntnparams['softmax'][node.label])
        return cost
    def gradcheck(self, model, minibatch, costfunction, num_checks=5, epsilon=1e-5):
        '''
        does a gradcheck on the minibatch
        lot of the code is very similar to karpathy neuraltalk
        '''
        # run forward, backward to get grads & cost
        # For each parameter: compare the analytic gradient against a central
        # finite difference, once at the largest-gradient entry and once at a
        # random entry.
        for it in range(num_checks):
            cost, gparams = self.cost_fbprop(model, minibatch,costfunction)
            for p in model['update']:
                mat = model[p]
                grad = gparams[p]
                assert mat.shape == grad.shape, "shapes dont match"
                # let's also do a max to get out of numerical stuff (hopefully)
                ri = np.argmax(grad.flat)
                old = mat.flat[ri]
                # add epsilon
                mat.flat[ri] = old + epsilon
                c1,_ = self.cost_fbprop(model, minibatch,costfunction)
                # subtract epsilon
                mat.flat[ri] = old - epsilon
                c2, _=self.cost_fbprop(model, minibatch,costfunction)
                # back to normal
                mat.flat[ri] = old
                analytic = grad.flat[ri]
                numerical = (c1 - c2) / (2*epsilon)
                print "MAX: param: %s. analytical grad: %s. numerical grad: %s. relative error: %s" % (p, str(analytic), str(numerical), str(abs(analytic - numerical) / abs(numerical + analytic)))
                ri = np.random.randint(mat.size)
                old = mat.flat[ri]
                # add epsilon
                mat.flat[ri] = old + epsilon
                c1,_ = self.cost_fbprop(model, minibatch,costfunction)
                # subtract epsilon
                mat.flat[ri] = old - epsilon
                c2, _=self.cost_fbprop(model, minibatch,costfunction)
                # back to normal
                mat.flat[ri] = old
                analytic = grad.flat[ri]
                numerical = (c1 - c2) / (2*epsilon)
                print "param: %s. analytical grad: %s. numerical grad: %s. relative error: %s" % (p, str(analytic), str(numerical), str(abs(analytic - numerical) / abs(numerical + analytic)))
if __name__ == '__main__':
    # Smoke test: build a model, take a couple of adagrad steps on small
    # minibatches, then run a numerical gradient check.
    model1 = initRNTN(25, 16582, 5, activation='tanh', wordActivations=False)
    # model2= initRNTN(25, 16582, 5, activation='relu')
    train = pkl.load(open('formatted/train.pkl'))
    s1 = stepper()
    # s2 = stepper()
    # test step
    mbsize = 27
    minibatches = [train.trees[i:i+mbsize] for i in range(0,len(train.trees), mbsize)][:2]
    for mb in minibatches:
        s1.step(model1, mb, softmax_crossentropy_cost)
    # s2.step(model2, train.trees[:50])
    # test grad
    s1.gradcheck(model1,train.trees[51:71],softmax_crossentropy_cost)
    print "\n\n"
    # s2.gradcheck(model2, train.trees[51:71])
| true
|
c0a508625d84d1b3117a33a8987a7b8a0108e28c
|
Python
|
gogofunoliver/WeChatCon
|
/WeChatServer/CloudVision.py
|
UTF-8
| 1,204
| 2.75
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# filename: CloudVision.py
# Jason Li
# text detection API
import argparse
import io
from google.cloud import vision
from google.cloud.vision import types
class GCPCV(object):
    """Thin wrapper around the Google Cloud Vision document-text API."""

    # Bug fix: the method was declared without ``self`` and without
    # ``@staticmethod``, so calling it on a GCPCV *instance* raised a
    # TypeError (the instance was passed as ``path``). Marking it a
    # staticmethod keeps ``GCPCV.detect_document(path)`` working and also
    # makes ``GCPCV().detect_document(path)`` valid.
    @staticmethod
    def detect_document(path):
        """Detects document features in an image.

        Prints each text block's concatenated symbol text and bounding box.
        """
        client = vision.ImageAnnotatorClient()
        # Read the raw image bytes from disk.
        with io.open(path, 'rb') as image_file:
            content = image_file.read()
        image = types.Image(content=content)
        response = client.document_text_detection(image=image)
        document = response.full_text_annotation
        for page in document.pages:
            for block in page.blocks:
                # Flatten block -> paragraphs -> words -> symbols into text.
                block_words = []
                for paragraph in block.paragraphs:
                    block_words.extend(paragraph.words)
                block_symbols = []
                for word in block_words:
                    block_symbols.extend(word.symbols)
                block_text = ''
                for symbol in block_symbols:
                    block_text = block_text + symbol.text
                print('Block Content: {}'.format(block_text))
                print('Block Bounds:\n {}'.format(block.bounding_box))
| true
|
00edc8f96e99bc3890df137019b0c585194d6d9c
|
Python
|
boks01/True-or-False
|
/checking.py
|
UTF-8
| 965
| 4.09375
| 4
|
[] |
no_license
|
class Checking:
    """Runs a true/false quiz over parallel question/answer lists and keeps score."""

    def __init__(self, question, answer):
        self.answer = answer
        self.question = question
        self.question_number = 0
        self.score = 0

    def give_question(self):
        """Ask every question in order, check each reply, then print the final score."""
        for _ in self.question:
            current = self.question[self.question_number]
            expected = self.answer[self.question_number]
            self.question_number += 1
            reply = input(f"{self.question_number}.{current} : True or False?\n")
            self.check_answer(expected, reply)
        print(f"\nYou reach the end of this quiz\nscore: {self.score}/{self.question_number}")

    def check_answer(self, answer, user_answer):
        """Compare case-insensitively, update the score, and print feedback."""
        if answer.lower() == user_answer.lower():
            print("You right!!!")
            self.score += 1
        else:
            print("You wrong")
        print(f"{self.score}/{self.question_number}")
| true
|
873ac0f5487556c590adb9ced4a2edf7cb7353ff
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02803/s480619618.py
|
UTF-8
| 1,651
| 2.671875
| 3
|
[] |
no_license
|
#!usr/bin/env python3
from collections import defaultdict, deque, Counter, OrderedDict
from functools import reduce, lru_cache
import collections, heapq, itertools, bisect
import math, fractions
import sys, copy
def LI(): return [int(x) for x in sys.stdin.readline().split()]  # one line of ints
def LI1(): return [int(x) - 1 for x in sys.stdin.readline().split()]  # ints shifted to 0-based
def I(): return int(sys.stdin.readline().rstrip())  # single int
def LS(): return [list(x) for x in sys.stdin.readline().split()]  # tokens as char lists
def S(): return list(sys.stdin.readline().rstrip())  # whole line as char list
def IR(n): return [I() for i in range(n)]  # n ints, one per line
def LIR(n): return [LI() for i in range(n)]  # n int-lines
def SR(n): return [S() for i in range(n)]  # n char-list lines (grid rows)
def LSR(n): return [LS() for i in range(n)]  # n token-lines
sys.setrecursionlimit(1000000)
# (dy, dx) offsets: 4-neighbourhood and 8-neighbourhood
dire = [[1, 0], [0, 1], [-1, 0], [0, -1]]
dire8 = [[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]]
MOD = 1000000007
def main():
    # "Maze diameter": run a BFS from every open cell '.' and report the
    # largest shortest-path distance seen anywhere in the grid.
    H, W = LI()
    maze = SR(H)
    ans = 0
    for h in range(H):
        for w in range(W):
            if maze[h][w] == '#':
                continue
            # BFS from (h, w); queue entries are (distance, y, x)
            q = deque()
            q.append((0, h, w))
            visited = [[False] * W for _ in range(H)]
            visited[h][w] = True
            while q:
                dist, y, x = q.popleft()
                for dy, dx in dire:
                    if y + dy >= 0 and y + dy < H and x + dx >= 0 and x + dx < W:
                        if maze[y + dy][x + dx] == '.' and not visited[y + dy][x + dx]:
                            q.append((dist + 1, y + dy, x + dx))
                            visited[y + dy][x + dx] = True
            # after the BFS drains, dist holds the last (= largest) distance popped
            ans = max(ans, dist)
    print(ans)
if __name__ == '__main__':
    # Read the grid from stdin and print the answer.
    main()
| true
|
995424bb9db2fcd925533d89bee981055daa223e
|
Python
|
LoverOfPies/AutomationBuild
|
/src/gui/dictionary/simple_dictionary/BaseUnitUI.py
|
UTF-8
| 4,016
| 2.5625
| 3
|
[] |
no_license
|
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.screenmanager import Screen
from kivy.uix.scrollview import ScrollView
from src.db.models.base.BaseUnit import BaseUnit
from src.gui.BaseUIUtils import init_control_buttons, init_title_layout, init_back_button
from src.gui.add_dictionary.AddRowSimplePopup import AddRowSimplePopup
from src.gui.custom_uix.ChangeTextAttributePopup import ChangeTextAttributePopup
from src.gui.custom_uix.DeleteRowButton import DeleteRowButton
from src.gui.custom_uix.SelectableButton import SelectableButton
class BaseUnitUI:
    """Kivy screen for the 'base units' dictionary: lists rows with
    edit/delete controls plus navigation and add buttons."""
    screen_name = 'baseunit_screen'      # kivy Screen name for this view
    parent_screen = 'dictionary_screen'  # screen to return to via "back"
    table_name = 'Базовые единицы'       # human-readable table title (Russian UI)
    model_class = BaseUnit               # peewee-style model backing this view
    screen = Screen(name=screen_name)
    add_popup = AddRowSimplePopup        # popup used by the "add row" button
    def __init__(self, screen_manager):
        self.sm = screen_manager
        self.update_screen()
        self.sm.add_widget(self.screen)
    # Rebuild the screen contents from scratch
    def update_screen(self):
        self.screen.clear_widgets()
        self.screen.add_widget(self.main_layout())
    # Build the main layout for the interface
    def main_layout(self):
        main_anchor = AnchorLayout()
        bl = BoxLayout(orientation='vertical', size_hint=[.7, .9])
        main_anchor.add_widget(bl)
        # Filter panel (disabled)
        # search_layout = BoxLayout(orientation='horizontal', size_hint=[1, .2], padding=[0, 15])
        # search_layout.add_widget(Label(text='Фильтр'))
        # id_input = TextInput(hint_text='id', multiline=False)
        # name_input = TextInput(hint_text='Наименование', multiline=False)
        # search_button = Button(text='Поиск')
        # search_layout.add_widget(id_input)
        # search_layout.add_widget(name_input)
        # search_layout.add_widget(search_button)
        # Data output: scrollable two-column grid (name, delete button)
        data_scroll = ScrollView(do_scroll_y=True, do_scroll_x=False)
        data_layout = Builder.load_string('''GridLayout:
        size:(root.width, root.height)
        size_hint_x: 1
        size_hint_y: None
        cols: 2
        height: self.minimum_height
        row_default_height: 50
        row_force_default: True''')
        data_scroll.add_widget(data_layout)
        data_layout.add_widget(Label(text='Наименование', height=dp(30)))
        data_layout.add_widget(Label(text='', height=dp(30)))
        base_units = self.model_class.select()
        for base_unit in base_units:
            # Clicking a name opens a rename popup for that row.
            data_layout.add_widget(SelectableButton(height=dp(30),
                                                    text=str(base_unit.name),
                                                    popup_title="Изменить наименование",
                                                    class_popup=ChangeTextAttributePopup,
                                                    dict_class=self.model_class,
                                                    id_value=str(base_unit.id),
                                                    field='name'
                                                    ))
            data_layout.add_widget(DeleteRowButton(height=dp(30),
                                                   text='Удалить',
                                                   id_value=str(base_unit.id),
                                                   ui=self))
        # Form title
        title_layout = init_title_layout(self)
        # Control buttons (e.g. add row)
        button_layout = init_control_buttons(self)
        # Back button
        back_layout = init_back_button(self)
        bl.add_widget(title_layout)
        bl.add_widget(back_layout)
        bl.add_widget(data_scroll)
        # bl.add_widget(search_layout)
        bl.add_widget(button_layout)
        return main_anchor
| true
|
fe3129bdcdd5644f8eb6ae4fc4701f750863c384
|
Python
|
gladpark/network-importer
|
/tests/unit/adapters/test_base_adapter.py
|
UTF-8
| 1,436
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""test for the base adapter."""
from typing import List, Optional
from pydantic import BaseSettings
from network_importer.adapters.base import BaseAdapter
def test_init_no_settings_class():
    """Without a settings_class, BaseAdapter stores `settings` verbatim (incl. None)."""
    adapter = BaseAdapter(nornir="nornir_object", settings=None)
    assert adapter.nornir == "nornir_object"
    assert adapter.settings is None
    adapter = BaseAdapter(nornir="nornir_object", settings={"mysettings": "settings"})
    assert adapter.nornir == "nornir_object"
    assert adapter.settings == {"mysettings": "settings"}
def test_init_with_settings_class():
    """Validate that the settings are properly initialized with the settings_class when present."""
    class MyAdapterSettings(BaseSettings):
        """Fake adapter settings."""
        first: List[str] = list()
        second: Optional[str]
    class MyAdapter(BaseAdapter):
        """test adapter."""
        settings_class = MyAdapterSettings
        def load(self):
            """load must be defined."""
    # settings=None: defaults from MyAdapterSettings apply
    adapter = MyAdapter(nornir="nornir_object", settings=None)
    assert adapter.nornir == "nornir_object"
    assert adapter.settings is not None
    assert adapter.settings.first == []
    assert adapter.settings.second is None
    # partial dict: missing keys fall back to the declared defaults
    adapter = MyAdapter(nornir="nornir_object", settings={"second": "settings"})
    assert adapter.nornir == "nornir_object"
    assert adapter.settings.first == []
    assert adapter.settings.second == "settings"
| true
|
30f04eafccdaf258dff18d0448b1311371e2bce5
|
Python
|
tro9lh/RandomYoutubeVideo
|
/defs.py
|
UTF-8
| 3,574
| 2.5625
| 3
|
[] |
no_license
|
import requests
import json
import datetime
import random
def get_random_video_ru():
    """Return the id (string) of a pseudo-random short Russian YouTube video.

    Strategy: pick a random one-day window within the last ~3000 days and a
    random Cyrillic letter as the search query, then ask the YouTube Data API
    for the newest short video in that window; retry with a new window when
    the window comes back empty.

    Fixes:
    - the retry in the except branch discarded its result (missing ``return``),
      so every retried call handed ``None`` back to the caller;
    - ``randrange(0, len(qQ) - 1)`` could never select the last letter of the
      alphabet; ``random.choice`` covers every letter uniformly;
    - the bare ``except`` now catches only the expected lookup failures.
    """
    day = random.randrange(3000)
    sec = random.randrange(86399)
    datenow = datetime.datetime.now()
    publishedAfter1 = datenow - datetime.timedelta(days=(day + 1), seconds=sec)
    publishedAfter = publishedAfter1.strftime("%Y-%m-%dT%H:%M:%SZ")
    publishedBefore1 = datenow - datetime.timedelta(days=day, seconds=sec)
    publishedBefore = publishedBefore1.strftime("%Y-%m-%dT%H:%M:%SZ")
    qQ = 'абвгдеёжзийклмнопрстуфхцчшщъыэюя'
    q = random.choice(qQ)
    # SECURITY: hard-coded API key -- should be moved to configuration/env.
    params = {"q": q,
              "order": 'date',
              "part": 'snippet',
              "type": 'video',
              "videoDuration": 'short',
              "publishedBefore": publishedBefore,
              "publishedAfter": publishedAfter,
              "relevanceLanguage": 'ru',
              "maxResults": 1,
              "key": 'AIzaSyDMQuRHpIjfG_eMhJEVNo2TEcntJfw5_Ms'}
    url = 'https://www.googleapis.com/youtube/v3/search'
    r = requests.get(url, params=params)
    json_data = json.loads(r.text)
    try:
        return str(json_data["items"][0]["id"]["videoId"])
    except (KeyError, IndexError):
        # Empty result window: retry and propagate the result (was dropped before).
        return get_random_video_ru()
def get_random_video():
    """Return the id (string) of a pseudo-random short YouTube video.

    Same approach as get_random_video_ru but with an empty query and no
    language restriction.

    Fixes:
    - the retry in the except branch discarded its result (missing ``return``);
    - removed the unused local ``key`` (the key is already inline in params);
    - the bare ``except`` now catches only the expected lookup failures.
    """
    day = random.randrange(3000)
    sec = random.randrange(86399)
    datenow = datetime.datetime.now()
    publishedAfter1 = datenow - datetime.timedelta(days=(day + 1), seconds=sec)
    publishedAfter = publishedAfter1.strftime("%Y-%m-%dT%H:%M:%SZ")
    publishedBefore1 = datenow - datetime.timedelta(days=day, seconds=sec)
    publishedBefore = publishedBefore1.strftime("%Y-%m-%dT%H:%M:%SZ")
    # SECURITY: hard-coded API key -- should be moved to configuration/env.
    params = {"q": '',
              "order": 'date',
              "part": 'snippet',
              "type": 'video',
              "videoDuration": 'short',
              "publishedBefore": publishedBefore,
              "publishedAfter": publishedAfter,
              "maxResults": 1,
              "key": 'AIzaSyDMQuRHpIjfG_eMhJEVNo2TEcntJfw5_Ms'}
    url = 'https://www.googleapis.com/youtube/v3/search'
    r = requests.get(url, params=params)
    json_data = json.loads(r.text)
    try:
        return str(json_data["items"][0]["id"]["videoId"])
    except (KeyError, IndexError):
        # Empty result window: retry and propagate the result (was dropped before).
        return get_random_video()
def most_popular_video_a_year_ago(n):
    """Return the URL of the most-viewed short video published on this date
    *n* years ago, or an error string.

    Returns 'date error' when the target date predates the Unix epoch, and a
    parse-failure message when the API response has no items (original
    behaviour, with the message's spelling fixed).

    Fixes: removed the unused local ``key``; narrowed the bare ``except``;
    fixed the typo'd error string ('canot read request correct').
    """
    datenow = datetime.datetime.now()
    # NOTE(review): .replace(year=...) raises ValueError when today is Feb 29
    # and the target year is not a leap year -- confirm intended handling.
    date_some_years_ago = datenow.replace(year=datenow.year-n, hour=0, minute=0)
    publishedAfter = date_some_years_ago.strftime("%Y-%m-%dT%H:%M:%SZ")
    publishedBefore1 = date_some_years_ago + datetime.timedelta(days=1)
    publishedBefore = publishedBefore1.strftime("%Y-%m-%dT%H:%M:%SZ")
    stopdate = datetime.datetime(1970, 1, 1)
    if date_some_years_ago < stopdate:
        return 'date error'
    # SECURITY: hard-coded API key -- should be moved to configuration/env.
    params = {"q": '',
              "order": 'viewCount',
              "part": 'snippet',
              "type": 'video',
              "videoDuration": 'short',
              "publishedBefore": publishedBefore,
              "publishedAfter": publishedAfter,
              "maxResults": 1,
              "key": 'AIzaSyDMQuRHpIjfG_eMhJEVNo2TEcntJfw5_Ms'}
    url = 'https://www.googleapis.com/youtube/v3/search'
    r = requests.get(url, params=params)
    json_data = json.loads(r.text)
    try:
        temp = json_data["items"][0]["id"]["videoId"]
        return "https://www.youtube.com/watch?v=" + str(temp)
    except (KeyError, IndexError):
        return 'cannot read the request correctly'
| true
|
c5daaf71e11306b0f614c84959bd39ce2b6df241
|
Python
|
lijiunderstand/MultitaskNet
|
/utils/data_utils.py
|
UTF-8
| 4,856
| 2.859375
| 3
|
[] |
no_license
|
import os
import numpy as np
import h5py
import torch
import torch.utils.data as data
import pickle
from PIL import Image
class CreateData(data.Dataset):
    """torch Dataset over a dict of aligned numpy arrays.

    Expects keys 'rgb', 'depth', 'seg_label' and, optionally, 'class_label'.
    __getitem__ returns [rgb_tensor, depth_tensor, seg_label] plus the
    classification label when one was provided.
    """

    def __init__(self, dataset_dict):
        self.len_dset_dict = len(dataset_dict)
        self.rgb = dataset_dict['rgb']
        self.depth = dataset_dict['depth']
        self.seg_label = dataset_dict['seg_label']
        # Fix: always define use_class. Previously it was assigned only in the
        # >3-key branch, so reading it on a 3-key dataset raised AttributeError.
        self.use_class = self.len_dset_dict > 3
        if self.use_class:
            self.class_label = dataset_dict['class_label']

    def __getitem__(self, index):
        # Images are converted to tensors; labels are returned as stored.
        rgb_img = torch.from_numpy(self.rgb[index])
        depth_img = torch.from_numpy(self.depth[index])
        dataset_list = [rgb_img, depth_img, self.seg_label[index]]
        if self.use_class:
            dataset_list.append(self.class_label[index])
        return dataset_list

    def __len__(self):
        return len(self.seg_label)
def get_data(opt, dset_info, use_train=True, use_test=True, visualize = False):
    """
    Load NYU_v2 or SUN rgb-d dataset in hdf5 format from disk and prepare
    it for classifiers.

    Returns (train_dataset_generator, test_dataset_generator); either may be
    None when the corresponding use_* flag is False.
    NOTE(review): if dset_info's first key is neither "NYU" nor "SUN",
    ``h5file`` is never bound and the code below raises NameError -- confirm.
    """
    if list(dset_info.keys())[0] == "NYU":
        # Load the chosen datasets path
        if os.path.exists(opt.dataroot):
            path = opt.dataroot
        else:
            raise Exception('Wrong datasets requested. Please choose either "NYU" or "SUN"')
        h5file = h5py.File(path, 'r')
    elif list(dset_info.keys())[0] == "SUN":
        # SUN is loaded from individual image files, not hdf5
        h5file = None
    train_dataset_generator = None
    test_dataset_generator = None
    # Create python dicts containing numpy arrays of training samples
    if use_train:
        train_dataset_generator = dataset_generator(h5file, 'train', opt, dset_info, visualize)
        print('[INFO] Training set generator has been created')
    # Create python dicts containing numpy arrays of test samples
    if use_test:
        test_dataset_generator = dataset_generator(h5file, 'test', opt, dset_info, visualize)
        print('[INFO] Test set generator has been created')
    if h5file is not None:
        h5file.close()
    return train_dataset_generator, test_dataset_generator
def dataset_generator(h5file, dset_type, opt, dset_info, visualize):
    """
    Move h5 dictionary contents to python dict as numpy arrays and create dataset generator

    dset_type is 'train' or 'test'. For NYU the arrays come from the open
    h5file; for SUN they are read image-by-image from disk under opt.dataroot.
    Returns a CreateData instance.
    """
    use_class = opt.use_class
    if list(dset_info.keys())[0] == "NYU":
        dataset_dict = dict()
        # Create numpy arrays of given samples
        dataset_dict['rgb'] = np.array(h5file['rgb_' + dset_type], dtype=np.float32)
        dataset_dict['depth'] = np.array(h5file['depth_' + dset_type], dtype=np.float32)
        dataset_dict['seg_label'] = np.array(h5file['label_' + dset_type], dtype=np.int64)
        # If classification loss is included in training add the classification labels to the dataset as well
        if use_class:
            dataset_dict['class_label'] = np.array(h5file['class_' + dset_type], dtype=np.int64)
        print(dataset_dict['rgb'].shape)
        print(dataset_dict['depth'].shape)
        print(dataset_dict['seg_label'].shape)
        return CreateData(dataset_dict)
    elif list(dset_info.keys())[0] == "SUN":
        root = opt.dataroot
        # NOTE(review): splits.pkl is loaded but `splits` is unused below;
        # the index ranges are hard-coded instead -- confirm intended.
        splits = pickle.load(open(os.path.join(root, "splits.pkl"), "rb"), encoding="latin1")
        tsplits = None
        if dset_type == 'train':
            tsplits = np.arange(2000, 9001) #1 - 9001
        elif dset_type== 'test':
            tsplits = np.arange(9001, 10335) #9001 - 10335
        rgb = []
        depth = []
        mask = []
        for index in tsplits:
            # RGB image: HWC -> CHW
            rimg = np.array(Image.open(os.path.join(root, "images-224", str(index)+".png")))
            rimg = rimg.transpose(2, 0, 1)
            rgb.append(rimg)
            # Depth image: add a channel axis, then HWC -> CHW
            dimg = np.array(Image.open(os.path.join(root, "depth-inpaint-u8-224", str(index)+".png")))
            dimg = dimg[:, :, np.newaxis]
            dimg = dimg.transpose(2, 0, 1)
            depth.append(dimg)
            mask.append(np.array(Image.open(os.path.join(root, "seglabel-224", str(index)+".png"))))
            if visualize:
                if index == tsplits[0] + 300: # only 20 images for visualization, limited memory while training
                    break
        dataset_dict = dict()
        dataset_dict['rgb'] = np.array(rgb, dtype=np.float32)
        dataset_dict['depth'] = np.array(depth, dtype=np.float32)
        dataset_dict['seg_label'] = np.array(mask, dtype=np.int64)
        print(dataset_dict['rgb'].shape)
        #print(dataset_dict['depth'].shape)
        #print(dataset_dict['seg_label'].shape)
        return CreateData(dataset_dict)
| true
|
ff403a67dc337f4c59971d5be5adddbab58fae18
|
Python
|
shellydeforte/PDB
|
/pdb/lib/datetime_info.py
|
UTF-8
| 1,184
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""Create ISO 8601 compliant date and time stamps."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import pytz
import re
from datetime import datetime
class RE(object):
    """Namespace for the compiled ISO-8601 basic-format UTC timestamp regex
    (matches e.g. '20240131T235959Z' with named capture groups)."""
    datetime_stamp_pattern = """
    ^ # Anchor to the beginning of string.
    # Named references.
    (?P<year>\d{4})
    (?P<month>\d{2})
    (?P<day>\d{2})
    T # Delimiter (Begin time.)
    (?P<hour>\d{2})
    (?P<minute>\d{2})
    (?P<second>\d{2})
    Z # Indicate UTC TZ.
    """
    # Compiled once at class-definition time; VERBOSE allows the inline comments.
    datetime_pat = re.compile(datetime_stamp_pattern, re.VERBOSE)
def now_utc():
    """Return the current UTC time as an ISO-8601 basic timestamp, e.g. '20240131T235959Z'."""
    stamp_format = "%Y%m%dT%H%M%S" + "Z"
    return datetime.utcnow().strftime(stamp_format)
def create_dt_obj(time_stamp):
    """Parse a basic-format UTC timestamp (e.g. '20240131T235959Z') into an
    aware datetime.

    Raises:
        ValueError: if *time_stamp* does not match the expected pattern.
            (Previously this was a bare ``assert``, which is silently
            stripped under ``python -O``, letting a None match crash later
            with an opaque AttributeError.)
    """
    dt = RE.datetime_pat.search(time_stamp)
    if dt is None:
        raise ValueError(
            "not an ISO-8601 basic-format UTC timestamp: %r" % (time_stamp,))
    dt_obj = datetime(
        int(dt.group('year')),
        int(dt.group('month')),
        int(dt.group('day')),
        hour=int(dt.group('hour')),
        minute=int(dt.group('minute')),
        second=int(dt.group('second')),
        tzinfo=pytz.UTC
    )
    return dt_obj
| true
|
4a8f579cace15ea48effc824850c610308b56bd8
|
Python
|
JonathanCSantos098/programacao-orientada-a-objetos
|
/listas/lista-de-exercicio-02/questao11.py
|
UTF-8
| 159
| 3.15625
| 3
|
[] |
no_license
|
# Read three integers from stdin, then print:
#  - the product of double the first and half the second,
#  - triple the first plus the third,
#  - the third cubed.
num1=int(input())
num2=int(input())
num3=int(input())
produto=(num1*2)*(num2/2)
soma=(num1*3)+num3
potencia=num3**3
print(produto)
print(soma)
print(potencia)
| true
|
dde499912a98190e19a7026b2fe799d61d7c76b5
|
Python
|
Hchong16/VulnContract
|
/tests/declaration_test.py
|
UTF-8
| 5,152
| 2.65625
| 3
|
[] |
no_license
|
import unittest
import sys
sys.path.append("../..")
from declarations.solidityFile import SolidityFile
class TestFunctionsModule(unittest.TestCase):
    """Checks SolidityFile's top-level parse of examples/suicidal.sol:
    pragmas, imports, contracts, and per-contract function metadata."""
    filename = 'suicidal.sol'
    file_path = '../../examples/{}'.format(filename)
    # NOTE(review): parsed once at class-definition time (not in setUpClass),
    # so importing this module already touches the filesystem.
    smart_contract = SolidityFile(filename, file_path)
    smart_contract.parse_top_level()
    def test_pragmas(self):
        pragma_dict = self.smart_contract.pragmas
        pragma_obj = pragma_dict['solidity']
        expected_name = 'solidity'
        expected_kind = 'PragmaDirective'
        expected_version = '0.8.1'
        self.assertEqual(len(pragma_dict), 1) # Only 1 pragma in suicidal.sol
        self.assertEqual(pragma_obj.name, expected_name)
        self.assertEqual(pragma_obj.kind, expected_kind)
        self.assertEqual(pragma_obj.version, expected_version)
    def test_imports(self):
        import_dict = self.smart_contract.imports
        import_obj = import_dict['test']
        expected_path = 'test'
        expected_kind = 'ImportDirective'
        expected_symbol_aliases = {}
        expected_unit_alias = 'test'
        self.assertEqual(len(import_dict), 1) # Only 1 import in suicidal.sol
        self.assertEqual(import_obj.path, expected_path)
        self.assertEqual(import_obj.kind, expected_kind)
        self.assertEqual(import_obj.symbol_aliases, expected_symbol_aliases)
        self.assertEqual(import_obj.unit_alias, expected_unit_alias)
    def test_contracts(self):
        pragma_dict = self.smart_contract.pragmas
        contract_dict = self.smart_contract.underlying_contracts
        expected_contracts = ['Contract_v1', 'Contract_v2', 'Contract_v3',
                              'Contract_v4', 'Contract_v5']
        # NOTE(review): when the count is not 5 this test silently returns
        # (passes) instead of failing -- confirm that is intended.
        if len(contract_dict) == 5:
            self.assertEqual(len(contract_dict), 5)
        else:
            return
        # Evaluate each contract
        for contract in contract_dict.values():
            self.assertIn(contract.name, expected_contracts)
            self.assertEqual(len(contract.functions), 1)
    def test_functions(self):
        contract_dict = self.smart_contract.underlying_contracts
        # Evaluate function in Contract 1
        contract = contract_dict['Contract_v1']
        function = contract.functions['protected_kill_1']
        self.assertEqual(function.name, 'protected_kill_1')
        self.assertEqual(function.visibility, 'public')
        self.assertEqual(function.state_mutability, None)
        self.assertTrue(function.is_protected)
        self.assertEqual(len(function.arguments), 0)
        self.assertEqual(len(function.declarations), 0)
        self.assertEqual(len(function.identifiers), 5)
        self.assertEqual(len(function.returns), 0)
        # Evaluate function in Contract 2
        contract = contract_dict['Contract_v2']
        function = contract.functions['bad_kill_1']
        self.assertEqual(function.name, 'bad_kill_1')
        self.assertEqual(function.visibility, 'public')
        self.assertEqual(function.state_mutability, None)
        self.assertFalse(function.is_protected)
        self.assertEqual(len(function.arguments), 0)
        self.assertEqual(len(function.declarations), 1)
        self.assertEqual(len(function.identifiers), 6)
        self.assertEqual(len(function.returns), 0)
        # Evaluate function in Contract 3
        contract = contract_dict['Contract_v3']
        function = contract.functions['bad_kill_2']
        self.assertEqual(function.name, 'bad_kill_2')
        self.assertEqual(function.visibility, 'public')
        self.assertEqual(function.state_mutability, None)
        self.assertFalse(function.is_protected)
        self.assertEqual(len(function.arguments), 0)
        self.assertEqual(len(function.declarations), 0)
        self.assertEqual(len(function.identifiers), 2)
        self.assertEqual(len(function.returns), 0)
        # Evaluate function in Contract 4
        contract = contract_dict['Contract_v4']
        function = contract.functions['bad_kill_3']
        self.assertEqual(function.name, 'bad_kill_3')
        self.assertEqual(function.visibility, 'public')
        self.assertEqual(function.state_mutability, None)
        self.assertFalse(function.is_protected)
        self.assertEqual(len(function.arguments), 0)
        self.assertEqual(len(function.declarations), 0)
        self.assertEqual(len(function.identifiers), 5)
        self.assertEqual(len(function.returns), 0)
        # Evaluate function in Contract 5
        contract = contract_dict['Contract_v5']
        function = contract.functions['protected_kill_2']
        self.assertEqual(function.name, 'protected_kill_2')
        self.assertEqual(function.visibility, 'private')
        self.assertEqual(function.state_mutability, None)
        self.assertTrue(function.is_protected)
        self.assertEqual(len(function.arguments), 0)
        self.assertEqual(len(function.declarations), 0)
        self.assertEqual(len(function.identifiers), 2)
        self.assertEqual(len(function.returns), 0)
if __name__ == '__main__':
    # Run the test case directly with the unittest runner.
    unittest.main()
| true
|
177b0021efcbe19b1edf7f5151db60c43cf0c7d0
|
Python
|
jjliewie/kakao
|
/taxi.py
|
UTF-8
| 560
| 2.671875
| 3
|
[] |
no_license
|
import heapq
# floyd warshall algorithm
def solution(n, s, a, b, fares):
    """Minimum combined taxi fare: ride together from s to some split vertex,
    then separately on to a and b. All-pairs shortest paths via Floyd-Warshall
    on an undirected graph with 1-based vertices in `fares` = [u, v, cost]."""
    INF = 20000001
    dist = [[INF] * n for _ in range(n)]
    for v in range(n):
        dist[v][v] = 0
    # Undirected edges; a later duplicate edge overwrites an earlier one.
    for u, v, cost in fares:
        dist[u - 1][v - 1] = cost
        dist[v - 1][u - 1] = cost
    # Floyd-Warshall: allow each vertex in turn as an intermediate stop.
    for mid in range(n):
        row_mid = dist[mid]
        for u in range(n):
            row_u = dist[u]
            via = row_u[mid]
            for v in range(n):
                cand = via + row_mid[v]
                if cand < row_u[v]:
                    row_u[v] = cand
    # Try every split vertex and keep the cheapest total fare.
    best = 40000002
    for split in range(n):
        best = min(best, dist[s - 1][split] + dist[split][a - 1] + dist[split][b - 1])
    return best
| true
|
28b38a2047ed90001f54964a449f02eaebe24c98
|
Python
|
NgoKnows/Interview-Practice
|
/Trees and Graphs/List of Depths.py
|
UTF-8
| 475
| 2.90625
| 3
|
[] |
no_license
|
from collections import dequeue
def list_depths(root, queue=None, levels=None, index=0):
    """Group the nodes of a binary tree by depth (CtCI 'List of Depths').

    Returns a dict mapping depth -> list of nodes at that depth, in
    left-to-right order. Nodes are expected to have .left and .right.

    The original was non-functional and is rewritten:
    - ``from collections import dequeue`` / ``dequeue(...)``: no such name
      (it is ``deque``), so the module failed to import;
    - the loop tested undefined ``queue1`` -> NameError;
    - the next level was collected in ``queue2`` but the recursion passed the
      (now empty) ``queue``, and there was no base case -> infinite recursion;
    - ``LinkedList`` was undefined; plain lists are used instead.
    """
    # Local import keeps this function self-contained; the module-level
    # import line ("from collections import dequeue") is broken.
    from collections import deque

    if queue is None:
        queue = deque([root]) if root is not None else deque()
    if levels is None:
        levels = dict()
    # Base case: no nodes left at this depth.
    if not queue:
        return levels
    levels[index] = []
    next_queue = deque()
    while queue:
        node = queue.popleft()
        levels[index].append(node)
        if node.left:
            next_queue.append(node.left)
        if node.right:
            next_queue.append(node.right)
    # Recurse with the children collected for the next depth.
    return list_depths(root, next_queue, levels, index + 1)
| true
|
3e867851c9048c770968fa665bda04d4a5ddbdf9
|
Python
|
syn1911/Python-spider-demo
|
/01improve/14多线程04.py
|
UTF-8
| 910
| 3.5
| 4
|
[] |
no_license
|
# map使用
from concurrent.futures import ThreadPoolExecutor
import time
# `times` simulates the duration of a network request, in seconds.
def get_request(times):
    # Sleep to fake latency, report completion, and echo the duration back.
    time.sleep(times)
    print("用了{} 时间进行了返回".format(times))
    return times
# Demo: executor.map submits everything up-front and yields results in the
# order of the input sequence (unlike as_completed, which yields by finish time).
executor = ThreadPoolExecutor(max_workers=2)
urls = [3, 2, 4] # not real urls -- just simulated request durations
for data in executor.map(get_request, urls):
    print("获取 url {}s 成功".format(data))
# Original explanatory note (kept verbatim): map() needs no prior submit(),
# behaves like the stdlib map over the sequence, and yields results in input
# order even when a shorter task finishes first.
"""使用map方法,无需提前使用submit方法,
map方法与python标准库中的map含义相同,
都是将序列中的每个元素都执行同一个函数。
上面的代码就是对urls的每个元素都执行get_html函数,
并分配各线程池。可以看到执行结果与上面的as_completed方法的结果不同,
输出顺序和urls列表的顺序相同,就算2s的任务先执行完成,
也会先打印出3s的任务先完成,再打印2s的任务完成
"""
| true
|
1e554b0bc634837c42ca21686db3181b6225c5bc
|
Python
|
YangLiyli131/Leetcode2020
|
/in_Python/1276 Number of Burgers with No Waste of Ingredients.py
|
UTF-8
| 456
| 2.625
| 3
|
[] |
no_license
|
class Solution(object):
    def numOfBurgers(self, tomatoSlices, cheeseSlices):
        """
        LeetCode 1276: solve jumbo*4 + small*2 == tomatoSlices and
        jumbo + small == cheeseSlices with non-negative integers.

        :type tomatoSlices: int
        :type cheeseSlices: int
        :rtype: List[int]  -- [jumbo, small] or [] when impossible

        Fix: the original used Python-2-style '/' division, which returns
        floats under Python 3 (e.g. [1.0, 6.0]); '//' keeps the counts ints.
        """
        # Twice the burger counts, from the two linear equations.
        jumbo2 = tomatoSlices - 2 * cheeseSlices
        small2 = 4 * cheeseSlices - tomatoSlices
        # Counts must be non-negative integers.
        if jumbo2 < 0 or small2 < 0 or jumbo2 % 2 != 0 or small2 % 2 != 0:
            return []
        return [jumbo2 // 2, small2 // 2]
| true
|
ca9b7730bc2a4157f7570d3dbce5114725c50613
|
Python
|
frostbyte16/botaku
|
/project_files/contentBased.py
|
UTF-8
| 3,560
| 3.28125
| 3
|
[] |
no_license
|
# content based filtering
import pandas as pd
from sklearn.metrics.pairwise import sigmoid_kernel, cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import random
def recommend(name, anime_type, subtype):
    """Content-based recommender: given a title, return up to 5 similar rows
    from '{anime_type}.csv' ('anime' or 'manga'), combining TF-IDF synopsis
    similarity (sigmoid kernel) with genre/subtype cosine similarity.
    Returns an empty DataFrame when the title is not found; NOTE(review):
    returns None when `name` is absent from df_anime.values entirely (the
    inner function falls off the end) -- confirm callers handle that.
    """
    df_anime = pd.read_csv(f"{anime_type}.csv")
    # Drops all blank anime with duplicate titles
    df_anime.drop_duplicates(subset=['title'], inplace=True)
    # NOTE(review): `x` is computed but never used.
    x = df_anime.loc[(df_anime['synopsis'].isnull())]
    # Drops all blank anime with blank synopsis
    df_anime.dropna(subset=['synopsis'], inplace=True)
    if anime_type == 'manga':
        df_anime['chCount'] = df_anime['chCount'].fillna(0)
        df_anime['vCount'] = df_anime['vCount'].fillna(0)
    # Resets the index of dataframe after removing blanks and duplicates
    df_anime.reset_index(drop=True, inplace=True)
    # Computes for the similarity of synopsis using sigmoid kernel
    tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode',
                          analyzer='word', token_pattern=r'\w{1,}', ngram_range=(1, 3), stop_words='english')
    tfv_matrix = tfv.fit_transform(df_anime['synopsis'])
    sig = sigmoid_kernel(tfv_matrix, tfv_matrix)
    # title -> row index lookup
    indices = pd.Series(df_anime.index, index=df_anime['title']).drop_duplicates()
    # One-hot encode subtype/genre columns for the cosine-similarity features.
    if anime_type == 'anime':
        df_anime = pd.concat(
            [df_anime, df_anime['subtype'].str.get_dummies(), df_anime['genre'].str.get_dummies(sep=',')], axis=1)
        anime_features = df_anime.loc[:, "TV":].copy()
    elif anime_type == 'manga':
        df_anime = pd.concat([df_anime, df_anime['genre'].str.get_dummies(sep=', ')], axis=1)
        anime_features = df_anime.loc[:, "'Cooking']":].copy()
    cosine_sim = cosine_similarity(anime_features.values, anime_features.values)
    # NOTE(review): the call below passes anime_type as `similarity`, but
    # `similarity` is unused -- the closure reads sig/cosine_sim directly.
    def recommend_anime(title, similarity=cosine_sim):
        title = title.replace(' (TV)', '')
        # Searches for anime if title exists in dataframe
        if title in df_anime.values:
            if df_anime['title'].str.contains(title).sum() > 0:
                idx = int(indices[title])
                # multiplies the similarity of synopsis and genre
                scores = list(enumerate(sig[idx] * cosine_sim[idx]))
                # sort the movies
                scores = sorted(scores, key=lambda x: x[1], reverse=True)
                # anime indices
                anime_indices = [i[0] for i in scores]
                if anime_type == 'anime':
                    recommendation = \
                        df_anime[['title', 'status', 'subtype', 'epCount', 'image', 'rating', 'synopsis']].iloc[
                            anime_indices]
                    # Restrict to the requested subtype by dropping the other.
                    if subtype == 'TV':
                        recommendation.drop(df_anime[df_anime['subtype'] == 'movie'].index, inplace=True)
                    elif subtype == 'movie':
                        recommendation.drop(df_anime[df_anime['subtype'] == 'TV'].index, inplace=True)
                else:
                    recommendation = \
                        df_anime[['title', 'status', 'chCount', 'vCount', 'image', 'rating', 'synopsis']].iloc[
                            anime_indices]
                # Returns 5 from Top 10 most similar anime
                ran_idx = random.randint(1, 5)
                recommendation = recommendation[ran_idx:ran_idx+5]
            else:
                # Returns empty dataframe if title is not found
                recommendation = df_anime.iloc[0:0]
            return recommendation
    recommended = recommend_anime(name, anime_type)
    return recommended
| true
|
a78c8ee218eacb59646c2dea593ffa74ebb7e231
|
Python
|
odnodn/PatientFM
|
/src/models/BiLstmCRF/decoder.py
|
UTF-8
| 2,379
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import torch
from torch import nn
from torchcrf import CRF
r""" The Decoder implements two tasks: I2B2 entity classification, and novel detection of entities.
I2B2 part uses the CRF, novel part uses linear+softmax """
class Decoder(nn.Module):
    r"""Joint decoder head: CRF over I2B2 entity labels plus a binary entity detector.

    Applies multi-head self-attention to the encoder outputs, then
    (a) linear + CRF produces the I2B2 label sequence and its loss, and
    (b) linear + log-softmax scores each token as entity / non-entity.
    """
    def __init__(self, input_size, hidden_size, output_size, max_len, batch_size=1, num_layers = 1, bi=True):
        super(Decoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.max_len = max_len
        # NOTE(review): attribute keeps the original spelling
        # 'mutihead_attention' (sic) because external code may reference it.
        self.mutihead_attention = nn.MultiheadAttention(self.input_size, num_heads=2)
        r""" I2B2 task: CRF """
        # hidden_size*2 assumes a bidirectional encoder -- TODO confirm the
        # unused `bi` flag is always True upstream.
        self.crf_linear = nn.Linear(self.hidden_size*2, self.output_size)
        self.crf = CRF(self.output_size, batch_first=True)
        r""" Entity detection task: linear + softmax """
        self.entity_linear = nn.Linear(self.hidden_size*2, 2)
        self.softmax = nn.LogSoftmax(dim=2)
    def generate_masked_labels(self, observed_labels, mask, device):
        """Pad each sequence's observed labels into a (batch, max_len) long tensor."""
        masked_labels = torch.zeros((mask.size(0), mask.size(1)), dtype=torch.long).to(device)
        for i in range(mask.size(0)):
            masked_labels[i, :len(observed_labels[i][0])] = observed_labels[i][0]
        return masked_labels
    r""" CHECK IF hn IS NECESSARY IN INPUT """
    # def forward(self, encoder_outputs, hn, batch_classes, mask, device):
    def forward(self, encoder_outputs, batch_classes, mask, device):
        """Return (crf_out, seg_out, -crf_loss).

        crf_out: label sequences decoded by the CRF,
        seg_out: per-token log-probabilities for entity/non-entity,
        third element: negated token-mean CRF log-likelihood, i.e. a loss
        suitable for minimisation.
        """
        r""" Attention block """
        # nn.MultiheadAttention expects (seq, batch, feature): permute in/out.
        x = encoder_outputs.permute(1, 0, 2)
        attn_output, attn_output_weights = self.mutihead_attention(x, x, x)
        z = attn_output.permute(1, 0, 2)
        decoder_inputs = nn.functional.relu(z)
        r""" I2B2 task: CRF """
        fc_out = self.crf_linear(decoder_inputs)
        #fc_out = self.crf_linear(encoder_outputs)
        masked_labels = self.generate_masked_labels(batch_classes, mask, device)
        mask = mask.type(torch.uint8).to(device)
        crf_loss = self.crf(fc_out, masked_labels, mask, reduction='token_mean')
        crf_out = self.crf.decode(fc_out)
        r""" Entity detection task: linear + softmax """
        seg_weights = self.entity_linear(decoder_inputs)
        seg_out = self.softmax(seg_weights)
        return crf_out, seg_out, -crf_loss
| true
|
50790c3c6a4926d4ce0271d910df58945e45a5bd
|
Python
|
texnedo/algo-tasks
|
/algo-python/PaintHouse.py
|
UTF-8
| 804
| 2.890625
| 3
|
[] |
no_license
|
import sys
from typing import List, Dict
class Solution:
    """Paint House: minimise the total painting cost when no two adjacent
    houses may share a colour (top-down memoised search)."""

    def min_cost(self, costs: List[List[int]]) -> int:
        """Return the cheapest total cost of painting every house in `costs`."""
        return self.min_cost_internal(costs, 0, -1, dict())

    def min_cost_internal(self, costs: List[List[int]],
                          i: int, prev_j: int, cache: Dict[tuple, int]):
        """Cheapest cost for houses i.. given house i-1 used colour `prev_j`."""
        if i >= len(costs):
            return 0
        key = (i, prev_j)
        if key in cache:
            return cache[key]
        best = None
        for colour in range(len(costs[i])):
            if colour == prev_j:
                continue  # adjacent houses must differ in colour
            candidate = costs[i][colour] + self.min_cost_internal(
                costs, i + 1, colour, cache)
            if best is None or candidate < best:
                best = candidate
        cache[key] = best
        return best
| true
|
a633fa471a6d1f6ac4f93a9e3cf85be4870a08b8
|
Python
|
Sentone5/Laba_4
|
/Zadanye2.py
|
UTF-8
| 545
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Task: given a 12-letter word, reverse the letters located between the
# second and the tenth letter (i.e. the third through the ninth letter).


def reverse_middle(word):
    """Return `word` with the characters at indices 2..8 reversed.

    Fix: the previous chained str.replace() calls were wrong -- replace()
    substitutes the first occurrence of a *character* anywhere in the
    string, so repeated letters were swapped at the wrong positions, and
    three pairwise replaces never produced a true reversal.  Slicing
    reverses the span positionally and correctly.
    """
    return word[:2] + word[2:9][::-1] + word[9:]


if __name__ == '__main__':
    word = input("Введите слово из 12 букв ")
    print(reverse_middle(word))
| true
|
7f32b5e6a1d6151c7442621d939bd63214d82bb6
|
Python
|
noahfkaplan/InterviewPracticeSolutions
|
/DailySolutions/2020/01-07-2020/main.py
|
UTF-8
| 72
| 2.765625
| 3
|
[] |
no_license
|
import LookAndSee
# Compute and print the n-th term produced by the project-local LookAndSee
# module (implementation not visible in this file).
n = 4
result = LookAndSee.LookAndSee(n)
print(result)
| true
|
863485bb52b7b7aa4fa0b76ac2aa8ced543578fa
|
Python
|
asw101/GithubAzureGuide
|
/djangoapp-master/polls/pandas_data.py
|
UTF-8
| 2,121
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
import pandas
import os
from datetime import datetime
from datetime import timedelta
from .models import Githubevent
def filter_in_admin():
    """Average resolution time, in minutes, over Githubevent issue histories.

    For every issue id with more than one recorded event, events are sorted
    chronologically and consumed as (open, close) pairs.  A history that
    starts with 'closed' drops that leading event; an unmatched trailing
    open is synthetically closed at midnight of the following day.

    Returns the mean total open-time per issue, in minutes.
    NOTE(review): assumes the 'time' column already holds datetime objects
    (the strptime conversions are commented out) -- confirm the model field.
    """
    issue_df = pandas.DataFrame(list(Githubevent.objects.all().values()))
    # Issue ids that appear more than once, most frequent first.
    count_df = issue_df.groupby('issue_id').size().sort_values(ascending=False)
    count_df = count_df[count_df.iloc[:] > 1]
    total_times = []
    for id in list(count_df.index):
        temp_df = issue_df.loc[issue_df['issue_id'] == id]
        temp_df = temp_df.sort_values(by=['time'], ascending=True)
        temp_list = temp_df['time'].tolist()
        if temp_df['action'].iloc[0] == 'closed' and len(temp_df) % 2 == 0:
            # Leading close with an even count: dropping it leaves a trailing
            # open, which is closed at the next midnight.
            temp_list = temp_df['time'].tolist()
            del temp_list[0]
            # for i, time in enumerate(temp_list):
            #     temp_list[i] = datetime.strptime(time, '%Y-%m-%d %X')
            temp_list.append(datetime.combine(temp_list[-1], datetime.min.time()) + timedelta(days=1))
        elif temp_df['action'].iloc[0] == 'closed' and len(temp_df) % 2 == 1:
            # Leading close with an odd count: dropping it pairs the rest up.
            temp_list = temp_df['time'].tolist()
            del temp_list[0]
            # for i, time in enumerate(temp_list):
            #     temp_list[i] = datetime.strptime(time, '%Y-%m-%d %X')
            if len(temp_list) == 1:
                # A single remaining open event still needs a synthetic close.
                temp_list.append(datetime.combine(temp_list[-1], datetime.min.time()) + timedelta(days=1))
        elif len(temp_df) % 2 == 0:
            # Even count starting with an open: already perfectly paired.
            temp_list = temp_df['time'].tolist()
            # for i, time in enumerate(temp_list):
            #     temp_list[i] = datetime.strptime(time, '%Y-%m-%d %X')
        else:
            # Odd count starting with an open: close the last one at midnight.
            temp_list = temp_df['time'].tolist()
            # for i, time in enumerate(temp_list):
            #     temp_list[i] = datetime.strptime(time, '%Y-%m-%d %X')
            temp_list.append(datetime.combine(temp_list[-1], datetime.min.time()) + timedelta(days=1))
        temp_time = 0
        # Sum (close - open) spans of consecutive pairs, in seconds.
        for i,k in zip(temp_list[0::2], temp_list[1::2]):
            temp_time += (k-i).total_seconds()
        total_times.append(temp_time)
    # Mean seconds per issue, converted to minutes.
    return(float(sum(total_times)) / len(total_times) / 60)
| true
|
081af84de8920c3300774a5d658ce7b2520ef2b0
|
Python
|
brandoneng000/LeetCode
|
/medium/998.py
|
UTF-8
| 883
| 3.390625
| 3
|
[] |
no_license
|
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node with a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def insertIntoMaxTree(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
        """Insert `val` into a maximum binary tree built from an append-only list.

        Walk down the right spine past every node larger than `val`; the new
        node takes that position and adopts the displaced subtree as its left
        child (same contract as the recursive formulation).
        """
        parent, node = None, root
        while node and node.val >= val:
            parent, node = node, node.right
        inserted = TreeNode(val, node)
        if parent is None:
            # `val` dominates the whole tree and becomes the new root.
            return inserted
        parent.right = inserted
        return root
| true
|
9feba069ed707117cae743f32f65590a64e44a5d
|
Python
|
TylerMorley/book-scraper
|
/scraper-test-unit.py
|
UTF-8
| 2,490
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import unittest
import scraper
from lxml import etree, html
from lxml.etree import tostring
import requests
class UnitTest_Iterations(unittest.TestCase):
    """Base TestCase whose setUp defers to a subclass-provided setupTest."""
    @classmethod
    def setUp(self):
        # NOTE(review): setUp is decorated as a classmethod but written like
        # an instance method; `self` here is actually the class, and
        # setupTest(self) sets the fixture attributes on it. Confirm the
        # decorator is intentional.
        self.setupTest(self)
class TestCases(UnitTest_Iterations):
    """Exercise scraper.getBookData against the HTML variants of exercise 10.

    Each test wraps one <p> markup pattern in the same page skeleton and
    expects a single [title, author] pair back.
    """
    def setupTest(self):
        # Common page skeleton plus the title/author fixture used everywhere.
        self.header = '<!DOCTYPE html><html lang="en-US"><body><div class="wx_audiobook">'
        self.footer = '</div></body></html>'
        self.author = 'Patrick Shoggothfuss'
        self.title = 'Shame of the Wind'
    #Test Case 1, example 10.1
    def test_p_a_em_a(self):
        # Title inside <a><em>, plus a second unrelated <a> in the paragraph.
        body = '<p><a href="a"><em>{}</em></a>, by {},<a href="b">stuff</a>more</p>'.format(self.title, self.author)
        website = self.header + body + self.footer
        result = scraper.getBookData(website)
        self.assertEqual(result, [['Shame of the Wind', 'Patrick Shoggothfuss']])
    #Test Case 2, example 10.2
    def test_p_a_em(self):
        # Title inside <a><em> only.
        body = '<p><a href="huh"><em>{}</em></a>, by {}, and somesuch</p>'.format(self.title, self.author)
        website = self.header + body + self.footer
        result = scraper.getBookData(website)
        self.assertEqual(result, [['Shame of the Wind', 'Patrick Shoggothfuss']])
    #Test Case 3, example 10.4
    def test_p_em_a(self):
        # Nesting order reversed: <em><a>.
        body = '<p><em><a href="eh">{}</a></em>by {}, and other info</p>'.format(self.title, self.author)
        website = self.header + body + self.footer
        result = scraper.getBookData(website)
        self.assertEqual(result, [['Shame of the Wind', 'Patrick Shoggothfuss']])
    #Test Case 4, example 10.10
    def test_p_em(self):
        # Title inside a bare <em>.
        body = '<p><em>{}</em>, by {}, and some other info.</p>'.format(self.title, self.author)
        website = self.header + body + self.footer
        result = scraper.getBookData(website)
        self.assertEqual(result, [['Shame of the Wind', 'Patrick Shoggothfuss']])
    #Test Case 5, example 10.20
    def test_p(self):
        # No markup at all inside the paragraph.
        body = '<p>{}, by {}</p>'.format(self.title, self.author)
        website = self.header + body + self.footer
        result = scraper.getBookData(website)
        self.assertEqual(result, [['Shame of the Wind', 'Patrick Shoggothfuss']])
    #Test Case 6, example 10.41
    def test_p_ahref(self):
        # Title inside a bare <a>.
        body = '<p><a href="heh">{}</a>, some info by {}</p>'.format(self.title, self.author)
        website = self.header + body + self.footer
        result = scraper.getBookData(website)
        self.assertEqual(result, [['Shame of the Wind', 'Patrick Shoggothfuss']])
if __name__ == '__main__':
    unittest.main()
| true
|
80cf59158e8dd1dabbf7d47822b6cb99b9ec3bd1
|
Python
|
sebinemeth/chesscraft-client
|
/figure/FigureFactory.py
|
UTF-8
| 773
| 3.046875
| 3
|
[] |
no_license
|
from abc import ABC
from figure.Bishop import Bishop
from figure.King import King
from figure.Knight import Knight
from figure.Peasant import Peasant
from figure.Queen import Queen
from figure.Rook import Rook
class FigureFactory(ABC):
    """Factory that builds chess figures from their lower-case type name."""

    @staticmethod
    def get_figure(figure_type, player):
        """Return a freshly constructed figure of `figure_type` owned by `player`.

        Raises Exception for unknown type names, exactly like the original
        if-chain it replaces.
        """
        constructors = {
            'peasant': Peasant,
            'rook': Rook,
            'knight': Knight,
            'bishop': Bishop,
            'queen': Queen,
            'king': King,
        }
        figure_cls = constructors.get(figure_type)
        if figure_cls is None:
            raise Exception(f'{figure_type} is not a figure type')
        return figure_cls(player)
| true
|
024000850ac46c847cc2838f945b9d3c42139baf
|
Python
|
liuspencersjtu/MyLeetCode
|
/925-Long-Pressed-Name.py
|
UTF-8
| 1,026
| 3.03125
| 3
|
[] |
no_license
|
class Solution:
    def isLongPressedName(self, name, typed):
        """
        Return True when `typed` could result from typing `name` with some
        keys long-pressed (each long-press repeats the previous character).

        :type name: str
        :type typed: str
        :rtype: bool

        Fixes over the previous version:
        * stray debug print() calls removed (they polluted stdout),
        * empty `name` with non-empty `typed` no longer raises IndexError,
        * the tangled four-pointer walk is replaced by the standard
          two-pointer scan: each typed character must either match the next
          unmatched name character or repeat the previous typed character.
        """
        i = 0  # next index of `name` still to be matched
        for j, ch in enumerate(typed):
            if i < len(name) and name[i] == ch:
                i += 1  # consumed one character of name
            elif j == 0 or ch != typed[j - 1]:
                return False  # neither a match nor a long-press repeat
        # Every character of `name` must have been consumed.
        return i == len(name)
| true
|
5ea700be999aee2a1187981c0497c725c06fec7f
|
Python
|
bohdi2/euler
|
/problem43.py
|
UTF-8
| 4,167
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
import argparse
import functools
import sys
import time
def timeit(method):
    """Decorator that prints how long `method` took on every call.

    Fix: the wrapper now carries functools.wraps, so the decorated function
    keeps its original __name__ and docstring (the bare wrapper previously
    hid them from introspection and from other decorators).
    """
    @functools.wraps(method)
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        # Report the wrapped function's own name, args and wall time.
        print('%r (%r, %r) %2.2f sec' % (method.__name__, args, kw, te-ts))
        return result
    return timed
def isPandigital(n):
    """True when the decimal digits of `n` are exactly 0-9, each used once."""
    return sorted(str(n)) == list("0123456789")
def pandigital():
    """Yield every 0-9 pandigital number in ascending order (full brute scan)."""
    yield from filter(isPandigital, range(123456789, 9876543210))
def pandigital1(step=1):
    """Yield pandigital numbers whose last three digits are a multiple of `step`.

    Pre-computes the admissible 3-digit tails once, then scans every head.
    """
    tails = [t for t in range(1000) if t % step == 0]
    for head in range(123456, 9876543):
        base = head * 1000
        for tail in tails:
            candidate = base + tail
            if isPandigital(candidate):
                yield candidate
def pandigital3():
digits = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
def piter(remaining_digits, result):
if not remaining_digits:
yield result
else:
for digit in remaining_digits:
new_digits = set(remaining_digits) - {digit}
new_result = list(result)
new_result.append(digit)
yield from piter(new_digits, new_result)
return piter(digits, [])
def test(n, divisor, prime):
return ((n//divisor) % 1000) % prime == 0
def test_all(n):
    """Project Euler 43 property: each 3-digit window of `n` divides by its prime.

    Windows run from the last three digits (prime 17) up to digits d2..d4
    (prime 2), expressed here as (divisor, prime) pairs.
    """
    windows = ((1, 17), (10, 13), (100, 11), (1000, 7),
               (10000, 5), (100000, 3), (1000000, 2))
    return all(test(n, divisor, prime) for divisor, prime in windows)
def test3(result, index, prime):
n = 100 * result[index-1] + 10 * result[index] + result[index+1]
return n % prime == 0
def test_all3(result):
    """Same divisibility property as test_all, on a list of ten digits."""
    checks = ((8, 17), (7, 13), (6, 11), (5, 7), (4, 5), (3, 3), (2, 2))
    for index, prime in checks:
        if not test3(result, index, prime):
            return False
    return True
class BruteForce:
    """Slowest strategy: test every candidate from pandigital() (~2.4 hours)."""

    def __call__(self):
        total = 0
        for candidate in pandigital():
            if test_all(candidate):
                total += candidate
                print(candidate, total)
        return total
class BruteForce17:
    """Faster scan: only candidates whose 3-digit tail is a multiple of 17."""

    def __call__(self):
        total = 0
        for candidate in pandigital1(17):
            if test_all(candidate):
                total += candidate
                print(candidate, total)
        return total
class BruteForce3:
    """Fastest strategy: walk digit permutations and assemble the number on a hit."""

    def __call__(self):
        total = 0
        for digits in pandigital3():
            if test_all3(digits):
                number = functools.reduce(lambda acc, d: 10 * acc + d, digits, 0)
                total += number
                print(number, total)
        return total
class Problem43():
    """Timed drivers for the sanity checks and the three solution strategies."""

    @timeit
    def sanity_checks(self):
        """The known solution 1406357289 must satisfy the property."""
        return test_all(1406357289)

    @timeit
    def sanity_checks2(self):
        """A near-miss must fail the property."""
        return test_all(1406357288)

    @timeit
    def brute_force(self):
        # Full scan; takes roughly 2.4 hours.
        return BruteForce()()

    @timeit
    def brute_force17(self):
        # Tail restricted to multiples of 17; roughly 9.1 minutes.
        return BruteForce17()()

    @timeit
    def brute_force3(self):
        # Permutation walk; roughly 8 seconds.
        return BruteForce3()()
def main():
    """Command-line front end: dispatch on the single positional `command`."""
    parser = argparse.ArgumentParser()
    parser.add_argument("command")
    args = parser.parse_args()

    problem = Problem43()
    if args.command == "tests":
        # Dump every digit permutation (the earlier sanity checks stay disabled).
        for digits in pandigital3():
            print("m", digits)
    elif args.command == "brute1":
        print("brute_force", problem.brute_force())
    elif args.command == "brute17":
        print("brute_force17", problem.brute_force17())
    elif args.command == "brute3":
        print("brute_force3", problem.brute_force3())
    elif args.command == "all":
        print("brute_force", problem.brute_force())
        print("brute_force17", problem.brute_force17())
        print("brute_force3", problem.brute_force3())


if __name__ == '__main__':
    sys.exit(main())
| true
|
f888e32e5eae74a2110534237c0b47145bfc2c4a
|
Python
|
jlyons6100/Wallbreakers
|
/Week_2/jewels_and_stones.py
|
UTF-8
| 453
| 3.453125
| 3
|
[] |
no_license
|
# Jewels and Stones: Given strings J representing the types of stones that are jewels and S representing the stones you have.
# Determine how many stones you have that are also jewels.
class Solution:
    def numJewelsInStones(self, J: str, S: str) -> int:
        """Count characters of `S` that appear in `J` (case-sensitive).

        Idiom cleanup: set(J) builds the jewel set in one step and sum()
        counts the matches, replacing the manual add/increment loops.
        """
        jewels = set(J)  # O(1) membership tests per stone
        return sum(stone in jewels for stone in S)
| true
|
a86d6791ab30901d2297c2ad67a250392e608cd9
|
Python
|
UrbanWojnowski/PythonFiles
|
/ExcelWrite.py
|
UTF-8
| 206
| 2.640625
| 3
|
[] |
no_license
|
import xlwt
# create an object of workbook
wk = xlwt.Workbook()
ws = wk.add_sheet("Testing")
# write(row, column, value): fills cells A1 and B1 of the "Testing" sheet.
ws.write(0,0,"Testing WOrld")
ws.write(0,1,"www.theTestingWorld.com")
# NOTE(review): xlwt writes the legacy .xls (BIFF) format; saving under an
# .xlsx name may confuse Excel -- confirm the intended extension.
wk.save("TestingWorld1.xlsx")
| true
|
cf1889966eba2efc94e0aa558178f2f0002dcec3
|
Python
|
rezasaneei/ML
|
/Python/massoud_tfidf/massoud_tfidf_v3.py
|
UTF-8
| 1,655
| 3.078125
| 3
|
[] |
no_license
|
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances
import csv
import numpy as np
# Documents to score: one document per line of movie_lines.txt.
docs = []
with open('movie_lines.txt') as f:
    docs = f.read().splitlines()
len_docs = len(docs)
#print 'number of documents' , len(docs), '\n'
# empty list to be the movie corpus
my_corpus = []
# open a plot_summaries file containing movie summaries, from http://www.cs.cmu.edu/~ark/personas/
# and put movie summaries as a list of strings to be used as our corpus
with open('plot_summaries.txt', encoding='utf8') as f:
    rd = csv.reader(f, delimiter='\t', quotechar='"')
    for row in rd:
        my_corpus.append(row[1])
#print top 10 movie corpus text
#print(my_corpus[:10])
# initialize the vectorizer
vect = TfidfVectorizer(stop_words = stopwords.words('english'))
# vectorize the corpus
# NOTE(review): the vectorizer is fitted on `docs`, not on `my_corpus`; the
# corpus built above is never used afterwards -- confirm which was intended.
corpus_dtm = vect.fit(docs)
# transfor the given documents of texts according to corpus
docs_dtm = vect.transform(docs)
# print features (extracted words)
#print(vect.get_feature_names())
# make a pandas dataframe for better visualization
pd_docs = pd.DataFrame(docs_dtm.toarray(),columns = vect.get_feature_names())
#print(type(docs_dtm))
# Report the five highest TF-IDF terms of every document.
for i in range(len_docs):
    print ('document',i+1,'top 5 keywords :')
    print (pd_docs.loc[i].sort_values(ascending = False)[:5])
    print ('\n')
print ('Euclidean distance of each document text from others')
for i in range(len_docs):
    print ('document',i+1,'distance to others:')
    for d in docs_dtm:
        print (euclidean_distances(docs_dtm[i], d))
    print ('\n')
| true
|
b444bc7436611d75e25e30bea711a1c36801b5f6
|
Python
|
moonkangyun/MSE_python
|
/ex110.py
|
UTF-8
| 500
| 4.25
| 4
|
[] |
no_license
|
if True :
    if False:
        print("1")
        print("2")
    else:
        print("3")
else :
    print("4")
print("5")
# That is, 3 and 5 will be printed. The outer condition is always True, so
# control reaches the inner if; its condition is False, so instead of printing
# 1 and 2 the else branch prints 3 and the if statement ends. Finally,
# print("5") sits outside every conditional, so 5 is printed as well.
| true
|
21a66cb234653fce958ae89e45123bafe910f79c
|
Python
|
JohnEstefano/AWS_Data_Lake
|
/etl.py
|
UTF-8
| 7,266
| 2.59375
| 3
|
[] |
no_license
|
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
# Load AWS credentials from dl.cfg and export them as environment variables
# so the s3a:// Hadoop connector can authenticate.
config = configparser.ConfigParser()
config.read_file(open('dl.cfg'))
# NOTE(review): the sections() result is discarded -- likely leftover debugging.
config.sections()
os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Create (or reuse) a SparkSession with the hadoop-aws package for S3 access."""
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.5") \
        .getOrCreate()
    return spark
def process_song_data(spark, input_data, output_data):
    """Build the songs and artists dimension tables from the song dataset.

    Reads every song JSON under `input_data`/song_data and writes, under
    `output_data` (overwriting previous runs):
      * songs_table.parquet, partitioned by (year, artist_id)
      * artists_table.parquet
    """
    # get filepath to song data file
    song_data = input_data + "song_data/*/*/*/*.json"
    # read song data file
    df = spark.read.json(song_data)
    # create song_data_table view for SQL
    df.createOrReplaceTempView("song_data_table")
    # extract columns to create songs table
    songs_table = spark.sql("""
        SELECT
            song_id,
            title,
            artist_id,
            year,
            duration
        FROM song_data_table
        WHERE song_id IS NOT NULL
    """)
    # write songs table to parquet files partitioned by year and artist
    songs_table.write.mode('overwrite').partitionBy("year", "artist_id").parquet(output_data +"songs_table.parquet")
    # extract columns to create artists table
    artists_table = spark.sql("""
        SELECT DISTINCT
            artist_id,
            artist_name,
            artist_location,
            artist_latitude,
            artist_longitude
        FROM song_data_table
        WHERE artist_id IS NOT NULL
    """)
    # write artists table to parquet files
    artists_table.write.mode('overwrite').parquet(output_data +'artists_table.parquet')
def process_log_data(spark, input_data, output_data):
    """Build the users, time and songplays tables from the app event logs.

    Reads the JSON logs under `input_data`/log_data, keeps only 'NextSong'
    page events, and writes three tables under `output_data` as parquet
    (time and songplays partitioned by year/month), overwriting old runs.

    Fix: the previous version computed `time_table` twice -- first via a
    DataFrame select over derived timestamp/datetime columns, then
    immediately overwrote it with the SQL query below. The dead first
    computation, and the two UDFs that existed only to feed it, are
    removed; everything written out is unchanged.
    """
    # get filepath to log data file
    log_data = input_data + 'log_data/*.json'
    # read log data file
    df = spark.read.json(log_data)
    # filter by actions for song plays
    df = df.filter(df.page == 'NextSong')
    # create log_data_table view for SQL
    df.createOrReplaceTempView("log_data_table")
    # extract columns for users table
    users_table = spark.sql("""
        SELECT DISTINCT
            (log_data_table.userId) AS user_id,
            log_data_table.firstName AS first_name,
            log_data_table.lastName AS last_name,
            log_data_table.gender AS gender,
            log_data_table.level AS level
        FROM log_data_table
        WHERE log_data_table.userId IS NOT NULL
    """)
    # write users table to parquet files
    users_table.write.mode('overwrite').parquet(output_data +'users_table.parquet')
    # extract columns to create time table (epoch millis -> timestamp parts)
    time_table = spark.sql("""
        SELECT
            tt.time as start_time,
            hour(tt.time) as hour,
            dayofmonth(tt.time) as day,
            weekofyear(tt.time) as week,
            month(tt.time) as month,
            year(tt.time) as year,
            dayofweek(tt.time) as weekday
        FROM (
            SELECT
                to_timestamp(log_data_table.ts/1000) as time
            FROM log_data_table
            WHERE log_data_table.ts IS NOT NULL
        ) tt
    """)
    # write time table to parquet files partitioned by year and month
    time_table.write.mode('overwrite').partitionBy("year", "month").parquet(output_data +'time_table.parquet')
    # read in song data to use for songplays table
    song_data = input_data + "song_data/*/*/*/*.json"
    song_df = spark.read.json(song_data)
    # create song_data_table view for SQL
    song_df.createOrReplaceTempView("song_data_table")
    # extract columns from joined song and log datasets to create songplays table
    songplays_table = spark.sql("""
        SELECT
            monotonically_increasing_id() AS songplay_id,
            to_timestamp(log_data_table.ts/1000) AS start_time,
            month(to_timestamp(log_data_table.ts/1000)) AS month,
            year(to_timestamp(log_data_table.ts/1000)) AS year,
            log_data_table.userId AS user_id,
            log_data_table.level AS level,
            song_data_table.song_id AS song_id,
            song_data_table.artist_id AS artist_id,
            log_data_table.sessionId AS session_id,
            log_data_table.location AS location,
            log_data_table.userAgent AS user_agent
        FROM log_data_table
        JOIN song_data_table
            ON log_data_table.artist = song_data_table.artist_name
            AND log_data_table.song = song_data_table.title
    """)
    # write songplays table to parquet files partitioned by year and month
    songplays_table.write.mode('overwrite').partitionBy("year", "month").parquet(output_data +'songplays_table.parquet')
def main():
    """Wire it together: build every table from the S3 input bucket into the output bucket."""
    spark = create_spark_session()
    input_data = "s3a://udacity-dend/"
    output_data = "s3a://dend-14/"
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
    main()
| true
|
dd8d9bf1244616b0884e9da8b9b1e05c23a01f83
|
Python
|
johnwatterlond/playground
|
/hangman.py
|
UTF-8
| 4,291
| 4.5625
| 5
|
[] |
no_license
|
"""
A hangman game.
Player is allowed 7 wrong guesses.
"""
import string
import os
import random
from words import word_list
def clear():
    """Clear terminal screen."""
    # 'clear' is the POSIX terminal command; Windows would need 'cls' instead.
    os.system('clear')
class Hangman:
    """
    A single game of hangman; the player may make 7 wrong guesses.

    Args:
        secret_word: The word the player must discover.

    Attributes:
        secret_word: The word the player must discover.
        board_list: Per-letter board state; undiscovered letters show '-'.
        guesses: Every letter tried so far, in order.
        guesses_left: Remaining guesses; only wrong guesses spend one.
        last_guess: The most recent letter tried.
        message: End-of-game text shown by print_board.
    """

    def __init__(self, secret_word):
        self.secret_word = secret_word
        self.board_list = ['-'] * len(secret_word)
        self.guesses = []
        self.guesses_left = 7
        self.last_guess = ''
        self.message = ''

    def update_board(self, letter):
        """Reveal every position of `letter` on the board."""
        self.board_list = [
            letter if secret == letter else shown
            for shown, secret in zip(self.board_list, self.secret_word)
        ]

    def update_guesses(self, letter):
        """Record `letter` as tried."""
        self.guesses.append(letter)

    def update_guesses_left(self):
        """Spend one of the remaining guesses."""
        self.guesses_left -= 1

    def update_last_guess(self, letter):
        """Remember `letter` as the most recent guess."""
        self.last_guess = letter

    def board_empty(self):
        """Return True once no '-' placeholders remain (word fully guessed)."""
        return '-' not in self.board_list

    def print_board(self):
        """
        Clear the terminal, then print the board and the game status.
        """
        clear()
        print('Current board:')
        print(''.join(self.board_list))
        print()
        print(f'Number of guesses left: {self.guesses_left}')
        print(f'Last guess was: {self.last_guess}')
        print('Guesses so far: {}\n'.format(' '.join(self.guesses)))
        print(self.message)

    def prompt_letter(self):
        """
        Keep asking until the user supplies a single, not-yet-tried letter.
        """
        valid = set(string.ascii_letters)
        while True:
            letter = input('Guess a letter.\n').lower()
            if letter in self.guesses:
                # Only previously accepted (hence valid) letters can be here.
                print('\nAlready guessed {}. Try again.'.format(letter))
            elif letter not in valid:
                print('\nInvalid input. Try again.')
            else:
                return letter

    def take_turn(self):
        """
        Run one round: show the board, get a guess, apply it, and print
        the final board if the game just ended.
        """
        self.print_board()
        letter = self.prompt_letter()

        self.update_guesses(letter)
        self.update_last_guess(letter)

        # A hit reveals letters; a miss costs a guess.
        if letter in self.secret_word:
            self.update_board(letter)
        else:
            self.update_guesses_left()

        if self.board_empty():
            self.message = 'Congrats you win!'
            self.print_board()
        elif not self.guesses_left:
            self.message = 'No more guesses left. You lose.\nThe word was {}.'.format(self.secret_word)
            self.print_board()

    def play_game(self):
        """Take turns until the word is found or the guesses run out."""
        while not self.board_empty() and self.guesses_left:
            self.take_turn()
self.take_turn()
def main():
    """
    Grab a secret_word from word_list and start a game of hangman.
    """
    # word_list comes from the project-local `words` module imported above.
    secret_word = random.choice(word_list)
    hangman = Hangman(secret_word)
    hangman.play_game()
if __name__ == '__main__':
    main()
|
03a83330bc3bfd7e16bda60c29dd3dea06bb7a08
|
Python
|
JorgeOrobio/Proyecto1_CG
|
/Proyecto_1/Objetos.py
|
UTF-8
| 3,515
| 2.78125
| 3
|
[] |
no_license
|
import pygame as pg
from libreria import*
class Bloque(pg.sprite.Sprite):
    """Block sprite: scrolls left at a fixed speed and offers simple
    proximity tests against the player's position."""
    def __init__(self,imagen,pos):
        pg.sprite.Sprite.__init__(self)
        self.image = imagen
        self.rect=self.image.get_rect()
        self.pos = pos
        # Spawn 240px to the right of the requested position.
        self.rect.x=pos[0]+240
        self.rect.y=pos[1]
        self.velx=-8    # leftward scroll speed, pixels per frame
        self.error=32   # half-size of the proximity window, in pixels
        # self.grito_arc="/home/jorge/Escritorio/CGrafica/Music/Wilhelm_Scream.ogg"
        # self.grito = pg.mixer.Sound(self.grito_arc)
    def update(self):
        # Advance one frame: slide left.
        self.rect.x+=self.velx
    def OnLimit(self,pos_jugador):
        """True when the player position falls inside the block's window.

        NOTE(review): `liminf < pos_jugador < limsup` compares Python lists
        lexicographically (x first, then y), not as a per-axis bounding
        box -- confirm this is the intended collision test.
        """
        condition=False
        liminfx=self.rect.center[0] - self.error
        liminfy=self.rect.center[1] + self.error
        liminf = [liminfx,liminfy]
        limsupx=self.rect.center[0] + self.error
        limsupy=self.rect.center[1] - self.error
        limsup = [limsupx,limsupy]
        if liminf < pos_jugador <limsup :
            condition=True
        else:
            condition=False
        return condition
    def Death(self,pos_jugador):
        """True when the player's right edge touches the block's left bound.

        NOTE(review): most locals here (liminf/limsup, posl, posu, posd) are
        computed but never used; only `posr == liminfx` decides the result.
        """
        condition=False
        liminfx=self.rect.center[0] - self.error
        liminfy=self.rect.center[1] + self.error
        liminf = [liminfx,liminfy]
        limsupx=self.rect.center[0] + self.error
        limsupy=self.rect.center[1] - self.error
        limsup = [limsupx,limsupy]
        posr = pos_jugador[0] + 32
        posl = pos_jugador[0] - 32
        posu = pos_jugador[1] + 32
        posd = pos_jugador[1] - 32
        if posr == liminfx:
            condition=True
        else:
            condition=False
        return condition
class Torre(pg.sprite.Sprite):
    """Tower sprite: a 40x40 coloured square that follows the mouse while
    `click` is set."""
    def __init__(self,pos,cl=azul):
        pg.sprite.Sprite.__init__(self)
        self.image=pg.Surface([40,40])
        self.image.fill(cl)
        self.rect=self.image.get_rect()
        self.rect.x=pos[0]
        self.rect.y=pos[1]
        self.click=False   # True while the user is dragging this sprite
    def update(self):
        if self.click:
            # Drag: snap the sprite centre to the mouse cursor.
            self.rect.center = pg.mouse.get_pos()
class Cuadro(pg.sprite.Sprite):
    """Square sprite: a 40x40 coloured square with a velocity, draggable
    with the mouse while `click` is set."""
    def __init__(self,pos,cl=verde):
        pg.sprite.Sprite.__init__(self)
        self.image=pg.Surface([40,40])
        self.image.fill(cl)
        self.rect=self.image.get_rect()
        self.rect.x=pos[0]
        self.rect.y=pos[1]
        self.click=False   # True while the user is dragging this sprite
        self.velx=0        # per-frame velocity, applied in update()
        self.vely=0
    def update(self):
        # Apply the velocity, then let dragging override the position.
        self.rect.x += self.velx
        self.rect.y += self.vely
        if self.click:
            self.rect.center = pg.mouse.get_pos()
class Region(pg.sprite.Sprite):
    """Region sprite: a static 100x100 coloured square (mouse-follow code
    is present but commented out)."""
    def __init__(self,pos,cl=blanco):
        pg.sprite.Sprite.__init__(self)
        self.image=pg.Surface([100,100])
        self.image.fill(cl)
        self.rect=self.image.get_rect()
        self.rect.x=pos[0]
        self.rect.y=pos[1]
        self.click=False
    # def update(self):
    #     if self.click:
    #         self.rect.center = pg.mouse.get_pos()
class Linea(pg.sprite.Sprite):
    """Line sprite: a thin black bar whose velocity depends on the `h` flag."""
    def __init__(self,pos,c=[5,200]):
        pg.sprite.Sprite.__init__(self)
        self.image=pg.Surface(c)
        self.image.fill(negro)
        self.rect=self.image.get_rect()
        self.rect.x=pos[0]
        self.rect.y=pos[1]
        self.click=False
        self.velx=0
        self.vely=0
        self.h=False
        self.v=False
    def update(self):
        # NOTE(review): update() only SETS velx/vely; the rect position is
        # never changed here, so something else must apply the velocity.
        # Also, `h` selects downward motion and not-h selects leftward --
        # the h/v flag semantics look swapped; confirm with the caller.
        if self.h:
            self.velx=0
            self.vely=5
        else:
            self.velx=-5
            self.vely=0
| true
|
69fb323f44bea3f1af5b7c9721c59fb59eef4bcd
|
Python
|
bengwie/PythonCodePractice
|
/missingnumber2.py
|
UTF-8
| 1,335
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/python
def testMe(nums):
    """Find the digit whose run length deviates from the longest run in `nums`.

    Walks the string counting occurrences per character; when a new run
    starts, the just-finished run is compared against the longest run seen
    so far and a shorter one is reported. Returns -1 when only one distinct
    character exists, otherwise the last character on a clean finish.
    NOTE(review): Python 2 code (print statements); the exact contract is
    inferred from the sample data below -- confirm against the original task.
    """
    maxRep = 0           # longest run length observed so far
    num_dict = dict()    # character -> occurrences counted so far
    previousNum = None   # character of the run that just finished
    for (index, num) in enumerate(nums):
        print "num: %s" % num
        if num in num_dict:
            num_dict[num] += 1
        else:
            # A new run begins: check whether the finished run fell short.
            if maxRep != 0:
                if num_dict[previousNum] < maxRep:
                    return previousNum
                elif len(num_dict.keys()) == 2 and maxRep > num_dict[nums[0]]:
                    return nums[0]
            num_dict[num] = 1
        if maxRep < num_dict[num]:
            maxRep = num_dict[num]
        if index == len(nums) - 1:
            # End of input: a single distinct character means nothing to report.
            if len(num_dict.keys()) == 1:
                return -1
            else:
                return num
        previousNum = num
if __name__ == '__main__':
    # Sample inputs: runs of repeated digits, one run shorter than the rest.
    samples = ["11222333",
               "11122333",
               "11122233",
               "11222333444555666777888",
               "1112233344455666777888",
               "11122233344455566777888",
               "11122233344455566677788",
               "111"
               ]
    import time
    # Run testMe on every sample and report the answer with rough timings.
    for sample in samples:
        print "Sample is: %s" % sample
        time1 = time.time()
        output = testMe(sample)
        time2 = time.time()
        print "start time: %d" % time1
        print "end time: %d" % time2
        delta = time2 - time1
        print "output: %s with time: %d" % (output, delta)
| true
|
7fede34f5e5c07e5f1a9d887e832e160c19b6367
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_135/2180.py
|
UTF-8
| 501
| 3.1875
| 3
|
[] |
no_license
|
def gl(f, splitchar=' '):
    # Read one line and apply `f` to each token (Python 2: map returns a list).
    return map(f, raw_input().split(splitchar))
def g(f):
    # Read one line and apply `f` to the whole of it.
    return f(raw_input())
t=g(int)
for i in xrange(t):
    # Round 1: the volunteer's chosen row, then the 4x4 grid of cards.
    first=g(int) - 1
    cards1=[gl(int) for _ in xrange(4)]
    # Round 2: the same after the shuffle.
    second=g(int) - 1
    cards2=[gl(int) for _ in xrange(4)]
    # The chosen card must appear in both indicated rows.
    ans = set(cards1[first]) & set(cards2[second])
    print "Case #%d:" % (i + 1),
    if len(ans) == 1:
        print list(ans)[0]
    elif len(ans) == 0:
        print "Volunteer cheated!"
    else:
        print "Bad magician!"
| true
|
04e99c8a77a9a28ee221e945ed8ed30c662cad8d
|
Python
|
KashifAS/Aganitha-Full-Stack-and-AI-Quiz-2020-
|
/app.py
|
UTF-8
| 5,209
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request
import flask
# Application object; route handlers (outside this chunk) attach to it.
app = Flask(__name__)
def get_rules():
    """Return the spoken-to-written conversion tables.

    Three groups: 'Numbers' maps number words to ints, 'Tuples' maps
    multiplier words (single, double, ...) to repeat counts, and 'General'
    maps spelled-out abbreviations to their compact form.
    """
    numbers = {
        'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
        'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9,
        'ten': 10, 'twenty': 20, 'thirty': 30, 'forty': 40,
        'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80,
        'ninety': 90, 'hundred': 100,
    }
    tuples = {
        'single': 1, 'double': 2, 'triple': 3, 'quadruple': 4,
        'quintuple': 5, 'sextuple': 6, 'septuple': 7, 'octuple': 8,
        'nonuple': 9, 'decuple': 10,
    }
    general = {'C M': 'CM', 'P M': 'PM', 'D M': 'DM', 'A M': 'AM'}
    return {'Numbers': numbers, 'Tuples': tuples, 'General': general}
def check_front_last(word):
    """Detach a leading and/or trailing ',' or '.' from *word*.

    Returns a (front, core, last) tuple where front/last hold the stripped
    punctuation (or '' when absent). Single-character words are returned
    unchanged so a lone ',' or '.' keeps its place as the core.
    """
    front = ''
    last = ''
    if len(word) > 1:
        if word.endswith((',', '.')):
            word, last = word[:-1], word[-1]
        if word.startswith((',', '.')):
            front, word = word[0], word[1:]
    return (front, word, last)
class SpokenToWritten:
    """Converts spoken-English constructs in a paragraph to written form.

    Rewrites performed (rule tables come from get_rules()):
      - "<number word> dollar(s)"      -> "$N"
      - "<multiplier word> <letter>"   -> repeated letter (e.g. "triple A" -> "AAA")
      - spelled-out pairs like "P M"   -> joined form ("PM")
      - "at the rate"                  -> "@"
    """
    def __init__(self):
        self.rules = get_rules()   # rule tables: 'Numbers' / 'Tuples' / 'General'
        self.paragraph = ''        # raw input text
        self.ouptut_para = ''      # accumulated converted text (attribute name typo kept as-is)
    # getting user input
    def get_user_input(self,input):
        """Store *input* as the paragraph to convert; raise ValueError if empty."""
        self.paragraph = input
        # NOTE(review): this comparison only fires when the whole input is the
        # single character '.', which then becomes '' and raises below.
        # Possibly intended to be `self.paragraph[-1] == '.'` — confirm.
        if(self.paragraph == '.'):
            self.paragraph = self.paragraph[:-1]
        if not self.paragraph:
            raise ValueError("You entered nothing.")
    # main conversion function of spoken to written english
    def Convert(self):
        """Scan the stored paragraph word by word, applying the rewrite rules.

        Returns the converted text. The output always starts with a leading
        space (every append is ' ' + word) and ends with a '.' (appended by
        the while-else clause when missing).
        """
        # splitting paragraph into individual words
        words_of_para = self.paragraph.split()
        # accessing defines rules
        numbers = self.rules['Numbers']
        tuples = self.rules['Tuples']
        general = self.rules['General']
        i = 0
        no_of_words = len(words_of_para)
        # loop will run for the number of words in paragraph
        while i < no_of_words:
            # Strip punctuation off the current word before rule matching.
            (front, word, last) = check_front_last(words_of_para[i])
            # Word of paragraph may of form ',dollars.'
            if i + 1 != no_of_words:
                # when word is of the form e.g.: two
                # Lookahead one word (needed by every multi-word rule below).
                (front_n, next_word, last_n) = \
                    check_front_last(words_of_para[i + 1])
                if i + 2 != no_of_words:
                    # Lookahead two words (only used by the "at the rate" rule).
                    # NOTE(review): when fewer than 3 words remain, t_word keeps
                    # its value from an earlier iteration — or is unbound if the
                    # paragraph starts with exactly "at the" as its last two
                    # words, which would raise NameError below. Confirm intended.
                    (front_t, t_word, last_t) = \
                        check_front_last(words_of_para[i + 2])
                if word.lower() in numbers.keys() \
                    and (next_word.lower() == 'dollars'
                         or next_word.lower() == 'dollar'):
                    # "<number> dollars" -> "$N"; consumes two words.
                    self.ouptut_para = self.ouptut_para + ' ' + front \
                        + '$' + str(numbers[word.lower()]) + last
                    i = i + 2
                elif word.lower() in tuples.keys() and len(next_word) \
                    == 1:
                    # when word is of form Triple A
                    self.ouptut_para = self.ouptut_para + ' ' + front_n \
                        + next_word * tuples[word.lower()] + last_n
                    i = i + 2
                elif word + ' ' + next_word in general.keys():
                    # if word is of form P M or C M
                    self.ouptut_para = self.ouptut_para + ' ' + front \
                        + word + next_word + last_n
                    i = i + 2
                elif word.lower() == 'at' and next_word.lower() \
                    == 'the' and t_word.lower() == 'rate':
                    # "at the rate" -> "@"; consumes three words.
                    self.ouptut_para = self.ouptut_para + ' ' + front \
                        + '@' + last
                    i = i + 3
                else:
                    # No rule matched: copy the original word through verbatim.
                    self.ouptut_para = self.ouptut_para + ' ' \
                        + words_of_para[i]
                    i = i + 1
            else:
                # Last word of the paragraph: no lookahead possible, copy it.
                self.ouptut_para = self.ouptut_para + ' ' \
                    + words_of_para[i]
                i = i + 1
        else:
            # while-else: runs once the loop exits normally — ensure the
            # converted paragraph ends with a period.
            if self.ouptut_para[-1] != '.':
                self.ouptut_para = self.ouptut_para + '.'
        return self.ouptut_para
# main function
def convert_sp_to_wr(input):
# creating class object
obj_spoken = SpokenToWritten()
obj_spoken.get_user_input(input)
return "<h1>Converted text :</h1><br/>" + obj_spoken.Convert()
# obj_spoken.show_output()
@app.route('/')
def hello_world():
    # Landing page: renders templates/index.html.
    return flask.render_template('index.html')
@app.route('/index')
def index():
    # Same landing page, also reachable at /index.
    return flask.render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    # Form handler: converts the posted 'review_text' field and returns HTML.
    # NOTE(review): raises KeyError when the form lacks 'review_text'.
    query = request.form.to_dict()
    return convert_sp_to_wr(query['review_text'])
if __name__ == '__main__':
    # Development server only; debug=True must not be enabled in production.
    app.run(debug=True)
| true
|
b57228086259be3c3d07349118f0a38d612f85ea
|
Python
|
JiaXingBinggan/RL_ad
|
/src/DRLB/RL_brain_for_test.py
|
UTF-8
| 3,045
| 2.734375
| 3
|
[] |
no_license
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.config import config
import os
# Fix the NumPy RNG for reproducibility (note: torch's RNG is not seeded here).
np.random.seed(1)
class Net(nn.Module):
    """Fully connected Q-network: two 100-unit ReLU hidden layers.

    Maps a state feature vector to one Q-value per action.
    """

    def __init__(self, feature_numbers, action_numbers):
        """Build the network.

        feature_numbers -- size of the state feature vector (input width)
        action_numbers  -- number of discrete actions (output width)
        """
        super(Net, self).__init__()
        # Hidden layer widths; the output layer width is the action count.
        neuron_numbers_1 = 100
        neuron_numbers_2 = 100
        self.fc1 = nn.Linear(feature_numbers, neuron_numbers_1)
        self.fc1.weight.data.normal_(0, 0.1)  # init hidden layer 1 weights
        self.fc2 = nn.Linear(neuron_numbers_1, neuron_numbers_2)
        self.fc2.weight.data.normal_(0, 0.1)  # init hidden layer 2 weights
        # Fix: the output layer consumes fc2's activations, so its input width
        # must be neuron_numbers_2. The original used neuron_numbers_1, which
        # only worked because the two widths happen to be equal (100).
        self.out = nn.Linear(neuron_numbers_2, action_numbers)
        self.out.weight.data.normal_(0, 0.1)  # init output layer weights

    def forward(self, input):
        """Return per-action Q-values for a batch of state vectors."""
        x_1 = F.relu(self.fc1(input))
        x_2 = F.relu(self.fc2(x_1))
        actions_value = self.out(x_2)
        return actions_value
def store_para(Net):
    # Persist the model's weights to a fixed relative path.
    # NOTE(review): the parameter `Net` is a model *instance* and shadows the
    # Net class above; the Model/ directory must already exist.
    torch.save(Net.state_dict(), 'Model/DRLB_model_params.pth')
# Defines the DeepQNetwork agent (inference-only: restores trained weights).
class DRLB:
    """Bid-action selector: loads a trained Q-network from disk and picks the
    greedy (max Q-value) action for a given state.

    NOTE(review): most constructor arguments (learning_rate, reward_decay,
    e_greedy, replace_target_iter, memory_size, batch_size) are stored but
    never used in this file — this appears to be the test-time twin of a
    training class; confirm against the training implementation.
    Requires CUDA (.cuda()) and the file 'Model/DRLB_model_params.pth'.
    """
    def __init__(
            self,
            action_space,  # action space
            action_numbers,  # number of actions
            feature_numbers,  # number of state features
            learning_rate=0.01,  # learning rate
            reward_decay=1,  # reward discount factor; 1 for episodic tasks
            e_greedy=0.9,  # epsilon for the greedy policy
            replace_target_iter=300,  # swap target_net params every 300 steps
            memory_size=500,  # replay memory capacity
            batch_size=32,  # mini-batch size sampled from memory per update
    ):
        self.action_space = action_space
        self.action_numbers = action_numbers  # the concrete action values? [0,0.01,...,budget]
        self.feature_numbers = feature_numbers
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy  # maximum value of epsilon
        self.replace_target_iter = replace_target_iter  # steps between target_net swaps
        self.memory_size = memory_size  # memory capacity
        self.batch_size = batch_size  # how many memories to sample per update
        # NOTE(review): hard-coded; ignores the e_greedy argument — confirm intended.
        self.epsilon = 0.9
        # restore params
        self.eval_net = Net(self.feature_numbers, self.action_numbers).cuda()
        self.eval_net.load_state_dict(torch.load('Model/DRLB_model_params.pth'))
    # Pick the best (greedy) action for a state.
    def choose_best_action(self, state):
        # normalize state's shape to (1, size_of_state)
        state = torch.unsqueeze(torch.FloatTensor(state), 0).cuda()
        actions_value = self.eval_net.forward(state)
        action_index = torch.max(actions_value, 1)[1].data.cpu().numpy()[0]
        action = self.action_space[action_index]  # pick the action with the largest q_eval value
        return action
| true
|