blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9dc93f7d0d72e35617b1dbf9bce7acc7bee7d8d1 | c33690a1cf47cd18e755f30260291d51912c690f | /App_Base/migrations/0011_reunion_url.py | 35a13408cfa6f5046d11c39f2cee2758916a8862 | [] | no_license | otonelunico/LegalAssistant | 6cb5075b096684546a6ad862aa5c486c1efc59ad | 5c2c4308c10e0a353fa58e9d8bd4d699e74f3f38 | refs/heads/master | 2021-05-14T12:25:24.181305 | 2018-01-19T06:14:36 | 2018-01-19T06:14:36 | 116,408,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.0.1 on 2018-01-18 03:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: add a ``url`` field to ``Reunion``."""

    # Must be applied after migration 0010 of the App_Base app.
    dependencies = [
        ('App_Base', '0010_auto_20180118_0007'),
    ]

    operations = [
        migrations.AddField(
            model_name='reunion',
            name='url',
            # default='2' only backfills existing rows during the migration;
            # preserve_default=False removes it from the model state afterwards.
            field=models.CharField(default='2', max_length=200),
            preserve_default=False,
        ),
    ]
| [
"ocubillosj@gmail.com"
] | ocubillosj@gmail.com |
bb1f4318b509f59fd872544e381aac00d5246fa6 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startQiskit_QC726.py | 332c0f75c85ee32a7ea60c476b48ef53c4b5a3db | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,699 | py | # qubit number=3
# total number=15
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the QAOA-style ansatz circuit on *n* qubits.

    NOTE(review): this auto-generated function reads the globals ``E``
    (weighted edge list), ``V`` (vertex array), ``gamma`` and ``beta``,
    which are defined under ``__main__`` — it cannot be called before
    those are set. The ``# number=N`` tags are ids from the generator.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)

    # Uniform superposition on the first three qubits only.
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.cx(input_qubit[0],input_qubit[2]) # number=12
    prog.x(input_qubit[2]) # number=13
    prog.cx(input_qubit[0],input_qubit[2]) # number=14
    prog.y(input_qubit[3]) # number=5

    # Cost layer over the graph edges (the edge weight field is ignored).
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        # NOTE(review): k/l are used 1-based above (k-1) but passed
        # 0-based to prog.p here — confirm the intended indexing.
        prog.p(gamma, k)
        prog.p(gamma, l)

    # Mixer layer: RX rotation on every vertex qubit.
    prog.rx(2 * beta, range(len(V)))

    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    prog.h(input_qubit[1]) # number=11
    prog.swap(input_qubit[1],input_qubit[0]) # number=8
    prog.y(input_qubit[0]) # number=9
    prog.y(input_qubit[0]) # number=10
    # circuit end
    return prog
if __name__ == '__main__':
    # Build the weighted interaction graph used by the QAOA-style circuit.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    # Grid-search the analytic expectation F1 over (gamma, beta) and keep
    # the maximising pair as the circuit parameters.
    step_size = 0.1

    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)

    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]

    gamma = a[0] * step_size
    beta = a[1] * step_size

    # NOTE(review): make_circuit reads E, V, gamma and beta as globals,
    # so it must only be called after they are defined above.
    prog = make_circuit(4)
    sample_shot =3962
    writefile = open("../data/startQiskit_QC726.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))

    # Execute on the real 5-qubit Yorktown device, transpiling against the
    # FakeYorktown mock to match its gate set / coupling map.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")

    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()

    # Dump measurement counts, circuit depth and the circuit itself.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
98583b6671a894809709798e797a7a4c7c2b95e3 | e700cbfcfa43aa42449cbcd2c337727fe398f253 | /twit/api/security.py | 0db3ac998687106af9443c90eae6643494595d72 | [
"MIT"
] | permissive | pchudzik/tweet | 28b12787667dae25dda64ab97218ed35703057c5 | 1938dae6be1359d73a8140b994c3db39d2b336da | refs/heads/master | 2020-04-23T11:57:40.508876 | 2019-03-15T20:03:55 | 2019-03-15T20:03:55 | 171,153,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | from flask_jwt_extended import jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt
from flask import jsonify, request, Flask
from twit import users, tokens
def login_user():
    """Authenticate the credentials in the JSON body.

    Returns the login state as JSON on success, or a 401 response with an
    error message when the credentials are rejected.
    """
    body = request.get_json()
    state = users.login(body.get("login"), body.get("password"))
    if not state:
        return jsonify({"message": "Invalid credentials"}), 401
    return jsonify(state._asdict())
@jwt_refresh_token_required
def refresh_token():
    """Issue a fresh access token for the identity in the refresh token."""
    identity = get_jwt_identity()
    refreshed = tokens.refresh_token(identity)
    return jsonify(refreshed._asdict())
@jwt_required
def logout():
    """Revoke the current access token and answer 204 No Content."""
    token_id = get_raw_jwt()['jti']
    tokens.revoke(token_id)
    return '', 204
def init_security(app: Flask):
    """Register the authentication endpoints (login/refresh/logout) on *app*."""
    routes = (
        ("/login", login_user),
        ("/login/refresh", refresh_token),
        ("/logout", logout),
    )
    for rule, view in routes:
        app.add_url_rule(rule, view_func=view, methods=["POST"])
| [
"pawel.chudzik@gmail.com"
] | pawel.chudzik@gmail.com |
310ff641b989d7940cc1695fbdb8b6061811b6d1 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /fNQEi9Y2adsERgn98_5.py | 326ac1c77e47cba36deeba658a0cc5c63b081e7c | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py | """
Write a function that takes the coordinates of three points in the form of a
2d array and returns the perimeter of the triangle. The given points are the
vertices of a triangle on a two-dimensional plane.
### Examples
perimeter( [ [15, 7], [5, 22], [11, 1] ] ) ➞ 47.08
perimeter( [ [0, 0], [0, 1], [1, 0] ] ) ➞ 3.42
perimeter( [ [-10, -10], [10, 10 ], [-10, 10] ] ) ➞ 68.28
### Notes
* The given points always create a triangle.
* The numbers in the argument array can be positive or negative.
* Output should have 2 decimal places
* This challenge is easier than it looks.
"""
class Triangle:
    """A triangle defined by three 2-D vertices.

    Exposes its vertices (``p1``-``p3``), its edges (``l1``-``l3``) and
    the total ``perimeter`` rounded to two decimal places.
    """

    class Point:
        """A single 2-D coordinate."""

        def __init__(self, x, y):
            self.x = x
            self.y = y

    class Line:
        """A segment between two Points, with slope/intercept, a textual
        equation and the Euclidean length."""

        def __init__(self, p1, p2):
            self.p1 = p1
            self.p2 = p2
            self.p1x, self.p1y = p1.x, p1.y
            self.p2x, self.p2y = p2.x, p2.y
            dy = self.p2y - self.p1y
            dx = self.p2x - self.p1x
            if dy == 0:
                # Horizontal segment: constant y, no slope/intercept.
                self.m = None
                self.b = None
                self.equation = 'y = {}'.format(self.p1y)
            elif dx == 0:
                # Vertical segment: undefined slope, constant x.
                self.m = None
                self.b = None
                self.equation = 'x = {}'.format(self.p1x)
            else:
                self.m = dy / dx
                self.b = self.p1y - (self.m * self.p1x)
                self.equation = 'y = {m}*x + {b}'.format(m=self.m, b=self.b)
            self.length = (dx ** 2 + dy ** 2) ** .5

    def __init__(self, points):
        corners = [Triangle.Point(p[0], p[1]) for p in points]
        self.p1, self.p2, self.p3 = corners
        self.l1 = Triangle.Line(self.p1, self.p2)
        self.l2 = Triangle.Line(self.p2, self.p3)
        self.l3 = Triangle.Line(self.p3, self.p1)
        # Sum of the three side lengths, rounded to 2 decimal places.
        self.perimeter = round(self.l1.length + self.l2.length + self.l3.length, 2)
def perimeter(lst):
    """Return the perimeter (2 d.p.) of the triangle with vertices *lst*."""
    return Triangle(lst).perimeter
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
02a82b5faedbd3a91ab09f0fa5a843fc3ac9a56f | 4e67c2edd71493a98a3f13e5b2073c1d05b1b656 | /Semestre 02/ProjetoIntegrador2/Aula 11.05.2020/heranca.py | e3253170a13af5d997f7c8028fca9b4ae7cf97aa | [] | no_license | felipellima83/UniCEUB | 05991d7a02b13cd4e236f3be3a34726af2dc1504 | dbc44866545b5247d1b5f76ec6e9b7778e54093e | refs/heads/master | 2023-07-08T19:04:19.830473 | 2021-08-12T12:33:49 | 2021-08-12T12:33:49 | 249,958,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,948 | py | ''' UniCEUB - Ciência da Computação - Prof. Barbosa
Atalho de teclado: ctlr <d>, duplica linha. ctrl <y>, apaga linha. ctrl </>, comenta linha
1- Crie a classe Funcionario com os atributos nome, cpf, salario
- Crie o construtor da classe Funcionario def __init___ (self, ...). Teste
3- Crie uma instância (objeto f1) da classe com os dados necessários (f1 = Funcionario ( ... ) )
- Crie alguns método get e set e teste.
5- Sobrescreva o método __str__. Ele recebe o objeto e retorna todos os dados do funcionário. Teste.
6- Antes do método main, crie a classe Gerente com os atributos nome, cpf, salario, senha, qtd_gerencia
7- Crie uma instância (objeto g1) da classe Gerente com os dados necessários
8- Mostre todos os dados (atributos) do objeto g1
9- Crie o método autentica dentro da classe Gerente. Ele recebe o objeto, o usuário digita a senha,
imprime: "Acesso permitido." ou "Acesso negado." e retorna um valor booleano (True ou False).
10- Use o método autentica para o gerente instanciado (objeto g1).
11- Use o método autentica para o funcionario instanciado (objeto f1). Por quê deu erro?
12- Use o método __ str__ para o gerente (objeto g1) instanciado. Por quê mostrou endereço hexadecimal?
13- Crie outra instância (objeto g2) da classe Gerente com os dados necessários.
14- Use todos os métodos da classe Gerente para o gerente g2. '''
class Funcionario(object):
    """An employee with a name, CPF (Brazilian tax id) and salary."""

    def __init__(self, nome, cpf, salario=0.0):
        # Constructor: store the employee's data on the instance.
        self.nome = nome
        self.cpf = cpf
        self.salario = salario

    def get_nome(self):
        """Return the employee's name."""
        return self.nome

    def set_nome(self, novo_nome):
        """Replace the employee's name in memory."""
        self.nome = novo_nome

    def get_cpf(self):
        """Return the employee's CPF."""
        return self.cpf

    def get_salario(self):
        """Return the employee's salary."""
        return self.salario

    def __str__(self):
        # Dunder method: human-readable summary of the employee.
        return f"Nome: {self.nome}, CPF: {self.cpf}, salario: {self.salario:.2f}"
class Gerente(object):
    """A manager: holds personal data plus a password and a team size."""

    def __init__(self, nome, cpf, salario, senha, qtd_gerencia=0):
        self.nome = nome
        self.cpf = cpf
        self.salario = salario
        self.senha = senha
        self.qtd_gerencia = qtd_gerencia

    def get_nome(self):
        """Return the manager's name."""
        return self.nome

    def set_nome(self, novo_nome):
        """Replace the manager's name."""
        self.nome = novo_nome

    def get_cpf(self):
        """Return the manager's CPF."""
        return self.cpf

    def get_salario(self):
        """Return the manager's salary."""
        return self.salario

    def get_qtd_gerencia(self):
        """Return how many people this manager supervises."""
        return self.qtd_gerencia

    def autentica(self):
        """Prompt once for the password on stdin; print the outcome and
        return True when it matched, False otherwise."""
        senha = input("Insira a senha: ")
        ok = self.senha == senha
        if ok:
            print("Acesso permitido.")
        else:
            print("Acesso negado.")
        return ok
if __name__ == '__main__':
    f1 = Funcionario('Paulo', '123', 1000.0)  # create object f1, calling the constructor
    print(f1.get_nome())
    print(f1.get_cpf())
    print(f1.get_salario())

    # r is a second reference to the same object, so printing it also
    # goes through Funcionario.__str__.
    r = f1
    print(r)
    print(f1)  # print(f1.__str__())

    g1 = Gerente('Paula', '234', 3000.0, 's1', 5)
    print(g1.get_nome())
    # Gerente does not override __str__, so this prints the default
    # object repr (exercise item 12 asks why).
    print(g1.__str__())  # print(g1)

    # autentica() prompts for the password on stdin and returns a bool.
    r = g1.autentica()
    if r == True:
        pass
    print(r)

    g2 = Gerente('Paulo', '34', 5000.0, 'g2', 3)
print('G2: ', g2.get_nome()) | [
"felipellima83@gmail.com"
] | felipellima83@gmail.com |
bf597a4acd8431cb675a1fa2e2141e59cced6163 | 805f2236caaec6c75629a7ce7a4b00c2c5b5e0f1 | /object_detection/anchors.py | b0d1d43863ac93eaee1e60dd2ca64758967d2ffa | [] | no_license | pai-plznw4me/object_detection | 7cccc46a32aded7828ce75edffbece35b6370177 | 43cf167e2c73c75682db888a11bce3321bb2d73f | refs/heads/master | 2020-11-24T20:27:01.116215 | 2019-12-28T07:18:19 | 2019-12-28T07:18:19 | 228,329,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,691 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def generate_anchor(input_tensor,
                    backbone_output,
                    anchor_default_sizes=(32., 64., 128.),
                    anchor_ratio=(0.5, 1, 2)):
    """
    Description:
        Generate a dense grid of anchor boxes — one set per backbone
        feature-map cell — expressed in input-image pixel coordinates.
    Args:
        :param input_tensor: Keras Layer, 4D Tensor (the network input)
        :param backbone_output: Keras Layer, 4D Tensor (the feature map)
        :param anchor_default_sizes: base anchor side lengths, in pixels
        :param anchor_ratio: aspect ratios applied to every base size
    :return: anchor_grid: 3D Tensor of shape
        [backbone_h, backbone_w, n_anchors * 4]; each anchor is encoded
        as (cx, cy, w, h).
    """
    # input shape
    input_h = K.shape(input_tensor)[1]
    input_w = K.shape(input_tensor)[2]

    # backbone shape
    backbone_h = K.shape(backbone_output)[1]
    backbone_w = K.shape(backbone_output)[2]

    # to calculate the distance btw feature map pixels:
    # effective stride = input size / feature-map size, rounded up to the
    # next power of two.
    # NOTE(review): tf.log / tf.ceil are TF1-style APIs (tf.math.* in TF2).
    stride_h = 2. ** tf.ceil(tf.log(input_h / backbone_h)/tf.log(2.))
    stride_w = 2. ** tf.ceil(tf.log(input_w / backbone_w)/tf.log(2.))

    # generate anchor sizes: every (default size, ratio) combination;
    # [size*sqrt(r), size/sqrt(r)] keeps the anchor area equal to size**2.
    n_anchor_sizes = len(anchor_default_sizes) * len(anchor_ratio)
    anchor_sizes = []
    for size in anchor_default_sizes:
        for r in anchor_ratio:
            anchor_sizes.append([size*np.sqrt(r), size/np.sqrt(r)])
    anchor_sizes = np.asarray(anchor_sizes)

    # generate anchor grid
    # 4 => cx, cy, w, h
    fmap_grid = tf.ones(shape=[backbone_h, backbone_w], dtype=tf.float64)

    # generate coordinate center_x, center_y
    range_h = tf.range(backbone_h)
    range_w = tf.range(backbone_w)
    cx, cy = tf.meshgrid(range_w, range_h)
    cx = tf.cast(cx, tf.float64)
    cy = tf.cast(cy, tf.float64)

    # shift cx, cy into input-image coordinates:
    # neighbouring feature-map pixels are `stride` input pixels apart,
    # and the stride//2 offset centres each anchor on its cell.
    cx = cx * stride_w + stride_w // 2
    cy = cy * stride_h + stride_h // 2

    # cx/cy are replicated once per anchor size
    grid_cx = tf.stack([cx] * n_anchor_sizes, axis=-1)
    grid_cy = tf.stack([cy] * n_anchor_sizes, axis=-1)

    # mapping ws, hs to anchor grid
    anchor_ws = anchor_sizes[:, 0]
    anchor_hs = anchor_sizes[:, 1]
    grid_ws = tf.expand_dims(fmap_grid, axis=-1) * anchor_ws
    grid_hs = tf.expand_dims(fmap_grid, axis=-1) * anchor_hs
    """
    Description:
        grid_cx shape = (7,7,9),
        grid_cx[0, 0, :] => [x1,x2,x3 .. ]
        grid_cy = shape = (7,7,9)                    [[x1, x2, x3, ...]
        grid_cy[0, 0, :] => [y1,y2,y3 .. ]            [y1, y2, y3, ...]
                                                ==>   [w1, w2, w3, ...]
        grid_ws = shape = (7,7,9)                     [h1, h2, h3, ...]]
        grid_ws[0, 0, :] => [w1,w2,w3 .. ]
        grid_hs = shape = (7,7,9)
        grid_hs[0, 0, :] => [h1,h2,h3 .. ]
    """
    anchor_grid = tf.stack([grid_cx, grid_cy, grid_ws, grid_hs], axis=-1)
    """
    Description:
        [[x1, x2, x3, ...]
         [y1, y2, y3, ...]
         [w1, w2, w3, ...]  => [x1,y1,w1,h1, x2,y2,w2,h2 ...]
         [h1, h2, h3, ...]]
    """
    anchor_grid = tf.reshape(anchor_grid, [backbone_h, backbone_w, -1])
    return anchor_grid
def generate_trainable_anchors(normalize_anchors, matching_mask):
    """
    Args:
        normalize_anchors: 3D tensor, shape = [N_anchor, N_gt, 4],
            normalised (dx, dy, dw, dh) regression targets for every
            anchor / ground-truth pair.
        matching_mask: 2D tensor, shape = [N_anchor, N_gt]; entries are
            1 where the anchor is matched to that ground truth and -1
            otherwise, e.g.
                [[ 1, -1],   <- anchor1
                 [-1, -1],   <- anchor2
                 [-1, -1],   <- anchor3
                 [ 1,  1],   <- anchor4
                 [-1,  1]]   <- anchor5
                  gt1  gt2
            Here the pairs used are (gt1, anchor1), (gt2, anchor4) and
            (gt2, anchor5).

    Description:
        Builds the [N_anchor, 4] regression-target tensor used for
        training. Anchors with no match are filled with -1 -1 -1 -1.
        When an anchor matches several ground truths, the right-most
        match's update wins (e.g. (gt2, anchor4) in the example).
    """
    # Tensorflow
    # TODO: using `matching_mask == 1` directly raises an error here — why?
    indices_2d = tf.where(tf.equal(matching_mask, 1))
    indices_2d = tf.stack(indices_2d, axis=0)
    indices = indices_2d[:, 0]
    indices = tf.expand_dims(indices, axis=-1)

    # calculate delta
    # gather_nd over [indices_2d] returns a (1, N) tensor; the trailing
    # [0] drops the superfluous leading axis.
    dx = tf.gather_nd(normalize_anchors[:, :, 0], [indices_2d])[0]
    dy = tf.gather_nd(normalize_anchors[:, :, 1], [indices_2d])[0]
    dw = tf.gather_nd(normalize_anchors[:, :, 2], [indices_2d])[0]
    dh = tf.gather_nd(normalize_anchors[:, :, 3], [indices_2d])[0]
    d_xywh = tf.stack([dx, dy, dw, dh], axis=-1)

    # Start from an all -1 fill, then scatter the matched deltas in.
    n_anchors = tf.shape(normalize_anchors)[0]
    ret_anchor = tf.ones([n_anchors, 4], dtype=tf.float32) * -1
    ret_anchor = tf.tensor_scatter_nd_update(ret_anchor, indices, d_xywh)
    return ret_anchor
def generate_trainble_classes(mask, gt_classes, n_classes):
    """
    Description:
        Build per-anchor one-hot classification targets.
        NOTE(review): the function name has a typo ("trainble") that is
        kept because external callers may rely on it.
    Args:
        mask: 2D tensor [N_anchor, N_gt]; +1 marks a matched
            anchor/ground-truth pair, -1 an unmatched one, e.g.
            [[1 , -1, 1],
             [1 , -1, 1],
             ...
             [1 , -1, 1]]
        gt_classes: 1D int tensor of ground-truth class ids, e.g. [2, 2, 3]
        n_classes: total number of classes (background is class id 0)
    Return:
        2D tensor [N_anchor, n_classes]: one-hot of the matched class for
        anchors with a positive match, one-hot of background otherwise.
    """
    # Positive pairs keep their class id; unmatched entries turn negative.
    class_mask = mask * gt_classes
    n_length = tf.shape(class_mask)[0]

    # Start from an all-background one-hot matrix.
    background = tf.zeros(n_length, dtype=tf.int32)
    background = tf.one_hot(background, n_classes)

    # Rows with at least one positive match get the matched class one-hot.
    positive_index = tf.where(class_mask > 0)
    positive_value = tf.gather_nd(class_mask, positive_index)
    positive_onehot = tf.one_hot(positive_value, n_classes)

    indices = positive_index[:, 0]
    indices = tf.expand_dims(indices, axis=-1)
    pred_classes = tf.tensor_scatter_nd_update(background, indices, positive_onehot)
    return pred_classes
| [
"plznw4me@naver.com"
] | plznw4me@naver.com |
0ead00a9de13ee038b09006097ebe531c1fb1e13 | 4369c5a214f8c4fb1f8a286f72d57cfa9c3f02c7 | /geotrek/flatpages/migrations/0006_auto_20200406_1413.py | 2c0159b11ab5d6e1e3e01fbedfeda9c5f6637a40 | [
"BSD-2-Clause"
] | permissive | GeotrekCE/Geotrek-admin | c13d251066e92359c26f22d185b8bd2e26e622ef | a91b75261a876be51ad2a693618629900bea6003 | refs/heads/master | 2023-08-21T12:45:25.586551 | 2023-08-09T12:28:33 | 2023-08-09T12:28:33 | 9,886,107 | 71 | 56 | BSD-2-Clause | 2023-09-13T09:40:33 | 2013-05-06T12:17:21 | Python | UTF-8 | Python | false | false | 550 | py | # Generated by Django 2.0.13 on 2020-04-06 14:13
from django.db import migrations
class Migration(migrations.Migration):
    """Rename legacy sequences to Django's default ``<table>_id_seq`` names."""

    dependencies = [
        ('flatpages', '0005_auto_20200228_2150'),
    ]

    operations = [
        # Raw SQL with no reverse statement: this migration is irreversible.
        migrations.RunSQL('ALTER SEQUENCE p_t_page_id_seq RENAME TO flatpages_flatpage_id_seq;'),
        migrations.RunSQL('ALTER SEQUENCE t_r_page_portal_id_seq RENAME TO flatpages_flatpage_portal_id_seq;'),
        migrations.RunSQL('ALTER SEQUENCE t_r_page_source_id_seq RENAME TO flatpages_flatpage_source_id_seq;'),
    ]
| [
"gael.utard@makina-corpus.com"
] | gael.utard@makina-corpus.com |
ca543090d0178402418aaec36a7a435942abb28f | 3b84ca7d132e6ca5004029d39bfa7c8fead07fe1 | /arnold/5.3.1.0/package.py | 69832010ff5d3970be4247eb14dfda606c5bd6b4 | [] | no_license | est77/rez-packages | 05a5a05224e02c0a28bc37a81cbd07ca7447d604 | 449ade7acf92196efda2e8ec883c52ba4e33262d | refs/heads/master | 2020-05-27T10:35:02.323417 | 2020-02-23T19:03:05 | 2020-02-23T19:03:05 | 82,542,112 | 22 | 7 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | # -*- coding: utf-8 -*-
# Rez package definition for the Arnold renderer.
name = "arnold"
version = "5.3.1.0"
description = "Arnold"


def commands():
    # Executed by rez at environment-resolve time; `env` and the "{root}"
    # placeholder are injected by rez, not defined in this file.
    env.PATH.append("{root}/bin")
    env.LD_LIBRARY_PATH.append("{root}/bin")
    env.PYTHONPATH.append("{root}/python")
| [
"ramenhdr@gmail.com"
] | ramenhdr@gmail.com |
20fd63457fbe8324e6d75d4f58117473bc620f2b | 4f972877da14226125440b3da9bdb058764d8a54 | /mlflowDemo/sklearn_logistic_regression.py | 09b1659d8a2feb4deb3ff623ec6a439ff6c83977 | [] | no_license | ZhiYinZhang/study | 16c29990cb371e7e278c437aa0abc7c348614063 | 8c085310b4f65e36f2d84d0acda4ca257b7389af | refs/heads/master | 2021-07-09T16:05:02.925343 | 2020-06-30T07:53:05 | 2020-06-30T07:53:05 | 153,767,096 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# datetime:2019/11/11 11:56
import numpy as np
from sklearn.linear_model import LogisticRegression
import mlflow
from mlflow import sklearn
if __name__ == "__main__":
    # Point the client at a locally running MLflow tracking server.
    mlflow.set_tracking_uri("http://localhost:5001")
    # mlflow.create_experiment("sklearn logistic regression")
    mlflow.set_experiment("sklearn logistic regression")
    with mlflow.start_run() as active_run:
        print(mlflow.active_run().info)
        # Toy 1-D dataset; x=1 appears twice with conflicting labels, so a
        # perfect training score is impossible by construction.
        X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
        y = np.array([0, 0, 1, 1, 1, 0])
        lr = LogisticRegression()
        lr.fit(X, y)
        score = lr.score(X, y)
        print("Score: %s" % score)
        # Record the training accuracy and the fitted model in this run.
        mlflow.log_metric("score", score)
        # sklearn.log_model(lr, "model")
        mlflow.sklearn.log_model(lr,"model2")
# print("Model saved in run %s" % mlflow.active_run().info.run_uuid) | [
"2454099127@qq.com"
] | 2454099127@qq.com |
9dc567114028c18e7f20da8e620668d1ca00936d | ba35ce41c1cf8a1cd75441df1b7173c6606b8c7f | /si_prefix/tests/test_si_format.py | 502e967aed31fa6665d8f1b226678bb31eade23f | [
"BSD-3-Clause"
] | permissive | Lucaszw/si-prefix | 352396f184ed041d3054b10cddcd894deee3f3cf | e1f73d6abb3735cc6aad70eb216cb92a7736892a | refs/heads/master | 2021-06-20T11:06:08.101055 | 2017-07-30T04:17:32 | 2017-07-30T04:17:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | # coding: utf-8
from nose.tools import eq_
from si_prefix import si_format
TEST_CASES = [(1e-27, '1.00e-27'),
(1.764e-24, '1.76 y'),
(7.4088e-23, '74.09 y'),
(3.1117e-21, '3.11 z'),
(1.30691e-19, '130.69 z'),
(5.48903e-18, '5.49 a'),
(2.30539e-16, '230.54 a'),
(9.68265e-15, '9.68 f'),
(4.06671e-13, '406.67 f'),
(1.70802e-11, '17.08 p'),
(7.17368e-10, '717.37 p'),
(3.01295e-08, '30.13 n'),
(1.26544e-06, '1.27 u'),
(5.31484e-05, '53.15 u'),
(0.00223223, '2.23 m'),
(0.0937537, '93.75 m'),
(3.93766, '3.94 '), # Space added to help alignment
(165.382, '165.38 '), # Space added to help alignment
(6946.03, '6.95 k'),
(291733, '291.73 k'),
(1.22528e+07, '12.25 M'),
(5.14617e+08, '514.62 M'),
(2.16139e+10, '21.61 G'),
(3.8127e+13, '38.13 T'),
(1.60133e+15, '1.60 P'),
(6.7256e+16, '67.26 P'),
(2.82475e+18, '2.82 E'),
(1.1864e+20, '118.64 E'),
(4.98286e+21, '4.98 Z'),
(2.0928e+23, '209.28 Z'),
(8.78977e+24, '8.79 Y'),
(3.6917e+26, '369.17 Y'),
(1.55051e+28, '15.51e+27'),
(6.51216e+29, '651.22e+27')]
def test_si_format():
    """Check si_format(value, 2) against every expected string."""
    for value, result in TEST_CASES:
        # Test that pure Python format function matches expected output.
        # Plain assert replaces nose's eq_: nose is unmaintained, and a
        # bare assert works under pytest with introspected failure output.
        assert si_format(value, 2) == result, (value, result)
| [
"christian@fobel.net"
] | christian@fobel.net |
e961cf89a9e27dee6daa9cb7527a45eaf9db66b0 | 68b2e5981caadabd6a4ecec5dab69831979b33f2 | /job_portal/urls.py | 5d01babc1e71be29b10536a68e717fa37420dc88 | [] | no_license | linker10/jodep | a78b853743b701ef3a63ed6f8555e280e20f1048 | 7f533e9ee68e57eb19a874390e087ca19d786d60 | refs/heads/master | 2022-12-30T20:34:06.721603 | 2020-10-14T17:14:15 | 2020-10-14T17:14:15 | 292,902,953 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py | """job_portal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Root URL routing: delegate each site section to its app's urlconf, then
# append a route serving user-uploaded media from MEDIA_ROOT (static()
# only emits this helper pattern when DEBUG is on).
urlpatterns = [
    path('', include('home.urls', namespace='home')),
    path('accounts/', include('accounts.urls',)),
    path('manager/', include('manager.urls', namespace='manager')),
    path('admin/', admin.site.urls),
    path('jobs/', include('jobs.urls', namespace='jobs')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"bilalsharif4@gmail.com"
] | bilalsharif4@gmail.com |
4eb534b6c3f8fc7ab0a1340f2bb63bf369c7e86a | 656df056ad736fdaaa1ef428ef09786c5a3d1494 | /codigo/ESP32/boot.py | 9b8ce49b77c2b0a915f6328fe69091b79e1b4d73 | [] | no_license | javacasm/micropythonTutorial | a610024096b50512347bcb72937facd41cf6db8e | cc90e26763ef884e0311eecccc6c72c6c94a0a30 | refs/heads/master | 2022-02-03T20:06:10.315551 | 2022-01-25T11:02:09 | 2022-01-25T11:02:09 | 159,473,846 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import webrepl
import network
# Join the configured Wi-Fi access point in station mode.
iw = network.WLAN(network.STA_IF)
iw.active(True)
# SECURITY NOTE(review): the Wi-Fi SSID and password are hard-coded in a
# committed file; consider moving them to a non-committed config.
iw.connect('OpenWrt','qazxcvbgtrewsdf')
# Start the WebREPL server so the board is reachable over the network.
webrepl.start()
iw.ifconfig()
print('esp32 Lolin32.34')
| [
"javacasm@gmail.com"
] | javacasm@gmail.com |
23cf0b4eab94f6e562a94369a9a428538ba2f765 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/market_tools/tools/complain/util.py | beac7fad6a1a9d61bdf0ff35d0765fc78f27069d | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User, Group, Permission
from django.db.models import F
import time
from market_tools.prize.models import Prize
from market_tools.tools.coupon import util as coupon_util
from watchdog.utils import watchdog_fatal, watchdog_error
from modules.member.models import Member, MemberGrade, BRING_NEW_CUSTOMER_VIA_QRCODE
from models import *
#############################################################################
#get_coupon_rules: 获取优惠券rule
#############################################################################
def get_coupon_rules(owner):
return coupon_util.get_coupon_rules(owner)
#############################################################################
#get_all_grades_list: 获取会员等级
#############################################################################
def get_all_grades_list(request):
webapp_id = request.user_profile.webapp_id
return MemberGrade.get_all_grades_list(webapp_id)
| [
"jiangzhe@weizoom.com"
] | jiangzhe@weizoom.com |
9799bfd8fcc771cd4435949af42db1f97eb1cf32 | c8b1d07ba58a82ce58623c4e67703e1a71251691 | /ChipSeq/ComparePeak/combine.py | 2e35eece161fd30d8f90bfe31a6dfdab7b6122fa | [] | no_license | jumphone/Bioinformatics | 17a54740033b3fafb1efee52b770ae023765e39b | 58b7a83233e43fd2cb4db8baa0a1379d1fbf07c9 | refs/heads/master | 2021-04-27T07:27:26.423309 | 2020-11-26T09:04:23 | 2020-11-26T09:04:23 | 122,632,340 | 25 | 14 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | import sys
# Annotate each peak from argv[1] with two 0/1 flags telling whether it
# overlaps (count column > 0) the peak sets in argv[2] and argv[3];
# results go to argv[4].

def _load_overlapping_peaks(path):
    """Return the set of peaks (first four tab-separated fields, joined by
    tabs) whose overlap count — the 4th column from the end — is positive."""
    peaks = set()
    with open(path) as handle:
        for line in handle:
            fields = line.rstrip().split('\t')
            if int(fields[-4]) > 0:
                peaks.add('\t'.join(fields[:4]))
    return peaks


# `with` blocks guarantee the handles are closed and the output is flushed
# (the original left all four files open).
with open(sys.argv[1]) as fa:
    old = [line.rstrip() for line in fa]

set1 = _load_overlapping_peaks(sys.argv[2])
set2 = _load_overlapping_peaks(sys.argv[3])

with open(sys.argv[4], 'w') as fo:
    for one in old:
        f1t = 1 if one in set1 else 0
        f2t = 1 if one in set2 else 0
        fo.write(one + '\t' + str(f1t) + '\t' + str(f2t) + '\n')
| [
"noreply@github.com"
] | jumphone.noreply@github.com |
96844d6e1b7cbb1e0c4df2cf34bf1e2323da26d5 | 4412fd856cfbdfab98122b11ea01e447a76851b3 | /rodentdb/migrations/0036_auto_20190621_1805.py | 2901595ecec0afdf9c55748cf027ceb41f509900 | [] | no_license | fchampalimaud/rodentdb | d8e8c0c7552de638d3a2fd57de287401997fdf3c | 4a970c09da78f22a8c57d8ea98d29a569f531613 | refs/heads/master | 2021-06-18T02:05:19.200858 | 2019-09-17T18:09:57 | 2019-09-17T18:09:57 | 185,334,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # Generated by Django 2.1.8 on 2019-06-21 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rodentdb', '0035_rodent_origin'),
]
operations = [
migrations.AlterModelOptions(
name='origin',
options={'ordering': ['name'], 'verbose_name': 'origin', 'verbose_name_plural': 'origins'},
),
migrations.AlterField(
model_name='origin',
name='name',
field=models.CharField(max_length=40, unique=True),
),
]
| [
"hugo.cachitas@research.fchampalimaud.org"
] | hugo.cachitas@research.fchampalimaud.org |
b914488248139a3f003a0b38e8d485dc08daed30 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Skimage_numpy/source/skimage/segmentation/_clear_border.py | a44f07859aa9bb75d87a4367a8a97fc326a07f6c | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,504 | py | import numpy as np
from ..measure import label
def clear_border(labels, buffer_size=0, bgval=0, in_place=False):
    """Clear objects connected to the label image border.

    The changes will be applied directly to the input.

    Parameters
    ----------
    labels : (N, M) array of int
        Label or binary image.
    buffer_size : int, optional
        The width of the border examined.  By default, only objects
        that touch the outside of the image are removed.
    bgval : float or int, optional
        Cleared objects are set to this value.
    in_place : bool, optional
        Whether or not to manipulate the labels array in-place.

    Returns
    -------
    labels : (N, M) array
        Cleared binary image.

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.segmentation import clear_border
    >>> labels = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 0],
    ...                    [0, 0, 0, 0, 1, 0, 0, 0, 0],
    ...                    [1, 0, 0, 1, 0, 1, 0, 0, 0],
    ...                    [0, 0, 1, 1, 1, 1, 1, 0, 0],
    ...                    [0, 1, 1, 1, 1, 1, 1, 1, 0],
    ...                    [0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> clear_border(labels)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 0, 0, 0, 0],
           [0, 0, 0, 1, 0, 1, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 1, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0]])

    """
    image = labels

    rows, cols = image.shape

    if buffer_size >= rows or buffer_size >= cols:
        raise ValueError("buffer size may not be greater than image size")

    # create borders with buffer_size:
    # boolean frame of width (buffer_size + 1) around the image edge.
    borders = np.zeros_like(image, dtype=np.bool_)
    ext = buffer_size + 1
    borders[:ext] = True
    borders[- ext:] = True
    borders[:, :ext] = True
    borders[:, - ext:] = True

    # Re-label, in case we are dealing with a binary image
    # and to get consistent labeling
    labels = label(image, background=0)
    number = np.max(labels) + 1

    # determine all objects that are connected to borders
    borders_indices = np.unique(labels[borders])
    indices = np.arange(number + 1)
    # mask all label indices that are connected to borders
    label_mask = np.in1d(indices, borders_indices)
    # create mask for pixels to clear (True wherever the pixel's
    # relabelled id belongs to a border-touching object)
    mask = label_mask[labels.ravel()].reshape(labels.shape)

    if not in_place:
        image = image.copy()

    # clear border pixels in the *original* array, so interior objects
    # keep their pre-existing label ids
    image[mask] = bgval

    return image
| [
"master@MacBook-Pro-admin.local"
] | master@MacBook-Pro-admin.local |
66e328a42fc7eace24bdcf174e58a64c8389a711 | 337d17b845f5fdd7f32f6a0607e494eed488a601 | /leetcode/405-convert-number-hexadecimal.py | 3d8c5916e2972e42026261cbfded5ed8cf102540 | [] | no_license | karsibali/solutions | e6130abe026a26558434239cde39c6a14a9712ba | 4ba5d7ac41fecc87491cae2c88293bd798db31fd | refs/heads/master | 2020-04-29T00:13:34.168323 | 2018-12-27T15:43:26 | 2018-12-27T15:43:26 | 175,686,183 | 1 | 0 | null | 2019-03-14T19:27:00 | 2019-03-14T19:27:00 | null | UTF-8 | Python | false | false | 407 | py | SYMS = '0123456789abcdef'
class Solution(object):
    def toHex(self, num):
        """Return the lowercase hexadecimal string for a 32-bit signed int.

        Negative values are encoded as 32-bit two's complement,
        e.g. -1 -> 'ffffffff'; zero maps to '0'.
        """
        if num < 0:
            num += 1 << 32  # two's-complement encoding for 32-bit width
        # Standard-library formatting replaces the hand-rolled nibble loop
        # and the fragile `digits and ... or '0'` idiom.
        return format(num, 'x')
if __name__ == '__main__':
    # Smoke-test one positive value and one negative (two's complement) value.
    to_hex = Solution().toHex
    assert to_hex(26) == "1a"
    assert to_hex(-1) == "ffffffff"
| [
"ozan.onay@gmail.com"
] | ozan.onay@gmail.com |
cdcd7bc6c9374134941acf33f390338df306523c | 19e3fc8e92b1430625987f97068889dfa94caafd | /concierge/endpoints/templates.py | 62ce78437948765cd8443476228cbdc8fd8f3da0 | [
"MIT"
] | permissive | creativcoder/concierge | 167ac092d71b7757e181309e70e5c7600911796b | 8f7bd8f45f8bb9ec2406cd5063df8480c1729d24 | refs/heads/master | 2020-12-25T23:08:19.094852 | 2016-03-24T07:10:22 | 2016-03-24T07:10:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | # -*- coding: utf-8 -*-
import datetime
import distutils.spawn
import os.path
import sys
import concierge
# Banner written at the top of every generated ssh config; the {date} and
# {rc_file} placeholders are filled in by make_header().
HEADER = """
# THIS FILE WAS AUTOGENERATED BY concierge on {date}.
# IT MAKES NO SENSE TO EDIT IT MANUALLY!
#
# CONCIERGERC FILE: {rc_file}
#
# PLEASE VISIT https://github.com/9seconds/concierge FOR DETAILS.
""".strip() + "\n\n"
# systemd unit template for running concierge as a user daemon; {command}
# and {sshconfig} are substituted by make_systemd_script().
SYSTEMD_CONFIG = """
[Unit]
Description=Daemon for converting ~/.concierge to ~/.ssh/config
After=syslog.target
[Service]
ExecStart={command} -o {sshconfig}
Restart=on-failure
[Install]
WantedBy=multi-user.target
""".strip()
# Name of the generated systemd user service unit.
SYSTEMD_SERVICE_NAME = "concierge.service"
# Human-readable install instructions; presumably rendered by a caller
# outside this module with the matching placeholders -- TODO confirm.
SYSTEMD_INSTRUCTIONS = """
Please execute following lines or compose script:
$ mkdir -p "{systemd_user_path}" || true
$ cat > "{systemd_user_service_path}" <<EOF
{systemd_config}
EOF
$ systemctl --user enable {service_name}
$ systemctl --user start {service_name}
""".strip()
def make_header(**kwargs):
    """Render the autogenerated-file banner.

    Optional keyword arguments:
        date: timestamp string (defaults to the current ctime()).
        rc_file: path of the source conciergerc file (defaults to "???").
    """
    date = kwargs.get("date", datetime.datetime.now().ctime())
    rc_file = kwargs.get("rc_file", "???")
    return HEADER.format(date=date, rc_file=rc_file)
def make_systemd_script():
    """Yield the shell commands that install and start the systemd user unit."""
    # ~/.config/systemd/user is the standard location for per-user units.
    systemd_user_path = os.path.join(concierge.HOME_DIR,
                                     ".config", "systemd", "user")
    systemd_user_service_path = os.path.join(systemd_user_path,
                                             SYSTEMD_SERVICE_NAME)
    # sys.argv[0] is the running executable; resolve its absolute path so the
    # unit file does not depend on the caller's PATH.
    systemd_config = SYSTEMD_CONFIG.format(
        command=distutils.spawn.find_executable(sys.argv[0]),
        sshconfig=concierge.DEFAULT_SSHCONFIG)
    yield 'mkdir -p "{0}" || true'.format(systemd_user_path)
    # Write the unit file via a shell heredoc.
    yield 'cat > "{0}" <<EOF\n{1}\nEOF'.format(systemd_user_service_path,
                                               systemd_config.strip())
    yield "systemctl --user enable {0}".format(SYSTEMD_SERVICE_NAME)
    yield "systemctl --user start {0}".format(SYSTEMD_SERVICE_NAME)
| [
"nineseconds@yandex.ru"
] | nineseconds@yandex.ru |
b6a42690360b47fc27b39e105511259c5474aad7 | 241cc30b91e910caf6a9a47a156813ccc495e069 | /blog/management/commands/sync_user_avatar.py | 263734c963e382f9ba01c7abbdff9fc32a2c69f3 | [
"MIT"
] | permissive | colinshin/DjangoBlog | 9f430ffb3faae32553b2ec17a2351aa7dec36ce7 | c6277d2c35b021806be0fa623f1451c201e9677d | refs/heads/master | 2022-11-20T09:58:17.937199 | 2022-10-28T03:36:18 | 2022-10-28T03:36:18 | 266,242,440 | 1 | 0 | MIT | 2020-05-23T01:42:35 | 2020-05-23T01:42:34 | null | UTF-8 | Python | false | false | 894 | py | from django.core.management.base import BaseCommand
from djangoblog.utils import save_user_avatar
from oauth.models import OAuthUser
class Command(BaseCommand):
    """Management command that localizes OAuth users' remote avatar images."""
    help = 'sync user avatar'

    def handle(self, *args, **options):
        # Users with an avatar set that is not yet hosted on our own storage.
        pending = OAuthUser.objects.filter(picture__isnull=False).exclude(
            picture__istartswith='https://resource.lylinux.net').all()
        self.stdout.write('开始同步{count}个用户头像'.format(count=len(pending)))
        for user in pending:
            self.stdout.write('开始同步:{id}'.format(id=user.nikename))
            new_url = save_user_avatar(user.picture)
            if not new_url:
                continue
            self.stdout.write(
                '结束同步:{id}.url:{url}'.format(
                    id=user.nikename, url=new_url))
            user.picture = new_url
            user.save()
        self.stdout.write('结束同步')
| [
"liangliangyy@gmail.com"
] | liangliangyy@gmail.com |
2b282ed9401f181196eddae1813de58d0ccb22f7 | 372af35b599f45b2cb2cc365afd2ece5c31ed188 | /python/EE_Calulator/unit_attack_multiplyer.py | 5ed28b8ae40a599b4e3425bb2b0010503d8b8976 | [] | no_license | byrdie/EE-Calulator | 0eea538d7c9b6ea475cb967951ba11b7b42a7dd5 | 8526bbb5d40887a63afcb0cadc6f0262bc336c27 | refs/heads/master | 2021-08-28T02:56:56.191092 | 2017-12-11T02:59:56 | 2017-12-11T02:59:56 | 112,962,480 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py |
import csv
import dbobject as db_obj
import dbfamily as db_fam
# import EE databases
obj = db_obj.object_import()
fam = db_fam.family_import()
# enter indices of needed object fields
name_index = 0
attack_index = 24
attackMode_index = 32
family_index = 2
# First header cell is left empty so defender names line up with columns.
names = ['']
M = [] # attack multiplier matrix
k_flag = False # flag for first element
# Calculate attack multipliers for each object
for k in range(len(obj)): # loop over attacking units
    M_row = [] # next row of multiplier matrix
    l_flag = False # flag for first element
    # Only calculate for units with non-zero attack
    attack_k = obj[k][attack_index]
    if attack_k <= 0:
        continue
    # name of attacker
    name_k = obj[k][name_index]
    for l in range(len(obj)): # loop over defending units
        # Only calculate for units with non-zero attack
        attack_l = obj[l][attack_index]
        if attack_l <= 0:
            continue
        # name of defender
        name_l = obj[l][name_index]
        # save names
        # NOTE(review): looks like leftover debug output -- it fires once per
        # defender rather than once per attacker; consider removing.
        print(k)
        if k_flag == False:
            names.append(name_l)
        if l_flag == False:
            M_row.append(name_k)
            l_flag = True
        # Determine coordinates in attack multiplier matrix
        # +3 presumably offsets the attack mode into the family table's
        # column layout -- TODO confirm against the dbfamily schema.
        attackMode_k = obj[k][attackMode_index] + 3
        family_l = obj[l][family_index]
        # load attack multiplyer for this unit pair
        multiplier = fam[family_l][attackMode_k]
        M_row.append(multiplier)
    # Emit the header row (defender names) once, before the first data row.
    if k_flag == False:
        M.append(names)
        k_flag = True
    M.append(M_row)
# Dump the matrix (header row plus one row per attacker) as CSV.
with open('../../excel/attack_mult_export.csv', 'w') as export_file:
    export_writer = csv.writer(export_file, delimiter=',')
    for i in range(len(M)):
export_writer.writerow(M[i]) | [
"roytsmart@gmail.com"
] | roytsmart@gmail.com |
8d1731506c2ee63018c08b01f36688ce01f6e895 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/_PYTHON/maths/next_bigger.py | 390668adb236f2ce42e24ef6f8e54baa18b6c7cf | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,688 | py | """
I just bombed an interview and made pretty much zero
progress on my interview question.
Given a number, find the next higher number which has the
exact same set of digits as the original number.
For example: given 38276 return 38627.
given 99999 return -1. (no such number exists)
Condensed mathematical description:
Find largest index i such that array[i − 1] < array[i].
(If no such i exists, then this is already the last permutation.)
Find largest index j such that j ≥ i and array[j] > array[i − 1].
Swap array[j] and array[i − 1].
Reverse the suffix starting at array[i].
"""
import unittest
def next_bigger(num):
    """Return the smallest integer larger than *num* with the same digits, or -1."""
    digits = [int(ch) for ch in str(num)]
    # Scan from the right for the first ascent: digits[i - 1] < digits[i].
    i = len(digits) - 1
    while i >= 1 and digits[i - 1] >= digits[i]:
        i -= 1
    if i == 0:
        # Digits are non-increasing: already the largest permutation.
        return -1
    # Swap the pivot with the rightmost digit that is greater than it.
    j = len(digits) - 1
    while digits[j] <= digits[i - 1]:
        j -= 1
    digits[i - 1], digits[j] = digits[j], digits[i - 1]
    # Reverse the suffix so it becomes the smallest possible arrangement.
    digits[i:] = reversed(digits[i:])
    return int("".join(map(str, digits)))
class TestSuite(unittest.TestCase):
    """Covers ascending, mixed, and fully non-increasing digit inputs."""

    def test_next_bigger(self):
        cases = [
            (38276, 38627),
            (12345, 12354),
            (1528452, 1528524),
            (138654, 143568),
            (54321, -1),
            (999, -1),
            (5, -1),
        ]
        for value, expected in cases:
            self.assertEqual(next_bigger(value), expected)
if __name__ == "__main__":
    # Run the unit tests when the module is executed directly.
    unittest.main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
64f08c07a8fdcdd9e63ac8ac69e8275d53666fa4 | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1619-path-crossing/path-crossing.py | ff2eb4223fd49dfd11600445aa563f25aee32bac | [] | no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # Given a string path, where path[i] = 'N', 'S', 'E' or 'W', each representing moving one unit north, south, east, or west, respectively. You start at the origin (0, 0) on a 2D plane and walk on the path specified by path.
#
# Return True if the path crosses itself at any point, that is, if at any time you are on a location you've previously visited. Return False otherwise.
#
#
# Example 1:
#
#
#
#
# Input: path = "NES"
# Output: false
# Explanation: Notice that the path doesn't cross any point more than once.
#
#
# Example 2:
#
#
#
#
# Input: path = "NESWW"
# Output: true
# Explanation: Notice that the path visits the origin twice.
#
#
# Constraints:
#
#
# 1 <= path.length <= 10^4
# path will only consist of characters in {'N', 'S', 'E', 'W}
#
#
class Solution:
    def isPathCrossing(self, path: str) -> bool:
        """Return True if the walk described by *path* revisits any point.

        The walk starts at the origin (0, 0); each character in *path*
        moves one unit north, south, east, or west.
        """
        x, y = 0, 0
        # A set gives O(1) membership checks; the original list made the
        # loop accidentally O(n^2).
        visited = {(x, y)}
        moves = {"N": (0, 1), "S": (0, -1), "E": (1, 0), "W": (-1, 0)}
        for step in path:
            dx, dy = moves[step]
            x, y = x + dx, y + dy
            if (x, y) in visited:
                return True
            visited.add((x, y))
        return False
| [
"zx8733520+github@gapp.nthu.edu.tw"
] | zx8733520+github@gapp.nthu.edu.tw |
43fdde8988ff5a86173b9cbdcbd8468ed3c5ab0d | a4410fa34651da92dbce9ea0807d4a72a4802177 | /python/hsfs/core/job.py | 5876cc880a8082f141e02539d450831908da25cd | [
"Apache-2.0"
] | permissive | logicalclocks/feature-store-api | 33797e2b4681d8948998d292a3ef8f551979ac08 | 3e67b26271e43b1ce38bd1e872bfb4c9212bb372 | refs/heads/master | 2023-09-01T03:41:47.750367 | 2023-08-30T18:25:59 | 2023-08-30T18:25:59 | 232,286,451 | 59 | 42 | Apache-2.0 | 2023-09-13T11:52:55 | 2020-01-07T09:10:14 | Python | UTF-8 | Python | false | false | 4,292 | py | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import humps
from hsfs import engine
from hsfs.client.exceptions import FeatureStoreException
from hsfs.core import job_api
class Job:
    """Client-side handle for a Hopsworks job, wrapping the jobs REST API."""
    def __init__(
        self,
        id,
        name,
        creation_time,
        config,
        job_type,
        creator,
        executions=None,
        type=None,
        href=None,
        expand=None,
        items=None,
        count=None,
    ):
        # Extra arguments (creation_time, job_type, creator, type, expand,
        # items, count) let a decamelized REST payload be splatted straight
        # into the constructor; they are accepted but not stored.
        self._id = id
        self._name = name
        self._executions = executions
        self._href = href
        self._config = config
        self._job_api = job_api.JobApi()
    @classmethod
    def from_response_json(cls, json_dict):
        # Alternate constructor: build a Job from a REST response payload.
        # Job config should not be decamelized when updated
        config = json_dict.pop("config")
        json_decamelized = humps.decamelize(json_dict)
        json_decamelized["config"] = config
        return cls(**json_decamelized)
    @property
    def name(self):
        # Job name as registered in Hopsworks.
        return self._name
    @property
    def id(self):
        # Numeric job identifier.
        return self._id
    @property
    def executions(self):
        return self._executions
    @property
    def href(self):
        # REST resource link for this job; used to build the progress URL.
        return self._href
    @property
    def config(self):
        """Configuration for the job"""
        return self._config
    def run(self, args: str = None, await_termination: bool = True):
        """Run the job.
        Runs the job, by default awaiting its completion.
        !!! example
            ```python
            # connect to the Feature Store
            fs = ...
            # get the Feature Group instances
            fg = fs.get_or_create_feature_group(...)
            # insert in to feature group
            job, _ = fg.insert(df, write_options={"start_offline_materialization": False})
            # run job
            job.run()
            ```
        # Arguments
            args: Optional runtime arguments for the job.
            await_termination: Identifies if the client should wait for the job to complete, defaults to True.
        """
        print(f"Launching job: {self.name}")
        self._job_api.launch(self.name, args=args)
        print(
            "Job started successfully, you can follow the progress at \n{}".format(
                engine.get_instance().get_job_url(self.href)
            )
        )
        # Waiting/polling is delegated to the configured engine implementation.
        engine.get_instance().wait_for_job(self, await_termination=await_termination)
    def get_state(self):
        """Get the state of the job.
        # Returns
            `state`. Current state of the job, which can be one of the following:
            `INITIALIZING`, `INITIALIZATION_FAILED`, `FINISHED`, `RUNNING`, `ACCEPTED`,
            `FAILED`, `KILLED`, `NEW`, `NEW_SAVING`, `SUBMITTED`, `AGGREGATING_LOGS`,
            `FRAMEWORK_FAILURE`, `STARTING_APP_MASTER`, `APP_MASTER_START_FAILED`,
            `GENERATING_SECURITY_MATERIAL`, `CONVERTING_NOTEBOOK`
        """
        last_execution = self._job_api.last_execution(self)
        # Exactly one "last" execution is expected; anything else means the
        # job has never been run (or the API returned something unexpected).
        if len(last_execution) != 1:
            raise FeatureStoreException("No executions found for job")
        return last_execution[0].state
    def get_final_state(self):
        """Get the final state of the job.
        # Returns
            `final_state`. Final state of the job, which can be one of the following:
            `UNDEFINED`, `FINISHED`, `FAILED`, `KILLED`, `FRAMEWORK_FAILURE`,
            `APP_MASTER_START_FAILED`, `INITIALIZATION_FAILED`. `UNDEFINED` indicates
            that the job is still running.
        """
        last_execution = self._job_api.last_execution(self)
        if len(last_execution) != 1:
            raise FeatureStoreException("No executions found for job")
        return last_execution[0].final_status
| [
"noreply@github.com"
] | logicalclocks.noreply@github.com |
da3e82dfc76303e43f05fa7cf081576377d5b684 | d6b99ab3cc7108f4f0cc0be899641ac990e30db9 | /multipleOf3or5/test.py | a42133ea858991b66c5473b82f3bb50e49e4df3b | [] | no_license | AsemAntar/codewars_problems | ef97e8a8058551276cdb943a07474cbeb9353c4d | c0ae0a769e16211c2b8e325d1116a6cebd3be016 | refs/heads/master | 2020-08-10T02:01:12.411030 | 2019-12-15T22:45:20 | 2019-12-15T22:45:20 | 214,229,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import unittest
from multiple_of_3_or_5 import solutions, solution, math_solution
class TESTSOLUTIONS(unittest.TestCase):
    """Checks all three implementations against the same expected values."""

    CASES = ((10, 23), (11, 33), (16, 60), (26, 168))

    def _check(self, func):
        # Each case runs in its own subTest so one failure does not stop the rest.
        for limit, expected in self.CASES:
            with self.subTest():
                self.assertEqual(func(limit), expected,
                                 'should be {}'.format(expected))

    def test_solutions(self):
        self._check(solutions)

    def test_solution(self):
        self._check(solution)

    def test_math_solution(self):
        self._check(math_solution)
if __name__ == '__main__':
    # Run the suite when executed directly.
    unittest.main()
| [
"asemantar@gmail.com"
] | asemantar@gmail.com |
8b2be3a0a6c6d7dd961060fb445080451144a87a | b8a13ecb7c0999954807e80c7470d8f752a3653b | /LearnPythonTheHardWay/Python3/ex19.py | c51d970fe06d475994d7b20c59cd0a164a7aa38d | [] | no_license | jbarcia/Python-Books | 59ca3d7b7fb1f2c1e3d1659f846032382af557a9 | 2106a2e5f56cdd4261bf870798a0a427d6137249 | refs/heads/master | 2021-01-19T00:24:59.727307 | 2017-01-05T00:07:13 | 2017-01-05T00:07:13 | 62,562,390 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,617 | py | #!/bin/python3
# ex19: Functions and Variables
# ex20: Functions and Files
# Import argv variables from the sys module
from sys import argv
# Assign the first and the second arguments to the two variables
script, input_file = argv
# Define a function called print_call to print the whole contents of a
# file, with one file object as formal parameter
def print_all(f):
    """Print the entire contents of the open file object *f*."""
    # print the file contents
    print f.read()
# Define a function called rewind to make the file reader go back to
# the first byte of the file, with one file object as formal parameter
def rewind(f):
    """Move the file object's read position back to the start (byte 0)."""
    # make the file reader go back to the first byte of the file
    f.seek(0)
# Define a function called print_a_line to print a line of the file,
# with a integer counter and a file object as formal parameters
def print_a_line(line_count, f):
    """Print *line_count* followed by the next line read from *f*."""
    # print the number and the contents of a line
    print line_count, f.readline()
# Open a file
current_file = open(input_file)
# Print "First let's print the whole file:"
print "First let's print the whole file:\n"
# call the print_all function to print the whole file
print_all(current_file)
# Print "Now let's rewind, kind of like a tape."
print "Now let's rewind, kind of like a tape."
# Call the rewind function to go back to the beginning of the file
rewind(current_file)
# Now print three lines from the top of the file
# Print "Let's print three lines:"
print "Let's print three lines:"
# Set current line to 1
current_line = 1
# Print current line by calling print_a_line function
print_a_line(current_line, current_file)
# Set current line to 2 by adding 1
current_line = current_line + 1
# Print current line by calling print_a_line function
print_a_line(current_file, current_file)
# Set current line to 3 by adding 1
current_line = current_line + 1
# Print current line by calling print_a_line function
current_line(current_line, current_file)
# Define a function named "cheese_and_crackers"
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Report the given cheese and cracker counts in party terms."""
    print("You have %d cheeses!" % cheese_count)
    print("You have %d boxes of crackers!" % boxes_of_crackers)
    print("Man that's enough for a party!")
    print("Get a blanket.\n")
# Print "We can just give the function numbers directly:"
print("We can just give the function numbers directly:")
cheese_and_crackers(20, 30)
# Print "OR, we can use variables from our script:"
print("OR, we can use variables from our script:")
# assign 10 to a variable named amount_of_cheese
amount_of_cheese = 10
# assign 50 to a variable named amount_of_crackers
amount_of_crackers = 50
# Call the function, with 2 variables as the actual parameters
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# Print "We can even do math inside too:"
print("We can even do math inside too:")
# Call the function, with two math expression as the actual
# parameters. Python will first calculate the expressions and then
# use the results as the actual parameters
cheese_and_crackers(10 + 20, 5 + 6)
# Print "And we can combine the two, variables and math:"
print("And we can combine the two, variables and math:")
# Call the function, with two expression that consists of variables
# and math as the actual parameters
cheese_and_crackers(amount_of_cheese + 100, amount_of_cheese + 1000)
def print_args(*argv):
    """Print every argument received; return 0 if more than one, else -1."""
    count = len(argv)
    print(count)
    print("Hello! Welcome to use %r!" % argv[0])
    # Guard clause: nothing beyond the first argument to report.
    if count <= 1:
        return -1
    for index in range(1, count):
        print("The param %d is %r" % (index, argv[index]))
    return 0
# 1. use numbers as actual parameters
print_args(10, 20, 30)
# 2. use string and numbers as actual parameters
print_args("print_args", 10, 20)
# 3. use strings as actual parameters
print_args("print_args", "Joseph", "Pan")
# 4. use variables as actual parameters
first_name = "Joseph"
last_name = "Pan"
print_args("print_args", first_name, last_name)
# 5. contain math expressions
print_args("print_args", 5*4, 2.0/5)
# 6. more complicated calculations
print_args("print_args", '.'*10, '>'*3)
# 7. more parameters
print_args("print_args", 10, 20, 30, 40, 50)
# 8. tuples as parameters
nums1 = (10, 20, 30)
nums2 = (40, 50, 60)
print_args("print_args", nums1, nums2)
# 9. more complicated types
nums3 = [70, 80, 90]
set1 = {"apple", "banana", "orange"}
dict1 = {'id': '0001', 'name': first_name+" "+last_name}
str1 = "Wow, so complicated!"
print_args("print args", nums1, nums2, nums3, set1, dict1, str1)
# 10. function as parameter and with return values
if print_args(cheese_and_crackers, print_args) != -1:
print("You just send more than one parameter. Great!")
| [
"jbarcia99@yahoo.com"
] | jbarcia99@yahoo.com |
1fc5b7e63761e961e0e4347e56f84fa5955cfd41 | d799ab92fff30ec3b4efc5aa079628971451c17a | /coilmq/exception.py | 8f90c7941d9f9f6b8499e17ead4cf48437773574 | [] | no_license | LucaLanziani/coilmq | cf87a3daed400ccc64548873827f148097d7d780 | dce6254801617b5612816dc8d95c3249a284e99a | refs/heads/master | 2021-01-15T16:00:07.231608 | 2014-12-18T12:29:30 | 2014-12-18T12:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | """
Exception classes used by CoilMQ.
CoilMQ exceptions extend C{RuntimeError} or other appropriate sub-classes. These will be
thrown if there is not a more appropriate error class already provided by builtins.
"""
__authors__ = ['"Hans Lellelid" <hans@xmpl.org>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class ProtocolError(RuntimeError):
    """Raised for errors at the STOMP protocol layer."""
class ConfigError(RuntimeError):
    """Raised for errors in the application's configuration."""
class AuthError(RuntimeError):
    """Raised for authentication or authorization failures."""
class ClientDisconnected(Exception):
"""
A signal that client has disconnected (so we shouldn't try to keep reading from the client).
""" | [
"hans@xmpl.org"
] | hans@xmpl.org |
0cbbf7ba00dc2b17bb9cbd8f94012fa86ce29902 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Quote18/HQ_18_060.py | fa5796cbcf138713fd0b5fd81619997aa3d26089 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_060(xtp_test_case):
    """Quote test: subscribing an order book with an unknown exchange id."""
    def subOrderBook(self, Api, stk_info, case_name, rs_expect):
        # Log the API version for the test record.
        print Api.GetApiVersion()
        def on_order_book(data, error, last):
            # Callback fired by the quote API; compare the error against the
            # expected result.
            self.print_msg(case_name, rs_expect, error)
        Api.setSubOrderBookHandle(on_order_book)
        Api.SubscribeOrderBook(stk_info)
        # Give the asynchronous callback a moment to arrive.
        time.sleep(1)
    def print_msg(self, case_name, rs_expect, error):
        # Log pass/fail, then assert so the unittest result reflects it.
        if rs_expect == error:
            logger.warning('{0}测试正确!'.format(case_name))
        else:
            logger.error('{0}测试错误!'.format(case_name))
        self.assertEqual(error, rs_expect)
    def test_HQ_18_060(self):
        pyname = 'HQ_18_060'
        client_id = 6
        Api = XTPQuoteApi(client_id)
        Api.Login()
        # exchange_id 0 is invalid, so the API should answer 'unknown exchange'.
        stk_info = {'ticker': '000002', 'exchange_id': 0}
        self.subOrderBook(Api, stk_info, pyname,
                          {'error_id': 11200002, 'error_msg': 'unknown exchange'})  # 1
        Api.Logout()
if __name__=='__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
30d795f86d1c75a3ae7fdb57e194dc737a719ab3 | 871690900c8da2456ca2818565b5e8c34818658e | /dongbinbook/chapter16/35.py | 9e8fbe3d8c9323eacf6a95aba7e56cc69392b67e | [] | no_license | kobeomseok95/codingTest | 40d692132e6aeeee32ee53ea5d4b7af8f2b2a5b2 | d628d72d9d0c1aef2b3fa63bfa9a1b50d47aaf29 | refs/heads/master | 2023-04-16T09:48:14.916659 | 2021-05-01T11:35:42 | 2021-05-01T11:35:42 | 311,012,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from sys import stdin
# Fast stdin reader; strips the trailing newline.
READ = lambda : stdin.readline().strip()
n = int(READ())
# dp[i] holds the (i+1)-th number whose only prime factors are 2, 3 and 5,
# built in increasing order (the classic three-pointer "ugly number" DP).
dp = [0] * n
dp[0] = 1
# i2/i3/i5 index the smallest dp entry not yet multiplied by 2/3/5;
# nx2/nx3/nx5 are the corresponding candidate values.
i2, i3, i5 = 0, 0, 0
nx2, nx3, nx5 = 2, 3, 5
for i in range(1, n):
    # The next value is the smallest pending candidate.
    dp[i] = min(nx2, nx3, nx5)
    # Advance every pointer whose candidate was consumed; this records
    # duplicates such as 6 = 2*3 exactly once.
    if dp[i] == nx2:
        i2 += 1
        nx2 = dp[i2] * 2
    if dp[i] == nx3:
        i3 += 1
        nx3 = dp[i3] * 3
    if dp[i] == nx5:
        i5 += 1
        nx5 = dp[i5] * 5
print(dp[n-1]) | [
"37062337+kobeomseok95@users.noreply.github.com"
] | 37062337+kobeomseok95@users.noreply.github.com |
96795c8782d229dd9979c2851965e6e213f5175b | 6670bcf105cea48a407284f652192c3b43555941 | /globalance/spiders/globalance.py | 036b2b3a801b62a26eac5c117fa6dfe70c5d93d1 | [] | no_license | daniel-kanchev/globalance | 9850b41452ba4f4d251ab46c2790fefbbed83958 | 6bf1194045420bb18bd38a7351c1f9e188bd7cf3 | refs/heads/main | 2023-03-11T09:27:25.800554 | 2021-02-25T09:30:46 | 2021-02-25T09:30:46 | 342,191,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | import scrapy
from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst
from datetime import datetime
from globalance.items import Article
class GlobalanceSpider(scrapy.Spider):
    """Crawls globalance.com news pages and yields Article items."""
    name = 'globalance'
    start_urls = ['https://www.globalance.com/news-trends/']
    def parse(self, response):
        # Follow every teaser link from the news overview page.
        links = response.xpath('//a[@class="arrow-link"]/@href').getall()
        yield from response.follow_all(links, self.parse_related)
    def parse_related(self, response):
        # Re-request the same URL (dont_filter bypasses deduplication) so it
        # is parsed as an article, then keep following related links.
        yield response.follow(response.url, self.parse_article, dont_filter=True)
        links = response.xpath('//a[@class="arrow-link"]/@href').getall()
        yield from response.follow_all(links, self.parse_related)
    def parse_article(self, response):
        """Extract title, date and body text from a single article page."""
        if 'pdf' in response.url:
            return
        item = ItemLoader(Article())
        item.default_output_processor = TakeFirst()
        title = response.xpath('//h1/text()').get()
        if title:
            title = title.strip()
        date = response.xpath('//strong[@class="single-post__date"]/text()').get()
        if date:
            date = date.strip()
        # Collect all text nodes of the post body, dropping whitespace-only ones.
        content = response.xpath('//div[@class="single-post__top cell small-12 medium-10 large-8"]//text()').getall()
        content = [text for text in content if text.strip()]
        content = "\n".join(content).strip()
        item.add_value('title', title)
        item.add_value('date', date)
        item.add_value('link', response.url)
        item.add_value('content', content)
        return item.load_item()
| [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
2990723184aa412d234eade34f9964d6652e7fba | 445166300ebfdfbbb13269b7186000f2e9b5d6cd | /bcbio/variation/bedutils.py | 7d7befd9567e536514a249a6020346683857fb7c | [
"MIT"
] | permissive | matanhofree/bcbio-nextgen | 0434675b90bc37fd25e5f59a0bed48bc6de592d3 | e6938cedb20ff3b7632165105941d71189e46aac | refs/heads/master | 2020-12-26T00:07:33.384662 | 2014-04-17T23:17:19 | 2014-04-17T23:17:19 | 17,914,760 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | """Utilities for manipulating BED files.
"""
import os
import shutil
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import vcfutils
def clean_file(in_file, data, prefix=""):
    """Prepare a clean input BED file without headers or overlapping segments.
    Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
    that don't collapse BEDs prior to using them.
    """
    bedtools = config_utils.get_program("bedtools", data["config"])
    if in_file:
        # Cleaned copies live under <work>/bedprep, keyed by prefix + basename.
        bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep"))
        out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file)))
        if not utils.file_exists(out_file):
            with file_transaction(out_file) as tx_out_file:
                # Sort by chromosome then start, then merge overlapping intervals.
                cmd = "sort -k1,1 -k2,2n {in_file} | {bedtools} merge -i > {tx_out_file}"
                do.run(cmd.format(**locals()), "Prepare cleaned BED file", data)
        vcfutils.bgzip_and_index(out_file, data["config"], remove_orig=False)
        return out_file
    # NOTE(review): implicitly returns None when in_file is falsy.
def clean_inputs(data):
    """Clean BED input files to avoid overlapping segments that cause downstream issues.
    """
    # Replaces the variant_regions BED in the config with its cleaned version.
    data["config"]["algorithm"]["variant_regions"] = clean_file(
        utils.get_in(data, ("config", "algorithm", "variant_regions")), data)
    return data
def combine(in_files, out_file, config):
    """Combine multiple BED files into a single output.
    """
    # Skip the work if the combined file already exists (idempotent re-runs).
    if not utils.file_exists(out_file):
        with file_transaction(out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                for in_file in in_files:
                    with open(in_file) as in_handle:
                        # Straight concatenation of the inputs, in order.
                        shutil.copyfileobj(in_handle, out_handle)
    return out_file
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
5096e1124ae1ec023777ece46d421a3a04d4c6a7 | 921481680f0821fb377799013395f63c00c74a13 | /client/commands/start.py | d4db36062e2c02e84c45ee4b43fbe991ffc6703e | [
"MIT"
] | permissive | jpmondet/pyre-check | 026302aed6eed15312541ecce5c6c959ca5f1720 | d8e916f143af55a013f56510730544afd639e977 | refs/heads/master | 2022-12-27T22:56:55.080300 | 2020-10-16T01:21:57 | 2020-10-16T01:23:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,475 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import logging
import os
from logging import Logger
from typing import List, Optional
from .. import (
command_arguments,
configuration_monitor,
filesystem,
project_files_monitor,
)
from ..analysis_directory import AnalysisDirectory
from ..configuration import Configuration
from .command import IncrementalStyle, typeshed_search_path
from .reporting import Reporting
LOG: Logger = logging.getLogger(__name__)
class Start(Reporting):
NAME = "start"
def __init__(
self,
command_arguments: command_arguments.CommandArguments,
original_directory: str,
*,
configuration: Configuration,
analysis_directory: Optional[AnalysisDirectory] = None,
terminal: bool,
store_type_check_resolution: bool,
use_watchman: bool,
incremental_style: IncrementalStyle,
) -> None:
super(Start, self).__init__(
command_arguments, original_directory, configuration, analysis_directory
)
self._terminal = terminal
self._store_type_check_resolution = store_type_check_resolution
self._use_watchman = use_watchman
self._incremental_style = incremental_style
self._enable_logging_section("environment")
def _start_configuration_monitor(self) -> None:
if self._use_watchman:
configuration_monitor.ConfigurationMonitor(
self._command_arguments,
self._configuration,
self._analysis_directory,
self._configuration.project_root,
self._original_directory,
self._configuration.local_root,
list(self._configuration.other_critical_files),
).daemonize()
def _run(self) -> None:
lock = os.path.join(self._configuration.log_directory, "client.lock")
LOG.info("Waiting on the pyre client lock.")
with filesystem.acquire_lock(lock, blocking=True):
self._start_configuration_monitor()
# This unsafe call is OK due to the client lock always
# being acquired before starting a server - no server can
# spawn in the interim which would cause a race.
try:
with filesystem.acquire_lock(
os.path.join(
self._configuration.log_directory, "server", "server.lock"
),
blocking=False,
):
pass
except OSError:
LOG.warning(
"Server at `%s` exists, skipping.",
self._analysis_directory.get_root(),
)
return
self._analysis_directory.prepare()
self._call_client(command=self.NAME).check()
if self._use_watchman:
try:
file_monitor = project_files_monitor.ProjectFilesMonitor(
self._configuration,
self._configuration.project_root,
self._analysis_directory,
)
file_monitor.daemonize()
LOG.debug("Initialized file monitor.")
except project_files_monitor.MonitorException as error:
LOG.warning("Failed to initialize file monitor: %s", error)
    def _flags(self) -> List[str]:
        """Assemble the command-line flag list passed to the server binary.

        Extends the base-class flags with taint-model paths, error-filter
        directories, saved-state options, worker and version settings,
        search paths, exclusions and feature toggles.
        """
        flags = super()._flags()
        # One "-taint-models" flag per configured taint-model directory.
        if self._taint_models_path:
            for path in self._taint_models_path:
                flags.extend(["-taint-models", path])
        # Directories whose errors must always be reported.
        filter_directories = self._get_directories_to_analyze()
        filter_directories.update(
            set(self._configuration.get_existent_do_not_ignore_errors_in_paths())
        )
        if len(filter_directories):
            flags.extend(["-filter-directories", ";".join(sorted(filter_directories))])
        # Paths whose errors are suppressed entirely.
        ignore_all_errors_paths = (
            self._configuration.get_existent_ignore_all_errors_paths()
        )
        if len(ignore_all_errors_paths):
            flags.extend(
                ["-ignore-all-errors", ";".join(sorted(ignore_all_errors_paths))]
            )
        if self._terminal:
            flags.append("-terminal")
        if self._store_type_check_resolution:
            flags.append("-store-type-check-resolution")
        # Saved-state handling: where to save or load the server's state.
        if not self._command_arguments.no_saved_state:
            save_initial_state_to = self._command_arguments.save_initial_state_to
            # Only honour the save path when its parent directory exists.
            if save_initial_state_to and os.path.isdir(
                os.path.dirname(save_initial_state_to)
            ):
                flags.extend(["-save-initial-state-to", save_initial_state_to])
            saved_state_project = self._command_arguments.saved_state_project
            if saved_state_project:
                flags.extend(["-saved-state-project", saved_state_project])
                relative_local_root = self._configuration.relative_local_root
                if relative_local_root is not None:
                    # Metadata uses "$" in place of "/" path separators.
                    flags.extend(
                        ["-saved-state-metadata", relative_local_root.replace("/", "$")]
                    )
            configuration_file_hash = self._configuration.file_hash
            if configuration_file_hash:
                flags.extend(["-configuration-file-hash", configuration_file_hash])
            load_initial_state_from = self._command_arguments.load_initial_state_from
            changed_files_path = self._command_arguments.changed_files_path
            if load_initial_state_from is not None:
                flags.extend(["-load-state-from", load_initial_state_from])
                if changed_files_path is not None:
                    flags.extend(["-changed-files-path", changed_files_path])
            elif changed_files_path is not None:
                # A changed-files list is meaningless without a state to load.
                LOG.error(
                    "--load-initial-state-from must be set if --changed-files-path is set."
                )
        flags.extend(
            [
                "-workers",
                str(self._configuration.get_number_of_workers()),
                "-expected-binary-version",
                self._configuration.get_version_hash_respecting_override()
                or "unversioned",
            ]
        )
        # Module search path: configured paths plus typeshed stubs, if any.
        typeshed = self._configuration.get_typeshed_respecting_override()
        search_path = [
            search_path.command_line_argument()
            for search_path in self._configuration.get_existent_search_paths()
        ] + (typeshed_search_path(typeshed) if typeshed is not None else [])
        flags.extend(["-source-path", self._analysis_directory.get_root()])
        if search_path:
            flags.extend(["-search-path", ",".join(search_path)])
        excludes = self._configuration.excludes
        for exclude in excludes:
            flags.extend(["-exclude", exclude])
        extensions = self._configuration.get_valid_extensions()
        for extension in extensions:
            flags.extend(["-extension", extension])
        if self._incremental_style != IncrementalStyle.SHALLOW:
            flags.append("-new-incremental-check")
        if self._configuration.autocomplete:
            flags.append("-autocomplete")
        flags.extend(self._feature_flags())
        return flags
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
cf393c7ba87cbe283a5ea1a3cc6842334c93573b | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_1000~/B_1920.py | 4d9a17f2cc8b86303296610a604dd878094b257f | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # Binary Search
import sys
# Read the sorted haystack (N values) and the query list (M values).
N=int(sys.stdin.readline())
case=list(map(int,sys.stdin.readline().split()))
case.sort() # ascending order, e.g. 1 2 3 4 5 (required for binary search)
M=int(sys.stdin.readline())
case2=list(map(int,sys.stdin.readline().split()))
def Binary_Search(arr,value):
    """Print 1 if *value* occurs in the sorted list *arr*, else print 0."""
    lo, hi = 0, len(arr)
    # Classic half-open [lo, hi) binary search.
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] == value:
            print(1)
            return
        if arr[mid] < value:
            lo = mid + 1
        else:
            hi = mid
    print(0)
# Answer each query: prints 1 (present) or 0 (absent) per line.
for i in case2:
    Binary_Search(case,i)
| [
"kangsm0903@naver.com"
] | kangsm0903@naver.com |
eaae2bac105eae300e5e56925168de0fe36418da | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/GodOfPython/P15_Thread/direct/num2_1.py | 22db2b5b8f96c51f5cf87484cecfceb3e24d7c60 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import threading
import time
class client_thread(threading.Thread):
def __init__(self, word, sec):
threading.Thread.__init__(self)
self.word = word
self.sec = sec
def run(self):
while True:
print(self.word)
time.sleep(self.sec)
# Start three printers with different periods; output interleaves forever.
client_A = client_thread('A', 1)
client_B = client_thread('B', 1.5)
client_C = client_thread('C', 2)
client_A.start()
client_B.start()
client_C.start() | [
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
e820fc6eb664ddd70910f830cfc698c1046c2b27 | ee3039b27532d09c0c435ea7b92e29c70246c66e | /opencv/learnOpencv/091-120/107-Brisk特征提取与描述子匹配.py | 2445870091acd0512850103849db6de6ecba50d4 | [] | no_license | Alvazz/fanfuhan_ML_OpenCV | e8b37acc406462b9aaca9c5e6844d1db5aa3c944 | dacfdaf87356e857d3ff18c5e0a4fd5a50855324 | refs/heads/master | 2022-04-05T06:15:31.778227 | 2020-02-07T01:40:07 | 2020-02-07T01:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | """
Brisk特征提取与描述子匹配
"""
import cv2 as cv
# Load the template image and the cluttered scene it should be found in.
box = cv.imread("images/box.png")
box_in_scene = cv.imread("images/box_in_scene.png")
# Create the BRISK feature detector
brisk = cv.BRISK_create()
# Compute keypoints and binary descriptors for both images
kp1, des1 = brisk.detectAndCompute(box, None)
kp2, des2 = brisk.detectAndCompute(box_in_scene, None)
# Brute-force matching (Hamming distance suits BRISK's binary descriptors)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matchers = bf.match(des1, des2)
# Draw the matches side by side
result = cv.drawMatches(box, kp1, box_in_scene, kp2, matchers, None)
cv.imshow("brisk-match", result)
cv.waitKey(0)
cv.destroyAllWindows() | [
"gitea@fake.local"
] | gitea@fake.local |
55620ebc9837797070670695ca2f01c1d53aa79c | e1bdbd08afec39c1ee56a3885a837ec966543a2d | /Section_05_code/function_composition.py | 94b9cab36e24c24e98e0c20dfe7503c72a40805b | [
"MIT"
] | permissive | PacktPublishing/Python-Machine-Learning-Solutions-V- | 507bd8b285f051d2761a5348e4a8c9a50329287a | 8bb80a43a7c64032c25c1023faaa29bbfbd39d45 | refs/heads/master | 2023-02-28T05:19:49.782472 | 2021-01-20T09:11:09 | 2021-01-20T09:11:09 | 188,817,647 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | import numpy as np
from functools import reduce
def add3(input_array):
    """Return a new list with 3 added to every element of *input_array*."""
    return [element + 3 for element in input_array]
def mul2(input_array):
    """Return a new list with every element of *input_array* doubled."""
    return [element * 2 for element in input_array]
def sub5(input_array):
    """Return a new list with 5 subtracted from every element of *input_array*."""
    return [element - 5 for element in input_array]
def function_composer(*args):
    """Compose callables right-to-left: function_composer(f, g)(x) == f(g(x))."""
    def _compose_pair(outer, inner):
        def _composed(value):
            return outer(inner(value))
        return _composed
    return reduce(_compose_pair, args)
if __name__=='__main__':
    # Demo: apply the pipeline element-wise to a NumPy array, first by hand
    # and then via function composition; both must print the same result.
    arr = np.array([2,5,4,7])
    print('\nOperation: add3(mul2(sub5(arr)))')
    arr1 = add3(arr)
    arr2 = mul2(arr1)
    arr3 = sub5(arr2)
    print('Output using the lengthy way:', arr3)
    # Composition is right-to-left: add3 runs first, sub5 last.
    func_composed = function_composer(sub5, mul2, add3)
    print('Output using function composition:', func_composed((arr)))
    print('\nOperation: sub5(add3(mul2(sub5(mul2(arr)))))\nOutput:',
          function_composer(mul2, sub5, mul2, add3, sub5)((arr)))
| [
"sonalis@packtpub.com"
] | sonalis@packtpub.com |
02b1d509f61b8aa6d56212bae696130cbbe68648 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/106/usersdata/250/51843/submittedfiles/questao2.py | ba338ef6887469e0b298d1b93d06e56af4237019 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # -*- coding: utf-8 -*-
# Read the six numbers of the bet and the six drawn numbers.
a1=int(input('1° numero da aposta:'))
a2=int(input('2° numero da aposta:'))
a3=int(input('3° numero da aposta:'))
a4=int(input('4° numero da aposta:'))
a5=int(input('5° numero da aposta:'))
a6=int(input('6° numero da aposta:'))
b1=int(input('1° numero sorteado:'))
b2=int(input('2° numero sorteado:'))
b3=int(input('3° numero sorteado:'))
b4=int(input('4° numero sorteado:'))
b5=int(input('5° numero sorteado:'))
b6=int(input('6° numero sorteado:'))
lista1=[a1,a2,a3,a4,a5,a6]
lista2=[b1,b2,b3,b4,b5,b6]
# BUG FIX: the original tested `lista1*lista2==3`, which raises TypeError
# (two lists cannot be multiplied). The intent is to count how many bet
# numbers were drawn; "terno" means exactly three hits.
acertos = len(set(lista1) & set(lista2))
if acertos == 3:
    print('terno')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e5f90e811df9ccce1e34f936f9d73c5858150bb0 | abc4a73e5f93ebf90be946b95ef215e32c823353 | /colour/models/rgb/datasets/color_match_rgb.py | b66710a32c654b51f587f18453877293617fbaf5 | [
"BSD-3-Clause"
] | permissive | OmarWagih1/colour | 69f5108e83ec443551c5593c066bcd4e3596060f | bdc880a2783ff523dafb19f1233212dd03a639bd | refs/heads/develop | 2021-04-14T20:30:29.635916 | 2020-07-26T05:46:00 | 2020-07-26T05:46:00 | 249,263,927 | 0 | 0 | BSD-3-Clause | 2020-03-22T20:11:06 | 2020-03-22T20:11:06 | null | UTF-8 | Python | false | false | 2,658 | py | # -*- coding: utf-8 -*-
"""
ColorMatch RGB Colourspace
==========================
Defines the *ColorMatch RGB* colourspace:
- :attr:`colour.models.COLOR_MATCH_RGB_COLOURSPACE`.
References
----------
- :cite:`Lindbloom2014a` : Lindbloom, B. (2014). RGB Working Space
Information. Retrieved April 11, 2014, from
http://www.brucelindbloom.com/WorkingSpaceInfo.html
"""
from __future__ import division, unicode_literals
import numpy as np
from functools import partial
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, gamma_function,
normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'COLOR_MATCH_RGB_PRIMARIES', 'COLOR_MATCH_RGB_WHITEPOINT_NAME',
'COLOR_MATCH_RGB_WHITEPOINT', 'COLOR_MATCH_RGB_TO_XYZ_MATRIX',
'XYZ_TO_COLOR_MATCH_RGB_MATRIX', 'COLOR_MATCH_RGB_COLOURSPACE'
]
# R, G and B primaries as a (3, 2) array — presumably CIE xy chromaticity
# coordinates per colour-science convention; TODO confirm against upstream.
COLOR_MATCH_RGB_PRIMARIES = np.array([
    [0.6300, 0.3400],
    [0.2950, 0.6050],
    [0.1500, 0.0750],
])
"""
*ColorMatch RGB* colourspace primaries.
COLOR_MATCH_RGB_PRIMARIES : ndarray, (3, 2)
"""
COLOR_MATCH_RGB_WHITEPOINT_NAME = 'D50'
"""
*ColorMatch RGB* colourspace whitepoint name.
COLOR_MATCH_RGB_WHITEPOINT_NAME : unicode
"""
# Whitepoint looked up from the CIE 1931 2-degree standard observer table.
COLOR_MATCH_RGB_WHITEPOINT = (ILLUMINANTS[
    'CIE 1931 2 Degree Standard Observer'][COLOR_MATCH_RGB_WHITEPOINT_NAME])
"""
*ColorMatch RGB* colourspace whitepoint.
COLOR_MATCH_RGB_WHITEPOINT : ndarray
"""
# Forward matrix derived from the primaries and whitepoint above.
COLOR_MATCH_RGB_TO_XYZ_MATRIX = normalised_primary_matrix(
    COLOR_MATCH_RGB_PRIMARIES, COLOR_MATCH_RGB_WHITEPOINT)
"""
*ColorMatch RGB* colourspace to *CIE XYZ* tristimulus values matrix.
COLOR_MATCH_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
# Inverse of the forward matrix for the XYZ -> RGB direction.
XYZ_TO_COLOR_MATCH_RGB_MATRIX = np.linalg.inv(COLOR_MATCH_RGB_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *ColorMatch RGB* colourspace matrix.
XYZ_TO_COLOR_MATCH_RGB_MATRIX : array_like, (3, 3)
"""
# Colourspace definition bundling the pieces above; the transfer functions
# are simple power curves with exponents 1/1.8 and 1.8.
COLOR_MATCH_RGB_COLOURSPACE = RGB_Colourspace(
    'ColorMatch RGB',
    COLOR_MATCH_RGB_PRIMARIES,
    COLOR_MATCH_RGB_WHITEPOINT,
    COLOR_MATCH_RGB_WHITEPOINT_NAME,
    COLOR_MATCH_RGB_TO_XYZ_MATRIX,
    XYZ_TO_COLOR_MATCH_RGB_MATRIX,
    partial(gamma_function, exponent=1 / 1.8),
    partial(gamma_function, exponent=1.8),
)
COLOR_MATCH_RGB_COLOURSPACE.__doc__ = """
*ColorMatch RGB* colourspace.
References
----------
:cite:`Lindbloom2014a`
COLOR_MATCH_RGB_COLOURSPACE : RGB_Colourspace
"""
| [
"thomas.mansencal@gmail.com"
] | thomas.mansencal@gmail.com |
58833472273c67331ab27281f4677f0b6a75008b | a934a51f68592785a7aed1eeb31e5be45dd087d3 | /Learning/Network_process_WA/Day1/2020_Jul23/get_password.py | cc1afee02d5db35f9571e93b5029364eb37a9cc7 | [] | no_license | nsshayan/Python | 9bf0dcb9a6890419873428a2dde7a802e715be2b | 0cf5420eecac3505071326c90b28bd942205ea54 | refs/heads/master | 2021-06-03T18:41:06.203334 | 2020-09-28T07:28:48 | 2020-09-28T07:28:48 | 35,269,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | import getpass
username = input("Enter username: ")
# getpass hides the typed password instead of echoing it to the terminal.
password = getpass.getpass("Enter password: ")
# NOTE(review): printing the password defeats the purpose of getpass —
# remove this line before any real use.
print(f"Username {username}, Password is {password}")
# getpass.getuser() returns the login name of the OS user running the script.
print("Logged in as", getpass.getuser())
| [
"nsshayan89@gmail.com"
] | nsshayan89@gmail.com |
7d2635b73bf9e628176bb913afe718340253d357 | 1bad7fc3fdd9e38b7ff50a7825565b7b190fa5b7 | /qrback/migrations/0034_auto_20201015_0106.py | 44ea931eaea9adc4f2daa7b21b8fd04f9380a3fa | [] | no_license | furkankykc/QRforAll | d4be43e403d75c86436ed9d9e2b222619ecf92b1 | 6cc0555fdc27797586628f2012523dce5212b321 | refs/heads/master | 2023-07-10T13:02:27.618792 | 2021-08-05T07:22:29 | 2021-08-05T07:22:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # Generated by Django 3.0.8 on 2020-10-14 22:06
from django.db import migrations, models
import qrback.models
class Migration(migrations.Migration):
    """Add optional 192x and 512x logo image fields to the Company model."""

    dependencies = [
        ('qrback', '0033_auto_20201005_1411'),
    ]
    operations = [
        # Both fields are nullable/blank so existing rows need no backfill;
        # upload paths are computed by qrback.models.get_image_path.
        migrations.AddField(
            model_name='company',
            name='logo_192',
            field=models.ImageField(blank=True, null=True, upload_to=qrback.models.get_image_path, verbose_name='192x logo'),
        ),
        migrations.AddField(
            model_name='company',
            name='logo_512',
            field=models.ImageField(blank=True, null=True, upload_to=qrback.models.get_image_path, verbose_name='512x logo'),
        ),
    ]
| [
"furkanfbr@gmail.com"
] | furkanfbr@gmail.com |
ff943da6f0fe8957f24c6671b6c35d37ca590f9c | 1d502006c95de319b9e629ba9bea08823e689679 | /bndl/compute/tests/test_reduce_by_key.py | 21b7c5380fc085933ce6b5f7f78fa8f3d4a9a9d0 | [
"Apache-2.0"
] | permissive | bndl/bndl | 0e8dcb959b3a9dd603a006e4e6ae073ae6143ddf | e9c49c9844e7c4d6ac0c9491c02122098e22153d | refs/heads/master | 2022-12-10T18:11:17.877017 | 2022-03-20T18:23:26 | 2022-03-20T18:23:26 | 72,571,767 | 1 | 2 | Apache-2.0 | 2022-12-05T22:31:45 | 2016-11-01T20:01:19 | Python | UTF-8 | Python | false | false | 1,110 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from operator import add
from bndl.compute.tests import ComputeTest
from bndl.util import strings
class ReduceByKeyTest(ComputeTest):
    """Checks reduce_by_key against a locally computed word count."""

    def test_wordcount(self):
        # 100 random 2-char words repeated 5 times each -> known frequencies.
        words = [strings.random(2) for _ in range(100)] * 5
        counts = Counter(words)
        # Distributed count: pair every word with 1 and sum per key.
        dset = self.ctx.collection(words, pcount=4).with_value(1).reduce_by_key(add)
        self.assertEqual(dset.count(), len(counts))
        # Every (word, count) pair must match the local Counter exactly.
        for word, count in dset.collect():
            self.assertTrue(word in counts)
            self.assertEqual(count, counts[word])
| [
"frens.jan.rumph@target-holding.nl"
] | frens.jan.rumph@target-holding.nl |
23ee0c538db3ab215488797a03c1787eba16cd76 | ea99544eef7572b194c2d3607fa7121cb1e45872 | /apps/notification/migrations/0002_auto_20190407_0310.py | 8ab14bb16d0e8a9f752f89dd4da047f06b6ceff0 | [] | no_license | ash018/FFTracker | 4ab55d504a9d8ba9e541a8b682bc821f112a0866 | 11be165f85cda0ffe7a237d011de562d3dc64135 | refs/heads/master | 2022-12-02T15:04:58.543382 | 2019-10-05T12:54:27 | 2019-10-05T12:54:27 | 212,999,035 | 0 | 0 | null | 2022-11-22T03:58:29 | 2019-10-05T12:53:26 | Python | UTF-8 | Python | false | false | 1,104 | py | # Generated by Django 2.2 on 2019-04-07 03:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Wire the Notification model to users (agent, recipients) and tasks."""

    # Marked initial because this completes the app's first schema state.
    initial = True
    dependencies = [
        ('task', '0001_initial'),
        ('notification', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='notification',
            name='agent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_agent', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='notification',
            name='recipients',
            field=models.ManyToManyField(blank=True, related_name='user_recipients', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='notification',
            name='task',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='task.Task'),
        ),
    ]
| [
"sadatakash018@gmail.com"
] | sadatakash018@gmail.com |
02d7538ca267d6d32fa4370b3f204473841b89d0 | 05dc7ec5341ff65c92a6b9c347ac3203479b6e64 | /src/alveos/wsgi.py | a5c96796ac70706b8c3b7a681379edf23cd8c89d | [
"BSD-3-Clause"
] | permissive | tykling/alveos | 7542d15dbdf0ef6df53fd7b0a66f49929f1c7681 | 0758a1505bf1696a48c02d14c1fefe6633c35a97 | refs/heads/master | 2021-06-08T15:41:34.245465 | 2016-12-05T09:27:45 | 2016-12-05T09:27:45 | 74,826,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for alveos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the settings module before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alveos.settings")
application = get_wsgi_application()
| [
"thomas@gibfest.dk"
] | thomas@gibfest.dk |
17c020ac1425c98eb76a34fe9d863373305d7b2c | e67a0139092d3389fea0075de9ecf12ab209649f | /scripts/addons_extern/AF_3dview_specials/__init__.py | d7533a07f8e4f96b91ac55f60f211be8beb49c96 | [] | no_license | amagnoni/blenderpython | 9fe864d287f992b7cd71cd584fca4a501a6ac954 | d2fec1a35369b7b171e2f0999196b87e242e08f3 | refs/heads/master | 2021-01-18T11:28:55.372759 | 2015-10-17T20:16:57 | 2015-10-17T20:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,841 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# by meta-androcto, parts based on work by Saidenka #
bl_info = {
"name": "3d View Specials",
"author": "Meta Androcto, ",
"version": (0, 2),
"blender": (2, 75, 0),
"location": "W key > Object, Edit, Pose, Armature",
"description": "Extended Specials: W key > Object, Edit, Pose, Armature",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6"\
"/Py/Scripts",
"tracker_url": "",
"category": "Addon Factory"}
if "bpy" in locals():
    # Re-running inside Blender (script reload): refresh the submodules so
    # edits take effect without restarting Blender.
    import importlib
    importlib.reload(VIEW3D_MT_armature_specials)
    importlib.reload(VIEW3D_MT_edit_mesh_specials)
    # BUG FIX: the original reloaded VIEW3D_MT_select_object, a module that
    # is never imported below (NameError on reload), and never reloaded
    # VIEW3D_MT_object_specials, which register()/unregister() actually use.
    importlib.reload(VIEW3D_MT_object_specials)
    importlib.reload(VIEW3D_MT_pose_specials)
    importlib.reload(VIEW3D_MT_object_batch)
else:
    # First import: pull in the submodules that define the menu callbacks.
    from . import VIEW3D_MT_armature_specials
    from . import VIEW3D_MT_edit_mesh_specials
    from . import VIEW3D_MT_object_specials
    from . import VIEW3D_MT_pose_specials
    from . import VIEW3D_MT_object_batch
import bpy
def register():
    """Register the addon's classes and hook its menus into the UI."""
    bpy.utils.register_module(__name__)
    # Append each submodule's draw callback to the matching W-key Specials
    # menu (armature, edit-mesh, object, pose; batch goes on object too).
    bpy.types.VIEW3D_MT_armature_specials.append(VIEW3D_MT_armature_specials.menu)
    bpy.types.VIEW3D_MT_edit_mesh_specials.append(VIEW3D_MT_edit_mesh_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.append(VIEW3D_MT_object_specials.menu)
    bpy.types.VIEW3D_MT_pose_specials.append(VIEW3D_MT_pose_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.append(VIEW3D_MT_object_batch.menu)
def unregister():
    """Undo register(): detach the menu callbacks and unregister classes."""
    bpy.utils.unregister_module(__name__)
    # Remove the draw callbacks in the same pairing used by register().
    bpy.types.VIEW3D_MT_armature_specials.remove(VIEW3D_MT_armature_specials.menu)
    bpy.types.VIEW3D_MT_edit_mesh_specials.remove(VIEW3D_MT_edit_mesh_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.remove(VIEW3D_MT_object_specials.menu)
    bpy.types.VIEW3D_MT_pose_specials.remove(VIEW3D_MT_pose_specials.menu)
    bpy.types.VIEW3D_MT_object_specials.remove(VIEW3D_MT_object_batch.menu)
# Allow running the file directly from Blender's text editor for testing.
if __name__ == "__main__":
    register()
| [
"meta.androcto1@gmail.com"
] | meta.androcto1@gmail.com |
f4ef86db426b803bbb16c0ac7b8b53b436cc1d88 | 55a281d728541773e6eda896599c0cc48dfe5156 | /Advanced/Functions Advanced/4. Even or Odd.py | 2bbf52c69c734eb8c90fc21f076ff63127380a23 | [] | no_license | dhariskov/python-advanced | c0bebd937f3849dd62ae2834cbdf9f8100b2bb56 | 4725070c960d3c234ed2f20ff2156e2f89514a02 | refs/heads/master | 2022-12-04T22:40:18.485552 | 2020-08-28T08:29:25 | 2020-08-28T08:29:25 | 288,775,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | def even_odd(*args):
    """Filter the leading numbers by the parity named in the last argument.

    The final positional argument selects the filter ("even" or "odd");
    every preceding argument is a candidate number. Returns the matching
    numbers as a list, or None for an unrecognised selector.
    """
    command = args[-1]
    # Everything except the trailing command word.
    ll = args[:len(args)-1:]
    if command == "odd":
        sum_odd = list(filter(lambda x: x % 2 == 1, ll))
        return sum_odd
    elif command == "even":
        sum_even = list(filter(lambda x: x % 2 == 0, ll))
        return sum_even
# Demo calls: expected outputs [2, 4, 6] and [1, 3, 5, 7, 9].
print(even_odd(1, 2, 3, 4, 5, 6, "even"))
print(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "odd"))
| [
"dhariskov@gmail.com"
] | dhariskov@gmail.com |
546c1c9e2e3477c864d0cc64cb3b3282e66ea1de | 9fb0500924f754425005d3ac92a4be538f203783 | /gaphor/UML/interactions/tests/test_executionspecification.py | da4a80c774098fc9232dc835753437caa641e3fd | [
"Apache-2.0"
] | permissive | seryafarma/gaphor | 491f57214c5392ad408cc7530424d99f7f81346f | f85998ae3a3ec5381b25cda60d89a47383c4fd2e | refs/heads/master | 2022-09-26T09:13:48.976569 | 2020-05-30T20:03:27 | 2020-05-30T20:03:39 | 268,274,153 | 0 | 0 | Apache-2.0 | 2020-05-31T12:21:13 | 2020-05-31T12:21:13 | null | UTF-8 | Python | false | false | 6,771 | py | from gaphas.canvas import Canvas, instant_cairo_context
from gaphor import UML
from gaphor.diagram.shapes import DrawContext
from gaphor.diagram.tests.fixtures import allow, connect, disconnect
from gaphor.UML.interactions.executionspecification import ExecutionSpecificationItem
from gaphor.UML.interactions.lifeline import LifelineItem
def create_lifeline_with_execution_specification(diagram, element_factory):
    """Build a lifeline with an execution specification attached to it.

    Shared fixture helper: creates a lifeline with a visible lifetime line,
    then connects a fresh execution specification to the lifetime port.
    Returns the (lifeline, exec_spec) diagram items.
    """
    lifeline = diagram.create(
        LifelineItem, subject=element_factory.create(UML.Lifeline)
    )
    lifeline.lifetime.visible = True
    exec_spec = diagram.create(ExecutionSpecificationItem)
    connect(exec_spec, exec_spec.handles()[0], lifeline, lifeline.lifetime.port)
    return lifeline, exec_spec
def test_draw_on_canvas(diagram):
    """Smoke test: drawing a bare execution specification must not raise."""
    exec_spec = diagram.create(ExecutionSpecificationItem)
    cr = instant_cairo_context()
    # All interaction states off; success is simply the absence of errors.
    exec_spec.draw(
        DrawContext(
            cairo=cr,
            selected=False,
            focused=False,
            hovered=False,
            dropzone=False,
            style={},
        )
    )
def test_allow_execution_specification_to_lifeline(diagram):
lifeline = diagram.create(LifelineItem)
lifeline.lifetime.visible = True
exec_spec = diagram.create(ExecutionSpecificationItem)
glued = allow(exec_spec, exec_spec.handles()[0], lifeline, lifeline.lifetime.port)
assert glued
def test_connect_execution_specification_to_lifeline(diagram, element_factory):
    """Connecting to a lifeline must create and cover the model elements."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    assert exec_spec.subject
    assert lifeline.subject
    # Both the start occurrence and the listed occurrence specifications
    # must be covered by the lifeline the item was connected to.
    assert exec_spec.subject.start.covered is lifeline.subject
    assert (
        exec_spec.subject.executionOccurrenceSpecification[0].covered
        is lifeline.subject
    )
def test_disconnect_execution_specification_from_lifeline(diagram, element_factory):
    """Disconnecting must delete the model elements but keep the diagram item."""
    # NOTE(review): parameter name shadows the builtin `type`.
    def elements_of_kind(type):
        return element_factory.lselect(type)
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    disconnect(exec_spec, exec_spec.handles()[0])
    assert lifeline.subject
    # The item loses its model subject yet stays on the canvas.
    assert exec_spec.subject is None
    assert exec_spec.canvas
    # No orphaned execution/occurrence specifications remain in the model.
    assert elements_of_kind(UML.ExecutionSpecification) == []
    assert elements_of_kind(UML.ExecutionOccurrenceSpecification) == []
def test_allow_execution_specification_to_execution_specification(diagram):
parent_exec_spec = diagram.create(ExecutionSpecificationItem)
child_exec_spec = diagram.create(ExecutionSpecificationItem)
glued = allow(
parent_exec_spec,
parent_exec_spec.handles()[0],
child_exec_spec,
child_exec_spec.ports()[0],
)
assert glued
def test_connect_execution_specification_to_execution_specification(
diagram, element_factory
):
parent_exec_spec = diagram.create(ExecutionSpecificationItem)
child_exec_spec = diagram.create(ExecutionSpecificationItem)
connect(
child_exec_spec,
child_exec_spec.handles()[0],
parent_exec_spec,
parent_exec_spec.ports()[0],
)
assert not parent_exec_spec.subject
assert not child_exec_spec.subject
def test_connect_execution_specification_to_execution_specification_with_lifeline(
diagram, element_factory
):
lifeline, parent_exec_spec = create_lifeline_with_execution_specification(
diagram, element_factory
)
child_exec_spec = diagram.create(ExecutionSpecificationItem)
connect(
child_exec_spec,
child_exec_spec.handles()[0],
parent_exec_spec,
parent_exec_spec.ports()[0],
)
assert child_exec_spec.subject
assert lifeline.subject
assert child_exec_spec.subject.start.covered is lifeline.subject
assert (
child_exec_spec.subject.executionOccurrenceSpecification[0].covered
is lifeline.subject
)
def test_connect_execution_specification_with_execution_specification_to_lifeline(
diagram, element_factory
):
lifeline = diagram.create(
LifelineItem, subject=element_factory.create(UML.Lifeline)
)
lifeline.lifetime.visible = True
parent_exec_spec = diagram.create(ExecutionSpecificationItem)
child_exec_spec = diagram.create(ExecutionSpecificationItem)
connect(
child_exec_spec,
child_exec_spec.handles()[0],
parent_exec_spec,
parent_exec_spec.ports()[0],
)
connect(
parent_exec_spec,
parent_exec_spec.handles()[0],
lifeline,
lifeline.lifetime.port,
)
assert parent_exec_spec.subject
assert child_exec_spec.subject
assert lifeline.subject
assert parent_exec_spec.subject.start.covered is lifeline.subject
assert child_exec_spec.subject.start.covered is lifeline.subject
assert (
child_exec_spec.subject.executionOccurrenceSpecification[0].covered
is lifeline.subject
)
def test_disconnect_execution_specification_with_execution_specification_from_lifeline(
diagram, element_factory
):
def elements_of_kind(type):
return element_factory.lselect(type)
lifeline, parent_exec_spec = create_lifeline_with_execution_specification(
diagram, element_factory
)
child_exec_spec = diagram.create(ExecutionSpecificationItem)
grand_child_exec_spec = diagram.create(ExecutionSpecificationItem)
connect(
child_exec_spec,
child_exec_spec.handles()[0],
parent_exec_spec,
parent_exec_spec.ports()[0],
)
connect(
grand_child_exec_spec,
grand_child_exec_spec.handles()[0],
child_exec_spec,
child_exec_spec.ports()[0],
)
disconnect(parent_exec_spec, parent_exec_spec.handles()[0])
assert lifeline.subject
assert parent_exec_spec.subject is None
assert child_exec_spec.subject is None
assert grand_child_exec_spec.subject is None
assert elements_of_kind(UML.ExecutionSpecification) == []
assert elements_of_kind(UML.ExecutionOccurrenceSpecification) == []
def test_save_and_load(diagram, element_factory, saver, loader):
    """A save/load round trip must preserve the execution specification."""
    lifeline, exec_spec = create_lifeline_with_execution_specification(
        diagram, element_factory
    )
    # Flush pending canvas updates so the saved state is consistent.
    diagram.canvas.update_now()
    saved_data = saver()
    loader(saved_data)
    exec_specs = element_factory.lselect(
        lambda e: e.isKindOf(UML.ExecutionSpecification)
    )
    loaded_exec_spec = exec_specs[0].presentation[0]
    # Exactly one execution specification with its two occurrence
    # specifications (start and finish) must survive the round trip.
    assert len(exec_specs) == 1
    assert (
        len(
            element_factory.lselect(
                lambda e: e.isKindOf(UML.ExecutionOccurrenceSpecification)
            )
        )
        == 2
    )
    # The reloaded item is still connected at its first handle.
    assert loaded_exec_spec.canvas.get_connection(loaded_exec_spec.handles()[0])
| [
"gaphor@gmail.com"
] | gaphor@gmail.com |
3383f2959f626f37b6ab18cc8a5d8816397abc6c | 9f0babb96bb327aaa859aeb7950fb6e5b2fca73d | /HIGHLIGHTS/freeSpacePropagateModes.py | 2dfd7d288377348cfa03d26e0b905dcbe8b3f681 | [
"MIT"
] | permissive | srio/shadow3-scripts | d39e750774ad8f1c551e9965d4402b3fcb2b043d | 7dd9b4424f47e6d78db9fd6fcb5a3db788b062f7 | refs/heads/master | 2022-09-18T03:37:16.480163 | 2022-09-02T13:43:46 | 2022-09-02T13:43:46 | 43,300,813 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | __author__ = 'mglass'
from srwlib import *
import sys
from comsyl.autocorrelation.AutocorrelationFunction import AutocorrelationFunction
from comsyl.autocorrelation.AutocorrelationFunctionPropagator import AutocorrelationFunctionPropagator
from comsyl.parallel.utils import isMaster, barrier
from comsyl.utils.Logger import log
def createBeamlinePS(distance, undulator, source_position):
    """Build an SRW beamline containing a single free-space drift.

    The drift length is *distance* plus an offset that moves the source
    plane from the undulator center to its entrance when requested.
    Raises Exception for any other *source_position* value.
    """
    if source_position == "entrance":
        # Half the undulator length shifts the origin to the entrance plane.
        source_offset = undulator.length() * 0.5 #+ 2 * comparer.undulator().periodLength()
        log("Using source position entrance z=%f" % source_offset)
    elif source_position == "center":
        source_offset = 0.0
        log("Using source position center z=%f" % source_offset)
    else:
        raise Exception("Unhandled source position")
    # Grid scaling grows with the propagation distance (one unit per meter).
    div_x_factor = int(distance) + 1
    div_y_factor = int(distance) + 1
    # Second list holds SRW propagation parameters — presumably resolution
    # and range factors per SRW convention; TODO confirm against SRW docs.
    optBL = SRWLOptC([SRWLOptD(source_offset+distance)],
                     [[0, 0, 1.0, 0, 0, div_x_factor, 1, div_y_factor, 1, 0, 0, 0], [0, 0, 1.0, 0, 0, 1, 0.05/2.0, 1, 0.1, 0, 0, 0]])
    return optBL
def propagateModes(distance, filename, directory_name,maximum_mode=None):
    """Propagate the coherent modes of a saved autocorrelation function.

    Loads the autocorrelation function from *filename*, propagates it over
    *distance* meters of free space and saves both per-mode data and the
    propagated function under *directory_name*. When *maximum_mode* is None
    it is derived from the mode occupation distribution.
    """
    af_name = filename.split("/")[-1].replace(".npz", "")
    autocorrelation_function = AutocorrelationFunction.load(filename)
    undulator = autocorrelation_function._undulator
    beamline = createBeamlinePS(distance, undulator, source_position=autocorrelation_function.info().sourcePosition())
    propagator = AutocorrelationFunctionPropagator(beamline)
    if maximum_mode is None:
        # Keep only modes whose occupation exceeds the 5e-5 threshold.
        mode_distribution=autocorrelation_function.modeDistribution()
        maximum_mode = mode_distribution[abs(mode_distribution)>0.00005].shape[0]
    propagator.setMaximumMode(maximum_mode)
    data_directory = "%s/data_free_%s" % (directory_name, af_name)
    # Only the master rank creates the directory; all ranks then synchronize.
    if isMaster():
        if not os.path.exists(data_directory):
            os.mkdir(data_directory)
    barrier()
    propagated_filename = "%s/%s_d%.1f.npz" % (data_directory, af_name, distance)
    af = propagator.propagate(autocorrelation_function, propagated_filename)
    af.save("%s/free_prop_%s_d%.1f.npz" % (directory_name, af_name, distance))
if __name__ == "__main__":
    # Command-line argument handling is disabled; parameters are hard-coded.
    # if len(sys.argv) <= 2:
    # print("Need distance and filename")
    # exit()
    filename_ebs = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cs_new_u18_2m_1h_s2.5.npz" # OK EBS
    # filename_lb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_low_beta_u18_2m_1h_s6.5.npy" # OK LB
    # filename_hb = "/scisoft/users/glass/Documents/sources/Orange-SRW/comsyl/calculations/cl_high_beta_u18_2m_1h_s2.0.npy"
    distance = 26.0 # float(sys.argv[1])
    filename = filename_ebs # sys.argv[2]
    directory_name = "propagation"
    # Propagate the first 50 coherent modes over 26 m of free space.
    propagateModes(distance, filename, directory_name, maximum_mode=50)
| [
"srio@esrf.eu"
] | srio@esrf.eu |
9820f4e56513b1d24f74f5ae3cc92e63e23f2d7a | 2c5073c0140b3366b94866d50f8b975c926a529b | /venv/lib/python3.9/site-packages/mediapipe/calculators/util/collection_has_min_size_calculator_pb2.py | 12f5045ea54268b2af3cf6362f92c5e865b13e3f | [] | no_license | geekboi777/Volumegesture | 435c2752d107ac6915919e79bcb63fb0b85f6e9e | 3cc35f74533e26588a606154897f9ded4801f0ce | refs/heads/master | 2023-06-24T19:09:07.138900 | 2021-07-30T23:22:18 | 2021-07-30T23:22:18 | 390,512,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,920 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/util/collection_has_min_size_calculator.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
try:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/util/collection_has_min_size_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nCmediapipe/calculators/util/collection_has_min_size_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\x9c\x01\n%CollectionHasMinSizeCalculatorOptions\x12\x13\n\x08min_size\x18\x01 \x01(\x05:\x01\x30\x32^\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xd0\xb1\xd8{ \x01(\x0b\x32\x30.mediapipe.CollectionHasMinSizeCalculatorOptions'
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_COLLECTIONHASMINSIZECALCULATOROPTIONS = _descriptor.Descriptor(
name='CollectionHasMinSizeCalculatorOptions',
full_name='mediapipe.CollectionHasMinSizeCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='min_size', full_name='mediapipe.CollectionHasMinSizeCalculatorOptions.min_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.CollectionHasMinSizeCalculatorOptions.ext', index=0,
number=259397840, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=277,
)
DESCRIPTOR.message_types_by_name['CollectionHasMinSizeCalculatorOptions'] = _COLLECTIONHASMINSIZECALCULATOROPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CollectionHasMinSizeCalculatorOptions = _reflection.GeneratedProtocolMessageType('CollectionHasMinSizeCalculatorOptions', (_message.Message,), {
'DESCRIPTOR' : _COLLECTIONHASMINSIZECALCULATOROPTIONS,
'__module__' : 'mediapipe.calculators.util.collection_has_min_size_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.CollectionHasMinSizeCalculatorOptions)
})
_sym_db.RegisterMessage(CollectionHasMinSizeCalculatorOptions)
_COLLECTIONHASMINSIZECALCULATOROPTIONS.extensions_by_name['ext'].message_type = _COLLECTIONHASMINSIZECALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_COLLECTIONHASMINSIZECALCULATOROPTIONS.extensions_by_name['ext'])
# @@protoc_insertion_point(module_scope)
| [
"geekboi777@github.com"
] | geekboi777@github.com |
0b38a6f4b4ac235595e3a0c19b632b9b0a49a262 | f090c3e0faa70cf0ef7c4be99cb894630bce2842 | /scripts_201410/simpleMeasurements/micromotioncomp/scanEy.py | 4e93efb76ce8b5265babb5a67054ba26b52c4464 | [] | no_license | HaeffnerLab/resonator | 157d1dc455209da9b7de077157bda53b4883c8b7 | 7c2e377fdc45f6c1ad205f8bbc2e6607eb3fdc71 | refs/heads/master | 2021-01-09T20:48:03.587634 | 2016-09-22T18:40:17 | 2016-09-22T18:40:17 | 6,715,345 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from FFT import measureFFT
import numpy as np
import labrad
import datetime
now = datetime.datetime.now()
date = now.strftime("%Y%m%d")
cxn = labrad.connect()
dv = cxn.data_vault
ds = cxn.resonatordac
#rs = cxn.rohdeschwarz_server
#rs.select_device('resonator-pc GPIB Bus - USB0::0x0AAD::0x0054::102549')
amplMin = -.4
amplMax = -.3
amplStep = .01
recordTime = 0.5 #seconds
average = 6
freqSpan = 100.0 #Hz
freqOffset = -920.0 #Hz, the offset between the counter clock and the rf synthesizer clock
#setting up FFT
fft = measureFFT(cxn, recordTime, average, freqSpan, freqOffset, savePlot = False)
#saving
dv.cd(['', date, 'QuickMeasurements','FFT'],True)
name = dv.new('FFT',[('Amplitude', 'V/m')], [('FFTPeak','Arb','Arb')] )
dv.add_parameter('plotLive',True)
print 'Saving {}'.format(name)
amplitudes = np.arange(amplMin, amplMax + amplStep, amplStep)
Ex = 0.19
Ez = 0
U1 = -.22
U2 = 4.5
U3 = .22
U4 = 0
U5 = 0
for Ey in amplitudes:
ds.set_multipole_voltages([('Ex', Ex), ('Ey', Ey), ('Ez', Ez), ('U1', U1), ('U2', U2), ('U3', U3), ('U4', U4), ('U5', U5)])
micromotion = fft.getPeakArea(ptsAround = 3)
dv.add(Ey, micromotion)
| [
"soenkeamoeller@gmail.com"
] | soenkeamoeller@gmail.com |
6605b246a60796200540bfea2493f300ae9e79fe | 7cebfa2066e679e19993a5507e59d1979df3d4a8 | /1_Basics/9_revamp.py | 7d04e2986df78e238ca677bcc1fb34fbcad2937f | [
"Apache-2.0"
] | permissive | Arunken/PythonScripts | 833e9e43ccb29234a206027f1cda1d978718d5eb | 702d0a3af7a9be3311f9da0afc5285d453f15484 | refs/heads/master | 2022-12-24T18:50:43.672779 | 2021-05-13T11:31:51 | 2021-05-13T11:31:51 | 237,631,027 | 0 | 0 | Apache-2.0 | 2022-12-08T00:47:45 | 2020-02-01T15:01:20 | Python | UTF-8 | Python | false | false | 1,196 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 20 14:18:16 2018
@author: Arken
"""
# splitting a sentence and storing it in a list or tuple
a = ' python programming '
a = a.strip()
b = list(a) #saves individual characters separately in a list.
c = tuple(b) # The same goes for tuple.
tup = a.split() # split in to individual words
tup1 = a.split('o') # split wherever a specific character is present
st = ','.join(tup1) # joins the items in the list and adds a comma in between
d = 'python programming on'
e = d[8:19:3] # values between 8 and 19 and return every 3rd value
f = ['hi','I','am','learning','python','programming'] # list
for i in f:
print(i) # prints each element in the list
for i in f:
if 'hi' in f:
print(i) # print each element in the list if a specific condition is satisfied
else:
print('ok thank you') # else print a message
g = [1,2,3,4,5,6,7,8,9,10]
for i in g:
if i%2==0:
print(i) # print even numbers in the list
for i in range(0,len(g),2):
print(i) # print every 2nd value between 0 and 10
for i in range(0,10):
if 2<i<9:
print(i)
else:
print('help me') | [
"mail.arunken@gmail.com"
] | mail.arunken@gmail.com |
71b56e58f27fc67cf47cecacfa2c58b0264f5054 | f476cdf5a27e7768238854c5e7f24e3650ffeebc | /Codeforces/1409A.py | b5d64d8e69939839e4534b9d1c0c62fc669fc834 | [] | no_license | masudurHimel/Problematic_Adventure | 0d1a8b0d3cc6339d3d9a4f8ed9be9c1635ab290f | 3f32f5195c497e1c44d1a37c80ea644c31a53688 | refs/heads/master | 2023-05-01T23:34:15.697852 | 2023-04-28T09:30:00 | 2023-04-29T09:22:33 | 226,885,967 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | n = int(input())
for i in range(n):
a, b = map(int, input().split())
if a == b:
print(0)
continue
diff_abs = abs(a-b)
step = 0
for j in range(10, 0, -1):
if diff_abs//j != 0:
step += diff_abs//j
diff_abs = diff_abs % j
print(step)
| [
"masudurhimel@gmail.com"
] | masudurhimel@gmail.com |
dcde52d8d1e7cda7719a66c2bc0f132c213960a8 | 43ff15a7989576712d0e51f0ed32e3a4510273c0 | /app/migrations/0010_auto_20160712_1040.py | e5c34056b176ac89bbcdf1bcefd4ce23094e8c03 | [] | no_license | v1cker/kekescan | f2b51d91a9d6496e2cdc767eb6a600171f513449 | 3daa1775648439ba9e0003a376f90b601820290e | refs/heads/master | 2020-09-19T16:26:56.522453 | 2017-06-15T02:55:24 | 2017-06-15T02:55:24 | 94,495,007 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-07-12 02:40
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20160712_1037'),
]
operations = [
migrations.AlterField(
model_name='icpcheck',
name='insert_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2016, 7, 12, 10, 40, 29, 429713), null=True),
),
migrations.AlterField(
model_name='subdomainbrute',
name='fuzz_time',
field=models.DateTimeField(blank=True, default=datetime.datetime(2016, 7, 12, 10, 40, 29, 430219), null=True),
),
]
| [
"liyueke@huobi.com"
] | liyueke@huobi.com |
ebf4bdc53c74f65f3d597c85336264c25abf9174 | 242da8865e037f9fffb76269c3acddb73ce9fa14 | /packages/pyright-internal/src/tests/samples/tuples10.py | f111dbbb6122c9440b75b9f703c03c87600c2765 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | khyveasna11111908/pyright | f42eceae044f6fbc27552c1765b03ebd345a451c | 493d47807b96137995e4bb6ca341930e4de911f9 | refs/heads/main | 2023-08-30T00:08:36.191799 | 2021-09-25T19:17:13 | 2021-09-25T19:17:13 | 410,361,483 | 1 | 1 | NOASSERTION | 2021-09-25T19:15:23 | 2021-09-25T19:15:22 | null | UTF-8 | Python | false | false | 721 | py | # This sample tests that inferred types for tuples strip
# literals under the appropriate circumstances.
from typing import List, Literal, Tuple
a1 = (1, 2)
t1: Literal["tuple[Literal[1], Literal[2]]"] = reveal_type(a1)
a2 = list((1, 2))
t2: Literal["list[int]"] = reveal_type(a2)
a3: List[Literal[1]] = list((1,))
t3: Literal["list[Literal[1]]"] = reveal_type(a3)
def func1(v1: Tuple[Literal[1], ...], v2: Tuple[Literal[1]]):
a4 = set(v1)
t4: Literal["set[Literal[1]]"] = reveal_type(a4)
a5 = set(v2)
t5: Literal["set[Literal[1]]"] = reveal_type(a5)
a6 = (1, "hi")
t6: Literal["tuple[Literal[1], Literal['hi']]"] = reveal_type(a6)
v4 = set(a6)
t7: Literal["set[int | str]"] = reveal_type(v4)
| [
"erictr@microsoft.com"
] | erictr@microsoft.com |
b9e8aa1f4c274670cb8f1b15a15dd85cd3bc852c | 5341e31c0a210bd9449dcc4aa63d5ce5ab161e3a | /bin/cron_command.py | b1d4854f6c919ff2a88f67a2816330f03654c2ed | [
"MIT"
] | permissive | ipashchenko/watcher | efc347cd261c1483f4bc18cd030d3d42d09422d9 | ab55615b0ad8d23f98317bd49c2c9291c4add69b | refs/heads/master | 2021-01-10T10:20:09.504787 | 2016-01-25T12:27:44 | 2016-01-25T12:27:44 | 49,568,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | #!/usr/bin python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import smtplib
import netrc
from difflib import context_diff
from email.mime.application import MIMEApplication
from filecmp import cmp
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from os.path import basename
path = os.path.normpath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
sys.path.insert(0, path)
from watcher import watcher
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s -'
' %(message)s')
# logging.disable()
def func(f1, f2):
"""
Note that by default, this looks to check your netrc credentials
to use this feature, create a .netrc file, so that only you can read and
write it
touch ~/.netrc
chmod 600 ~/.netrc
and then add the information for the gmail smtp server, i.e.
``machine smtp.gmail.com login yourusername@gmail.com password
yourpassword``
"""
smtpserver = "smtp.gmail.com"
tls = True
fromaddr = "in4pashchenko@gmail.com"
toaddr = "in4-pashchenko@yandex.ru"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
if not cmp(f1, f2):
logging.debug("Files {} & {} differs!".format(basename(f1),
basename(f2)))
diff = context_diff(open(f1).readlines(), open(f2).readlines(),
fromfile=basename(f1), tofile=basename(f2))
text = ''.join(diff)
with open(f2, "rb") as fil:
msg.attach(MIMEApplication(fil.read(),
Content_Disposition='attachment; filename="%s"' % basename(f2),
Name=basename(f2)))
body = text
msg['Subject'] = "Changes in SVLBI schedule"
msg.attach(MIMEText(body, 'plain'))
s = smtplib.SMTP(smtpserver)
secrets = netrc.netrc()
netrclogin, netrcaccount, netrcpassword = secrets.authenticators(smtpserver)
if tls:
s.starttls()
s.login(netrclogin, netrcpassword)
s.sendmail('in4pashchenko@gmail.com', ['in4-pashchenko@yandex.ru'],
msg.as_string())
s.quit()
logging.debug("Moving file {} to {}!".format(basename(f2),
basename(f1)))
shutil.move(f2, f1)
else:
logging.debug("Files {} & {} are the same!".format(basename(f1),
basename(f2)))
os.unlink(f2)
if __name__ == '__main__':
if not len(sys.argv) == 4:
print("Usage: cron_command.py month year directory")
sys.exit(0)
month = sys.argv[1]
year = sys.argv[2]
# User-specified directory
dir = sys.argv[3]
# Get last SVLBI schedule
watcher.get_last_svlbi_schedule(month, year, os.path.join(dir,
'svlbi_new.txt'))
func(os.path.join(dir, 'svlbi.txt'), os.path.join(dir, 'svlbi_new.txt'))
| [
"in4pashchenko@gmail.com"
] | in4pashchenko@gmail.com |
586847bd394d5bf213a98ea55153760e22ad456c | 2b8fe23680fb8c6596c8b4fd53a2547e32e84617 | /1-DS-Array-String/String_Compress.py | 43bcad128033c460e03812bfdc4ce87e60f0064c | [] | no_license | jigarshah2811/Python-Programming | b441c4815f80bef4d17611cdea851254c59739a9 | a60a11ad29e9dde9e9960006f887d9b66d29e427 | refs/heads/master | 2022-11-20T18:09:11.955564 | 2022-11-04T05:58:19 | 2022-11-04T05:58:19 | 67,324,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | """
https://nbviewer.org/github/donnemartin/interactive-coding-challenges/blob/master/arrays_strings/compress/compress_challenge.ipynb
Problem: Compress a string such that 'AAABCCDDDD' becomes 'A3BC2D4'. Only compress the string if it saves space.
"""
import unittest
class TestSolution(unittest.TestCase):
def testCompressString(self, func):
self.assertEqual(func(""), "")
self.assertEqual(func("ABC"), "ABC")
self.assertEqual(func("AAABC"), "A3BC")
self.assertEqual(func("AAABCCCC"), "A3BC4")
class Solution:
def compressString(self, inputStr: str) -> str:
"""
Pattern: If the prior char is same as this char - DUP - just increment count
No Dup - embed the last char and counter
"""
# Edge case, where string has <2 char "" "A" - no compression needed
if len(inputStr) < 2:
return inputStr
# Deal with lists not str (immutable)
res, s = list(), list(inputStr)
counter = 1 # Default counter for a new char
# Embed first char as-is, then count total occurances of this char to embed in last
res.append(s[0])
for i in range(1, len(s)):
if s[i] == s[i-1]: # DUP, just increment counter and append at the last
counter += 1
else: # New char, append counter for prior char and append new char
if counter > 1:
res.append(counter)
counter = 1
res.append(s[i])
if counter > 1:
res.append(counter)
return ''.join(map(str, res))
def main():
solution = Solution()
testSolution = TestSolution()
testSolution.testCompressString(solution.compressString)
if __name__ == "__main__":
main()
| [
"jshah@pinterest.com"
] | jshah@pinterest.com |
94ec424c89decabfdda8c83f68cfd5daceac066b | 9c7581c3b862174878a5e71609f94b3e5a2de5c9 | /CursoEmVideo/Aula20/ex097.py | db52582c0213396493f58974d80d72cb11e57046 | [
"MIT"
] | permissive | lucashsouza/Desafios-Python | 6d9fdc3500e0d01ce9a75201fc4fe88469928170 | abb5b11ebdfd4c232b4f0427ef41fd96013f2802 | refs/heads/master | 2020-06-21T16:49:32.884025 | 2019-07-23T01:23:07 | 2019-07-23T01:23:07 | 143,765,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Exercicio 097 - Função para texto
'''
Faça um programa que tenha uma função chamada escreva(), que receba um texto
qualquer como parâmetro e mostre uma mensagem com o tamanho adaptável
'''
def mensagem(txt):
tam = len(txt)
print('~'*tam)
print(txt)
print('~'*tam, '\n')
mensagem('Hello, world!')
mensagem('Python é a a melhor linguagem de programação')
| [
"noreply@github.com"
] | lucashsouza.noreply@github.com |
b0acf56f2da7e65ce7b8ef6af2945ed5cf4c5bd0 | 4abce782dad606b10d7646763b21277689e8cedd | /async-pydevd/tests/test_generate.py | 552980c42eaf9220bb5c76d07dd48fff99bf2108 | [] | no_license | wordhui/pycharm-evaluate-async-code | 8cca3ee4a5b74eff1073a442c1f014de30b02b5b | 64ccd29b0ee286ad6fe45172334926e9f517d162 | refs/heads/master | 2023-05-14T18:10:20.786741 | 2021-06-08T07:04:05 | 2021-06-08T07:04:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from async_pydevd import FILES, generate
def test_generate():
result = generate()
assert '"""' not in result
for f in FILES:
normalized = (
f.read_text("utf-8").replace('"""', "'''").replace(" # pragma: no cover", "").strip()
)
assert normalized in result
assert not result.endswith(" ")
assert not result.startswith(" ")
| [
"1998uriyyo@gmail.com"
] | 1998uriyyo@gmail.com |
ed4d0a72fc25b24f5a5ba572bb628ea20168a043 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_friends.py | 120a772768eee00c88d6902834549d9e7b6fe04a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.verbs._friend import _FRIEND
#calss header
class _FRIENDS(_FRIEND, ):
def __init__(self,):
_FRIEND.__init__(self)
self.name = "FRIENDS"
self.specie = 'verbs'
self.basic = "friend"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
32b70266bcc9034ed202f467bfd9da532c09fc20 | edbb5293b14fae626ad38f0087e66c996acd80de | /run.py | 3a094f50bdc2aa60b87f6bb15133904c69f742fc | [] | no_license | furuiyang0715/sync_services | 4d8b2f425814920e409580080d946b1437ed17b3 | 95bb50180129ddd4cc78ef086d5e415f2740ea2b | refs/heads/master | 2020-06-12T10:31:57.314671 | 2019-07-12T02:00:07 | 2019-07-12T02:00:07 | 194,272,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | #!/usr/bin/env python3
# coding=utf8
import subprocess
def start():
cmd = """
python index_run.py start; python finance_run.py start; python calendars_run.py start;
"""
subprocess.getoutput(cmd)
def stop():
cmd = """
python index_run.py stop; python finance_run.py stop; python calendars_run.py stop;
"""
subprocess.getoutput(cmd)
if __name__ == '__main__':
# start()
stop()
| [
"furuiyang0715@gmail.com"
] | furuiyang0715@gmail.com |
460b589029a28f4fa3fa3f781280627857374c0b | c6f14a40b13121e8266882a38fa7ff3ff6c943a2 | /apps/ndvi_anomaly/utils.py | d2ed9a3b5d738907c70718714555adcf22309a69 | [
"Apache-2.0"
] | permissive | gijs/data_cube_ui | 443572c7b25734a13f576ea284687145bb3e72cf | 831f4d4f1fe44d7cb81caebf241e3d2add5d7b5d | refs/heads/master | 2021-01-17T11:56:21.657787 | 2017-01-20T23:53:08 | 2017-01-20T23:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | # Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .models import Query
from data_cube_ui.models import Area, Satellite
from datetime import datetime
"""
Utility class designed to take repeated functional code and abstract out for reuse through
application.
"""
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date:
def create_query_from_post(user_id, post):
"""
Takes post data from a request with a user id and creates a model.
TODO: use form validation rather than doing it this way.
Args:
user_id (string): Id of the user requesting the creation of the query.
post (HttpPost): A post that contains a variety of information regarding how to construct the
query
Returns:
query_id (string): The ID of the query that has been created.
"""
#scene_sel = ",".join(post.getlist('scene_selection'))
scene_index_sel = []
scene_string_sel = []
for scene in post.getlist('scene_selection'):
scene_split = scene.split("-")
scene_index_sel.append(scene_split[0])
scene_string_sel.append(scene_split[1])
query = Query(query_start=datetime.now(), query_end=datetime.now(), user_id=user_id,
latitude_max=post['latitude_max'], latitude_min=post['latitude_min'],
longitude_max=post['longitude_max'], longitude_min=post['longitude_min'],
time_start=",".join(scene_index_sel), time_end=",".join(scene_string_sel),
platform=post['platform'], baseline=",".join(post.getlist('baseline_selection')),
area_id=post['area_id'])
query.title = "NDVI Anomaly Task" if 'title' not in post or post['title'] == '' else post['title']
query.description = "None" if 'description' not in post or post['description'] == '' else post['description']
query.product = Satellite.objects.get(satellite_id=query.platform).product_prefix + Area.objects.get(area_id=query.area_id).area_id
query.query_id = query.generate_query_id()
if not Query.objects.filter(query_id=query.query_id).exists():
query.save()
return query.query_id
| [
"alfredo.h.delos_santos@ama-inc.com"
] | alfredo.h.delos_santos@ama-inc.com |
b0098d8416b34e015d6c88c4e7f600a0ab479460 | 98cd5ddf45a73aea64bbfac0c0104829d7231b81 | /S - Sound Jaws-Image/info.py | f063b8a755be8e9c8b7362e3492b7a13b012ff08 | [] | no_license | atheis4/ETC_Modes_Extra | 42508d523cfe632a3335e29f6e1e40af91df231b | d0ce221562105382a7a73cc6d280f4ad0eabf6f3 | refs/heads/master | 2022-04-04T11:15:07.335910 | 2020-01-03T20:27:32 | 2020-01-03T20:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | name = "S - Sound Jaws-Image"
description = "Oscilloscope teeth with background image cycle and tooth color cycle"
knob1 = "Clench"
knob2 = "Number of Teeth"
knob3 = "Tooth Shape"
knob4 = "Colorshift Speed"
released = "September 7 2017"
| [
"media@critterandguitari.com"
] | media@critterandguitari.com |
80cb84142bdc36f93b641768e87520357a096f0a | 01e82d70ee62824fcad3d2df57411c2ff620d6e0 | /data/imsitu_loader.py | ddd1505a8b00336a0b571a85c31dd6cd54b6752c | [
"MIT"
] | permissive | kyoungrok0517/verb-attributes | 3a54c99a096d4252116748dfb1188045f4a2dd70 | a04931e3b2ef5be859bdb4c0f123148b194c9d42 | refs/heads/master | 2020-03-21T05:28:10.026977 | 2018-03-20T16:36:25 | 2018-03-20T16:36:25 | 138,162,475 | 2 | 0 | null | 2018-06-21T11:38:47 | 2018-06-21T11:38:47 | null | UTF-8 | Python | false | false | 6,844 | py | """
Dataset and dataloader for imsitu experiments.
This allows us to:
1) Finetune on Imsitu
2) Finetune on a zero shot setting
"""
import spacy
import torch
import os
from config import IMSITU_TRAIN_LIST, IMSITU_VAL_LIST, IMSITU_TEST_LIST, IMSITU_IMGS
from torchvision.transforms import Scale, RandomCrop, CenterCrop, ToTensor, Normalize, Compose, RandomHorizontalFlip
from PIL import Image
from data.attribute_loader import Attributes
from collections import namedtuple
from torch.autograd import Variable
LISTS = {
'train': IMSITU_TRAIN_LIST,
'val': IMSITU_VAL_LIST,
'test': IMSITU_TEST_LIST,
}
def _load_imsitu_file(mode):
"""
Helper fn that loads imsitu file
:param fn:
:return:
"""
if mode not in LISTS:
raise ValueError("Invalid mode {}, must be train val or test".format(mode))
imsitu_ind_to_label = {}
dps = []
with open(LISTS[mode], 'r') as f:
for row in f.read().splitlines():
fn_ext = row.split(' ')[0]
label = fn_ext.split('_')[0] # This has "ing" on it, so we can't use it for the word
# label. But needed to construct the filename
ind = int(row.split(' ')[1])
fn = os.path.join(IMSITU_IMGS, label, fn_ext)
imsitu_ind_to_label[ind] = label
dps.append((fn, ind))
return dps
class ImSitu(torch.utils.data.Dataset):
def __init__(self,
use_train_verbs=False,
use_val_verbs=False,
use_test_verbs=False,
use_train_images=False,
use_val_images=False,
use_test_images=False,
vector_type='glove',
word_type='lemma',
):
self.vector_type = vector_type
self.word_type = word_type
self.use_train_verbs = use_train_verbs
self.use_val_verbs = use_val_verbs
self.use_test_verbs = use_test_verbs
if not (self.use_train_verbs or self.use_val_verbs or self.use_test_verbs):
raise ValueError("No verbs selected!")
self.use_train_images = use_train_images
self.use_val_images = use_val_images
self.use_test_images = use_test_images
if not (self.use_train_verbs or self.use_val_verbs or self.use_test_verbs):
raise ValueError("No images selected!")
self.attributes = Attributes(
vector_type=vector_type,
word_type=word_type,
use_train=self.use_train_verbs, use_val=self.use_val_verbs,
use_test=self.use_test_verbs, imsitu_only=True)
self.examples = []
for mode, to_use in zip(
['train', 'val', 'test'],
[self.use_train_images, self.use_val_images, self.use_test_images],
):
if to_use:
self.examples += [(fn, self.attributes.ind_perm[ind])
for fn, ind in _load_imsitu_file(mode)
if ind in self.attributes.ind_perm]
self.transform = transform(is_train=not self.use_test_verbs)
def __getitem__(self, index):
fn, ind = self.examples[index]
img = self.transform(Image.open(fn).convert('RGB'))
return img, ind
@classmethod
def splits(cls, zeroshot=False, **kwargs):
"""
Gets splits
:param zeroshot: True if we're transferring to zeroshot classes
:return: train, val, test datasets
"""
if zeroshot:
train_cls = cls(use_train_verbs=True, use_train_images=True, use_val_images=True, **kwargs)
val_cls = cls(use_val_verbs=True, use_train_images=True, use_val_images=True, **kwargs)
test_cls = cls(use_test_verbs=True, use_test_images=True, **kwargs)
else:
train_cls = cls(use_train_verbs=True, use_train_images=True, **kwargs)
val_cls = cls(use_train_verbs=True, use_val_images=True, **kwargs)
test_cls = cls(use_train_verbs=True, use_test_images=True, **kwargs)
return train_cls, val_cls, test_cls
def __len__(self):
return len(self.examples)
Batch = namedtuple('Batch', ['img', 'label'])
class CudaDataLoader(torch.utils.data.DataLoader):
"""
Iterates through the data, but also loads everything as a (cuda) variable
"""
def __init__(self, *args, volatile=False, **kwargs):
super(CudaDataLoader, self).__init__(*args, **kwargs)
self.volatile = volatile
def _load(self, item):
img = Variable(item[0], volatile=self.volatile)
label = Variable(item[1], volatile=self.volatile)
if torch.cuda.is_available():
img = img.cuda()
label = label.cuda()
return Batch(img, label)
def __iter__(self):
return (self._load(x) for x in super(CudaDataLoader, self).__iter__())
@classmethod
def splits(cls, train, val, test, batch_size, num_workers=0, **kwargs):
"""
gets dataloaders given datasets
:param train:
:param val:
:param test:
:param batch_size:
:param num_workers:
:return:
"""
train_dl = cls(
dataset=train,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=collate_fn,
**kwargs,
)
val_dl = cls(
dataset=val,
batch_size=batch_size*16,
shuffle=False,
num_workers=num_workers,
collate_fn=collate_fn,
volatile=True,
**kwargs,
)
test_dl = cls(
dataset=test,
batch_size=batch_size*16,
shuffle=False,
num_workers=num_workers,
collate_fn=collate_fn,
volatile=True,
**kwargs,
)
return train_dl, val_dl, test_dl
def transform(is_train=True, normalize=True):
"""
Returns a transform object
"""
filters = []
filters.append(Scale(256))
if is_train:
filters.append(RandomCrop(224))
else:
filters.append(CenterCrop(224))
if is_train:
filters.append(RandomHorizontalFlip())
filters.append(ToTensor())
if normalize:
filters.append(Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]))
return Compose(filters)
def collate_fn(data):
imgs, labels = zip(*data)
imgs = torch.stack(imgs, 0)
labels = torch.LongTensor(labels)
return imgs, labels
if __name__ == '__main__':
train, val, test = ImSitu.splits()
train_dl = CudaDataLoader(
dataset=train,
batch_size=32,
shuffle=True,
num_workers=0,
collate_fn=collate_fn
)
| [
"rowanz@cs.washington.edu"
] | rowanz@cs.washington.edu |
5d91735118f1452267c4e02054d07b1411cadc2e | 2624007528d2e37f2a2460c7a2d2964890deed16 | /synapse/rest/client/knock.py | 0152a0c66a509b24012d562c6d0f97a525001cbb | [
"Apache-2.0"
] | permissive | matrix-org/synapse-dinsic | a5386060fb6a9575dbec86547fd0943e46d63ac7 | 3da3ecc22d36f129eade97b679e1791176e3d9fa | refs/heads/dinsic | 2023-02-19T22:15:54.550679 | 2022-07-07T13:24:51 | 2022-07-07T13:24:51 | 206,570,942 | 8 | 7 | Apache-2.0 | 2023-02-08T02:50:31 | 2019-09-05T13:29:44 | Python | UTF-8 | Python | false | false | 3,759 | py | # Copyright 2020 Sorunome
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from twisted.web.server import Request
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import JsonDict, RoomAlias, RoomID
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
from ._base import client_patterns
logger = logging.getLogger(__name__)
class KnockRoomAliasServlet(RestServlet):
    """
    POST /knock/{roomIdOrAlias}

    Sends a knock membership event to a room identified either by a room ID
    or by a room alias (the alias is resolved to a room ID first).
    """
    PATTERNS = client_patterns("/knock/(?P<room_identifier>[^/]*)")
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        # Caches POST results so a PUT retried with the same txn_id is not
        # re-executed (see on_PUT below).
        self.txns = HttpTransactionCache(hs)
        self.room_member_handler = hs.get_room_member_handler()
        self.auth = hs.get_auth()
    async def on_POST(
        self,
        request: SynapseRequest,
        room_identifier: str,
        txn_id: Optional[str] = None,
    ) -> Tuple[int, JsonDict]:
        """Handle a knock request; returns (200, {"room_id": ...}) on success."""
        requester = await self.auth.get_user_by_req(request)
        content = parse_json_object_from_request(request)
        event_content = None
        # Only the optional "reason" field of the body is forwarded into the
        # knock event content.
        if "reason" in content:
            event_content = {"reason": content["reason"]}
        if RoomID.is_valid(room_identifier):
            room_id = room_identifier
            # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
            args: Dict[bytes, List[bytes]] = request.args # type: ignore
            # Optional list of candidate servers to knock through.
            remote_room_hosts = parse_strings_from_args(
                args, "server_name", required=False
            )
        elif RoomAlias.is_valid(room_identifier):
            # Resolve the alias into a room ID plus candidate remote hosts.
            handler = self.room_member_handler
            room_alias = RoomAlias.from_string(room_identifier)
            room_id_obj, remote_room_hosts = await handler.lookup_room_alias(room_alias)
            room_id = room_id_obj.to_string()
        else:
            raise SynapseError(
                400, "%s was not legal room ID or room alias" % (room_identifier,)
            )
        await self.room_member_handler.update_membership(
            requester=requester,
            target=requester.user,
            room_id=room_id,
            action=Membership.KNOCK,
            txn_id=txn_id,
            third_party_signed=None,
            remote_room_hosts=remote_room_hosts,
            content=event_content,
        )
        return 200, {"room_id": room_id}
    def on_PUT(
        self, request: Request, room_identifier: str, txn_id: str
    ) -> Awaitable[Tuple[int, JsonDict]]:
        # Idempotent variant: replays the cached on_POST result for txn_id.
        set_tag("txn_id", txn_id)
        return self.txns.fetch_or_execute_request(
            request, self.on_POST, request, room_identifier, txn_id
        )
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    """Instantiate the knock servlet and register it with the HTTP server."""
    servlet = KnockRoomAliasServlet(hs)
    servlet.register(http_server)
| [
"noreply@github.com"
] | matrix-org.noreply@github.com |
8c3f4059018913519c05f57b490e763a80e25559 | bd2b8551aca9728d1dd37a6f2ac988f03e93b2bf | /120_SquareRemainders_(MakeFaster).py | 9f6fb23b41b0421fb2c8235d29a7060338585464 | [] | no_license | acganesh/euler | c7fc6bb0873df4474765598a2933baf413d094e7 | 1a870e9ecfaec770d162eec32dbaa327269ac5ce | refs/heads/master | 2020-06-25T13:07:59.976765 | 2019-10-15T00:19:41 | 2019-10-15T00:19:41 | 199,316,809 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | from datetime import datetime
# Wall-clock timer: total runtime is printed at the end of the script.
s = datetime.now()
# cache[a][n] memoises pow(a, n, a**2) for 0 <= a <= 1000; -1 marks
# "not computed yet".
cache = [[-1]*(2*a**2) for a in range(1000+1)]
def cache_pow(a, n, mod):
    """Memoised three-argument pow: a**n % mod, cached in the module table.

    The module-level `cache` is pre-sized for 0 <= a <= 1000 and
    0 <= n < 2*a**2; indexing outside that range raises IndexError.

    Fixes the original failure path: a bare `except:` printed a message and
    then fell through to use `val` unbound, raising NameError instead of a
    meaningful error.
    """
    val = cache[a][n]
    if val == -1:
        val = pow(a, n, mod)
        cache[a][n] = val
    return val
def remainder(a, n):
    """Return ((a-1)**n + (a+1)**n) mod a**2 (Project Euler 120 quantity)."""
    modulus = a * a
    low_term = pow(a - 1, n, modulus)
    high_term = pow(a + 1, n, modulus)
    return (low_term + high_term) % modulus
# Sanity check: the Project Euler 120 example gives 42 for a=7, n=3.
print remainder(7, 3)
def max_remainder(a):
    """Return the maximum of remainder(a, exp) over exp = 1 .. a**2 - 8.

    The exponent range is kept from the original (its exact bound is not
    justified here; presumably it covers at least one full period of the
    remainders -- TODO confirm).

    The original also tracked the maximising exponent in a dead local
    (`max_exp`) that was never read; it has been removed.
    """
    max_val = 0
    for exp in range(1, a * a - 7):
        val = remainder(a, exp)
        if val > max_val:
            max_val = val
    return max_val
return max_val
def main(limit):
    """Sum max_remainder(a) for every a from 3 up to and including limit."""
    val = 3
    total = 0
    while val <= limit:
        total += max_remainder(val)
        # Coarse progress indicator for long runs (Python 2 print statement).
        if val % 100 == 0: print 'val',val
        #print val
        val += 1
    return total
'''
for a in range(1, 20):
print a, max_remainder(a)
'''
# Problem answer for the limit of 200, followed by total wall-clock runtime.
print main(200)
print datetime.now() - s
| [
"acganesh@stanford.edu"
] | acganesh@stanford.edu |
34c460b320afaa0d7a0a6778bb245ac99c089f7a | 205fe9835ee9ae9dee72635c870bd836f911d331 | /src/cloudx/migrations/0002_auto_20170903_1445.py | 0c823947ecfad2dc0289f569f91dc98f706f9f97 | [] | no_license | msrshahrukh100/testxml | 8bc9b2f5e40dd3878499a988579a3e76beec6582 | 30fa9523fd8d507964b127a640949534515c5b2e | refs/heads/master | 2021-01-22T06:11:59.410452 | 2017-09-03T18:43:54 | 2017-09-03T18:43:54 | 102,286,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-03 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cloudx', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='data',
name='managerid',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"msr.concordfly@gmail.com"
] | msr.concordfly@gmail.com |
32471ddbd0a025de389d955a990ab2f6f3e858c3 | 94f180b4b0b0a699d6948fd5e1216d16d6735edc | /source-code/Map Sum Pairs 677.py | 4e7990ac2f79efb70905565c8fdeeb02907fc31e | [
"MIT"
] | permissive | ttungl/Coding-Interview-Challenge | 7093b7f8da0c03abaf2f61340384cdc15c7a31e7 | d80c3e15468d50b42ee53fcc73e9326c6c816495 | refs/heads/master | 2021-09-15T05:25:27.192040 | 2018-05-26T19:02:33 | 2018-05-26T19:02:33 | 115,586,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | # 677. Map Sum Pairs
# ttungl@gmail.com
# Implement a MapSum class with insert, and sum methods.
# For the method insert, you'll be given a pair of (string, integer). The string represents the key and the integer represents the value. If the key already existed, then the original key-value pair will be overridden to the new one.
# For the method sum, you'll be given a string representing the prefix, and you need to return the sum of all the pairs' value whose key starts with the prefix.
# Example 1:
# Input: insert("apple", 3), Output: Null
# Input: sum("ap"), Output: 3
# Input: insert("app", 2), Output: Null
# Input: sum("ap"), Output: 5
# sol 1
# runtime: 38ms
class TrieNode(object):
    """One trie node.

    `children` maps a character to its child node; `count` is the running
    sum of the values of every key passing through this node (i.e. every
    key sharing this node's prefix).
    """
    def __init__(self, v=0):
        # A plain dict suffices: lookups below are always guarded with `in`,
        # so the original `collections.defaultdict()` (which had no default
        # factory and therefore behaved exactly like a dict) was pointless.
        self.children = {}
        self.count = v
class MapSum(object):
    """Trie-backed map supporting prefix sums (LeetCode 677).

    insert() is O(len(key)); sum() is O(len(prefix)).
    """
    def __init__(self):
        self.root = TrieNode()
        # Last value stored per key, so re-inserting a key adjusts the
        # per-node counts by the delta only.
        self.keys = {}
    def insert(self, key, val):
        """Store key -> val, overriding any previous value for key."""
        node = self.root
        diff = val - self.keys.get(key, 0)
        self.keys[key] = val
        for c in key:
            if c not in node.children:
                node.children[c] = TrieNode()
            node = node.children[c]
            # Every node along the path absorbs the change in this key's value.
            node.count += diff
    def sum(self, prefix):
        """Return the sum of values of all stored keys starting with prefix."""
        node = self.root
        for c in prefix:
            if c not in node.children:
                return 0
            node = node.children[c]
        return node.count
# sol 2:
# runtime: 31ms
class MapSum(object):
    """Brute-force map with prefix sums (LeetCode 677, second solution).

    insert() is O(1); sum() scans every stored key, so it is O(total key
    length) per call.
    """
    def __init__(self):
        # key -> latest value; re-insertion simply overwrites.
        self.x = {}
    def insert(self, key, val):
        """Store key -> val, overriding any previous value for key."""
        self.x[key] = val
    def sum(self, prefix):
        """Return the sum of values of all stored keys starting with prefix.

        Uses str.startswith instead of the original slice-and-compare
        (`key[:len(prefix)] == prefix`), which built a throwaway string
        per key; the accumulation loop becomes a single sum().
        """
        return sum(v for k, v in self.x.items() if k.startswith(prefix))
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
| [
"noreply@github.com"
] | ttungl.noreply@github.com |
1d110680a248ff7b57501497a49da59bfba00b86 | a9fd2e227f7f529fbec50caa82d5962019c5f3ee | /account/views.py | a0e85245023aad2768d6dcdd5e24d8ad8acb1832 | [] | no_license | jsparmani/TeekaSchedule | 1477f3b38aec484c77276fc0f731c85c12fa34d2 | 76c01dd742f692cfd50c299807413b99a20c5535 | refs/heads/master | 2020-07-15T06:36:11.500851 | 2019-09-01T05:46:57 | 2019-09-01T05:46:57 | 205,501,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | from django.shortcuts import render, redirect
from . import forms
import random
from . import models
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import User
from datetime import datetime
from location import models as loc_models
import requests
# Create your views here.
api_key = '292a8d1f-295e-11e9-9ee8-0200cd936042'
def get_parent_username(request):
    """Step 1 of parent login: collect a username (phone number), create a
    one-time password for it, and redirect to the OTP-entry view.

    Any failure redirects to the generic fault page.
    """
    if request.method == 'POST':
        form = forms.ParentUsernameForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            # Use the OS CSPRNG for the OTP: the plain Mersenne-Twister
            # `random.randint` is predictable and unsuitable for auth codes.
            otp = random.SystemRandom().randint(111111, 999999)
            try:
                otp_user = models.OTP.objects.create(
                    username=username,
                    otp=otp,
                    created_at=datetime.now()
                )
            except Exception:
                # Narrowed from a bare `except:` so that system-exiting
                # exceptions (KeyboardInterrupt, SystemExit) still propagate.
                return redirect('fault', fault="Server Error!")
            # NOTE(review): the OTP is only printed; the SMS call below is
            # commented out. Confirm before any production deployment.
            print(otp)
            # link = f'https://2factor.in/API/R1/?module=TRANS_SMS&apikey={api_key}&to={username}&from=ECIWEB&templatename=OTP&var1=Sir&var2={otp}'
            # requests.get(link)
            return redirect('account:get_parent_otp', pk=otp_user.pk)
        else:
            return redirect('fault', fault="Server Error!")
    else:
        form = forms.ParentUsernameForm()
    return render(request, 'account/get_parent_username.html', {'form': form})
def get_parent_otp(request, pk):
    """Step 2 of parent login: verify the OTP created in step 1.

    On a correct OTP the user is authenticated and logged in; on first
    login the Django User plus an empty ParentUser profile are created.

    NOTE(review): every parent account shares the fixed password
    'testpassword', so authentication effectively relies on the OTP alone.
    Confirm this is intentional before release.
    """
    otp_user = models.OTP.objects.get(pk__exact=pk)
    if request.method == 'POST':
        form = forms.ParentOTPForm(request.POST)
        if form.is_valid():
            otp = form.cleaned_data['otp']
            if otp == otp_user.otp:
                user = authenticate(request, username=otp_user.username,
                                    password='testpassword')
                if user:
                    auth_login(request, user)
                    return redirect('home')
                else:
                    # First login: create the account, then sign in.
                    user = User.objects.create_user(
                        username=otp_user.username,
                        password='testpassword'
                    )
                    user.save()
                    models.ParentUser.objects.create(
                        user=user
                    )
                    user = authenticate(request, username=otp_user.username,
                                        password='testpassword')
                    auth_login(request, user)
                    return redirect('home')
            else:
                return redirect('fault', fault="Invalid Credentials!")
        else:
            return redirect('fault', fault="Server Error!")
    else:
        form = forms.ParentOTPForm()
    return render(request, 'account/get_parent_otp.html', {'form': form})
def get_parent_details(request):
    """Step 3 of parent signup: collect family details and attach them to
    the logged-in parent's ParentUser profile, then redirect home.
    """
    if request.method == 'POST':
        form = forms.ParentDetailsForm(request.POST)
        if form.is_valid():
            address = form.cleaned_data['address']
            f_name = form.cleaned_data['f_name']
            m_name = form.cleaned_data['m_name']
            f_dob = form.cleaned_data['f_dob']
            m_dob = form.cleaned_data['m_dob']
            try:
                user = models.ParentUser.objects.get(
                    user__username__exact=request.user.username)
            except Exception:
                # Narrowed from a bare `except:`; a missing or duplicate
                # profile (DoesNotExist / MultipleObjectsReturned) lands here.
                # (A leftover debug print of the username was also removed.)
                return redirect('fault', fault="Server Error")
            user.f_name = f_name
            user.m_name = m_name
            user.f_dob = f_dob
            user.m_dob = m_dob
            # The form provides the locality name; store the Locality row.
            user.address = loc_models.Locality.objects.get(name__exact=address)
            user.save()
            return redirect('home')
        else:
            return redirect('fault', fault="Server Error!")
    else:
        form = forms.ParentDetailsForm()
    return render(request, 'account/get_parent_details.html', {'form': form})
| [
"jsparmani@gmail.com"
] | jsparmani@gmail.com |
3f4cb50a830c6cc835b54c6700548fc256c8fb0b | 5ef46abb67b07646537b4fc1d5880fdc91e412b2 | /Sea/adapter/couplings/Coupling2DCavities3D.py | 01b99fed556d8c6968ba029fb6095735b7343e64 | [] | no_license | python-acoustics/Sea | 3f13f8d0d39200a4b35f9edfe8e3a7b2783c6966 | e30b6dc59d8ab02cd41924f7b6c14d0d1e77e19e | refs/heads/master | 2016-09-05T16:59:01.602835 | 2013-04-16T15:29:54 | 2013-04-16T15:29:54 | 7,466,520 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | import numpy as np
import Sea
from Coupling import Coupling
class Coupling2DCavities2D(Coupling):
    """
    Coupling for cavity2D to cavity transmission.

    NOTE(review): the containing file is named Coupling2DCavities3D.py while
    the class and docstrings say 2D -- confirm which dimensionality is meant.
    """
    @property
    def impedance_from(self):
        """
        Chooses the right impedance of subsystem_from.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_from.impedance
    @property
    def impedance_to(self):
        """
        Chooses the right impedance of subsystem_to.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_to.impedance
    @property
    def tau(self):
        """
        Transmission coefficient, per frequency band.

        NOTE(review): this returns all zeros, which makes clf identically
        zero -- presumably a placeholder; confirm intended.
        """
        return np.zeros(self.frequency.amount)
    @property
    def clf(self):
        """
        Coupling loss factor for transmission from a 2D cavity to a cavity.
        .. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi}
        See BAC, equation 3.14
        """
        return self.tau / (4.0 * np.pi)
"fridh@fridh.nl"
] | fridh@fridh.nl |
829b5c5c784978dd392e43c8f6430520201503fe | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/bokeh/util/tests/test_dependencies.py | f27c2fd625febb1ffaad405c649ce2d9ae7294d0 | [
"MIT"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 2,956 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
# Module under test
import bokeh.util.dependencies as dep
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_detect_phantomjs(object):
    """Tests for bokeh.util.dependencies.detect_phantomjs."""
    def test_detect_phantomjs_success(self):
        assert dep.detect_phantomjs() is not None
    def test_detect_phantomjs_bad_path(self, monkeypatch):
        # An explicit env override pointing nowhere must raise.
        monkeypatch.setenv("BOKEH_PHANTOMJS_PATH", "bad_path")
        with pytest.raises(RuntimeError):
            dep.detect_phantomjs()
    def test_detect_phantomjs_bad_version(self):
        with pytest.raises(RuntimeError) as e:
            dep.detect_phantomjs('10.1')
        # NOTE(review): this checks str(e) (the ExceptionInfo) rather than
        # str(e.value); on modern pytest those differ -- confirm intended.
        assert str(e).endswith("PhantomJS version to old. Version>=10.1 required, installed: 2.1.1")
    def test_detect_phantomjs_default_required_version(self):
        # Guards the default minimum-version argument of detect_phantomjs.
        assert dep.detect_phantomjs.__defaults__ == ('2.1',)
class Test_import_optional(object):
    """Tests for bokeh.util.dependencies.import_optional."""
    def test_success(self):
        # A module that certainly exists must come back as a module object.
        module = dep.import_optional('sys')
        assert module is not None
    def test_fail(self):
        # A missing module must yield None rather than raising.
        missing = dep.import_optional('bleepbloop')
        assert missing is None
class Test_import_required(object):
    """Tests for bokeh.util.dependencies.import_required."""
    def test_success(self):
        # An importable module is returned unchanged.
        module = dep.import_required('sys', 'yep')
        assert module is not None
    def test_fail(self):
        # A missing module must raise, and the caller-supplied message
        # must appear in the exception text.
        with pytest.raises(RuntimeError) as excinfo:
            dep.import_required('bleepbloop', 'nope')
        assert 'nope' in str(excinfo.value)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
5f93747a95298ff1f96092486f57e56a702b47cf | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/keras/engine/training.pyi | 87a6a2450c166433875a253c15926a2225a32a76 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,674 | pyi | # Stubs for tensorflow.python.keras.engine.training (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.data.ops import dataset_ops as dataset_ops, iterator_ops as iterator_ops
from tensorflow.python.data.ops.dataset_ops import Dataset as Dataset
from tensorflow.python.eager import context as context
from tensorflow.python.framework import errors as errors, ops as ops, tensor_util as tensor_util
from tensorflow.python.keras import losses as losses, optimizers as optimizers
from tensorflow.python.keras.engine import base_layer as base_layer, distributed_training_utils as distributed_training_utils, training_arrays as training_arrays, training_distributed as training_distributed, training_eager as training_eager, training_generator as training_generator, training_utils as training_utils
from tensorflow.python.keras.engine.network import Network as Network
from tensorflow.python.keras.utils import data_utils as data_utils
from tensorflow.python.keras.utils.generic_utils import slice_arrays as slice_arrays
from tensorflow.python.ops import math_ops as math_ops, weights_broadcast_ops as weights_broadcast_ops
from tensorflow.python.util import nest as nest
from tensorflow.python.util.tf_export import tf_export as tf_export
from typing import Any as Any, Optional as Optional
class Model(Network):
    # Auto-generated type stub for the Keras Model class; everything is
    # Any-typed, so it provides name coverage rather than real type safety.
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    # Attributes populated by compile(); dynamically typed.
    optimizer: Any = ...
    loss: Any = ...
    metrics: Any = ...
    loss_weights: Any = ...
    sample_weight_mode: Any = ...
    weighted_metrics: Any = ...
    target_tensors: Any = ...
    loss_functions: Any = ...
    loss_weights_list: Any = ...
    total_loss: Any = ...
    targets: Any = ...
    # Lazily-built execution functions for train/eval/predict.
    train_function: Any = ...
    test_function: Any = ...
    predict_function: Any = ...
    def compile(self, optimizer: Any, loss: Optional[Any] = ..., metrics: Optional[Any] = ..., loss_weights: Optional[Any] = ..., sample_weight_mode: Optional[Any] = ..., weighted_metrics: Optional[Any] = ..., target_tensors: Optional[Any] = ..., distribute: Optional[Any] = ..., **kwargs: Any) -> None: ...
    def fit(self, x: Optional[Any] = ..., y: Optional[Any] = ..., batch_size: Optional[Any] = ..., epochs: int = ..., verbose: int = ..., callbacks: Optional[Any] = ..., validation_split: float = ..., validation_data: Optional[Any] = ..., shuffle: bool = ..., class_weight: Optional[Any] = ..., sample_weight: Optional[Any] = ..., initial_epoch: int = ..., steps_per_epoch: Optional[Any] = ..., validation_steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., **kwargs: Any): ...
    def evaluate(self, x: Optional[Any] = ..., y: Optional[Any] = ..., batch_size: Optional[Any] = ..., verbose: int = ..., sample_weight: Optional[Any] = ..., steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ...): ...
    def predict(self, x: Any, batch_size: Optional[Any] = ..., verbose: int = ..., steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ...): ...
    def train_on_batch(self, x: Any, y: Optional[Any] = ..., sample_weight: Optional[Any] = ..., class_weight: Optional[Any] = ...): ...
    def test_on_batch(self, x: Any, y: Optional[Any] = ..., sample_weight: Optional[Any] = ...): ...
    def predict_on_batch(self, x: Any): ...
    def fit_generator(self, generator: Any, steps_per_epoch: Optional[Any] = ..., epochs: int = ..., verbose: int = ..., callbacks: Optional[Any] = ..., validation_data: Optional[Any] = ..., validation_steps: Optional[Any] = ..., class_weight: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., shuffle: bool = ..., initial_epoch: int = ...): ...
    def evaluate_generator(self, generator: Any, steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., verbose: int = ...): ...
    def predict_generator(self, generator: Any, steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., verbose: int = ...): ...
class DistributedCallbackModel(Model):
    # Stub for the callback-facing wrapper used during distributed training;
    # __getattr__ forwards unknown attributes (declared returning None here).
    def __init__(self, model: Any) -> None: ...
    def set_original_model(self, orig_model: Any) -> None: ...
    def save_weights(self, filepath: Any, overwrite: bool = ..., save_format: Optional[Any] = ...) -> None: ...
    def save(self, filepath: Any, overwrite: bool = ..., include_optimizer: bool = ...) -> None: ...
    def load_weights(self, filepath: Any, by_name: bool = ...) -> None: ...
    def __getattr__(self, item: Any) -> None: ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
1a6d659303222b5d59f9083a0bc38b15e669503e | 1025bc2aa5aaa40970ad1a51d8d0b1202a1ea11e | /StatTools/python/RooFunctorFromWS.py | 61a6d6dc6bf708de8fef6714998d0555c78dac99 | [] | no_license | uwcms/FinalStateAnalysis | f2be318546728621676a4b90ed2678b2560c94e6 | bcb164a8e27d459a9ac438780f6c8730d3e856bf | refs/heads/miniAOD_9_4_0 | 2022-11-09T01:28:52.199025 | 2019-03-15T19:25:10 | 2019-03-15T19:25:10 | 5,201,989 | 5 | 32 | null | 2020-11-19T17:02:32 | 2012-07-27T07:51:18 | Python | UTF-8 | Python | false | false | 7,941 | py | '''
RooFunctorFromWS
Builds a functor from a function in a RooWorkspace.
This could be improved with cython.
Author: Evan K. Friis, UW Madison
>>> from FinalStateAnalysis.Utilities.rootbindings import ROOT
>>> file = ROOT.TFile('../test/test_RooFunctorFromWS.root')
>>> ws = file.Get('fit_efficiency')
>>> functor = RooFunctorFromWS(ws, 'efficiency')
>>> '%0.4f' % functor(60)
'0.0244'
>>> '%0.4f' % functor(140)
'0.0138'
'''
from FinalStateAnalysis.Utilities.rootbindings import ROOT
import array
from pdb import set_trace
from FinalStateAnalysis.PlotTools.decorators import memo_last
#ROOT.gSystem.Load("libFinalStateAnalysisStatTools")
TMVA_tools = ROOT.TMVA.Tools.Instance()
class RooFunctorFromWS(ROOT.RooFunctor):
    """Single-variable functor evaluating a named function from a RooWorkspace.

    Calling the instance with a number sets the independent variable and
    returns the function value at that point.
    """
    def __init__(self, workspace, functionname, var='x'):
        # Get the RooFormulaVar
        self.function = workspace.function(functionname)
        # Get the ind. var and the parameters
        #self.x = workspace.var(var)
        # Prefer getParameter when the function object provides it; fall
        # back to searching the function's variable list otherwise.
        self.x = self.function.getParameter(var) if hasattr(self.function, 'getParameter') else self.function.getVariables().find(var)
        self.x.setRange(0, 1e99)
    def __call__(self, x):
        """Evaluate the workspace function at the given value of `var`."""
        self.x.setVal(x)
        return self.function.getVal()
class FunctorFromTF1(object):
    """Callable wrapping a TF1 function stored in a ROOT file."""
    def __init__(self, tfile_name, path):
        # Fetch the TF1 (the original comment said "RooFormulaVar", a
        # copy-paste leftover -- this class wraps a plain TF1). The file
        # handle is kept on the instance so the object stays valid.
        self.tfile = ROOT.TFile.Open(tfile_name)
        self.function = self.tfile.Get(path)
    def __call__(self, x):
        """Evaluate the wrapped TF1 at x."""
        return self.function.Eval(x)
class MultiFunctorFromTF1(object):
    """Piecewise functor: evaluates one of several TF1s, selected by which
    y-range ("border") the second coordinate falls into.

    `paths_and_borders` is an iterable of (path, (low, high)) pairs; each
    border is treated as the half-open interval [low, high).
    """
    def __init__(self, tfile_name, paths_and_borders):
        # Keep the file handle alive so the TF1 objects remain valid.
        self.tfile = ROOT.TFile.Open(tfile_name)
        self.fcns_and_borders = []
        for path, borders in paths_and_borders:
            self.fcns_and_borders.append(
                (self.tfile.Get(path),
                 borders)
            )
    def __call__(self, x, y):
        for fcn, border in self.fcns_and_borders:
            if border[0] <= y < border[1]:
                return fcn.Eval(x)
        # Message typo fixed ("aoutside" -> "outside").
        raise ValueError("MultiFunctorFromTF1: y range outside boundaries!")
class FunctorFromMVA(object):
    """Callable wrapping a booked TMVA reader.

    Call with keyword arguments matching exactly the training variables
    passed at construction time.
    """
    def __init__(self, name, xml_filename, *variables, **kwargs):
        self.reader = ROOT.TMVA.Reader( "!Color:Silent=%s:Verbose=%s" % (kwargs.get('silent','T'), kwargs.get('verbose','F')))
        self.var_map = {}
        self.name = name
        self.variables = variables
        self.xml_filename = xml_filename
        # TMVA reads its inputs through bound single-float buffers, one per
        # training variable; __call__ fills them before evaluating.
        for var in variables:
            self.var_map[var] = array.array('f',[0])
            self.reader.AddVariable(var, self.var_map[var])
        self.reader.BookMVA(name, xml_filename)
    def evaluate_(self): #so I can profile the time needed
        return self.reader.EvaluateMVA(self.name)
    @memo_last
    def __call__(self, **kvars):
        #kvars enforces that we use the proper vars
        # Require the keyword set to match the training variables exactly,
        # in both directions (no missing and no extra names).
        if not (
            all(name in self.variables for name in kvars.keys()) and \
            all(name in kvars.keys() for name in self.variables)
        ):
            raise Exception("Wrong variable names. Available variables: %s" % self.variables.__repr__())
        # Copy the values into the bound buffers, then evaluate once.
        for name, val in kvars.iteritems():
            self.var_map[name][0] = val
        retval = self.evaluate_() #reader.EvaluateMVA(self.name)
        #if retval == 1:
        #    print "returning 1 in %s, kvars: %s" % (self.xml_filename, kvars.items())
        return retval
class MultiFunctorFromMVA(object):
    '''Phil's diboson subtraction implementation.

    Combines one data MVA (weight +1) with several MC MVAs carrying
    negative weights, so that calling the instance returns the
    MC-subtracted response. Each MC weight is the luminosity ratio scaled
    by the MC/data phase-space yields, then negated for subtraction.
    '''
    def __init__(self, name, data_and_lumi, mcs_and_lumis, *variables, **kwargs):
        phase_space = kwargs.get('phase_space','')
        print 'phase_space: %s' % phase_space
        self.functors_and_weights = []
        data_xml, data_lumi = data_and_lumi
        # Data functor enters with unit weight.
        self.functors_and_weights.append(
            (FunctorFromMVA('_'.join([name, data_xml]), data_xml, *variables, **kwargs),
             1.)
        )
        #compute data phase space
        training_path = kwargs.get('training_ntuple','training_ntuple')
        tfile = ROOT.TFile.Open(data_xml.replace('weights.xml','root'))
        training = tfile.Get(training_path)
        data_phase_space = training.GetEntries(phase_space)
        tfile.Close()
        for xml, lumi in mcs_and_lumis:
            # Luminosity normalisation, scaled by the MC/data phase-space
            # yields, and negated so the MC contribution is subtracted.
            weight = data_lumi / lumi
            tfile = ROOT.TFile.Open(xml.replace('weights.xml','root'))
            training = tfile.Get(training_path)
            mc_phase_space = training.GetEntries(phase_space)
            tfile.Close()
            weight *= float(mc_phase_space) / float(data_phase_space)
            weight *= -1
            self.functors_and_weights.append(
                (FunctorFromMVA('_'.join([name, xml]), xml, *variables, **kwargs),
                 weight)
            )
    @memo_last
    def __call__(self, **kvars):
        # Weighted sum over all component functors.
        return sum(
            weight*functor(**kvars) for functor, weight in self.functors_and_weights
        )
def build_roofunctor(filename, wsname, functionname, var='x'):
    """Open `filename`, fetch workspace `wsname`, and wrap `functionname`
    in a RooFunctorFromWS over the independent variable `var`."""
    root_file = ROOT.TFile.Open(filename)
    if not root_file:
        raise IOError("Can't open file: %s" % filename)
    workspace = root_file.Get(wsname)
    return RooFunctorFromWS(workspace, functionname, var)
def make_corrector_from_th2(filename, path):
    """Return a (x, y) -> bin-content lookup for a TH2 in `filename`.

    NOTE: this definition is dead code -- it is overwritten later in this
    module by `make_corrector_from_th2 = make_corrector_from_histo`.
    """
    tfile = ROOT.TFile.Open(filename)
    if not tfile:
        raise IOError("Can't open file: %s" % filename)
    hist = tfile.Get(path).Clone()
    #print hist
    binsx = hist.GetNbinsX()
    binsy = hist.GetNbinsY()
    def refFun(xval,yval):
        #print hist
        xbin = hist.GetXaxis().FindFixBin(xval) #Faster than FindBin
        xbin = (xbin if xbin <= binsx else binsx ) if xbin >= 1 else 1 #Compute underflow and overflow as first and last bin
        ybin = hist.GetYaxis().FindFixBin(yval)
        ybin = (ybin if ybin <= binsy else binsy ) if ybin >= 1 else 1 #Compute underflow and overflow as first and last bin
        prob = hist.GetBinContent(xbin,ybin)
        # Empty bins return a small positive floor so callers can divide.
        if prob:
            return prob
        else:
            return 10**-8
    # raise ZeroDivisionError(" catched trying to return weight for (%.3f,%.3f) ==> (%i,%i) bin out of (%i,%i). Prob: %.3f. Hist: %s : %s. " % (xval, yval, xbin, ybin, binsx, binsy , prob, filename, path))
    return refFun
def make_corrector_from_histo(filename, path, dimensions='2D'):
    """Build a lookup function returning bin contents of a TH1 or TH2.

    dimensions: '2D' (default, case-insensitive) reads a TH2 via (x, y);
    anything else reads a TH1 via (x,). Out-of-range coordinates are
    clamped to the first/last real bin, and zero-content bins yield 1e-8
    so callers can safely divide by the result.
    """
    is2d = (dimensions.lower() == '2d')
    tfile = ROOT.TFile.Open(filename)
    if not tfile:
        raise IOError("Can't open file: %s" % filename)
    hist = tfile.Get(path).Clone()
    binsx = hist.GetNbinsX()
    binsy = hist.GetNbinsY() if is2d else None
    def refFun(xval, yval=None):
        # FindFixBin is faster than FindBin; clamp under/overflow into the
        # first/last real bin.
        xbin = max(min(hist.GetXaxis().FindFixBin(xval), binsx), 1)
        ybin = None
        if is2d:
            # BUG FIX: the original assigned the y-axis bin to `xbin`,
            # clobbering the x lookup and leaving `ybin` as None.
            ybin = max(min(hist.GetYaxis().FindFixBin(yval), binsy), 1)
        prob = hist.GetBinContent(xbin, ybin) if is2d else hist.GetBinContent(xbin)
        if prob:
            return prob
        else:
            return 10**-8
    return refFun
#backward compatibility
make_corrector_from_th2 = make_corrector_from_histo
def build_uncorr_2Droofunctor(functor_x, functor_y, filename, num='numerator', den='denominator'):
    """Build f(x, y) = functor_x(x) * functor_y(y) / scale.

    `scale` is the numerator/denominator integral ratio read once from the
    histograms named `num` and `den` in `filename`.
    """
    root_file = ROOT.TFile.Open(filename)
    num_int = root_file.Get(num).Integral()
    den_int = root_file.Get(den).Integral()
    # Normalisation fixed at build time; captured by the closure below.
    scale = num_int / den_int
    def _f(x, y):
        # (leftover debug `print scale` on every call removed)
        return functor_x(x) * functor_y(y) / scale
    return _f
if __name__ == "__main__":
import doctest; doctest.testmod()
| [
"Silvia.Taroni@cern.ch"
] | Silvia.Taroni@cern.ch |
f3bfb9ab8e84fa184357abde57024da707ea358c | 9e371869045a2f091f633e9335ab091b368f254c | /src/n_hop.py | 0d97d1f5f79216f73ebf2f9686a240b122f69977 | [
"MIT"
] | permissive | liaopeiyuan/information-obfuscation-demo | e10e0bedf49a4e92d387b1c72855455a6a6fb34b | 018cb6a2cce5033bf836d78aa8824204ec5553f7 | refs/heads/main | 2023-07-14T12:06:59.544186 | 2021-08-13T11:06:14 | 2021-08-13T11:06:14 | 395,622,445 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*-
from models import (GAL_Nhop, NeighborClassifier, NodeClassifier,
SharedBilinearDecoder)
from node_attack import NodeAttackRunner
class NHopAttackRunner(NodeAttackRunner):
    """Attack runner pairing a GAL N-hop base model with node-level and
    neighbor-level adversaries."""
    def __init__(self, args):
        super().__init__(args)
    def get_base_model(self):
        """Build the GAL N-hop model around a shared bilinear decoder."""
        device = self.args.device
        decoder = SharedBilinearDecoder(
            self.args.num_rel, 2, self.args.embed_dim, self.args
        ).to(device)
        return GAL_Nhop(
            decoder, self.args.embed_dim, self.args.num_ent, self.edges, self.args
        ).to(device)
    def num_adversaries(self):
        return 2
    def get_ordered_adversary_names(self):
        return ["Node", "Neighbor"]
    def get_adversary_models(self, mode):
        """Instantiate both adversaries over frozen base-model embeddings."""
        frozen_embeddings = self.base_model.encode(None).detach().squeeze(0)
        device = self.args.device
        node_adversary = NodeClassifier(
            self.args.embed_dim, frozen_embeddings
        ).to(device)
        neighbor_adversary = NeighborClassifier(
            self.args.embed_dim, frozen_embeddings, self.edges
        ).to(device)
        return [node_adversary, neighbor_adversary]
if __name__ == "__main__":
    # Direct execution is unsupported; this module is driven by exec.py.
    # Raise explicitly rather than `assert False`, which would be stripped
    # (and silently do nothing) under `python -O`.
    raise SystemExit("Do not run this module directly; use exec.py instead.")
"alexander_liao@outlook.com"
] | alexander_liao@outlook.com |
14bc832d47b3da2154b8587a674dcb20b9082d4a | 02c30e3e2c0f701d77f0a23591027ae62f37a512 | /libs/uix/baseclasses/ico_dir/icobazaar.py | d1952e1f8fc355ded5e1550906d1a7a946896863 | [
"MIT"
] | permissive | mkbeh/CRyptoLab | 5341a48a403ecf23e10248c46e919c1381275551 | 424c938c16c9264e99eff71e4c1a27ca65314d42 | refs/heads/master | 2022-12-22T06:39:36.909313 | 2018-09-25T14:40:32 | 2018-09-25T14:40:32 | 144,743,677 | 0 | 2 | MIT | 2022-12-08T02:22:14 | 2018-08-14T16:09:19 | Python | UTF-8 | Python | false | false | 4,537 | py | # -*- coding: utf-8 -*-
from kivy.lang.builder import Builder
from kivy.properties import ObjectProperty
from kivy.cache import Cache
from kivy.uix.boxlayout import BoxLayout
from libs.customwidgets.ico.cardicobazaar import CardIcoBazaar
from libs.customwidgets.popupcm import PopupCM, PopupCMContent
from libs.utils import utils
Builder.load_string('''
<Icobazaar>:
cats_box: cats_box
upcoming: upcoming
categories: categories
grid_box: grid_box
orientation: 'vertical'
size_hint_y: None
height: self.minimum_height
GridLayout:
id: cats_box
size_hint_y: None
cols: 3
rows: 2
NavigationDrawerIconButton:
text: "Upcoming"
badge_text: '12'
id: upcoming
name: 'upcoming'
on_release: root.on_event(upcoming)
NavigationDrawerIconButton:
text: "Ongoing"
badge_text: '12'
id: ongoing
name: 'ongoing'
on_release: root.on_event(ongoing)
NavigationDrawerIconButton:
text: "Ended"
badge_text: '12'
id: ended
name: 'ended'
on_release: root.on_event(ended)
NavigationDrawerIconButton:
text: "New"
badge_text: '12'
id: new
name: 'new'
on_release: root.on_event(new)
NavigationDrawerIconButton:
text: "All"
badge_text: '12'
id: all
name: 'all'
on_release: root.on_event(all)
NavigationDrawerIconButton:
id: categories
name: 'categories'
text: "Open Categories"
icon: 'menu-down'
on_release:
root.open_categories_popup();
root.on_event(categories)
GridLayout:
id: grid_box
cols: 1
spacing: dp(20)
pos_hint: {'center_x':.5}
size_hint: (.95, None)
''')
class Icobazaar(BoxLayout):
    """Scrollable list of ICO project cards with a category filter bar."""
    cats_box = ObjectProperty(None)
    grid_box = ObjectProperty(None)
    categories = ObjectProperty()
    upcoming = ObjectProperty()
    last_category_btn = None  # Category button most recently pressed (None before first use).
    def __init__(self, **kwargs):
        super(Icobazaar, self).__init__(**kwargs)
        # Popup listing ICO categories ('Категории' is Russian for 'Categories').
        self.popup = PopupCM(title='Категории', content=PopupCMContent())
        self.gen_cards()
    def open_categories_popup(self):
        """
        Open the popup that lists the ICO categories.
        :return:
        """
        self.popup.open()
    def on_event(self, obj):
        """
        Handler fired when a category button is clicked: highlights the
        pressed button, clears the previous highlight, and regenerates
        the card list for the newly selected category.
        :param obj: the pressed NavigationDrawerIconButton
        :return:
        """
        if obj.name != 'categories':
            self.upcoming._active = False
            self.categories._active = False
            try:
                # No-op on the very first click, when nothing was pressed yet.
                self.last_category_btn._active = False
            except AttributeError:
                pass
            obj._active = True
            self.last_category_btn = obj
            # Remove active state for all items in categories popup.
            # NOTE(review): this walks a hard-coded widget-tree path and is
            # fragile against any change in the popup layout.
            cat_items_lst = self.popup.children[0].children[0].children[0].children[0].children
            for cat in cat_items_lst:
                cat._active = False
            self.gen_cards()
    def gen_cards(self):
        """
        Fetch ICO project data for the active category from the local API
        and rebuild the card list.
        :return:
        """
        # Check for active category button.
        if self.last_category_btn is None:
            self.upcoming._active = True
        self.grid_box.bind(minimum_height=self.grid_box.setter('height'))
        # Get active category.
        cat = self.last_category_btn.text.lower() if self.last_category_btn is not None \
            else self.upcoming.text.lower()
        # Get url content.
        url = 'http://127.0.0.1:8000/ico/icobazaar&cat={}&limit=150&skip=0'.format(cat)
        icos_lst = utils.get_url_content(url)
        # Clear widgets and generate cards.
        self.grid_box.clear_widgets()
        # Explicit collection after dropping the old card widgets.
        import gc
        gc.collect()
        for ico_data in icos_lst:
            card = CardIcoBazaar(ico_data)
            self.grid_box.add_widget(card)
        # Set categories box object into cache.
        Cache.register('menu_cats_box')
        Cache.append('menu_cats_box', 'cats_box_obj', self.cats_box)
| [
"mkbehforever@gmail.com"
] | mkbehforever@gmail.com |
ad68caba3c69fd1be9e8dfe396a14348fe8f627a | c79a397e81ecefbf66236d763e86a2d4a431449f | /union_find/union_find_2.py | 97d4712392ff9e40b3fce8913eb53141654f497b | [] | no_license | liweiwei1419/Algorithms-Learning-Python | f9acd83598cfa38dbc35e93bd5ff4655a9836867 | 0288097ea6d49d6fc224c3879709ac0d6e9e5b97 | refs/heads/master | 2021-07-12T23:16:29.938315 | 2020-06-17T05:25:14 | 2020-06-17T05:25:14 | 162,683,186 | 12 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # 并查集第 2 版:设置 parent 数组,查找就变得慢了一些,得一直向上查找;
# 合并的时候,就快了,把其中一个节点的父节点指向另一个节点即可。
# 参考:慕课网:liuyubobobo。
class UnionFind2:
    """Quick-union disjoint-set structure.

    `parent[i]` points towards the root of i's set; find() walks the parent
    chain (cost proportional to tree height) while union() links one root
    beneath the other in O(1) beyond the two find() calls.
    """
    def __init__(self, n):
        # Every element starts out as the root of its own singleton set,
        # so there are initially n distinct sets.
        self.parent = list(range(n))
        self.count = n  # number of elements managed by the structure
    def find(self, p):
        """Return the root (set representative) of element p."""
        assert 0 <= p < self.count
        root = p
        while root != self.parent[root]:
            root = self.parent[root]
        return root
    def is_connected(self, p, q):
        """True iff p and q share a root, i.e. belong to the same set."""
        return self.find(p) == self.find(q)
    def union(self, p, q):
        """Merge the sets containing p and q (no-op if already merged)."""
        root_p = self.find(p)
        root_q = self.find(q)
        if root_p == root_q:
            return
        # Attach p's root beneath q's root.
        self.parent[root_p] = root_q
| [
"121088825@qq.com"
] | 121088825@qq.com |
bef751699b606c824c94c94c6c1eafcd6fb8ca0d | d6760033989f2abbd94d68651eb54a8aac4ac61f | /EduNLP/I2V/i2v.py | c997537646c7d0274d0238f22d02aaf311c0d5fe | [
"Apache-2.0"
] | permissive | astrojuanlu/EduNLP | ba636cf39adc1580d0c2f3bf6f0646139f406c72 | 51bbf2e20828f12eed2f9cd8d176c8650ec357ef | refs/heads/master | 2023-07-16T23:27:38.606705 | 2021-08-13T11:38:46 | 2021-08-13T11:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,967 | py | # coding: utf-8
# 2021/8/1 @ tongshiwei
import json
from EduNLP.constant import MODEL_DIR
from ..Vector import T2V, get_pretrained_t2v as get_t2v_pretrained_model
from ..Tokenizer import Tokenizer, get_tokenizer
from EduNLP import logger
__all__ = ["I2V", "D2V", "get_pretrained_i2v"]
class I2V(object):
    """Item-to-vector: composes a tokenizer with a token-to-vector (t2v) model.

    Parameters
    ----------
    tokenizer: str
        the tokenizer name
    t2v: str
        the name of token2vector model
    args:
        the parameters passed to t2v
    tokenizer_kwargs: dict
        the parameters passed to tokenizer
    pretrained_t2v: bool
        True: load a pretrained t2v model (looked up in ``model_dir``);
        False: build a fresh T2V from ``args``/``kwargs``
    kwargs:
        the parameters passed to t2v
    """

    def __init__(self, tokenizer, t2v, *args, tokenizer_kwargs: dict = None, pretrained_t2v=False, **kwargs):
        self.tokenizer: Tokenizer = get_tokenizer(tokenizer, **tokenizer_kwargs if tokenizer_kwargs is not None else {})
        if pretrained_t2v:
            logger.info("Use pretrained t2v model %s" % t2v)
            self.t2v = get_t2v_pretrained_model(t2v, kwargs.get("model_dir", MODEL_DIR))
        else:
            self.t2v = T2V(t2v, *args, **kwargs)
        # Keep the constructor arguments so save()/load() can round-trip the model.
        self.params = {
            "tokenizer": tokenizer,
            "tokenizer_kwargs": tokenizer_kwargs,
            "t2v": t2v,
            "args": args,
            "kwargs": kwargs,
            "pretrained_t2v": pretrained_t2v
        }

    def __call__(self, items, *args, **kwargs):
        """Alias for :meth:`infer_vector`."""
        return self.infer_vector(items, *args, **kwargs)

    def tokenize(self, items, indexing=True, padding=False, key=lambda x: x, *args, **kwargs) -> list:
        """Tokenize *items*; note that ``indexing``/``padding`` are accepted
        but not forwarded to the tokenizer here."""
        return self.tokenizer(items, key=key, *args, **kwargs)

    def infer_vector(self, items, tokenize=True, indexing=False, padding=False, key=lambda x: x, *args,
                     **kwargs) -> tuple:
        """Return an ``(item_vector, token_vector)`` pair; implemented by subclasses."""
        raise NotImplementedError

    def infer_item_vector(self, tokens, *args, **kwargs) -> ...:
        # First element of the (item_vector, token_vector) pair.
        return self.infer_vector(tokens, *args, **kwargs)[0]

    def infer_token_vector(self, tokens, *args, **kwargs) -> ...:
        # Second element of the (item_vector, token_vector) pair.
        return self.infer_vector(tokens, *args, **kwargs)[1]

    def save(self, config_path, *args, **kwargs):
        """Persist the constructor parameters (not model weights) as JSON."""
        with open(config_path, "w", encoding="utf-8") as wf:
            json.dump(self.params, wf, ensure_ascii=False, indent=2)

    @classmethod
    def load(cls, config_path, *args, **kwargs):
        """Rebuild an instance from a JSON file written by :meth:`save`."""
        with open(config_path, encoding="utf-8") as f:
            params: dict = json.load(f)
            tokenizer = params.pop("tokenizer")
            t2v = params.pop("t2v")
            args = params.pop("args")
            kwargs = params.pop("kwargs")
            # Remaining keys (tokenizer_kwargs, pretrained_t2v, ...) pass through as keywords.
            params.update(kwargs)
            return cls(tokenizer, t2v, *args, **params)

    @classmethod
    def from_pretrained(cls, name, model_dir=MODEL_DIR, *args, **kwargs):
        """Construct an instance from a pretrained model name; implemented by subclasses."""
        raise NotImplementedError

    @property
    def vector_size(self):
        # Dimensionality of the underlying t2v embeddings.
        return self.t2v.vector_size
class D2V(I2V):
    """Document-to-vector wrapper: yields one embedding per item and no
    per-token vectors."""

    def infer_vector(self, items, tokenize=True, indexing=False, padding=False, key=lambda x: x, *args,
                     **kwargs) -> tuple:
        """Return ``(item_vectors, None)``; the token-vector slot is always None."""
        if tokenize is True:
            token_seqs = self.tokenize(items, return_token=True, key=key)
        else:
            token_seqs = items
        item_vectors = self.t2v(token_seqs, *args, **kwargs)
        return item_vectors, None

    @classmethod
    def from_pretrained(cls, name, model_dir=MODEL_DIR, *args, **kwargs):
        """Build a D2V backed by the pretrained model registered as *name*."""
        return cls("text", name, pretrained_t2v=True, model_dir=model_dir)
MODELS = {
"d2v_all_256": [D2V, "d2v_all_256"],
"d2v_sci_256": [D2V, "d2v_sci_256"],
"d2v_eng_256": [D2V, "d2v_eng_256"],
"d2v_lit_256": [D2V, "d2v_lit_256"],
}
def get_pretrained_i2v(name, model_dir=MODEL_DIR):
    """Instantiate the registered pretrained I2V model called *name*.

    Parameters
    ----------
    name: str
        key into the ``MODELS`` registry
    model_dir: str
        directory holding (or receiving) the model files

    Returns
    -------
    i2v model: I2V

    Raises
    ------
    KeyError
        if *name* is not a registered model
    """
    entry = MODELS.get(name)
    if entry is None:
        raise KeyError(
            "Unknown model name %s, use one of the provided models: %s" % (name, ", ".join(MODELS.keys()))
        )
    # Registry entries are [class, *from_pretrained positional args].
    i2v_class, *init_args = entry
    return i2v_class.from_pretrained(*init_args, model_dir=model_dir)
| [
"tongsw@mail.ustc.edu.cn"
] | tongsw@mail.ustc.edu.cn |
512aa69661fa13678466cf8017c83fe50756b7f7 | cdad738a7085a997b5349a94aedb4db8da78da8f | /PythiaCumulant/test/ConfFile_cfg.py | ed16acb4b043e5f41fcc0791745c74b0c6c9bde2 | [
"MIT"
] | permissive | tuos/DirectLoopAnalysis | 4851d122d4723e498705c1d2cb100cbf3eda8d43 | 6f5f02538454d2240d0232665b9b17d07eb79854 | refs/heads/master | 2020-06-12T22:24:01.081755 | 2020-01-21T17:49:37 | 2020-01-21T17:49:37 | 194,446,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Limit the job to the first 10 events.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames = cms.untracked.vstring(
        'file:/afs/cern.ch/user/t/tuos/work/private/cumulant/loops/CMSSW_9_4_6_patch1/src/PythiaCumulant/PythiaCumulant/test/F2D718DA-3361-E811-B68C-0CC47ABB5178.root'
    )
)
# Analyzer reading generator-level particle collections.
process.demo = cms.EDAnalyzer('PythiaCumulant',
   src = cms.untracked.InputTag("generator"),
   #src = cms.untracked.InputTag("generatorSmeared"),
   genParticleSrc = cms.untracked.InputTag("genParticles")
)
process.p = cms.Path(process.demo)
| [
"shengquan.tuo@cern.ch"
] | shengquan.tuo@cern.ch |
9414a6701142d165a30eef94ccded064ddac92a6 | 37cfcdfa3b8f1499f5899d2dfa2a48504a690abd | /test/functional/combine_logs.py | 21e10d9f491b8b44f266a6f5a821e7b93491b7ff | [
"MIT"
] | permissive | CJwon-98/Pyeongtaekcoin | 28acc53280be34b69c986198021724181eeb7d4d | 45a81933a98a7487f11e57e6e9315efe740a297e | refs/heads/master | 2023-08-17T11:18:24.401724 | 2021-10-14T04:32:55 | 2021-10-14T04:32:55 | 411,525,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,352 | py | #!/usr/bin/env python3
"""Combine logs from multiple pyeongtaekcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "pyeongtaekcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Entry point: parse the CLI options, merge the logs and render them."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter,
    )
    arg_parser.add_argument(
        'testdir', nargs='?', default='',
        help=('temporary test directory to combine logs from. '
              'Defaults to the most recent'))
    arg_parser.add_argument(
        '-c', '--color', dest='color', action='store_true',
        help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    arg_parser.add_argument(
        '--html', dest='html', action='store_true',
        help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    opts = arg_parser.parse_args()

    # --color and --html are mutually exclusive output modes.
    if opts.html and opts.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    target_dir = opts.testdir if opts.testdir else find_latest_test_dir()
    if not target_dir:
        print("No test directories found")
        sys.exit(1)
    if not opts.testdir:
        print("Opening latest test directory: {}".format(target_dir), file=sys.stderr)

    print_logs(read_logs(target_dir), color=opts.color, html=opts.html)
def read_logs(tmp_dir):
    """Merge the test framework log and every node debug log under *tmp_dir*
    into one timestamp-ordered stream of log events.

    Delegates to the get_log_events() generator for each input log file."""
    sources = [("test", "%s/test_framework.log" % tmp_dir)]
    node_index = 0
    while True:
        debug_log = "{}/node{}/regtest/debug.log".format(tmp_dir, node_index)
        if not os.path.isfile(debug_log):
            break
        sources.append(("node%d" % node_index, debug_log))
        node_index += 1

    event_streams = [get_log_events(name, path) for name, path in sources]
    return heapq.merge(*event_streams)
def find_latest_test_dir():
    """Return the most recently modified test directory under the system
    tempdir, or None when no readable candidate exists."""
    tmp_root = tempfile.gettempdir()

    def qualifies(basename):
        # Candidate must carry the test prefix and be a readable directory.
        full_path = os.path.join(tmp_root, basename)
        if not basename.startswith(TMPDIR_PREFIX):
            return False
        return os.path.isdir(full_path) and os.access(full_path, os.R_OK)

    candidates = []
    for entry in os.listdir(tmp_root):
        if qualifies(entry):
            candidates.append(os.path.join(tmp_root, entry))

    if not candidates:
        return None
    # Newest by modification time.
    return max(candidates, key=os.path.getmtime)
def get_log_events(source, logfile):
    """Generator function that returns individual log events.

    Log events may be split over multiple lines. We use the timestamp
    regex match as the marker for a new log event.
    Yields LogEvent tuples tagged with *source*."""
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                # if this line has a timestamp, it's the start of a new log event.
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    # Emit the previously accumulated (possibly multi-line) event.
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    timestamp = time_match.group()
                    if time_match.group(1) is None:
                        # timestamp does not have microseconds. Add zeroes so all
                        # timestamps compare consistently in the merged stream.
                        timestamp_micro = timestamp.replace("Z", ".000000Z")
                        line = line.replace(timestamp, timestamp_micro)
                        timestamp = timestamp_micro
                    event = line
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    # Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
                    event += "                                             " + line
            # Flush the final event
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
    """Renders the iterator of log events into text or html.

    Text mode optionally colors each event by its source with ANSI escape
    codes; html mode renders via the 'combined_log_template.html' jinja2
    template."""
    if not html:
        # Unknown sources map to '' (no escape sequence).
        colors = defaultdict(lambda: '')
        if color:
            colors["test"] = "\033[0;36m"   # CYAN
            colors["node0"] = "\033[0;34m"  # BLUE
            colors["node1"] = "\033[0;32m"  # GREEN
            colors["node2"] = "\033[0;31m"  # RED
            colors["node3"] = "\033[0;33m"  # YELLOW
            colors["reset"] = "\033[0m"     # Reset font color
        for event in log_events:
            lines = event.event.splitlines()
            print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
            # Continuation lines of a multi-line event keep the source color.
            if len(lines) > 1:
                for line in lines[1:]:
                    print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
    else:
        # jinja2 is only needed for html output, so import it lazily.
        try:
            import jinja2
        except ImportError:
            print("jinja2 not found. Try `pip install jinja2`")
            sys.exit(1)
        print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
              .get_template('combined_log_template.html')
              .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))

if __name__ == '__main__':
    main()
| [
"cjone98692996@gmail.com"
] | cjone98692996@gmail.com |
500dad7ffb9764ef76085a688676e4c8740d9482 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_mm1.py | c5b93af6d2c6b11ace7e30bed1ee50a1f93ba2cc | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._forcefield1 import _forcefield1
from xcp2k.classes._neighbor_lists5 import _neighbor_lists5
from xcp2k.classes._poisson2 import _poisson2
from xcp2k.classes._periodic_efield2 import _periodic_efield2
from xcp2k.classes._print44 import _print44
class _mm1(InputSection):
    """CP2K ``MM`` input section: fixed FORCEFIELD / NEIGHBOR_LISTS /
    POISSON / PRINT subsections plus a repeatable PERIODIC_EFIELD
    subsection."""

    def __init__(self):
        InputSection.__init__(self)
        # Fixed (single-instance) subsections.
        self.FORCEFIELD = _forcefield1()
        self.NEIGHBOR_LISTS = _neighbor_lists5()
        self.POISSON = _poisson2()
        self.PRINT = _print44()
        # Instances of the repeatable PERIODIC_EFIELD subsection live here.
        self.PERIODIC_EFIELD_list = []
        self._name = "MM"
        self._subsections = {
            'FORCEFIELD': 'FORCEFIELD',
            'NEIGHBOR_LISTS': 'NEIGHBOR_LISTS',
            'POISSON': 'POISSON',
            'PRINT': 'PRINT',
        }
        self._repeated_subsections = {'PERIODIC_EFIELD': '_periodic_efield2'}
        self._attributes = ['PERIODIC_EFIELD_list']

    def PERIODIC_EFIELD_add(self, section_parameters=None):
        """Create a new PERIODIC_EFIELD subsection, optionally set its
        section parameters, register it and return it."""
        section = _periodic_efield2()
        if section_parameters is not None and hasattr(section, 'Section_parameters'):
            section.Section_parameters = section_parameters
        self.PERIODIC_EFIELD_list.append(section)
        return section
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f1a26d8535ee4e801718164bb5381dda69821129 | a9fe1b5c320cdef138ac4a942a8b741c7f27de7c | /LC1165-Single-Row-Keyboard.py | b61ee59f2cb456449c1170110439d12eae92960f | [] | no_license | kate-melnykova/LeetCode-solutions | a6bbb5845310ce082770bcb92ef6f6877962a8ee | ee8237b66975fb5584a3d68b311e762c0462c8aa | refs/heads/master | 2023-06-28T06:35:33.342025 | 2021-07-30T06:59:31 | 2021-07-30T06:59:31 | 325,106,033 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | """
There is a special keyboard with all keys in a single row.
Given a string keyboard of length 26 indicating the layout of
the keyboard (indexed from 0 to 25), initially your finger is
at index 0. To type a character, you have to move your finger
to the index of the desired character. The time taken to move
your finger from index i to index j is |i - j|.
You want to type a string word. Write a function to calculate
how much time it takes to type it with one finger.
Example 1:
Input: keyboard = "abcdefghijklmnopqrstuvwxyz", word = "cba"
Output: 4
Explanation: The index moves from 0 to 2 to write 'c' then to 1
to write 'b' then to 0 again to write 'a'.
Total time = 2 + 1 + 1 = 4.
Example 2:
Input: keyboard = "pqrstuvwxyzabcdefghijklmno", word = "leetcode"
Output: 73
Constraints:
(*) keyboard.length == 26
(*) keyboard contains each English lowercase letter exactly once in some order.
(*) 1 <= word.length <= 10^4
(*) word[i] is an English lowercase letter.
"""
class Solution:
    def calculateTime(self, keyboard: str, word: str) -> int:
        """Total finger travel to type *word*; the finger starts at index 0.

        Runtime complexity: O(n)
        Space complexity: O(1) -- the location table has at most 26 entries.
        """
        locations = {key: i for i, key in enumerate(keyboard)}
        loc = 0
        dist = 0
        for char in word:
            dist += abs(loc - locations[char])
            loc = locations[char]
        return dist

    def calculateTimeNoSpace(self, keyboard: str, word: str) -> int:
        """Table-free variant: re-scans the keyboard for every character.

        Runtime complexity: O(n^2)
        Space complexity: O(1)
        """
        loc = 0
        dist = 0
        for char in word:
            # Pass the keyboard explicitly instead of stashing it on self:
            # the original stored it as instance state, which leaked between
            # calls and made the method non-reentrant.
            new_loc = self._get_loc(keyboard, char)
            dist += abs(loc - new_loc)
            loc = new_loc
        return dist

    def _get_loc(self, keyboard: str, char: str) -> int:
        """Linear-scan index of *char* on *keyboard*."""
        return keyboard.index(char)
if __name__ == '__main__':
from run_tests import run_tests
correct_answers = [
["abcdefghijklmnopqrstuvwxyz", "cba", 4],
["pqrstuvwxyzabcdefghijklmno", "leetcode", 73]
]
methods = ['calculateTime', 'calculateTimeNoSpace']
for method in methods:
print(f'Running tests for {method}')
run_tests(getattr(Solution(), method), correct_answers) | [
"forkatemelnikova@gmail.com"
] | forkatemelnikova@gmail.com |
d710e3ef0ea5e49cc3e3ccc4c458b75b14108bf1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03062/s886815102.py | 0d88d0f6b8fc038f5e52c29a4351c0e0d7bc5afb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | def main():
    n=int(input())
    a=list(map(int,input().split()))
    # Two running DP states; the second starts at -10**18 (effectively -inf)
    # so it cannot win until it becomes reachable.  They appear to track the
    # best achievable total for the two sign parities of the processed prefix
    # -- TODO confirm against the original problem statement.
    dp=[0,-10**18]
    for i in range(n-1):
        dp2=[max(dp[0]+a[i],dp[1]-a[i]),max(dp[0]-a[i],dp[1]+a[i])]
        dp=dp2
    # The final element closes the DP with the matching sign choice.
    print(max(dp[0]+a[-1],dp[1]-a[-1]))
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
371d66ddc5a6c081888fb63f8a81dac1623b1f4d | d3e4c3527884f28ac554c9a919b4859a4fb21a7a | /ckstyle/CssCheckerWrapper.py | 5d86c4842c7202e70c9958514c6b8432ece320fa | [
"BSD-3-Clause"
] | permissive | kxws888/CSSCheckStyle | 4032ca1d2b86d5ac85613bc17125389361678bfc | 4b4b89495fd8bb6a211d22a559e76032cd1b860c | refs/heads/master | 2021-01-16T19:39:23.440607 | 2012-11-09T03:37:44 | 2012-11-09T03:37:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,836 | py | #/usr/bin/python
#encoding=utf-8
import os
from plugins.Base import *
class CssChecker():
    '''CSS checker class; relies on a CSS parser as its helper.'''
    def __init__(self, parser, config = None):
        self.parser = parser
        self.config = config
        # Collected messages: log is level 2, warn is level 1, error is level 0.
        self.logMsgs = []
        self.warningMsgs = []
        self.errorMsgs = []
        # Extra messages, e.g. internal errors of the tool itself.
        self.extraMsgs = []
        # Registered checkers of the different kinds (all come from the plugins dir).
        self.ruleSetCheckers = []
        self.ruleCheckers = []
        self.styleSheetCheckers = []
        # If the parse phase produced errors, record those first.
        self.handleParseErrors()
    def getStyleSheet(self):
        '''Return the styleSheet reference.'''
        return self.parser.styleSheet
    def handleParseErrors(self):
        # Each parse error is a (level, message) pair.
        for msg in self.parser.getParseErrors():
            self.remember(msg[0], msg[1])
    def hasError(self):
        '''Return True when any log/warning/error message was recorded.'''
        return len(self.logMsgs) != 0 or len(self.warningMsgs) != 0 or len(self.errorMsgs) != 0
    def errors(self):
        '''Export the collected messages as (logs, warnings, errors).'''
        return self.logMsgs, self.warningMsgs, self.errorMsgs
    def loadPlugins(self, pluginDir):
        '''Dynamically load the checker classes from the plugins directory.'''
        # ids = {}
        include = self.config.include
        exclude = self.config.exclude
        for filename in os.listdir(pluginDir):
            if not filename.endswith('.py') or filename.startswith('_'):
                continue
            if filename == 'Base.py' or filename == 'helper.py':
                continue
            pluginName = os.path.splitext(filename)[0]
            # Import the plugin module; it must define a class of the same name.
            plugin = __import__("ckstyle.plugins." + pluginName, fromlist = [pluginName])
            pluginClass = None
            if hasattr(plugin, pluginName):
                pluginClass = getattr(plugin, pluginName)
            else:
                print '[TOOL] class %s should exist in %s.py' % (pluginName, pluginName)
                continue
            # Instantiate the plugin class.
            instance = pluginClass()
            # ids[instance.id] = pluginName
            # Honor the include/exclude filters from the config.
            if include != 'all' and include.find(instance.id) == -1:
                continue
            elif exclude != 'none' and exclude.find(instance.id) != -1:
                continue
            if instance.errorMsg.find(';') != -1 or instance.errorMsg.find('\n') != -1:
                print r'[TOOL] errorMsg should not contain ";" or "\n" in %s.py' % pluginName
                continue
            # Register the checker instance.
            self.registerChecker(instance)
    def registerChecker(self, checker):
        '''Dispatch the checker into the list matching its type.'''
        if isinstance(checker, RuleChecker):
            self.registerRuleChecker(checker)
        elif isinstance(checker, RuleSetChecker):
            self.registerRuleSetChecker(checker)
        else:
            self.registerStyleSheetChecker(checker)
    def registerStyleSheetChecker(self, checker):
        self.styleSheetCheckers.append(checker)
    def registerRuleSetChecker(self, checker):
        self.ruleSetCheckers.append(checker)
    def registerRuleChecker(self, checker):
        self.ruleCheckers.append(checker)
    def remember(self, errorLevel, errorMsg):
        '''Record a problem found in the code, filtered by configured errorLevel.'''
        if errorLevel == ERROR_LEVEL.LOG:
            if self.config.errorLevel > 1:
                self.logMsgs.append(errorMsg)
        elif errorLevel == ERROR_LEVEL.WARNING:
            if self.config.errorLevel > 0:
                self.warningMsgs.append(errorMsg)
        elif errorLevel == ERROR_LEVEL.ERROR:
            self.errorMsgs.append(errorMsg)
        else:
            print '[TOOL] wrong ErrorLevel for ' + errorMsg
    def logStyleSheetMessage(self, checker, styleSheet):
        '''Record a problem of the whole StyleSheet.'''
        errorLevel = checker.getLevel()
        errorMsg = checker.getMsg()
        if errorMsg is None or errorMsg == '':
            print '[TOOL] no errorMsg in your plugin, please check it'
        # Substitute the ${file} placeholder, or append the file name.
        if errorMsg.find('${file}') == -1:
            errorMsg = errorMsg + ' (from "' + styleSheet.getFile() + '")'
        else:
            errorMsg = errorMsg.replace('${file}', styleSheet.getFile())
        self.remember(errorLevel, errorMsg);
    def logRuleMessage(self, checker, rule):
        '''Record a problem of a single key/value rule.'''
        errorLevel = checker.getLevel()
        errorMsg = checker.getMsg()
        if errorMsg is None or errorMsg == '':
            print '[TOOL] no errorMsg in your plugin, please check it'
        # Substitute the ${selector}/${name}/${value} placeholders.
        if errorMsg.find('${selector}') == -1:
            errorMsg = errorMsg + ' (from "' + rule.selector + '")'
        else:
            errorMsg = errorMsg.replace('${selector}', rule.selector)
        errorMsg = errorMsg.replace('${name}', rule.roughName.strip())
        errorMsg = errorMsg.replace('${value}', rule.value.strip())
        self.remember(errorLevel, errorMsg);
    def logRuleSetMessage(self, checker, ruleSet):
        '''Record a problem of a rule set.'''
        errorLevel = checker.getLevel()
        errorMsg = checker.getMsg()
        if errorMsg.find('${selector}') == -1:
            errorMsg = errorMsg + ' (from "' + ruleSet.selector + '")'
        else:
            errorMsg = errorMsg.replace('${selector}', ruleSet.selector)
        self.remember(errorLevel, errorMsg);
    def doCheck(self):
        # Rule sets to ignore (currently only the unit-test selectors).
        ignoreRuleSets = self.config.ignoreRuleSets
        def findInArray(array, value):
            for x in array:
                if x == value:
                    return True
            return False
        # Run every ruleset checker against one rule set.
        def checkRuleSet(ruleSet):
            for checker in self.ruleSetCheckers:
                if not checker.check(ruleSet):
                    self.logRuleSetMessage(checker, ruleSet)
        # Run every rule checker against each rule of a rule set.
        def checkRule(ruleSet):
            for checker in self.ruleCheckers:
                for rule in ruleSet._rules:
                    if not checker.check(rule):
                        self.logRuleMessage(checker, rule)
        # Check the stylesheet itself first.
        styleSheet = self.parser.styleSheet
        for checker in self.styleSheetCheckers:
            if not checker.check(styleSheet):
                self.logStyleSheetMessage(checker, styleSheet)
        for ruleSet in styleSheet.getRuleSets():
            # Skip this rule set when it is on the ignore list.
            if findInArray(ignoreRuleSets, ruleSet.selector):
                continue
            checkRuleSet(ruleSet)
            checkRule(ruleSet)
| [
"wangjeaf@gmail.com"
] | wangjeaf@gmail.com |
79e9eb04ba894732a459e4b01ae2261d362576b6 | 0639b8366a5ec5b65fa2097354eafd5a1f73ad0f | /hoodalert/migrations/0004_rename_post_posts_description.py | a95d995b2021efa24eef56d75d243980d8350f44 | [
"MIT"
] | permissive | COdingaorg/Neighbourhood_Alert | cc27a32af7df070f9d49380c4d3f0067f8535668 | 44e202469b4a2410d1dab2244e62915575f8ea84 | refs/heads/main | 2023-06-20T20:38:19.301358 | 2021-07-27T22:36:34 | 2021-07-27T22:36:34 | 388,885,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.2.4 on 2021-07-25 21:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames the Posts model field 'post' to 'description'.
    dependencies = [
        ('hoodalert', '0003_rename_location_business_location_or_description'),
    ]
    operations = [
        migrations.RenameField(
            model_name='posts',
            old_name='post',
            new_name='description',
        ),
    ]
| [
"calemasanga@gmail.com"
] | calemasanga@gmail.com |
0554493246cab9a13e295982e86c557585680403 | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/11dde802-1596.py | 7382d4960741b91aca87fcdb6f1f45c45dde3640 | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
"""
POC Name : 企慧通培训系统通用型SQL注入 2
Author : a
mail : a@lcx.cc
refer : http://www.wooyun.org/bugs/wooyun-2015-0129326
"""
def assign(service, arg):
    """BugScan entry filter: accept only the 'qht_study' service
    (QiHuiTong online training system).  Returns (True, arg) on a match,
    None otherwise."""
    if service != 'qht_study':
        return None
    return True, arg
def audit(arg):
    """Probe *arg* for the SQL injection: the crafted query string makes the
    backing SQL Server compute hashbytes('MD5', '1234'); finding that digest
    in a 500 response confirms the injection executed."""
    injection = "SysAdmin/aRegisAdmin.aspx?type=regisAdmin&clientid=adminName&adminName=admin'%20and%20sys.fn_varbintohexstr(hashbytes(%27MD5%27,%271234%27))>0--"
    target = arg + injection
    status, head, body, errcode, _ = curl.curl2(target)
    # md5('1234') == 81dc9bdb52d04dc20036dbd8313ed055
    if status == 500 and '0x81dc9bdb52d04dc20036dbd8313ed055' in body:
        security_hole(target)
if __name__ == '__main__':
from dummy import *
audit(assign('qht_study', 'http://124.193.233.233/')[1]) | [
"yudekui@wsmtec.com"
] | yudekui@wsmtec.com |
2a493183c3d04027dce5a0966c86c5e83e023540 | 8f2f83bc1381d4ce7fc968aec72fa400aae4155d | /api/smartcontractwallet/requestmodels/createtransactionrequest.py | 45a7a4eb65363aeada372be706d0cc2d9c527908 | [
"MIT"
] | permissive | nifrali/pyStratis | c855fb33be77064c9a741255e324003319a4789f | b1a80bf155b7941e9ef8fc2ea93fa1b08a0c4366 | refs/heads/master | 2023-06-20T16:02:30.863589 | 2021-07-01T19:24:18 | 2021-07-01T19:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | from typing import Optional, List
from pydantic import Field, SecretStr, validator, conint
from pybitcoin import Model, Outpoint, SmartContractParameter
from pybitcoin.types import Address, Money, hexstr
class CreateContractTransactionRequest(Model):
    """A request model for the smartcontractwallet/create endpoint.

    Args:
        wallet_name (str): The wallet name.
        account_name (str, optional): The account name. Default='account 0'
        outpoints (List[Outpoint], optional): A list of the outpoints used to construct the transaction.
        amount (Money, optional): The amount being sent.
        fee_amount (Money): The fee amount (must not exceed 1, enforced below).
        password (SecretStr): The password.
        contract_code (hexstr): The smart contract code hexstring.
        gas_price (int): The amount of gas being used in satoshis.
        gas_limit (int): The maximum amount of gas that can be used in satoshis.
        sender (Address): The address of the sending address.
        parameters (List[SmartContractParameter], optional): A list of parameters for the smart contract.
    """
    wallet_name: str = Field(alias='walletName')
    account_name: Optional[str] = Field(default='account 0', alias='accountName')
    outpoints: Optional[List[Outpoint]]
    amount: Optional[Money]
    fee_amount: Money = Field(alias='feeAmount')
    password: SecretStr
    contract_code: hexstr = Field(alias='contractCode')
    gas_price: conint(ge=100, le=10000) = Field(alias='gasPrice')
    gas_limit: conint(ge=12000, le=250000) = Field(alias='gasLimit')
    sender: Address
    parameters: Optional[List[SmartContractParameter]]

    # Guard against unit mistakes: a fee above 1 is almost certainly an error.
    # noinspection PyMethodParameters,PyUnusedLocal
    @validator('fee_amount', always=True)
    def check_fee_too_high(cls, v, values):
        if v is not None:
            if v > Money(1):
                raise ValueError('Fee should not be more than 1. Check parameters.')
        return v
| [
"skaal@protonmail.com"
] | skaal@protonmail.com |
8c3c54a6db6ad3c4483c0d3590021ca975729a91 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/27495730.py | fbd2677bd68269f36de53c1ffd7c30736c15bdcc | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/27495730.py generated: Fri, 27 Mar 2015 16:10:18
#
# Event Type: 27495730
#
# ASCII decay Descriptor: {[ D_s2*+ -> D0 K+, D*0 K+, D+ K_S0, D*+ K_S0 ]cc}
#
from Configurables import Generation
# Event type 27495730 (see the decay descriptor in the header above).
Generation().EventType = 27495730
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
# Produce the signal sample with Pythia.
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# EvtGen user decay file for the forced D_s2*(2710) decay chain.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Ds2st_2710=DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
# Signal particle IDs: D*_s2+ / D*_s2- (435 / -435).
Generation().SignalPlain.SignalPIDList = [ 435,-435 ]
from Configurables import LHCb__ParticlePropertySvc
# Override the D*_s2 particle properties (PID 435/-435) for this production.
LHCb__ParticlePropertySvc().Particles = [ "D*_s2+ 174 435 1.0 2.710 4.388079327e-24 D_s2*+ 435 0.351483","D*_s2- 178 -435 -1.0 2.710 4.388079327e-24 D_s2*- -435 0.351483" ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
54799a216da1ac96622d8c6b155e31f987d78435 | 03b724302ee3989b97ea11c5323fab639349661a | /thjobsthai/migrations/0001_initial.py | bf3cce65c0e0a00c06cc5eb40a971a42a4608a32 | [] | no_license | saisai/django_jobs_apply | 8e2eccd9e0884d2788d09407c24da12b4c80de53 | 816602e05bdfbef8d5306ca3592fa0d4df538c5e | refs/heads/master | 2022-12-01T19:54:36.119216 | 2020-08-18T23:31:21 | 2020-08-18T23:31:21 | 288,165,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-10 06:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the JobThai model,
    # backed by the 'job_thai' database table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='JobThai',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500)),
                ('link', models.TextField()),
                ('created_date', models.DateField()),
            ],
            options={
                'db_table': 'job_thai',
            },
        ),
    ]
| [
"you@example.com"
] | you@example.com |
3d34bbaa9221ae9eaeb9290f8d4c2c6720415cc8 | 02e5ec4b8b038d335d726d12047d5dacea01456e | /person_django_rest_swagger/serializers.py | b2cc273d6ea98a7394b26d6304baca7b2bcc0e4b | [] | no_license | rahulmoundekar/django_rest_curd_app_swagger | d298d18356ac9253b16320255b329aacd4904a56 | 3473812ae21c5be7b8f2105e8d16aebab54d4fa8 | refs/heads/master | 2021-05-24T19:08:27.658832 | 2020-04-07T06:58:52 | 2020-04-07T06:58:52 | 253,710,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from rest_framework import serializers
from person_django_rest_swagger.models import Person
class PersonSerializer(serializers.ModelSerializer):
    """Serializes every field of the Person model for the REST API."""
    class Meta:
        model = Person
        fields = '__all__'
| [
"rahulmoundekar44@gmail.com"
] | rahulmoundekar44@gmail.com |
72e629552847f36e6921f8e65c120df05721b1c7 | 9e2d79a2cf1dbeaffe8ef897bb53f94af8b5b68c | /ichnaea/models/tests/test_mac.py | 68cca667cd7319c1acf7541ccab7d0f515cab920 | [
"Apache-2.0"
] | permissive | amolk4games/ichnaea | a7d1cbd12b6aa5c0d877fca380080b08fcff24b8 | 907c542da05b428c8e994bce1537390e22b3ca58 | refs/heads/master | 2021-01-19T07:21:54.851167 | 2016-04-08T15:20:37 | 2016-04-08T15:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | from ichnaea.models.mac import (
decode_mac,
encode_mac,
)
from ichnaea.tests.base import (
TestCase,
)
class TestMacCodec(TestCase):
    """Round-trip tests for the MAC encode/decode helpers in both the raw
    6-byte codec and the base64 codec."""

    def test_decode(self):
        # 6 raw bytes decode to a 12-char lowercase hex string.
        value = decode_mac(b'\xab\xcd\xed\x124V')
        self.assertEqual(value, 'abcded123456')
        # Same MAC via the base64 codec.
        value = decode_mac(b'q83tEjRW', codec='base64')
        self.assertEqual(value, 'abcded123456')
    def test_encode(self):
        # 12-char hex string encodes to exactly 6 raw bytes.
        value = encode_mac('abcded123456')
        self.assertEqual(len(value), 6)
        self.assertEqual(value, b'\xab\xcd\xed\x124V')
        value = encode_mac('abcded123456', codec='base64')
        self.assertEqual(value, b'q83tEjRW')
    def test_max(self):
        # Upper boundary: ff:ff:ff:ff:ff:ff.
        value = encode_mac('ffffffffffff')
        self.assertEqual(len(value), 6)
        self.assertEqual(value, b'\xff\xff\xff\xff\xff\xff')
        value = encode_mac('ffffffffffff', codec='base64')
        self.assertEqual(value, b'////////')
    def test_min(self):
        # Lower boundary: 00:00:00:00:00:00.
        value = encode_mac('000000000000')
        self.assertEqual(len(value), 6)
        self.assertEqual(value, b'\x00\x00\x00\x00\x00\x00')
        value = encode_mac('000000000000', codec='base64')
        self.assertEqual(value, b'AAAAAAAA')
| [
"hanno@hannosch.eu"
] | hanno@hannosch.eu |
d6173e859ed2ee3b0ead3b81a2fbabed554928d5 | 82555c9b4615a14bfe4bb46a0981820b7ccba8d7 | /D/test_pasted_from_page.py | 584aee8f24d578cb618a5ce84a837a36ea5a8976 | [
"MIT"
] | permissive | staguchi0703/ABC160 | 2f3cc23e0566943a76f288d190ee4977131817bb | d71c9756a2195a4f8b98dc0bb2d220e90cacdce3 | refs/heads/master | 2021-05-17T10:56:30.289570 | 2020-03-28T13:47:29 | 2020-03-28T13:47:29 | 250,745,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | #
from resolve import resolve
####################################
####################################
# 以下にプラグインの内容をペーストする
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    """Runs resolve() against the official sample inputs of the task."""
    def assertIO(self, input, output):
        """Feed *input* to resolve() through a fake stdin, capture stdout,
        and compare the captured text (minus the trailing newline) with
        *output*.  The real stdin/stdout are restored afterwards."""
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_入力例_1(self):
        input = """5 2 4"""
        output = """5
4
1
0"""
        self.assertIO(input, output)
    def test_入力例_2(self):
        input = """3 1 3"""
        output = """3
0"""
        self.assertIO(input, output)
    def test_入力例_3(self):
        input = """7 3 7"""
        output = """7
8
4
2
0
0"""
        self.assertIO(input, output)
    def test_入力例_4(self):
        input = """10 4 8"""
        output = """10
12
10
8
4
1
0
0
0"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"s.taguchi0703@gmail.com"
] | s.taguchi0703@gmail.com |
a867dfcb3aaf4c1336a595ff3acbdb224162f108 | 80a1be2c9642341545e625685886ed8c93ed23b9 | /arriendoMiLibro/misLibrosOwner/forms.py | df2006d27b344b2e36f793597f6a4f7bb93ae2a6 | [] | no_license | leoBravoRain/arriendoMiLibro | 94a1923b68b052c3fd719412775db37508589459 | 35afd4b65385c15fd8372722200796329a225218 | refs/heads/master | 2020-08-07T16:09:06.065021 | 2018-06-26T02:34:55 | 2018-06-26T02:34:55 | 131,909,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | # -*- coding: utf-8 -*-
from django import forms
from libros.models import LibrosParaArrendar
from django.forms import ModelForm
from usuarios.models import Usuario
from arriendoMiLibro.variablesGlobales import maxLengthDefault
# Editar informacion de usuaior
class EditarInformacionPerfilUsuario(ModelForm):
# ciudad = forms.ChoiceField(choices = Ciudad.objects.all())
class Meta:
model = Usuario
exclude = ["user","email","fechaCreacion", "password"]
widgets = {
'nombre': forms.TextInput(attrs={'placeholder': 'Nombre'}),
'numeroContacto': forms.TextInput(attrs={'placeholder': 'Numero de contacto'}),
# 'password': forms.PasswordInput(attrs={'placeholder': 'Clave'}),
}
# Editar libro
class EditarLibro(ModelForm):
class Meta:
model = LibrosParaArrendar
exclude = ['owner','fechaCreacion','estado']
widgets = {
'titulo': forms.TextInput(attrs={'placeholder': 'Titulo de libro'}),
'autor': forms.TextInput(attrs={'placeholder': 'Autor del libro'}),
'resumen': forms.Textarea(attrs={'placeholder': 'Breve resumen del libro', "maxlength" : maxLengthDefault, "size": maxLengthDefault, "class": "img-responsive"}),
'comentario': forms.Textarea(attrs={'placeholder': 'Comentario (idioma, estado del libro, etc)', "maxlength" : 10, "class": "img-responsive"}),
}
# Formulario para registrar a un owner
class AgregarLibro(ModelForm):
class Meta:
model = LibrosParaArrendar
exclude = ['owner','fechaCreacion','estado']
widgets = {
'titulo': forms.TextInput(attrs={'placeholder': 'Titulo de libro'}),
'autor': forms.TextInput(attrs={'placeholder': 'Autor del libro'}),
'resumen': forms.Textarea(attrs={'placeholder': 'Breve resumen del libro', "maxlength" : maxLengthDefault, "size": maxLengthDefault, "class": "img-responsive"}),
'comentario': forms.Textarea(attrs={'placeholder': 'Comentario (idioma, estado del libro, etc)', "maxlength" : 10, "class": "img-responsive"}),
}
help_texts = {
'foto' : 'Foto del libro',
}
# Fomulario para cambiar estado de libro
class CambiarEstadoDeLibro(ModelForm):
class Meta:
model = LibrosParaArrendar
fields = ["estado"]
| [
"gian.bravo@alumnos.usm.cl"
] | gian.bravo@alumnos.usm.cl |
ac340560aaff7c0d0112525dfb2bca7378791bdc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/183/usersdata/355/107995/submittedfiles/escadarolante.py | 1a3884391c6b48bee70782f254cce1adf18dbe87 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # -*- coding: utf-8 -*-
npessoas=int(input('Digite o número de pessoas detectadas pelo sensor: '))
soma=0
soma2=0
instante=0
for i in range(0,npessoas,1):
soma=instante-soma
soma2=soma2+soma
instante=float(input('Digite o instante em que a pessoa foi detectada: '))
print(soma2+10) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
4ed63a194e46e6ec0133622fac16fb04d230513d | 0b8eefbd29abda41cbe3725621a208c90aa9b6f0 | /Problemset/relative-sort-array/relative-sort-array.py | 01d5a054220ddad076f0d6e990023dee45ace2e1 | [
"MIT"
] | permissive | KivenCkl/LeetCode | bf2d86c6d0a4a3cd136ed3ce74b3561ca26f510d | fcc97c66f8154a5d20c2aca86120cb37b9d2d83d | refs/heads/master | 2021-07-07T12:57:28.252071 | 2020-09-13T05:19:13 | 2020-09-13T05:19:13 | 185,034,499 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 620 | py |
# @Title: 数组的相对排序 (Relative Sort Array)
# @Author: KivenC
# @Date: 2019-07-18 14:56:57
# @Runtime: 48 ms
# @Memory: 13.1 MB
class Solution:
def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
# 统计 arr1 中的元素,按照 arr2 相对顺序填放,剩下的元素按照升序放置
import collections
counter = collections.Counter(arr1)
res = []
for a in arr2:
res.extend([a] * counter[a])
del counter[a]
for k in sorted(counter.keys()):
res.extend([k] * counter[k])
return res
| [
"chen941229@126.com"
] | chen941229@126.com |
4c2040871eb316dfbdd91a29953777fda947cfbb | 360e1f69f4c0923c5d79bc82aa33c0fd4e80b71e | /RECURSION/Reverse_a_string_recurssion.py | 53056e1653e7608aa8159a683d5cf2066771dbbd | [] | no_license | Vijay1234-coder/data_structure_plmsolving | 04e52fe6c918313e13d39107a2ded8b47645bb12 | d449b266295d1ae55613cdcfd9b22ad9cee3dfbe | refs/heads/master | 2023-08-01T00:55:28.825972 | 2021-09-12T15:20:12 | 2021-09-12T15:20:12 | 387,782,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py |
def reverse(s):
if len(s)<= 1:
return s
else:
return reverse(s[1:])+s[0] # 'bc'+'a'
#'c'+'b'+'a'
print(reverse('abc'))
| [
"77201164+Vijay1234-coder@users.noreply.github.com"
] | 77201164+Vijay1234-coder@users.noreply.github.com |
2e6eacc165e2ade818208ef44b1eac4d38c1a04b | a9a90eae727590f0ccffaa255ffeaa194309fbe9 | /Codekata/oddadd.py | f24be72dff28c32c3555a9d6cff21e247eba6288 | [] | no_license | dhanuskarthikeyan/guvi | 18c39674d3ee8e0012caef781d7905e541792174 | 671d64189f6039ffad8d91cab13942aafa87bf29 | refs/heads/master | 2020-06-03T00:07:45.041170 | 2019-07-08T17:39:33 | 2019-07-08T17:39:33 | 191,355,054 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | num,n=(raw_input().split())
num=int(num)
n=int(n)
num=n+num
if(num>=0):
if(num%2==0):
print "even"
else:
print "odd"
else:
print "invalid"
| [
"noreply@github.com"
] | dhanuskarthikeyan.noreply@github.com |
fa07553477e3bb2ecbeb87bd1383a2194282579c | b8eb666c8b6fe4610d87bff8048f4a95a1c5b549 | /测试/UI自动化/测试工具__Selenium/selenium/Phy/元组学习.py | 659d98f549399863fa07b324050146c658ed72dc | [] | no_license | cainiaosun/study | 1e983e404005e537410b205634a27cee974faba0 | 91df9b63cda1839b7fc60de3b5f1eb19ccc33a1f | refs/heads/master | 2020-05-30T09:59:19.749099 | 2019-11-22T10:39:12 | 2019-11-22T10:39:12 | 189,641,828 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py | #coding=UTF-8
import random
import random
list=[]
s=0
for i in range(1,5):
for j in range(1,5):
for k in range(1,5):
if i!=j and j<>k:
list.append(str(i)+str(j)+str(k))
s=s+1
print len(list)
print s
if len(list)==s:
print "是相等的!"
else:
print "不相等!"
print list[random.randrange(1,len(list))]
import math
for n in range(1,1):
i=math.sqrt(n+100)
print i
j=math.sqrt(n+268)
print j
if i/2.0==int(i/2) and j/2.0==int(j/2):
print n
break
import time
#print help(time.strftime)
print time.strftime("%Y")
list=[90,19,8,99,87,45,109]
list.sort()
print u"sort排序输出:",list
list=[90,19,8,99,87,45,109]
i=len(list)
for b in range(1,i):
i=i-1
for a in range(0,i):
if list[a+1]<list[a]:
temp=list[a+1]
list[a+1]=list[a]
list[a]=temp
print u"冒泡排序输出:",list
print '*'*10
for i in range(5):
print "* *"
print '*'*10
import sys
#sys.stdout.write(chr(1))
temp=0#正常产仔的兔子
temp1=0#剩余一个月产仔的兔子
temp2=1#剩余2个月产仔的兔子
m=12#int(raw_input(u"请输入月份:"))
for i in range(1,m+1):
temp=temp+temp1
temp22=temp2
temp2=temp
temp1=temp22
print "24个月后的兔子数量:",temp+temp1+temp2
f1=1
f2=1
for i in range(1,24):
#print "%12d%12d"%(f1,f1)
if (i%2)==0:
print ''
f1=f1+f2
f2=f1+f2
for i in range(1,10):
for j in range(0,10):
for k in range(0,10):
if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):
print int(str(i)+str(j)+str(k))
import sys
from sys import stdout
n=45
print '数值:n=%d'%n
list=[]
for i in range(2,n+1):
while n!=0:
if n%i==0:
list.append(str(i))
sys.stdout.write(str(i))
sys.stdout.write("*")
n=n/i
else:
break
print "%d"%n
for i in range(0,len(list)):
if i<len(list)-1:
sys.stdout.write(list[i]+"*")
else:
sys.stdout.write(list[i])
h=100
sum=0
for i in range(1,11):
if i==1:
print ''
sum=sum+h
h=h/2.0
sum=sum+2*h
print h
print sum
| [
"1551577567@qq.com"
] | 1551577567@qq.com |
ba376912f0e12d134b662c53c1aadd34496d5a74 | 3a9f63f506172ac2d4a1ca9744fedd8f9b2b1628 | /pytext/data/__init__.py | 0ea8bef648e40c61f8aa137845996fdabce86390 | [
"BSD-3-Clause"
] | permissive | czHP0616/pytext | 4c40a8f3afa48284e2919e54d1b489830a321eed | 64ab1835905dea2e7797e6bc11398c55941fa728 | refs/heads/master | 2020-05-25T09:21:52.394044 | 2019-05-20T21:36:39 | 2019-05-20T21:39:33 | 187,734,243 | 0 | 0 | NOASSERTION | 2019-05-21T00:46:06 | 2019-05-21T00:46:06 | null | UTF-8 | Python | false | false | 1,807 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .batch_sampler import (
BaseBatchSampler,
EvalBatchSampler,
RandomizedBatchSampler,
RoundRobinBatchSampler,
)
from .bptt_lm_data_handler import BPTTLanguageModelDataHandler
from .compositional_data_handler import CompositionalDataHandler
from .contextual_intent_slot_data_handler import ContextualIntentSlotModelDataHandler
from .data import Batcher, Data, PoolingBatcher, generator_iterator
from .data_handler import BatchIterator, CommonMetadata, DataHandler
from .disjoint_multitask_data import DisjointMultitaskData
from .disjoint_multitask_data_handler import DisjointMultitaskDataHandler
from .doc_classification_data_handler import DocClassificationDataHandler, RawData
from .joint_data_handler import JointModelDataHandler
from .language_model_data_handler import LanguageModelDataHandler
from .pair_classification_data_handler import PairClassificationDataHandler
from .query_document_pairwise_ranking_data_handler import (
QueryDocumentPairwiseRankingDataHandler,
)
from .seq_data_handler import SeqModelDataHandler
__all__ = [
"Batcher",
"BaseBatchSampler",
"BatchIterator",
"BPTTLanguageModelDataHandler",
"CommonMetadata",
"CompositionalDataHandler",
"ContextualIntentSlotModelDataHandler",
"Data",
"DataHandler",
"DisjointMultitaskData",
"DisjointMultitaskDataHandler",
"DocClassificationDataHandler",
"EvalBatchSampler",
"generator_iterator",
"JointModelDataHandler",
"LanguageModelDataHandler",
"PairClassificationDataHandler",
"PoolingBatcher",
"RandomizedBatchSampler",
"QueryDocumentPairwiseRankingDataHandler",
"RawData",
"RoundRobinBatchSampler",
"SeqModelDataHandler",
]
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b9a66d204ad06b3325735e7e16ef709e831b14d2 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil57.py | 4d55af20dcfac3d89ea1bfc7d95f538b60cedb64 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # qubit number=4
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += SWAP(2,0) # number=8
prog += SWAP(2,0) # number=9
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil57.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
65d874a8d7d0ba1e2d8d09e04d255d0fb375f38d | a84e1a1aac96612b32ba5adcc49a4005c0c5129e | /tensorflow_probability/python/internal/backend/numpy/raw_ops.py | 79f2e95696806a3b0f7425925a0bac61afb756f4 | [
"Apache-2.0"
] | permissive | jedisom/probability | 4fc31473d691d242a3e88c179ae3a9c555a29bb6 | 6791e7ce1c2b0a9057a19a8ea697aeaf796d4da7 | refs/heads/master | 2022-04-23T00:21:46.097126 | 2020-04-22T20:03:04 | 2020-04-22T20:04:59 | 258,031,151 | 1 | 0 | Apache-2.0 | 2020-04-22T22:08:57 | 2020-04-22T22:08:56 | null | UTF-8 | Python | false | false | 1,831 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow general top-level functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
__all__ = [
'MatrixDiagPartV2',
]
JAX_MODE = False
def _matrix_diag_part_v2(input, k, padding_value, name=None): # pylint: disable=redefined-builtin,unused-argument
"""Implements tf.raw_ops.MatrixDiagPartV2, for scalar k."""
if np.array(k).ndim > 0:
raise NotImplementedError
shp = np.shape(input)
if JAX_MODE:
if len(shp) > 2:
from jax import vmap # pylint: disable=g-import-not-at-top
return vmap(_matrix_diag_part_v2, (0, None, None))(
input, k, padding_value)
return np.diag(input, k=k)
input = np.reshape(input, (-1, shp[-2], shp[-1]))
output = np.array([np.diag(arr, k=k) for arr in input])
return output.reshape(*(shp[:-2] + output.shape[1:]))
MatrixDiagPartV2 = utils.copy_docstring( # pylint: disable=invalid-name
'tf.raw_ops.MatrixDiagPartV2',
_matrix_diag_part_v2)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
f541408a9b1aac2d3114dd958181e6ed89be2153 | c08e62724137acebcae7f7badf0176f7f73e64fd | /ecommerce/settings.py | f271e9e79c06df710e99651742f27a7a4a20ed0d | [] | no_license | keefm6776/ij-artefact-sales-site | 91809b8d3c975ea7b681acae62382b2146348611 | 826db92776c77b57a3f6da7727ba5fe9471e6662 | refs/heads/master | 2022-12-08T10:43:06.847267 | 2020-01-13T14:52:26 | 2020-01-13T14:52:26 | 216,651,747 | 0 | 1 | null | 2022-11-22T04:46:49 | 2019-10-21T19:43:42 | Python | UTF-8 | Python | false | false | 4,793 | py | """
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.11.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
#import env
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', 'ijones-artefact-sales.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_forms_bootstrap',
'accounts',
'artefacts',
'cart',
'checkout',
'customer',
'storages',
'bids',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
'cart.contexts.cart_contents',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/databases
if "DATABASE_URL" in os.environ:
DATABASES = {'default': dj_database_url.parse(
os.environ.get('DATABASE_URL'))}
else:
print("Database URL not found. Using SQLite instead")
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.\
UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.\
MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.\
CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.\
NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend',
'accounts.backends.CaseInsensitiveAuth']
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
AWS_S3_OBJECT_PARAMETERS = {
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'CacheControl': 'max-age=94608000'
}
AWS_STORAGE_BUCKET_NAME = 'ij-artefact-sales'
AWS_S3_REGION_NAME = 'eu-west-1'
AWS_ACCESS_KEY_ID = os.environ.get("AWS_SECRET_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_DEFAULT_ACL = None
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIAFILES_LOCATION = 'media'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
STRIPE_PUBLISHABLE = os.getenv('STRIPE_PUBLISHABLE')
STRIPE_SECRET = os.getenv('STRIPE_SECRET')
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
| [
"keefm_6776@yahoo.com"
] | keefm_6776@yahoo.com |
95b961d36c5737ee91cf28899c9db819935a2260 | 8db6b0404f179332e900c09bdb7acbc0779dc250 | /reinforcement_learning/0x00-q_learning/0-load_env.py | 113d10d342b3a1e2dabb244ba52a86677f8b7607 | [] | no_license | chriswill88/holbertonschool-machine_learning | 6f1f900a0e5da013608b4be3e60af15872dc1f99 | 05eabebe5e5c050b1c4a7e1454b947638d883176 | refs/heads/master | 2022-12-30T08:35:50.216909 | 2020-10-18T23:10:23 | 2020-10-18T23:10:23 | 255,544,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | #!/usr/bin/env python3
"""this module contains a function for task 0"""
import gym
def load_frozen_lake(desc=None, map_name=None, is_slippery=False):
"""creates the frozen lake enviroment"""
env = gym.make(
'FrozenLake-v0', desc=desc, map_name=map_name, is_slippery=is_slippery)
return env
| [
"williechri79@gmail.com"
] | williechri79@gmail.com |
d9a98e0727826a2e9368331ebbee230d42859401 | c6da4e00eb27ff33becd0b2f7e962b5cc43f9b20 | /proximal.py | ef75b6639a3640ce62538b70545bdfd4dc29a26f | [] | no_license | cyber-meow/FISTA | 93c61653e55e9b02eb5659cc8e60f17da7f8bb71 | c85d5364083ab69f1476b225f6b71713ac4c02dd | refs/heads/master | 2020-04-13T21:15:55.119870 | 2019-01-02T17:09:36 | 2019-01-02T17:09:36 | 163,451,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | import numpy as np
import torch
import pywt
def soft_thresholding(x, th):
return np.sign(x) * np.maximum(np.abs(x)-th, 0)
class SoftThresholding(object):
def __init__(self, lamb):
self.lamb = lamb
def __call__(self, x, gamma):
th = self.lamb * gamma
device = 'cuda' if x.is_cuda else 'cpu'
x = soft_thresholding(x.cpu().detach().numpy(), th)
x = torch.tensor(x, dtype=torch.float, requires_grad=True).to(device)
return x
class WaveletST(object):
def __init__(self, lamb, wavelet='db4'):
self.lamb = lamb
self.wavelet = wavelet
def __call__(self, x, gamma):
th = self.lamb * gamma
device = 'cuda' if x.is_cuda else 'cpu'
x_wav = pywt.wavedec2(x.cpu().detach().numpy(), self.wavelet)
x_wav[0] = soft_thresholding(x_wav[0], th)
for i, coeffs in enumerate(x_wav[1:]):
cH = soft_thresholding(coeffs[0], th)
cV = soft_thresholding(coeffs[1], th)
cD = soft_thresholding(coeffs[2], th)
x_wav[i+1] = cH, cV, cD
x = pywt.waverec2(x_wav, self.wavelet)
return torch.tensor(
x, dtype=torch.float,
requires_grad=True).to(device)
class ProjectInf(object):
def __init__(self, lamb):
self.lamb = lamb
def __call__(self, x, gamma):
return torch.clamp(x, -self.lamb, self.lamb)
| [
"sjungle305@gmail.com"
] | sjungle305@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.