blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44efa17d2f52a95bd7265e1773831210ac6db66a | a06c9d95093f8c33aefb13d5a46c1466c43cc1e8 | /Aulas/aula 3/aula3.py | fc75a2811210d51ced11feea1e4e87795a300c9f | [] | no_license | danrleydaniel/pygame | 3b7062a24ce1659f1c23bff70af0a372fba0f5c7 | 9de33102920d021a5f28b79a8fd04ba89e9c07f2 | refs/heads/main | 2023-08-14T16:27:00.766939 | 2021-09-30T17:08:56 | 2021-09-30T17:08:56 | 307,095,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import pygame
def main():
    """Open a 300x300 window titled "Iniciando com Pygame" and run the
    render loop at roughly 27 FPS until the window is closed."""
    pygame.init()
    screen = pygame.display.set_mode([300, 300])
    pygame.display.set_caption("Iniciando com Pygame")
    clock = pygame.time.Clock()
    white = (255, 255, 255)
    panel = pygame.Surface((200, 200))
    done = False
    while not done:
        # The window close button is the only event we react to.
        for ev in pygame.event.get():
            done = done or ev.type == pygame.QUIT
        clock.tick(27)  # cap the frame rate
        screen.fill(white)
        screen.blit(panel, [10, 10])
        pygame.display.update()
    pygame.quit()
main()
| [
"noreply@github.com"
] | noreply@github.com |
0b53a2dd76f8d99d925bb162548bc01e3f6553b1 | 91483aeea3761368b852bc544fb558db84fbbca1 | /mysite/settings.py | 6bcf17a6dd43653bff1ec8b8b240f53d7bc14a3e | [] | no_license | nra-16/myfirstblog | 7a68efcae540055da7337c1c4de44f8e0511a146 | a3f358bca37253b5c4b99911847061239d3d32ee | refs/heads/master | 2021-01-10T05:45:09.381908 | 2016-03-05T14:59:22 | 2016-03-05T14:59:22 | 53,207,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,236 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '=4)8g+_&goqd_zpzz)m_s3v69_q5h93lzgo&4-33e3*a0v5vwf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; production must list served hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]
# MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name (this is a 1.9 project).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Default: a local SQLite file next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Amsterdam'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"punitharamani@hotmail.co.uk"
] | punitharamani@hotmail.co.uk |
5fa8083c771b118d03d567b6878554f82c71120c | bd187ecc8a94460cd17d3c13aa469c25a3f3be3a | /mainsite/migrations/0006_auto_20191127_0915.py | 75e8efb19fdb91c2f6c2a2354e86433dc4c9a946 | [] | no_license | ozkilim/DinosaurDating | 3226acb3f4987534ce5ceed7649d76d47b51065e | 44bb4583f50e7a5c903040ab80a63ba390330d35 | refs/heads/master | 2021-11-06T16:18:25.192520 | 2019-12-10T14:47:22 | 2019-12-10T14:47:22 | 224,671,044 | 0 | 0 | null | 2021-09-08T01:28:17 | 2019-11-28T14:30:07 | Python | UTF-8 | Python | false | false | 389 | py | # Generated by Django 2.2.7 on 2019-11-27 09:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Migrations that must be applied before this one.
    dependencies = [
        ('mainsite', '0005_atendee_event'),
    ]
    # Auto-generated schema change: redefine `looking_for` on the `atendee`
    # model as a CharField with max_length=100.
    operations = [
        migrations.AlterField(
            model_name='atendee',
            name='looking_for',
            field=models.CharField(max_length=100),
        ),
    ]
| [
"ozkilim@hotmail.co.uk"
] | ozkilim@hotmail.co.uk |
ff64debff15d72ec3bafdf254c48b07687cfa1bc | fa9d297de5b007e249511191ad9ce99ebf07d640 | /Nodoterreno.py | 20f1479c82ccc1144086393825b8ba26671f37e8 | [] | no_license | LuisBarrera23/IPC2_Proyecto1_202010223 | 0eb9e4e86151a8fae6ea4730b8e666228901e31b | 740dbb1cddf9a80c079503c732eb2998197d7382 | refs/heads/main | 2023-08-09T22:32:14.116505 | 2021-08-30T03:46:56 | 2021-08-30T03:46:56 | 394,154,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | class nodoterreno:
def __init__(self,terreno=None,siguiente=None):
self.terreno=terreno
self.siguiente=siguiente | [
"luisbarrera5662@gmail.com"
] | luisbarrera5662@gmail.com |
c6b5593b63f105914856900aebbc5be8af1a513d | 7e90a1f8280618b97729d0b49b80c6814d0466e2 | /workspace_pc/catkin_ws/cartographer_ws/build_isolated/jackal_navigation/catkin_generated/generate_cached_setup.py | 4daf5fb9eb5f1b0d60987d0c7e079fc39de7971f | [] | no_license | IreneYIN7/Map-Tracer | 91909f4649a8b65afed56ae3803f0c0602dd89ff | cbbe9acf067757116ec74c3aebdd672fd3df62ed | refs/heads/master | 2022-04-02T09:53:15.650365 | 2019-12-19T07:31:31 | 2019-12-19T07:31:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | # -*- coding: utf-8 -*-
# NOTE: auto-generated by catkin's cmake templates.  It captures the shell
# environment produced by the package's env.sh into a cached setup script so
# later shells can source it quickly.
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/gse5/catkin_ws/cartographer_ws/install_isolated;/opt/ros/melodic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Capture the environment that sourcing env.sh would produce (list of lines).
code = generate_environment_script('/home/gse5/catkin_ws/cartographer_ws/devel_isolated/jackal_navigation/env.sh')
output_filename = '/home/gse5/catkin_ws/cartographer_ws/build_isolated/jackal_navigation/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script as executable for the owning user.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"sh9339@outlook.com"
] | sh9339@outlook.com |
7f6063443980fa7eed25912a3dd0b1c477be9440 | 9940a37b520a57ed00e85e2d54991b117ce1a646 | /leetcode/python/binary_tree_postorder_traversal.py | 0dd3ecf67a7204502c52faa593b7ddf21fa20923 | [] | no_license | rioshen/Problems | e36e528f49912ff233f5f5052a7ca8ac95be4f0a | 1269b05a51e834e620d0adf4c3a10fe1a917b458 | refs/heads/master | 2020-12-03T19:37:38.281260 | 2015-04-09T17:46:59 | 2015-04-09T17:46:59 | 29,544,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | #!/usr/bin/env python
class Solution:
    def postorderTraversal(self, root):
        """Return the postorder (left, right, node) traversal of a binary tree.

        The original body was truncated mid-branch (a dangling ``else:``) and
        its visible logic appended values before visiting the right subtree,
        which is not postorder.  This implementation produces a reversed
        preorder (node, right, left) with an explicit stack and reverses it.

        :param root: tree root exposing ``val``/``left``/``right``, or None
        :return: list of node values in postorder
        """
        if not root:
            return []
        result, stack = [], [root]
        while stack:
            node = stack.pop()
            result.append(node.val)
            # Push left before right so the right child is visited first.
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)
        # Reversed (node, right, left) order is exactly (left, right, node).
        return result[::-1]
"rioxshen@gmail.com"
] | rioxshen@gmail.com |
1eb48a906c41d240228e260d96f74a91e308d423 | 2afb1095de2b03b05c8b96f98f38ddeca889fbff | /web_scrapping/try_steam_parse.py | f76504a9eb079147e18d2455c67b424ba847329a | [] | no_license | draganmoo/trypython | 187316f8823296b12e1df60ef92c54b7a04aa3e7 | 90cb0fc8626e333c6ea430e32aa21af7d189d975 | refs/heads/master | 2023-09-03T16:24:33.548172 | 2021-11-04T21:21:12 | 2021-11-04T21:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | import glob
# Parse previously-downloaded Steam search-result pages and assemble a CSV of
# game name / release date / price information.
import pandas as pd
from bs4 import BeautifulSoup
df = pd.DataFrame()   # name + release date
df1 = pd.DataFrame()  # original (undiscounted) price column
df2 = pd.DataFrame()  # discounted price column
for one_file in glob.glob("steam_html_file/*.html"):
    # Use a context manager so every HTML file handle is closed
    # (the original opened files and never closed them).
    with open(one_file, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    items = soup.find("div", id="search_result_container")
    for item in items.find_all("a"):
        try:
            # Discounted games carry extra child tags under the shared price
            # container, which breaks the regular tag layout.
            price_change = item.find(
                "div", class_="col search_price discounted responsive_secondrow")
            if not price_change:
                # Price of games whose price has not changed.
                original_price = item.find(
                    "div",
                    class_="col search_price_discount_combined responsive_secondrow"
                ).get_text().strip()
            else:
                # NOTE: when a price change is found we still emit an empty
                # placeholder so the columns stay aligned for the joins below.
                original_price = ""
            # DataFrame.append is deprecated in modern pandas (use pd.concat);
            # kept here for behaviour parity with the original script.
            df1 = df1.append({
                "3.original price": original_price
            }, ignore_index=True)
            if price_change:
                # Current price of games whose price changed.
                changed_price = price_change.get_text().strip()
            else:
                changed_price = ""
            df2 = df2.append({
                "4.changed price": changed_price
            }, ignore_index=True)
            # price extraction done
            # TODO: merge the changed price into the unchanged-price column;
            # research: filling gaps in one column when joining DataFrames.
            name = item.find(
                "div", class_="col search_name ellipsis").find("span").get_text().strip()
            release_time = item.find(
                "div", class_="col search_released responsive_secondrow").get_text().strip()
            df = df.append({
                "1.name": name,
                "2.release_time": release_time,
            }, ignore_index=True)
        except Exception:
            # Malformed result row: skip it.  (Was a bare `except:`, which
            # also swallowed SystemExit/KeyboardInterrupt.)
            pass
df2 = df1.join(df2)
df = df.join(df2)
print(df)
df.to_csv("steam_html_file/steam_fps_game.csv", encoding="utf-8-sig")
| [
"13701304462@163.com"
] | 13701304462@163.com |
472327499a42ace71b4610362be969074821b053 | 8d9c8fd1f8bbdee01bf0bb685feee7f5980484a8 | /RNN_generacion_nombres.py | 3a4fe0bf8dbf2b00a8324320f8970842e4fe12d7 | [] | no_license | avenegascaleron/ejemploRNN | e2784afa2539c959a9799916ecd32bf91fb5dd87 | 8c8e6823bf6adac7b32d892e88ce13203a7e11c5 | refs/heads/main | 2023-01-28T08:59:29.230064 | 2020-12-01T12:53:20 | 2020-12-01T12:53:20 | 317,540,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,988 | py | import numpy as np
np.random.seed(5)
from keras.layers import Input, Dense, SimpleRNN
from keras.models import Model
from keras.optimizers import SGD
from keras.utils import to_categorical
from keras import backend as K
# 1. DATASET LOADING
# ===========================================================
nombres = open('apellidosVascos.txt','r', encoding='utf8').read()
nombres = nombres.lower()
# Build the alphabet (list of unique characters)
alfabeto = list(set(nombres))
tam_datos, tam_alfabeto = len(nombres), len(alfabeto)
print("En total hay %d caracteres, y el diccionario tiene un tamaño de %d caracteres." % (tam_datos, tam_alfabeto))
print(*alfabeto, sep = ", ")
# Character-to-index mapping and its inverse
car_a_ind = { car:ind for ind,car in enumerate(sorted(alfabeto))}
ind_a_car = { ind:car for ind,car in enumerate(sorted(alfabeto))}
print(car_a_ind)
print(ind_a_car)
# 2. MODEL
# ===========================================================
n_a = 25 # number of units in the hidden layer
entrada = Input(shape=(None,tam_alfabeto))
a0 = Input(shape=(n_a,))
celda_recurrente = SimpleRNN(n_a, activation='tanh', return_state = True)
capa_salida = Dense(tam_alfabeto, activation='softmax')
salida = []
hs, _ = celda_recurrente(entrada, initial_state=a0)
salida.append(capa_salida(hs))
modelo = Model([entrada,a0],salida)
#modelo.summary()
opt = SGD(lr=0.0005)
modelo.compile(optimizer=opt, loss='categorical_crossentropy')
# 3. TRAINING EXAMPLES
# ===========================================================
# Build the list of training examples and shuffle it randomly
with open("apellidosVascos.txt", encoding='utf8') as f:
    ejemplos = f.readlines()
ejemplos = [x.lower().strip() for x in ejemplos]
np.random.shuffle(ejemplos)
# Produce training examples with a generator
def train_generator():
    while True:
        # Pick a random example
        ejemplo = ejemplos[np.random.randint(0,len(ejemplos))]
        # Convert the example to its numeric representation
        X = [None] + [car_a_ind[c] for c in ejemplo]
        # Build "Y" by shifting "X" one character to the right
        Y = X[1:] + [car_a_ind['\n']]
        # One-hot encode "X" and "Y"
        x = np.zeros((len(X),1,tam_alfabeto))
        onehot = to_categorical(X[1:],tam_alfabeto).reshape(len(X)-1,1,tam_alfabeto)
        x[1:,:,:] = onehot
        y = to_categorical(Y,tam_alfabeto).reshape(len(X),tam_alfabeto)
        # Initial activation (zero matrix)
        a = np.zeros((len(X), n_a))
        yield [x, a], y
# 4. TRAINING
# ===========================================================
BATCH_SIZE = 80 # number of training examples used per iteration
NITS = 0 # number of iterations (0 here: training is effectively disabled)
for j in range(NITS):
    historia = modelo.fit_generator(train_generator(), steps_per_epoch=BATCH_SIZE, epochs=1, verbose=0)
    # Print training progress every 1000 iterations
    if j%1000 == 0:
        print('\nIteración: %d, Error: %f' % (j, historia.history['loss'][0]) + '\n')
# 5. NAME GENERATION WITH THE TRAINED MODEL
# ===========================================================
def generar_nombre(modelo,car_a_num,tam_alfabeto,n_a):
    # Initialise x and a with zeros
    x = np.zeros((1,1,tam_alfabeto,))
    a = np.zeros((1, n_a))
    # Generated name and end-of-line character
    nombre_generado = ''
    fin_linea = '\n'
    car = -1
    # Iterate the model, sampling a character at a time, until "fin_linea"
    # is produced or the cap is hit.  NOTE(review): the original comment said
    # 50 characters, but the code caps at 20.
    contador = 0
    while (car != fin_linea and contador != 20):
        # Run one step of the RNN cell and the softmax output layer
        a, _ = celda_recurrente(K.constant(x), initial_state=K.constant(a))
        y = capa_salida(a)
        prediccion = K.eval(y)
        # Sample an index from the predicted distribution (higher-probability
        # entries are more likely to be chosen)
        ix = np.random.choice(list(range(tam_alfabeto)),p=prediccion.ravel())
        # Convert the sampled index to a character and append it to the name
        car = ind_a_car[ix]
        nombre_generado += car
        # Set x_(t+1) = y_t and carry the activation forward
        x = to_categorical(ix,tam_alfabeto).reshape(1,1,tam_alfabeto)
        a = K.eval(a)
        # Update the counter and continue
        contador += 1
    # Append a newline if the length cap was reached
    if (contador == 20):
        nombre_generado += '\n'
    print(nombre_generado)
# Generate 100 sample names with the trained model
for i in range(100):
    generar_nombre(modelo,car_a_ind,tam_alfabeto,n_a)
"noreply@github.com"
] | noreply@github.com |
44b8b15712428540ec8bb8881ed03e41fb5bbabc | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2167.py | 0d743940a6f806b63db35149af9e8542d6728277 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,035 | py | # qubit number=4
# total number=37
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """Character-wise XOR of two bit strings.

    Note: the result is returned reversed relative to the inputs, matching
    the convention the original helper established for the oracle setup.
    """
    out = ''
    for i, ch in enumerate(s):
        # Prepending builds the reversed result directly.
        out = str(int(ch) ^ int(t[i])) + out
    return out
def bitwise_dot(s: str, t: str) -> str:
    """Return the inner product of two bit strings modulo 2, as '0' or '1'."""
    total = sum(int(ch) * int(t[i]) for i, ch in enumerate(s))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build a bit-flip oracle circuit for f: for every n-bit input x with
    f(x) == "1", flip the target qubit via a multi-controlled Toffoli,
    using X gates to match the 0-bits of x before and after."""
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Conjugate the MCT with X on the 0-bits so it triggers on x.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated n-qubit benchmark circuit: mixing layers
    of H/CZ/RX gates, the oracle for f, further gates, then measurement of
    every qubit.  The `# number=k` tags are generator-assigned gate IDs and
    are kept verbatim — do not reorder these gate calls."""
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[0],input_qubit[3]) # number=17
    prog.rx(-0.5686282702997527,input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=18
    prog.h(input_qubit[3]) # number=26
    prog.cz(input_qubit[0],input_qubit[3]) # number=27
    prog.h(input_qubit[3]) # number=28
    prog.x(input_qubit[3]) # number=21
    prog.rx(0.4241150082346221,input_qubit[2]) # number=33
    prog.h(input_qubit[3]) # number=34
    prog.cz(input_qubit[0],input_qubit[3]) # number=35
    prog.h(input_qubit[3]) # number=36
    prog.cx(input_qubit[0],input_qubit[3]) # number=12
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    # Append the oracle over the first n-1 qubits plus the last as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=23
    prog.cz(input_qubit[1],input_qubit[2]) # number=24
    prog.h(input_qubit[2]) # number=25
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.cx(input_qubit[2],input_qubit[0]) # number=29
    prog.z(input_qubit[2]) # number=30
    prog.cx(input_qubit[2],input_qubit[0]) # number=31
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[0]) # number=14
    prog.y(input_qubit[0]) # number=15
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # f(rep) = (a . rep) XOR b — a Deutsch-Jozsa-style balanced function.
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Simulate on the QASM simulator and collect outcome counts.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a fake device model to measure the compiled circuit.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump counts, compiled length, and the compiled circuit to a CSV file.
    writefile = open("../data/startQiskit2167.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
9d086a9a5e0386554d865c16beaba00a2871db2a | b2eddd579b39c68884372f71808453d6f50668ae | /step_1_capture_tweets.py | 1c627d8604c2638e383b809d5c952e809c27ef47 | [] | no_license | PhilipGuo1992/csci_5408_assign_2 | 32214185b43ca9086c48ae0ecfb5d291b1afc23b | 39eb825a2768288746bc678892146fe0a067e384 | refs/heads/master | 2020-03-19T12:23:03.344155 | 2018-06-07T21:16:03 | 2018-06-07T21:16:03 | 136,513,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | import tweepy
import time
import json
import csv
consumer_key = "z56cu40Jq1XxntXeGKLfhNZnk"
consumer_secret = "NWsAOtbQ4lVGPq7xooVbE21XEeMnDuFBtdTfyZzc85Czh4wKnm"
access_key = "1003752992171024386-lsvEB53AROSLhKGJgEchdgajkBJTIC"
access_secret = "NV7vVHJX3FrGJFm1fx7hivxKBLOfVom34gYAJeaUEklX1"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# def get_profile(zhenbang):
# api = tweepy.API(auth)
# try:
# user_profile = api.get_user(zhenbang)
# except tweepy.error.TweepError as e:
# user_profile = json.loads(e.response.text)
#
# return user_profile
#
# def get_trends(location_id):
# api = tweepy.API(auth)
# try:
# trends = api.trends_place(location_id)
# except tweepy.error.TweepError as e:
# trends = json.loads(e.response.txt)
#
# return trends
# code from lab.
def get_tweets(query):
api= tweepy.API(auth)
try:
tweets = api.search(query)
except tweepy.error.TweepError as e:
tweets = [json.loads(e.response.text)]
return tweets
queries = ["eminem", "love OR hare", "revival", "slim shady", "rap god", "trump", "donald", "concert"]
with open('tweets.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(['id', 'user', 'created_at', 'text'])
for query in queries:
t = get_tweets(query)
for tweet in t:
writer.writerow([tweet.id_str, tweet.user.screen_name, tweet.created_at, tweet.text.encode('unicode-escape')])
| [
"philipguo1992@gmail.com"
] | philipguo1992@gmail.com |
96a2513a19ec5ef5b4cef589ef45c1624ee248cb | 117f066c80f3863ebef74463292bca6444f9758a | /data_pulling/crypto/do.py | 33e917485159521a9e506bfcba5606efbf76ad82 | [] | no_license | cottrell/notebooks | c6de3842cbaeb71457d270cbe6fabc8695a6ee1b | 9eaf3d0500067fccb294d064ab78d7aaa03e8b4d | refs/heads/master | 2023-08-09T22:41:01.996938 | 2023-08-04T22:41:51 | 2023-08-04T22:41:51 | 26,830,272 | 3 | 1 | null | 2023-03-04T03:58:03 | 2014-11-18T21:14:23 | Python | UTF-8 | Python | false | false | 1,908 | py | import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import os
import glob
import inspect
def get_pandas_read_csv_defaults():
# probably fragile
i = inspect.getfullargspec(pd.read_csv)
v = i.defaults
k = i.args[-len(v):]
kwargs = dict(zip(k, v))
return kwargs
_mydir = os.path.dirname(os.path.realpath('__file__'))
def load_raw():
# note manually removed some bad row
kwargs = get_pandas_read_csv_defaults()
kwargs['thousands'] = ',' # always do this
kwargs['parse_dates'] = ['Date']
kwargs['na_values'] = ['-']
kwargs['dtype'] = 'str'
dtype = {
'Close': 'float',
'High': 'float',
'Low': 'float',
'Market Cap': 'float',
'Open': 'float',
'Volume': 'float'
}
meta = pd.read_csv(os.path.join(_mydir, 'Top100Cryptos/data/100 List.csv'))
names = meta.Name.tolist()
files = [os.path.join(_mydir, 'Top100Cryptos/data/{}.csv'.format(x)) for x in names]
# files = glob.glob(os.path.join(_mydir, 'Top100Cryptos/data/*.csv'))
dfs = list()
datadir = os.path.join(_mydir, 'parsed')
if not os.path.exists(datadir):
os.makedirs(datadir)
for i, (name, f) in enumerate(zip(names, files)):
mtime = os.path.getmtime(f)
dirname = os.path.join(datadir, 'name={}/mtime={}'.format(name, mtime))
filename = os.path.join(dirname, 'data.parquet')
if not os.path.exists(filename):
df = pd.read_csv(f, **kwargs)
df = pa.Table.from_pandas(df)
if not os.path.exists(dirname):
os.makedirs(dirname)
print('writing {}'.format(filename))
pq.write_table(df, filename)
pq.read_table('./parsed') # test
else:
print('{} exists'.format(filename))
return pq.read_table('./parsed') # test
# id big ups big downs
df = load_raw()
df = df.sort_values('Date')
| [
"cottrell@users.noreply.github.com"
] | cottrell@users.noreply.github.com |
7c4ca5b5dfae96a3696b405eff6c615b26b86332 | 4c873560c66ce3b84268ad2abcd1ffcada32e458 | /examples/scripts/csc/gwnden_clr.py | d382225550d6ce93e6a690716a2697b9e384579a | [
"BSD-3-Clause"
] | permissive | wangjinjia1/sporco | d21bf6174365acce614248fcd2f24b72d5a5b07f | c6363b206fba6f440dd18de7a17dadeb47940911 | refs/heads/master | 2023-04-02T01:10:02.905490 | 2021-03-29T14:20:57 | 2021-03-29T14:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Gaussian White Noise Restoration via CSC
========================================
This example demonstrates the removal of Gaussian white noise from a colour image using convolutional sparse coding :cite:`wohlberg-2016-convolutional`,
$$\mathrm{argmin}_\mathbf{x} \; (1/2) \sum_c \left\| \sum_m \mathbf{d}_{m} * \mathbf{x}_{c,m} -\mathbf{s}_c \right\|_2^2 + \lambda \sum_m \| \mathbf{x}_m \|_1 + \mu \| \{ \mathbf{x}_{c,m} \} \|_{2,1}$$
where $\mathbf{d}_m$ is the $m^{\text{th}}$ dictionary filter, $\mathbf{x}_{c,m}$ is the coefficient map corresponding to the $c^{\text{th}}$ colour band and $m^{\text{th}}$ dictionary filter, and $\mathbf{s}_c$ is colour band $c$ of the input image.
"""
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco import util
from sporco import signal
from sporco import fft
from sporco import metric
from sporco import plot
from sporco.cupy import (cupy_enabled, np2cp, cp2np, select_device_by_load,
gpu_info)
from sporco.cupy.admm import cbpdn
"""
Boundary artifacts are handled by performing a symmetric extension on the image to be denoised and then cropping the result to the original image support. This approach is simpler than the boundary handling strategies that involve the insertion of a spatial mask into the data fidelity term, and for many problems gives results of comparable quality. The functions defined here implement symmetric extension and cropping of images.
"""
def pad(x, n=8):
    """Symmetrically extend ``x`` by ``n`` pixels on every spatial edge.

    Colour (3-D) inputs are padded only along the first two (spatial) axes;
    the channel axis is left untouched.
    """
    if x.ndim == 2:
        width = n
    else:
        width = ((n, n), (n, n), (0, 0))
    return np.pad(x, width, mode='symmetric')
def crop(x, n=8):
    """Trim the ``n``-pixel border previously added by :func:`pad`."""
    interior = slice(n, -n)
    return x[interior, interior]
"""
Load a reference image and corrupt it with Gaussian white noise with $\sigma = 0.1$. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""
img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True,
idxexp=np.s_[:, 160:672])
np.random.seed(12345)
imgn = img + np.random.normal(0.0, 0.1, img.shape).astype(np.float32)
"""
Highpass filter test image.
"""
npd = 16
fltlmbd = 5.0
imgnl, imgnh = signal.tikhonov_filter(imgn, fltlmbd, npd)
"""
Load dictionary.
"""
D = util.convdicts()['G:8x8x128']
"""
Set solver options. See Section 8 of :cite:`wohlberg-2017-convolutional2` for details of construction of $\ell_1$ weighting matrix $W$.
"""
imgnpl, imgnph = signal.tikhonov_filter(pad(imgn), fltlmbd, npd)
W = fft.irfftn(np.conj(fft.rfftn(D[..., np.newaxis, :], imgnph.shape[0:2],
(0, 1))) * fft.rfftn(imgnph[..., np.newaxis], None, (0, 1)),
imgnph.shape[0:2], (0, 1))
W = 1.0/(np.maximum(np.abs(W), 1e-8))
lmbda = 1.5e-2
mu = 2.7e-1
opt = cbpdn.ConvBPDNJoint.Options({'Verbose': True, 'MaxMainIter': 250,
'HighMemSolve': True, 'RelStopTol': 3e-3, 'AuxVarObj': False,
'L1Weight': cp2np(W), 'AutoRho': {'Enabled': False},
'rho': 1e3*lmbda})
"""
Initialise a ``sporco.cupy`` version of a :class:`.admm.cbpdn.ConvBPDNJoint` object and call the ``solve`` method.
"""
if not cupy_enabled():
print('CuPy/GPU device not available: running without GPU acceleration\n')
else:
id = select_device_by_load()
info = gpu_info()
if info:
print('Running on GPU %d (%s)\n' % (id, info[id].name))
b = cbpdn.ConvBPDNJoint(np2cp(D), np2cp(pad(imgnh)), lmbda, mu, opt, dimK=0)
X = cp2np(b.solve())
"""
The denoised estimate of the image is just the reconstruction from the coefficient maps.
"""
imgdp = cp2np(b.reconstruct().squeeze())
imgd = np.clip(crop(imgdp) + imgnl, 0, 1)
"""
Display solve time and denoising performance.
"""
print("ConvBPDNJoint solve time: %5.2f s" % b.timer.elapsed('solve'))
print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn))
print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgd))
"""
Display the reference, noisy, and denoised images.
"""
fig = plot.figure(figsize=(21, 7))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgn, title='Noisy', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgd, title='CSC Result', fig=fig)
fig.show()
"""
Plot functional evolution during ADMM iterations.
"""
its = b.getitstat()
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional')
"""
Plot evolution of ADMM residuals and ADMM penalty parameter.
"""
plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T,
ptyp='semilogy', xlbl='Iterations', ylbl='Residual',
lgnd=['Primal', 'Dual'])
plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter')
# Wait for enter on keyboard
input()
| [
"brendt@ieee.org"
] | brendt@ieee.org |
a24704b833d23a859af9ec1629f1226377f8c7ea | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /bill_ws/build/bill_description/catkin_generated/pkg.develspace.context.pc.py | d846aac1c29db5d24fa24af24e101a5ae58bdccd | [] | no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "bill_description"
PROJECT_SPACE_DIR = "/home/bill/bill_ros/bill_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"alphonsusbq436@gmail.com"
] | alphonsusbq436@gmail.com |
1a321791650a05c8b77ab5c95f563d2f6b201961 | 0dcbcf0ed0e19a7406e85f1f5f4f957ab2a6975b | /plasTeX/Packages/listings.py | 2f163dd70fae8b9761fbc3de9bcdb434e5da17b3 | [
"MIT"
] | permissive | rsbowman/plastex-mobi | aa3762ffb9e48f8c2120a79c7ed4c2e2717e3308 | 8ea054c63e9deb9dd302d950e636256f9e5e75e9 | refs/heads/master | 2021-01-19T18:32:12.720364 | 2014-06-30T12:07:45 | 2014-06-30T12:07:45 | 21,349,475 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,849 | py | #!/usr/bin/env python
import sys, re, codecs
from plasTeX import Base
# Optional dependency: pygments enables syntax highlighting of listings when
# installed; otherwise the code falls back to plain verbatim output.
try:
    import pygments
except ImportError:
    # Only a missing module should be tolerated here; the original bare
    # `except:` also swallowed unrelated errors (even KeyboardInterrupt).
    pygments = None
class listingsname(Base.Command):
    # Expansion text for \listingsname (the caption label used for listings).
    unicode = 'Listing'
# Options passed to \usepackage{listings}; captured by ProcessOptions below.
PackageOptions = {}
def ProcessOptions(options, document):
    """Package-load hook: register the `listings` counter (reset per chapter,
    formatted as chapter.number) and record the package options."""
    document.context.newcounter('listings',
                                resetby='chapter',
                                format='${thechapter}.${listings}')
    PackageOptions.update(options)
class lstset(Base.Command):
    """\\lstset{...}: set listings options for subsequent listings."""
    args = 'arguments:dict'

    def invoke(self, tex):
        Base.Command.invoke(self, tex)
        if 'language' in self.attributes['arguments']:
            # Remember the selected language so later listings can be
            # syntax-highlighted with the matching Pygments lexer.
            self.ownerDocument.context.current_language = \
                self.attributes['arguments']['language']
class lstlisting(Base.verbatim):
    """The lstlisting environment: a numbered, highlighted code listing."""
    args = '[ arguments:dict ]'
    counter = 'listings'

    def invoke(self, tex):
        if self.macroMode == Base.Environment.MODE_END:
            return
        # Capture the verbatim body (dropping the leading token), normalise
        # line endings, and split into lines for _format.
        s = ''.join(Base.verbatim.invoke(self, tex)[1:]).replace('\r','').split('\n')
        _format(self, s)
class lstinline(Base.verb):
    """\\lstinline: an inline, highlighted code fragment."""
    args = '[ arguments:dict ]'

    def invoke(self, tex):
        # Strip the delimiter tokens from the captured verb content.
        _format(self, ''.join(Base.verb.invoke(self, tex)[2:-1]))
class lstinputlisting(Base.Command):
    """\\lstinputlisting: include an external file as a listing."""
    args = '[ arguments:dict ] file:str'
    counter = 'listings'

    def invoke(self, tex):
        Base.Command.invoke(self, tex)
        if 'file' not in self.attributes or not self.attributes['file']:
            raise ValueError('Malformed \\lstinputlisting macro.')
        # Decode the source file with the configured input encoding,
        # replacing undecodable bytes rather than failing.
        _format(self, codecs.open(self.attributes['file'], 'r',
                self.config['files']['input-encoding'], 'replace'))
def _format(self, file):
    """Populate ``self.plain_listing`` and (if Pygments is available)
    ``self.xhtml_listing`` from an iterable of source lines.

    Respects the ``firstline``/``lastline`` listings arguments, registers
    any ``label`` with plasTeX, and strips single-line ``/*@ ... @*/``
    comments.  NOTE: this module is Python 2 code (``sys.maxint`` and
    ``except Exception, msg`` below are Python-2-only).
    """
    if self.attributes['arguments'] is None:
        self.attributes['arguments'] = {}

    # Line numbering is enabled either per-listing or package-wide.
    linenos = False
    if 'numbers' in self.attributes['arguments'] or 'numbers' in PackageOptions:
        linenos = 'inline'

    # If this listing includes a label, inform plasTeX.
    if 'label' in self.attributes['arguments']:
        if hasattr(self.attributes['arguments']['label'], 'textContent'):
            self.ownerDocument.context.label(
                self.attributes['arguments']['label'].textContent)
        else:
            self.ownerDocument.context.label(
                self.attributes['arguments']['label'])

    # Check the textual LaTeX arguments and convert them to Python
    # attributes.
    if 'firstline' in self.attributes['arguments']:
        first_line_number = int(self.attributes['arguments']['firstline'])
    else:
        first_line_number = 0

    if 'lastline' in self.attributes['arguments']:
        last_line_number = int(self.attributes['arguments']['lastline'])
    else:
        # Python 2 only; use sys.maxsize on Python 3.
        last_line_number = sys.maxint

    # Read the file, all the while respecting the "firstline" and
    # "lastline" arguments given in the document.
    self.plain_listing = ''
    for current_line_number, line in enumerate(file):
        current_line_number += 1
        if (current_line_number >= first_line_number) and \
           (current_line_number <= last_line_number):
            # Remove single-line "listings" comments. Only
            # comments started by "/*@" and ended by "@*/" are
            # supported.
            line = re.sub('/\*@[^@]*@\*/', '', line)

            # Add the just-read line to the listing.
            self.plain_listing += '\n' + line

    # Create a syntax highlighted XHTML version of the file using Pygments
    if pygments is not None:
        from pygments import lexers, formatters
        try:
            lexer = lexers.get_lexer_by_name(self.ownerDocument.context.current_language.lower())
        except Exception, msg:
            # Unknown/missing language: fall back to plain text.
            lexer = lexers.TextLexer()
        self.xhtml_listing = pygments.highlight(self.plain_listing, lexer, formatters.HtmlFormatter(linenos=linenos))
| [
"r.sean.bowman@gmail.com"
] | r.sean.bowman@gmail.com |
d4796b17e0004688ffac671106272d188dbca7d6 | 1cbc8d9b99e5ac8397a857f4c20adb5c7662171f | /helloworldapp/helloworldapp/__init__.py | 84707a415502114026b8c35a4e51bdbdfc478844 | [] | no_license | six0h/helloworldapp | 255cc08a41317c040c8ad150a8f0246309055a45 | 311aa2c5df5aef763d068e879541b8f77ff1a062 | refs/heads/master | 2021-01-19T22:34:29.180128 | 2017-04-20T15:03:04 | 2017-04-20T15:03:04 | 88,826,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | # -*- coding: utf-8 -*-
# Package metadata for helloworldapp.
__author__ = """Cody Halovich"""
__email__ = 'me@codyhalovich.com'
__version__ = '0.1.0'
| [
"cody@savtechnology.com"
] | cody@savtechnology.com |
88bd4a7be1bb90e388797cbac655dfadd940bb34 | 00635315cacca50f08685e25e5f3e4bcbd1a287b | /Django_citas/settings.py | 79adbac223b724818dc1584b145dadb433951f3c | [] | no_license | DiegoArredo/Django-Citas | ceda2f6f93390f199dd2d4d6b7cc687a82bac07f | d205488dd363526ce06e6a63a881b05329af6937 | refs/heads/master | 2023-08-03T21:05:23.436404 | 2021-09-13T01:51:45 | 2021-09-13T01:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,297 | py | """
Django settings for Django_citas project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load from an environment variable before deploying.
SECRET_KEY = 'django-insecure-at^m__5^b1#tm(126#(t%5alt%e_3l%m46y6)zdgyjl!gt6+dm'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    # Project-local apps.
    "account",
    "core",
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Django_citas.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Django_citas.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'America/Santiago'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"diego.arredo18@gmail.com"
] | diego.arredo18@gmail.com |
b84e755e93027b2789dea29567569defeb59a598 | 40f6176d172025c7061d3c1fe027628c06c86f39 | /setup.py | 015a81e636e7c57885954242016210af5a43c5c7 | [] | no_license | rsriram315/eds_covid-19 | 8adca890fcf93552b9761036d6b268213afd10bc | 528695a430ff13c9dcc6e969ebf1f7988e26c434 | refs/heads/master | 2022-12-07T16:38:21.071965 | 2020-09-01T20:25:40 | 2020-09-01T20:25:40 | 291,678,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from setuptools import find_packages, setup
# Packaging metadata for the COVID-19 applied-data-science project.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Applied Data science on COVID-19 Data',
    author='Sriram Ramachandran',
    license='',
)
| [
"rsriram315@gmail.com"
] | rsriram315@gmail.com |
791cb6aa33976e681d1eda201b25780a2d473025 | 4a4ae0208909f3297021977bcb7733d71d8d1321 | /main.py | cb9a2c85d4c497787c93a7502e37f8f948091e48 | [
"MIT"
] | permissive | weishan-Lin/rl-tf2 | 1f52c55df1974ac093969fa0f21c0f955852e6f8 | 7474df2c6ba9980dccf8946fa95942fe2382cd65 | refs/heads/main | 2023-08-05T17:05:22.205615 | 2021-10-04T00:08:55 | 2021-10-04T00:08:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | import tensorflow as tf
import gym
from rl_tf2.agents.ddpg.actor_network import Actor
from rl_tf2.agents.ddpg.critic_network import Critic
from rl_tf2.agents.ddpg.ddpg_agent import DDPG
import yaml
# Load the experiment configuration (environment, agent, hyperparameters).
with open('config.yml', 'r') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)

env = gym.make(config['env'])

if config['agent'] == 'DDPG':
    # Online critic/actor; the actor output is bounded to the env's
    # action range via action_lb/action_ub.
    critic = Critic(hidden_size=config['critic']['hidden_size'])
    actor = Actor(env.action_space.shape[0],
                  hidden_size=config['actor']['hidden_size'],
                  action_lb=env.action_space.low,
                  action_ub=env.action_space.high)
    # Target networks with the same architecture as the online networks.
    target_critic = Critic(hidden_size=config['critic']['hidden_size'])
    target_actor = Actor(env.action_space.shape[0],
                         hidden_size=config['actor']['hidden_size'],
                         action_lb=env.action_space.low,
                         action_ub=env.action_space.high)

    # Making the weights equal
    target_actor.set_weights(actor.get_weights())
    target_critic.set_weights(critic.get_weights())

    agent = DDPG(
        env,
        actor,
        critic,
        target_actor,
        target_critic,
        env_name=config['env'],
        replay_size=config['replay_size'],
        batch_size=config['batch_size'],
        epochs=config['epochs'],
        noise_std=config['noise_std'],
        noise_type=config['noise_type'],
        actor_lr=config['actor_lr'],
        critic_lr=config['critic_lr'],
        target_network_update_rate=config['target_network_update_rate'],
        discount=config['discount'],
        max_steps_per_epoch=config['max_steps_per_epoch'],
        log_weights=config['log_weights'])

    agent.train(test_after_epoch=config['test_after_epoch'],
                render=config['render'],
                print_step_info=config['print_step_info'])
"xjygr08@gmail.com"
] | xjygr08@gmail.com |
ab5123e5c40629e5280e10236c6abe7c03e7a859 | 07e22b0a383fb0fc198ccd76d51e8b1481aa4d8e | /account/migrations/0001_initial.py | b54fe0b4de9605143ffb4a10cf3027c9b6307492 | [] | no_license | Shivangi438/demo3 | fb43bbf4b32bb1bad1c01841289b6d76f503e8dc | fa73ee485b2175f8abbd7dbc31149157e4fa52a1 | refs/heads/master | 2022-12-08T22:34:46.621994 | 2020-09-13T08:19:46 | 2020-09-13T08:19:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | # Generated by Django 3.1 on 2020-09-06 07:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the account app.

    Creates the User table (username as primary key) and the
    teacher_timetable table (one row per user per weekday, with one
    column per lecture slot).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                # NOTE(review): primary_key='True' is a *string*, not the
                # bool True -- truthy, so it works, but the model should be
                # corrected (via a new migration, not by editing this one).
                ('username', models.CharField(max_length=50, primary_key='True', serialize=False)),
            ],
        ),
        migrations.CreateModel(
            name='teacher_timetable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('day', models.CharField(max_length=50)),
                ('First_lech', models.CharField(max_length=50)),
                ('sec_lech', models.CharField(max_length=50)),
                ('third_lech', models.CharField(max_length=50)),
                ('fourth_lech', models.CharField(max_length=50)),
                ('fifth_lech', models.CharField(max_length=50)),
                ('sixth_lech', models.CharField(max_length=50)),
                ('sev_lech', models.CharField(max_length=50)),
                ('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.user')),
            ],
        ),
    ]
| [
"shpy3296@gmail.com"
] | shpy3296@gmail.com |
f5442a5cd16a2677c528d2570c555dac671f2270 | 43535a0667aa1d745fdd718bb8c81b2e616633be | /SLQ/sess_4/4a_citycycle.py | d33344469ec0bc4abccf20a9fea8994831d47573 | [] | no_license | anniequasar/session-summaries | 25336afe94b5033ae009bd124e00d4adf4748a99 | acc17b640e1a737e70b310cb2c8ce21aac35c6da | refs/heads/master | 2023-08-08T03:01:43.030746 | 2023-07-19T01:41:34 | 2023-07-19T01:41:34 | 165,978,720 | 17 | 11 | null | 2022-04-26T07:43:49 | 2019-01-16T05:20:31 | Jupyter Notebook | UTF-8 | Python | false | false | 2,134 | py | # Author: Tim Cummings
# See https://data.brisbane.qld.gov.au for other data sets
# Register at https://developer.jcdecaux.com/ to get an api_key
# contract list GET https://api.jcdecaux.com/vls/v1/contracts?apiKey={api_key} HTTP/1.1
# station list GET https://api.jcdecaux.com/vls/v1/stations?contract={contract_name}&apiKey={api_key} HTTP/1.1
# station info GET https://api.jcdecaux.com/vls/v1/stations/{station_number}?contract={contract_name}&apiKey={api_key} HTTP/1.1
from urllib.request import urlopen
import json

# TODO enter your api key as issued by jcdecaux
api_key = '1fbb..............................82151e'

# contract name from jcdecaux contract list
contract_name = 'Brisbane'

# url for station list with placeholders for contract name and api key
url_station_list = "https://api.jcdecaux.com/vls/v1/stations?contract={contract_name}&apiKey={api_key}"


def stations_with_most_bikes(station_list):
    """Return (most_bikes, stations) for the given station dicts.

    most_bikes is the maximum 'available_bikes' value seen and stations is
    the list of every station dict attaining it (empty for empty input).
    """
    most_bikes = 0
    best = []
    for station in station_list:
        available = station['available_bikes']
        if most_bikes < available:
            # New maximum: restart the result list with just this station.
            most_bikes = available
            best = [station]
        elif most_bikes == available:
            # Ties with the current maximum are all reported.
            best.append(station)
    return most_bikes, best


def main():
    """Fetch the CityCycle station list and report the fullest station(s)."""
    url = url_station_list.format(contract_name=contract_name, api_key=api_key)
    # Load the JSON station list from the HTTP response into Python dicts.
    station_list = json.load(urlopen(url))
    for station in station_list:
        print(station)
    most_bikes, best = stations_with_most_bikes(station_list)
    print()
    print("Most number of available bikes =", most_bikes, 'at the following station(s)')
    for station in best:
        print(station['name'], 'at', station['position'])


# Guarding the entry point makes the module importable (and the pure
# helper testable) without triggering a network request.
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
68a5dfb492a916a1c01d489302bca9f55e915fb8 | 11b13e5c0a5286a6877ecb4f24c5c8b4d69e26d2 | /GMM/fit.py | b7ba45670ecd81c904c15dac50d67c4e52bf6683 | [] | no_license | ruozhengu/machine-learning-model-by-scratch | bcf015b6b7fd768304749858c4006864257b747c | bd4b1da212c5a1626ce89d4cdafc0fe6a6cf1f05 | refs/heads/master | 2022-11-14T03:06:17.768123 | 2020-07-14T07:15:03 | 2020-07-14T07:15:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | from GMM import EM
import numpy as np
from keras.datasets import mnist
from sklearn.decomposition import PCA
def fit_and_eval():
    """Fit one GMM (k components, via EM) per MNIST digit class and report
    the test-set classification error.

    Pipeline: load MNIST, flatten and scale to [0, 1], project to 50 dims
    with PCA, fit a diagonal-covariance GMM per class, then classify each
    test image by the class whose mixture gives the highest likelihood.
    """
    k = 5
    # load data
    n_train = 60000
    n_test = 10000
    x_eval = np.zeros((n_test, 5))      # per-component likelihoods, one column per component
    y_heta = np.zeros((n_test, 1))      # accumulates one likelihood column per class
    size = 28 # size of image is 28 pixels
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # flat the image
    x_train = x_train.reshape((n_train, size ** 2)) / 255
    x_test = x_test.reshape((n_test, size ** 2)) / 255
    # divide into 10 classes
    classes = [[] for _ in range(10)]
    for _x, _y in zip(x_train, y_train):
        classes[_y].append(_x)
    # PCA to reduce dimensions
    pca_clf = PCA(n_components=50)
    pca_clf.fit(x_train)
    classes_after_pca = [pca_clf.transform(classes[i]) for i in range(10)]
    test_after_pca = pca_clf.transform(x_test)
    # training for each class
    # EM is assumed to return (means, diagonal covariances, mixture
    # weights, loss) -- TODO confirm against GMM.EM's contract.
    train_result = [EM(classes_after_pca[i], k) for i in range(10)]
    # evaluation on test data
    for i in range(10):
        means, Sk, pi_k, loss = train_result[i]
        for _k in range(k):
            mean_diff = test_after_pca - means[_k]
            # Diagonal-Gaussian exponent: -0.5 * (x-mu)^T Sigma^-1 (x-mu),
            # computed row-wise via einsum.
            expo = np.exp(-0.5 * np.einsum('ij,ij->i', np.divide(mean_diff, Sk[_k]), mean_diff))
            x_eval[:, _k] = (1./np.sqrt(np.prod(Sk[_k]))) * pi_k[_k] * expo
        # Append this class's total (weighted) likelihood as a new column.
        y_heta = np.c_[y_heta, np.sum(x_eval, axis=1)]
    # Drop the initial zero column used to seed np.c_ concatenation.
    y_heta = y_heta[:, 1:]
    # pick the final prediction (which distribution) by selecting the largest prob
    pred = [list(y_heta[_i, :]).index(max(list(y_heta[_i, :]))) for _i in range(n_test)]
    error = [int(pred[i] != y_test[i]) for i in range(len(pred))]
    print("error rate is: %f" % float(sum(error) / len(pred)))
if __name__ == '__main__':
fit_and_eval()
| [
"gu.gabriel@hotmail.com"
] | gu.gabriel@hotmail.com |
38c048053b17d136f49564e67551e3075fbf6610 | f866b2f3450fe6ebdf17fb853bcd27710548b928 | /eleanor/postcard.py | 6727126d4ad4a849a17947d41f7ccad2bf96a427 | [
"MIT"
] | permissive | viyangshah/eleanor | 5efdb5e33506d0105a2617eee9e4c414df645f2b | 7b3393be544854d9a5272f6ce8a4695fdf3a3609 | refs/heads/master | 2020-06-07T08:06:15.549143 | 2019-06-20T17:05:34 | 2019-06-20T17:05:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,079 | py | import os, sys
from astropy.io import fits
import matplotlib.pyplot as plt
from astropy.wcs import WCS
import numpy as np
import warnings
import pandas as pd
import copy
from .mast import crossmatch_by_position
from urllib.request import urlopen
__all__ = ['Postcard']
class Postcard(object):
    """TESS FFI data for one postcard across one sector.

    A postcard is a rectangular subsection cut out from the FFIs.
    It's like a TPF, but bigger.  The Postcard object contains a stack of
    these cutouts from all available FFIs during a given sector of TESS
    observations.

    Parameters
    ----------
    filename : str
        Filename of the postcard.
    ELEANORURL : str
        Base URL the postcard is downloaded from when `location` is None.
    location : str, optional
        Filepath to `filename`; when given, nothing is downloaded.

    Attributes
    ----------
    dimensions : tuple
        (`x`, `y`, `time`) dimensions of postcard.
    flux, flux_err : numpy.ndarray
        Arrays of shape `postcard.dimensions` containing flux or error on flux
        for each pixel.
    time : numpy.ndarray
        Mid-point of each cadence (mean of TSTART and TSTOP).
    header : dict
        Stored header information for postcard file.
    center_radec : tuple
        RA & Dec coordinates of the postcard's central pixel.
    center_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the postcard's central pixel on the FFI.
    origin_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the postcard's (0,0) pixel on the FFI.
    """
    def __init__(self, filename, ELEANORURL, location=None):
        if location is not None:
            # Local postcard: open it directly.
            self.filename = '{}{}'.format(location, filename)
            self.local_path = copy.copy(self.filename)
            self.hdu = fits.open(self.local_path)
        else:
            # Remote postcard: download once and cache under ~/.eleanor.
            self.post_dir = os.path.join(os.path.expanduser('~'), '.eleanor/postcards')
            if not os.path.isdir(self.post_dir):
                try:
                    os.mkdir(self.post_dir)
                except OSError:
                    # Fix: warn with the directory that could not be created
                    # *before* falling back (previously it reported '.').
                    warnings.warn('Warning: unable to create {}. '
                                  'Downloading postcard to the current '
                                  'working directory instead.'.format(self.post_dir))
                    self.post_dir = '.'

            self.filename = '{}{}'.format(ELEANORURL, filename)
            self.local_path = '{}/{}'.format(self.post_dir, filename)

            if not os.path.isfile(self.local_path):
                print("Downloading {}".format(self.filename))
                # NOTE(review): shells out with an unquoted URL; fine for
                # trusted eleanor URLs, but not injection-safe in general.
                os.system('cd {} && curl -O -L {}'.format(self.post_dir, self.filename))
            self.hdu = fits.open(self.local_path)

    def __repr__(self):
        return "eleanor postcard ({})".format(self.filename)

    def plot(self, frame=0, ax=None, scale='linear', **kwargs):
        """Plots a single frame of a postcard.

        Parameters
        ----------
        frame : int, optional
            Index of frame. Default 0.
        ax : matplotlib.axes.Axes, optional
            Axes on which to plot. Creates a new object by default.
        scale : str
            Scaling for colorbar; acceptable inputs are 'linear' or 'log'.
            Default 'linear'.
        **kwargs : passed to matplotlib.pyplot.imshow

        Returns
        -------
        ax : matplotlib.axes.Axes
        """
        if ax is None:
            _, ax = plt.subplots(figsize=(8, 7))

        # Fix: compare strings with '==', not 'is' (identity of equal
        # string literals is an implementation detail, not a guarantee).
        if scale == 'log':
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                dat = np.log10(self.flux[:, :, frame])
                dat[~np.isfinite(dat)] = np.nan
        else:
            dat = self.flux[:, :, frame]

        # Fix: logical 'and' instead of bitwise '&' on booleans.
        if ('vmin' not in kwargs) and ('vmax' not in kwargs):
            kwargs['vmin'] = np.nanpercentile(dat, 1)
            kwargs['vmax'] = np.nanpercentile(dat, 99)

        im = ax.imshow(dat, **kwargs)
        ax.set_xlabel('Row')
        ax.set_ylabel('Column')

        cbar = plt.colorbar(im, ax=ax)
        if scale == 'log':
            cbar.set_label('log$_{10}$ Flux')
        else:
            cbar.set_label('Flux')

        # Reset the x/y ticks to the position in the ACTUAL FFI.
        xticks = ax.get_xticks() + self.center_xy[0]
        yticks = ax.get_yticks() + self.center_xy[1]
        ax.set_xticklabels(xticks)
        ax.set_yticklabels(yticks)
        return ax

    def find_sources(self):
        """Finds the cataloged sources in the postcard and returns a table.

        Returns
        -------
        result : pandas.DataFrame
            All the sources within 0.5 deg of the postcard center, with
            TIC IDs, coordinates, proper motions and TESS magnitudes.
        """
        result = crossmatch_by_position(self.center_radec, 0.5, 'Mast.Tic.Crossmatch').to_pandas()
        result = result[['MatchID', 'MatchRA', 'MatchDEC', 'pmRA', 'pmDEC', 'Tmag']]
        result.columns = ['TessID', 'RA', 'Dec', 'pmRA', 'pmDEC', 'Tmag']
        return result

    @property
    def header(self):
        return self.hdu[1].header

    @property
    def center_radec(self):
        return(self.header['CEN_RA'], self.header['CEN_DEC'])

    @property
    def center_xy(self):
        return (self.header['CEN_X'],  self.header['CEN_Y'])

    @property
    def origin_xy(self):
        return (self.header['POSTPIX1'], self.header['POSTPIX2'])

    @property
    def flux(self):
        return self.hdu[2].data

    @property
    def dimensions(self):
        return self.flux.shape

    @property
    def flux_err(self):
        return self.hdu[3].data

    @property
    def time(self):
        # Mid-exposure time of each cadence.
        return (self.hdu[1].data['TSTOP'] + self.hdu[1].data['TSTART'])/2

    @property
    def wcs(self):
        return WCS(self.header)

    @property
    def quality(self):
        return self.hdu[1].data['QUALITY']

    @property
    def bkg(self):
        return self.hdu[1].data['BKG']

    @property
    def barycorr(self):
        return self.hdu[1].data['BARYCORR']

    @property
    def ffiindex(self):
        return self.hdu[1].data['FFIINDEX']
class Postcard_tesscut(object):
    """TESS FFI cutout (from TESScut) exposed through the Postcard interface.

    Parameters
    ----------
    cutout : astropy.io.fits.HDUList
        An already-opened TESScut cutout HDU list (fix: the previous
        docstring wrongly described a `filename` parameter).
    location : str, optional
        Unused; kept for interface compatibility with `Postcard`.

    Attributes
    ----------
    dimensions : tuple
        (`x`, `y`, `time`) dimensions of postcard.
    flux, flux_err : numpy.ndarray
        Arrays of shape `postcard.dimensions` containing flux or error on flux
        for each pixel.
    time : numpy.ndarray
        Time of each cadence.
    header : dict
        Stored header information for postcard file.
    center_radec : tuple
        RA & Dec coordinates of the postcard's central pixel.
    center_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the postcard's central pixel on the FFI.
    origin_xy : tuple
        (`x`, `y`) coordinates corresponding to the location of
        the postcard's (0,0) pixel on the FFI.
    """
    def __init__(self, cutout, location=None):
        self.hdu = cutout

    def plot(self, frame=0, ax=None, scale='linear', **kwargs):
        """Plots a single frame of a postcard.

        Parameters
        ----------
        frame : int, optional
            Index of frame. Default 0.
        ax : matplotlib.axes.Axes, optional
            Axes on which to plot. Creates a new object by default.
        scale : str
            Scaling for colorbar; acceptable inputs are 'linear' or 'log'.
            Default 'linear'.
        **kwargs : passed to matplotlib.pyplot.imshow

        Returns
        -------
        ax : matplotlib.axes.Axes
        """
        if ax is None:
            _, ax = plt.subplots(figsize=(8, 7))

        # Fix: compare strings with '==', not 'is'.
        if scale == 'log':
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                dat = np.log10(self.flux[:, :, frame])
                dat[~np.isfinite(dat)] = np.nan
        else:
            dat = self.flux[:, :, frame]

        # Fix: logical 'and' instead of bitwise '&' on booleans.
        if ('vmin' not in kwargs) and ('vmax' not in kwargs):
            kwargs['vmin'] = np.nanpercentile(dat, 1)
            kwargs['vmax'] = np.nanpercentile(dat, 99)

        im = ax.imshow(dat, **kwargs)
        ax.set_xlabel('Row')
        ax.set_ylabel('Column')

        cbar = plt.colorbar(im, ax=ax)
        if scale == 'log':
            cbar.set_label('log$_{10}$ Flux')
        else:
            cbar.set_label('Flux')

        # Reset the x/y ticks to the position in the ACTUAL FFI.
        xticks = ax.get_xticks() + self.center_xy[0]
        yticks = ax.get_yticks() + self.center_xy[1]
        ax.set_xticklabels(xticks)
        ax.set_yticklabels(yticks)
        return ax

    def find_sources(self):
        """Finds the cataloged sources in the postcard and returns a table.

        Returns
        -------
        result : pandas.DataFrame
            All the sources within 0.5 deg of the postcard center.
        """
        result = crossmatch_by_position(self.center_radec, 0.5, 'Mast.Tic.Crossmatch').to_pandas()
        result = result[['MatchID', 'MatchRA', 'MatchDEC', 'pmRA', 'pmDEC', 'Tmag']]
        result.columns = ['TessID', 'RA', 'Dec', 'pmRA', 'pmDEC', 'Tmag']
        return result

    @property
    def header(self):
        return self.hdu[1].header

    @property
    def center_radec(self):
        return(self.header['RA_OBJ'], self.header['DEC_OBJ'])

    @property
    def center_xy(self):
        # NOTE(review): '1CRV4P' (axis 1) is used for both coordinates;
        # the second value presumably should come from '2CRV4P' -- confirm.
        return (self.header['1CRV4P']+16, self.header['1CRV4P']+16)

    @property
    def origin_xy(self):
        # NOTE(review): same axis-keyword concern as center_xy.
        return (self.header['1CRV4P'], self.header['1CRV4P'])

    @property
    def flux(self):
        return self.hdu[1].data['FLUX']

    @property
    def dimensions(self):
        return self.flux.shape

    @property
    def flux_err(self):
        return self.hdu[1].data['FLUX_ERR']

    @property
    def time(self):
        return self.hdu[1].data['TIME']

    @property
    def wcs(self):
        return WCS(self.header)

    @property
    def quality(self):
        # Quality flags are not in the cutout; fetch the per-sector list.
        sector = self.header['SECTOR']
        array_obj = urlopen('https://archipelago.uchicago.edu/tess_postcards/metadata/s{0:04d}/quality_s{0:04d}.txt'.format(sector))
        A = [int(x) for x in array_obj.read().decode('utf-8').split()]
        return A

    @property
    def bkg(self):
        # Median background level per cadence over the cutout pixels.
        return np.nanmedian(self.hdu[1].data['FLUX_BKG'], axis=(1,2))

    @property
    def barycorr(self):
        return self.hdu[1].data['TIMECORR']

    @property
    def ffiindex(self):
        # Cadence numbers are not in the cutout; fetch the per-sector list.
        sector = self.header['SECTOR']
        array_obj = urlopen('https://archipelago.uchicago.edu/tess_postcards/metadata/s{0:04d}/cadences_s{0:04d}.txt'.format(sector))
        A = [int(x) for x in array_obj.read().decode('utf-8').split()]
        return A
"bmontet@uchicago.edu"
] | bmontet@uchicago.edu |
5a82148a31bf8f2f5d8fa145f859153c97c63e7d | db9247b2cf24d9f49225b5a8b6ab4918af0abde9 | /Test-AWS-price-list-Details.py | c7b6b300c123593934f480bfd6eed20ed3946e28 | [] | no_license | anecula/AWS-CostEstimation-Python | af599538d574130d0c62a7e63547e096c32797f7 | e3e8f39f653affb58d823558693c537596b2638d | refs/heads/master | 2020-04-01T14:06:49.401710 | 2018-10-27T08:28:14 | 2018-10-27T08:28:14 | 153,280,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import argparse
import boto3
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('--days', type=int, default=30)
args = parser.parse_args()

# Report window: the trailing --days up to now (UTC), as Cost Explorer dates.
now = datetime.datetime.utcnow()
start = (now - datetime.timedelta(days=args.days)).strftime('%Y-%m-%d')
end = now.strftime('%Y-%m-%d')

cd = boto3.client('ce', 'us-east-1')

# Page through get_cost_and_usage, accumulating the daily result groups.
results = []
token = None
while True:
    kwargs = {'NextPageToken': token} if token else {}
    data = cd.get_cost_and_usage(
        TimePeriod={'Start': start, 'End': end},
        Granularity='DAILY',
        Metrics=['UnblendedCost'],
        GroupBy=[{'Type': 'DIMENSION', 'Key': 'LINKED_ACCOUNT'},
                 {'Type': 'DIMENSION', 'Key': 'SERVICE'}],
        **kwargs)
    results.extend(data['ResultsByTime'])
    token = data.get('NextPageToken')
    if not token:
        break

# One tab-separated row per (day, linked account, service).
print('\t'.join(['TimePeriod', 'LinkedAccount', 'Service', 'Amount', 'Unit', 'Estimated']))
for result_by_time in results:
    for group in result_by_time['Groups']:
        cost = group['Metrics']['UnblendedCost']
        print(result_by_time['TimePeriod']['Start'], '\t',
              '\t'.join(group['Keys']), '\t',
              cost['Amount'], '\t', cost['Unit'], '\t',
              result_by_time['Estimated'])
| [
"srinu24.a@gmail.com"
] | srinu24.a@gmail.com |
5556284697c83072c5f8adcd31eebd42bde99e85 | 9acff625f8a82c510b23213e4eed9b5c94d84580 | /analyze.py | f25e88e4b51899131cd7e4beff16375ee7ff65f8 | [] | no_license | jennalau/lyft-data-challenge | 69b970b4214e63abd9f34cb3c31cbf0e571b9c08 | d3c6cac9807de0aaa87d31f87e237a26bb487672 | refs/heads/master | 2020-07-08T12:48:12.376898 | 2019-09-16T17:32:58 | 2019-09-16T17:32:58 | 203,675,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | from utils import *
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
def calcTotalDrivingMin(data):
    """Aggregate driving minutes per hour-of-day over all drivers and days.

    data : iterable of days, each an array of shape (drivers, 24) holding
        minutes driven per 1-hour interval (per the caller: (122, 46, 24)).

    Returns a (24, 3) int array where each row is
    [total minutes, active-driver count, mean minutes per active driver]
    for that hour.  Hours with no active drivers keep a mean of 0.
    """
    total = np.zeros((24, 3), dtype=int)
    for day in data:
        for hrs in day:  # one driver's 24 hourly minute counts
            # Fix: iterate with enumerate instead of hrs.index(hr), which
            # attributed duplicate values to the first matching hour.
            for hour, mins in enumerate(hrs):
                if mins > 0:
                    # number of mins
                    total[hour][0] += mins
                    # number of drivers active in this hour
                    total[hour][1] += 1
    # calculate mean minutes worked (integer mean, matching the original
    # int-array storage); guard against hours with zero active drivers.
    for t in total:
        if t[1] > 0:
            t[2] = t[0] // t[1]
    return total
def calcResidual(data, driver_hash, avg_time):
    """Accumulate each driver's per-hour residual vs. the fleet average.

    data : iterable of days, each an array of shape (drivers, 24).
    driver_hash : mapping whose length is the number of drivers.
    avg_time : length-24 array of mean minutes driven per hour.

    Returns (residuals, driver_working_min):
      residuals -- (num_drivers, 24) array, sum over days of
                   (driver's hourly minutes - avg_time).
      driver_working_min -- per-driver total minutes worked.
    """
    num_drivers = len(driver_hash)
    residuals = np.zeros((num_drivers, 24))
    driver_working_min = np.zeros(num_drivers)

    for day in data:
        for d in range(len(day)):
            driver_time = day[d]
            # Fix: the residual and the minute total must use the driver's
            # time series, not the integer loop index (the original computed
            # `driver - avg_time` and `np.sum(driver)` on the index, and
            # `list += ndarray` extended the list instead of accumulating).
            residuals[d] += driver_time - avg_time
            driver_working_min[d] += np.sum(driver_time)

    return residuals, driver_working_min
def driverEfficiency(residuals, driver_working_min, driver_hash):
    """Map each driver's ID to a residual-per-minute efficiency score."""
    rankings = dict()
    for idx, row in enumerate(residuals):
        minutes = driver_working_min[idx]
        # Drivers with no recorded minutes keep a score of 0
        # (onboarded-but-inactive drivers are effectively ignored).
        score = np.sum(row) / minutes if minutes > 0 else 0
        rankings[getDriverID(driver_hash, idx)] = score
    return rankings
def analyze(data, driver_hash):
    """Run the full pipeline: hourly aggregates, residuals, rankings, plots."""
    # Per-hour aggregates over the whole dataset:
    # [total minutes, active drivers, mean minutes per active driver].
    totalTime = calcTotalDrivingMin(data)

    # Per-driver residuals against the hourly means (last column).
    hourly_means = np.squeeze(totalTime[:, -1:], axis=-1)
    residuals, driver_working_min = calcResidual(data, driver_hash, hourly_means)

    driver_rankings = driverEfficiency(residuals, driver_working_min, driver_hash)

    # Generate the visualisations.
    visRidePopularity(totalTime[:, 0:1])
    compPrimeTime(residuals, driver_working_min, totalTime)
def visRidePopularity(mins):
    """Scatter-plot total minutes driven per hour of day.

    mins : (24, 1) array (column 0 of calcTotalDrivingMin's output).
    Saves the figure to 'ride_popularity.png'.
    """
    time_intervals = list(range(24))
    # Fix: removed a leftover debug print of the hour list.
    values = np.squeeze(mins, axis=-1)
    frame = pd.DataFrame(data={'Time': time_intervals,
                               'Average # of Minutes Driven': values})
    ax = sns.scatterplot(x=frame['Time'], y=frame['Average # of Minutes Driven'],
                         data=frame)
    ax.get_figure().savefig('ride_popularity.png')
def compPrimeTime(residuals, driver_working_min, totalTime):
    """Plot mean per-hour residuals against the hourly totals and save
    'prime_time.png'; returns the (residual, total) pairs per hour.

    NOTE(review): the x-axis labelled 'Prime Time' is totalTime[:, 0:1],
    which calcTotalDrivingMin fills with *total minutes per hour*, not a
    Prime Time metric -- confirm the intended column.  The print calls
    below look like debugging leftovers.
    """
    print('residuals: ', residuals.shape) # 46 x 24
    # Mean residual across drivers for each of the 24 hours.
    resulting_diff = []
    for i in range(24):
        diff = residuals[:,i:i+1]
        resulting_diff.append(np.mean(diff))
    print(resulting_diff)
    means = totalTime[:,0:1]
    means = np.squeeze(means, axis=-1)
    resulting_diff = np.asarray(resulting_diff)
    print("means", means.shape)
    print('resulting_diff: ', resulting_diff.shape)
    # resulting_diff = np.squeeze(resulting_diff, axis=-1)
    d = {'Prime Time': means, 'Residual': resulting_diff}
    data = pd.DataFrame(data = d)
    ax = sns.scatterplot(x=data['Prime Time'], y=data['Residual'], data=data)
    fig = ax.get_figure()
    fig.savefig('prime_time.png')
    all_scores = list(zip(resulting_diff, means))
    return all_scores
| [
"jennanla@usc.edu"
] | jennanla@usc.edu |
7271a590d509168d243ce7c8c3d509719a72fae1 | b9c67cbe41da7743dbe46e7054844ff68a1c0fb4 | /plot_tri.py | d466a28ba3e0957dc50dedc4c358bfcffd98f7ab | [] | no_license | SuperKam91/McAdam | a82c0a8f4366880566b6a1dd2c551b9572c44b22 | ab2f36417cb870e611a5f43ad7854fdfc2d1e439 | refs/heads/master | 2021-07-18T19:44:44.144640 | 2018-05-06T12:33:38 | 2018-05-06T12:33:38 | 132,277,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | #!/usr/bin/python26
# NOTE(review): Python 2 script (print statements, raw_input).  Indentation was
# lost when this file was extracted; the nesting below is reconstructed from
# the control-flow keywords -- verify against the original repository.
from getdist import plots, MCSamples, loadMCSamples
import getdist
# Interactively collect chain paths until the user enters 'q'.
ch_path=[]
while 1:
    ch_path.append(raw_input('Path to chains or q to quit: ').strip())
    if ch_path[-1]=='q':
        # Drop the sentinel 'q' entry before leaving the loop.
        ch_path=ch_path[:-1]
        break
# Load every requested chain with getdist.
ch=[]
for p in ch_path:
    ch.append(loadMCSamples(p))
# Parameter names are taken from the first chain; later chains are matched by label.
pars=ch[0].getParamNames()
if len(ch)>1:
    print 'Select parameters based on first chain entry; will look for matching parameter names in subsequent entries'
print 'Parameters are: '
print pars
# Interactively collect the (LaTeX) labels of the parameters to plot.
plotpars=[]
while 1:
    p=raw_input('Parameter to plot or q to quit: ').strip()
    if p=='q': break
    plotpars.append(pars.parWithName(p).label)
# Internal names 'plot0', 'plot1', ... used for the derived/renamed parameters.
plotpar_names=['plot'+str(i) for i in range(len(plotpars))]
# Optionally collect "true" values to mark on the triangle plot.
do_markers=raw_input('Enter true values (y/n): ').strip()
markers={}
if 'y' in do_markers.lower():
    print 'Enter true value or non-numerical value to skip'
    for p in plotpars:
        m=raw_input('True value for '+p+': ')
        try:
            markers[p]=float(m)
        except:
            # Non-numerical input: silently skip this marker (intentional).
            pass
# Match each requested label in every chain, rescaling masses to 1e14 Msun and
# fluxes to mJy via derived parameters so the axis labels stay readable.
for i, samps in enumerate(ch):
    parsi=samps.getParamNames()
    for p_label in plotpars:
        found=False
        for j, pj in enumerate(parsi.names):
            if pj.label==p_label:
                found=True
                p_name=plotpar_names[plotpars.index(p_label)]
                if p_label[0]=='M' and 'M_{\odot}' in p_label:
                    # Add a derived parameter for mass to get around labelling issues
                    M=eval('samps.getParams().'+pj.name)
                    M1=samps.ranges.getLower(pj.name)
                    M2=samps.ranges.getUpper(pj.name)
                    p_label_new=p_label.replace('M_{\odot}', '10^{14} M_{\odot}')
                    samps.addDerived(M*1e-14, name=p_name, label=p_label_new)
                    samps.updateBaseStatistics()
                    # Adding the derived parameter doesn't retain the range info
                    if M1 is None:
                        M1='N'
                    else:
                        M1*=1e-14
                    if M2 is None:
                        M2='N'
                    else:
                        M2*=1e-14
                    samps.setRanges({p_name: [M1, M2]})
                    if p_label in markers.keys() and i==0:
                        markers[p_label]*=1e-14
                elif p_label[0]=='S' and 'Jy' in p_label:
                    # Convert source fluxes to mJy
                    S=eval('samps.getParams().'+pj.name)
                    S1=samps.ranges.getLower(pj.name)
                    S2=samps.ranges.getUpper(pj.name)
                    samps.addDerived(S*1e3, name=p_name, label=p_label.replace('Jy', 'mJy'))
                    samps.updateBaseStatistics()
                    # Adding the derived parameter doesn't retain the range info
                    if S1 is None:
                        S1='N'
                    else:
                        S1*=1e3
                    if S2 is None:
                        S2='N'
                    else:
                        S2*=1e3
                    samps.setRanges({p_name: [S1, S2]})
                    if p_label in markers.keys() and i==0:
                        markers[p_label]*=1e3
                else:
                    # No rescaling needed: just rename to the shared plot name.
                    pj.name=p_name
        if not found:
            print 'Warning: '+p_label+' not found in '+ch_path[i]
    samps.setParamNames(parsi)
# Legend entries: one per chain.
leg=[]
for c in ch:
    leg.append(c.getName())
# Build the filled triangle plot and overlay the true-value markers.
g=plots.getSubplotPlotter(width_inch=8)
g.settings.axes_fontsize=8
g.settings.alpha_filled_add=0.4
g.triangle_plot(ch, plotpar_names,
                filled_compare=True,
                legend_labels=leg,
                legend_loc='upper right')
for ipar, p in enumerate(plotpars):
    if p in markers.keys():
        # Vertical line on the 1-D marginal...
        ax=g.subplots[ipar,ipar]
        ax.axvline(x=markers[p], color='k')
        # ...and a star on every 2-D panel below it that also has a marker.
        for jpar, p2 in enumerate(plotpars[ipar+1:]):
            if p2 in markers.keys():
                jplot=ipar+jpar+1
                ax=g.subplots[jplot,ipar]
                ax.plot(markers[p], markers[p2], '*k')
g.export(ch_path[0]+'_tri.png')
| [
"kj316@cam.ac.uk"
] | kj316@cam.ac.uk |
8ef604e567085a98f8973285131c2bdfcfa87320 | 45db4a55c6bd5137b17bf8dfa54ed94f361c3bf6 | /ResonantCircuits/parallelResonantCircuit.py | 913cbbe0b0728612811e4e777255d1dfd7520aae | [] | no_license | CatT-DancingDev/PythonProjects | 1be3e8f0b0528be1ccbe8aeadb76ac8a5f9961ae | 7b59d9b1843eaddb9254f980f178d6e8ba551106 | refs/heads/main | 2023-04-15T08:06:25.240981 | 2021-04-25T04:13:15 | 2021-04-25T04:13:15 | 361,327,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,878 | py | ###################################################################################################
#
# Program: Resonant Circuit Design
# Module: parallelResonantCircuit.py
# Author: Catherine Trujillo
# Course: CSC 217-470
# Date: 7/07/2020
#
###################################################################################################
#
#
# Description: This module defines/implements the subclass ParallelResonantCircuit, which extends
# the class Resonant Circuit by adding a method to design the parallel circuit for
# the given RFR values. It also overrides the superclasses display method to include
# a print out of the design values:
# - self._R = Resistance
# - self._C = Capacitance
# - self._L = Inductance
#
############################## SUBCLASS METHODS LIST ##############################################
#
# __init__(self)
# designCircuit(self)
# display(self)
#
############################## LIBRARIES AND MODULES ##############################################
from resonantCircuit import ResonantCircuit
############################## SUBCLASS DEFINITION ################################################
class ParallelResonantCircuit(ResonantCircuit):
############################## METHODS ############################################################
#
# Method: __init__(self)
#
# Parameters: self
# Return Value: ParallelResonantCircuit object
#
# Purpose: SuperClass Constructor initializes fields for:
# _rf = Resonant Frequency in rad/s
# _b = Bandwidth in rad/s
# _k = Gain at RF
#
# SubClass Constructor initiliazes fields for:
# _R = Resistance
# _C = Capacitance
# _L = Inductance
#
####################################################################################################
def __init__(self):
super().__init__()
self._R = 0
self._C = 0
self._L = 0
####################################################################################################
#
# Method: designCircuit(self)
#
# Parameters: self
# Return Value: None
#
# Purpose: Retrieve data from superclass fields for use in design calculations. Set subclass
# instance fields using design equations provided in textbook
#
####################################################################################################
def designCircuit(self):
# Retrive data from superclass fields for use in design calculations
rf = super().getRF()
b = super().getB()
k = super().getK()
# Set subclass instance fields using design equations provided in textbook
self._R = k
self._C = 1 / (b * self._R)
self._L = 1 / ((rf ** 2) * self._C)
####################################################################################################
#
# Method: display(self)
#
# Parameters: self
# Return Value: None
#
# Purpose: This method extends the superclass display method to include a printout of the
# Parallel Resonant Circuit Design Values
#
####################################################################################################
def display(self):
# Include superclass display method
super().display()
# Add parallel Circuit Design
print("PARALLEL CIRCUIT DESIGN")
print("R = {}".format(self._R))
print("C = {}".format(self._C))
print("L = {} \n".format(self._L))
################################## END SUBCLASS ###################################################
| [
"noreply@github.com"
] | noreply@github.com |
47b0fad3467437ec0622fddde5ff65dbed7f685e | a306e621d15d6287f75c8e4f22329da810408605 | /tests/test_distance.py | 2500daf3774b7e965c5bd7d243e4f01b24e8e026 | [
"MIT"
] | permissive | moble/quaternionic | c6175a8e5ff57fbb9d2f2462bc761368f3b4fa66 | 074b626d0c63aa78479ff04ed41638931ca6693a | refs/heads/main | 2023-06-08T08:21:46.827232 | 2023-02-07T17:36:31 | 2023-02-07T17:36:38 | 286,745,519 | 73 | 7 | MIT | 2023-05-27T12:19:43 | 2020-08-11T13:00:26 | Python | UTF-8 | Python | false | false | 3,116 | py | import warnings
import numpy as np
import quaternionic
import pytest
@pytest.mark.parametrize("rotor,rotation,slow", [ # pragma: no branch
(quaternionic.distance.rotor, quaternionic.distance.rotation, True),
quaternionic.distance.CreateMetrics(lambda f: f, quaternionic.utilities.pyguvectorize) + (False,)
], ids=["jit metrics", "non-jit metrics"])
def test_metrics(Rs, array, rotor, rotation, slow):
metric_precision = 4.e-15
Rs = array(Rs.ndarray)
one = array(1, 0, 0, 0)
intrinsic_funcs = (rotor.intrinsic, rotation.intrinsic)
chordal_funcs = (rotor.chordal, rotation.chordal)
metric_funcs = intrinsic_funcs + chordal_funcs
rotor_funcs = (rotor.intrinsic, rotor.chordal)
rotation_funcs = (rotation.intrinsic, rotation.chordal)
distance_dict = {func: func(Rs, Rs[:, np.newaxis]) for func in metric_funcs}
# Check non-negativity
for mat in distance_dict.values():
assert np.all(mat >= 0.)
# Check discernibility
for func in metric_funcs:
if func in chordal_funcs:
eps = 0
else:
eps = 5.e-16
if func in rotor_funcs:
target = Rs != Rs[:, np.newaxis]
else:
target = np.logical_and(Rs != Rs[:, np.newaxis], Rs != - Rs[:, np.newaxis])
assert ((distance_dict[func] > eps) == target).all()
# Check symmetry
for mat in distance_dict.values():
assert np.allclose(mat, mat.T, atol=metric_precision, rtol=0)
# Check triangle inequality
for mat in distance_dict.values():
assert ((mat - metric_precision)[:, np.newaxis, :] <= mat[:, :, np.newaxis] + mat).all()
# Check distances from self or -self
for func in metric_funcs:
# All distances from self should be 0.0
if func in chordal_funcs:
eps = 0
else:
eps = 5.e-16
assert (np.diag(distance_dict[func]) <= eps).all()
# Chordal rotor distance from -self should be 2
assert (abs(rotor.chordal(Rs, -Rs) - 2.0) < metric_precision).all()
# Intrinsic rotor distance from -self should be 2pi
assert (abs(rotor.intrinsic(Rs, -Rs) - 2.0 * np.pi) < metric_precision).all()
# Rotation distances from -self should be 0
assert (rotation.chordal(Rs, -Rs) == 0.0).all()
assert (rotation.intrinsic(Rs, -Rs) < 5.e-16).all()
# We expect the chordal distance to be smaller than the intrinsic distance (or equal, if the distance is zero)
assert np.logical_or(rotor.chordal(one, Rs) < rotor.intrinsic(one, Rs), Rs == one).all()
if slow:
# Check invariance under overall rotations: d(R1, R2) = d(R3*R1, R3*R2) = d(R1*R3, R2*R3)
for func in rotor.chordal, rotation.intrinsic:
rotations = Rs[:, np.newaxis] * Rs
right_distances = func(rotations, rotations[:, np.newaxis])
assert (abs(distance_dict[func][:, :, np.newaxis] - right_distances) < metric_precision).all()
left_distances = func(rotations[:, :, np.newaxis], rotations[:, np.newaxis])
assert (abs(distance_dict[func] - left_distances) < metric_precision).all()
| [
"michael.oliver.boyle@gmail.com"
] | michael.oliver.boyle@gmail.com |
a83ca6ce05b21ca251fbaf60b2b466a01099eeb9 | 8fe8294460dd743534d0c10a9288da3f3d2ae155 | /scripts/mimic3benchmarks_inhospital_mortality/predictions_collapsed/zzz_old_code/train_random_forest_with_per_tslice_features.smk | b0244edb0bbd708ce40fd4f097f44524bbe9cd1d | [
"MIT"
] | permissive | tufts-ml/time_series_prediction | 32132a6c0d17ce196ea97b55387ea27fa18ba6e3 | 831c388d8854773b7545a3a681a596f7c2d98dff | refs/heads/master | 2023-04-08T20:03:05.647403 | 2022-07-19T19:10:06 | 2022-07-19T19:10:06 | 161,347,900 | 8 | 7 | MIT | 2023-03-24T22:43:26 | 2018-12-11T14:40:58 | Jupyter Notebook | UTF-8 | Python | false | false | 2,764 | smk | '''
Train random forest on collapsed features for mimic3 in-hospital mortality.

Usage
-----
snakemake --cores 1 --snakefile train_random_forest.smk train_and_evaluate_classifier
'''
# NOTE(review): 'sys' and 'os' are used below without an import in this file --
# presumably provided by snakemake's execution context or an include; confirm.
sys.path.append('../predictions_collapsed/')
# Shared experiment configuration and dataset/result path constants.
from config_loader import (
    D_CONFIG,
    DATASET_STD_PATH, DATASET_SPLIT_PATH,
    PROJECT_REPO_DIR, PROJECT_CONDA_ENV_YAML,
    DATASET_SPLIT_FEAT_PER_TSLICE_PATH,
    RESULTS_FEAT_PER_TSLICE_PATH,
    DATASET_SPLIT_COLLAPSED_FEAT_PER_TSLICE_PATH,
    RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH)
# Only the first configured seed is used for this run (see params below).
random_seed_list=D_CONFIG['CLF_RANDOM_SEED_LIST']
CLF_TRAIN_TEST_SPLIT_PATH = os.path.join(DATASET_SPLIT_COLLAPSED_FEAT_PER_TSLICE_PATH, 'classifier_train_test_split_dir')
RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH = os.path.join(RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH, 'random_forest')
# NOTE(review): the banner says "logistic regression" but this Snakefile trains
# a random forest; the string is runtime output so it is left unchanged here.
print("Training logistic regression")
print("--------------------------")
print("Results and trained model will go to:")
print(RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH)
# Single rule: run eval_classifier.py on the pre-collapsed train/test split
# and produce an HTML report of the fitted random forest.
rule train_and_evaluate_classifier:
    input:
        script=os.path.join(PROJECT_REPO_DIR, 'src', 'eval_classifier.py'),
        x_train_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'x_train.csv'),
        x_test_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'x_test.csv'),
        y_train_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'y_train.csv'),
        y_test_csv=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'y_test.csv'),
        x_dict_json=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'x_dict.json'),
        y_dict_json=os.path.join(CLF_TRAIN_TEST_SPLIT_PATH, 'y_dict.json')
    params:
        output_dir=RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH,
        random_seed=int(random_seed_list[0])
    output:
        output_html=os.path.join(RESULTS_COLLAPSED_FEAT_PER_TSLICE_PATH, "report.html")
    conda:
        PROJECT_CONDA_ENV_YAML
    # The {{...}} placeholders are substituted via str.replace below, so they
    # survive snakemake's own {input}/{params} formatting pass.
    shell:
        '''
        mkdir -p {params.output_dir} && \
        python -u {input.script} \
            random_forest \
            --outcome_col_name {{OUTCOME_COL_NAME}} \
            --output_dir {params.output_dir} \
            --train_csv_files {input.x_train_csv},{input.y_train_csv} \
            --test_csv_files {input.x_test_csv},{input.y_test_csv} \
            --data_dict_files {input.x_dict_json},{input.y_dict_json} \
            --merge_x_y False \
            --validation_size 0.15 \
            --key_cols_to_group_when_splitting {{SPLIT_KEY_COL_NAMES}} \
            --random_seed {params.random_seed}\
            --n_splits 2 \
            --scoring roc_auc_score \
            --threshold_scoring balanced_accuracy_score \
            --class_weight balanced \
        '''.replace("{{OUTCOME_COL_NAME}}", D_CONFIG["OUTCOME_COL_NAME"])\
.replace("{{SPLIT_KEY_COL_NAMES}}", D_CONFIG["SPLIT_KEY_COL_NAMES"]) | [
"prath01@alpha001.lux.tufts.edu"
] | prath01@alpha001.lux.tufts.edu |
d5b4a608c9726f75c6b1686c1985c5c4f548ef98 | 7aff63055c63b41e39f99a87d0628383f61bee54 | /Composite_index/4. Aggregation/MPI_Mazziotta_Pareto.py | d88da10caac02c1655dfd757e34c5a763c2fb8d7 | [] | no_license | IacopoTesti/Spatial_Data_Science_Rome | 634f39fdb5246f5fa14e8db9ec9e55b4a1d19d3d | a63adb33b4b84c28f30131c7b805bb1a50979979 | refs/heads/master | 2023-03-18T08:22:07.472985 | 2021-03-16T17:30:36 | 2021-03-16T17:30:36 | 262,096,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | ### This block defines the aggregation for normalization with MPI (Mazziotta Pareto Index)
#### Aggregation step: Mazziotta-Pareto Index (MPI) -- an arithmetic mean
#### corrected by a variability penalty (similar in effect to a geometric mean).
# NOTE(review): df_MPI (the standardized indicator matrix) must already be in
# scope; it is defined earlier in the notebook/script -- confirm.
# Row-wise mean of the standardized values z (one value per unit/row).
z_media = df_MPI.mean(axis=1)
# Row-wise standard deviation of the standardized values z.
z_sigma = df_MPI.std(axis=1)
# Coefficient of variation per row.
cv = z_sigma / z_media
# MPI with the positive penalty sign (the measured phenomenon is negative).
MPI_agg = z_media + z_sigma*cv
# Series -> one-column DataFrame so it can be merged back onto the matrix.
MPI_agg = MPI_agg.to_frame(name='mpi_index')
df_MPI_index = df_MPI.merge(MPI_agg, left_index=True, right_index=True)
df_MPI_index = df_MPI_index.round(3)
print("This is the matrix with the composite index computed with MPI normalization and MPI aggregation")
# Export the indexed matrix to csv.
df_MPI_index.to_csv('df_mpi_index.csv')
# Show the first rows (effective output only in a notebook/REPL).
df_MPI_index.head()
| [
"noreply@github.com"
] | noreply@github.com |
d038c06c6c4f20653a17f5fb33b4d16d637fb9be | 66acbd1f601e00f311c53a9ce0659e5b56c87fef | /pre_analysis/observable_analysis/topc4mcintervalanalyser.py | 508e973104b53d9e4245701856db48d1f55c9b6c | [
"MIT"
] | permissive | hmvege/LatticeAnalyser | fad3d832190f4903642a588ed018f6cca3858193 | 6c3e69ab7af893f23934d1c3ce8355ac7514c0fe | refs/heads/master | 2021-05-25T11:46:30.278709 | 2019-04-11T14:14:23 | 2019-04-11T14:14:23 | 127,303,453 | 0 | 1 | null | 2018-10-12T21:09:58 | 2018-03-29T14:29:14 | Python | UTF-8 | Python | false | false | 601 | py | from pre_analysis.core.flowanalyser import FlowAnalyser
class Topc4MCIntervalAnalyser(FlowAnalyser):
    """Flow analyser for the fourth power of the topological charge, <Q^4>,
    per Monte Carlo interval.

    FlowAnalyser loads the raw observable into ``self.y``; raising it to the
    fourth power in ``__init__`` turns the stored data into Q^4 before any
    downstream analysis runs.
    """
    observable_name = r"$\langle Q^4 \rangle$"  # label used in plot titles
    observable_name_compact = "topc4MC"  # short name used in output file names
    x_label = r"$\sqrt{8t_{f}}$ [fm]"  # flow-time axis label
    y_label = r"$\langle Q^4 \rangle$"
    def __init__(self, *args, **kwargs):
        # All arguments are forwarded untouched to FlowAnalyser.
        super(Topc4MCIntervalAnalyser, self).__init__(*args, **kwargs)
        # In-place fourth power of the loaded observable (presumably a numpy
        # array of Q values set up by FlowAnalyser -- TODO confirm).
        self.y **= 4
def main():
    """Entry-point guard: this module is a library, not a standalone script."""
    # Raise SystemExit directly instead of calling the site-provided exit()
    # builtin, which is only guaranteed to exist when the site module is
    # loaded (e.g. not under `python -S`).  Behaviour is otherwise identical:
    # the message is printed to stderr and the process exits with status 1.
    raise SystemExit("Module Topc4MCIntervalAnalyser not intended for standalone usage.")
if __name__ == '__main__':
main() | [
"hmvege@ulrik.uio.no"
] | hmvege@ulrik.uio.no |
1253730c187a79635a06e3f974cfc757d73a20e1 | 909323b8a1baca96711a0b5285b5af233dda70ae | /01 PY0101EN Python Basics for Data Science/Module 2 - Python Data Structures/Dictionaries.py | d10c9175548d0ef9e261c71bd20ee73ffbb837f1 | [] | no_license | ekmanch/IBM-Python-Data-Science | 629084c104294c0dcd67a592f514c5b160859fcb | 813a8bde137b90b4dfd74387b263987c06e08ba7 | refs/heads/main | 2023-03-28T06:08:34.161225 | 2021-03-20T18:37:32 | 2021-03-20T18:37:32 | 330,181,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,130 | py | ######################################
# Dictionaries Lab 1
######################################
# You will need the dictionary D:
from typing import Dict  # NOTE(review): Dict is imported but never used below
D={'a':0,'b':1,'c':2}
# Find the value for the key 'a'
print(D['a'])
# Find the keys of the dictionary D (prints a dict_keys view, not a list)
print(D.keys())
######################################
# Dictionaries Quiz
######################################
# Question 1
# Consider the following dictionary:
D={'a':0,'b':1,'c':2}
# What is the result of the following: D.values()
# Answer prediction:
# a view of the values in D, i.e. dict_values([0, 1, 2])
print(D.values())
# Question 2
# Consider the following dictionary:
D={'a':0,'b':1,'c':2}
# What is the output of the following D['b'] :
# Answer prediction:
# the value stored under key 'b', i.e. 1
print(D['b'])
######################################
# Dictionaries Lab 2
######################################
# Question 1
# You will need this dictionary for the next two questions:
soundtrack_dic = {"The Bodyguard":"1992", "Saturday Night Fever":"1977"}
soundtrack_dic  # NOTE(review): bare expression; only echoes in a REPL/notebook
# a) In the dictionary soundtrack_dic what are the keys ?
# Answer prediction
# "The Bodyguard" and "Saturday Night Fever"
print(soundtrack_dic.keys())
# b) In the dictionary soundtrack_dic what are the values ?
# Answer prediction
# "1992" and "1977"
print(soundtrack_dic.values())
# Question 2
# You will need this dictionary for the following questions:
# The Albums Back in Black, The Bodyguard and Thriller have the following
# music recording sales in millions 50, 50 and 65 respectively:
# a) Create a dictionary album_sales_dict where the keys are
# the album name and the sales in millions are the values.
album_sales_dict = {"Back in Black":50, "The Bodyguard":50, "Thriller":65}
print(album_sales_dict)
# b) Use the dictionary to find the total sales of Thriller:
print(album_sales_dict["Thriller"])
# c) Find the names of the albums from the dictionary using the method keys():
print(album_sales_dict.keys())
# d) Find the values of the recording sales from the dictionary
# using the method values:
print(album_sales_dict.values()) | [
"christian.ekman89@gmail.com"
] | christian.ekman89@gmail.com |
cb6bf07d43b3538bd0f67d1273b2348a33926802 | 0d419daf3514e06d18107ae834ff72ae6535c04d | /pilot/pilot/controllers/error.py | 45bb750651d3348bf28be9a0cbf1359bfa44a16b | [] | no_license | grid4hpc/pilot | f7efc2a4786c8146096b614c6f81f04283bf83e8 | 183c8cf9b71acaa6f55565091f76afc9b30ffe52 | refs/heads/master | 2016-09-06T02:30:11.126632 | 2013-10-29T16:10:49 | 2013-10-29T16:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | import cgi
from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from pilot.lib.base import BaseController
class ErrorController(BaseController):
    """Generates error documents as and when they are required.

    The ErrorDocuments middleware forwards to ErrorController when error
    related status codes are returned from the application.

    This behaviour can be altered by changing the parameters to the
    ErrorDocuments middleware in your config/middleware.py file.
    """

    def document(self):
        """Render the error document for the original failed response."""
        original = request.environ.get('pylons.original_response')
        # Prefer the original response body; fall back to an escaped
        # 'message' query parameter.
        body = literal(original.body) or cgi.escape(request.GET.get('message', ''))
        substitutions = dict(
            prefix=request.environ.get('SCRIPT_NAME', ''),
            code=cgi.escape(request.GET.get('code', str(original.status_int))),
            message=body,
        )
        return error_document_template % substitutions

    def img(self, id):
        """Serve Pylons' stock images."""
        return self._serve_file('media/img/' + id)

    def style(self, id):
        """Serve Pylons' stock stylesheets."""
        return self._serve_file('media/style/' + id)

    def _serve_file(self, path):
        """Call Paste's FileApp (a WSGI application) to serve the file
        at the specified path.
        """
        request.environ['PATH_INFO'] = '/' + path
        return forward(PkgResourcesParser('pylons', 'pylons'))
| [
"shamardin@gmail.com"
] | shamardin@gmail.com |
d710f047198a283c91cddbb86877ac0c4ecc62e7 | 4b670f0efaadb56b1770bc387e07a6d1439ab1b6 | /GUI/PlottingWidgets.py | cc8685b662ee49a0ffb775f3eac16877a05bb679 | [] | no_license | xyt556/Scattering_Analysis | f2e8e18fb353a36b73ec7792428c3ac9e91152de | 4a98b34ec8ac122da5077e08432782c591857f56 | refs/heads/master | 2021-01-21T15:03:59.213412 | 2017-04-14T07:55:27 | 2017-04-14T07:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,376 | py | import matplotlib as mpl
mpl.use("Qt5Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib import lines
import logging
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
from ..SWAS_Sequence import SWAS_Sequence
from ..SAS import SmallAngleScattering
from ..WAS import WideAngleScattering
from ..SWAS import SWAS
from ..SWAS_Sequence import SWAS_Sequence
plot_types = ('SAS','WAS','SWAS','SAS_Fit','WAS_Fit','SWAS_Fit')
class CentralWidget(QtWidgets.QWidget):
"""Class used for the tree widget panel to store the scattering object.
Based on whether the object is SAS WAS SWAS or a sequence the object
will then decide how to provide the data.
"""
def __init__(self, parent, scattering_object = None, **kwargs):
if kwargs.get('verbose', False):
self.logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
else:
self.logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
super(CentralWidget, self).__init__(parent, **kwargs)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0,0,0,0)
self.stackedWidget = QtWidgets.QStackedWidget(self)
self.toolBar = QtWidgets.QToolBar(self)
self.selectPlot = QtWidgets.QComboBox(self.toolBar)
self.selectPlot.activated.connect(self.selected_plot)
self.toolBar.addWidget(self.selectPlot)
#self.selectPlot.insertItem(0,'Test1')
#self.selectPlot.insertItem(1,'Test2')
self.layout.addWidget(self.stackedWidget)
self.layout.addWidget(self.toolBar)
self.setLayout(self.layout)
def add_plot(self, plotData, plotLayout = 'individual', **kwargs):
"""add_plot is used to add a new widget to the stacked widget along
with a new entry in the dropdown list. It decides which plotting widget
to use based on the object to be plotted and how it should be displayed.
Args:
plotData(dict): contains all the information relative to the object
to plot:
'Object': one of the existing scattering objects
'SAS': the list of SAS objects which needs to be plotted. (Valid
only if the objects is a SWAS or SWAS_Sequence object)
'WAS': the list of WAS objects which needs to be plotted. (Valid
only if the objects is a SWAS or SWAS_Sequence object)
If WAS and SAS are both in the dictionary but are different then the
plot will not be done and ean error message will be printed
"""
if isinstance(plotData['Object'], SWAS_Sequence):
if plotData['SAS'] or plotData['WAS']:
#Plot both the SAS and WAS patterns
if plotData['SAS'] == plotData['WAS']:
if len(plotData['SAS']) == 1:
self.logging.debug('plotting single SWAS')
currPos = self.stackedWidget.addWidget(DoublePlot(self.stackedWidget, plotData = plotData['Object'][plotData['SAS'][0]],\
plot_type = 'SWAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'][plotData['SAS'][0]].sampleName))
else:
self.logging.debug('plotting multiple SWAS')
currPos = self.stackedWidget.addWidget(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'SWAS_Sequence'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].sampleName))
#Plot only SAS data
elif not plotData['WAS']:
if len(plotData['SAS']) == 1:
self.logging.debug('plotting single SAS')
currPos = self.stackedWidget.addWidget(SinglePlot(self.stackedWidget, plotData = plotData['Object'][plotData['SAS'][0]].SAS,\
plot_type = 'SAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'][plotData['SAS'][0]].SAS.sampleName))
else:
self.logging.debug('plotting multiple SAS')
currPos = self.stackedWidget.addWidget(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'SAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].sampleName))
#Plot only WAS data
elif not plotData['SAS']:
if len(plotData['WAS']) == 1:
self.logging.debug('plotting single WAS')
currPos = self.stackedWidget.addWidget(SinglePlot(self.stackedWidget, plotData = plotData['Object'][plotData['WAS'][0]].SAS,\
plot_type = 'WAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'][plotData['WAS'][0]].WAS.sampleName))
else:
self.logging.debug('plotting multiple WAS')
currPos = self.stackedWidget.addWidget(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'WAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].sampleName))
else:
self.logging.error('Cannot understend how/what to plot with the given selection')
return
elif isinstance(plotData['Object'], SWAS):
if plotData['SAS'] and plotData['WAS']:
self.logging.debug('plotting SWAS')
currPos = self.stackedWidget.addWidget(DoublePlot(self.stackedWidget, plotData = plotData['Object'],\
plot_type = 'SWAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].sampleName))
elif plotData['SAS']:
self.logging.debug('plotting single SAS')
currPos = self.stackedWidget.addWidget(SinglePlot(self.stackedWidget, plotData = plotData['Object'].SAS,\
plot_type = 'SAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].SAS.sampleName))
elif plotData['WAS']:
self.logging.debug('plotting single WAS')
currPos = self.stackedWidget.addWidget(SinglePlot(self.stackedWidget, plotData = plotData['Object'].WAS,\
plot_type = 'WAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].WAS.sampleName))
elif isinstance(plotData['Object'], SAS):
self.logging.debug('plotting single SAS')
currPos = self.stackedWidget.addWidget(SinglePlot(self.stackedWidget, plotData = plotData['Object'].SAS,\
plot_type = 'SAS'))
elif isinstance(plotData['Object'], WAS):
self.logging.debug('plotting single WAS')
currPos = self.stackedWidget.addWidget(SinglePlot(self.stackedWidget, plotData = plotData['Object'].WAS,\
plot_type = 'WAS'))
else:
self.logging.info('No recognizable element selected')
return
def add_fit(self, plotData, fitType, **kwargs ):
'''add_fit is used ot add a new plot containing the fitted data. In this case
all SAS plots are double (the data and the distribution).
Args:
plotData(dict): contains all the information relative to the object for which
the fitting was done.
fitType(string): the name of the fitting method which shoudl be plotted
'''
if isinstance(plotData['Object'], SWAS_Sequence):
if not plotData['WAS']:
if len(plotData['SAS']) == 1:
self.logging.debug('plotting single SAS')
currPos = self.stackedWidget.addWidget(FitPlot(self.stackedWidget, plotData = plotData['Object'][plotData['SAS'][0]].SAS,\
plot_type = 'SAS', fitType = fitType))
self.selectPlot.insertItem(currPos, '{}_fit'.format(plotData['Object'][plotData['SAS'][0]].SAS.sampleName))
else:
self.logging.debug('plotting multiple SAS')
currPos = self.stackedWidget.addWidget(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'SAS', fitType = fitType))
self.selectPlot.insertItem(currPos, '{}_fit'.format(plotData['Object'].sampleName))
#Plot only WAS data
elif not plotData['SAS']:
if len(plotData['WAS']) == 1:
self.logging.debug('plotting single WAS')
currPos = self.stackedWidget.addWidget(FitPlot(self.stackedWidget, plotData = plotData['Object'][plotData['WAS'][0]].SAS,\
plot_type = 'WAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'][plotData['WAS'][0]].WAS.sampleName))
else:
self.logging.debug('plotting multiple WAS')
currPos = self.stackedWidget.addWidget(MultiplePlot(self.stackedWidget, plotData = plotData, plot_type = 'WAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].sampleName))
else:
self.logging.error('Cannot understend how/what to plot with the given selection')
return
elif isinstance(plotData['Object'], SWAS):
if not plotData['WAS']:
currPos = self.stackedWidget.addWidget(FitPlot(self.stackedWidget, plotData = plotData['Object'].SAS,\
plot_type = 'SAS', fitType = fitType))
self.selectPlot.insertItem(currPos, '{}_fit'.format(plotData['Object'].SAS.sampleName))
elif not plotData['SAS']:
currPos = self.stackedWidget.addWidget(FitPlot(self.stackedWidget, plotData = plotData['Object'].SAS,\
plot_type = 'WAS'))
self.selectPlot.insertItem(currPos, '{}_plot'.format(plotData['Object'].WAS.sampleName))
elif isinstance(plotData['Object'], SAS):
currPos = self.stackedWidget.addWidget(FitPlot(self.stackedWidget, plotData = plotData['Object'],\
plot_type = 'SAS', fitType = fitType))
self.selectPlot.insertItem(currPos, '{}_fit'.format(plotData['Object'].sampleName))
elif isinstance(plotData['Object'], WAS):
currPos = self.stackedWidget.addWidget(FitPlot(self.stackedWidget, plotData = plotData['Object'],\
plot_type = 'WAS', fitType = fitType))
self.selectPlot.insertItem(currPos, '{}_fit'.format(plotData['Object'].sampleName))
else:
self.logging.info('No recognizable element selected')
return
def selected_plot(self):
selIndx = self.selectPlot.currentIndex()
self.stackedWidget.setCurrentIndex(selIndx)
self.logging.debug('changed index to {}'.format(selIndx))
class SingleCanvasPlot(FigureCanvas):
    """Drawing canvas holding a single axes on one figure.

    Can be used to plot either SAS or WAS data.
    """

    def __init__(self, parent=None, figsize=(5, 4), **kwargs):
        """Create the figure with one axes and attach the canvas to Qt.

        Args:
            parent (QtWidget): the parent widget the canvas is associated
                with. Defaults to None.
            figsize (tuple): figure size as (width, height) in inches, used
                to create the Figure. Defaults to (5, 4).
            **kwargs: 'verbose' (bool) enables debug-level logging.
        """
        # Bug fix: 'logging' is the module; the old self.logging.* calls
        # raised AttributeError (self has no 'logging' attribute, and the
        # superclass is not even initialised yet at this point).
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        self.fig = Figure(figsize=figsize)
        self.axes = self.fig.add_subplot(111)
        super(SingleCanvasPlot, self).__init__(self.fig)
        self.setParent(parent)
        # Let the canvas grow with the surrounding layout.
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()
class SinglePlot(QtWidgets.QWidget):
    """Widget wrapping a SingleCanvasPlot together with a navigation toolbar."""

    def __init__(self, parent=None, **kwargs):
        """Lay out the canvas and toolbar and, if data was given, plot it.

        Args:
            parent (QtWidget): parent widget. Defaults to None.
            **kwargs:
                'plotData': a scattering object exposing plot_data(ax=...);
                    when provided it is plotted immediately.
                'verbose' (bool): enables debug-level logging.
        """
        # Bug fix: use the logging module directly; self.logging never
        # existed and raised AttributeError.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        super(SinglePlot, self).__init__(parent)
        self.scatteringObject = kwargs.get('plotData', None)
        self.plot_layout = QtWidgets.QVBoxLayout(self)
        self.plot_canvas = SingleCanvasPlot(self, figsize=(5, 4))
        self.navi_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.plot_layout.addWidget(self.plot_canvas)  # the matplotlib canvas
        self.plot_layout.addWidget(self.navi_toolbar)
        self.setLayout(self.plot_layout)
        # If the widget was created with an object it can be plotted directly.
        if self.scatteringObject is not None:
            self.plot_data()

    def plot_data(self):
        """Plot the stored scattering object onto the canvas axes."""
        self.scatteringObject.plot_data(ax=self.plot_canvas.axes)

    def remove_line(self, line):
        """Remove a Line2D object from the axes given its handle.

        Useful to remove vertical guide lines drawn to mark fitting limits.
        """
        if isinstance(line, lines.Line2D):
            if line in self.plot_canvas.axes.lines:
                # Bug fix: Artist.remove() instead of mutating axes.lines
                # directly, which fails on matplotlib >= 3.5 where
                # Axes.lines is an immutable ArtistList.
                line.remove()
            else:
                logging.error('{} is not in the axes'.format(line))

    def axvline(self, x):
        """Draw a black vertical guide line at *x* on the widget's axes."""
        self.plot_canvas.axes.axvline(x, color='k', linewidth=2)

    def cla(self):
        """Clear the axes of the widget."""
        self.plot_canvas.axes.cla()

    def redraw(self):
        """Redraw the canvas after artists were added or removed."""
        self.plot_canvas.draw()

    def ax_x_lim(self):
        """Return the x limits of the canvas' axes."""
        return self.plot_canvas.axes.get_xlim()

    def ax_y_lim(self):
        """Return the y limits of the canvas' axes."""
        return self.plot_canvas.axes.get_ylim()
class DoubleCanvasPlot(FigureCanvas):
    '''Drawing canvas for plotting two axes on the same figure. Can be used
    to plot SAS and WAS data at the same time, or a scattering curve plus a
    plot of the fitting (e.g. a SAS curve and the fitted size distribution).
    '''
    def __init__(self, parent=None, figsize=(5, 4), rows=1, cols=2, rowSpan=1, colSpan=1, **kwargs):
        '''Initiates the canvas.

        Args:
            parent (QtWidget): the parent widget. Defaults to None
            figsize (tuple of int): figure size as (width, height).
                Defaults to (5,4)
            rows (int): number of grid rows over which the two axes are
                disposed. Defaults to 1
            cols (int): number of grid columns. Defaults to 2
            rowSpan (int): rows spanned by the first axes; the remaining
                rows are attributed to the second axes. Defaults to 1
            colSpan (int): columns spanned by the first axes; the remaining
                columns are attributed to the second axes. Defaults to 1
            verbose (bool, keyword only): DEBUG logging when True
        '''
        # Bug fix: the original used ``self.logging`` which does not exist
        # on the (not yet initialised) instance; use the logging module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        # First axes occupies the top-left rowSpan x colSpan grid cells.
        firstAx = [[0, rowSpan], [0, colSpan]]
        # Second axes takes the leftover rows/columns, or shares the full
        # span in a direction the first axes already covers completely.
        if rowSpan == rows:
            secondAx = [[0, rowSpan]]
        else:
            secondAx = [[rowSpan, rows]]
        if colSpan == cols:
            secondAx.append([0, colSpan])
        else:
            secondAx.append([colSpan, cols])
        self.fig = Figure(figsize=figsize)
        gs = mpl.gridspec.GridSpec(rows, cols)
        self.axes1 = self.fig.add_subplot(gs[slice(*firstAx[0]), slice(*firstAx[1])])
        self.axes2 = self.fig.add_subplot(gs[slice(*secondAx[0]), slice(*secondAx[1])])
        super(DoubleCanvasPlot, self).__init__(self.fig)
        self.setParent(parent)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()
class DoublePlot(QtWidgets.QWidget):
    '''Wrapper widget hosting a DoubleCanvasPlot and its navigation toolbar.'''
    def __init__(self, parent=None, **kwargs):
        '''Args:
            parent (QtWidget): the parent widget. Defaults to None
            plotData (keyword): optional scattering object, plotted right away
            fitType (keyword): optional fit identifier; when given together
                with plotData the fit is plotted instead of the raw data
            verbose (bool, keyword): DEBUG logging when True
        '''
        # Bug fix: ``self.logging`` does not exist; use the logging module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        super(DoublePlot, self).__init__(parent)
        self.scatteringObject = kwargs.get('plotData', None)
        # Bug fix: the original tested a bare ``fitType`` name that was never
        # defined (NameError); read it from the keyword arguments instead.
        self.fitType = kwargs.get('fitType', None)
        self.plot_layout = QtWidgets.QVBoxLayout(self)
        self.plot_canvas = DoubleCanvasPlot(self, figsize=(5, 4))
        self.navi_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.plot_layout.addWidget(self.plot_canvas)  # the matplotlib canvas
        self.plot_layout.addWidget(self.navi_toolbar)
        self.setLayout(self.plot_layout)
        if self.scatteringObject is not None and self.fitType is not None:
            self.plot_fit()
        elif self.scatteringObject is not None:
            self.plot_data()
    def cla(self):
        '''Clears both of the axes in the figure.'''
        self.plot_canvas.axes1.cla()
        self.plot_canvas.axes2.cla()
    def remove_line(self, line):
        '''Searches both axes for the given line and removes it if present.'''
        if isinstance(line, lines.Line2D):
            if line in self.plot_canvas.axes1.lines:
                self.plot_canvas.axes1.lines.remove(line)
            elif line in self.plot_canvas.axes2.lines:
                self.plot_canvas.axes2.lines.remove(line)
            else:
                # Bug fix: was ``self.logging.error`` (no such attribute).
                logging.error('{} was not found in either axis'.format(line))
    def plot_data(self):
        '''Uses the plotting function of the scattering object to plot the
        data on the two available axes.
        '''
        self.scatteringObject.plot_data(axs=[self.plot_canvas.axes1, self.plot_canvas.axes2])
    def plot_fit(self):
        '''Uses the fitting plot function of the scattering object.

        NOTE(review): unlike FitPlot, neither the fit type nor the axes are
        forwarded here - confirm the scattering object's defaults suffice.
        '''
        self.scatteringObject.plot_fit()
    def redraw(self):
        '''Redraws the canvas after lines have been added or removed.'''
        self.plot_canvas.draw()
class MultiplePlot(QtWidgets.QWidget):
    '''Widget used to plot a sequence of scattering objects. It is composed
    of a plotting widget, two buttons to move the current position forward
    and backwards by one, and a slider to select any available curve.
    '''
    def __init__(self, parent=None, **kwargs):
        '''Args (all keyword):
            plotData (dict): {'Object': scattering object, 'SAS': [...],
                'WAS': [...]} describing what can be plotted
            plot_type (str): 'SAS', 'WAS' or 'SWAS_Sequence'. Defaults 'SAS'
            fitType: optional fit identifier used by subclasses. Defaults None
            verbose (bool): DEBUG logging when True
        '''
        # Bug fix: ``self.logging`` does not exist; use the logging module.
        if kwargs.get('verbose', False):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        super(MultiplePlot, self).__init__(parent)
        # Setup the data for the plotting.
        self.plotData = kwargs.get('plotData', None)
        if self.plotData is not None:
            self.scatteringObject = self.plotData['Object']
        else:
            # Bug fix: the attribute is read unconditionally further down,
            # so it must always exist.
            self.scatteringObject = None
        # Bug fix: InitializeValues() reads ``self.fitType``, which was never
        # set on this class; default it so a plain MultiplePlot works.
        self.fitType = kwargs.get('fitType', None)
        self.plotType = kwargs.get('plot_type', 'SAS')
        self.currPlot = 0
        # Single-axes widget for plain SAS/WAS data, double-axes otherwise.
        if self.plotType in ('SAS', 'WAS'):
            self.plotWidget = SinglePlot(self)
        else:
            self.plotWidget = DoublePlot(self)
        # Create the widget in which the data is going to be plotted.
        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.scroll_toolBar = QtWidgets.QToolBar(self)
        self.scrollBar = QtWidgets.QScrollBar(QtCore.Qt.Horizontal, self.scroll_toolBar)
        self.scrollBar.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,
                                     QtWidgets.QSizePolicy.MinimumExpanding)
        self.scrollBar.sliderMoved.connect(self.sliderMoving)
        self.scrollBar.sliderReleased.connect(self.sliderChanged)
        self.text_toolBar = QtWidgets.QToolBar(self)
        self.text_toolBar_layout = QtWidgets.QHBoxLayout(self.text_toolBar)
        # Spacers keep the prev/next buttons centred in the tool bar.
        spacerL = QtWidgets.QWidget()
        spacerR = QtWidgets.QWidget()
        spacerL.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        spacerR.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        self.prev_button = QtWidgets.QPushButton('<<', self.text_toolBar)
        self.prev_button.clicked.connect(self.prevPlot)
        self.next_button = QtWidgets.QPushButton('>>', self.text_toolBar)
        self.next_button.clicked.connect(self.nextPlot)
        self.lineEdit = QtWidgets.QLabel(self.text_toolBar)
        self.lineEdit.setText('LineEditText')
        self.scroll_toolBar.addWidget(self.scrollBar)
        self.text_toolBar.addWidget(spacerL)
        self.text_toolBar.addWidget(self.prev_button)
        self.text_toolBar.addWidget(self.lineEdit)
        self.text_toolBar.addWidget(self.next_button)
        self.text_toolBar.addWidget(spacerR)
        self.text_toolBar_layout.setAlignment(self.lineEdit, QtCore.Qt.AlignHCenter)
        self.layout.addWidget(self.scroll_toolBar)
        self.layout.addWidget(self.text_toolBar)
        self.layout.addWidget(self.plotWidget)
        self.numb_curves = 0
        if self.scatteringObject is not None:
            self.InitializeValues()
    def InitializeValues(self):
        '''Sets all the variables needed to move between the available data
        sets and visualize them.
        '''
        if self.plotType == 'WAS':
            self.selectedPlots = self.plotData['WAS']
        else:
            # 'SAS' and any other plot type index into the SAS list.
            self.selectedPlots = self.plotData['SAS']
        self.numbCurves = len(self.selectedPlots)
        self.typeObject = self.scatteringObject.avlbCurves
        # Bug fix: the label showed '1/{N+1}'; every other update in this
        # class formats it as 'current/N'.
        self.lineEdit.setText('1/{}'.format(self.numbCurves))
        self.scrollBar.setMinimum(1)
        self.scrollBar.setMaximum(self.numbCurves)
        self.scrollBar.setValue(1)
        # Navigation controls only make sense for a sequence of curves.
        if not isinstance(self.scatteringObject, SWAS_Sequence):
            self.text_toolBar.hide()
            self.scroll_toolBar.hide()
        if self.fitType is None:
            self.plot_data()
        else:
            self.plot_fit()
    def set_data(self, scatteringObj, selectedPlots, plotType):
        '''Quick setter for the scattering object, the data to plot and the
        type of plot.
        '''
        self.scatteringObject = scatteringObj
        self.selectedPlots = selectedPlots
        self.plotType = plotType
    def plot_data(self, **kwargs):
        '''Uses the plotting functions of the object to plot the data on the
        available axes after clearing them.
        '''
        self.plotWidget.cla()
        if self.plotType == 'SAS':
            # Bug fix: these were plain ifs, so the error branch also fired
            # for perfectly valid SAS/SWAS objects.
            if isinstance(self.scatteringObject, SmallAngleScattering):
                self.scatteringObject.plot_data(ax=self.plotWidget.plot_canvas.axes, **kwargs)
            elif isinstance(self.scatteringObject, SWAS):
                self.scatteringObject.SAS.plot_data(ax=self.plotWidget.plot_canvas.axes, **kwargs)
            elif isinstance(self.scatteringObject, SWAS_Sequence):
                self.scatteringObject[self.selectedPlots[self.currPlot]].SAS.plot_data(ax=self.plotWidget.plot_canvas.axes, **kwargs)
            else:
                logging.error('Cannot plot Small angles for the selected scattering data')
        if self.plotType == 'WAS':
            if isinstance(self.scatteringObject, WideAngleScattering):
                self.scatteringObject.plot_data(ax=self.plotWidget.plot_canvas.axes, **kwargs)
            elif isinstance(self.scatteringObject, SWAS):
                self.scatteringObject.WAS.plot_data(ax=self.plotWidget.plot_canvas.axes, **kwargs)
            elif isinstance(self.scatteringObject, SWAS_Sequence):
                self.scatteringObject[self.selectedPlots[self.currPlot]].WAS.plot_data(ax=self.plotWidget.plot_canvas.axes, **kwargs)
            else:
                # Bug fix: the original called ``self.logging(...)`` - the
                # module object is not callable.
                logging.error('Cannot plot wide angles for the selected scattering data')
        if self.plotType == 'SWAS_Sequence':
            if isinstance(self.scatteringObject, SWAS_Sequence):
                self.scatteringObject[self.selectedPlots[self.currPlot]].plot_data(
                    axs=[self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2],
                    fig=self.plotWidget.plot_canvas.fig, **kwargs)
        self.plotWidget.redraw()
    def nextPlot(self):
        '''Moves to the plot after the current one; does nothing when the
        last plot is already shown.
        '''
        if (self.currPlot + 1) < self.numbCurves:
            self.currPlot += 1
            self.scrollBar.setValue(self.currPlot + 1)
            self.lineEdit.setText('{}/{}'.format(self.currPlot + 1, self.numbCurves))
            self.plot_data()
    def prevPlot(self):
        '''Moves to the plot before the current one; does nothing when the
        first plot is already shown.
        '''
        if (self.currPlot - 1) >= 0:
            self.currPlot -= 1
            self.scrollBar.setValue(self.currPlot + 1)
            self.lineEdit.setText('{}/{}'.format(self.currPlot + 1, self.numbCurves))
            self.plot_data()
    def sliderMoving(self):
        '''Updates the position label in real time while the slider moves.'''
        self.lineEdit.setText('{}/{}'.format(self.scrollBar.value(), self.numbCurves))
    def sliderChanged(self):
        '''Updates the current plot based on the slider selection.'''
        self.currPlot = self.scrollBar.value() - 1
        self.plot_data()
    def ax_x_lim(self):
        '''Returns the x limits of the current axes (a pair of limits for a
        double plot).
        '''
        if isinstance(self.plotWidget, DoublePlot):
            # Bug fix: the second element used axes1 twice (copy-paste).
            return [self.plotWidget.plot_canvas.axes1.get_xlim(),
                    self.plotWidget.plot_canvas.axes2.get_xlim()]
        else:
            return self.plotWidget.plot_canvas.axes.get_xlim()
    def axvline(self, x):
        '''Draws a vertical line at ``x`` (single-axes widgets only).'''
        if isinstance(self.plotWidget, SinglePlot):
            self.plotWidget.axvline(x)
    def remove_line(self, line):
        '''Delegates line removal to the hosted plot widget.'''
        if isinstance(line, lines.Line2D):
            self.plotWidget.remove_line(line)
        else:
            # Bug fix: was ``self.logging.error`` (no such attribute).
            logging.error('{} is not a matplotlib 2D line'.format(line))
class FitPlot(QtWidgets.QWidget):
    '''Widget that shows the result of a fit on a two-axes canvas.'''
    def __init__(self, parent=None, **kwargs):
        '''Args:
            parent (QtWidget): the parent widget. Defaults to None
            plotData (keyword): optional scattering object
            fitType (keyword): identifier of the fit to display
        '''
        super(FitPlot, self).__init__(parent)
        self.scatteringObject = kwargs.get('plotData', None)
        self.fitType = kwargs.get('fitType', None)
        self.plot_layout = QtWidgets.QVBoxLayout(self)
        self.plot_canvas = DoubleCanvasPlot(self, figsize=(5, 4))
        self.navi_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.plot_layout.addWidget(self.plot_canvas)  # the matplotlib canvas
        self.plot_layout.addWidget(self.navi_toolbar)
        self.setLayout(self.plot_layout)
        if self.scatteringObject is not None and self.fitType is not None:
            self.plot_fit()
        elif self.scatteringObject is not None:
            self.plot_data()
    def cla(self):
        '''Clears both axes.'''
        self.plot_canvas.axes1.cla()
        self.plot_canvas.axes2.cla()
    def remove_line(self, line):
        '''Removes the given line from whichever axes contains it.'''
        if isinstance(line, lines.Line2D):
            if line in self.plot_canvas.axes1.lines:
                self.plot_canvas.axes1.lines.remove(line)
            elif line in self.plot_canvas.axes2.lines:
                self.plot_canvas.axes2.lines.remove(line)
            else:
                # Bug fix: replaced a Python 2 ``print`` statement with the
                # logging call used by the sibling widget classes.
                logging.error('{} is not in either axes'.format(line))
    def plot_data(self):
        '''Plots on both axes.

        NOTE(review): this delegates to the fit-plotting routine exactly
        like plot_fit, even when fitType is None - confirm whether raw data
        plotting was intended here instead.
        '''
        self.scatteringObject.plot_fit(self.fitType, axs=[self.plot_canvas.axes1, self.plot_canvas.axes2])
    def plot_fit(self):
        '''Plots the selected fit on both axes.'''
        self.scatteringObject.plot_fit(self.fitType, axs=[self.plot_canvas.axes1, self.plot_canvas.axes2])
    def redraw(self):
        '''Redraws the canvas.'''
        self.plot_canvas.draw()
class MultipleFitPlot(MultiplePlot):
    '''MultiplePlot variant that shows the fit (rather than the raw data)
    of each element of the sequence, always on a two-axes widget.
    '''
    def __init__(self, parent=None, **kwargs):
        # Bug fix: the base __init__ may call InitializeValues(), which
        # reads ``self.fitType``; it therefore has to exist before super()
        # runs (plain attribute assignment is safe before QWidget init).
        self.fitType = kwargs.get('fitType')
        super(MultipleFitPlot, self).__init__(parent, **kwargs)
        # A fit always needs the two-axes widget. Bug fix: the original
        # simply rebound self.plotWidget, leaving the widget installed by
        # the base class orphaned inside the layout.
        self.layout.removeWidget(self.plotWidget)
        self.plotWidget.setParent(None)
        self.plotWidget = DoublePlot(self)
        self.layout.addWidget(self.plotWidget)
    def set_data(self, scatteringObj, selectedPlots, plotType, fitType):
        '''Quick setter for the scattering object, the data to plot, the
        type of plot and the fit.
        '''
        self.scatteringObject = scatteringObj
        self.selectedPlots = selectedPlots
        self.plotType = plotType
        self.fitType = fitType
    def plot_data(self, **kwargs):
        '''Uses the fit-plotting functions of the object to draw the fit on
        the available axes after clearing them.
        '''
        self.plotWidget.cla()
        axs = [self.plotWidget.plot_canvas.axes1, self.plotWidget.plot_canvas.axes2]
        if self.plotType == 'SAS':
            # Bug fix: plain ifs meant the error branch also fired for
            # valid SAS/SWAS objects.
            if isinstance(self.scatteringObject, SmallAngleScattering):
                self.scatteringObject.plot_fit(self.fitType, ax=axs, **kwargs)
            elif isinstance(self.scatteringObject, SWAS):
                self.scatteringObject.SAS.plot_fit(self.fitType, ax=axs, **kwargs)
            elif isinstance(self.scatteringObject, SWAS_Sequence):
                self.scatteringObject[self.selectedPlots[self.currPlot]].SAS.plot_fit(self.fitType, ax=axs, **kwargs)
            else:
                logging.error('Cannot plot Small angles for the selected scattering data')
        if self.plotType == 'WAS':
            if isinstance(self.scatteringObject, WideAngleScattering):
                # Bug fix: the original passed ``plot_canvas.axes`` which a
                # DoubleCanvasPlot does not have; pass both axes as the
                # sibling branches do.
                self.scatteringObject.plot_fit(self.fitType, ax=axs, **kwargs)
            elif isinstance(self.scatteringObject, SWAS):
                self.scatteringObject.WAS.plot_fit(self.fitType, ax=axs, **kwargs)
            elif isinstance(self.scatteringObject, SWAS_Sequence):
                self.scatteringObject[self.selectedPlots[self.currPlot]].WAS.plot_fit(self.fitType, ax=axs, **kwargs)
            else:
                # Bug fix: the original called ``self.logging(...)``.
                logging.error('Cannot plot wide angles for the selected scattering data')
        if self.plotType == 'SWAS_Sequence':
            if isinstance(self.scatteringObject, SWAS_Sequence):
                self.scatteringObject[self.selectedPlots[self.currPlot]].plot_fit(
                    self.fitType, axs=axs,
                    fig=self.plotWidget.plot_canvas.fig, **kwargs)
        self.plotWidget.redraw()
| [
"Castro@Nicolos-MacBook-Pro-2.local"
] | Castro@Nicolos-MacBook-Pro-2.local |
7f0200d78f46ea457f3b02e334ed5ce822ffc726 | afc3dddd1c9c7c05436af5c8b41b3215d075905a | /Intermediate 2016/increment.py | 4591045221f591a06f4be5a2acfd6ea0e28512ec | [] | no_license | JohnathanLP/minecraftpython | 9f5b668ea8d21ee78e1341f2099c2a4ff642a602 | 76d91e95128bfabb1f03f80c5a4f7f16899a270d | refs/heads/master | 2021-01-17T13:02:54.784724 | 2016-07-15T19:06:56 | 2016-07-15T19:06:56 | 56,737,802 | 0 | 0 | null | 2016-07-15T19:06:58 | 2016-04-21T02:56:01 | Python | UTF-8 | Python | false | false | 569 | py | import os
import time
fout = open("timerecord.txt",'w')
fout.truncate()
try:
limit = input("How long do you want to count? ")
seconds = 0.00
#take it to the limit
while seconds <= limit:
os.system('clear')
print seconds
seconds += .01
time.sleep(.01)
#one more time
print "All done!"
fout.write("Total time passed: ")
s = str(seconds)
fout.write(s)
fout.close()
except KeyboardInterrupt:
fout.write("Total time passed: ")
s = str(seconds)
fout.write(s)
fout.close()
| [
"johnathanlpowell@gmail.com"
] | johnathanlpowell@gmail.com |
35214fa22f54ec813cf37fce5d8abb7862d782ca | eb397d0383138412c64788834e4073e650b26212 | /game.py | f4333c03688811d5f1c69dd1c6f78b05e20c6816 | [] | no_license | jerseymec/Hilo | e26db92ebf99c915b1ad1f498c0ce0b252230811 | 236c98ec1423b13f6c3a72d1768d892bd10d7851 | refs/heads/master | 2020-09-22T05:34:36.101803 | 2019-11-30T21:57:01 | 2019-11-30T21:57:01 | 225,068,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | import random
# Roll random numbers in [1, 193], echoing each roll, and stop as soon as a
# roll comes up below 40.
roll = random.randint(1, 193)
print(roll)
while roll >= 40:
    roll = random.randint(1, 193)
    print(roll)
| [
"meldoncharles@hotmail.com"
] | meldoncharles@hotmail.com |
28bfa6b818b26e8d7e4657da0f4649ecd145616e | 120673df478f641a7a0824a074b4b26eeea2846a | /duplicates.py | 95b390554f7bf9a69d11c8ab57c2255be041b061 | [] | no_license | nicholsl/PythonProjects | 413034434e8084a62326a31ef70f09765f180b4e | c2c76409ab0844d67257b980214158dbc83924de | refs/heads/master | 2020-04-26T11:24:33.694907 | 2019-01-11T04:15:11 | 2019-01-11T04:15:11 | 173,515,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | ## This program runs a test of knowledge
# First get the test questions
# Later this will be modified to use file io.
def get_questions():
    """Return the quiz as a list of [question, answer] pairs."""
    # The data is stored as a list of two-element lists; a later version is
    # meant to read these from a file instead.
    questions = [
        ["What color is the daytime sky on a clear day? ", "blue"],
        ["What is the answer to life, the universe and everything? ", "42"],
        ["What is a three letter word for mouse trap? ", "cat"],
        ["What noise does a truly advanced machine make?", "ping"],
    ]
    return questions
# This will test a single question
# it takes a single question in
# it returns True if the user typed the correct answer, otherwise False
def check_question(question_and_answer):
    """Ask one question and report whether the user's answer matches.

    Takes a two-element list [question, answer]; returns True when the user
    typed the correct answer, otherwise False.
    """
    prompt = question_and_answer[0]
    expected = question_and_answer[1]
    # Pose the question and compare the reply to the tester's answer.
    response = input(prompt)
    if response == expected:
        print("Correct")
        return True
    print("Incorrect, correct was:", expected)
    return False
# This will run through all the questions
def run_test(questions):
    """Run every question in *questions* and print the percentage correct."""
    if not questions:
        print("No questions were given.")
        # the return exits the function
        return
    right = 0
    for entry in questions:
        # Each entry is a [question, answer] list from the list of lists.
        if check_question(entry):
            right = right + 1
    # First multiply, then divide, so the result is a percentage.
    print("You got", right * 100 / len(questions),
          "% right out of", len(questions))
# now let's get the questions from the get_questions function, and
# send the returned list of lists as an argument to the run_test function.
def menu():
    """Show the main menu repeatedly until the user chooses option 9."""
    choice = 0
    while choice != 9:
        print("(1) Take the test")
        print("(2) View the questions and answers")
        print("(9) Quit")
        choice = int(input("Choose an option using 1, 2, or 9"))
        if choice == 1:
            run_test(get_questions())
        elif choice == 2:
            # Dump every question together with its expected answer.
            for question, answer in get_questions():
                print("Question: ", question)
                print("Answer: ", answer)
    print("You have quit the program.")
# Start the quiz only when the file is executed directly, so importing this
# module for its functions no longer launches the interactive menu.
if __name__ == "__main__":
    menu()
| [
"yumetaki@gmail.com"
] | yumetaki@gmail.com |
bbba29c77ed91e9e1f9c2b9e3b0adf06c9fb0fe1 | db35888bbcacc90e923fdb10312df82f03c40c60 | /WHILELOOPchallenges03.py | 73452b60c8cd7e9c0c4d87e8844f7b04a80decb2 | [] | no_license | DiogoCondecoOphs/Y11-Python- | 45b47286439865e6201114d43e4c72c5f12d77fc | d1e45ad026fd2d9f0cd36a76db51ed126a902118 | refs/heads/master | 2023-04-13T08:41:25.820805 | 2021-04-23T14:50:10 | 2021-04-23T14:50:10 | 296,599,361 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | #WHILELOOPchallenges03
#Diogo.c
# Keep a running total of numbers typed by the user until they answer
# anything other than "y" to the continue prompt, then report the total.
total = int(input("please enter a number "))
answer = "y"
while answer == "y":
    total = total + int(input("please enter another number "))
    answer = str(input("would you like to add another number y/n "))
print("total was", total)
| [
"noreply@github.com"
] | noreply@github.com |
a28eb576387611a80ec9b659a172f16f38ac1fc9 | 34959caee120ba712b50c52a50975ad39a1b150c | /constants.py | c804cb10e6b961cc9bc6b42aa386b140d8198aea | [] | no_license | satirmo/Team-Awsomesauce | eb2f3e585b70089497f45cbd27cac21b5771f91c | 13e0ec212bf6d66db5ffae9fc5ec4f74c0ed9e4c | refs/heads/master | 2021-01-19T17:07:03.784179 | 2017-05-08T15:42:32 | 2017-05-08T15:42:32 | 88,304,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # Group Members : John Atti, Mark Bonadies, Tomas Carino, Aayush Shrestha, & Amanda Steidl
# Project : Final Demo : Cozmo Driving Course
# Course : CMPS 367 Robotics
# Professor : Benjamin Fine
# Date : 05.01.2016
# Main Contributor(s) of Section : Amanda Steidl
# Current File : constants.py
class CONST:
    """Physical constants of the Cozmo driving course."""

    def __init__(self):
        # Speed limits.
        self.MAX_LIMIT, self.MIN_LIMIT = 55, 40
        # Course geometry, in millimeters.
        self.ROAD_WIDTH = 88.9   # 3.5 inches
        self.MID_WIDTH = 12.7    # 0.5 inches
        # Cozmo robot dimensions, in millimeters.
        self.COZMO_FRONT = 31.75   # 1.25 inches
        self.COZMO_LENGTH = 88.9   # 3.5 inches
class decisions:
    """Enumeration of the driving decisions Cozmo can take, coded 0-11."""

    def __init__(self):
        # Assign consecutive codes 0..11 in the documented order.
        (self.TURN_LEFT,
         self.TURN_RIGHT,
         self.TURN_OPTIONAL_LEFT,
         self.TURN_OPTIONAL_RIGHT,
         self.STOP_AHEAD,
         self.COZMO_AHEAD_STOP,
         self.COZMO_AHEAD,
         self.WAIT,
         self.SPEED_UPDATE,
         self.CORRECT_LEFT,
         self.CORRECT_RIGHT,
         self.CONTINUE) = range(12)
| [
"amanda.steidl@gmail.com"
] | amanda.steidl@gmail.com |
b71d0ba69122f20eb76eb0293f5134ff216d358c | 50e5d9961505b046edc6b8c210fbc17e954a4394 | /LeetCode/Dynamic Programming/Maximum Subarray.py | 9b86013a3d7768deeaaa743c7a8a4e10dfb0ff70 | [] | no_license | chai1323/Data-Structures-and-Algorithms | e1ba49080dfbc16aec7060064ed98711a602b625 | 97b9600c5b4d71bce6d8d5b1a52c99e8ff4c8d1b | refs/heads/master | 2023-05-26T13:21:09.487453 | 2021-02-11T11:32:10 | 2021-02-11T11:32:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | '''Given an integer array nums, find the contiguous subarray (containing at least one number)
which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6. '''
class Solution:
    def maxSubArray(self, A: List[int]) -> int:
        """Return the largest sum of any contiguous, non-empty subarray of A.

        Kadane's algorithm: ``local_max`` is the best sum of a subarray
        ending at the current element; the answer is the best such value
        seen anywhere. O(n) time, O(1) space.
        """
        # Seed both trackers with the first element instead of the original
        # magic large-negative constant; the problem guarantees at least
        # one number, and this stays correct when every entry is negative.
        local_max = global_max = A[0]
        for x in A[1:]:
            # Either extend the running subarray or restart at x.
            local_max = max(x, local_max + x)
            if local_max > global_max:
                global_max = local_max
        return global_max
| [
"noreply@github.com"
] | noreply@github.com |
37b0f73442e6b0db42d0419136e19faef5f2f973 | d272b041f84bbd18fd65a48b42e0158ef6cceb20 | /catch/datasets/tacaribe_mammarenavirus.py | 7f62021e43d0f87c81e26077042b3721995eee6d | [
"MIT"
] | permissive | jahanshah/catch | bbffeadd4113251cc2b2ec9893e3d014608896ce | 2fedca15f921116f580de8b2ae7ac9972932e59e | refs/heads/master | 2023-02-19T13:30:13.677960 | 2021-01-26T03:41:10 | 2021-01-26T03:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | """Dataset with 'Tacaribe mammarenavirus' sequences.
A dataset with 5 'Tacaribe mammarenavirus' sequences. The virus is
segmented and has 2 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 4 genomes. Many genomes
may have fewer than 2 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetMultiChrom
def seq_header_to_chr(header):
    """Map a FASTA header to its chromosome name from its '[segment L|S]'
    tag, raising when no such tag is present."""
    import re
    match = re.search(r'\[segment (L|S)\]', header)
    if match is None:
        raise Exception("Unknown or invalid segment in header %s" % header)
    return "segment_" + match.group(1)
def seq_header_to_genome(header):
    """Extract the genome label from the '[genome ...]' tag of a FASTA
    header, raising when the tag is absent."""
    import re
    match = re.search(r'\[genome (.+)\]', header)
    if match is None:
        raise Exception("Unknown genome in header %s" % header)
    return match.group(1)
# One chromosome name per segment: 'segment_L' and 'segment_S'.
chrs = ["segment_" + seg for seg in ['L', 'S']]
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
                              chrs, seq_header_to_chr,
                              seq_header_to_genome=seq_header_to_genome)
ds.add_fasta_path("data/tacaribe_mammarenavirus.fasta.gz", relative=True)
# Replace this module object in sys.modules with the dataset, so importing
# the module yields the dataset directly.
sys.modules[__name__] = ds
| [
"hmetsky@gmail.com"
] | hmetsky@gmail.com |
5cab019870ce8d4033bcf29458e13fbf7f4aed13 | f85e4937fb580d082e83c606a0a58aedbdc140fe | /Pytho chan/Nigma24.py | f8b253bcd37b32ff6c6bb996f4c58779eaff9302 | [] | no_license | matteobaire/pychallenge | 81b0bd8abedbf8f549b6b78f6d2284e1c14e9aba | 9f5d3bf699ee063686978f7ba73734c5b131f036 | refs/heads/master | 2021-01-01T18:41:34.650712 | 2015-09-11T13:04:26 | 2015-09-11T13:04:26 | 42,308,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,172 | py | # Copyright for the following five classes by John Eriksson
# <http://arainyday.se/>, 2006. Originally written for the AStar
# library <http://www.pygame.org/projects/9/195/> and released into the
# public domain. Thanks a lot!
from PIL import Image
class Path:
    """An A* search result: the node sequence plus its total move cost."""

    def __init__(self, nodes, totalCost):
        self.nodes = nodes
        self.totalCost = totalCost

    def getNodes(self):
        """Return the list of nodes along the path."""
        return self.nodes

    def getTotalMoveCost(self):
        """Return the accumulated movement cost of the path."""
        return self.totalCost
class Node:
    """A single search node: location, movement cost, parent link and a
    unique location id (``lid``) used for identity comparisons."""

    def __init__(self, location, mCost, lid, parent=None):
        self.location = location
        self.mCost = mCost
        self.parent = parent
        self.score = 0
        self.lid = lid

    def __eq__(self, n):
        # Two nodes are the same when they refer to the same map cell.
        # Returns 1/0 (not True/False), matching the original library.
        return 1 if n.lid == self.lid else 0
class AStar:
    '''A* path finder over an abstract map handler (e.g. SQ_MapHandler).

    Keeps parallel open lists: ``self.on`` holds open Node objects and
    ``self.o`` their location ids; ``self.c`` holds closed location ids.
    '''
    def __init__(self, maphandler):
        self.mh = maphandler
    def _getBestOpenNode(self):
        # Linear scan for the open node with the lowest score
        # (cost so far + heuristic); '<=' prefers later-added nodes on ties.
        bestNode = None
        for n in self.on:
            if not bestNode:
                bestNode = n
            elif n.score <= bestNode.score:
                bestNode = n
        return bestNode
    def _tracePath(self, n):
        # Walk the parent chain back to the start (the node whose parent is
        # None) and return the nodes in forward order inside a Path.
        nodes = []
        totalCost = n.mCost
        p = n.parent
        nodes.insert(0, n)
        while True:
            if p.parent is None:
                break
            nodes.insert(0, p)
            p = p.parent
        return Path(nodes, totalCost)
    def _handleNode(self, node, end):
        # Move ``node`` from the open lists to the closed list, then expand
        # its neighbours; returns the goal node as soon as it is generated.
        i = self.o.index(node.lid)
        self.on.pop(i)
        self.o.pop(i)
        self.c.append(node.lid)
        nodes = self.mh.getAdjacentNodes(node, end)
        for n in nodes:
            if n.location == end:
                return n
            elif n.lid in self.c:
                continue
            elif n.lid in self.o:
                # Already open: keep whichever copy has the cheaper cost.
                i = self.o.index(n.lid)
                on = self.on[i]
                if n.mCost < on.mCost:
                    self.on.pop(i)
                    self.o.pop(i)
                    self.on.append(n)
                    self.o.append(n.lid)
            else:
                self.on.append(n)
                self.o.append(n.lid)
        return None
    def findPath(self, fromlocation, tolocation):
        '''Returns a Path from fromlocation to tolocation, or None when the
        goal cannot be reached.
        '''
        self.o = []
        self.on = []
        self.c = []
        end = tolocation
        fnode = self.mh.getNode(fromlocation)
        self.on.append(fnode)
        self.o.append(fnode.lid)
        nextNode = fnode
        while nextNode is not None:
            finish = self._handleNode(nextNode, end)
            if finish:
                return self._tracePath(finish)
            nextNode = self._getBestOpenNode()
        return None
class SQ_Location:
    """A two-dimensional grid location."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, l):
        # Locations are equal when both coordinates match; returns 1/0 to
        # mirror the conventions of the rest of the library.
        return 1 if (l.x == self.x and l.y == self.y) else 0
class SQ_MapHandler:
    '''Map handler for a square grid stored as a flat row-major list.

    A cell value of -1 marks an impassable cell; any other value is the
    movement cost of entering that cell.
    '''
    def __init__(self, mapdata, width, height):
        self.m = mapdata
        self.w = width
        self.h = height
    def getNode(self, location):
        '''Returns a fresh Node for ``location``, or None when it is
        outside the map or blocked (-1).
        '''
        x = location.x
        y = location.y
        if x < 0 or x >= self.w or y < 0 or y >= self.h:
            return None
        d = self.m[(y * self.w) + x]
        if d == -1:
            return None
        # The flat row-major index doubles as the node's location id.
        return Node(location, d, ((y * self.w) + x))
    def getAdjacentNodes(self, curnode, dest):
        '''Returns the passable 4-neighbourhood of ``curnode``, each
        neighbour scored against destination ``dest``.
        '''
        result = []
        cl = curnode.location
        dl = dest
        n = self._handleNode(cl.x + 1, cl.y, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        n = self._handleNode(cl.x - 1, cl.y, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        n = self._handleNode(cl.x, cl.y + 1, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        n = self._handleNode(cl.x, cl.y - 1, curnode, dl.x, dl.y)
        if n:
            result.append(n)
        return result
    def _handleNode(self, x, y, fromnode, destx, desty):
        # Builds a scored neighbour node: score = accumulated path cost +
        # Manhattan-distance heuristic to the destination.
        n = self.getNode(SQ_Location(x, y))
        if n is not None:
            dx = max(x, destx) - min(x, destx)
            dy = max(y, desty) - min(y, desty)
            emCost = dx + dy
            # Accumulate the cost so far onto the freshly created node
            # (getNode returns a new Node each call, so this is safe).
            n.mCost += fromnode.mCost
            n.score = n.mCost + emCost
            n.parent = fromnode
            return n
        return None
def main():
    # Decode the maze image: white pixels become walls (-1), everything
    # else is walkable with unit cost.
    img = Image.open("maze.png")
    maze = img.load()
    mapdata = []
    # Translate pixel data into something that AStar understands.
    for elt in img.getdata():
        if elt == (255, 255, 255, 255):
            mapdata.append(-1)
        else:
            mapdata.append(1)
    # Define start and destination points.
    mapdata[639] = 5
    mapdata[410241] = 6
    astar = AStar(SQ_MapHandler(mapdata, 641, 641))
    start = SQ_Location(639, 0)
    end = SQ_Location(1, 640)
    p = astar.findPath(start, end)
    data = []
    # Extract data from "logs".
    # Only odd-coordinate cells carry payload bytes; the red channel of
    # each such pixel along the found path is one byte of the hidden zip.
    for node in p.nodes:
        if node.location.x % 2 and node.location.y % 2:
            data.append(chr(maze[node.location.x, node.location.y][0]))
    # NOTE(review): Python 2 script - writes a str to a 'wb' handle; under
    # Python 3 this would need bytes instead.
    h = open("unzip-me.zip", "wb")
    h.write("".join(data))
    h.close()
# Standard script entry point.
if __name__ == "__main__":
    main()
"mbdigital@virgilio.it"
] | mbdigital@virgilio.it |
f865b3508c6ba6b9fd2a0f572519eae1a3eced74 | 52ed0fe71b8fefcd292ae16e4da9df8c84aedbbf | /probabilidades/env/bin/easy_install-3.8 | eca82ed48d78ef7f5cc12431e0a4f1d2c67f58c2 | [] | no_license | Viistorrr/python_platzi_route | 596b13f2602a612ca64a878bfdb8fd1a34691413 | 36f25737dc970353f333717798afc0ad29d70a6c | refs/heads/master | 2022-11-26T14:50:40.168772 | 2020-07-25T17:54:15 | 2020-07-25T17:54:15 | 281,691,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | 8 | #!/Users/macbookpro/Documents/platzi/python_platzi_route/probabilidades/env/bin/python3
# -*- coding: utf-8 -*-
# Console-script wrapper generated by setuptools for easy_install.
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip a trailing '-script.pyw'/'.exe' wrapper suffix from argv[0] so
    # easy_install reports the expected program name, then run it and exit
    # with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"victormeza41@gmail.com"
] | victormeza41@gmail.com |
070f9fae7f4af5744ad3cfe5bb1332147cfc2637 | 13b22a505cbeed3f88653cd379d72ff797068935 | /jenkins_jobs/builder.py | b1b23e2cbe8696c73572efdde25edc35e4f7181d | [
"Apache-2.0"
] | permissive | jaybuff/jenkins-job-builder | af88e4dfd7fe99d17b5e87f2058c0e99cda1d4bd | e3e8f6363f515051e94dc9ebb3ec1473857e4389 | refs/heads/master | 2021-01-15T17:51:28.890445 | 2013-07-25T16:15:48 | 2013-07-25T16:15:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,875 | py | #!/usr/bin/env python
# Copyright (C) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage jobs in Jenkins server
import os
import hashlib
import yaml
import json
import xml.etree.ElementTree as XML
from xml.dom import minidom
import jenkins
import re
import pkg_resources
import logging
import copy
import itertools
from jenkins_jobs.errors import JenkinsJobsException
logger = logging.getLogger(__name__)
def deep_format(obj, paramdict):
    """Recursively interpolate *paramdict* into every string inside *obj*.

    Strings are expanded with ``str.format(**paramdict)``; lists and dicts
    are walked recursively; any other value is returned unchanged.
    """
    # str.format is applied directly (rather than round-tripping through a
    # YAML dump/load) so that substituted values are not required to remain
    # valid YAML -- e.g. strings containing quotes work fine.
    if isinstance(obj, str):
        return obj.format(**paramdict)
    if isinstance(obj, list):
        return [deep_format(element, paramdict) for element in obj]
    if isinstance(obj, dict):
        return dict((key, deep_format(obj[key], paramdict)) for key in obj)
    return obj
class YamlParser(object):
    """Parse job-definition YAML files and expand them into XML jobs.

    Collects ``job``, ``job-template``, ``job-group``, ``project`` and
    ``defaults`` definitions into ``self.data``, then ``generateXML``
    expands projects/templates into concrete ``XmlJob`` objects in
    ``self.jobs``.
    """

    def __init__(self, config=None):
        self.registry = ModuleRegistry(config)
        self.data = {}   # definition class -> {name -> definition dict}
        self.jobs = []   # generated XmlJob objects

    def parse(self, fn):
        """Load one YAML file and merge its definitions into self.data."""
        data = yaml.load(open(fn))
        for item in data:
            # Each top-level item is expected to be a singleton mapping of
            # definition class ('job', 'project', ...) to its body.
            # NOTE: .items()[0] is Python-2-only (dict views in py3).
            cls, dfn = item.items()[0]
            group = self.data.get(cls, {})
            if len(item.items()) > 1:
                # More than one key means the body was not indented under
                # the class key; report the offending item by name if found.
                n = None
                for k, v in item.items():
                    if k == "name":
                        n = v
                        break
                # Syntax error
                raise JenkinsJobsException("Syntax error, for item named "
                                           "'{0}'. Missing indent?".format(n))
            name = dfn['name']
            group[name] = dfn
            self.data[cls] = group

    def getJob(self, name):
        """Return the named 'job' definition with defaults applied, or None."""
        job = self.data.get('job', {}).get(name, None)
        if not job:
            return job
        return self.applyDefaults(job)

    def getJobGroup(self, name):
        """Return the named 'job-group' definition, or None."""
        return self.data.get('job-group', {}).get(name, None)

    def getJobTemplate(self, name):
        """Return the named 'job-template' with defaults applied, or None."""
        job = self.data.get('job-template', {}).get(name, None)
        if not job:
            return job
        return self.applyDefaults(job)

    def applyDefaults(self, data):
        """Merge the selected 'defaults' block under *data* (data wins)."""
        whichdefaults = data.get('defaults', 'global')
        defaults = self.data.get('defaults', {}).get(whichdefaults, {})
        newdata = {}
        newdata.update(defaults)
        newdata.update(data)
        return newdata

    def generateXML(self, jobs_filter=None):
        """Expand all jobs/projects into XmlJob objects (self.jobs).

        :arg jobs_filter: optional collection of job names; anything not
            listed is skipped.
        """
        # Let modules preprocess the parsed data until a fixed point is
        # reached (a module may add data another module then consumes).
        changed = True
        while changed:
            changed = False
            for module in self.registry.modules:
                if hasattr(module, 'handle_data'):
                    if module.handle_data(self):
                        changed = True

        # Plain jobs are emitted directly.
        for job in self.data.get('job', {}).values():
            if jobs_filter and job['name'] not in jobs_filter:
                continue
            logger.debug("XMLifying job '{0}'".format(job['name']))
            job = self.applyDefaults(job)
            self.getXMLForJob(job)

        # Projects reference jobs, job-groups or job-templates by name.
        for project in self.data.get('project', {}).values():
            logger.debug("XMLifying project '{0}'".format(project['name']))
            for jobspec in project.get('jobs', []):
                if isinstance(jobspec, dict):
                    # Singleton dict containing dict of job-specific params
                    jobname, jobparams = jobspec.items()[0]
                else:
                    jobname = jobspec
                    jobparams = {}

                job = self.getJob(jobname)
                if job:
                    # Just naming an existing defined job
                    continue
                # see if it's a job group
                group = self.getJobGroup(jobname)
                if group:
                    for group_jobname in group['jobs']:
                        job = self.getJob(group_jobname)
                        if job:
                            continue
                        template = self.getJobTemplate(group_jobname)
                        # Allow a group to override parameters set by a project
                        d = {}
                        d.update(project)
                        d.update(jobparams)
                        d.update(group)
                        # Except name, since the group's name is not useful
                        d['name'] = project['name']
                        if template:
                            self.getXMLForTemplateJob(d, template, jobs_filter)
                    continue
                # see if it's a template
                template = self.getJobTemplate(jobname)
                if template:
                    d = {}
                    d.update(project)
                    d.update(jobparams)
                    self.getXMLForTemplateJob(d, template, jobs_filter)

    def getXMLForTemplateJob(self, project, template, jobs_filter=None):
        """Expand *template* once per combination of the project's list params."""
        # Every list-valued project parameter (except 'jobs') becomes an
        # expansion axis: the template is instantiated for the cartesian
        # product of all axes.
        dimensions = []
        for (k, v) in project.items():
            if type(v) == list and k not in ['jobs']:
                dimensions.append(zip([k] * len(v), v))
        # XXX somewhat hackish to ensure we actually have a single
        # pass through the loop
        if len(dimensions) == 0:
            dimensions = [(("", ""),)]

        checksums = set([])
        for values in itertools.product(*dimensions):
            params = copy.deepcopy(project)
            params.update(values)
            expanded = deep_format(template, params)

            # Keep track of the resulting expansions to avoid
            # regenerating the exact same job. Whenever a project has
            # different values for a parameter and that parameter is not
            # used in the template, we ended up regenerating the exact
            # same job.
            # To achieve that we serialize the expanded template making
            # sure the dict keys are always in the same order. Then we
            # record the checksum in an unordered unique set which let
            # us guarantee a group of parameters will not be added a
            # second time.
            uniq = json.dumps(expanded, sort_keys=True)
            checksum = hashlib.md5(uniq).hexdigest()

            # Lookup the checksum
            if checksum not in checksums:
                # We also want to skip XML generation whenever the user did
                # not ask for that job.
                job_name = expanded.get('name')
                if jobs_filter and job_name not in jobs_filter:
                    continue
                logger.debug("Generating XML for template job {0}"
                             " (params {1})".format(
                                 template['name'], params))
                self.getXMLForJob(expanded)
                checksums.add(checksum)

    def getXMLForJob(self, data):
        """Render one job dict to XML via its project-type entry point."""
        kind = data.get('project-type', 'freestyle')
        for ep in pkg_resources.iter_entry_points(
                group='jenkins_jobs.projects', name=kind):
            Mod = ep.load()
            mod = Mod(self.registry)
            xml = mod.root_xml(data)
            self.gen_xml(xml, data)
            job = XmlJob(xml, data['name'])
            self.jobs.append(job)
            break

    def gen_xml(self, xml, data):
        """Let every registered module contribute XML for this job."""
        for module in self.registry.modules:
            if hasattr(module, 'gen_xml'):
                module.gen_xml(self, xml, data)
class ModuleRegistry(object):
    """Discover jenkins_jobs modules via entry points and dispatch components."""

    def __init__(self, config):
        self.modules = []
        self.modules_by_component_type = {}
        self.handlers = {}
        self.global_config = config

        for entrypoint in pkg_resources.iter_entry_points(
                group='jenkins_jobs.modules'):
            Mod = entrypoint.load()
            mod = Mod(self)
            self.modules.append(mod)
            # NOTE(review): the sort runs on every iteration rather than once
            # after the loop; harmless but wasteful.  cmp/lambda sort is py2.
            self.modules.sort(lambda a, b: cmp(a.sequence, b.sequence))
            if mod.component_type is not None:
                self.modules_by_component_type[mod.component_type] = mod

    def registerHandler(self, category, name, method):
        """Register *method* under handlers[category][name]."""
        cat_dict = self.handlers.get(category, {})
        if not cat_dict:
            self.handlers[category] = cat_dict
        cat_dict[name] = method

    def getHandler(self, category, name):
        """Return the handler registered for (category, name); KeyError if absent."""
        return self.handlers[category][name]

    def dispatch(self, component_type,
                 parser, xml_parent,
                 component, template_data={}):
        """This is a method that you can call from your implementation of
        Base.gen_xml or component.  It allows modules to define a type
        of component, and benefit from extensibility via Python
        entry points and Jenkins Job Builder :ref:`Macros <macro>`.

        :arg string component_type: the name of the component
          (e.g., `builder`)
        :arg YAMLParser parser: the global YAML Parser
        :arg Element xml_parent: the parent XML element
        :arg component: the component definition: either a plain string
          name, or a singleton dict of name -> arguments
        :arg dict template_data: values that should be interpolated into
          the component definition

        NOTE(review): the mutable default ``template_data={}`` is never
        mutated here, but is fragile; ``None`` + guard would be safer.

        See :py:class:`jenkins_jobs.modules.base.Base` for how to register
        components of a module.

        See the Publishers module for a simple example of how to use
        this method.
        """
        if component_type not in self.modules_by_component_type:
            raise JenkinsJobsException("Unknown component type: "
                                       "'{0}'.".format(component_type))

        component_list_type = self.modules_by_component_type[component_type] \
            .component_list_type

        if isinstance(component, dict):
            # The component is a sigleton dictionary of name: dict(args)
            name, component_data = component.items()[0]
            if template_data:
                # Template data contains values that should be interpolated
                # into the component definition
                s = yaml.dump(component_data, default_flow_style=False)
                s = s.format(**template_data)
                component_data = yaml.load(s)
        else:
            # The component is a simple string name, eg "run-tests"
            name = component
            component_data = {}

        # Look for a component function defined in an entry point
        for ep in pkg_resources.iter_entry_points(
                group='jenkins_jobs.{0}'.format(component_list_type), name=name):
            func = ep.load()
            func(parser, xml_parent, component_data)
        else:
            # NOTE(review): this is a for/else with no ``break`` in the loop
            # body, so the else clause ALWAYS runs -- the macro lookup below
            # happens even when an entry point was found and invoked.
            # Confirm whether a ``break`` is missing above.
            # Otherwise, see if it's defined as a macro
            component = parser.data.get(component_type, {}).get(name)
            if component:
                for b in component[component_list_type]:
                    # Pass component_data in as template data to this function
                    # so that if the macro is invoked with arguments,
                    # the arguments are interpolated into the real defn.
                    self.dispatch(component_type,
                                  parser, xml_parent, b, component_data)
class XmlJob(object):
    """A generated Jenkins job: its XML element tree plus the job name."""

    # Collapse the whitespace minidom inserts around text nodes so that
    # "<tag>\n    text\n</tag>" is emitted as "<tag>text</tag>".
    # Pretty printing ideas from
    # http://stackoverflow.com/questions/749796/pretty-printing-xml-in-python
    pretty_text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)

    def __init__(self, xml, name):
        self.xml = xml
        self.name = name

    def md5(self):
        """Checksum of the serialized job, used for change detection."""
        return hashlib.md5(self.output()).hexdigest()

    def output(self):
        """Return the job's XML pretty-printed with two-space indents."""
        pretty = minidom.parseString(XML.tostring(self.xml)).toprettyxml(indent='  ')
        return self.pretty_text_re.sub('>\g<1></', pretty)
class CacheStorage(object):
    """Cache of job name -> last-written config MD5, one YAML file per host.

    Used by Builder to skip reconfiguring jobs whose XML has not changed
    since the previous run.  The cache lives under
    ``$XDG_CACHE_HOME/jenkins_jobs`` (or ``~/.cache/jenkins_jobs``).
    """

    def __init__(self, jenkins_url):
        cache_dir = self.get_cache_dir()
        # One cache per remote Jenkins URL:
        host_vary = re.sub('[^A-Za-z0-9\-\~]', '_', jenkins_url)
        self.cachefilename = os.path.join(
            cache_dir, 'cache-host-jobs-' + host_vary + '.yml')
        # open() + with replaces the py2-only file() builtin and guarantees
        # the handle is closed even if YAML parsing raises.
        try:
            with open(self.cachefilename, 'r') as yfile:
                self.data = yaml.load(yfile)
        except IOError:
            # No cache for this host yet; start empty.
            self.data = {}
            return
        logger.debug("Using cache: '{0}'".format(self.cachefilename))

    @staticmethod
    def get_cache_dir():
        """Return (and create if needed) the jenkins_jobs cache directory."""
        home = os.path.expanduser('~')
        if home == '~':
            raise OSError('Could not locate home folder')
        xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
            os.path.join(home, '.cache')
        path = os.path.join(xdg_cache_home, 'jenkins_jobs')
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def set(self, job, md5):
        """Record *md5* as the last-written checksum for *job* and persist."""
        self.data[job] = md5
        with open(self.cachefilename, 'w') as yfile:
            yaml.dump(self.data, yfile)

    def is_cached(self, job):
        """Return True if *job* has an entry in the cache."""
        return job in self.data

    def has_changed(self, job, md5):
        """Return True unless *job* is cached with exactly *md5*."""
        return not (job in self.data and self.data[job] == md5)
class Jenkins(object):
    """Thin wrapper around a python-jenkins connection to one server."""

    def __init__(self, url, user, password):
        self.jenkins = jenkins.Jenkins(url, user, password)

    def is_job(self, job_name):
        """Return True if a job named *job_name* exists on the server."""
        return self.jenkins.job_exists(job_name)

    def get_job_md5(self, job_name):
        """Checksum of the job's current server-side configuration."""
        return hashlib.md5(self.jenkins.get_job_config(job_name)).hexdigest()

    def update_job(self, job_name, xml):
        """Create *job_name* from *xml*, or reconfigure it if it exists."""
        if not self.is_job(job_name):
            logger.info("Creating jenkins job {0}".format(job_name))
            self.jenkins.create_job(job_name, xml)
        else:
            logger.info("Reconfiguring jenkins job {0}".format(job_name))
            self.jenkins.reconfig_job(job_name, xml)

    def delete_job(self, job_name):
        """Delete *job_name* if present; no-op otherwise."""
        if self.is_job(job_name):
            self.jenkins.delete_job(job_name)

    def get_jobs(self):
        """Return the server's job listing."""
        return self.jenkins.get_jobs()
class Builder(object):
    """High-level driver: parse YAML, then create/update/delete Jenkins jobs,
    using CacheStorage to avoid reposting unchanged configurations."""

    def __init__(self, jenkins_url, jenkins_user, jenkins_password,
                 config=None):
        self.jenkins = Jenkins(jenkins_url, jenkins_user, jenkins_password)
        self.cache = CacheStorage(jenkins_url)
        self.global_config = config

    def delete_job(self, name):
        """Delete *name* on the server and blank its cache entry."""
        self.jenkins.delete_job(name)
        if(self.cache.is_cached(name)):
            self.cache.set(name, '')

    def delete_all_jobs(self):
        """Delete every job currently present on the server."""
        jobs = self.jenkins.get_jobs()
        for job in jobs:
            self.delete_job(job['name'])

    def update_job(self, fn, names=None, output_dir=None):
        """Parse *fn* (a YAML file or a directory of .yml/.yaml files) and
        push the resulting jobs to Jenkins.

        :arg names: optional list of job names; only those are processed.
        :arg output_dir: if given, write XML to files instead of posting
            (or to stdout when *names* is also given) -- TODO confirm the
            stdout-vs-file split against the CLI's intent.
        """
        if os.path.isdir(fn):
            files_to_process = [os.path.join(fn, f)
                                for f in os.listdir(fn)
                                if (f.endswith('.yml') or f.endswith('.yaml'))]
        else:
            files_to_process = [fn]
        parser = YamlParser(self.global_config)
        for in_file in files_to_process:
            logger.debug("Parsing YAML file {0}".format(in_file))
            parser.parse(in_file)
        if names:
            logger.debug("Will filter out jobs not in %s" % names)
        parser.generateXML(names)

        # py2-only cmp-style sort for deterministic processing order.
        parser.jobs.sort(lambda a, b: cmp(a.name, b.name))
        for job in parser.jobs:
            if names and job.name not in names:
                continue
            if output_dir:
                if names:
                    # py2 print statement: dump requested jobs to stdout.
                    print job.output()
                    continue
                fn = os.path.join(output_dir, job.name)
                logger.debug("Writing XML to '{0}'".format(fn))
                f = open(fn, 'w')
                f.write(job.output())
                f.close()
                continue
            md5 = job.md5()
            # Seed the cache from the server for jobs we have never seen,
            # so an unchanged job is not pointlessly reposted.
            if (self.jenkins.is_job(job.name)
                    and not self.cache.is_cached(job.name)):
                old_md5 = self.jenkins.get_job_md5(job.name)
                self.cache.set(job.name, old_md5)
            if self.cache.has_changed(job.name, md5):
                self.jenkins.update_job(job.name, job.output())
                self.cache.set(job.name, md5)
            else:
                logger.debug("'{0}' has not changed".format(job.name))
| [
"jenkins@review.openstack.org"
] | jenkins@review.openstack.org |
58f3ad5187db0ba90a597d319ecd2fd4036de17e | fd74a044c0037796455ba4bd4fd44f11c3323599 | /Practice/ABC/Bcontest037_a.py | 25217aff10ee643818c607b06b0b3160e6edfb8b | [] | no_license | tegetege/tegetege_AtCoder | 5ac87e0a7a9acdd50d06227283aa7d95eebe2e2f | ba6c6472082e8255202f4f22a60953d0afe21591 | refs/heads/master | 2022-03-25T00:29:22.952078 | 2022-02-10T14:39:58 | 2022-02-10T14:39:58 | 193,516,879 | 0 | 0 | null | 2019-06-25T13:53:13 | 2019-06-24T14:02:05 | Python | UTF-8 | Python | false | false | 55 | py | A,B,C = map(int,input().split())
print(int(C/min(A,B))) | [
"m_take7_ex_d@yahoo.co.jp"
] | m_take7_ex_d@yahoo.co.jp |
0c6e5ddba7ebbe6e00461c9af0795cf0f598a220 | a2b598d8e89c1755f683d6b6fe35c3f1ef3e2cf6 | /search/[boj]1072_게임_이분탐색.py | 9b2ba1666cc1fce2d54fdd1b38de88a172296a19 | [
"MIT"
] | permissive | DongHyunByun/algorithm_practice | cbe82606eaa7f372d9c0b54679bdae863aab0099 | dcd595e6962c86f90f29e1d68f3ccc9bc673d837 | refs/heads/master | 2022-09-24T22:47:01.556157 | 2022-09-11T07:36:42 | 2022-09-11T07:36:42 | 231,518,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | while(1):
try:
X,Y=map(int,input().split())
nowPercent=int(Y*100/X)
target=nowPercent+1
#a번 더 이겼을때 퍼센트
def percent(a):
return int((Y+a)*100/(X+a))
if nowPercent>=99:
print(-1)
else:
left=0
right=9999999999
#처음으로 승률+1 이상이 나오는 부분을 찾아
while(left<right):
mid=(left+right)//2
#print(left, right, mid)
#print(percent(mid))
if target<=percent(mid):
right=mid
else:
left=mid+1
print(right)
except:
break | [
"noreply@github.com"
] | noreply@github.com |
9ca59003a4044b3e9194cf46b6a1df42749829f2 | f638793eaf038b0f5b6fc21099ef486d5f1a9c4b | /hacker_rank/default_arg.py | be29a49192fa9b1f27bdd3d737c4f44ce641619e | [] | no_license | barbocz/UdemyCourse | 8c8a377c7d3a1c3c091cd9a98d3cd730a08de82d | eb9c9e217ca8357699967fc98ecdd5af9db295e5 | refs/heads/master | 2022-12-24T18:32:36.427401 | 2020-09-30T19:03:33 | 2020-09-30T19:03:33 | 298,615,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | class EvenStream(object):
def __init__(self):
self.current = 0
def get_next(self):
to_return = self.current
self.current += 2
return to_return
class OddStream(object):
    """Infinite stream of the positive odd numbers: 1, 3, 5, ..."""

    def __init__(self):
        self.current = 1

    def get_next(self):
        """Return the next odd number and advance the stream."""
        value = self.current
        self.current += 2
        return value
def print_from_stream(n, stream=None):
    """Print the first *n* values of *stream* (a fresh EvenStream by default).

    Bug fix: the original signature was ``stream=EvenStream()``.  Default
    arguments are evaluated once at definition time, so every defaulted
    call shared -- and advanced -- the same stream instance, making later
    "even" queries continue from where the previous one stopped.  Creating
    the stream inside the call restores the intended start-from-0 behaviour
    while keeping the call signature compatible.
    """
    if stream is None:
        stream = EvenStream()
    for _ in range(n):
        print(stream.get_next())
# Drive the streams: each query line names a stream type and a count.
queries = int(input())
for _ in range(queries):
    stream_name, n = input().split()
    n = int(n)
    if stream_name == "even":
        print("even "+str(n))
        # Defaulted call: prints from an EvenStream.
        print_from_stream(n)
    else:
        print("odd "+str(n))
        print_from_stream(n, OddStream())
| [
"barbocz.attila@gmail.com"
] | barbocz.attila@gmail.com |
b6139653ae942b7e144940a2e1f5a870f1debf20 | 701a7bfab7e6e33951c5d418731c008c30c7d0db | /products/migrations/0006_product_media.py | e13190efcd7c10b7237c18abecacf9db7482bab4 | [] | no_license | pjelelhml/bootcamp-django | 4aade18f3e67fa450bcdd3188fab1140afa1684d | f2cb0bf729f16148d1d835a9caa67df96a48cf5e | refs/heads/main | 2023-06-13T08:29:41.318027 | 2021-07-12T20:53:31 | 2021-07-12T20:53:31 | 374,385,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Generated by Django 3.2.4 on 2021-07-09 14:33
from django.db import migrations, models
import products.storages
class Migration(migrations.Migration):
dependencies = [
('products', '0005_product_image'),
]
operations = [
migrations.AddField(
model_name='product',
name='media',
field=models.FileField(blank=True, null=True, storage=products.storages.ProtectedStorage, upload_to='products/'),
),
]
| [
"paulo3385@hotmail.com"
] | paulo3385@hotmail.com |
ee0d33f5290bac527b87dc1b699c7442a39292b2 | 1214d7d393f6a8edb64e6136dc0b253e0597f4a6 | /bokeh/bokeh_label.py | e8da41d3f378ca7a65a516126efaf282ff741026 | [] | no_license | stockdata123/upgraded-journey | 047319acba7d0e822c581c40ad8b51f34cfe35f1 | 76b97c7944352170d51003be39efdd7683c4bb99 | refs/heads/master | 2023-06-28T01:06:59.493345 | 2021-08-02T01:23:02 | 2021-08-02T01:23:02 | 391,776,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label
output_file("label.html", title="label.py example")
source = ColumnDataSource(data=dict(height=[66, 71, 72, 68, 58, 62],
weight=[165, 189, 220, 141, 260, 174],
names=['Mark', 'Amir', 'Matt', 'Greg',
'Owen', 'Juan']))
p = figure(title='Dist. of 10th Grade Students at Lee High',
x_range=Range1d(140, 275))
p.scatter(x='weight', y='height', size=8, source=source)
#p.xaxis[0].axis_label = 'Weight (lbs)'
#p.yaxis[0].axis_label = 'Height (in)'
labels = LabelSet(x='weight', y='height', text='names', source=source)
#labels = LabelSet(x='weight', y='height', text='names', level='glyph',
# x_offset=5, y_offset=5, source=source, render_mode='canvas')
'''
citation = Label(x=70, y=70, x_units='screen', y_units='screen',
text='Collected by Luke C. 2016-04-01', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
'''
p.add_layout(labels)
#p.add_layout(citation)
show(p) | [
"ssophiemarceau@gmail.com"
] | ssophiemarceau@gmail.com |
d7f797ef4b5ed6b8a1b9e335a74a534bcc982d87 | da349082f2dc259fc2469aacf1ad6d2ebe6ea8d3 | /Exam March 28/02/02. Mountain Run.py | a922e128491cc78705aea64e97385168b8863f81 | [] | no_license | lsnvski/SoftUni | 0f8e80d6d8b098028f4652693cb698cacc6c2ee3 | 984db6a2edcfd40a98987a3d32fb5370f9793d3c | refs/heads/main | 2023-01-31T13:52:29.541592 | 2020-12-13T12:55:15 | 2020-12-13T12:55:15 | 303,548,844 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import math
record = float(input())
meter_per_sec = float(input())
time_for_meter = float(input())
time = (math.floor(meter_per_sec / 50) * 30) + (meter_per_sec * time_for_meter)
if record > time:
print(f"Yes! The new record is {time:.2f} seconds.")
else:
print(f"No! He was {time - record:.2f} seconds slower.") | [
"lsnvski@abv.bg"
] | lsnvski@abv.bg |
2302a6695ea93d585a3bdfd4cc25f1fe488046cf | 1caeb7c3f73562176cdd33d983fc6af40e890c71 | /服务器与计算机视觉模块/View/view.py | 02932cc7fb5640708d6d8cfe863cec6d9b3ece5d | [] | no_license | Yimyl/Care | abe6c302b1a669f9f79c13f9a061bee10699acb4 | b0c72c6536b97a74284ac98e704bd9f0f9c4c57f | refs/heads/master | 2020-06-18T13:04:09.625187 | 2019-07-10T08:06:04 | 2019-07-10T08:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,629 | py | from flask_cors import CORS
from flask_login import LoginManager, login_user, login_required
import json
import datetime
import os
from flask_sqlalchemy import SQLAlchemy
import pymysql
import socket
import cv2
import threading
import struct
import numpy
import _thread
from Util.JudgeInteract import faceRegniZation
from flask import Flask, render_template, Response, request
from Util import send, receive
from Vision.Face import CollectFaces
from model import Sys_user, Volunteer_info, Employee_info, Oldperson_info, Event_info
pymysql.install_as_MySQLdb()
# ---- Flask application & extension setup ----
app = Flask(__name__)
CORS(app, supports_credentials=True)  # allow cross-origin requests with credentials
login_manager = LoginManager()
login_manager.init_app(app)
app.config['JSON_AS_ASCII'] = False  # emit UTF-8 (not \u escapes) in JSON responses
# NOTE(review): DB credentials are hard-coded; move to environment/config.
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:123456@localhost/first_flask"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['SECRET_KEY'] = os.urandom(24)  # session key; regenerated on restart
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=7)
db = SQLAlchemy(app)

# ---- Video capture globals ----
status = 0  # presumably 1 = camera must (re)connect on next /video_feed -- TODO confirm
sz = (640, 480)  # recorded frame size
fps = 5          # recording frame rate
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vout = cv2.VideoWriter()  # opened lazily in RT_Image via constructPath()
count = 0  # suffix counter for successive recording files
def constructPath():
    """Return the next recording path (output<N>.avi), bumping the counter."""
    # NOTE(review): hard-coded absolute Windows path; make configurable.
    global count
    count += 1
    return 'C:/Users/Administrator/Desktop/Cares/Vision/output' + str(count) + '.avi'
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Validate credentials from the JSON body; reply {"valid": "done"|"error"}."""
    msg = None
    if request.method == 'POST' or request.method == 'GET':
        # Round-trip through json to normalise the request body into a dict.
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        user = Sys_user.query.filter(Sys_user.identify == data['id']).first()
        if user and user.check_passwd(data['password']):
            #login_user(user)
            msg = {"valid": "done"}
        else:
            msg = {"valid": "error"}
    return json.dumps(msg)
@app.route('/register', methods=['POST', 'GET'])
def register():
    """Create a Sys_user account; fails if the identifier is already taken."""
    msg = None
    if request.method == 'POST' or request.method == 'GET':
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        user = Sys_user(data['id'], data['password'], data['real_name'], data['gender'], data['telephone'])
        # Reject duplicate identifiers before persisting.
        if Sys_user.query.filter(Sys_user.identify == data['id']).first():
            msg = {"valid":"error"}
        else:
            user.add()
            msg = {"valid":"done"}
    return json.dumps(msg)
@login_required
@app.route('/modify', methods=['POST', 'GET'])
def modify():
    """Change a user's password to the value in the JSON body.

    NOTE(review): the update only happens when check_passwd() returns
    False, i.e. when the submitted password differs from the stored one --
    presumably to reject a "change" to the identical password.  Confirm
    against the client's expectations.
    """
    msg = None
    if request.method == 'POST' or request.method == 'GET':
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        #data = json.loads(data)
        user = Sys_user.query.filter(Sys_user.identify == data['name']).first()
        if user and user.check_passwd(data['password']) == False:
            user.mod(data['name'], data['password'])
            msg = {"valid": "done"}
        else:
            msg = {"valid": "error"}
    return json.dumps(msg)
@login_required
@app.route('/oldperson_required', methods=['POST', 'GET'])
def oldperson_required():
    """Return every oldperson_info row as a JSON list of dicts."""
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.oldperson_info")
        db.session.commit()
        data = list(data)
        payload = []
        content = {}
        for result in data:
            # Columns by position: id, name, gender, tel, pic, checkin_date,
            # checkout_date, guardian name/relation/tel -- TODO confirm schema.
            content = {'id': result[0], 'name': result[1].encode("unicode_escape").decode("unicode_escape"), 'gender': result[2], 'tel': result[3], 'pic_src': result[4],
                       'checkin_date': result[5].strftime("%Y-%m-%d"), 'checkout_date': None, 'first_guardian_name': result[7],
                       'first_guardian_relation': result[8], 'first_guardian_tel': result[9]}
            payload.append(content)
            content = {}
        return json.dumps(payload)
@login_required
@app.route('/oldperson', methods=['POST', 'GET'])
def oldperson():
    """Replace the oldperson table with the JSON list posted by the client."""
    if request.method == 'POST' or request.method == 'GET':
        # Full table rewrite: wipe then re-insert everything the client sent.
        db.session.execute("DELETE FROM first_flask.oldperson_info")
        db.session.commit()
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        data = json.loads(data)
        for item in data:
            # checkin_date is stamped server-side; pic/checkout/relation left null.
            user = Oldperson_info(int(item['id']), item['name'],
                                  item['gender'], int(item['tel']), None, datetime.datetime.now(), None,
                                  item['first_guardian_name'], None, int(item['first_guardian_tel']))
            user.add()
        msg = {"valid":"done"}
        return json.dumps(msg)
@login_required
@app.route('/volunteer_required', methods=['POST', 'GET'])
def volunteer_required():
    """Return every volunteer_info row as a JSON list of dicts."""
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.volunteer_info")
        db.session.commit()
        data = list(data)
        payload = []
        content = {}
        for result in data:
            # Columns by position: id, name, gender, tel, pic, checkin_date,
            # checkout_date -- TODO confirm schema.
            content = {'id': result[0], 'name': result[1].encode("unicode_escape").decode("unicode_escape"), 'gender': result[2], 'tel': result[3], 'pic_src': result[4],
                       'checkin_date': result[5].strftime("%Y-%m-%d"), 'checkout_date': None}
            payload.append(content)
            content = {}
        return json.dumps(payload)
@login_required
@app.route('/volunteer', methods=['POST', 'GET'])
def volunteer():
    """Replace the volunteer table with the JSON list posted by the client."""
    if request.method == 'POST' or request.method == 'GET':
        db.session.execute("DELETE FROM first_flask.volunteer_info")
        db.session.commit()
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        data = json.loads(data)
        for item in data:
            # checkin_date is stamped server-side; pic and checkout left null.
            user = Volunteer_info(int(item['id']), item['name'],
                                  item['gender'], int(item['tel']), None, datetime.datetime.now(),
                                  None)
            user.add()
        msg = {"valid":"done"}
        return json.dumps(msg)
@login_required
@app.route('/employee_required', methods=['POST', 'GET'])
def employee_required():
    """Return every employee_info row as a JSON list of dicts."""
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.employee_info")
        db.session.commit()
        data = list(data)
        payload = []
        content = {}
        for result in data:
            # Columns by position: id, name, gender, tel, pic, hire_date,
            # resign_date -- TODO confirm schema.
            content = {'id': result[0], 'name': result[1].encode("unicode_escape").decode("unicode_escape"), 'gender': result[2], 'tel': result[3], 'pic_src': result[4],
                       'hire_date': result[5].strftime("%Y-%m-%d"), 'resign_date': None}
            payload.append(content)
            content = {}
        return json.dumps(payload)
@login_required
@app.route('/employee', methods=['POST', 'GET'])
def employee():
    """Replace the employee table with the JSON list posted by the client."""
    if request.method == 'POST' or request.method == 'GET':
        db.session.execute("DELETE FROM first_flask.employee_info")
        db.session.commit()
        temp = json.dumps(request.get_json())
        data = json.loads(temp)
        data = json.loads(data)
        for item in data:
            # Bug fix: this handler constructed Oldperson_info (copy-paste
            # from the /oldperson route), writing to the wrong table with the
            # wrong argument count.  Employees belong in Employee_info with
            # (id, name, gender, tel, pic, hire_date, resign_date), mirroring
            # the volunteer() handler and the columns employee_required reads.
            user = Employee_info(int(item['id']), item['name'],
                                 item['gender'], int(item['tel']), None, datetime.datetime.now(),
                                 None)
            user.add()
        msg = {"valid":"done"}
        return json.dumps(msg)
@login_required
@app.route('/smile', methods=['POST', 'GET'])
def smile():
    """Count smile events (event_type = 0) per day over the last week.

    Returns JSON mapping day offset ('0' = today ... '6' = six days ago)
    to the number of events recorded that day.
    """
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 0")
        db.session.commit()
        smile_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            # item[2] is assumed to be the event timestamp column -- TODO confirm.
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) raised KeyError for events 5-6 days old
            # (keys stop at '6') and shifted every bucket by two days; use
            # str(day) like the fall() handler, and ignore negative offsets
            # from future-dated rows.
            if 0 <= day <= 6:
                smile_time[str(day)] += 1
        return json.dumps(smile_time)
@login_required
@app.route('/invade', methods=['POST', 'GET'])
def invaded():
    """Count intrusion events (event_type = 2) per day over the last week.

    Returns JSON mapping day offset ('0' = today ... '6' = six days ago)
    to the number of events recorded that day.
    """
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 2")
        db.session.commit()
        invaded_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            # item[2] is assumed to be the event timestamp column -- TODO confirm.
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) raised KeyError for events 5-6 days old
            # (keys stop at '6') and shifted every bucket by two days; use
            # str(day) like the fall() handler, and ignore negative offsets.
            if 0 <= day <= 6:
                invaded_time[str(day)] += 1
        # (A stray debug print of the payload was removed here.)
        return json.dumps(invaded_time)
@login_required
@app.route('/interact', methods=['POST', 'GET'])
def interact():
    """Count interaction events (event_type = 1) per day over the last week.

    Returns JSON mapping day offset ('0' = today ... '6' = six days ago)
    to the number of events recorded that day.
    """
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 1")
        db.session.commit()
        interact_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            # item[2] is assumed to be the event timestamp column -- TODO confirm.
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) raised KeyError for events 5-6 days old
            # (keys stop at '6') and shifted every bucket by two days; use
            # str(day) like the fall() handler, and ignore negative offsets.
            if 0 <= day <= 6:
                interact_time[str(day)] += 1
        return json.dumps(interact_time)
@login_required
@app.route('/fall', methods=['POST', 'GET'])
def fall():
    """Count fall events (event_type = 3) per day over the last week.

    Returns JSON mapping day offset ('0' = today ... '6' = six days ago)
    to the number of events recorded that day.
    """
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 3")
        db.session.commit()
        fall_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            # item[2] is assumed to be the event timestamp column -- TODO confirm.
            day = (cur_time - item[2]).days
            # Robustness fix: also guard the lower bound -- a future-dated row
            # yields a negative offset and would raise KeyError on str(day).
            if 0 <= day <= 6:
                fall_time[str(day)] += 1
        return json.dumps(fall_time)
@login_required
@app.route('/forbidden', methods=['POST', 'GET'])
def forbiddien():
    """Count forbidden-area events (event_type = 4) per day over the last week.

    Returns JSON mapping day offset ('0' = today ... '6' = six days ago)
    to the number of events recorded that day.  (Function and dict names
    keep the original spelling to avoid breaking endpoint lookups.)
    """
    if request.method == 'POST' or request.method == 'GET':
        data = db.session.execute("SELECT * FROM first_flask.event_info WHERE event_type = 4")
        db.session.commit()
        forbiddien_time = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0}
        cur_time = datetime.datetime.now()
        for item in data:
            # item[2] is assumed to be the event timestamp column -- TODO confirm.
            day = (cur_time - item[2]).days
            # Bug fix: str(day + 2) raised KeyError for events 5-6 days old
            # (keys stop at '6') and shifted every bucket by two days; use
            # str(day) like the fall() handler, and ignore negative offsets.
            if 0 <= day <= 6:
                forbiddien_time[str(day)] += 1
        return json.dumps(forbiddien_time)
@app.route('/user/<userid>')  # face-collection endpoint
def user(userid):
    """Capture face images for *userid* via the camera (blocking call)."""
    CollectFaces.collect(userid)
    return "ok"
@app.route('/send/<userid>')  # push collected faces to the remote peer
def sendmessage(userid):
    """Send the face archive for *userid* if the handshake succeeds."""
    if send.send(userid):
        # receive.receive(userid)
        send.socket_client('../images/'+userid+'.zip')
        return "ok"
    return "no"
class Camera_Connect_Object:
    """TCP client that pulls JPEG frames from a remote camera, records them,
    feeds them to face recognition, and exposes the latest frame for the
    /video_feed MJPEG stream."""

    # Most recent decoded frame, shared with get_frame() (class-wide).
    temp = None

    def __init__(self, D_addr_port=["", 8880]):
        self.resolution = [640, 480]
        self.addr_port = D_addr_port
        # Frame-rate handshake value agreed by both ends; 888 is the check value.
        self.src = 888+15
        self.interval = 0   # interval between displayed frames
        self.img_fps = 5    # frames transmitted per second

    def Set_socket(self):
        """Create the TCP client socket with address reuse enabled."""
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    def Socket_Connect(self):
        """Open a fresh socket and connect to the camera server."""
        self.Set_socket()
        self.client.connect(self.addr_port)
        print("IP is %s:%d" % (self.addr_port[0], self.addr_port[1]))

    def shut_down(self):
        """Close the client socket."""
        self.client.close()

    def RT_Image(self):
        """Receive, decode, record and display frames until ESC is pressed."""
        # Pack and send the frame rate and resolution in the agreed format.
        vout.open(constructPath(), fourcc, fps, sz, True)
        self.name = self.addr_port[0]+" Camera"
        self.client.send(struct.pack("lhh", self.src, self.resolution[0], self.resolution[1]))
        while(True):
            info = struct.unpack("lhh", self.client.recv(8))
            buf_size = info[0]  # total byte length of the incoming image
            if buf_size:
                try:
                    self.buf = b""  # bytes accumulator for one frame
                    temp_buf = self.buf
                    while(buf_size):  # keep reading until the full frame arrives
                        temp_buf = self.client.recv(buf_size)
                        buf_size -= len(temp_buf)
                        self.buf += temp_buf  # append this chunk of the image
                    data = numpy.fromstring(self.buf, dtype='uint8')  # raw bytes -> uint8 array
                    self.image = cv2.imdecode(data, 1)  # decode JPEG to BGR image
                    vout.write(self.image)
                    # Publish for /video_feed and fan out to face recognition.
                    Camera_Connect_Object.temp = self.image
                    _thread.start_new_thread(faceRegniZation, (self.image, ))
                    cv2.imshow("Face Recongnition", self.image)
                    # cv2.imshow(self.name, self.image)  # show under the camera name
                except:
                    # Best-effort: drop a corrupt frame and keep streaming.
                    pass
                finally:
                    # Refresh every 10 ms; ESC (27) closes the stream.
                    if(cv2.waitKey(10) == 27):
                        self.client.close()
                        cv2.destroyAllWindows()
                        break

    def get_frame(self):
        """Return the latest published frame re-encoded as JPEG bytes."""
        # info = struct.unpack("lhh", self.client.recv(8))
        # buf_size = info[0]
        # temp_buf = self.client.recv(buf_size)
        # buf_size -= len(temp_buf)
        # self.buf += temp_buf
        # data = numpy.fromstring(self.buf, dtype='uint8')
        # self.image = cv2.imdecode(data, 1)
        # ret, jpeg = cv2.imencode('.jpg', self.image)
        ret, jpeg = cv2.imencode('.jpg', Camera_Connect_Object.temp)
        return jpeg.tobytes()

    def Get_Data(self, interval):
        """Start RT_Image on a background thread."""
        showThread = threading.Thread(target=self.RT_Image)
        showThread.start()
@app.route('/')  # home page
def index():
    """Home page; HTML template rendering is currently disabled."""
    # jinja2 template version, kept in index.html:
    # return render_template('index.html')
    return "hello world"
def gen(camera):
    """Yield an endless multipart MJPEG stream of frames from *camera*.

    Each chunk wraps one JPEG frame in the multipart boundary format that
    browsers expect for multipart/x-mixed-replace responses.
    """
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.get_frame() + b'\r\n\r\n'
# Module-level camera client used by /video_feed; connected in __main__.
camera = Camera_Connect_Object()
@app.route('/video_feed')  # this endpoint returns the video stream response
def video_feed():
    """Stream the camera as a multipart/x-mixed-replace MJPEG response."""
    global status
    # status == 1 presumably means /release ran earlier; reconnect first.
    if (status == 1):
        camera.Socket_Connect()
        camera.Get_Data(camera.interval)
        status = status - 1
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/release')
def release():
    """Stop recording and drop the camera link; /video_feed reconnects later."""
    global status
    status = status + 1  # flag that the next /video_feed call must reconnect
    vout.release()
    camera.shut_down()
    return "ok"
if __name__ == '__main__':
    app.debug = True
    # Connect to the remote camera before serving HTTP requests.
    camera.addr_port[0] = "192.168.10.104"
    camera.addr_port = tuple(camera.addr_port)
    camera.Socket_Connect()
    camera.Get_Data(camera.interval)
    app.run(host='0.0.0.0', debug=True, port=5000)
| [
"tiamo39@vip.aa.com"
] | tiamo39@vip.aa.com |
9eb6bae7627c2afdc8c8b8050b3ee526b76ad8c9 | c513e3ca72ce5e8ebe62d00c37531603f1b541c1 | /part1/nn_models.py | 7a400deb0cead2dc5a2bf7c20ecb44158a101a61 | [] | no_license | 0akhilesh9/reinforcement_learning_algos | e3cdb9f9b5333e9c655506a3acb1a825591b9068 | 7750a85cc98b5c1697cb3272fc3a7a572e383d0c | refs/heads/main | 2023-03-08T10:37:29.828739 | 2021-02-25T06:15:37 | 2021-02-25T06:15:37 | 342,087,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | import torch
import torch.nn as nn
import torch.autograd as autograd
class ConvolutionalNN(nn.Module):
    """Convolutional Q-network mapping image states to per-action Q-values.

    Fixes over the original: ``super()`` referenced an undefined ``ConvDQN``
    class (NameError), ``feature_size()`` was called before ``self.conv``
    existed (AttributeError), and ``forward``/``feature_size`` used a
    non-existent ``self.conv_net`` attribute instead of ``self.conv``.
    """

    def __init__(self, input_dim, output_dim):
        """input_dim: (channels, height, width) of a state; output_dim: #actions."""
        super(ConvolutionalNN, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        # DQN-style feature extractor (8/4, 4/2, 3/1 conv stack).
        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        # Must run after self.conv exists: feature_size() does a dummy pass.
        self.fc_input_dim = self.feature_size()
        self.fc = nn.Sequential(
            nn.Linear(self.fc_input_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim)
        )

    def forward(self, state):
        """Return Q-value logits of shape (batch, output_dim)."""
        features = self.conv(state)
        features = features.view(features.size(0), -1)
        qvals = self.fc(features)
        return qvals

    def feature_size(self):
        """Number of features the conv stack yields for one input state."""
        # A gradient-free dummy forward pass sizes the first linear layer
        # (replaces the deprecated autograd.Variable wrapper).
        with torch.no_grad():
            return self.conv(torch.zeros(1, *self.input_dim)).view(1, -1).size(1)
# Fully connected NN
class FullConnectedNN(nn.Module):
    """Simple MLP Q-network: input -> 128 -> 256 -> output logits."""

    def __init__(self, input_dim, output_dim):
        """input_dim: 1-tuple holding the flat state size; output_dim: #actions."""
        super(FullConnectedNN, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        layers = [
            nn.Linear(self.input_dim[0], 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, self.output_dim),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, input_state_params):
        """Return Q-value logits for a batch of flat state vectors."""
        return self.fc(input_state_params)
"noreply@github.com"
] | noreply@github.com |
e6dc56afa4414aaf765a2ecc588488d2ab650a35 | a86552e1da790c6a24ab0009d84be270e303c1a8 | /highlighting numbers, if, for, while/Queen's move.py | 8e1bc399649ca35fcebc715fa7c444ed0a57e1b0 | [] | no_license | dimasiklrnd/python | 4c9da1c0e7fa834bcf9083c475654e2b4d6ef0eb | 1c0a84ab8242b577c92c1ec8b83ad80216fdc972 | refs/heads/master | 2020-06-29T08:22:26.845678 | 2020-01-09T19:59:23 | 2020-01-09T19:59:23 | 200,410,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | '''Шахматный ферзь может ходить на любое число клеток по горизонтали, по вертикали или по диагонали. Даны две различные клетки шахматной доски, определите, может ли ферзь попасть с первой клетки на вторую одним ходом. Для простоты можно не рассматривать случай, когда данные клетки совпадают.
Формат входных данных
Программа получает на вход четыре числа от 1 до 8 каждое, задающие номер столбца и номер строки сначала для первой клетки, потом для второй клетки.
Формат выходных данных
Программа должна вывести YES, если из первой клетки ходом ферзя можно попасть во вторую. В противном случае - NO'''
# Read the two squares: (x1, y1) is the queen's square, (x2, y2) the target.
x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
# The queen reaches the target in one move when both squares share a
# diagonal (equal coordinate deltas), the same column, or the same row.
if abs(x1 - x2) == abs(y1 - y2) or x1 == x2 or y1 == y2:
    print('YES')
else:
    print('NO')
| [
"dimasiklrnd@gmail.com"
] | dimasiklrnd@gmail.com |
818fb09f8f5de94bdddf44acd471f366bfd04c70 | eb463217f001a8ff63243208dc2bb7e355793548 | /src/richie/plugins/section/migrations/0002_add_template_field.py | d3c83581c472881596481d657675e6d9f2744e84 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | phuoclhb/richie | 25020254b635c41648d65a30b3f2405007bd8a39 | 328167d02f9596c8b1d428655f0de1bed7fb277d | refs/heads/master | 2020-08-13T07:14:22.006472 | 2019-10-11T15:31:02 | 2019-10-11T15:58:48 | 214,930,515 | 1 | 0 | MIT | 2019-10-14T02:27:14 | 2019-10-14T02:27:13 | null | UTF-8 | Python | false | false | 631 | py | # Generated by Django 2.1.7 on 2019-02-22 01:57
from django.db import migrations, models
from ..defaults import SECTION_TEMPLATES
class Migration(migrations.Migration):
    """Add the optional ``template`` choice field to the Section plugin."""
    # Must run after the initial section migration.
    dependencies = [("section", "0001_initial")]
    operations = [
        migrations.AddField(
            model_name="section",
            name="template",
            field=models.CharField(
                # Choices come from the configurable SECTION_TEMPLATES
                # setting; its first entry is used as the default.
                choices=SECTION_TEMPLATES,
                default=SECTION_TEMPLATES[0][0],
                help_text="Optional template for custom look.",
                max_length=150,
                verbose_name="Template",
            ),
        ),
    ]
| [
"sveetch@gmail.com"
] | sveetch@gmail.com |
9678cb52350193a1095fe3ab30afa8b5e511b73e | 04850099bbe9f7548793eb8fd6376fbdb2f07670 | /ecg/from_MCP3008.py | 109a60e19d49ff1c895139308ad2aa3b6abbceeb | [] | no_license | sam-luby/ECG-Pi | f92f48312347c2b0954cd3bc9b959c5363ebd92d | c0e3d520f15e6e2acd2fb3943c00800efaf1ff5d | refs/heads/master | 2022-07-21T19:07:08.341152 | 2019-03-05T14:31:54 | 2019-03-05T14:31:54 | 122,356,393 | 2 | 1 | null | 2022-06-21T23:29:09 | 2018-02-21T15:47:15 | Python | UTF-8 | Python | false | false | 1,146 | py | import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import pandas as pd
#run pip3 install Adafruit-GPIO & pip3 install Adafruit-MCP3008 if packages not found
# retrieve data from ADC using SPI interface
def get_data_from_MCP(T, filename):
fs = 250
i = 0
Nsamp = T*fs
milestones = []
SPI_PORT = 0
SPI_DEVICE = 0
for x in range(1, 11): # List to store Nsamp/multiples of 10 for %age calc
milestones.append(int(x * (Nsamp / 10)))
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
values = []
percentage = 10
print('Reading MCP3008 values, press Ctrl-C to quit...')
while True and i < Nsamp:
value = mcp.read_adc(0)
values.append(value)
i+=1
if i in milestones: # percentage completed indication for user
print("Collecting data, {}% complete.".format(percentage))
percentage += 10
time.sleep(0.004) # not the best
print(len(values))
print("Recording complete, processing ECG data...")
dat = pd.DataFrame(values)
dat.to_csv(filename, index=False)
return Nsamp
| [
"samwluby@gmail.com"
] | samwluby@gmail.com |
270c15670e030d0104c5c652e4fe7cb418d3d976 | d659810b24ebc6ae29a4d7fbb3b82294c860633a | /aliyun-python-sdk-mse/aliyunsdkmse/request/v20190531/GetOverviewRequest.py | 45fcc3ddbf67d6dc972d5537367c6ddd7257cc6e | [
"Apache-2.0"
] | permissive | leafcoder/aliyun-openapi-python-sdk | 3dd874e620715173b6ccf7c34646d5cb8268da45 | 26b441ab37a5cda804de475fd5284bab699443f1 | refs/heads/master | 2023-07-31T23:22:35.642837 | 2021-09-17T07:49:51 | 2021-09-17T07:49:51 | 407,727,896 | 0 | 0 | NOASSERTION | 2021-09-18T01:56:10 | 2021-09-18T01:56:09 | null | UTF-8 | Python | false | false | 1,556 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class GetOverviewRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'GetOverview','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
487d70bc23e229a453f023037c3ecd226010880b | ea08a577ce67633a4663890df93690231e9e5736 | /N-body simulation.py | dcbd1d5d4a507ba5391a3dab45afb81109981787 | [] | no_license | tjredfern/N-body-numerical-simulation | 91f8bccadc318ed96526adc117d34ecdb650e420 | f85e4cc51730195ad70ef1bccad72b8fb6be7f13 | refs/heads/master | 2022-11-27T16:08:33.235051 | 2020-07-31T13:05:00 | 2020-07-31T13:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,686 | py | #==========================================================================================================================================
# Script written in Python to integrate the equations of motion of N particles interacting with each other gravitationally with high precision.
# The script computes the equations of motion and use scipy.integrate to integrate them.
# Then it uses matplotlib to visualize the solution
#==========================================================================================================================================
import numpy as np
import sympy as sp
# A minimal 2D vector used for positions, velocities and accelerations.
class Vec2:
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __str__(self):
        # Handy when printing vectors while debugging.
        return f"({self.x}, {self.y})"
    def __add__(self, other):
        return Vec2(self.x + other.x, self.y + other.y)
    # Reflected addition performs the identical component-wise sum.
    __radd__ = __add__
    def __sub__(self, other):
        return Vec2(self.x - other.x, self.y - other.y)
    def __rsub__(self, other):
        # ``other - self`` with ``other`` on the left-hand side.
        return Vec2(other.x - self.x, other.y - self.y)
    def __mul__(self, scalar):
        return Vec2(self.x * scalar, self.y * scalar)
    # Scalar multiplication commutes, so reuse the same implementation.
    __rmul__ = __mul__
    def dot(self, other):
        return self.x * other.x + self.y * other.y
    def get_length(self):
        return np.sqrt(self.dot(self))
# Define a Particle class. The particles are the bodies attracting each other
class Particle():
    """One gravitating body: mass, symbolic equations of motion, their
    compiled (lambdified) forms, and the integrated solution arrays."""
    # n = number of particles created so far (class-level counter)
    n = 0
    def __init__(self,initial_pos,initial_vel, mass):
        # i = particle index (its position in the creation order)
        self.i = Particle.n
        Particle.n += 1
        self.m = mass
        self.G = 1 # change this to 6.674e-11 for real-world (SI) units
        # pos, vel, acc = symbolic variables (x_i, y_i, vx_i, vy_i)
        self.pos = Vec2(sp.symbols("x_"+str(self.i)),sp.symbols("y_"+str(self.i)))
        self.vel = Vec2(sp.symbols("vx_"+str(self.i)),sp.symbols("vy_"+str(self.i)))
        self.acc = Vec2(0,0)
        # lamb_vel, lamd_acc = lambdified functions, filled in by the
        # lambdify_* methods below
        self.lamb_vel = Vec2(None,None)
        self.lamd_acc = Vec2(None,None)
        # initial_pos, initial_vel = initial position and velocity
        self.initial_pos = initial_pos
        self.initial_vel = initial_vel
        # vf_vel, vf_acc = scratch values used by the vectorfield() function
        self.vf_vel = Vec2(0,0)
        self.vf_acc = Vec2(0,0)
        # sol_pos, sol_vel = position and velocity solution arrays filled in
        # after the equations of motion have been integrated
        self.sol_pos = Vec2(None,None)
        self.sol_vel = Vec2(None,None)
    # compute particle acceleration using Newton's law of universal gravitation
    def calculate_acc(self,particles):
        # Accumulates the symbolic gravitational pull of every other particle.
        # NOTE(review): assumes ``particles[self.i] is self`` (creation order)
        # so the self-interaction is skipped via the index comparison.
        for j in range(len(particles)):
            if self.i !=j:
                self.acc += (particles[j].pos - self.pos)*particles[j].m*self.G*(1/(((self.pos.x-particles[j].pos.x)**2 + (self.pos.y-particles[j].pos.y)**2)**(3/2)))
    # lambdified symbolic functions are faster for numerical calculations.
    # The symbolic equations of motion are built once and compiled with
    # lambdify to avoid slow Python loops inside vectorfield(), which the
    # ODE solver evaluates thousands of times.
    def lambdify_vel(self,particles):
        # The velocity "equation" is the identity, so these compiled
        # functions simply return their argument.
        self.lamb_vel.x = sp.lambdify(self.vel.x, self.vel.x)
        self.lamb_vel.y = sp.lambdify(self.vel.y, self.vel.y)
    def lambdify_acc(self,particles):
        # The acceleration depends on every particle's position, so the
        # compiled function takes the flat [x_0, y_0, x_1, y_1, ...] list.
        var = []
        for j in range(len(particles)):
            var.append(particles[j].pos.x)
            var.append(particles[j].pos.y)
        self.lamd_acc.x = sp.lambdify([var], self.acc.x)
        self.lamd_acc.y = sp.lambdify([var], self.acc.y)
#Input here the initial conditions of the particles and their masses
################################################################################################################################
#particle list
par = []
#create the particles (initial position, initial velocity, mass)
par.append(Particle(initial_pos = Vec2(2,5), initial_vel = Vec2(0.5,0.5) , mass = 1.))
par.append(Particle(initial_pos = Vec2(5,2), initial_vel = Vec2(0.5,0.2) , mass = 1.))
par.append(Particle(initial_pos = Vec2(3,3), initial_vel = Vec2(0.1,0.5) , mass = 1.))
par.append(Particle(initial_pos = Vec2(0.6,2.5), initial_vel = Vec2(0.5,0.5) , mass = 1.))
# Simulation time and number of steps
t_end = 60.0
steps = 800
################################################################################################################################
n = len(par)
#build the symbolic equations of motion, then compile them for fast
#numeric evaluation inside vectorfield()
for i in range(n):
    par[i].calculate_acc(par)
for i in range(n):
    par[i].lambdify_vel(par)
    par[i].lambdify_acc(par)
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
def vectorfield(var, t):
    '''
    Right-hand side of the ODE system, in the form scipy's odeint expects.

    ``var`` is the flat state [x_0, y_0, ..., x_{n-1}, y_{n-1},
    vx_0, vy_0, ..., vx_{n-1}, vy_{n-1}]; the returned list holds the
    derivatives in the same order (velocities first, then accelerations).
    Relies on the module-level globals ``n`` and ``par``; ``t`` is unused
    because the system is autonomous.
    '''
    pos = var[0:2*n]
    vel = var[2*n:4*n]
    f = []
    # d(position)/dt = velocity
    for i in range(0,n):
        par[i].vf_vel.x = par[i].lamb_vel.x(vel[2*i])
        par[i].vf_vel.y = par[i].lamb_vel.y(vel[2*i + 1])
        f.append(par[i].vf_vel.x)
        f.append(par[i].vf_vel.y)
    # d(velocity)/dt = acceleration evaluated at the current positions
    for i in range(0,n):
        par[i].vf_acc.x = par[i].lamd_acc.x(pos)
        par[i].vf_acc.y = par[i].lamd_acc.y(pos)
        f.append(par[i].vf_acc.x)
        f.append(par[i].vf_acc.y)
    return f
from scipy.integrate import odeint
# set the initial conditions: positions first, then velocities, matching
# the state layout vectorfield() expects
var = []
for i in range(len(par)):
    var.append(par[i].initial_pos.x)
    var.append(par[i].initial_pos.y)
for i in range(len(par)):
    var.append(par[i].initial_vel.x)
    var.append(par[i].initial_vel.y)
# ODE solver parameters
t = np.linspace(0,t_end,steps+1)
sol = odeint(vectorfield, var, t)
sol = np.transpose(sol)
# order the solution for clarity: rows 2i/2i+1 are particle i's x/y positions
for i in range(n):
    par[i].sol_pos.x = sol[2*i]
    par[i].sol_pos.y = sol[2*i+1]
for i in range(n):
    par[i].sol_vel.x = sol[2*n + 2*i]
    par[i].sol_vel.y = sol[2*n + 2*i+1]
# Calculate the total Energy of the system. The energy should be constant.
# NOTE(review): both terms below hard-code unit masses and G=1 -- confirm
# this matches the Particle parameters above before changing them.
# Potential Energy (pairwise -1/r, evaluated element-wise over all times)
Energy = 0
for i in range(0,n):
    for j in range(i+1,n):
        Energy += (-1/(((par[i].sol_pos.x-par[j].sol_pos.x)**2 + (par[i].sol_pos.y-par[j].sol_pos.y)**2)**(1/2)))
# Kinetic Energy (0.5 * v^2 per particle)
for i in range(0,n):
    Energy += 0.5*(par[i].sol_vel.x*par[i].sol_vel.x + par[i].sol_vel.y*par[i].sol_vel.y)
# Visualization of the solution with matplotlib. It uses a slider to change the time
################################################################################################################################
# Visualization: draw the trajectories, scrubbed through time via a slider.
plt.style.use('dark_background')
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(1,1,1)
plt.subplots_adjust(bottom=0.2,left=0.15)
ax.axis('equal')
ax.axis([-1, 30, -1, 30])
ax.set_title('Energy =' + str(Energy[0]))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# One marker (circle) and one trail (line) per particle.
circle = [None]*n
line = [None]*n
for i in range(n):
    circle[i] = plt.Circle((par[i].sol_pos.x[0], par[i].sol_pos.y[0]), 0.08, ec="w", lw=2.5, zorder=20)
    ax.add_patch(circle[i])
    line[i] = ax.plot(par[i].sol_pos.x[:0],par[i].sol_pos.y[:0])[0]
from matplotlib.widgets import Slider
slider_ax = plt.axes([0.1, 0.05, 0.8, 0.05])
slider = Slider(slider_ax, # the axes object containing the slider
                't', # the name of the slider parameter
                0, # minimal value of the parameter
                t_end, # maximal value of the parameter
                valinit=0, # initial value of the parameter
                color = '#5c05ff'
)
def update(time):
    # Map the slider time to the nearest solution index, then redraw the
    # markers, the trails and the (ideally constant) energy in the title.
    i = int(np.rint(time*steps/t_end))
    ax.set_title('Energy =' + str(Energy[i]))
    for j in range(n):
        circle[j].center = par[j].sol_pos.x[i], par[j].sol_pos.y[i]
        line[j].set_xdata(par[j].sol_pos.x[:i+1])
        line[j].set_ydata(par[j].sol_pos.y[:i+1])
slider.on_changed(update)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
012d51acc6c656aee5030104ef582244d8f617f2 | 93d5f864f4e24892c3df665ffd32d3a39faf644f | /Tareas/T01/menu.py | 64fcb6287472f3cd7cb197f8a28a03bf4b83b9c4 | [] | no_license | Benjaescobar/programacion_avanzada | ecf177b6ddf0a250682c7952580b0786197ce0a8 | c0664208b13a9c9216a9361e75560ebd8cf92f45 | refs/heads/master | 2023-05-30T15:33:00.249358 | 2021-05-25T21:24:18 | 2021-05-25T21:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,656 | py |
from parametros import INICIADOR_LOOP, DIAS_COMPETENCIA
from cargar_datos import cargar_delegaciones
from campeonato import Campeonato
from delegaciones import IEEEsparta, DCCrotona
from funciones import ingresar_input, crear_archivo, seleccionar_jugador_lesionado
##### Funciones utiles #####
def ingresar_apodo():
    # Keep prompting until the user types a purely alphanumeric nickname,
    # then return it.
    while INICIADOR_LOOP:
        apodo = input()
        if not apodo.isalnum():
            print("Nombre invalido :(, ingrese uno con caracteres alfanumericos")
            continue
        return apodo
###############################
########## M E N U S ##########
###############################
def menu_entrenador(entrenador, torneo):
    # TRAINER MENU.  ``entrenador`` is actually an instance of the
    # delegation (team) the user picked, not a separate trainer object.
    # Returns True to go back to the previous menu, False to quit the game.
    while INICIADOR_LOOP:
        print("[0] Fichar deportista")
        print("[1] Entrenar deportista")
        print("[2] Sanar deportista")
        print("[3] Comprar tecnologia")
        print("[4] Usar habiliidad especial")
        print("[5] Volver al menu anterior")
        print("[6] Salir del programa")
        comando = ingresar_input(["0","1","2","3","4","5","6"])
        if comando == "0":
            entrenador.fichar_deportistas(torneo)
        elif comando == "1":
            entrenador.entrenar_deportistas(torneo)
        elif comando == "2":
            # NOTE(review): uses torneo.entrenador rather than the
            # ``entrenador`` argument -- confirm both refer to the same object.
            torneo.entrenador.sanar_lesiones(torneo)
        elif comando == "3":
            entrenador.comprar_tecnologia(torneo)
        elif comando == "4":
            entrenador.habilidad_especial(torneo)
        elif comando == "5":
            return True
        else:
            return False
##### COMPETENCIA #####
def menu_competencia(torneo):
    # Placeholder: the competition menu is not implemented yet.
    pass
##### MENU PRINCIPAL #####
def menu_principal(entrenador, torneo):
    # Main game loop: runs until DIAS_COMPETENCIA days have elapsed, then
    # dumps the results file and announces the winner from the medal table.
    while INICIADOR_LOOP:
        print(torneo.dia_actual)
        print(DIAS_COMPETENCIA)
        if torneo.dia_actual >= DIAS_COMPETENCIA:
            print("\nSE ACABO LA COMPETENCIA! -O-\n")
            # Show the accumulated match results written during the game.
            with open("resultados.txt", "r") as resultados:
                for row in resultados:
                    print(row)
            # torneo.mostrar_estado()
            # NOTE(review): the winner messages assume torneo.entrenador is
            # the IEEEsparta side and torneo.rival the DCCrotona side --
            # confirm this holds when the user picked DCCrotona.
            if torneo.medallero["IEEEsparta"] > torneo.medallero["DCCrotona"]:
                print("\nFelicitaciones a", torneo.entrenador.delegacion, "por su victoria\n")
            elif torneo.medallero["IEEEsparta"] < torneo.medallero["DCCrotona"]:
                print("\nFelicitaciones a", torneo.rival.delegacion, "por su victoria\n")
            else:
                print("NO GANO NADIEEEEEEE\n")
            return
        # Odd days are training days; remind the player to make upgrades.
        if torneo.dia_actual % 2 != 0:
            imprimir = "y te toca entrenar, aprovecha de hacer mejoras utiles\n"
            print("\nEstas en el dia",torneo.dia_actual,imprimir)
        print("[0] Menu entrenador")
        print("[1] Simular competencias")
        print("[2] Mostrar estado competencia")
        print("[3] Salir del programa")
        # ingresar_input validates that the entered command is one of the
        # allowed options.
        comando = ingresar_input(["0","1","2","3"])
        if comando == "0":
            menu = menu_entrenador(entrenador,torneo)
            # menu_entrenador returns False when the user wants to quit.
            if menu:
                continue
            else:
                print("Adios")
                return
        elif comando == "1":
            # Simulate one competition day (day advances before and after).
            torneo.dia_actual += 1
            torneo.competencias()
            torneo.dia_actual +=1
            pass
        elif comando == "2":
            # Show the current state of the competition.
            torneo.mostrar_estado()
        else:
            return
##### MENU INICIADOR JUEGO #####
def menu_inicio():
    # Entry menu: set up player/rival names, let the user pick a delegation,
    # build both teams from the data files and start the main loop.
    while INICIADOR_LOOP:
        print("[0] Iniciar juego\n[1] Salir del programa")
        comando = ingresar_input(["0","1"])
        if comando == "0":
            crear_archivo()
            # Game starts here.
            print("Ingrese su nombre:")
            nombre_entrenador = ingresar_apodo()
            print("Ingrese el nombre de su rival:")
            nombre_rival = ingresar_apodo()
            print("Escoja una delegacion")
            print("[0] IEEEsparta\n[1] DCCrotona")
            comando = ingresar_input(["0","1"])
            if comando == "0":
                # cargar_delegaciones returns a dict keyed by delegation name.
                datos_esparta = cargar_delegaciones()["IEEEsparta"]
                datos_crotona = cargar_delegaciones()["DCCrotona"]
                entrenador = IEEEsparta(nombre_entrenador,datos_esparta[0],datos_esparta[1],datos_esparta[2],datos_esparta[3])
                rival = DCCrotona(nombre_rival,datos_crotona[0],datos_crotona[1],datos_crotona[2],datos_crotona[3])
                torneo = Campeonato(entrenador,rival)
                menu_principal(entrenador,torneo)
            elif comando == "1":
                # Same setup with the delegations swapped between user/rival.
                datos_esparta = cargar_delegaciones()["IEEEsparta"]
                datos_crotona = cargar_delegaciones()["DCCrotona"]
                rival = IEEEsparta(nombre_rival, datos_esparta[0], datos_esparta[1], datos_esparta[2], datos_esparta[3])
                entrenador = DCCrotona(nombre_entrenador, datos_crotona[0], datos_crotona[1], datos_crotona[2], datos_crotona[3])
                torneo = Campeonato(entrenador,rival)
                menu_principal(entrenador,torneo)
        else:
            return
| [
"benja.escobar.b@gmail.com"
] | benja.escobar.b@gmail.com |
2f4d3ed5aeb0dd33c30eeb4314e8635d304e8da0 | 307d3dea0a67bc779b65839fad410536d8ee636e | /data/formatted_data.py | ebc47895870b77670cc4a9e2c2c02876a690fafd | [] | no_license | PatelMohneesh/Network-Visualization-with-SigmaJS | c179a7cd321457d60be6b62fd9de3d810c6bb47e | 3b62025903837454d14093b7a4be76179b522ccf | refs/heads/master | 2021-09-24T10:50:48.065943 | 2018-10-08T16:08:32 | 2018-10-08T16:08:32 | 100,344,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 14:46:24 2017
@author: Mohneesh
"""
import json
import os
#Considering the same format of the data as provided by "Benjie", network_temp.json
filename = 'network_temp.json'
# Load the raw graph dump (same format as returned by the REST API).
with open(filename, 'r') as f:
    data = json.load(f)
# data = original data we receive from the Rest API's
#print (data)
edge_data = data['edges']
node_data = data['nodes']
#Modifying Nodes Data
def ModNodesData(data):
    """Rewrite graph-db node dicts in place for the UI.

    For every node dict: the graph-db ``label`` becomes the UI ``type``
    and the node ``id`` is reused as the displayed ``label``.

    Returns the (mutated) list itself; the original returned only the last
    dict of the loop, which was clearly unintended for a bulk transform.
    """
    for mydict in data:
        mydict['type'] = mydict['label']  # graph-db 'label' -> UI 'type'
        mydict['label'] = mydict['id']    # show the node id as the UI label
    return data
#NO Need to Modify Edge Data as it's in the required format form Graphdb Rest API's
#Extracting Nodes out of Edges from Edge File
Mod_Node = ModNodesData(node_data)
print(edge_data)
# NOTE(review): node_list is created below but never filled or used in this
# file -- the node-extraction-from-edges logic was left commented out.
node_list = []
for mydict in edge_data:
    print (mydict)
    #nodes = {}
    #for i in mydict:
    #nodes['id'] = i['source']
    #nodes['label'] = i['source']
    #print (nodes)
| [
"mp3542@columbia.edu"
] | mp3542@columbia.edu |
60b20aa9d3b0bab7d87c6610f60a4be9b8b08c52 | b25fe0d0e401ef3a0ba52751326a427d575ce2bc | /GALAXY_wrapper/run_enrichment.py | 2a793bddc019621885382c8f2c38b1997ab326df | [
"LicenseRef-scancode-biopython"
] | permissive | szymczakpau/biopython | 9fd8385396073d43f4758c60f45aace9aa032e17 | 6f997cd1ea7daf89f0b70854401da4cde35d6a00 | refs/heads/master | 2021-01-26T04:40:40.853772 | 2020-02-26T16:36:28 | 2020-02-26T16:36:28 | 243,308,366 | 0 | 0 | NOASSERTION | 2020-02-26T16:20:02 | 2020-02-26T16:20:00 | null | UTF-8 | Python | false | false | 5,250 | py | #!/usr/bin/python
import sys
import os
import math
import argparse
import Bio.Ontology
import Bio.Ontology.IO as OntoIO
def read_list(filename):
    """Read a gene-list file and return the gene names it contains.

    The file is tab separated.  A first line starting with '!' or '#' is
    treated as a header and skipped.  One-column lines are kept as-is;
    two-column lines are kept when the second column is '1', dropped when
    it is '0' and rejected otherwise.  Identifiers shaped like
    ``prefix_name_suffix`` are reduced to their middle ``name`` part.
    """
    def _gene_name(identifier):
        # ``prefix_gene_suffix`` -> ``gene``; plain names pass through.
        parts = identifier.split('_')
        if len(parts) < 2:
            return identifier
        return "_".join(parts[1:-1])

    out = []
    with open(filename, 'r') as file_in:
        line = file_in.readline()  # possible header line
        # startswith also copes with an empty file, where the original
        # ``line[0]`` lookup raised IndexError.
        if not line.startswith(('!', '#')):
            file_in.seek(0)
        for line in file_in:
            content = line.strip().split('\t')
            if len(content) <= 1:
                if content[0] != "":
                    out.append(_gene_name(content[0]))
            elif content[1] == '1':
                out.append(_gene_name(content[0]))
            elif content[1] != '0':
                raise Exception("Invalid values in list of genes: second column includes %s instead of 0 or 1" % content[1])
    return out
def run_term(assocs, go_graph, gene_list, corrections):
    """Run a term-for-term GO enrichment analysis and return its result."""
    from Bio.Ontology import TermForTermEnrichmentFinder
    ef = TermForTermEnrichmentFinder(assocs, go_graph)
    result = ef.find_enrichment(gene_list, corrections)
    return result
def run_parent_child(assocs, go_graph, gene_list, corrections, method):
    """Run a parent-child GO enrichment analysis ('union'/'intersection')."""
    from Bio.Ontology import ParentChildEnrichmentFinder
    ef = ParentChildEnrichmentFinder(assocs, go_graph)
    result = ef.find_enrichment(gene_list, corrections, method)
    return result
def check_file(parser, arg, openparam):
    """Validate a path taken from the command line.

    For mode 'r' the file must already exist; for any other mode the path
    must be creatable/writable.  Failures are reported through
    ``parser.error``, which exits the program.

    NOTE: checking a write mode opens the file, which creates (and with
    'w'/'w+' truncates) it as a side effect -- kept from the original
    behaviour, since the outputs are written right afterwards anyway.
    """
    if openparam == 'r':
        if not os.path.exists(arg):
            parser.error("The file %s does not exist!" % arg)
    else:
        try:
            f = open(arg, openparam)
            f.close()
        except (IOError, OSError):
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            parser.error("Cannot create file %s" % arg)
def main():
    """Parse the command line, run the selected GO enrichment analysis and
    write the result in every requested output format.

    Exits via the argument parser (SystemExit) on any invalid input.
    """
    main_parser = argparse.ArgumentParser(description='run Gene Ontology')
    subparsers = main_parser.add_subparsers(dest='which', help='type of enrichment analysis')
    subparsers.required = True
    # Options shared by both sub-commands, attached through parents=[...].
    parser = argparse.ArgumentParser(add_help=False)
    required = parser.add_argument_group('required named arguments')
    required.add_argument('-o', '--out', type=str, required=True, nargs='+',
                        help='output file')
    required.add_argument('-i', '--inp', type=str, required=True,
                        help='input gene list file')
    required.add_argument('-a', '--assoc', type=str, required=True,
                        help='input associations file (.gaf)')
    required.add_argument('-g', '--gograph', type=str, required=True,
                        help='input GO graph file (.obo)')
    parser.add_argument('-f', '--outputformat', choices=["html", "txt", "gml", "png", "tabular"], nargs="+",
                        help='output file format', default=["html"])
    parser.add_argument('-c', '--corrections', choices=["bonferroni", "bh_fdr", "bonferroni,bh_fdr", "bh_fdr,bonferroni"],
                        help='multiple hypothesis testing corrections', nargs='+', default=[])
    subparsers.add_parser("term-for-term", parents=[parser])
    parser2 = subparsers.add_parser("parent-child", parents=[parser])
    # Parent-child specific option.
    parser2.add_argument('-m', '--method', choices=["union", "intersection"],
                        help='method used to compute probabilities', default="union")
    # Validate arguments.
    if len(sys.argv) < 2:
        main_parser.print_usage()
        sys.exit(1)
    args = main_parser.parse_args()
    if len(args.out) != len(args.outputformat):
        main_parser.error("Number of output files doesn't match number of formats!")
    check_file(main_parser, args.inp, 'r')
    check_file(main_parser, args.assoc, 'r')
    check_file(main_parser, args.gograph, 'r')
    for f in args.out:
        check_file(main_parser, f, 'w+')
    # Corrections may be given space- or comma-separated; flatten and
    # de-duplicate them into a single list.
    cors = []
    for cor in args.corrections:
        if "," in cor:
            cors += cor.split(",")
        else:
            cors.append(cor)
    args.corrections = list(set(cors))
    # Read inputs.
    gene_list = read_list(args.inp)
    go_graph = OntoIO.read(args.gograph, "obo")
    assocs = OntoIO.read(args.assoc, "gaf", assoc_format="in_mem_sql")
    result = None
    if args.which == "term-for-term":
        result = run_term(assocs, go_graph, gene_list, args.corrections)
    elif args.which == "parent-child":
        result = run_parent_child(assocs, go_graph, gene_list, args.corrections, args.method)
    else:
        # Unreachable while subparsers are required; kept as a guard.
        parser.error("Method unimplemented!")
    assert result is not None, "An error occured while computing result"
    # Was the Python-2-only statement ``print result`` (a syntax error on
    # Python 3); the call form works on both interpreters.
    print(result)
    for outfilename, outputformat in zip(args.out, args.outputformat):
        with open(outfilename, 'w+') as outfile:
            if outputformat == 'html':
                OntoIO.pretty_print(result, go_graph, outfile, outputformat, go_to_url="http://amigo.geneontology.org/amigo/term/")
            else:
                OntoIO.pretty_print(result, go_graph, outfile, outputformat)
if __name__ == "__main__":
    main()
"julia.hermanizycka@gmail.com"
] | julia.hermanizycka@gmail.com |
a75471f9aa157b4f6ec54a2ee0b120c12d7fd17d | a10d048b1780adc0f8da05d9d1e851a529807279 | /WorkWithFile/main.py | b586d488029a6e6be5711bac9693f2bb6f88816f | [] | no_license | jiroblea/Self-Taught-Programmer-Book | c198fb2640a89d97808d66c98d4b0d21c048e627 | 35c85b2e804e767e1ffd2bdb1c40e65cfcee1e2a | refs/heads/master | 2023-08-18T13:40:03.408870 | 2021-10-24T08:42:50 | 2021-10-24T08:42:50 | 371,001,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | import utilities as util
import csv
# Ask the utilities module for the user's biography, then save it as CSV.
name, age = util.biographies()
fields = ["Names", "Age"]
rows = [name, age]
filename = input("File to be called: ")
filename = filename + ".csv"
# NOTE(review): csv.writer is used on a file opened without newline='' --
# on Windows this produces blank rows between records; confirm and consider
# open(filename, "w", newline="").
with open(filename, "w") as csvfile:
    filewriter = csv.writer(csvfile, delimiter = ",")
    filewriter.writerow(fields)
    filewriter.writerow(rows)
print(f"Name: {name} \nAge: {age}")
print(filename)
print("Done")
# # importing the csv module
# import csv
# import os
# # field names
# fields = ['Name', 'Branch', 'Year', 'CGPA']
# # data rows of csv file
# rows = [['Nikhil', 'COE', '2', '9.0'],
# ['Sanchit', 'COE', '2', '9.1'],
# ['Aditya', 'IT', '2', '9.3'],
# ['Sagar', 'SE', '1', '9.5'],
# ['Prateek', 'MCE', '3', '7.8'],
# ['Sahil', 'EP', '2', '9.1']]
# # name of csv file
# filename = "university.csv"
# # writing to csv file
# with open(filename, 'w') as csvfile:
# # creating a csv writer object
# csvwriter = csv.writer(csvfile)
# # writing the fields
# csvwriter.writerow(fields)
# # writing the data rows
# csvwriter.writerows(rows)
| [
"jirooblea@gmail.com"
] | jirooblea@gmail.com |
1b0b4bc4e5b5b0bc77020ca601dd1f1dabbccc3a | 23e74e0d5bd42de514544917f7b33206e5acf84a | /alumnos/58003-Martin-Ruggeri/copia.py | eb8dc2774c2bf67e0fdfa336f443d85570aba882 | [] | no_license | Martin-Ruggeri-Bio/lab | 2e19015dae657bb9c9e86c55d8355a04db8f5804 | 9a1c1d8f99c90c28c3be62670a368838aa06988f | refs/heads/main | 2023-08-01T07:26:42.015115 | 2021-09-20T20:21:31 | 2021-09-20T20:21:31 | 350,102,381 | 0 | 0 | null | 2021-03-21T19:48:58 | 2021-03-21T19:48:58 | null | UTF-8 | Python | false | false | 279 | py | #!/bin/python3
def copiaArchivos():
archi_org = open(input("ingrese archivo de origen:\n"), "r")
archi_des = open(input("ingrese archivo de destino:\n"), "w")
with archi_org:
archi_des.write(archi_org.read())
if __name__ == '__main__':
copiaArchivos()
| [
"martinruggeri18@gmail.com"
] | martinruggeri18@gmail.com |
281890c279ba18dc825d3b09570b691ceeaf4bc2 | 73b793758d0db27e4d67e6effbda40f5b550a9f4 | /clientes/comandos.py | a8025e49c2a97a46d2dbc01c81ced6a1bec07659 | [] | no_license | RubenMaier/python_crud_ventas | a696fb52ec4a2ac37e5983c211fc8ff9bc4fee00 | c95af4646bb8173c41e79118a6f53919308a0898 | refs/heads/master | 2020-08-12T06:38:02.986974 | 2019-12-29T13:39:30 | 2019-12-29T13:39:30 | 214,707,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,536 | py | import click
# modificamos los comandos dentro del grupo clientes
# definimos nuestros comandos básicos
from clientes.servicios import ServiciosClientes
from clientes.modelo import Cliente
@click.group() # turns the function into a click command group
def clientes():
    """Administrador de ciclo de vida de clientes"""
    # Container group only; the subcommands below attach themselves to it.
    pass
@clientes.command()
@click.option(
    '-n', # short flag
    '--nombre', # full option name
    type=str, # input value type
    prompt=True, # if the value is missing, click asks for it interactively
    help='El nombre del cliente') # help text shown by --help
@click.option(
    '-e',
    '--empresa',
    type=str,
    prompt=True,
    help='La empresa del cliente')
@click.option(
    '-em',
    '--email',
    type=str,
    prompt=True,
    help='El email del cliente')
@click.option(
    '-r',
    '--rol',
    type=str,
    prompt=True,
    help='El rol del cliente')
@click.pass_context
def crear(contexto, nombre, empresa, email, rol):
    """ Crea un nuevo cliente """
    # Build the domain object and persist it through the service layer.
    # contexto.obj['tabla_clientes'] is expected to be set by the CLI entry
    # point -- confirm against the application bootstrap.
    cliente = Cliente(nombre, empresa, email, rol)
    servicios_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    servicios_cliente.crear_cliente(cliente)
@clientes.command()
@click.pass_context
def listar(contexto):
    """Lista todo los clientes"""
    servicios_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    lista_clientes = servicios_cliente.listar_clientes()
    # click.echo is used instead of print because click normalizes console
    # output across operating systems, keeping the format consistent.
    click.echo(' ID | NOMBRE | EMPRESA | EMAIL | ROL')
    click.echo('*' * 100)
    for cliente in lista_clientes:
        click.echo('{uid} | {nombre} | {empresa} | {email} | {rol}'.format(
            uid=cliente['uid'],
            nombre=cliente['nombre'],
            empresa=cliente['empresa'],
            email=cliente['email'],
            rol=cliente['rol']))
@clientes.command()
@click.argument(
    'cliente_id',
    type=str)
@click.pass_context
def actualizar(contexto, cliente_id):
    """Actualiza el cliente"""
    servicio_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    cliente = _buscar_cliente_por_id(servicio_cliente, cliente_id)
    if cliente != None: # a matching client record was found
        # Run the interactive edit flow on a Cliente object rebuilt from
        # the stored dict, then persist the edited object.
        cliente_actualizado = _flujo_de_cliente_actualizado(
            _diccionario_a_objeto(cliente))
        servicio_cliente.actualizar_cliente(cliente_actualizado)
        click.echo('El cliente fue actualizado')
    else:
        click.echo('El cliente no fue encontrado')
@clientes.command()
@click.argument(
    'cliente_id',
    type=str)
@click.pass_context
def eliminar(contexto, cliente_id):
    """Elimina el cliente"""
    # Look the client up by id and delete it through the service layer,
    # mirroring the structure of `actualizar`.
    servicio_cliente = ServiciosClientes(contexto.obj['tabla_clientes'])
    cliente = _buscar_cliente_por_id(servicio_cliente, cliente_id)
    # Dropped the stray debug ``print(cliente)`` left over from development;
    # all user-facing output goes through click.echo like the other commands.
    if cliente is not None:
        servicio_cliente.borrar_cliente(_diccionario_a_objeto(cliente))
        click.echo('El cliente fue eliminado')
    else:
        click.echo('El cliente no fue encontrado')
def _buscar_cliente_por_id(servicio_cliente, cliente_id):
lista_clientes = servicio_cliente.listar_clientes()
# queremos al cliente de todos los clientes que se encuentren en la lista de clientes...
# que cumpla con la condición de que su id es la que nos pasaron por parametro
cliente = [
cliente for cliente in lista_clientes if cliente['uid'] == cliente_id]
if len(cliente) > 0:
return cliente[0]
return None
def _diccionario_a_objeto(cliente_dic):
    # Rehydrate a stored client dict into a Cliente domain object.
    # NOTE(review): assumes the dict keys (including 'uid') match the
    # Cliente constructor parameters -- confirm against clientes.modelo.
    return Cliente(**cliente_dic)
def _flujo_de_cliente_actualizado(cliente):
    """Prompt for each editable field, keeping the current value when the
    user just presses enter (click uses the default in that case)."""
    click.echo('Deja vacío si no quiere modificar el valor')
    # Prompt for every editable attribute in a fixed order; the current
    # value is offered as the default.
    for atributo, etiqueta in (
            ('nombre', 'Nuevo nombre'),
            ('empresa', 'Nuevo empresa'),
            ('email', 'Nuevo email'),
            ('rol', 'Nuevo rol')):
        nuevo_valor = click.prompt(
            etiqueta, type=str, default=getattr(cliente, atributo))
        setattr(cliente, atributo, nuevo_valor)
    return cliente
comandos_declarados = clientes
| [
"ruben@MacBook-Pro-2017-15-Inch.local"
] | ruben@MacBook-Pro-2017-15-Inch.local |
44f6e5b18d17003a26d61b4f72c2690406caa75e | c9307021a54fb97eb7e1a76b4aae23d9f5206c71 | /app/external/playhouse/postgres_ext.py | ce50ca9f9b6ab78f67c2aed472b3a0d3b488b098 | [] | no_license | fitzterra/sshKeyServer | 7b331ca4fc50f66371b36d86d6d2ba64ec991784 | 6146fe9f5b945288cd186a09d277bfe13171507a | refs/heads/master | 2021-01-19T14:06:27.539059 | 2017-06-08T12:36:29 | 2017-06-08T12:36:29 | 19,774,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,455 | py | """
Collection of postgres-specific extensions, currently including:
* Support for hstore, a key/value type storage
* Support for UUID field
"""
import uuid
from peewee import *
from peewee import Expression
from peewee import logger
from peewee import Node
from peewee import Param
from peewee import QueryCompiler
from peewee import SelectQuery
from psycopg2 import extensions
from psycopg2.extensions import adapt
from psycopg2.extensions import AsIs
from psycopg2.extensions import register_adapter
from psycopg2.extras import register_hstore
try:
    from psycopg2.extras import Json
except ImportError:
    # Older psycopg2 releases do not ship the Json adapter; JSONField
    # raises a clear error at construction time when this is None.
    # (Was a bare `except:`, which also swallowed unrelated errors.)
    Json = None
class _LookupNode(Node):
    # Base query-tree node for a lookup into a composite column value
    # (JSON document or array): `node` is the column/expression being
    # indexed and `parts` is the ordered path of lookup steps.
    def __init__(self, node, parts):
        self.node = node
        self.parts = parts
        super(_LookupNode, self).__init__()
    def clone_base(self):
        # Copy the parts list so a cloned node does not share mutable state.
        return type(self)(self.node, list(self.parts))
class JsonLookup(_LookupNode):
    """Chained JSON path lookup: col['a']['b'] accumulates ['a', 'b']."""
    def __getitem__(self, value):
        # Each subscript extends the lookup path with one more step.
        extended_path = self.parts + [value]
        return JsonLookup(self.node, extended_path)
class ObjectSlice(_LookupNode):
    # Index/slice lookup into an array-typed column.
    @classmethod
    def create(cls, node, value):
        """Normalize the accepted spellings -- slice objects, bare ints
        and 'start:stop' strings -- into a concrete list of ints."""
        if isinstance(value, slice):
            parts = [value.start or 0, value.stop or 0]
        elif isinstance(value, int):
            parts = [value]
        else:
            # Materialize eagerly: under Python 3, map() returns a
            # one-shot iterator, which would be exhausted after a single
            # iteration and break clone_base()'s list() copy.
            parts = [int(part) for part in value.split(':')]
        return cls(node, parts)
    def __getitem__(self, value):
        # Support chained subscripting: col[1][2] etc.
        return ObjectSlice.create(self, value)
class _Array(Node):
    # Wraps a Python list destined for an ARRAY column together with its
    # field, so the psycopg2 adapter (adapt_array, below in this module)
    # can emit an explicit element-type cast.
    def __init__(self, field, items):
        self.field = field
        self.items = items
        super(_Array, self).__init__()
def adapt_array(arr):
    # psycopg2 adapter for _Array: render the Python list and append an
    # explicit '::type[]...' cast so postgres knows the element type and
    # the number of array dimensions.
    conn = arr.field.model_class._meta.database.get_conn()
    items = adapt(arr.items)
    items.prepare(conn)  # bind the connection for correct quoting/encoding
    return AsIs('%s::%s%s' % (
        items,
        arr.field.get_column_type(),
        '[]'* arr.field.dimensions))
register_adapter(_Array, adapt_array)
class IndexedField(Field):
    """Base class for field types that are indexed by default and carry a
    configurable postgres index method (GiST, GIN, ...)."""
    def __init__(self, index_type='GiST', *args, **kwargs):
        # Index these columns unless the caller explicitly opted out.
        if 'index' not in kwargs:
            kwargs['index'] = True
        super(IndexedField, self).__init__(*args, **kwargs)
        self.index_type = index_type
class ArrayField(IndexedField):
    # Postgres ARRAY column. Wraps an inner scalar field instance (which
    # supplies the element db type) and tracks the array's dimensionality.
    def __init__(self, field_class=IntegerField, dimensions=1,
                 index_type='GIN', *args, **kwargs):
        # Delegate element-type handling to an inner scalar field.
        self.__field = field_class(*args, **kwargs)
        self.dimensions = dimensions
        self.db_field = self.__field.get_db_field()
        super(ArrayField, self).__init__(
            index_type=index_type, *args, **kwargs)
    def __ddl_column__(self, column_type):
        # Append one '[]' per dimension to the scalar column's DDL.
        sql = self.__field.__ddl_column__(column_type)
        sql.value += '[]' * self.dimensions
        return sql
    def __getitem__(self, value):
        # column[i] / column[a:b] -> array index/slice expression.
        return ObjectSlice.create(self, value)
    def contains(self, *items):
        # Array contains all of the given items (`@>` once ops are mapped).
        return Expression(self, OP_ACONTAINS, _Array(self, list(items)))
    def contains_any(self, *items):
        # Array overlaps any of the given items (`&&` once ops are mapped).
        return Expression(self, OP_ACONTAINS_ANY, _Array(self, list(items)))
class DateTimeTZField(DateTimeField):
    # Timezone-aware timestamp; maps to "timestamp with time zone" via the
    # register_fields() call at the bottom of this module.
    db_field = 'datetime_tz'
class HStoreField(IndexedField):
    """Field type for postgres hstore (string key/value) columns."""
    db_field = 'hash'
    def __init__(self, *args, **kwargs):
        super(HStoreField, self).__init__(*args, **kwargs)
    def __getitem__(self, key):
        # column[key] -> single-value lookup.
        return Expression(self, OP_HKEY, Param(key))
    def keys(self):
        return fn.akeys(self)
    def values(self):
        return fn.avals(self)
    def items(self):
        return fn.hstore_to_matrix(self)
    def slice(self, *args):
        # Sub-hstore containing only the given keys.
        return fn.slice(self, Param(list(args)))
    def exists(self, key):
        return fn.exist(self, key)
    def defined(self, key):
        # True when `key` is present with a non-NULL value.
        return fn.defined(self, key)
    def update(self, **data):
        # hstore concatenation: overwrite/add the given key/value pairs.
        return Expression(self, OP_HUPDATE, data)
    def delete(self, *keys):
        return fn.delete(self, Param(list(keys)))
    def contains(self, value):
        # Dispatch on argument type: dict -> containment of all pairs,
        # list/tuple -> containment of all keys, scalar -> single key.
        if isinstance(value, dict):
            return Expression(self, OP_HCONTAINS_DICT, Param(value))
        elif isinstance(value, (list, tuple)):
            return Expression(self, OP_HCONTAINS_KEYS, Param(value))
        return Expression(self, OP_HCONTAINS_KEY, value)
    def contains_any(self, *keys):
        # Bug fix: previously referenced the undefined name `value`
        # (NameError at call time); pass the collected keys instead.
        return Expression(self, OP_HCONTAINS_ANY_KEY, Param(list(keys)))
class JSONField(Field):
    # Field for the native postgres `json` column type.
    db_field = 'json'
    def __init__(self, *args, **kwargs):
        # Fail fast at model-definition time if psycopg2 lacks the Json
        # adapter (see the guarded import at the top of this module).
        if Json is None:
            raise Exception('Your version of psycopg2 does not support JSON.')
        super(JSONField, self).__init__(*args, **kwargs)
    def db_value(self, value):
        # Wrap in psycopg2's Json so the driver serializes the value.
        return Json(value)
    def __getitem__(self, value):
        # column[key] starts a chained JSON path lookup.
        return JsonLookup(self, [value])
class UUIDField(Field):
    # Field for the native postgres `uuid` column type.
    db_field = 'uuid'
    def db_value(self, value):
        # Store the canonical string form.
        return str(value)
    def python_value(self, value):
        # Rehydrate the database string into a uuid.UUID instance.
        return uuid.UUID(value)
# Internal operator tokens for hstore (H*) and array (A*) expressions.
# The token strings are opaque identifiers; their actual SQL spellings
# are registered on PostgresqlExtDatabase at the bottom of this module.
OP_HKEY = 'key'
OP_HUPDATE = 'H@>'
OP_HCONTAINS_DICT = 'H?&'
OP_HCONTAINS_KEYS = 'H?'
OP_HCONTAINS_KEY = 'H?|'
OP_HCONTAINS_ANY_KEY = 'H||'
OP_ACONTAINS = 'A@>'
OP_ACONTAINS_ANY = 'A||'
class PostgresqlExtCompiler(QueryCompiler):
    # Extends the stock compiler with index-method DDL and SQL generation
    # for the array-slice and JSON-lookup nodes defined in this module.
    def _create_index(self, model_class, fields, unique=False):
        clause = super(PostgresqlExtCompiler, self)._create_index(
            model_class, fields, unique)
        # Allow fields to specify a type of index. HStore and Array fields
        # may want to use GiST indexes, for example.
        # Note: when several indexed fields are present, the last one's
        # index_type wins.
        index_type = None
        for field in fields:
            if isinstance(field, IndexedField):
                index_type = field.index_type
        if index_type:
            # Insert 'USING <method>' just before the column list.
            clause.nodes.insert(-1, SQL('USING %s' % index_type))
        return clause
    def _parse(self, node, alias_map, conv):
        # Let the base compiler try first; only handle nodes it reported
        # as unknown.
        sql, params, unknown = super(PostgresqlExtCompiler, self)._parse(
            node, alias_map, conv)
        if unknown:
            if isinstance(node, ObjectSlice):
                unknown = False
                sql, params = self.parse_node(node.node, alias_map, conv)
                # Postgresql uses 1-based indexes.
                parts = [str(part + 1) for part in node.parts]
                sql = '%s[%s]' % (sql, ':'.join(parts))
            if isinstance(node, JsonLookup):
                unknown = False
                sql, params = self.parse_node(node.node, alias_map, conv)
                lookups = [sql]
                for part in node.parts:
                    part_sql, part_params = self.parse_node(
                        part, alias_map, conv)
                    lookups.append(part_sql)
                    params.extend(part_params)
                # The last lookup should be converted to text.
                # ('->' keeps json, '->>' extracts text.)
                head, tail = lookups[:-1], lookups[-1]
                sql = '->>'.join(('->'.join(head), tail))
        return sql, params, unknown
class PostgresqlExtDatabase(PostgresqlDatabase):
    # Database subclass wiring in the extension compiler, hstore support
    # and optional server-side (named) cursors for SELECT statements.
    compiler_class = PostgresqlExtCompiler
    def __init__(self, *args, **kwargs):
        # When True, every SELECT runs through a named cursor so results
        # are streamed from the server instead of fetched all at once.
        self.server_side_cursors = kwargs.pop('server_side_cursors', False)
        super(PostgresqlExtDatabase, self).__init__(*args, **kwargs)
    def get_cursor(self, name=None):
        # A non-None name makes psycopg2 create a server-side cursor.
        return self.get_conn().cursor(name=name)
    def execute_sql(self, sql, params=None, require_commit=True,
                    named_cursor=False):
        logger.debug((sql, params))
        use_named_cursor = (named_cursor or (
            self.server_side_cursors and
            sql.lower().startswith('select')))
        with self.exception_wrapper():
            if use_named_cursor:
                # Named cursors get a unique name and must not commit,
                # since the cursor lives inside the open transaction.
                cursor = self.get_cursor(name=str(uuid.uuid1()))
                require_commit = False
            else:
                cursor = self.get_cursor()
            try:
                res = cursor.execute(sql, params or ())
            except Exception as exc:
                logger.exception('%s %s', sql, params)
                # The handler decides whether the error should propagate.
                if self.sql_error_handler(exc, sql, params, require_commit):
                    raise
            else:
                if require_commit and self.get_autocommit():
                    self.commit()
        return cursor
    def _connect(self, database, **kwargs):
        conn = super(PostgresqlExtDatabase, self)._connect(database, **kwargs)
        # Teach psycopg2 to adapt hstore values to/from python dicts.
        register_hstore(conn, globally=True)
        return conn
class ServerSideSelectQuery(SelectQuery):
    # SelectQuery variant that executes through a named (server-side)
    # cursor so rows are streamed rather than fetched in one batch.
    @classmethod
    def clone_from_query(cls, query):
        # Re-type an ordinary SelectQuery by copying its attributes onto
        # a fresh server-side instance.
        clone = ServerSideSelectQuery(query.model_class)
        return query._clone_attributes(clone)
    def _execute(self):
        sql, params = self.sql()
        # require_commit=False: read-only; named_cursor=True triggers the
        # server-side cursor path in PostgresqlExtDatabase.execute_sql.
        return self.database.execute_sql(
            sql, params, require_commit=False, named_cursor=True)
# Map the extension fields' db_field identifiers onto their postgres
# column-type DDL strings.
PostgresqlExtDatabase.register_fields({
    'datetime_tz': 'timestamp with time zone',
    'hash': 'hstore',
    'json': 'json',
    'uuid': 'uuid',
})
# Map the custom operator tokens (defined above) onto the SQL operator
# spellings the compiler should emit.
PostgresqlExtDatabase.register_ops({
    OP_HCONTAINS_DICT: '@>',
    OP_HCONTAINS_KEYS: '?&',
    OP_HCONTAINS_KEY: '?',
    OP_HCONTAINS_ANY_KEY: '?|',
    OP_HKEY: '->',
    OP_HUPDATE: '||',
    OP_ACONTAINS: '@>',
    OP_ACONTAINS_ANY: '&&',
})
def ServerSide(select_query):
    # Flag query for execution using server-side cursors.
    clone = ServerSideSelectQuery.clone_from_query(select_query)
    # The named cursor only lives inside a transaction, so the
    # transaction stays open for the whole duration of the iteration.
    with clone.database.transaction():
        # Execute the query.
        query_result = clone.execute()
        # Patch QueryResultWrapper onto original query.
        select_query._qr = query_result
        # Expose generator for iterating over query.
        for obj in query_result.iterator():
            yield obj
| [
"github_subs@icave.net"
] | github_subs@icave.net |
74b4ed23694523deb7002963f183afb60094dad0 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /decoding/GAD/fairseq/modules/scalar_bias.py | c96247c75914fabb8a2b7ff731bb82b588f72690 | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 888 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
class ScalarBias(torch.autograd.Function):
    """
    Adds a vector of scalars, used in self-attention mechanism to allow
    the model to optionally attend to this vector instead of the past
    """

    @staticmethod
    def forward(ctx, input, dim, bias_init):
        # Grow the target dimension by one slot and fill the whole
        # output with the bias value.
        out_shape = list(input.size())
        out_shape[dim] += 1
        result = input.new(*out_shape).fill_(bias_init)
        # Copy the original tensor into positions 1..end along `dim`,
        # leaving index 0 holding the bias.
        result.narrow(dim, 1, out_shape[dim] - 1).copy_(input)
        ctx.dim = dim
        return result

    @staticmethod
    def backward(ctx, grad):
        # Drop the gradient of the injected bias slot; dim and bias_init
        # are non-tensor inputs and receive no gradient.
        trimmed = grad.narrow(ctx.dim, 1, grad.size(ctx.dim) - 1)
        return trimmed, None, None
def scalar_bias(input, dim, bias_init=0):
    # Functional wrapper: prepend one `bias_init`-valued slot along `dim`.
    return ScalarBias.apply(input, dim, bias_init)
| [
"tage@microsoft.com"
] | tage@microsoft.com |
d6758b1214e18affacc304004dfb23d732194dc0 | 07cf86733b110a13224ef91e94ea5862a8f5d0d5 | /taum_and_bday/taum_and_bday.py | 2dca00a8c687bce30c4615338d881eba6f673268 | [] | no_license | karsevar/Code_Challenge_Practice | 2d96964ed2601b3beb324d08dd3692c3d566b223 | 88d4587041a76cfd539c0698771420974ffaf60b | refs/heads/master | 2023-01-23T17:20:33.967020 | 2020-12-14T18:29:49 | 2020-12-14T18:29:49 | 261,813,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | def taumBday(b, w, bc, wc, z):
# Write your code here
# create a variable named black_cost
# create a variable named white_cost
# check if bc + z is less than wc:
# if so overwrite white_cost with b * (bc + z)
# overwrite black_cost with b * (bc)
# elif wc + z is less than bc:
# if so overwrite black_cost with w * (wc + z)
# overwrite white_cost with w * (wc)
# else
# overwrite black_cost with b * (bc + z)
# overwrite white_cost with w * (wc + z)
black_cost = 0
white_cost = 0
if (bc + z) < wc:
white_cost = w * (bc + z)
black_cost = b * bc
elif (wc + z) < bc:
white_cost = w * wc
black_cost = b * (wc + z)
else:
white_cost = w * wc
black_cost = b * bc
return white_cost + black_cost | [
"masonkarsevar@gmail.com"
] | masonkarsevar@gmail.com |
2fd7e86a0345548fe89a360c898f938f9227bdb2 | 5b38dd549d29322ae07ad0cc68a28761989ef93a | /cc_lib/_util/_logger.py | fc66aac68804a599831a0405e5eaf400e78fd1cb | [
"Apache-2.0"
] | permissive | SENERGY-Platform/client-connector-lib | d54ea800807892600cf08d3b2a4f00e8340ab69c | e365fc4bed949e84cde81fd4b5268bb8d4f53c12 | refs/heads/master | 2022-09-03T00:03:29.656511 | 2022-08-24T11:18:22 | 2022-08-24T11:18:22 | 159,316,125 | 1 | 2 | Apache-2.0 | 2020-05-27T07:47:14 | 2018-11-27T10:15:38 | Python | UTF-8 | Python | false | false | 784 | py | """
Copyright 2019 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ('get_logger',)
import logging
logger = logging.getLogger('connector')
logger.propagate = False
def get_logger(name: str) -> logging.Logger:
return logger.getChild(name)
| [
"42994541+y-du@users.noreply.github.com"
] | 42994541+y-du@users.noreply.github.com |
79331affbc571e2fd6380690621972ed904a93b2 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2015_11_15_5deg_FIP_db/check_db_symm_v3.py | 02760a80bd14513da9f994e3a337517bca50323a | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,154 | py | import numpy as np
import matplotlib.pyplot as plt
import euler_func as ef
import h5py
"""
check whether the database exhibits hexagonal-triclinic crystal
symmetry
first find 12 symmetric orientations in triclinic FZ
(0<=phi1<2*pi, 0<=Phi<=pi, 0<=phi2<2*pi)
for each deformation mode sample (theta), check if the value of
interest is the same for all symmetric orientations
"""
# --- sampling parameters (Python 2 script) ------------------------------
inc = 5  # degree increment for angular variables
np.random.seed()  # generate seed for random
symhex = ef.symhex()  # the 12 hexagonal crystal symmetry operators
# conversion factors: radians <-> degrees, and radians -> array subscript
r2d = 180./np.pi
d2r = np.pi/180.
r2s = r2d/inc
n_th_max = 120/inc # number of theta samples in FOS
n_max = 360/inc # number of phi1, Phi and phi2 samples in FOS
n_hlf = 180/inc # half n_max
n_th = (60/inc)+1 # number of theta samples for FZ
n_p1 = 360/inc # number of phi1 samples for FZ
n_P = (90/inc)+1 # number of Phi samples for FZ
n_p2 = 60/inc # number of phi2 samples for FZ
print "angle space shape: %s" % str(np.array([n_th, n_p1, n_P, n_p2]))
# only look at last in series for value of interest
db = np.load("pre_fft.npy")[:n_th, ..., -1]
print "db shape: %s" % str(db.shape)
# n_FZ: total number of sampled orientations in FZ
n_FZ = n_p1*n_P*n_p2
# FZ_indx: vector of linear indices for sampled orientations in FZ
FZ_indx = np.arange(n_FZ)
print "FZ_indx shape: %s" % str(FZ_indx.shape)
# FZ_subs: array of subscripts of sampled orientations in FZ
FZ_subs = np.unravel_index(FZ_indx, (n_p1, n_P, n_p2))
FZ_subs = np.array(FZ_subs).transpose()
print "FZ_subs shape: %s" % str(FZ_subs.shape)
# FZ_euler: array of euler angles of sampled orientations in FZ
# (subscript -> degrees via *inc, then degrees -> radians via *d2r)
FZ_euler = np.float64(FZ_subs*inc*d2r)
# g: array of orientation matrices (sample to crystal frame rotation
# matrices) for orientations in fundamental zone
g = ef.bunge2g(FZ_euler[:, 0],
               FZ_euler[:, 1],
               FZ_euler[:, 2])
print "g shape: %s" % str(g.shape)
# FZ_euler_sym: array of euler angles of sampled orientations in
# FZ and their symmetric equivalents
FZ_euler_sym = np.zeros((12, n_FZ, 3))
# find the symmetric equivalents to the euler angle within the FZ
for sym in xrange(12):
    op = symhex[sym, ...]
    # g_sym: array of orientation matrices transformed with a
    # hexagonal symmetry operator
    g_sym = np.einsum('ik,...kj', op, g)
    # convert the rotated orientation matrices back to Bunge-Euler angles
    tmp = np.array(ef.g2bunge(g_sym)).transpose()
    if sym == 0:
        print "g_sym shape: %s" % str(g_sym.shape)
        print "tmp shape: %s" % str(tmp.shape)
    del g_sym
    FZ_euler_sym[sym, ...] = tmp
    del tmp
# convert euler angles to subscripts
FZ_subs_sym = np.int64(np.round(FZ_euler_sym*r2s))
# make sure all of the euler angles are within the appropriate
# ranges (eg. not negative): wrap negative subscripts by a full period
for ii in xrange(3):
    lt = FZ_subs_sym[..., ii] < 0.0
    FZ_subs_sym[..., ii] += n_max*lt
# sanity check: should print 0 when no negative subscripts remain
print np.sum(FZ_subs_sym < 0)
# determine the deviation from symmetry by finding the value of
# the function for symmetric locations and comparing these values
f = h5py.File('symm_check.hdf5', 'w')
# error layout per (theta, operator, orientation):
# columns 0-2 = euler angles (deg), 3 = symmetric value, 4 = |orig - sym|
error = f.create_dataset("error", (n_th, 12, n_FZ, 5))
for th in xrange(n_th):
    for sym in xrange(12):
        error[th, sym, :, 0:3] = FZ_subs_sym[sym, ...]*inc
        # db values at the reference (operator 0) orientations...
        origFZ = db[th,
                    FZ_subs_sym[0, :, 0],
                    FZ_subs_sym[0, :, 1],
                    FZ_subs_sym[0, :, 2]]
        # ...and at their symmetric equivalents under operator `sym`
        symFZ = db[th,
                   FZ_subs_sym[sym, :, 0],
                   FZ_subs_sym[sym, :, 1],
                   FZ_subs_sym[sym, :, 2]]
        if th == 0 and sym == 0:
            print "origFZ shape: %s" % str(origFZ.shape)
            print "symFZ shape: %s" % str(symFZ.shape)
        if th == 0:
            print "operator number: %s" % sym
            idcheck = np.all(FZ_euler_sym[0, ...] == FZ_euler_sym[sym, ...])
            print "are Euler angles in different FZs identical?: %s" % str(idcheck)
        # report any exact zeros, which may flag unsampled db locations
        orig_0sum = np.sum(origFZ == 0.0)
        sym_0sum = np.sum(symFZ == 0.0)
        if orig_0sum != 0 or sym_0sum != 0:
            print "number of zero values in origFZ: %s" % orig_0sum
            print "number of zero values in symFZ: %s" % sym_0sum
        error[th, sym, :, 3] = symFZ
        error[th, sym, :, 4] = np.abs(origFZ-symFZ)
# pull the full dataset into memory for the analysis below, then close
error_sec = error[...]
f.close()
# perform error analysis
# generate random deformation mode and euler angle
# th_rand = np.int64(np.round((n_th-1)*np.random.rand()))
# g_rand = np.int64(np.round((n_FZ-1)*np.random.rand()))
# instead of a random location, inspect the worst (largest-error) one
badloc = np.argmax(error_sec[..., 4])
badloc = np.unravel_index(badloc, error_sec[..., 3].shape)
th_rand = badloc[0]
g_rand = badloc[2]
print "\nexample comparison:"
print "deformation mode: %s degrees" % str(np.float(th_rand*inc))
# show the 12 symmetric equivalents and their stored values
for sym in xrange(12):
    print "operator number: %s" % sym
    eul_rand = error_sec[th_rand, sym, g_rand, 0:3]
    print "euler angles: %s (degrees)" % str(eul_rand)
    val_rand = error_sec[th_rand, sym, g_rand, 3]
    print "value of interest: %s" % str(val_rand)
# flatten the per-location error magnitudes for the summary statistics
errvec = error_sec[..., 4].reshape(error_sec[..., 4].size)
print "\noverall error metrics:"
print "mean database value: %s" % np.mean(db)
print "mean error: %s" % np.mean(errvec)
print "maximum error: %s" % np.max(errvec)
print "standard deviation of error: %s" % np.std(errvec)
print "total number of locations checked: %s" % (errvec.size)
err_count = np.sum(errvec != 0.0)
# plot the error histograms
error_indx = errvec != 0.0
print error_indx.shape
loc_hist = errvec[error_indx]
print loc_hist.shape
err_count = np.sum(loc_hist != 0.0)
print "number of locations with nonzero error: %s" % err_count
# histograms of the euler angles at which nonzero error occurs
errvec_p1 = error_sec[..., 0].reshape(error_sec[..., 0].size)[error_indx]
plt.figure(num=4, figsize=[10, 6])
plt.hist(errvec_p1, 361)
errvec_P = error_sec[..., 1].reshape(error_sec[..., 1].size)[error_indx]
plt.figure(num=5, figsize=[10, 6])
plt.hist(errvec_P, 361)
# NOTE(review): column 0 (phi1) is reused here although phi2 lives in
# column 2 -- this was likely meant to be error_sec[..., 2]; confirm.
errvec_p2 = error_sec[..., 0].reshape(error_sec[..., 0].size)[error_indx]
plt.figure(num=6, figsize=[10, 6])
plt.hist(errvec_p2, 361)
# plot the error histograms
plt.figure(num=1, figsize=[10, 6])
error_hist = error_sec[..., 4]
plt.hist(error_hist.reshape(error_hist.size), 100)
# plot the symmetric orientations in euler space
# figure 2: phi1 vs Phi section (boundary lines per the axis labels)
plt.figure(2)
plt.plot(np.array([0, 360, 360, 0, 0]), np.array([0, 0, 180, 180, 0]), 'k-')
plt.plot(np.array([0, 360]), np.array([90, 90]), 'k-')
plt.xlabel('$\phi_1$')
plt.ylabel('$\Phi$')
sc = 1.05
plt.axis([-(sc-1)*360, sc*360, -(sc-1)*180, sc*180])
# figure 3: Phi vs phi2 section with 60-degree phi2 subdivision lines
plt.figure(3)
plt.plot(np.array([0, 180, 180, 0, 0]), np.array([0, 0, 360, 360, 0]), 'k-')
plt.plot(np.array([90, 90]), np.array([0, 360]), 'k-')
plt.plot(np.array([0, 180]), np.array([60, 60]), 'k-')
plt.plot(np.array([0, 180]), np.array([120, 120]), 'k-')
plt.plot(np.array([0, 180]), np.array([180, 180]), 'k-')
plt.plot(np.array([0, 180]), np.array([240, 240]), 'k-')
plt.plot(np.array([0, 180]), np.array([300, 300]), 'k-')
plt.xlabel('$\Phi$')
plt.ylabel('$\phi2$')
sc = 1.05
plt.axis([-(sc-1)*180, sc*180, -(sc-1)*360, sc*360])
# overlay the 12 symmetric equivalents of the worst-error orientation
eul_plt = error_sec[th_rand, :, g_rand, 0:3]
plt.figure(2)
plt.plot(eul_plt[:, 0], eul_plt[:, 1],
         c='b', marker='o', linestyle='none')
plt.figure(3)
plt.plot(eul_plt[:, 1], eul_plt[:, 2],
         c='b', marker='o', linestyle='none')
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
8ca486bf9468da73f85b937419fe7251c45b06d1 | 56a446ab5ac2994607d92cf7e5453fc39d611e8d | /book/_build/jupyter_execute/docs/Semantic Textual Similarity.py | 52aa5708ea51b5dc4ee1fab6e21cba4cb6e9e835 | [] | no_license | Python-Repository-Hub/klue-baseline | 66dcd8d305d6be184cda31460d29f7043eb808f2 | e4c419feadb30f00a5176cb85f2620964a021531 | refs/heads/main | 2023-07-30T15:05:08.241086 | 2021-09-29T03:12:13 | 2021-09-29T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | #!/usr/bin/env python
# coding: utf-8
# # Semantic Textual Similarity
# ## 1. 예시
# - (본문 예시)
# - .ipynb 형식입니다.
# In[1]:
print("test")
# In[ ]:
| [
"jih020202@gmail.com"
] | jih020202@gmail.com |
b74e4582bc25ab70c230b807cd60cc78749dc992 | 34b2402c2058267d9d154b7d150aa61b5324d73c | /proxymodules/http_strip.py | e96a63fe9d0d87f53017ee7c584256476f27ba82 | [
"MIT"
] | permissive | ickerwx/tcpproxy | 0d56241187a3e90d21afb3937c511c88bb389706 | fdf749ef941eca3fc02ddc456dfe36f53b58e22a | refs/heads/master | 2023-08-29T21:07:14.823784 | 2023-07-15T18:20:41 | 2023-07-15T18:20:41 | 32,262,026 | 295 | 97 | MIT | 2023-08-24T18:28:40 | 2015-03-15T13:32:05 | Python | UTF-8 | Python | false | false | 857 | py | #!/usr/bin/env python3
import os.path as path
class Module:
def __init__(self, incoming=False, verbose=False, options=None):
# extract the file name from __file__. __file__ is proxymodules/name.py
self.name = path.splitext(path.basename(__file__))[0]
self.description = 'Remove HTTP header from data'
self.incoming = incoming # incoming means module is on -im chain
def detect_linebreak(self, data):
line = data.split(b'\n', 1)[0]
if line.endswith(b'\r'):
return b'\r\n' * 2
else:
return b'\n' * 2
def execute(self, data):
delimiter = self.detect_linebreak(data)
if delimiter in data:
data = data.split(delimiter, 1)[1]
return data
if __name__ == '__main__':
print('This module is not supposed to be executed alone!')
| [
"mail@renewerner.net"
] | mail@renewerner.net |
e577ea4fe5dde95179497ef4002865bb86ea6907 | 3c9dbfff9e5ac88ca42f5a21f9dff8bcf40ed4b2 | /chapter03_stacks_queues/06_animal_shelter.py | 735b56f77401bf6ee88b3b8d875ff460a0762cb5 | [] | no_license | optionalg/cracking_the_coding_interview | 50834f3c9aea7d24cd372bf3022c4bf01ec90957 | 2e962b3e272074b428ace8e85e6d4a735cfe6afa | refs/heads/master | 2021-06-20T21:15:48.063618 | 2017-07-18T14:42:03 | 2017-07-18T14:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | from ctci.chapter02_lists.LinkedList import LinkedList
class Node:
def __init__(self, value, name):
self.data = value
self.name = name
self.next = None
class AnimalQueue:
def __init__(self):
self.dogs = LinkedList()
self.cats = LinkedList()
self.any = LinkedList()
def enqueue(self, value, name):
new_node = Node(value, name)
self.any.add_in_the_end(new_node)
if value == "cat":
self.cats.add_in_the_end(new_node)
elif value == "dog":
self.dogs.add_in_the_end(new_node)
def dequeue_any(self):
popped = self.any.pop()
if popped.data == "cat":
self.cats.pop()
elif popped.data == "dog":
self.dogs.pop()
return popped.name
def dequeue_dog(self):
popped = self.dogs.pop()
self.any.remove(popped.name)
return popped.name
def dequeue_cat(self):
popped = self.cats.pop()
self.any.remove(popped.name)
return popped.name
q = AnimalQueue()
q.enqueue('cat', 'jasper')
q.enqueue('dog', 'marcel')
q.enqueue('cat', 'alice')
q.enqueue('dog', 'pongo')
q.enqueue('cat', 'nala')
q.enqueue('dog', 'nelly')
q.enqueue('dog', 'rudy')
print(q.dequeue_any())
print(q.dequeue_cat())
print(q.dequeue_dog())
| [
"lito.kriara@disneyresearch.com"
] | lito.kriara@disneyresearch.com |
0608ed6c0bd8ae5268945ba712871159548338c1 | bab51ba1ff7a839ca16bcc086ae55451a3a16823 | /stylesite/stylesite/settings.py | 087b2b12ef8b7185ae2fe0c599b1e11b009b3368 | [] | no_license | melissapnyc/insphairation | a96197e03556f63c2c49b4a487a5296d25224b41 | 4e3ba94430a476132da645fdbb8c17e0e87f4468 | refs/heads/master | 2020-05-05T02:54:10.174628 | 2014-04-03T17:13:04 | 2014-04-03T17:13:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,999 | py | """
Django settings for stylesite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = ')cws#g^5+cajokq)#zk#=f08i$^7klk9fz(i8=qbod!q#41qkd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty is fine while DEBUG is True; must list served hostnames in prod.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hairstyles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stylesite.urls'
WSGI_APPLICATION = 'stylesite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Development default: file-backed SQLite alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| [
"melissapallay@gmail.com"
] | melissapallay@gmail.com |
6a9b79f848da7a24ffbeb03eedae35d4063b8580 | d78a55788407c58ab7e5d7edc53a2887f46b20c4 | /Basic/SubList.py | ee95eab4cc76e85a710f55a883bcce91dc98e972 | [] | no_license | aakib7/python | db085e7eee10d3427906da7f32349a9f16a82a3e | 40ffa242ebdf78f042e559cecd20722949607b0b | refs/heads/main | 2023-07-01T04:24:03.472448 | 2021-08-04T20:16:39 | 2021-08-04T20:16:39 | 392,812,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | matrix = [[1,2,3],[4,5,6],[7,8,9]]
print(matrix[0]) # print [1,2,3]
print(matrix[0][1]) # print 2
# for i in matrix: # print [1,2,3]\n[3,4,5]\n[6,7,8]
# print(i)
count = 0
for sublist in matrix: # frist time loop exicute [1,2,3] store in sublist and so on
count +=1
print(f"{count} Sublist:")
for i in sublist: # after 1st for loop 2nd loop exicute till (elements in sublist)
print(i)
| [
"ajmehdi5@gmail.com"
] | ajmehdi5@gmail.com |
e0630106cd1f4d50aff9fea679ff50ae76a6ff20 | 775c92c038d418a97f483a5c6f3692562aa5e616 | /test_code.py | fed80156fec837c60c2554d0dd3844a941c063d6 | [] | no_license | seminvest/investment | 2878efd9ca8a09e5c708fec99eaea80b6e22cb0e | af5a9f4c4163bbcdc293d3ebad0d551464d96f12 | refs/heads/master | 2021-01-20T18:15:26.114699 | 2019-05-24T19:07:26 | 2019-05-24T19:07:26 | 90,911,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py |
import pandas as pd
def test_run():
    """Load adjusted-close price series for several tickers, inner-join
    them on date into one DataFrame, and print the result.

    Expects <SYMBOL>.csv files with 'date' and 'adjusted close' columns
    in the current working directory.
    """
    start_date ='2018-08-14'
    end_date='2019-01-07'
    dates=pd.date_range(start_date,end_date)
    #print(dates[0])
    # empty frame indexed by the full date range; the inner joins below
    # keep only dates present in every symbol's file
    df1=pd.DataFrame(index=dates)
    symbols = ['TSLA','FB','AAPL']
    for symbol in symbols:
        df_temp=pd.read_csv("{}.csv".format(symbol),index_col="date",parse_dates=True,usecols=['date','adjusted close'],na_values=['nan'])
        # rename the price column after the ticker so columns stay distinct
        df_temp = df_temp.rename(columns={"adjusted close":symbol})
        df1=df1.join(df_temp,how='inner')
    print(df1)
    # NOTE(review): dfTSLA is read but never used (its joins below are
    # commented out) -- consider removing this extra file read.
    dfTSLA = pd.read_csv("./data/TSLA.csv", index_col="date",parse_dates=True,usecols=['date','adjusted close'],na_values=['nan'])
    #df1=df1.join(dfTSLA,how='inner')
    #df1=df1.dropna()
if __name__ == "__main__":
    test_run()
"rtang7813@icloud.com"
] | rtang7813@icloud.com |
0726392c3e962800ab537f902236c9ddf78370f0 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/hinventory/account.py | 7b581f55213470c10ef7d664c6a48948edf9b960 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,184 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Account(Mo):
    """
    Auto-generated Cobra model class for the hinventory:Account managed
    object (concrete class name "hinventoryAccount", RN format
    "account-[name]").  The object is read-only and non-configurable; all
    behaviour is driven by the ClassMeta definition below.
    """

    meta = ClassMeta("cobra.model.hinventory.Account")

    # --- Class-level metadata -------------------------------------------
    meta.moClassName = "hinventoryAccount"
    meta.rnFormat = "account-[%(name)s]"
    meta.category = MoCategory.REGULAR
    meta.label = "AInventory"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x600c101
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # --- Containment: possible child classes and their RN prefixes ------
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.hinventory.Region")
    meta.childClasses.add("cobra.model.hinventory.StaleResource")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.hcloud.RtSelectorToTagParent")
    meta.childClasses.add("cobra.model.hinventory.ResourceGrp")

    meta.childNamesAndRnPrefix.append(("cobra.model.hcloud.RtSelectorToTagParent", "rtselectorToTagParent-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.ResourceGrp", "resourcegrp-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.Region", "region-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.childNamesAndRnPrefix.append(("cobra.model.hinventory.StaleResource", "stale"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    meta.parentClasses.add("cobra.model.hinventory.Provider")

    # --- Inheritance chain -----------------------------------------------
    meta.superClasses.add("cobra.model.hcloud.AResource")
    meta.superClasses.add("cobra.model.hinventory.AInventory")
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.hcloud.ACloudBase")
    meta.superClasses.add("cobra.model.hinventory.AAInventory")

    meta.rnPrefixes = [
        ('account-', True),
    ]

    # --- Property definitions --------------------------------------------
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "cloudName", "cloudName", 53279, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("cloudName", prop)

    prop = PropMeta("str", "cloudProviderId", "cloudProviderId", 54108, PropCategory.REGULAR)
    prop.label = "Resource ID in Cloud Provider"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("cloudProviderId", prop)

    prop = PropMeta("str", "configDn", "configDn", 54120, PropCategory.REGULAR)
    prop.label = "DN of object that created the resource"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("configDn", prop)

    prop = PropMeta("str", "delegateDn", "delegateDn", 53375, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("delegateDn", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "isStale", "isStale", 54109, PropCategory.REGULAR)
    prop.label = "Resource out-of-sync with current configuration"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = False
    prop.defaultValueStr = "no"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("isStale", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    # "name" is the naming property (see namingProps below).
    prop = PropMeta("str", "name", "name", 50766, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 50279, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    prop = PropMeta("str", "resolvedObjDn", "resolvedObjDn", 50280, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("resolvedObjDn", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    meta.namingProps.append(getattr(meta.props, "name"))

    getattr(meta.props, "name").needDelimiter = True

    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        # "name" is the single naming value used to build this MO's RN.
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
690be3e020160ec55919e9b80e9a4669bde616bf | 08b623f814a04467d5602b7fded7a5767d696763 | /music_controller/music_controller/urls.py | 3c8bfd48bdfc72bf481629cde03e5383f0fb3d55 | [
"MIT"
] | permissive | ajsnow56/react-python-music-app | 30f122e2243a01b4ef41b7615fbf2c6ecf8c255e | f44fc0040d2c2f8bf3580ba7e71c9196c2603386 | refs/heads/main | 2023-02-10T17:23:36.353584 | 2021-01-10T06:59:40 | 2021-01-10T06:59:40 | 328,294,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | """music_controller URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route the admin site, the REST API (api app), and everything else to
# the frontend app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include("api.urls")),
    path('', include("frontend.urls"))
]
| [
"ajsnow56@gmail.com"
] | ajsnow56@gmail.com |
1be1511f6d587ac160826b59755e8c6494866c0f | 70a02a488849ac8cc15f5684564b06cfafe98192 | /src/network/pyramid.py | 10e1ac8d8e0fa5c71aed70d09cd2d88b803c5016 | [] | no_license | takedarts/resnetfamily | 9a4203ba5e2c54caf369138d23352c9f2a514ddb | ea51083c430cc27c5fd285429e6eed67b1f697bd | refs/heads/master | 2020-12-02T11:14:17.881053 | 2017-07-09T14:28:08 | 2017-07-09T14:28:08 | 96,619,601 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,693 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Residual Network (pre-activation)モジュール。
Residual Block : BN - Conv(3x3) - BN - ReLU - Conv(3x3) - BN
@author: Atsushi TAKEDA
'''
import chainer
def reshape(x, channels):
if x.shape[1] < channels:
xp = chainer.cuda.get_array_module(x)
p = xp.zeros((x.shape[0], channels - x.shape[1], x.shape[2], x.shape[3]), dtype=x.dtype)
x = chainer.functions.concat((x, p), axis=1)
elif x.shape[1] > channels:
x = x[:, :channels, :]
return x
class ResidualUnit(chainer.Chain):
    """One pre-activation residual unit:
    BN - Conv(3x3) - BN - ReLU - Conv(3x3) - BN (no final activation).

    The residual (skip) addition is performed by the enclosing
    ResidualBlock, not here.
    """

    def __init__(self, in_channels, out_channels):
        # 3x3 convolutions with pad=1 keep the spatial size unchanged.
        super().__init__(norm0=chainer.links.BatchNormalization(in_channels),
                         conv1=chainer.links.Convolution2D(in_channels, out_channels, 3, pad=1),
                         norm1=chainer.links.BatchNormalization(out_channels),
                         conv2=chainer.links.Convolution2D(out_channels, out_channels, 3, pad=1),
                         norm2=chainer.links.BatchNormalization(out_channels))

    def __call__(self, x):
        x = self.norm0(x)
        x = self.conv1(x)
        x = self.norm1(x)
        x = chainer.functions.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        return x
class ResidualBlock(chainer.ChainList):
    """A stack of residual units whose channel count is linearly
    interpolated from in_channels to out_channels over *depth* units."""

    def __init__(self, in_channels, out_channels, depth):
        widths = [int((in_channels * (depth - i) + out_channels * i) / depth) for i in range(depth + 1)]
        units = [ResidualUnit(n_in, n_out) for n_in, n_out in zip(widths, widths[1:])]
        super().__init__(*units)

    def __call__(self, x):
        # Each unit's output is summed with the (channel-adjusted) input,
        # which then feeds the next unit.
        for unit in self:
            residual = unit(x)
            residual += reshape(x, residual.shape[1])
            x = residual
        return x
class Network(chainer.Chain):
    """Pyramid-style residual network: channel count grows linearly from
    16 to 16 + alpha across three stages, with average pooling between
    stages and global average pooling before the classifier.

    params is a (depth, alpha) tuple; depth presumably follows the usual
    6n + 2 ResNet convention (see the (depth - 2) // 6 computation) —
    confirm against the training script.
    """

    def __init__(self, category, params):
        depth, alpha = params
        depth = (depth - 2) // 6  # residual units per stage
        super().__init__(input=chainer.links.Convolution2D(None, 16, 3, pad=1),
                         norm=chainer.links.BatchNormalization(16),
                         block1=ResidualBlock(16 + alpha * 0 // 3, 16 + alpha * 1 // 3, depth),
                         block2=ResidualBlock(16 + alpha * 1 // 3, 16 + alpha * 2 // 3, depth),
                         block3=ResidualBlock(16 + alpha * 2 // 3, 16 + alpha * 3 // 3, depth),
                         output=chainer.links.Linear(16 + alpha, category))

    def __call__(self, x):
        x = self.input(x)
        x = self.norm(x)
        x = self.block1(x)
        x = chainer.functions.average_pooling_2d(x, 2)  # halve spatial size
        x = self.block2(x)
        x = chainer.functions.average_pooling_2d(x, 2)
        x = self.block3(x)
        x = chainer.functions.relu(x)
        # Global average pooling: kernel equals remaining spatial extent.
        x = chainer.functions.average_pooling_2d(x, x.shape[2])
        x = self.output(x)
        return x
| [
"atsushi@takedarts.jp"
] | atsushi@takedarts.jp |
0ada9e39c61bd009bdf2ced7e598217a1ddcebd1 | 3318f5b9ae8a374f08c3fa8cb3db92681542ba2b | /src/GetElectroStaticEmbedding.py | b7b6b859ff06cb46592737bf8179d882d2592846 | [] | no_license | jiahao/clusterchem | 2c381f0b044f9d51653636666610d2377233b2f7 | 7b5db987c252780dffd4f213b0e14dcb48a62391 | refs/heads/master | 2021-01-22T08:48:46.776359 | 2012-07-18T20:22:30 | 2012-07-18T20:22:30 | 2,615,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py |
def LoadGromacsTopology(filename):
    """
    Parses a Gromacs topology file for atomic charge definitions.

    @param filename Name of Gromacs topology file (usually .top or .itp)
    to parse.
    @returns a dictionary of atomtypes with (residue, atomtype) as key
    and charge as value.
    """
    AtomTypes = {}
    mode = 'seek'
    # Two-state scanner: look for the '[ atoms ]' section header, then
    # read atom records until a non-empty line no longer parses as one.
    with open(filename) as topfile:  # BUGFIX: file handle was never closed
        for line in topfile:
            if mode == 'seek' and '[ atoms ]' in line:
                mode = 'read'
            elif mode == 'read':
                # The GROMACS topology file format specifies lines to have the form
                # ; nr type resnr residu atom cgnr charge
                # BUGFIX: was line[:line.find(';')], which silently dropped the
                # last character of any line with no ';' (find() returns -1).
                theline = line.split(';', 1)[0]
                assert '\\' not in theline, 'Cannot handle line continuations at this time.'
                t = theline.split()
                if not t:
                    # BUGFIX: blank or comment-only lines inside the section
                    # used to terminate it; just skip them instead.
                    continue
                try:
                    residu = t[3]
                    atom = t[4]
                    charge = float(t[6])
                except (ValueError, IndexError):
                    # Could not parse, assume we are done with this section
                    mode = 'seek'
                    continue
                AtomTypes[residu, atom] = charge
    return AtomTypes
def LoadGromacsGeometry(h5table, filename):
    """
    Parses a Gromacs geometry (.gro) file and appends its atoms to an
    HDF5 table.

    @param h5table HDF5 table to populate. Must be in CHARMM_CARD format.
    @param filename Name of Gromacs geometry file to parse.
    """
    mode = 'title'
    # A .gro file is: title line, atom-count line, fixed-width atom
    # records, then a final line with the box vectors.
    with open(filename) as grofile:  # BUGFIX: file handle was never closed
        for line in grofile:
            if mode == 'title':
                mode = 'numatoms'
            elif mode == 'numatoms':
                numatoms = int(line)
                thisnumatoms = 0
                mode = 'read'
            elif mode == 'read':
                try:
                    data = h5table.row
                    # The GROMACS format does not contain ResID and SegId.
                    # We fill them in with ResidNo.
                    data['ResID'] = data['SegID'] = data['ResidNo'] = int(line[:5])
                    data['Res'] = line[5:10].strip()
                    data['Type'] = line[10:15].strip()
                    data['AtomNo'] = int(line[15:20])
                    # BUGFIX: was map(float, ...) followed by slicing, which
                    # raises an uncaught TypeError on Python 3 (map objects
                    # are not subscriptable); build a real list instead.
                    numbers = [float(v) for v in line[20:].split()]
                    data['Coord'] = numbers[:3]  # discard velocities if present
                    # The GROMACS format does not contain Weighting;
                    # set to dummy value.
                    data['Weighting'] = 0
                    data.append()
                    thisnumatoms += 1
                except (ValueError, IndexError):  # Assume this is the last line with box vectors
                    break
    assert thisnumatoms == numatoms, 'Wrong number of atoms read: expected %d but read %d' % (numatoms, thisnumatoms)
    h5table.flush()
| [
"jiahao@mit.edu"
] | jiahao@mit.edu |
85dfa9657bf5f1207e0b7cd837ff3661aa12b093 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/2. Add Two Numbers/solution1.py | 78df80bb3d9a1d82a8d444589710b5f138669603 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as linked lists of digits
        in reverse order; return the sum in the same representation."""
        sentinel = ListNode()
        tail = sentinel
        carry = 0
        node_a, node_b = l1, l2
        while node_a is not None or node_b is not None:
            digit_sum = carry
            if node_a is not None:
                digit_sum += node_a.val
                node_a = node_a.next
            if node_b is not None:
                digit_sum += node_b.val
                node_b = node_b.next
            carry, digit = divmod(digit_sum, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        if carry:
            # A leftover carry becomes one final high-order digit.
            tail.next = ListNode(carry)
        return sentinel.next
| [
"info@crazysquirrel.ru"
] | info@crazysquirrel.ru |
f5de930cd145d2474ed04f2b3d2d810ceba3e181 | f38db79439185ab6062294e1d82f6e909d2be81e | /test/test_update_timesheet_model.py | cbf81950c118167fd9c24c13db0647e8123c0e00 | [] | no_license | ContainerSolutions/avazacli | 3a37f8500ad1f1843acbdbb413d4949e00ec6f91 | 49618314f15d8cb2bda36e6019670fdfbed1524f | refs/heads/master | 2020-06-18T18:44:58.594385 | 2019-07-11T14:23:10 | 2019-07-11T14:23:10 | 196,406,206 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | # coding: utf-8
"""
Avaza API Documentation
Welcome to the autogenerated documentation & test tool for Avaza's API. <br/><br/><strong>API Security & Authentication</strong><br/>Authentication options include OAuth2 Implicit and Authorization Code flows, and Personal Access Token. All connections should be encrypted over SSL/TLS <br/><br/>You can set up and manage your api authentication credentials from within your Avaza account. (requires Administrator permissions on your Avaza account).<br/><br/> OAuth2 Authorization endpoint: https://any.avaza.com/oauth2/authorize <br/>OAuth2 Token endpoint: https://any.avaza.com/oauth2/token<br/>Base URL for subsequent API Requests: https://api.avaza.com/ <br/><br/>Blogpost about authenticating with Avaza's API: https://www.avaza.com/avaza-api-oauth2-authentication/ <br/>Blogpost on using Avaza's webhooks: https://www.avaza.com/avaza-api-webhook-notifications/<br/>The OAuth flow currently issues Access Tokens that last 1 day, and Refresh tokens that last 180 days<br/>The Api respects the security Roles assigned to the authenticating Avaza user and filters the data return appropriately. <br/><br><strong>Support</strong><br/>For API Support, and to request access please contact Avaza Support Team via our support chat. <br/><br/><strong>User Contributed Libraries:</strong><br/>Graciously contributed by 3rd party users like you. <br/>Note these are not tested or endorsesd by Avaza. We encourage you to review before use, and use at own risk.<br/> <ul><li> - <a target='blank' href='https://packagist.org/packages/debiprasad/oauth2-avaza'>PHP OAuth Client Package for Azava API (by Debiprasad Sahoo)</a></li></ul> # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import avazacli
from avazacli.models.update_timesheet_model import UpdateTimesheetModel # noqa: E501
from avazacli.rest import ApiException
class TestUpdateTimesheetModel(unittest.TestCase):
    """UpdateTimesheetModel unit test stubs"""

    def setUp(self):
        # No fixtures required for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testUpdateTimesheetModel(self):
        """Test UpdateTimesheetModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = avazacli.models.update_timesheet_model.UpdateTimesheetModel()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"riccardo.cefala@container-solutions.com"
] | riccardo.cefala@container-solutions.com |
9cf0b12018962010a876c9da5885b4db43e416b4 | 640ac4564cba4836c01cccd37376bdf647c2484c | /optim_project/optim_app/migrations/0007_auto_20200604_0228.py | de9a9a98406647055d86496e801f81d5a64d8095 | [] | no_license | shchepinevg/Bachelor-s-thesis | 9c162839452347e3a8a0f10baf6aa72c3bdd3b95 | ba99c7bf7f75de993a8f6605c2dc4e348f9809df | refs/heads/master | 2023-02-10T10:51:22.914621 | 2020-07-10T09:08:27 | 2020-07-10T09:08:27 | 261,974,296 | 0 | 0 | null | 2021-01-06T03:18:41 | 2020-05-07T06:58:50 | R | UTF-8 | Python | false | false | 2,126 | py | # Generated by Django 3.0.6 on 2020-06-03 19:28
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the OptimizationFunction and
    OptimizationParameters models, alters ParameterInfo, and removes the
    old OptimizationHistory model."""

    dependencies = [
        ('optim_app', '0006_auto_20200601_2234'),
    ]

    operations = [
        migrations.CreateModel(
            name='OptimizationFunction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_function', models.BooleanField()),
                ('optimization_meth', models.CharField(max_length=64)),
                ('N', models.IntegerField()),
                ('optim_type', models.IntegerField()),
                ('value', models.FloatField()),
                ('param_func', django.contrib.postgres.fields.jsonb.JSONField()),
                ('param_optim', django.contrib.postgres.fields.jsonb.JSONField()),
                ('user_function', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='optim_app.UserFunction')),
            ],
        ),
        migrations.CreateModel(
            name='OptimizationParameters',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meta_optim_meth', models.CharField(max_length=64)),
                ('meta_N', models.IntegerField()),
                ('meta_value', models.FloatField()),
                ('meta_param_optim', django.contrib.postgres.fields.jsonb.JSONField()),
                # One-to-one: each parameter set belongs to exactly one
                # optimization function.
                ('optim_func', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='optim_app.OptimizationFunction')),
                ('user_function', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='optim_app.UserFunction')),
            ],
        ),
        migrations.AlterField(
            model_name='parameterinfo',
            name='discrete_continuous',
            field=models.IntegerField(),
        ),
        migrations.DeleteModel(
            name='OptimizationHistory',
        ),
    ]
| [
"shchepinevg@gmail.com"
] | shchepinevg@gmail.com |
1783ef895c616c80015e20e025262af87331c702 | bfa58d62c3c03fd3d728d8cea5c47f36942c43b8 | /Modules/MidsagittalSurfaceMacro.py | 0215eddd74fd17fdca8fb2e784e0a09cd28b532c | [
"Apache-2.0"
] | permissive | vinay0458/MidsagittalApp | 3490ecc86e7b27077f20a4d20838463283c133bf | aa067e536df842b8ceafed787f042aa4a6ebd117 | refs/heads/master | 2020-07-22T03:34:04.338842 | 2016-01-15T14:56:37 | 2016-01-15T14:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,433 | py | from AlgorithmMacroModule.AlgorithmMacroModule import AlgorithmMacroModule, Error
from AlgorithmModule.Definitions import StatusCode
import numpy
class MidsagittalSurfaceMacro(AlgorithmMacroModule):
    """MeVisLab macro computing the midsagittal surface of an image.

    A midsagittal plane (MSP) is fitted to the full image; optionally two
    extra planes are fitted to the anterior and posterior parts of the
    image ("dual-plane" mode) to handle extreme asymmetric cases.  The
    selected plane(s) are forwarded to the internal MidsagittalSurface
    module, which produces the final surface.
    """

    def __init__(self, ctx):
        AlgorithmMacroModule.__init__(self, ctx)
        self._init()

    def _init(self):
        """Clear the cache and reset all output fields to their defaults."""
        self._ctx.field("MemoryCache.clear").touch()
        self._ctx.field("outMsp").setVectorValue((1, 0, 0, 0))
        self._ctx.field("outMspA").setVectorValue((1, 0, 0, 0))
        self._ctx.field("outMspP").setVectorValue((1, 0, 0, 0))
        self._ctx.field("outAngle").setDoubleValue(0)
        self._ctx.field("progress").setDoubleValue(0)

    def _getPlane(self, imageField):
        """Fit a midsagittal plane to the image on *imageField* and return
        it as a 4-component plane vector."""
        # BUGFIX: was `ctx.field(imageField)` (relying on the module-global
        # context); use the stored context like every other call here.
        self._ctx.field("MidsagittalPlane.input0").connectFrom(self._ctx.field(imageField))
        self._ctx.field("MidsagittalPlane.update").touch()
        return self._ctx.field("MidsagittalPlane.outPlane").vectorValue()

    def _validateInput(self):
        """Raise Error when no valid input image is connected."""
        if not self._ctx.field("input0").isValid():
            raise Error(StatusCode.ERROR_INPUT_OBJECT, u"Input image not valid.")

    def _update(self):
        self._init()
        self._ctx.field("progress").setDoubleValue(0.05)

        # Compute the midsagittal plane on the full image.
        msp = self._getPlane("Bypass.output0")
        self._ctx.field("MemoryCache.update").touch()
        self._ctx.field("outMsp").setVectorValue(msp)

        # Set the cMSPx from the plane computed on the full image.
        self._ctx.field("MidsagittalSurface.inCMSPx").setIntValue(
            self._ctx.field("MidsagittalPlane.outCMSPx").intValue())
        self._ctx.field("progress").setDoubleValue(0.1)

        use_dual = False
        # Do we want to use the dual-plane approach, to handle extreme
        # asymmetric cases?
        if self._ctx.field("inUseDualPlane").boolValue():
            mspA = self._getPlane("Anterior.output0")
            self._ctx.field("progress").setDoubleValue(0.2)
            mspP = self._getPlane("Posterior.output0")
            self._ctx.field("progress").setDoubleValue(0.3)
            self._ctx.field("outMspA").setVectorValue(mspA)
            self._ctx.field("outMspP").setVectorValue(mspP)

            # In-plane angle between the two MSPs: use the first two plane
            # components, normalized to unit length.
            vA = numpy.array(mspA[:2])
            vP = numpy.array(mspP[:2])
            vA = vA / numpy.linalg.norm(vA)
            vP = vP / numpy.linalg.norm(vP)
            # Clip guards against round-off pushing the dot product outside
            # [-1, 1], which would make arccos return NaN.
            angle = numpy.arccos(numpy.clip(numpy.dot(vA, vP), -1.0, 1.0))
            if numpy.isnan(angle):
                # BUGFIX: this branch referenced undefined names v1_u/v2_u
                # and raised a NameError whenever it was hit.
                if (vA == vP).all():
                    angle = 0.0
                else:
                    angle = numpy.pi
            self._ctx.field("outAngle").setDoubleValue(numpy.degrees(angle))

            # Use the two planes only when they disagree by more than the
            # configured threshold angle.
            use_dual = numpy.degrees(angle) > self._ctx.field("inDualPlaneAngle").doubleValue()

        if use_dual:
            self._ctx.field("MidsagittalSurface.inPlane1").setVectorValue(mspA)
            self._ctx.field("MidsagittalSurface.inPlane2").setVectorValue(mspP)
            self._ctx.field("MidsagittalSurface.inUseSecondPlane").setBoolValue(True)
        else:
            # Single-plane mode; also the explicit fallback when dual-plane
            # is disabled, so the surface module never sees stale planes.
            self._ctx.field("MidsagittalSurface.inPlane1").setVectorValue(msp)
            self._ctx.field("MidsagittalSurface.inUseSecondPlane").setBoolValue(False)

        self._ctx.field("progress").setDoubleValue(0.4)
        self._ctx.field("MidsagittalSurface.update").touch()
        self._ctx.field("progress").setDoubleValue(1)

    def _clear(self):
        self._ctx.field("MidsagittalSurface.clear").touch()
        self._init()
| [
"hjkuijf@users.noreply.github.com"
] | hjkuijf@users.noreply.github.com |
fb68c7639379c040a8111bd9b8d7448a5fe7d37d | 607b41ce463b9941fd6ae4fcaa46e0e9abab38f7 | /auth.py | 6206a845d3a1541c590a89ddbf7030aa688a1c7b | [
"MIT"
] | permissive | nislag/chat-bot-tornado | 45ab3ed2bd25f97bcefb0957a95a61b9f0fa5b2a | def046f5add0773362b547314e7dc3fdea74331f | refs/heads/master | 2021-01-10T05:43:46.960470 | 2015-06-04T13:38:33 | 2015-06-04T13:38:33 | 36,575,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,720 | py | # coding=UTF-8
# Tornado modules.
import tornado.auth
import tornado.web
import tornado.escape
# Import application modules.
from base import BaseHandler
# General modules.
import logging
class LoginHandler(BaseHandler, tornado.auth.GoogleOAuth2Mixin):
    """
    Handler for logins with Google Open ID / OAuth
    http://www.tornadoweb.org/documentation/auth.html#google

    Also offers a "direct" login form (name + email only, no password).
    """

    # NOTE(review): tornado.web.asynchronous and self.async_callback are
    # legacy APIs removed in Tornado 6 — presumably this targets an older
    # Tornado release; confirm the pinned version.
    @tornado.web.asynchronous
    def get(self):
        # Dispatch on query parameters: an OpenID callback, the start of a
        # Google OAuth flow, the direct-login form submit, or (default)
        # rendering the login page.
        if self.get_argument("openid.mode", None):
            self.get_authenticated_user(self.async_callback(self._on_auth))
            return
        elif self.get_argument("start_google_oauth", None):
            # Set users attributes to ask for.
            ax_attrs = ['name', 'email', 'language', 'username']
            self.authenticate_redirect(ax_attrs=ax_attrs)
        elif self.get_argument("start_direct_auth", None):
            # Get form inputs.
            try:
                user = dict()
                user["email"] = self.get_argument("email", default="")
                user["name"] = self.get_argument("name", default="")
            except:  # NOTE(review): bare except; handler continues after render
                # Send an error back to client.
                content = "<p>There was an input error. Fill in all fields!</p>"
                self.render_default("index.html", content=content)
            # If user has not filled in all fields.
            if not user["email"] or not user["name"]:
                content = ('<h2>2. Direct Login</h2>'
                           + '<p>Fill in both fields!</p>'
                           + '<form class="form-inline" action="/login" method="get"> '
                           + '<input type="hidden" name="start_direct_auth" value="1">'
                           + '<input class="form-control" type="text" name="name" placeholder="Your Name" value="' + str(user["name"]) + '"> '
                           + '<input class="form-control" type="text" name="email" placeholder="Your Email" value="' + str(user["email"]) + '"> '
                           + '<input type="submit" class="btn btn-default" value="Sign in">'
                           + '</form>')
                self.render_default("index.html", content=content)
            # "Bot" is reserved for the chat bot and may not be taken.
            elif str(user.get("name")) == "Bot":
                content = ('<h2>2. Direct Login</h2>'
                           + '<p>Incorrect name Bot, write another!</p>'
                           + '<form class="form-inline" action="/login" method="get"> '
                           + '<input type="hidden" name="start_direct_auth" value="1">'
                           + '<input class="form-control" type="text" name="name" placeholder="Your Name" value="' + str(user["name"]) + '"> '
                           + '<input class="form-control" type="text" name="email" placeholder="Your Email" value="' + str(user["email"]) + '"> '
                           + '<input type="submit" class="btn btn-default" value="Sign in">'
                           + '</form>')
                self.render_default("index.html", content=content)
            # All data given. Log user in!
            else:
                self._on_auth(user)
        else:
            # Logins.
            content = '<div class="page-header"><h1>Login</h1></div>'
            content += ('<h2>1. Google Login</h2>'
                        + '<form action="/login" method="get">'
                        + '<input type="hidden" name="start_google_oauth" value="1">'
                        + '<input type="submit" class="btn" value="Sign in with Google">'
                        + '</form>')
            content += ('<h2>2. Direct Login</h2>'
                        + '<form class="form-inline" action="/login" method="get"> '
                        + '<input type="hidden" name="start_direct_auth" value="1">'
                        + '<input class="form-control" type="text" name="name" placeholder="Your Name"> '
                        + '<input class="form-control" type="text" name="email" placeholder="Your Email"> '
                        + '<input type="submit" class="btn btn-default" value="Sign in">'
                        + '</form>')
            content += ('<h2>Instructions</h2>'
                        + '<div>'
                        + '<p>There are 3 base rooms in Chat: Main, Rooms, Help. You can`t write in Rooms and Help.</p>'
                        + '<p>If you want to change/create room you have to change url in your browser, after "http..../room/newroom"</p>'
                        + '<p>There is command Bot in Chat. You can`t take name "Bot" but can give him commands.</p>'
                        + '<p>!news - Bot will write 10 last news from news.ycombinator.com</p>'
                        + '<p>!duck word - write 10 search resuts this word by duckduckgo</p>'
                        + '<p>!sum numb1 numb2 ... - write sum=numb1+numb2...</p>'
                        + '<p>!mean numb1 numb2 ... - write mean of this numbers</p>'
                        + '</div>')
            self.render_default("index.html", content=content)

    def _on_auth(self, user):
        """
        Callback for third party authentication (last step).

        Creates or updates the user record in the application's backing
        store, sets the session cookie, and redirects to "/".
        """
        if not user:
            content = ('<div class="page-header"><h1>Login</h1></div>'
                       + '<div class="alert alert-error">'
                       + '<button class="close" data-dismiss="alert">×</button>'
                       + '<h3>Authentication failed</h3>'
                       + '<p>This might be due to a problem in Tornados GoogleMixin.</p>'
                       + '</div>')
            self.render_default("index.html", content=content)
            return None
        # @todo: Validate user data.
        # Save user when authentication was successful.
        # NOTE(review): self.application.client looks like an asynchronous
        # key/value client (get/set with callback, "user:<email>" keys) —
        # presumably Redis; confirm against the application setup.
        def on_user_find(result, user=user):
            #@todo: We should check if email is given even though we can assume.
            if result == "null" or not result:
                # If user does not exist, create a new entry.
                self.application.client.set("user:" + user["email"], tornado.escape.json_encode(user))
            else:
                # Update existing user.
                # @todo: Should use $set to update only needed attributes?
                dbuser = tornado.escape.json_decode(result)
                dbuser.update(user)
                user = dbuser
                self.application.client.set("user:" + user["email"], tornado.escape.json_encode(user))
            # Save user id in cookie.
            self.set_secure_cookie("user", user["email"])
            self.application.usernames[user["email"]] = user.get("name") or user["email"]
            # Closed client connection
            if self.request.connection.stream.closed():
                logging.warning("Waiter disappeared")
                return
            self.redirect("/")
        dbuser = self.application.client.get("user:" + user["email"], on_user_find)
class LogoutHandler(BaseHandler):
    """Clears the session cookie and returns the user to the start page."""

    def get(self):
        self.clear_cookie('user')
        self.redirect("/")
| [
"nislagg@gmail.com"
] | nislagg@gmail.com |
20237d2e410ec4f79401483b11eccd032bf6a5f3 | 4a4484e61b662a7d093d72560a0a1182b680acc4 | /contact.py | 1cd6badb10d933d6665091eafcc791d9b50791ec | [] | no_license | TheRohitRahul/Disease_Spread_Simulator | 1a8a86e454f9c7fe29a89189cb8babba3626d2c0 | 3c97bb7e467028ad9b330f5709481a0423040e03 | refs/heads/master | 2021-05-21T02:49:09.063162 | 2020-04-03T08:16:07 | 2020-04-03T08:16:07 | 252,508,531 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | from living_state import *
from random import randint
import pdb
def _maybe_transmit(source, target, current_iter):
    """Infect *target* from *source* when they are within the infection
    area and a random draw falls under the infection probability."""
    sx, sy = source.location
    tx, ty = target.location
    infection = source.infection_cls
    area = infection.infect_area
    # Square neighbourhood check: |dx| <= area and |dy| <= area.
    if abs(tx - sx) <= area and abs(ty - sy) <= area:
        infection_probab = infection.person_infect_probab * 100
        if randint(0, 100) < infection_probab:
            target.infect(infection, current_iter)


def contact(person1, person2, current_iter):
    """Resolve a contact event between two people: whichever one is
    contagious may infect the other if that one is still uninfected.

    The two directions used to be two near-identical copies of the same
    logic; they now share _maybe_transmit.
    """
    contagious = (INFECTED, NO_HOSPITAL_QUARANTINED)
    if person1.status in contagious and person2.status == UNINFECTED:
        _maybe_transmit(person1, person2, current_iter)
    if person2.status in contagious and person1.status == UNINFECTED:
        _maybe_transmit(person2, person1, current_iter)
| [
"rohit.rahul@tcs.com"
] | rohit.rahul@tcs.com |
61094d5d3babcb4ac784998ee52b573967471ac0 | 7fc22330d96b48a425894311441c4e83cb4d2447 | /code/snakeeyes/tests/__init__.py | e207e34b2b0db2f98b137a14327de8cf795330f9 | [] | no_license | tangentstorm/snakeeyes | 5c23791adfe4511a3a97a35d725d1b2769552000 | a036884e39fe7989e8101c7f96cae8d4f3c507ea | refs/heads/master | 2021-01-22T08:23:27.661057 | 2020-11-22T05:08:56 | 2020-11-22T05:08:56 | 10,516,815 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | """
Created on Aug 1, 2009
@author: michal
"""
import sys; sys.path.append("..") # for testloop.sh
import unittest
from snakeeyes.tests.img_test import *
from snakeeyes.tests.ocr_test import *
from snakeeyes.tests.scrape_test import *
if __name__ == '__main__':
unittest.main()
| [
"michal.wallace@gmail.com"
] | michal.wallace@gmail.com |
60f89bcbfd5b5b577a09c1598ca2168403e00b62 | 136cbeb736cecca889dce4dc4abc601919b41a3c | /GUI/add_monster_popup.py | be4c83de807c4c4a52c013d0dea2d58f3cc68bfd | [] | no_license | LouisChen1013/CharacterManagementSystem | f756ac1817b577003f7bc52c1189d5d67fc8b357 | e710f04a2f95a8f4815088d9fe01168547b55fae | refs/heads/master | 2023-05-23T21:02:44.284701 | 2021-07-29T06:54:25 | 2021-07-29T06:54:25 | 210,401,012 | 0 | 0 | null | 2023-05-23T01:26:35 | 2019-09-23T16:20:29 | Python | UTF-8 | Python | false | false | 1,624 | py | import tkinter as tk
from tkinter import messagebox
import requests
import re
class AddMonsterPopup(tk.Frame):
    """ Popup Frame to Add a Monster """

    def __init__(self, parent, close_callback):
        """ Constructor

        parent -- the containing Tk widget
        close_callback -- zero-argument callable that dismisses the popup
        """
        tk.Frame.__init__(self, parent)
        self._close_cb = close_callback
        self.grid(rowspan=2, columnspan=2)
        # Two labelled entry rows, then Submit / Close buttons.
        tk.Label(self, text="Monster Type:").grid(row=1, column=1)
        self._monster_type = tk.Entry(self)
        self._monster_type.grid(row=1, column=2)
        tk.Label(self, text="Monster AI Difficulty:").grid(row=2, column=1)
        self._monster_ai_difficulty = tk.Entry(self)
        self._monster_ai_difficulty.grid(row=2, column=2)
        tk.Button(self, text="Submit", command=self._submit_cb).grid(
            row=4, column=1)
        tk.Button(self, text="Close", command=self._close_cb).grid(
            row=4, column=2)

    def _submit_cb(self):
        """ Submit the Add Monster """
        # Create the dictionary for the JSON request body
        data = {}
        data['monster_type'] = self._monster_type.get()
        data['monster_ai_difficulty'] = self._monster_ai_difficulty.get()
        data['type'] = "monster"
        """ Adds a character to the backend server"""
        # NOTE(review): assumes the backend runs locally on port 5000.
        headers = {"content-type": "application/json"}
        response = requests.post(
            "http://127.0.0.1:5000/server/characters", json=data, headers=headers)
        if response.status_code == 200:
            # Success: dismiss the popup.
            self._close_cb()
        else:
            messagebox.showerror(
                "Error", "Add Monster Request Failed: " + response.text)
| [
"chenhonglin1013@gmail.com"
] | chenhonglin1013@gmail.com |
13596afe059c609f619908e75c5935e7c9e5e401 | 1754a20778101b8971c057ec6c358d6b45ed940b | /test/functional/wallet-accounts.py | 0d968863299f05bc09ea8207c4ba32c95f244769 | [
"MIT"
] | permissive | valeamoris/platopia | 33ad24e97fa77f09cab94a35705f2180d9904064 | 563c616db768f813aa4482d39d8ed1d8aacaad4f | refs/heads/master | 2020-04-11T06:48:50.911653 | 2018-05-15T06:15:27 | 2018-05-15T06:15:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
start_nodes,
start_node,
assert_equal,
connect_nodes_bi,
)
class WalletAccountsTest(BitcoinTestFramework):
    """Exercise the wallet account RPCs on a single regtest node.

    Covers getaccountaddress/getnewaddress/getaccount, sendfrom, move,
    setaccount and addmultisigaddress bookkeeping.
    """

    def __init__(self):
        super().__init__()
        # One clean-chain node, no extra arguments.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]

    def run_test(self):
        node = self.nodes[0]
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(node.listunspent()), 0)
        # Mine 101 blocks so the first coinbase (50 coins) matures.
        node.generate(101)
        assert_equal(node.getbalance(), 50)
        accounts = ["a", "b", "c", "d", "e"]
        amount_to_send = 1.0
        account_addresses = dict()
        # Fund each account once from the default ("") account.
        for account in accounts:
            address = node.getaccountaddress(account)
            account_addresses[account] = address
            node.getnewaddress(account)
            assert_equal(node.getaccount(address), account)
            assert(address in node.getaddressesbyaccount(account))
            node.sendfrom("", address, amount_to_send)
        node.generate(1)
        # Send 1 coin around the ring: a->b->c->d->e->a.
        for i in range(len(accounts)):
            from_account = accounts[i]
            to_account = accounts[(i + 1) % len(accounts)]
            to_address = account_addresses[to_account]
            node.sendfrom(from_account, to_address, amount_to_send)
        node.generate(1)
        # Each account received twice; sweep balances back to "".
        for account in accounts:
            address = node.getaccountaddress(account)
            # getaccountaddress hands out a fresh address once the old one was used.
            assert(address != account_addresses[account])
            assert_equal(node.getreceivedbyaccount(account), 2)
            node.move(account, "", node.getbalance(account))
        node.generate(101)
        # All mined coins (104 blocks * 50) now sit in the default account.
        expected_account_balances = {"": 5200}
        for account in accounts:
            expected_account_balances[account] = 0
        assert_equal(node.listaccounts(), expected_account_balances)
        assert_equal(node.getbalance(""), 5200)
        # Reassigning an address moves it out of the "" account.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account)
            assert(address in node.getaddressesbyaccount(account))
            assert(address not in node.getaddressesbyaccount(""))
        # Fund a 5-of-10 multisig address per account and confirm balances.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account)
            node.sendfrom("", multisig_address, 50)
        node.generate(101)
        for account in accounts:
            assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main()
| [
"d4ptakr@gmail.com"
] | d4ptakr@gmail.com |
26acb61a5f3dc306afadcc547821d416d28cbc9b | 29daf4c05f7a604b55f9518757c70093c4dad6a8 | /decision_trees/tree_predict.py | 1107eb8ed53fcb71e51649471d488b5829002950 | [] | no_license | EthanYue/Machine_Learning | 43091a567da5bffa4bc4071237452ffc44f55bc7 | defd64db8273da00128b3f7fa5578fa60ce20a15 | refs/heads/master | 2020-04-22T10:38:26.459500 | 2019-02-22T14:14:01 | 2019-02-22T14:14:01 | 170,311,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,306 | py | from PIL import Image, ImageDraw
my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
['google', 'France', 'yes', 23, 'Premium'],
['digg', 'USA', 'yes', 24, 'Basic'],
['kiwitobes', 'France', 'yes', 23, 'Basic'],
['google', 'UK', 'no', 21, 'Premium'],
['(direct)', 'New Zealand', 'no', 12, 'None'],
['(direct)', 'UK', 'no', 21, 'Basic'],
['google', 'USA', 'no', 24, 'Premium'],
['slashdot', 'France', 'yes', 19, 'None'],
['digg', 'USA', 'no', 18, 'None'],
['google', 'UK', 'no', 18, 'None'],
['kiwitobes', 'UK', 'no', 19, 'None'],
['digg', 'New Zealand', 'yes', 12, 'Basic'],
['google', 'UK', 'yes', 18, 'Basic'],
['kiwitobes', 'France', 'yes', 19, 'Basic']]
class decision_node:
    def __init__(self, col=1, value=None, results=None, tb=None, fb=None):
        """A single node of the decision tree.

        :param col: column index of the attribute tested at this node
        :param value: value the column must match for the test to be true
        :param results: dict of outcome counts for this branch; None on
            every node except leaves
        :param tb: child subtree followed when the test is true
        :param fb: child subtree followed when the test is false
        """
        self.col = col
        self.value = value
        self.results = results
        self.tb = tb
        self.fb = fb
def divide_set(rows, column, value):
    """Split *rows* into two lists on the value of one column.

    Numeric values split on ``>= value``; everything else splits on
    equality.  Returns ``(matching_rows, non_matching_rows)``.
    """
    if isinstance(value, (int, float)):
        matches = lambda row: row[column] >= value
    else:
        matches = lambda row: row[column] == value
    true_rows, false_rows = [], []
    for row in rows:
        (true_rows if matches(row) else false_rows).append(row)
    return true_rows, false_rows
# Count how many times each outcome (last column) occurs in the rows.
def unique_counts(rows):
    """Return a dict mapping each result label to its frequency."""
    tallies = {}
    for row in rows:
        label = row[-1]
        tallies[label] = tallies.get(label, 0) + 1
    return tallies
# Probability that a randomly placed item lands in the wrong category
# (Gini impurity over the result labels).
def gini_impurity(rows):
    """Return the Gini impurity of the last-column labels of *rows*."""
    n = len(rows)
    # Tally labels inline (same counting the unique_counts helper does).
    tallies = {}
    for row in rows:
        tallies[row[-1]] = tallies.get(row[-1], 0) + 1
    impurity = 0
    for label_a, count_a in tallies.items():
        p_a = float(count_a) / n
        for label_b, count_b in tallies.items():
            if label_a == label_b:
                continue
            impurity += p_a * (float(count_b) / n)
    return impurity
# Sum of -p(x)*log2(p(x)) over all result labels: how mixed the rows are.
def entropy(rows):
    """Return the Shannon entropy (base 2) of the last-column labels."""
    from math import log
    # Tally labels inline (same counting the unique_counts helper does).
    tallies = {}
    for row in rows:
        tallies[row[-1]] = tallies.get(row[-1], 0) + 1
    total = len(rows)
    ent = 0.0
    for count in tallies.values():
        p = float(count) / total
        ent -= p * (log(p) / log(2))
    return ent
def build_tree(rows, score_func=entropy):
    """Recursively build a decision tree over *rows* (CART-style).

    Tries every (column, value) split, keeps the one with the highest
    information gain under *score_func*, and recurses; when no split
    improves the score the rows become a leaf holding outcome counts.
    """
    if len(rows) == 0:
        return decision_node()
    current_score = score_func(rows)
    # Best split found so far.
    best_gain = 0.0
    best_criteria = None
    best_sets = None
    column_count = len(rows[0]) - 1
    for col in range(0, column_count):
        # Collect the distinct values appearing in this column.
        column_values = {}
        for row in rows:
            column_values[row[col]] = 1
        for value in column_values.keys():
            set1, set2 = divide_set(rows, col, value)
            # Information gain of this split.
            p = float(len(set1)) / len(rows)
            gain = current_score - p * score_func(set1) - (1 - p) * score_func(set2)
            if gain > best_gain and len(set1) > 0 and len(set2) > 0:
                best_gain = gain
                best_criteria = (col, value)
                best_sets = (set1, set2)
    # Recursively create the child branches.
    if best_gain > 0:
        # NOTE(review): the recursive calls fall back to the default scorer
        # (entropy) instead of passing score_func through — confirm intended.
        true_branch = build_tree(best_sets[0])
        false_branch = build_tree(best_sets[1])
        return decision_node(col=best_criteria[0], value=best_criteria[1], tb=true_branch, fb=false_branch)
    else:
        return decision_node(results=unique_counts(rows))
def classify(observation, tree):
    """Walk *tree* with *observation* and return the leaf's result counts."""
    # Leaf node: results holds the outcome tallies.
    if tree.results is not None:
        return tree.results
    v = observation[tree.col]
    # Numeric attributes branch on >=, everything else on equality.
    if isinstance(v, (int, float)):
        follow_true = v >= tree.value
    else:
        follow_true = v == tree.value
    branch = tree.tb if follow_true else tree.fb
    return classify(observation, branch)
def print_tree(tree, indent=''):
    """Print an ASCII rendering of *tree* to stdout (T-> is the true branch)."""
    if tree.results is not None:
        # Leaf: show the outcome counts.
        print(str(tree.results))
    else:
        # Branch: show the split condition, then both subtrees indented.
        print(str(tree.col) + ':' + str(tree.value) +'? ')
        print(indent + 'T->')
        print_tree(tree.tb, indent+' ')
        print(indent + 'F->')
        print_tree(tree.fb, indent+' ')
# Total display width of the tree = number of leaf nodes under it.
def get_width(tree):
    """Return the number of leaves beneath *tree* (a leaf counts as 1)."""
    is_leaf = tree.tb is None and tree.fb is None
    return 1 if is_leaf else get_width(tree.tb) + get_width(tree.fb)
# Total depth of the tree = number of branch levels below the node.
def get_depth(tree):
    """Return the depth below *tree* (0 for a leaf)."""
    is_leaf = tree.tb is None and tree.fb is None
    return 0 if is_leaf else 1 + max(get_depth(tree.tb), get_depth(tree.fb))
# Render the whole tree to a JPEG file (100 px per node each way).
def draw_tree(tree, jpeg='tree.jpg'):
    """Draw *tree* onto a white PIL canvas and save it to *jpeg*."""
    w = get_width(tree) * 100
    h = get_depth(tree) * 100 + 200
    img = Image.new('RGB', (w, h), (255, 255, 255))
    draw = ImageDraw.Draw(img)
    # Start drawing from the root, centred horizontally.
    draw_node(draw, tree, w/2, 20)
    img.save(jpeg, 'JPEG')
# Draw one node; the right-hand child is the "true" branch.
def draw_node(draw, tree, x, y):
    """Recursively draw *tree* centred at (x, y) on the PIL canvas *draw*."""
    if tree.results is None:
        # Horizontal space claimed by each subtree, in pixels.
        w1 = get_width(tree.fb) * 100
        w2 = get_width(tree.tb) * 100
        left = x - (w1 + w2) / 2
        right = x + (w1 + w2) / 2
        # Label with the split condition, then connect to both children.
        draw.text((x - 20, y - 10), str(tree.col) + ':' + str(tree.value), (0, 0, 0))
        draw.line((x, y, left+w1/2, y+100), fill=(255, 0, 0))
        draw.line((x, y, right-w2/2, y+100), fill=(255, 0, 0))
        draw_node(draw, tree.fb, left+w1/2, y+100)
        draw_node(draw, tree.tb, right-w2/2, y+100)
    else:
        # Leaf: print the outcome tallies, one per line.
        txt = ' \n'.join(['%s:%d' % v for v in tree.results.items()])
        draw.text((x-20, y), txt, (0, 0, 0))
# Prune: merge sibling leaves whose split does not reduce entropy enough.
def prune(tree, min_gain):
    """Recursively merge leaf pairs whose entropy gain is below *min_gain*.

    Descends to the bottom of the tree first, then compares the entropy of
    the merged leaf against the mean entropy of the two separate leaves;
    if the reduction is smaller than *min_gain* the split is discarded and
    this node becomes a leaf with the combined tallies.
    """
    # Descend into any branch that is not itself a leaf.
    if tree.tb.results is None:
        prune(tree.tb, min_gain)
    if tree.fb.results is None:
        prune(tree.fb, min_gain)
    # If both children are now leaves, consider merging them.
    if tree.tb.results is not None and tree.fb.results is not None:
        # Expand the tally dicts back into rows of labels.
        tb, fb = [], []
        for v, c in tree.tb.results.items():
            tb += [[v]] * c
        for v, c in tree.fb.results.items():
            fb += [[v]] * c
        # Entropy reduction achieved by keeping the split.
        # BUG FIX: the mean of the two branch entropies needs parentheses —
        # the original computed entropy(tb) + entropy(fb)/2 (a known erratum
        # of this textbook snippet), which overstates delta and under-prunes.
        delta = entropy(tb + fb) - (entropy(tb) + entropy(fb)) / 2
        if delta < min_gain:
            # Merge: drop the children and keep the combined counts here.
            tree.tb, tree.fb = None, None
            tree.results = unique_counts(tb + fb)
# Classify an observation that may contain missing (None) attribute values.
def md_classify(observation, tree):
    """Like classify(), but tolerates missing data.

    When the tested attribute is None, both branches are followed and
    their results combined, each weighted by the fraction of training
    items that went down that branch.
    """
    if tree.results is not None:
        # Leaf: return the outcome tallies.
        return tree.results
    v = observation[tree.col]
    if v is None:
        # Missing value: follow both branches and weight their answers.
        tr = md_classify(observation, tree.tb)
        fr = md_classify(observation, tree.fb)
        t_count = sum(tr.values())
        f_count = sum(fr.values())
        t_weight = t_count / (t_count + f_count)
        f_weight = f_count / (t_count + f_count)
        result = {}
        for k, val in tr.items():
            result[k] = val * t_weight
        for k, val in fr.items():
            if k not in result:
                result[k] = 0
            # BUG FIX: accumulate instead of overwriting — the original
            # assigned `result[k] = v * f_weight`, discarding the
            # true-branch contribution for labels present in both branches
            # (the `result[k] = 0` initialisation shows += was intended).
            result[k] += val * f_weight
        return result
    # Normal case: numeric attributes branch on >=, others on equality.
    if isinstance(v, (int, float)):
        branch = tree.tb if v >= tree.value else tree.fb
    else:
        branch = tree.tb if v == tree.value else tree.fb
    return md_classify(observation, branch)
def variance(rows):
    """Return the population variance of the last column of *rows* (0 if empty)."""
    if not rows:
        return 0
    values = [float(row[-1]) for row in rows]
    mean = sum(values) / len(values)
    return sum((v - mean) ** 2 for v in values) / len(values)
"yfy17859733505@gmail.com"
] | yfy17859733505@gmail.com |
7445accd6462b765ae0452d86137ed83bc7f4af5 | f46a7b675961fc39e626e6f005285c055b1b056d | /worldmap.py | 56809cba74e12f2207200d06ee119899c0c19d0d | [] | no_license | deosjr/PokeRogue | 86514a3bd801f5dd231af0eb31099728b349480e | b5a37e8bc2a603f8e27af83268607504f16b40bc | refs/heads/master | 2021-01-21T15:34:16.057406 | 2014-03-11T22:14:50 | 2014-03-11T22:14:50 | 17,521,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,694 | py |
import gui
from player import *
from pokemon import *
from moves import *
import pygame
from pygame.locals import *
import os
import copy
import random
class MapTile(object):
    """Base class for every drawable tile on the world map.

    A tile records which tileset surface it is cut from, its source
    position (tilex, tiley) and size (w, h) in cells, plus drawing
    offsets (wmod, hmod) for sprites larger than one grid cell.
    """

    def __init__(self, img, m, x, y, w, h, wmod=0, hmod=0):
        self.image = img   # tileset surface this tile is cut from
        self.map = m       # owning Map instance
        self.tilex, self.tiley, self.w, self.h = x, y, w, h
        self.hmod = hmod
        self.wmod = wmod

    def get_image(self, x, y, xp, yp):
        """Return (tileset, destination rect, source rect) for blitting.

        (x, y) is the screen pixel position; (xp, yp) the grid position,
        which subclasses may use to pick a tile variant via tile_mods().
        """
        modx, mody = self.tile_mods(xp, yp)
        # Sprites taller than one cell are anchored at their bottom cell,
        # so shift the destination up by the extra height.
        height = y - (self.h - 1 - self.hmod) * gui.CELL_HEIGHT
        width = x - self.wmod * gui.CELL_WIDTH
        r = pygame.Rect(width, height, gui.CELL_WIDTH, gui.CELL_HEIGHT)
        tilex = (self.tilex + modx) * gui.CELL_WIDTH
        tiley = (self.tiley + mody) * gui.CELL_HEIGHT
        coord = pygame.Rect(tilex, tiley, self.w * gui.CELL_WIDTH, self.h * gui.CELL_HEIGHT)
        return self.image, r, coord

    def tile_mods(self, x, y):
        # Default: no positional variant; subclasses (e.g. CaveWall) override.
        return 0, 0

    def effect_stand_on(self):
        # (message, action) triggered when standing here; none by default.
        return None, None
class MapObject(MapTile):
    """A tile bound to a specific grid position (placed via Map.add_to_map)."""

    def __init__(self, img, m, x, y, w, h):
        super(MapObject, self).__init__(img, m, x, y, w, h)
        # Grid coordinates; filled in when the object is placed on a map.
        self.x, self.y = None, None
class Wall(MapTile):
    """An impassable tile."""

    def __init__(self, img, m, x, y, w, h, hmod=0):
        super(Wall, self).__init__(img, m, x, y, w, h, hmod=hmod)
        self.passable = False
class CaveWall(Wall):
    """Cave wall that picks its tileset variant from its four neighbours."""

    def __init__(self, img, m, x, y, w, h):
        super(CaveWall, self).__init__(img, m, x, y, w, h)

    # This will break as soon as these walls are not just edge of the map!
    # will have to be done in an initial pass over the map,
    # prob. storing mods per gridcell in a sep. dict
    # for now, lets try this though
    def tile_mods(self, x, y):
        """Return a (dx, dy) tileset offset based on neighbouring walls.

        NOTE(review): Map.map_to_grid stores *lists* of tiles in grid, so
        these isinstance/== checks on the raw grid value look like they can
        never match a CaveWall or m.filler directly — verify against how
        the grid is populated elsewhere.
        """
        m = self.map
        grid = self.map.grid
        # Look up the four orthogonal neighbours (None when off-map).
        up = None
        down = None
        left = None
        right = None
        if (x,y-1) in grid:
            up = grid[(x,y-1)]
        if (x,y+1) in grid:
            down = grid[(x,y+1)]
        if (x-1,y) in grid:
            left = grid[(x-1,y)]
        if (x+1,y) in grid:
            right = grid[(x+1,y)]
        # Corner / edge cases: pick the matching tileset variant.
        if isinstance(right, CaveWall) and isinstance(down, CaveWall):
            if not up or up == m.filler:
                return 6,0
            return 0,0
        elif isinstance(left, CaveWall) and isinstance(down, CaveWall):
            if not up or up == m.filler:
                return 7,0
            return 2,0
        elif isinstance(right, CaveWall) and isinstance(up, CaveWall):
            if not down or down == m.filler:
                return 1,0
            return 0,2
        elif isinstance(left, CaveWall) and isinstance(up, CaveWall):
            if not down or down == m.filler:
                return 1,0
            return 2,2
        elif isinstance(left, CaveWall) and isinstance(right, CaveWall):
            if not up or up == m.filler:
                return 1,2
            return 1,0
        elif isinstance(up, CaveWall) and isinstance(down, CaveWall):
            if not left or left == m.filler:
                return 2,1
            return 0,1
        # Fallback variant when no recognised neighbour pattern matches.
        return 1,2
class Sign(MapObject):
    """A readable sign; interacting with it returns its message."""

    def __init__(self, img, m, x, y, w, h):
        super(Sign, self).__init__(img, m, x, y, w, h)
        self.passable = False
        # Text shown when the player interacts; set by the placing map.
        self.message = ""

    def interact_with(self):
        # (message, action) pair; signs trigger no follow-up action.
        return self.message, None
class Starter_Choice(MapObject):
    """Object that offers the player a choice of three starter Pokemon."""

    def __init__(self, img, m, x, y, w, h):
        super(Starter_Choice, self).__init__(img, m, x, y, w, h)
        self.passable = False
        self.message = "Choose your starter Pokemon!"
        # The three candidate species; populated once on first interaction.
        self.starters = []

    def interact_with(self):
        # Only offer the choice once.
        if self.starters:
            return None, None
        # Candidates: species whose first evolution is by level, below 30.
        s = [p for p in POKEMON if p.evolutions and p.evolutions[0][1] == "Level" and p.evolutions[0][2] < 30]
        types = set([])
        while len(self.starters) < 3:
            p = random.choice(s)
            # Each starter must not share a type with the others picked...
            if not set(p.types).intersection(types):
                # ...and must know at least one Physical-category move
                # (assumes moves_learnable[1] is the starting moveset —
                # TODO confirm against the Pokemon model).
                physical = False
                for move in p.moves_learnable[1]:
                    m = MOVES[INTERNAL_MOVES[move] - 1]
                    if m.category == "Physical":
                        physical = True
                if physical:
                    self.starters.append(p)
                    types.update(set(p.types))
        return self.message, ("CHOOSE_STARTER", self.starters)
class Statue(MapTile):
    """A decorative, impassable tile."""

    def __init__(self, img, m, x, y, w, h):
        super(Statue, self).__init__(img, m, x, y, w, h)
        self.passable = False
class Floor(MapTile):
    """A walkable tile."""

    def __init__(self, img, m, x, y, w, h, wmod=0):
        super(Floor, self).__init__(img, m, x, y, w, h, wmod=wmod)
        self.passable = True
class Heal(Floor):
    """Floor tile that triggers a HEAL action when stood on."""

    def effect_stand_on(self):
        return "You feel refreshed", "HEAL"
class Warp(Floor):
    """Floor tile that teleports the player to another map level."""

    def __init__(self, img, m, x, y, w, h, wmod=0):
        super(Warp, self).__init__(img, m, x, y, w, h, wmod=wmod)
        # Index of the destination level; set by the map that places the warp.
        self.level = None

    def effect_stand_on(self):
        return None, ("WARP", self.level)
class Grass(Floor):
    """Walkable tile with a chance of triggering a wild-Pokemon battle.

    *p* is the per-step encounter probability; *encounters* maps a
    probability share to an (internal name, min level, max level) tuple
    (shares are expected to sum to at most 1).
    """

    def __init__(self, img, m, x, y, w, h, p, encounters):
        super(Grass, self).__init__(img, m, x, y, w, h)
        self.p = p
        self.encounters = encounters

    def effect_stand_on(self):
        if random.random() < self.p:
            # Pick one encounter by walking the cumulative distribution.
            r = random.random()
            temp = 0.0
            for k, v in self.encounters.items():
                if r < k + temp:
                    pokemon, minlevel, maxlevel = v
                    level = random.randint(minlevel, maxlevel)
                    return "A wild pokemon appeared!", ("BATTLE", pokemon, level)
                else:
                    temp += k
        return None, None
class Map(object):
    """A grid-based game map built from an ASCII layout.

    Subclasses define tile attributes (wall, floor, grass, filler, ...)
    before calling this constructor; map_to_grid() then translates each
    ASCII character into the corresponding tile.
    """

    def __init__(self, ascii_map):
        # (x, y) -> list of tiles stacked on that cell.
        self.grid = {}
        self.NPCs = []
        # Cells occupied by a multi-cell object's upper tiles; skipped when drawing.
        self.dont_draw = []
        self.map_to_grid(ascii_map)

    def map_to_grid(self, ascii_map):
        """Translate the ASCII layout into grid tiles.

        Legend: 'x' wall, ' ' floor, '*' grass (or floor), 's' statue,
        '#' obstacle, 'o' an NPC trainer; anything else becomes filler.
        """
        for y, line in enumerate(ascii_map):
            for x, cell in enumerate(line):
                if cell == 'x' and hasattr(self, "wall"):
                    self.grid[(x,y)] = [self.wall]
                elif cell == ' ' and hasattr(self, "floor"):
                    self.grid[(x,y)] = [self.floor]
                elif cell == '*':
                    if hasattr(self, "grass"):
                        self.grid[(x,y)] = [self.grass]
                    else:
                        self.grid[(x,y)] = [self.floor]
                # might want to remove this.. statue is an object
                elif cell == 's' and hasattr(self, "statue"):
                    self.grid[(x,y)] = [self.statue]
                elif cell == '#' and hasattr(self, "obstacle"):
                    self.grid[(x,y)] = [self.obstacle]
                elif cell == 'o':
                    # Spawn a trainer NPC with one random level-5 pokemon.
                    trainer = Player(x, y)
                    trainer.range = 5
                    self.grid[(x,y)] = [self.floor]
                    # NOTE(review): random.choice(INTERNAL_POKEMON.keys())
                    # needs list(...) on Python 3 — confirm target version.
                    random_pokemon = Pokemon(5, random.choice(INTERNAL_POKEMON.keys()))
                    trainer.add_to_team(random_pokemon)
                    self.add_player(trainer)
                    trainer.image,_ = gui.load_image(os.path.join("Graphics", "Characters", "trchar030.png"), -1)
                # Default: filler
                elif hasattr(self, "filler"):
                    self.grid[(x,y)] = [self.filler]

    def check_collisions(self, x, y):
        """Return True when (x, y) exists, is NPC-free and fully passable."""
        for npc in self.NPCs:
            if npc.x == x and npc.y == y:
                return False
        if (x,y) in self.grid:
            for tile in self.grid[(x,y)]:
                if not tile.passable:
                    return False
            else:
                # for-else: every stacked tile was passable.
                return True
        return False

    def load_tileset(self, filename):
        """Load a tileset PNG from Graphics/Tilesets by base name."""
        tileset,_ = gui.load_image(os.path.join("Graphics", "Tilesets", filename + ".png"))
        return tileset

    def add_to_map(self, category, x, y):
        """Place the tile attribute named *category* onto cell (x, y).

        MapObjects are copied so each placement has its own coordinates.
        Cells covered by the upper rows of a multi-cell sprite are filled
        with filler and excluded from drawing.  Returns the placed object
        (or None when the attribute does not exist).
        """
        if hasattr(self, category):
            obj = getattr(self, category)
            if isinstance(obj, MapObject):
                obj = copy.copy(obj)
                obj.x, obj.y = x, y
            if not (x,y) in self.grid:
                self.grid[(x,y)] = []
            self.grid[(x,y)].append(obj)
            w, h = obj.w, obj.h
            hmod, wmod = 0, 0
            if hasattr(obj, "hmod"):
                hmod = obj.hmod
            if hasattr(obj, "wmod"):
                wmod = obj.wmod
            # Reserve every other cell the sprite visually covers.
            for i in range(w):
                for j in range(h-1):
                    if not (x + i - wmod, y + hmod - j) == (x, y):
                        if not y + hmod - j > y:
                            self.grid[(x+i-wmod, y-j+hmod)] = [self.filler]
                            self.dont_draw.append((x + i-wmod, y + hmod - j))
            return obj

    def add_player(self, player):
        """Register a Player instance as an NPC on this map."""
        if isinstance(player, Player):
            self.NPCs.append(player)

    # TODO
    def shade_floors(self):
        pass
class GrassMap(Map):
    """Outdoor field map: grass encounters, signs and a heal tile."""

    def __init__(self, ascii_map):
        self.tileset = self.load_tileset("Outside")
        self.graveyardtileset = self.load_tileset("Graveyard tower interior")
        self.type = "Field"
        self.wall = Wall(self.tileset, self, 6, 58, 1, 2)
        self.floor = Floor(self.tileset, self, 2, 0, 1, 1)
        self.obstacle = Wall(self.tileset, self, 3, 59, 1, 1)
        self.sign = Sign(self.tileset, self, 0, 119, 1, 1)
        self.filler = Wall(self.tileset, self, 6, 58, 1, 2)
        self.heal = Heal(self.graveyardtileset, self, 1, 8, 1, 1)
        # chance : (pokemon, minlevel, maxlevel)
        encounters = {0.45: ("PIDGEY", 2, 4), 0.3:("RATTATA",2,2), 0.2:("SENTRET",3,3), 0.05:("FURRET",6,6)}
        self.grass = Grass(self.tileset, self, 6, 0, 1, 1, 0.1, encounters)
        super(GrassMap, self).__init__(ascii_map)
class CaveMap(Map):
    """Cave map: auto-variant walls, Zubat encounters on every floor tile."""

    def __init__(self, ascii_map):
        self.tileset = self.load_tileset("Caves")
        self.graveyardtileset = self.load_tileset("Graveyard tower interior")
        self.type = "Cave"
        #self.wall = Wall(self.tileset, 1, 23, 1, 2)
        self.wall = CaveWall(self.tileset, self, 0, 15, 1, 1)
        #self.floor = Floor(self.tileset, self, 2, 28, 1, 1)
        self.obstacle = Wall(self.tileset, self, 5, 21, 1, 1)
        self.filler = Wall(self.tileset, self, 1, 16, 1, 1)
        self.sign = Sign(self.tileset, self, 7, 7, 1, 1)
        self.heal = Heal(self.graveyardtileset, self, 1, 8, 1, 1)
        self.ladder_up = Warp(self.tileset, self, 4, 19, 1, 2)
        self.ladder_up.level = 1
        # chance : (pokemon, minlevel, maxlevel)
        encounters = {1.0: ("ZUBAT", 1, 2)}
        # Caves use encounter-enabled Grass tiles even for plain floor.
        self.floor = Grass(self.tileset, self, 2, 28, 1, 1, 0.1, encounters)
        self.grass = Grass(self.tileset, self, 5, 22, 1, 1, 0.1, encounters)
        super(CaveMap, self).__init__(ascii_map)
class GymMap(Map):
    """Gym interior map: walls, floor and decorative statues."""

    def __init__(self, ascii_map):
        self.tileset = self.load_tileset("Gyms interior")
        self.type = "IndoorB"
        self.wall = Wall(self.tileset, self, 2, 2, 1, 1)
        self.floor = Floor(self.tileset, self, 2, 7, 1, 1)
        #self.mat = Floor(self.tileset, self, 2, 0, 3, 1)
        self.statue = Statue(self.tileset, self, 1, 0, 1, 2)
        self.filler = Wall(self.tileset, self, 0, 0, 1, 1)
        super(GymMap, self).__init__(ascii_map)
class InteriorMap(Map):
    """House interior: shelves, starter-choice object, exit mat and ladder."""

    def __init__(self, ascii_map):
        self.tileset = self.load_tileset("Interior general")
        self.cavetileset = self.load_tileset("Caves")
        self.type = "IndoorA"
        self.floor = Floor(self.tileset, self, 0, 30, 1, 1)
        self.wall = Wall(self.tileset, self, 0, 0, 1, 2)
        self.filler = Wall(self.tileset, self, 6, 19, 1, 1)
        self.shelf = Wall(self.tileset, self, 3, 140, 2, 3, hmod=1)
        self.shelf_small = Wall(self.tileset, self, 5, 140, 1, 3, hmod=1)
        self.starter_choice = Starter_Choice(self.tileset, self, 0, 175, 2, 3)
        # Doormat warps outside (level 0); ladder leads to the cave (level 2).
        self.mat = Warp(self.tileset, self, 5, 107, 3, 2, wmod=1)
        self.mat.level = 0
        self.ladder_down = Warp(self.cavetileset, self, 5, 82, 1, 1)
        self.ladder_down.level = 2
        super(InteriorMap, self).__init__(ascii_map)
class MansionMap(Map):
    """Mansion interior map: walls, floor and statues."""

    def __init__(self, ascii_map):
        self.tileset = self.load_tileset("Mansion interior")
        self.type = "IndoorA"
        self.wall = Wall(self.tileset, self, 2, 3, 1, 3)
        self.floor = Floor(self.tileset, self, 6, 9, 1, 1)
        #self.mat = Floor(self.tileset, self, 2, 0, 3, 1)
        self.statue = Statue(self.tileset, self, 7, 18, 1, 2)
        self.filler = Wall(self.tileset, self, 0, 7, 1, 1)
        super(MansionMap, self).__init__(ascii_map)
"deosjr@gmail.com"
] | deosjr@gmail.com |
98c7e860f3e9b72be38d65d6434b2f524d8aef87 | 28ec3ee4daab919ef005e5913498be3fb96b19a4 | /polyorg/tests.py | d4e1c78d9034a910e93000b9baa3e029a3b0b5b4 | [
"BSD-2-Clause"
] | permissive | danielbraun/open-shot | 2bd5b0af9c8b6c32bc0b244edfafa1a871e85972 | 5e7507b57912a047e460b32927412f43df154def | refs/heads/master | 2020-12-25T12:07:26.287583 | 2013-09-15T10:17:16 | 2013-09-15T10:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from models import Candidate, CandidateList
class CreationTest(TestCase):
    """Creation and basic behaviour of the CandidateList/Candidate models."""

    def setUp(self):
        # Keep the created users in a list so tearDown can remove them all.
        self.users = [
            User.objects.create(username='Linus'),
            User.objects.create(username='Guido'),
            User.objects.create(username='Jacob'),
        ]
        self.linus, self.guido, self.jacob = self.users

    def test_candidatelist(self):
        """
        Tests the creation of CandidateList and its basic methods:
        only candidates with status 'V' are returned by get_candidates().
        """
        cl1 = CandidateList.objects.create(name="Imagine", ballot="I")
        c = Candidate.objects.create(candidate_list=cl1, user=self.jacob, ordinal=1)
        self.assertFalse(cl1.get_candidates())
        c.status = 'V'
        c.save()
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(cl1.get_candidates().count(), 1)
        c.status = 'X'
        c.save()
        self.assertFalse(cl1.get_candidates())
        cl1.delete()

    def tearDown(self):
        # BUG FIX: this hook was named ``teardown`` (never called by
        # unittest) and iterated ``self.users``, which setUp never created.
        for u in self.users:
            u.delete()
| [
"bennydaon@gmail.com"
] | bennydaon@gmail.com |
6e6a8d9906539b1e7a5184501859be324b07a84d | 3d12da3c90cf1ebc9b04e9a8b289d82fce46cb48 | /planet_name_generator/__init__.py | 57834dad8290c8c1fe0ed939a7b85ebc7751df79 | [
"MIT"
] | permissive | dawsonren/proc_gen_universe | 14b3cf7e91c39fefece26db1f2f23f4a915602cb | 958cf628e12654cadcc858e2122591f0e0a3c84d | refs/heads/master | 2023-02-17T02:28:17.142137 | 2021-01-13T03:06:32 | 2021-01-13T03:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import random
# Optional name suffixes; the empty strings make a suffix-less name likely.
SUFFIXES = ["prime", "",
            "B", "",
            "alpha", "",
            'proxima', "",
            "V", "",
            "C", "",
            "X", "",
            "D", "",
            "", ""]  # empty strings so some don't have suffixes

# Load the training corpus: one hyphen-separated planet name per line.
with open("planet_name_generator/planets.txt", "r") as f:
    raw = f.read()
PLANETS = raw.split("\n")

# Collect the distinct syllables in first-seen order.
syllables = []
for p in PLANETS:
    lex = p.split("-")
    for syl in lex:
        if syl not in syllables:
            syllables.append(syl)

# freq[i][j] counts how often syllable j follows syllable i in the corpus;
# the last column (size - 1) counts end-of-name occurrences.
size = len(syllables)
freq = [[0] * size for i in range(size)]
for p in PLANETS:
    lex = p.split("-")
    i = 0
    while i < len(lex) - 1:
        freq[syllables.index(lex[i])][syllables.index(lex[i+1])] += 1
        i += 1
    freq[syllables.index(lex[len(lex) - 1])][size - 1] += 1
def generate_name():
    """Generate a planet name from the syllable transition table.

    Chains 2-3 syllables by repeatedly following a successor whose
    transition count is exactly 1, then appends a random suffix.
    NOTE(review): syllables whose rows contain no count of exactly 1
    (all 0, or any transition seen more than once) are re-rolled — so
    frequently-repeated transitions can never be followed; confirm this
    is the intended behaviour.
    """
    planet_name = ""
    length = random.randint(2, 3)
    initial = random.randint(0, size - 1)
    while length > 0:
        # Re-roll until we land on a syllable with a unique successor.
        while 1 not in freq[initial]:
            initial = random.randint(0, size - 1)
        planet_name += syllables[initial]
        # Follow the (first) successor whose transition count is 1.
        initial = freq[initial].index(1)
        length -= 1
    suffix_index = random.randint(0, len(SUFFIXES) - 1)
    planet_name += f" {SUFFIXES[suffix_index]}"
    # Capitalise each word and drop any trailing space from empty suffixes.
    return (" ".join([s.capitalize() for s in planet_name.split(" ")])).strip()
if __name__ == "__main__":
print(generate_name())
| [
"dawsonren@gmail.com"
] | dawsonren@gmail.com |
def calculate_total(books):
    """Return the cheapest total price for *books* (a list of book ids).

    Pricing: one book costs 8.00, and bundles of 2/3/4/5 *distinct*
    titles get 5%/10%/20%/25% off (15.20 / 21.60 / 25.60 / 30.00).

    BUG FIX / rewrite: the original brute force enumerated every
    assignment of the books into at most three groups and required each
    group to appear in sorted order, so it returned wrong answers for
    unsorted input (e.g. [2, 1] priced as two singles, 16.00, instead of
    15.20) and could not form more than three bundles.  This version
    greedily builds the largest possible distinct bundles and then applies
    the classic 5+3 -> 4+4 regrouping, which is optimal for this price
    table, order-independent, and near-linear instead of O(3^n).
    """
    if not books:
        return 0.0
    prices = {1: 8.0, 2: 15.2, 3: 21.6, 4: 25.6, 5: 30.0}
    # Count copies of each title.
    counts = {}
    for book in books:
        counts[book] = counts.get(book, 0) + 1
    remaining = sorted(counts.values(), reverse=True)
    # Greedily form the largest possible distinct bundles (capped at 5).
    sizes = []
    while remaining:
        size = min(5, len(remaining))
        sizes.append(size)
        remaining = [c - 1 for c in remaining[:size]] + remaining[size:]
        remaining = sorted((c for c in remaining if c > 0), reverse=True)
    # Two bundles of 4 (2 * 25.60) are cheaper than a 5 plus a 3 (51.60).
    while 5 in sizes and 3 in sizes:
        sizes.remove(5)
        sizes.remove(3)
        sizes += [4, 4]
    return sum(prices[s] for s in sizes)
| [
"kandeeps@mcmaster.ca"
] | kandeeps@mcmaster.ca |
a9b556949473408521e5fae46b690dbc52cc4f55 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/pybites/bitesofpy-master/!201-300/204/test_pomodoro.py | ed5d098ac2af44caaaf4144782768d028d668cea | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,331 | py | from typing import Union
import pytest
from pomodoro import break_time, lunch_time, main, session, work_time
@pytest.mark.asyncio
async def test_break_time(capfd):
    """break_time: verify type annotations and the printed break banner."""
    anno = break_time.__annotations__
    assert anno["delay"] == Union[int, float]
    assert anno["loop"] == int
    assert anno["return"] is None
    delay = 0.0001
    await break_time(delay, 1)
    # Captured stdout must name the loop index and the break length.
    output = capfd.readouterr()[0].strip()
    assert "[1]" in output
    assert f"Time for a {int(delay/60)} min break!" in output
@pytest.mark.asyncio
async def test_lunch_time(capfd):
    """lunch_time: verify type annotations and the printed lunch banner."""
    anno = lunch_time.__annotations__
    assert anno["delay"] == Union[int, float]
    assert anno["return"] is None
    delay = 0.06
    await lunch_time(delay)
    output = capfd.readouterr()[0].strip()
    assert "Time for lunch!" in output
@pytest.mark.asyncio
async def test_work_time(capfd):
    """work_time: verify type annotations and the printed work banner."""
    anno = work_time.__annotations__
    assert anno["delay"] == Union[int, float]
    assert anno["return"] is None
    delay = 0.0025
    await work_time(delay, 3)
    # Captured stdout must name the loop index and the work prompt.
    output = capfd.readouterr()[0].strip()
    assert "[3]" in output
    assert "Time to work!" in output
@pytest.mark.asyncio
async def test_session(capfd):
    """session: a work/short-break cycle, with no lunch and 8 output lines."""
    anno = session.__annotations__
    assert anno["work_length"] == Union[int, float]
    assert anno["short_break_length"] == Union[int, float]
    assert anno["long_break_length"] == Union[int, float]
    assert anno["return"] is None
    await session(0.0025, 0.0005, 0.003)
    output = capfd.readouterr()[0].strip()
    assert "Time to work!" in output
    assert "min break!" in output
    # A session alone never announces lunch.
    assert "Time for lunch!" not in output
    assert len(output.splitlines()) == 8
@pytest.mark.asyncio
async def test_main(capfd):
    """main: a full day — start banner, sessions, lunch, completion, 45 lines."""
    anno = main.__annotations__
    assert anno["work_length"] == Union[int, float]
    assert anno["short_break_length"] == Union[int, float]
    assert anno["long_break_length"] == Union[int, float]
    assert anno["lunch_length"] == Union[int, float]
    assert anno["return"] is None
    await main(0.0025, 0.0005, 0.003, 0.01)
    output = capfd.readouterr()[0].strip()
    # "Pomodor" matches the (misspelled) banner printed by the module under test.
    assert "Pomodor timer started at" in output
    assert "Time to work!" in output
    assert "min break!" in output
    assert "Time for lunch!" in output
    assert "Work day completed at" in output
    assert len(output.splitlines()) == 45
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
4db21bd2559e1f6e5ddbbca86d371b4cfd2e4bdd | 258f18e092a2b563521d3ea326ce443a34cd4ad0 | /manage.py | 034ab7d96befe473cdb6e7a780439293370779a4 | [] | no_license | jtkaufman737/django_foodie_app | 2b9e6badb79bb61c433e4e16581bac59ee2c077b | bdf0fd7de49e878c25df66129d87205138be5212 | refs/heads/master | 2022-12-09T06:17:01.427737 | 2020-09-04T14:31:48 | 2020-09-04T14:31:48 | 289,298,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative tasks."""
    # Point Django at this project's settings module unless already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'menu_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    else:
        execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"jtkaufman737@gmail.com"
] | jtkaufman737@gmail.com |
e33fe0145613768d16866c5fc41bc2560e783bf5 | 70bee1e4e770398ae7ad9323bd9ea06f279e2796 | /test/test_istio_authorization_policy_source.py | d06474312ad2007728f5c1f1dbe3e96ba1395147 | [] | no_license | hi-artem/twistlock-py | c84b420b1e582b3c4cf3631eb72dac6d659d4746 | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | refs/heads/main | 2023-07-18T07:57:57.705014 | 2021-08-22T04:36:33 | 2021-08-22T04:36:33 | 398,637,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.istio_authorization_policy_source import IstioAuthorizationPolicySource # noqa: E501
from openapi_client.rest import ApiException
class TestIstioAuthorizationPolicySource(unittest.TestCase):
    """IstioAuthorizationPolicySource unit test stubs.

    Auto-generated by OpenAPI Generator; checks only that the model can
    be constructed with and without its optional fields.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Test IstioAuthorizationPolicySource

        include_option is a boolean, when False only required
        params are included, when True both required and
        optional params are included """
        # model = openapi_client.models.istio_authorization_policy_source.IstioAuthorizationPolicySource()  # noqa: E501
        if include_optional :
            return IstioAuthorizationPolicySource(
                namespaces = [
                    ''
                    ],
                principals = [
                    ''
                    ]
            )
        else :
            # The model has no required fields, so this is an empty instance.
            return IstioAuthorizationPolicySource(
            )

    def testIstioAuthorizationPolicySource(self):
        """Test IstioAuthorizationPolicySource"""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"aakatev@virtru.com"
] | aakatev@virtru.com |
e9007469f0c62d35934f81d737afe7d8be412af9 | db38a57c46e9ef9d0a8f433dfb8df78ab589ef43 | /Server[raspberry]/kinesisServerRaspi.py | 80349929b46a02477838f6ef5cedd62f6bb5e380 | [] | no_license | Irraky/Kinesis | b49cdd48cb083532c36bb287f92559db9647f572 | 3679f4c4be16c60b14260a2bcf25a22bd00a017a | refs/heads/master | 2023-02-16T17:11:55.073806 | 2021-01-10T22:48:30 | 2021-01-10T22:48:30 | 319,982,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import socket
import sys
# BUG FIX: the Raspberry Pi GPIO library is RPi.GPIO; "TPi.GPIO" does not
# exist and made this script fail at import time.
import RPi.GPIO as GPIO
import time

# Use Broadcom (BCM) pin numbering.
GPIO.setmode(GPIO.BCM)
# Pin wired to the Arduino; driven low while a client is connected.
PINARDUINO = 21

# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Bind the socket to the port
server_address = ('10.3.141.1', 7000)
print('starting up on %s port %s' % server_address)
sock.bind(server_address)

# Set the Arduino pin up as an output, idle high.
GPIO.setup(PINARDUINO, GPIO.OUT)
GPIO.output(PINARDUINO, 1)

# Listen for incoming connections
sock.listen(1)
while True:
    # Wait for a connection
    print('waiting for a connection')
    connection, client_address = sock.accept()
    try:
        print('connection from', client_address)
        # Signal the Arduino that a client is connected (active low).
        GPIO.output(PINARDUINO, 0)
        # Receive the data in small chunks and echo it back to the client.
        while True:
            data = connection.recv(16)
            print('received "%s"' % data)
            if data:
                print('sending data back to the client')
                connection.sendall(data)
            else:
                print('no more data from', client_address)
                break
        # Hold briefly, then return the pin to its idle-high state.
        time.sleep(3)
        GPIO.output(PINARDUINO, 1)
    finally:
        # Clean up the connection
        connection.close()
"drecours@student.42.fr"
] | drecours@student.42.fr |
e551dbec93a15d5f8a6eb4b246b2a3a381e2691e | f62fd455e593a7ad203a5c268e23129473d968b6 | /tacker-0.7.0/tacker/db/vnfm/vnfm_db.py | e7ca3763bffe6461854f842cd873c008ae70d313 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 27,220 | py | # Copyright 2013, 2014 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from tacker.api.v1 import attributes
from tacker import context as t_context
from tacker.db.common_services import common_services_db
from tacker.db import db_base
from tacker.db import model_base
from tacker.db import models_v1
from tacker.db import types
from tacker.extensions import vnfm
from tacker import manager
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
_ACTIVE_UPDATE_ERROR_DEAD = (
constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
constants.ERROR, constants.DEAD)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
###########################################################################
# db tables
class VNFD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
models_v1.Audit):
"""Represents VNFD to create VNF."""
__tablename__ = 'vnfd'
# Descriptive name
name = sa.Column(sa.String(255), nullable=False)
description = sa.Column(sa.Text)
# service type that this service vm provides.
# At first phase, this includes only single service
# In future, single service VM may accomodate multiple services.
service_types = orm.relationship('ServiceType', backref='vnfd')
# driver to communicate with service managment
mgmt_driver = sa.Column(sa.String(255))
# (key, value) pair to spin up
attributes = orm.relationship('VNFDAttribute',
backref='vnfd')
# vnfd template source - inline or onboarded
template_source = sa.Column(sa.String(255), server_default='onboarded')
class ServiceType(model_base.BASE, models_v1.HasId, models_v1.HasTenant):
"""Represents service type which hosting vnf provides.
Since a vnf may provide many services, This is one-to-many
relationship.
"""
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'),
nullable=False)
service_type = sa.Column(sa.String(64), nullable=False)
class VNFDAttribute(model_base.BASE, models_v1.HasId):
"""Represents attributes necessary for spinning up VM in (key, value) pair
key value pair is adopted for being agnostic to actuall manager of VMs.
The interpretation is up to actual driver of hosting vnf.
"""
__tablename__ = 'vnfd_attribute'
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'),
nullable=False)
key = sa.Column(sa.String(255), nullable=False)
value = sa.Column(sa.TEXT(65535), nullable=True)
class VNF(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
models_v1.Audit):
"""Represents vnfs that hosts services.
Here the term, 'VM', is intentionally avoided because it can be
VM or other container.
"""
__tablename__ = 'vnf'
vnfd_id = sa.Column(types.Uuid, sa.ForeignKey('vnfd.id'))
vnfd = orm.relationship('VNFD')
name = sa.Column(sa.String(255), nullable=False)
description = sa.Column(sa.Text, nullable=True)
# sufficient information to uniquely identify hosting vnf.
# In case of openstack manager, it's UUID of heat stack.
instance_id = sa.Column(sa.String(64), nullable=True)
# For a management tool to talk to manage this hosting vnf.
# opaque string.
# e.g. (driver, mgmt_url) = (ssh, ip address), ...
mgmt_url = sa.Column(sa.String(255), nullable=True)
attributes = orm.relationship("VNFAttribute", backref="vnf")
status = sa.Column(sa.String(64), nullable=False)
vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
placement_attr = sa.Column(types.Json, nullable=True)
vim = orm.relationship('Vim')
error_reason = sa.Column(sa.Text, nullable=True)
class VNFAttribute(model_base.BASE, models_v1.HasId):
"""Represents kwargs necessary for spinning up VM in (key, value) pair.
key value pair is adopted for being agnostic to actuall manager of VMs.
The interpretation is up to actual driver of hosting vnf.
"""
__tablename__ = 'vnf_attribute'
vnf_id = sa.Column(types.Uuid, sa.ForeignKey('vnf.id'),
nullable=False)
key = sa.Column(sa.String(255), nullable=False)
# json encoded value. example
# "nic": [{"net-id": <net-uuid>}, {"port-id": <port-uuid>}]
value = sa.Column(sa.TEXT(65535), nullable=True)
class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
@property
def _core_plugin(self):
return manager.TackerManager.get_plugin()
def subnet_id_to_network_id(self, context, subnet_id):
subnet = self._core_plugin.get_subnet(context, subnet_id)
return subnet['network_id']
def __init__(self):
super(VNFMPluginDb, self).__init__()
self._cos_db_plg = common_services_db.CommonServicesPluginDb()
def _get_resource(self, context, model, id):
try:
if uuidutils.is_uuid_like(id):
return self._get_by_id(context, model, id)
return self._get_by_name(context, model, id)
except orm_exc.NoResultFound:
if issubclass(model, VNFD):
raise vnfm.VNFDNotFound(vnfd_id=id)
elif issubclass(model, ServiceType):
raise vnfm.ServiceTypeNotFound(service_type_id=id)
if issubclass(model, VNF):
raise vnfm.VNFNotFound(vnf_id=id)
else:
raise
def _make_attributes_dict(self, attributes_db):
return dict((attr.key, attr.value) for attr in attributes_db)
def _make_service_types_list(self, service_types):
return [service_type.service_type
for service_type in service_types]
def _make_vnfd_dict(self, vnfd, fields=None):
res = {
'attributes': self._make_attributes_dict(vnfd['attributes']),
'service_types': self._make_service_types_list(
vnfd.service_types)
}
key_list = ('id', 'tenant_id', 'name', 'description',
'mgmt_driver', 'created_at', 'updated_at',
'template_source')
res.update((key, vnfd[key]) for key in key_list)
return self._fields(res, fields)
def _make_dev_attrs_dict(self, dev_attrs_db):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
def _make_vnf_dict(self, vnf_db, fields=None):
LOG.debug(_('vnf_db %s'), vnf_db)
LOG.debug(_('vnf_db attributes %s'), vnf_db.attributes)
res = {
'vnfd':
self._make_vnfd_dict(vnf_db.vnfd),
'attributes': self._make_dev_attrs_dict(vnf_db.attributes),
}
key_list = ('id', 'tenant_id', 'name', 'description', 'instance_id',
'vim_id', 'placement_attr', 'vnfd_id', 'status',
'mgmt_url', 'error_reason', 'created_at', 'updated_at')
res.update((key, vnf_db[key]) for key in key_list)
return self._fields(res, fields)
@staticmethod
def _mgmt_driver_name(vnf_dict):
return vnf_dict['vnfd']['mgmt_driver']
@staticmethod
def _instance_id(vnf_dict):
return vnf_dict['instance_id']
def create_vnfd(self, context, vnfd):
vnfd = vnfd['vnfd']
LOG.debug(_('vnfd %s'), vnfd)
tenant_id = self._get_tenant_id_for_create(context, vnfd)
service_types = vnfd.get('service_types')
mgmt_driver = vnfd.get('mgmt_driver')
template_source = vnfd.get("template_source")
if (not attributes.is_attr_set(service_types)):
LOG.debug(_('service types unspecified'))
raise vnfm.ServiceTypesNotSpecified()
with context.session.begin(subtransactions=True):
vnfd_id = str(uuid.uuid4())
vnfd_db = VNFD(
id=vnfd_id,
tenant_id=tenant_id,
name=vnfd.get('name'),
description=vnfd.get('description'),
mgmt_driver=mgmt_driver,
template_source=template_source)
context.session.add(vnfd_db)
for (key, value) in vnfd.get('attributes', {}).items():
attribute_db = VNFDAttribute(
id=str(uuid.uuid4()),
vnfd_id=vnfd_id,
key=key,
value=value)
context.session.add(attribute_db)
for service_type in (item['service_type']
for item in vnfd['service_types']):
service_type_db = ServiceType(
id=str(uuid.uuid4()),
tenant_id=tenant_id,
vnfd_id=vnfd_id,
service_type=service_type)
context.session.add(service_type_db)
LOG.debug(_('vnfd_db %(vnfd_db)s %(attributes)s '),
{'vnfd_db': vnfd_db,
'attributes': vnfd_db.attributes})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
LOG.debug(_('vnfd_dict %s'), vnfd_dict)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_ONBOARDED,
evt_type=constants.RES_EVT_CREATE,
tstamp=vnfd_dict[constants.RES_EVT_CREATED_FLD])
return vnfd_dict
def update_vnfd(self, context, vnfd_id,
vnfd):
with context.session.begin(subtransactions=True):
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
vnfd_db.update(vnfd['vnfd'])
vnfd_db.update({'updated_at': timeutils.utcnow()})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_NA_STATE,
evt_type=constants.RES_EVT_UPDATE,
tstamp=vnfd_dict[constants.RES_EVT_UPDATED_FLD])
return vnfd_dict
def delete_vnfd(self,
context,
vnfd_id,
soft_delete=True):
with context.session.begin(subtransactions=True):
# TODO(yamahata): race. prevent from newly inserting hosting vnf
# that refers to this vnfd
vnfs_db = context.session.query(VNF).filter_by(
vnfd_id=vnfd_id).first()
if vnfs_db is not None and vnfs_db.deleted_at is None:
raise vnfm.VNFDInUse(vnfd_id=vnfd_id)
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
if soft_delete:
vnfd_db.update({'deleted_at': timeutils.utcnow()})
self._cos_db_plg.create_event(
context, res_id=vnfd_db['id'],
res_type=constants.RES_TYPE_VNFD,
res_state=constants.RES_EVT_NA_STATE,
evt_type=constants.RES_EVT_DELETE,
tstamp=vnfd_db[constants.RES_EVT_DELETED_FLD])
else:
context.session.query(ServiceType).filter_by(
vnfd_id=vnfd_id).delete()
context.session.query(VNFDAttribute).filter_by(
vnfd_id=vnfd_id).delete()
context.session.delete(vnfd_db)
def get_vnfd(self, context, vnfd_id, fields=None):
vnfd_db = self._get_resource(context, VNFD, vnfd_id)
return self._make_vnfd_dict(vnfd_db)
def get_vnfds(self, context, filters, fields=None):
if 'template_source' in filters and \
filters['template_source'][0] == 'all':
filters.pop('template_source')
return self._get_collection(context, VNFD,
self._make_vnfd_dict,
filters=filters, fields=fields)
def choose_vnfd(self, context, service_type,
required_attributes=None):
required_attributes = required_attributes or []
LOG.debug(_('required_attributes %s'), required_attributes)
with context.session.begin(subtransactions=True):
query = (
context.session.query(VNFD).
filter(
sa.exists().
where(sa.and_(
VNFD.id == ServiceType.vnfd_id,
ServiceType.service_type == service_type))))
for key in required_attributes:
query = query.filter(
sa.exists().
where(sa.and_(
VNFD.id ==
VNFDAttribute.vnfd_id,
VNFDAttribute.key == key)))
LOG.debug(_('statements %s'), query)
vnfd_db = query.first()
if vnfd_db:
return self._make_vnfd_dict(vnfd_db)
def _vnf_attribute_update_or_create(
self, context, vnf_id, key, value):
arg = (self._model_query(context, VNFAttribute).
filter(VNFAttribute.vnf_id == vnf_id).
filter(VNFAttribute.key == key).first())
if arg:
arg.value = value
else:
arg = VNFAttribute(
id=str(uuid.uuid4()), vnf_id=vnf_id,
key=key, value=value)
context.session.add(arg)
# called internally, not by REST API
def _create_vnf_pre(self, context, vnf):
LOG.debug(_('vnf %s'), vnf)
tenant_id = self._get_tenant_id_for_create(context, vnf)
vnfd_id = vnf['vnfd_id']
name = vnf.get('name')
vnf_id = str(uuid.uuid4())
attributes = vnf.get('attributes', {})
vim_id = vnf.get('vim_id')
placement_attr = vnf.get('placement_attr', {})
with context.session.begin(subtransactions=True):
vnfd_db = self._get_resource(context, VNFD,
vnfd_id)
vnf_db = VNF(id=vnf_id,
tenant_id=tenant_id,
name=name,
description=vnfd_db.description,
instance_id=None,
vnfd_id=vnfd_id,
vim_id=vim_id,
placement_attr=placement_attr,
status=constants.PENDING_CREATE,
error_reason=None)
context.session.add(vnf_db)
for key, value in attributes.items():
arg = VNFAttribute(
id=str(uuid.uuid4()), vnf_id=vnf_id,
key=key, value=value)
context.session.add(arg)
evt_details = "VNF UUID assigned."
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.PENDING_CREATE,
evt_type=constants.RES_EVT_CREATE,
tstamp=vnf_db[constants.RES_EVT_CREATED_FLD],
details=evt_details)
return self._make_vnf_dict(vnf_db)
# called internally, not by REST API
# intsance_id = None means error on creation
def _create_vnf_post(self, context, vnf_id, instance_id,
mgmt_url, vnf_dict):
LOG.debug(_('vnf_dict %s'), vnf_dict)
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(CREATE_STATES)).
one())
query.update({'instance_id': instance_id, 'mgmt_url': mgmt_url})
if instance_id is None or vnf_dict['status'] == constants.ERROR:
query.update({'status': constants.ERROR})
for (key, value) in vnf_dict['attributes'].items():
# do not store decrypted vim auth in vnf attr table
if 'vim_auth' not in key:
self._vnf_attribute_update_or_create(context, vnf_id,
key, value)
evt_details = ("Infra Instance ID created: %s and "
"Mgmt URL set: %s") % (instance_id, mgmt_url)
self._cos_db_plg.create_event(
context, res_id=vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=vnf_dict['status'],
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(), details=evt_details)
def _create_vnf_status(self, context, vnf_id, new_status):
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(CREATE_STATES)).one())
query.update({'status': new_status})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_CREATE,
tstamp=timeutils.utcnow(), details="VNF creation completed")
def _get_vnf_db(self, context, vnf_id, current_statuses, new_status):
try:
vnf_db = (
self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status.in_(current_statuses)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
raise vnfm.VNFNotFound(vnf_id=vnf_id)
if vnf_db.status == constants.PENDING_UPDATE:
raise vnfm.VNFInUse(vnf_id=vnf_id)
vnf_db.update({'status': new_status})
return vnf_db
def _update_vnf_scaling_status(self,
context,
policy,
previous_statuses,
status,
mgmt_url=None):
with context.session.begin(subtransactions=True):
vnf_db = self._get_vnf_db(
context, policy['vnf']['id'], previous_statuses, status)
if mgmt_url:
vnf_db.update({'mgmt_url': mgmt_url})
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=updated_vnf_dict['id'],
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_SCALE,
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_pre(self, context, vnf_id):
with context.session.begin(subtransactions=True):
vnf_db = self._get_vnf_db(
context, vnf_id, _ACTIVE_UPDATE, constants.PENDING_UPDATE)
updated_vnf_dict = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=updated_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=timeutils.utcnow())
return updated_vnf_dict
def _update_vnf_post(self, context, vnf_id, new_status,
new_vnf_dict=None):
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_UPDATE).
update({'status': new_status,
'updated_at': timeutils.utcnow()}))
dev_attrs = new_vnf_dict.get('attributes', {})
(context.session.query(VNFAttribute).
filter(VNFAttribute.vnf_id == vnf_id).
filter(~VNFAttribute.key.in_(dev_attrs.keys())).
delete(synchronize_session='fetch'))
for (key, value) in dev_attrs.items():
if 'vim_auth' not in key:
self._vnf_attribute_update_or_create(context, vnf_id,
key, value)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_vnf_dict['status'],
evt_type=constants.RES_EVT_UPDATE,
tstamp=new_vnf_dict[constants.RES_EVT_UPDATED_FLD])
def _delete_vnf_pre(self, context, vnf_id):
with context.session.begin(subtransactions=True):
vnf_db = self._get_vnf_db(
context, vnf_id, _ACTIVE_UPDATE_ERROR_DEAD,
constants.PENDING_DELETE)
deleted_vnf_db = self._make_vnf_dict(vnf_db)
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=deleted_vnf_db['status'],
evt_type=constants.RES_EVT_DELETE,
tstamp=timeutils.utcnow(), details="VNF delete initiated")
return deleted_vnf_db
def _delete_vnf_post(self, context, vnf_dict, error, soft_delete=True):
vnf_id = vnf_dict['id']
with context.session.begin(subtransactions=True):
query = (
self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(VNF.status == constants.PENDING_DELETE))
if error:
query.update({'status': constants.ERROR})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.ERROR,
evt_type=constants.RES_EVT_DELETE,
tstamp=timeutils.utcnow(),
details="VNF Delete ERROR")
else:
if soft_delete:
deleted_time_stamp = timeutils.utcnow()
query.update({'deleted_at': deleted_time_stamp})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=constants.PENDING_DELETE,
evt_type=constants.RES_EVT_DELETE,
tstamp=deleted_time_stamp,
details="VNF Delete Complete")
else:
(self._model_query(context, VNFAttribute).
filter(VNFAttribute.vnf_id == vnf_id).delete())
query.delete()
# Delete corresponding vnfd
if vnf_dict['vnfd']['template_source'] == "inline":
self.delete_vnfd(context, vnf_dict["vnfd_id"])
# reference implementation. needs to be overrided by subclass
def create_vnf(self, context, vnf):
vnf_dict = self._create_vnf_pre(context, vnf)
# start actual creation of hosting vnf.
# Waiting for completion of creation should be done backgroundly
# by another thread if it takes a while.
instance_id = str(uuid.uuid4())
vnf_dict['instance_id'] = instance_id
self._create_vnf_post(context, vnf_dict['id'], instance_id, None,
vnf_dict)
self._create_vnf_status(context, vnf_dict['id'],
constants.ACTIVE)
return vnf_dict
# reference implementation. needs to be overrided by subclass
def update_vnf(self, context, vnf_id, vnf):
vnf_dict = self._update_vnf_pre(context, vnf_id)
# start actual update of hosting vnf
# waiting for completion of update should be done backgroundly
# by another thread if it takes a while
self._update_vnf_post(context, vnf_id, constants.ACTIVE)
return vnf_dict
# reference implementation. needs to be overrided by subclass
def delete_vnf(self, context, vnf_id, soft_delete=True):
vnf_dict = self._delete_vnf_pre(context, vnf_id)
# start actual deletion of hosting vnf.
# Waiting for completion of deletion should be done backgroundly
# by another thread if it takes a while.
self._delete_vnf_post(context,
vnf_dict,
False,
soft_delete=soft_delete)
def get_vnf(self, context, vnf_id, fields=None):
vnf_db = self._get_resource(context, VNF, vnf_id)
return self._make_vnf_dict(vnf_db, fields)
def get_vnfs(self, context, filters=None, fields=None):
return self._get_collection(context, VNF, self._make_vnf_dict,
filters=filters, fields=fields)
def set_vnf_error_status_reason(self, context, vnf_id, new_reason):
with context.session.begin(subtransactions=True):
(self._model_query(context, VNF).
filter(VNF.id == vnf_id).
update({'error_reason': new_reason}))
def _mark_vnf_status(self, vnf_id, exclude_status, new_status):
context = t_context.get_admin_context()
with context.session.begin(subtransactions=True):
try:
vnf_db = (
self._model_query(context, VNF).
filter(VNF.id == vnf_id).
filter(~VNF.status.in_(exclude_status)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
LOG.warning(_('no vnf found %s'), vnf_id)
return False
vnf_db.update({'status': new_status})
self._cos_db_plg.create_event(
context, res_id=vnf_id,
res_type=constants.RES_TYPE_VNF,
res_state=new_status,
evt_type=constants.RES_EVT_MONITOR,
tstamp=timeutils.utcnow())
return True
def _mark_vnf_error(self, vnf_id):
return self._mark_vnf_status(
vnf_id, [constants.DEAD], constants.ERROR)
def _mark_vnf_dead(self, vnf_id):
exclude_status = [
constants.DOWN,
constants.PENDING_CREATE,
constants.PENDING_UPDATE,
constants.PENDING_DELETE,
constants.INACTIVE,
constants.ERROR]
return self._mark_vnf_status(
vnf_id, exclude_status, constants.DEAD)
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
59891f64ba1120d54b73a4d999144edc28304be6 | 652cea9eea05f9baa54f93830746fda5db13af25 | /vscode_test.py | 7e87500081095e377099a6a5b0315ca4f1d65378 | [] | no_license | linbirg/qt | 091af74b0ac4527883c8c0451135d77bdc310d23 | 4cb85944cebf903f0a43d0c2883a0e4839dcad31 | refs/heads/master | 2020-07-23T03:36:40.889622 | 2019-10-28T06:13:34 | 2019-10-28T06:13:34 | 207,435,268 | 22 | 9 | null | null | null | null | UTF-8 | Python | false | false | 42,976 | py | # coding: utf-8
##### 下方代码为 IDE 运行必备代码 #####
import jqdata
if __name__ == '__main__':
import jqsdk
params = {
'token': '88e0627cf5b11e6e988637de1f3be8e8', # 在客户端系统设置中找,字符串格式,例如 'asdf...'
'algorithmId': 6, # 在客户端我的策略中,整数型,例如:1;回测结束后在客户端此ID策略的回测列表中找对应的回测结果
'baseCapital': 100000,
'frequency': 'day',
'startTime': '2017-06-01',
'endTime': '2017-08-01',
'name': "费雪选股",
}
jqsdk.run(params)
##### 下面是策略代码编辑部分 #####
# 克隆自聚宽文章:https://www.joinquant.com/post/7029
# 标题:小费雪选股法(终)
# 作者:小兵哥
#enable_profile()
import numpy as np
import talib
import pandas
import scipy as sp
import scipy.optimize
import datetime as dt
from scipy import linalg as sla
from scipy import spatial
# from jqdata import gta
from jqdata import jy as gta
from jqdata import *
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import statsmodels.api as sm
def initialize(context):
#用沪深 300 做回报基准
set_benchmark('000300.XSHG')
# 滑点、真实价格
set_slippage(FixedSlippage(0.000))
set_option('use_real_price', True)
# 关闭部分log
log.set_level('order', 'error')
run_daily(fun_main, '10:30')
def after_code_changed(context):
# 变量都挪到 after_code_changed 里
g.quantlib = quantlib()
# 策略起停标志位
g.quantlib.fun_set_var(context, 'algo_enable', True)
# 定义风险敞口
g.quantlib.fun_set_var(context, 'riskExposure', 0.03)
# 正态分布概率表,标准差倍数以及置信率
# 1.96, 95%; 2.06, 96%; 2.18, 97%; 2.34, 98%; 2.58, 99%; 5, 99.9999%
g.quantlib.fun_set_var(context, 'confidencelevel', 1.96)
# 调仓参数
g.quantlib.fun_set_var(context, 'hold_cycle', 30)
g.quantlib.fun_set_var(context, 'hold_periods', 0)
g.quantlib.fun_set_var(context, 'stock_list', [])
g.quantlib.fun_set_var(context, 'position_price', {})
g.quantlib.fun_set_var(context, 'recal_periods', 0)
g.quantlib.fun_set_var(context, 'version', 1.0)
if context.version < 1.0:
context.hold_periods = 0
context.riskExposure = 0.03
context.version = 1.0
def before_trading_start(context):
# 定义股票池
moneyfund = ['511880.XSHG','511010.XSHG','511220.XSHG']
# 上市不足 60 天的剔除掉
context.moneyfund = g.quantlib.fun_delNewShare(context, moneyfund, 60)
def fun_main(context):
# 引用 lib
g.value_factor = value_factor_lib()
# g.quantlib = quantlib()
context.msg = ""
# 检查是否需要调仓
rebalance_flag, context.position_price, context.hold_periods, msg = \
g.quantlib.fun_needRebalance('algo', context.moneyfund, context.stock_list, context.position_price, \
context.hold_periods, context.hold_cycle, 0.25)
context.msg += msg
statsDate = context.current_dt.date() - dt.timedelta(1)
#context.algo_enable, context.recal_periods, rebalance_flag = g.quantlib.fun_check_algo(context.algo_enable, context.recal_periods, rebalance_flag, statsDate)
trade_style = False # True 会交易进行类似 100股的买卖,False 则只有在仓位变动 >25% 的时候,才产生交易
if rebalance_flag:
stock_list = []
if context.algo_enable:
#获取坏股票列表,将会剔除
# bad_stock_list = g.quantlib.fun_get_bad_stock_list(statsDate)
# 低估值策略
value_factor_stock_list = g.value_factor.fun_get_stock_list(context, 5, statsDate, None)
stock_list = value_factor_stock_list
# 分配仓位
equity_ratio, bonds_ratio = g.quantlib.fun_assetAllocationSystem(stock_list, context.moneyfund, context.confidencelevel, statsDate)
risk_ratio = 0
if len(equity_ratio.keys()) >= 1:
risk_ratio = context.riskExposure / len(equity_ratio.keys())
# 分配头寸,根据预设的风险敞口,计算交易时的比例
position_ratio = g.quantlib.fun_calPosition(equity_ratio, bonds_ratio, 1.0, risk_ratio, context.moneyfund, context.portfolio.portfolio_value, context.confidencelevel, statsDate)
trade_style = True
context.stock_list = position_ratio.keys()
# 更新待购价格
context.position_price = g.quantlib.fun_update_positions_price(position_ratio)
# 卖掉已有且不在待购清单里的股票
for stock in context.portfolio.positions.keys():
if stock not in position_ratio:
position_ratio[stock] = 0
context.position_ratio = position_ratio
print(position_ratio)
# 调仓,执行交易
g.quantlib.fun_do_trade(context, context.position_ratio, context.moneyfund, trade_style)
class value_factor_lib():
def fun_get_stock_list(self, context, hold_number, statsDate=None, bad_stock_list=[]):
relative_ps = self.fun_get_relative_ps(context, statsDate)
low_ps = self.fun_get_low_ps(context, statsDate)
good_stock_list = list(set(relative_ps) & set(low_ps))
# 取净利润增长率为正的
df = g.quantlib.get_fundamentals_sum('income', income.net_profit, statsDate)
df = df.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df.rename(columns={'sum_value':'ttm_1y'}, inplace=True)
df1 = g.quantlib.get_fundamentals_sum('income', income.net_profit, (statsDate - dt.timedelta(365)))
df1 = df1.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df1.rename(columns={'sum_value':'ttm_2y'}, inplace=True)
df = df.merge(df1, on='code')
df = df.fillna(value=0)
df['inc_net_profit'] = 1.0*(df['ttm_1y'] - df['ttm_2y'])
df = df[df.inc_net_profit > 0]
inc_net_profit_list = list(df.code)
good_stock_list = list(set(good_stock_list) & set(inc_net_profit_list))
print(len(good_stock_list))
# 按行业取营业收入增长率前 1/3
df = g.quantlib.get_fundamentals_sum('income', income.operating_revenue, statsDate)
df = df.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df.rename(columns={'sum_value':'ttm_1y'}, inplace=True)
df1 = g.quantlib.get_fundamentals_sum('income', income.operating_revenue, (statsDate - dt.timedelta(365)))
df1 = df1.drop(['0Q', '1Q', '2Q', '3Q'], axis=1)
df1.rename(columns={'sum_value':'ttm_2y'}, inplace=True)
df = df.merge(df1, on='code')
df = df.fillna(value=0)
df['inc_operating_revenue'] = 1.0*(df['ttm_1y'] - df['ttm_2y']) / abs(df['ttm_2y'])
df = df.fillna(value = 0)
industry_list = g.quantlib.fun_get_industry(cycle=None)
#industry_list = g.quantlib.fun_get_industry_levelI()
inc_operating_revenue_list = []
for industry in industry_list:
stock_list = g.quantlib.fun_get_industry_stocks(industry, 2, statsDate)
df_inc_operating_revenue = df[df.code.isin(stock_list)]
df_inc_operating_revenue = df_inc_operating_revenue.sort_values(by='inc_operating_revenue', ascending=False)
inc_operating_revenue_list = inc_operating_revenue_list + list(df_inc_operating_revenue[:int(len(df_inc_operating_revenue)*0.33)].code)
good_stock_list = list(set(good_stock_list) & set(inc_operating_revenue_list))
print(len(good_stock_list))
# 指标剔除资产负债率相对行业最高的1/3的股票
df = get_fundamentals(query(balance.code, balance.total_liability, balance.total_assets), date = statsDate)
df = df.fillna(value=0)
df['liability_ratio'] = 1.0*(df['total_liability'] / df['total_assets'])
industry_list = g.quantlib.fun_get_industry(cycle=None)
#industry_list = g.quantlib.fun_get_industry_levelI()
liability_ratio_list = []
for industry in industry_list:
stock_list = g.quantlib.fun_get_industry_stocks(industry, 2, statsDate)
df_liability_ratio = df[df.code.isin(stock_list)]
df_liability_ratio = df_liability_ratio.sort_values(by='liability_ratio', ascending=True)
liability_ratio_list = liability_ratio_list + list(df_liability_ratio[:int(len(df_liability_ratio)*0.66)].code)
good_stock_list = list(set(good_stock_list) & set(liability_ratio_list))
# 剔除净利润率相对行业最低的1/3的股票;
df = get_fundamentals(query(indicator.code, indicator.net_profit_to_total_revenue ), date = statsDate)
df = df.fillna(value=0)
industry_list = g.quantlib.fun_get_industry(cycle=None)
#industry_list = g.quantlib.fun_get_industry_levelI()
profit_ratio_list = []
for industry in industry_list:
stock_list = g.quantlib.fun_get_industry_stocks(industry, 2, statsDate)
df_profit_ratio = df[df.code.isin(stock_list)]
df_profit_ratio = df_profit_ratio.sort_values('net_profit_to_total_revenue', ascending=False)
profit_ratio_list = profit_ratio_list + list(df_profit_ratio[:int(len(df_profit_ratio)*0.66)].code)
good_stock_list = list(set(good_stock_list) & set(profit_ratio_list))
stock_list = []
for stock in relative_ps:
#for stock in low_ps:
if stock in good_stock_list:
stock_list.append(stock)
print(len(good_stock_list))
positions_list = context.portfolio.positions.keys()
stock_list = g.quantlib.unpaused(stock_list, positions_list)
stock_list = g.quantlib.remove_st(stock_list, statsDate)
stock_list = g.quantlib.fun_delNewShare(context, stock_list, 30)
stock_list = stock_list[:hold_number*10]
stock_list = g.quantlib.remove_bad_stocks(stock_list, bad_stock_list)
stock_list = g.quantlib.remove_limit_up(stock_list, positions_list)
stock_list = g.quantlib.fun_diversity_by_industry(stock_list, int(hold_number*0.4), statsDate)
return stock_list[:hold_number]
def fun_get_relative_ps(self, context, statsDate=None):
def __fun_get_ps(statsDate, deltamonth):
__df = get_fundamentals(query(valuation.code, valuation.ps_ratio), date = (statsDate - dt.timedelta(30*deltamonth)))
__df.rename(columns={'ps_ratio':deltamonth}, inplace=True)
return __df
for i in range(48):
df1 = __fun_get_ps(statsDate, i)
if i == 0:
df = df1
else:
df = df.merge(df1, on='code')
df.index = list(df['code'])
df = df.drop(['code'], axis=1)
df = df.fillna(value=0, axis=0)
# 1. 计算相对市收率,相对市收率等于个股市收率除以全市场的市收率,这样处理的目的是为了剔除市场估值变化的影响
for i in range(len(df.columns)):
s = df.iloc[:,i]
median = s.median()
df.iloc[:,i] = s / median
length, stock_list, stock_dict = len(df), list(df.index), {}
# 2. 计算相对市收率N个月的移动平均值的N个月的标准差,并据此计算布林带上下轨(N个月的移动平均值+/-N个月移动平均的标准差)。N = 24
for i in range(length):
s = df.iloc[i,:]
if s.min() < 0:
pass
else:
# tmp_list 是24个月的相对市收率均值
tmp_list = []
for j in range(24):
tmp_list.append(s[j:j+24].mean())
# mean_value 是最近 24个月的相对市收率均值
mean_value = tmp_list[0]
# std_value 是相对市收率24个月的移动平均值的24个月的标准差
std_value = np.std(tmp_list)
tmp_dict = {}
# (mean_value - std_value),是布林线下轨(此处定义和一般布林线不一样,一般是 均线 - 2 倍标准差)
'''
研报原始的策略,选择 s[0] < mean_value - std_value 的标的,但因为 ps_ratio十分不稳定,跳跃很大,此区间里的测试结果非常不稳定
本策略退而求其次,选择均线-1倍标准差 和 均线 - 2 倍标准差之间的标的
大致反映策略的有效性
'''
if s[0] > (mean_value - 2.0*std_value) and s[0] < mean_value:
# 记录 相对市收率均值 / 当期相对市收率
stock_dict[stock_list[i]] = (1.0*mean_value/s[0])
stock_list = []
dict_score = stock_dict
dict_score = sorted(dict_score.items(), key=lambda d:d[1], reverse=True)
for idx in dict_score:
stock = idx[0]
stock_list.append(stock)
return stock_list
def fun_get_low_ps(self, context, statsDate=None):
df = get_fundamentals(
query(valuation.code, valuation.ps_ratio),
date = statsDate
)
# 根据 sp 去极值、中性化、标准化后,跨行业选最佳的标的
industry_list = g.quantlib.fun_get_industry(cycle=None)
df = df.fillna(value = 0)
sp_ratio = {}
df['SP'] = 1.0/df['ps_ratio']
df = df.drop(['ps_ratio'], axis=1)
for industry in industry_list:
tmpDict = g.quantlib.fun_get_factor(df, 'SP', industry, 2, statsDate).to_dict()
for stock in tmpDict.keys():
if stock in sp_ratio:
if sp_ratio[stock] < tmpDict[stock]:
sp_ratio[stock] = tmpDict[stock]
else:
sp_ratio[stock] = tmpDict[stock]
dict_score = sorted(sp_ratio.items(), key=lambda d:d[1], reverse=True)
stock_list = []
for idx in dict_score:
stock = idx[0]
stock_list.append(stock)
return stock_list[:int(len(stock_list)*0.5)]
# class quantlib():
# def get_fundamentals_sum(self, table_name='indicator', search='indicator.adjusted_profit', statsDate=None):
# # 取最近的五个季度财报的日期
# def __get_quarter(table_name, statsDate):
# '''
# 返回最近 n 个财报的日期
# 返回每个股票最近一个财报的日期
# '''
# # 取最新一季度的统计日期
# if table_name == 'indicator':
# q = query(indicator.code, indicator.statDate)
# elif table_name == 'income':
# q = query(income.code, income.statDate)
# elif table_name == 'cash_flow':
# q = query(cash_flow.code, cash_flow.statDate)
# elif table_name == 'balance':
# q = query(balance.code, balance.statDate)
# df = get_fundamentals(q, date = statsDate)
# stock_last_statDate = {}
# tmpDict = df.to_dict()
# for i in range(len(tmpDict['statDate'].keys())):
# # 取得每个股票的代码,以及最新的财报发布日
# stock_last_statDate[tmpDict['code'][i]] = tmpDict['statDate'][i]
# df = df.sort_values(by='statDate', ascending=False)
# # 取得最新的财报日期
# last_statDate = df.iloc[0,1]
# this_year = int(str(last_statDate)[0:4])
# this_month = str(last_statDate)[5:7]
# if this_month == '12':
# last_quarter = str(this_year) + 'q4'
# last_two_quarter = str(this_year) + 'q3'
# last_three_quarter = str(this_year) + 'q2'
# last_four_quarter = str(this_year) + 'q1'
# last_five_quarter = str(this_year - 1) + 'q4'
# elif this_month == '09':
# last_quarter = str(this_year) + 'q3'
# last_two_quarter = str(this_year) + 'q2'
# last_three_quarter = str(this_year) + 'q1'
# last_four_quarter = str(this_year - 1) + 'q4'
# last_five_quarter = str(this_year - 1) + 'q3'
# elif this_month == '06':
# last_quarter = str(this_year) + 'q2'
# last_two_quarter = str(this_year) + 'q1'
# last_three_quarter = str(this_year - 1) + 'q4'
# last_four_quarter = str(this_year - 1) + 'q3'
# last_five_quarter = str(this_year - 1) + 'q2'
# else: #this_month == '03':
# last_quarter = str(this_year) + 'q1'
# last_two_quarter = str(this_year - 1) + 'q4'
# last_three_quarter = str(this_year - 1) + 'q3'
# last_four_quarter = str(this_year - 1) + 'q2'
# last_five_quarter = str(this_year - 1) + 'q1'
# return last_quarter, last_two_quarter, last_three_quarter, last_four_quarter, last_five_quarter, stock_last_statDate
# # 查财报,返回指定值
# def __get_fundamentals_value(table_name, search, myDate):
# '''
# 输入查询日期
# 返回指定的财务数据,格式 dict
# '''
# if table_name == 'indicator':
# q = query(indicator.code, search, indicator.statDate)
# elif table_name == 'income':
# q = query(income.code, search, income.statDate)
# elif table_name == 'cash_flow':
# q = query(cash_flow.code, search, cash_flow.statDate)
# elif table_name == 'balance':
# q = query(balance.code, search, balance.statDate)
# df = get_fundamentals(q, statDate = myDate).fillna(value=0)
# tmpDict = df.to_dict()
# stock_dict = {}
# name = str(search).split('.')[-1]
# for i in range(len(tmpDict['statDate'].keys())):
# tmpList = []
# tmpList.append(tmpDict['statDate'][i])
# tmpList.append(tmpDict[name][i])
# stock_dict[tmpDict['code'][i]] = tmpList
# return stock_dict
# # 得到最近 n 个季度的统计时间
# last_quarter, last_two_quarter, last_three_quarter, last_four_quarter, last_five_quarter, stock_last_statDate = __get_quarter(table_name, statsDate)
# last_quarter_dict = __get_fundamentals_value(table_name, search, last_quarter)
# last_two_quarter_dict = __get_fundamentals_value(table_name, search, last_two_quarter)
# last_three_quarter_dict = __get_fundamentals_value(table_name, search, last_three_quarter)
# last_four_quarter_dict = __get_fundamentals_value(table_name, search, last_four_quarter)
# last_five_quarter_dict = __get_fundamentals_value(table_name, search, last_five_quarter)
# tmp_list = []
# stock_list = stock_last_statDate.keys()
# for stock in stock_list:
# tmp_dict = {}
# tmp_dict['code'] = stock
# value_list = []
# if stock in last_quarter_dict:
# if stock_last_statDate[stock] == last_quarter_dict[stock][0]:
# value_list.append(last_quarter_dict[stock][1])
# if stock in last_two_quarter_dict:
# value_list.append(last_two_quarter_dict[stock][1])
# if stock in last_three_quarter_dict:
# value_list.append(last_three_quarter_dict[stock][1])
# if stock in last_four_quarter_dict:
# value_list.append(last_four_quarter_dict[stock][1])
# if stock in last_five_quarter_dict:
# value_list.append(last_five_quarter_dict[stock][1])
# for i in range(4 - len(value_list)):
# value_list.append(0)
# tmp_dict['0Q'] = value_list[0]
# tmp_dict['1Q'] = value_list[1]
# tmp_dict['2Q'] = value_list[2]
# tmp_dict['3Q'] = value_list[3]
# tmp_dict['sum_value'] = value_list[0] + value_list[1] + value_list[2] + value_list[3]
# tmp_list.append(tmp_dict)
# df = pd.DataFrame(tmp_list)
# return df
# def fun_set_var(self, context, var_name, var_value):
# if var_name not in dir(context):
# setattr(context, var_name, var_value)
# def fun_check_price(self, algo_name, stock_list, position_price, gap_trigger):
# flag = False
# msg = ""
# if stock_list:
# h = history(1, '1d', 'close', stock_list, df=False)
# for stock in stock_list:
# curPrice = h[stock][0]
# if stock not in position_price:
# position_price[stock] = curPrice
# oldPrice = position_price[stock]
# if oldPrice != 0:
# deltaprice = abs(curPrice - oldPrice)
# if deltaprice / oldPrice > gap_trigger:
# msg = algo_name + "需要调仓: " + stock + ",现价: " + str(curPrice) + " / 原价格: " + str(oldPrice) + "\n"
# flag = True
# return flag, position_price, msg
# return flag, position_price, msg
# def fun_needRebalance(self, algo_name, moneyfund, stock_list, position_price, hold_periods, hold_cycle, gap_trigger):
# msg = ""
# rebalance_flag = False
# stocks_count = 0
# for stock in stock_list:
# if stock not in moneyfund:
# stocks_count += 1
# if stocks_count == 0:
# msg += algo_name + "调仓,因为持股数为 0 \n"
# rebalance_flag = True
# elif hold_periods == 0:
# msg += algo_name + "调仓,因为持股天数剩余为 0 \n"
# rebalance_flag = True
# if not rebalance_flag:
# rebalance_flag, position_price, msg2 = self.fun_check_price(algo_name, stock_list, position_price, gap_trigger)
# msg += msg2
# if rebalance_flag:
# hold_periods = hold_cycle
# else:
# hold_periods -= 1
# msg += algo_name + "离下次调仓还剩 " + str(hold_periods) + " 天\n"
# return rebalance_flag, position_price, hold_periods, msg
# # 更新持有股票的价格,每次调仓后跑一次
# def fun_update_positions_price(self, ratio):
# position_price = {}
# if ratio:
# h = history(1, '1m', 'close', ratio.keys(), df=False)
# for stock in ratio.keys():
# if ratio[stock] > 0:
# position_price[stock] = round(h[stock][0], 3)
# return position_price
# def fun_assetAllocationSystem(self, stock_list, moneyfund, confidencelevel, statsDate=None):
# def __fun_getEquity_ratio(__stocklist, confidencelevel, type, limit_up=1.0, limit_low=0.0, statsDate=None):
# __ratio = {}
# if __stocklist:
# if type == 1: #风险平价 历史模拟法
# # 正态分布概率表,标准差倍数以及置信率
# # 1.96, 95%; 2.06, 96%; 2.18, 97%; 2.34, 98%; 2.58, 99%; 5, 99.9999%
# __ratio = self.fun_calStockWeight_by_risk(confidencelevel, __stocklist, limit_up, limit_low, statsDate)
# elif type == 2: #马科维奇
# __ratio = self.fun_calStockWeight(__stocklist, limit_up, limit_low)
# elif type == 3: #最小方差
# __ratio = self.fun_cal_Weight_by_minvar(__stocklist, limit_up, limit_low)
# elif type == 5: # 风险平价 方差-协方差法
# __ratio = self.fun_calWeight_by_RiskParity(__stocklist, statsDate)
# else: #等权重
# for stock in __stocklist:
# __ratio[stock] = 1.0/len(__stocklist)
# return __ratio
# if stock_list:
# limit_up, limit_low = round(2.0/len(list(set(stock_list))), 4), round(0.5/len(list(set(stock_list))), 4)
# equity_ratio = __fun_getEquity_ratio(stock_list, confidencelevel, 0, limit_up, limit_low, statsDate)
# else:
# equity_ratio = {}
# bonds_ratio = __fun_getEquity_ratio(moneyfund, confidencelevel, 0, 1.0, 0.0, statsDate)
# return equity_ratio, bonds_ratio
# def fun_calPosition(self, equity_ratio, bonds_ratio, algo_ratio, risk_ratio, moneyfund, portfolio_value, confidencelevel, statsDate=None):
# '''
# equity_ratio 资产配仓结果
# bonds_ratio 债券配仓结果
# algo_ratio 策略占市值的百分比
# risk_ratio 每个标的承受的风险系数
# '''
# trade_ratio = equity_ratio # 简化
# return trade_ratio
# # 去极值
# def fun_winsorize(self, rs, type, num):
# # rs为Series化的数据
# rs = rs.dropna().copy()
# low_line, up_line = 0, 0
# if type == 1: # 标准差去极值
# mean = rs.mean()
# #取极值
# mad = num*rs.std()
# up_line = mean + mad
# low_line = mean - mad
# elif type == 2: #中位值去极值
# rs = rs.replace([-np.inf, np.inf], np.nan)
# median = rs.median()
# md = abs(rs - median).median()
# mad = md * num * 1.4826
# up_line = median + mad
# low_line = median - mad
# elif type == 3: #Boxplot 去极值
# if len(rs) < 2:
# return rs
# mc = sm.stats.stattools.medcouple(rs)
# rs.sort()
# q1 = rs[int(0.25*len(rs))]
# q3 = rs[int(0.75*len(rs))]
# iqr = q3-q1
# if mc >= 0:
# low_line = q1-1.5*np.exp(-3.5*mc)*iqr
# up_line = q3+1.5*np.exp(4*mc)*iqr
# else:
# low_line = q1-1.5*np.exp(-4*mc)*iqr
# up_line = q3+1.5*np.exp(3.5*mc)*iqr
# rs[rs < low_line] = low_line
# rs[rs > up_line] = up_line
# return rs
# #标准化
# def fun_standardize(self, s,type):
# '''
# s为Series数据
# type为标准化类型:1 MinMax,2 Standard,3 maxabs
# '''
# data=s.dropna().copy()
# if int(type)==1:
# rs = (data - data.min())/(data.max() - data.min())
# elif type==2:
# rs = (data - data.mean())/data.std()
# elif type==3:
# rs = data/10**np.ceil(np.log10(data.abs().max()))
# return rs
# #中性化
# def fun_neutralize(self, s, df, module='pe_ratio', industry_type=None, level=2, statsDate=None):
# '''
# 参数:
# s为stock代码 如'000002.XSHE' 可为list,可为str
# moduel:中性化的指标 默认为PE
# industry_type:行业类型(可选), 如果行业不指定,全市场中性化
# 返回:
# 中性化后的Series index为股票代码 value为中性化后的值
# '''
# s = df[df.code.isin(list(s))]
# s = s.reset_index(drop = True)
# s = pd.Series(s[module].values, index=s['code'])
# s = self.fun_winsorize(s,1,3)
# if industry_type:
# stocks = self.fun_get_industry_stocks(industry=industry_type, level=level, statsDate=statsDate)
# else:
# stocks = list(get_all_securities(['stock'], date=statsDate).index)
# df = df[df.code.isin(stocks)]
# df = df.reset_index(drop = True)
# df = pd.Series(df[module].values, index=df['code'])
# df = self.fun_winsorize(df,1, 3)
# rs = (s - df.mean())/df.std()
# return rs
# def fun_get_factor(self, df, factor_name, industry, level, statsDate):
# stock_list = self.fun_get_industry_stocks(industry, level, statsDate)
# rs = self.fun_neutralize(stock_list, df, module=factor_name, industry_type=industry, level=level, statsDate=statsDate)
# rs = self.fun_standardize(rs, 2)
# return rs
# def fun_diversity_by_industry(self, stock_list, max_num, statsDate):
# if not stock_list:
# return stock_list
# industry_list = self.fun_get_industry(cycle=None)
# tmpList = []
# for industry in industry_list:
# i = 0
# stocks = self.fun_get_industry_stocks(industry, 2, statsDate)
# for stock in stock_list:
# if stock in stocks: #by 行业选入 top max_num 的标的(如有)
# i += 1
# if i <= max_num:
# tmpList.append(stock) #可能一个股票横跨多个行业,会导致多次入选,但不影响后面计算
# final_stocks = []
# for stock in stock_list:
# if stock in tmpList:
# final_stocks.append(stock)
# return final_stocks
# # 根据行业取股票列表
# def fun_get_industry_stocks(self, industry, level=2, statsDate=None):
# if level == 2:
# stock_list = get_industry_stocks(industry, statsDate)
# elif level == 1:
# industry_list = self.fun_get_industry_levelI(industry)
# stock_list = []
# for industry_code in industry_list:
# tmpList = get_industry_stocks(industry_code, statsDate)
# stock_list = stock_list + tmpList
# stock_list = list(set(stock_list))
# else:
# stock_list = []
# return stock_list
# # 一级行业列表
# def fun_get_industry_levelI(self, industry=None):
# industry_dict = {
# 'A':['A01', 'A02', 'A03', 'A04', 'A05'] #农、林、牧、渔业
# ,'B':['B06', 'B07', 'B08', 'B09', 'B11'] #采矿业
# ,'C':['C13', 'C14', 'C15', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26', 'C27', 'C28', 'C29', 'C30', 'C31', 'C32',\
# 'C33', 'C34', 'C35', 'C36', 'C37', 'C38', 'C39', 'C40', 'C41', 'C42'] #制造业
# ,'D':['D44', 'D45', 'D46'] #电力、热力、燃气及水生产和供应业
# ,'E':['E47', 'E48', 'E50'] #建筑业
# ,'F':['F51', 'F52'] #批发和零售业
# ,'G':['G53', 'G54', 'G55', 'G56', 'G58', 'G59'] #交通运输、仓储和邮政业
# ,'H':['H61', 'H62'] #住宿和餐饮业
# ,'I':['I63', 'I64', 'I65'] #信息传输、软件和信息技术服务业
# ,'J':['J66', 'J67', 'J68', 'J69'] #金融业
# ,'K':['K70'] #房地产业
# ,'L':['L71', 'L72'] #租赁和商务服务业
# ,'M':['M73', 'M74'] #科学研究和技术服务业
# ,'N':['N78'] #水利、环境和公共设施管理业
# #,'O':[] #居民服务、修理和其他服务业
# ,'P':['P82'] #教育
# ,'Q':['Q83'] #卫生和社会工作
# ,'R':['R85', 'R86', 'R87'] #文化、体育和娱乐业
# ,'S':['S90'] #综合
# }
# if industry == None:
# return industry_dict
# else:
# return industry_dict[industry]
# # 行业列表
# def fun_get_industry(self, cycle=None):
# # cycle 的参数:None取所有行业,True取周期性行业,False取非周期性行业
# industry_dict = {
# 'A01':False,# 农业 1993-09-17
# 'A02':False,# 林业 1996-12-06
# 'A03':False,# 畜牧业 1997-06-11
# 'A04':False,# 渔业 1993-05-07
# 'A05':False,# 农、林、牧、渔服务业 1997-05-30
# 'B06':True, # 煤炭开采和洗选业 1994-01-06
# 'B07':True, # 石油和天然气开采业 1996-06-28
# 'B08':True, # 黑色金属矿采选业 1997-07-08
# 'B09':True, # 有色金属矿采选业 1996-03-20
# 'B11':True, # 开采辅助活动 2002-02-05
# 'C13':False, # 农副食品加工业 1993-12-15
# 'C14':False,# 食品制造业 1994-08-18
# 'C15':False,# 酒、饮料和精制茶制造业 1992-10-12
# 'C17':True,# 纺织业 1992-06-16
# 'C18':True,# 纺织服装、服饰业 1993-12-31
# 'C19':True,# 皮革、毛皮、羽毛及其制品和制鞋业 1994-04-04
# 'C20':False,# 木材加工及木、竹、藤、棕、草制品业 2005-05-10
# 'C21':False,# 家具制造业 1996-04-25
# 'C22':False,# 造纸及纸制品业 1993-03-12
# 'C23':False,# 印刷和记录媒介复制业 1994-02-24
# 'C24':False,# 文教、工美、体育和娱乐用品制造业 2007-01-10
# 'C25':True, # 石油加工、炼焦及核燃料加工业 1993-10-25
# 'C26':True, # 化学原料及化学制品制造业 1990-12-19
# 'C27':False,# 医药制造业 1993-06-29
# 'C28':True, # 化学纤维制造业 1993-07-28
# 'C29':True, # 橡胶和塑料制品业 1992-08-28
# 'C30':True, # 非金属矿物制品业 1992-02-28
# 'C31':True, # 黑色金属冶炼及压延加工业 1994-01-06
# 'C32':True, # 有色金属冶炼和压延加工业 1996-02-15
# 'C33':True, # 金属制品业 1993-11-30
# 'C34':True, # 通用设备制造业 1992-03-27
# 'C35':True, # 专用设备制造业 1992-07-01
# 'C36':True, # 汽车制造业 1992-07-24
# 'C37':True, # 铁路、船舶、航空航天和其它运输设备制造业 1992-03-31
# 'C38':True, # 电气机械及器材制造业 1990-12-19
# 'C39':False,# 计算机、通信和其他电子设备制造业 1990-12-19
# 'C40':False,# 仪器仪表制造业 1993-09-17
# 'C41':True, # 其他制造业 1992-08-14
# 'C42':False,# 废弃资源综合利用业 2012-10-26
# 'D44':True, # 电力、热力生产和供应业 1993-04-16
# 'D45':False,# 燃气生产和供应业 2000-12-11
# 'D46':False,# 水的生产和供应业 1994-02-24
# 'E47':True, # 房屋建筑业 1993-04-29
# 'E48':True, # 土木工程建筑业 1994-01-28
# 'E50':True, # 建筑装饰和其他建筑业 1997-05-22
# 'F51':False,# 批发业 1992-05-06
# 'F52':False,# 零售业 1992-09-02
# 'G53':True, # 铁路运输业 1998-05-11
# 'G54':True, # 道路运输业 1991-01-14
# 'G55':True, # 水上运输业 1993-11-19
# 'G56':True, # 航空运输业 1997-11-05
# 'G58':True, # 装卸搬运和运输代理业 1993-05-05
# 'G59':False,# 仓储业 1996-06-14
# 'H61':False,# 住宿业 1993-11-18
# 'H62':False,# 餐饮业 1997-04-30
# 'I63':False,# 电信、广播电视和卫星传输服务 1992-12-02
# 'I64':False,# 互联网和相关服务 1992-05-07
# 'I65':False,# 软件和信息技术服务业 1992-08-20
# 'J66':True, # 货币金融服务 1991-04-03
# 'J67':True, # 资本市场服务 1994-01-10
# 'J68':True, # 保险业 2007-01-09
# 'J69':True, # 其他金融业 2012-10-26
# 'K70':True, # 房地产业 1992-01-13
# 'L71':False,# 租赁业 1997-01-30
# 'L72':False,# 商务服务业 1996-08-29
# 'M73':False,# 研究和试验发展 2012-10-26
# 'M74':True, # 专业技术服务业 2007-02-15
# 'N77':False,# 生态保护和环境治理业 2012-10-26
# 'N78':False,# 公共设施管理业 1992-08-07
# 'P82':False,# 教育 2012-10-26
# 'Q83':False,# 卫生 2007-02-05
# 'R85':False,# 新闻和出版业 1992-12-08
# 'R86':False,# 广播、电视、电影和影视录音制作业 1994-02-24
# 'R87':False,# 文化艺术业 2012-10-26
# 'S90':False,# 综合 1990-12-10
# }
# industry_list = []
# if cycle == True:
# for industry in industry_dict.keys():
# if industry_dict[industry] == True:
# industry_list.append(industry)
# elif cycle == False:
# for industry in industry_dict.keys():
# if industry_dict[industry] == False:
# industry_list.append(industry)
# else:
# industry_list = industry_dict.keys()
# return industry_list
# def fun_do_trade(self, context, trade_ratio, moneyfund, trade_style):
# def __fun_tradeBond(context, stock, curPrice, Value):
# curValue = float(context.portfolio.positions[stock].total_amount * curPrice)
# deltaValue = abs(Value - curValue)
# if deltaValue > (curPrice*200):
# if Value > curValue:
# cash = context.portfolio.cash
# if cash > (curPrice*200):
# self.fun_trade(context, stock, Value)
# else:
# self.fun_trade(context, stock, Value)
# def __fun_tradeStock(context, curPrice, stock, ratio, trade_style):
# total_value = context.portfolio.portfolio_value
# if stock in moneyfund:
# __fun_tradeBond(context, stock, curPrice, total_value * ratio)
# else:
# curValue = context.portfolio.positions[stock].total_amount * curPrice
# Quota = total_value * ratio
# if Quota:
# if abs(Quota - curValue) / Quota >= 0.25 or trade_style:
# if Quota > curValue:
# #if curPrice > context.portfolio.positions[stock].avg_cost:
# self.fun_trade(context, stock, Quota)
# else:
# self.fun_trade(context, stock, Quota)
# else:
# if curValue > 0:
# self.fun_trade(context, stock, Quota)
# trade_list = trade_ratio.keys()
# myholdstock = context.portfolio.positions.keys()
# stock_list = list(set(trade_list).union(set(myholdstock)))
# total_value = context.portfolio.portfolio_value
# # 已有仓位
# holdDict = {}
# h = history(1, '1d', 'close', stock_list, df=False)
# for stock in myholdstock:
# tmp = (context.portfolio.positions[stock].total_amount * h[stock])/total_value
# # print('w:',tmp)
# tmpW = round(tmp[0], 2)
# holdDict[stock] = float(tmpW)
# # 对已有仓位做排序已有仓位做排序
# tmpDict = {}
# for stock in holdDict:
# if stock in trade_ratio:
# tmpDict[stock] = round((trade_ratio[stock] - holdDict[stock]), 2)
# tradeOrder = sorted(tmpDict.items(), key=lambda d:d[1], reverse=False)
# # 交易已有仓位的股票,从减仓的开始,腾空现金
# _tmplist = []
# for idx in tradeOrder:
# stock = idx[0]
# __fun_tradeStock(context, h[stock][-1], stock, trade_ratio[stock], trade_style)
# _tmplist.append(stock)
# # 交易新股票
# # for i in range(len(trade_list)):
# for stock in trade_list:
# # stock = trade_list[i]
# if len(_tmplist) != 0 :
# if stock not in _tmplist:
# __fun_tradeStock(context, h[stock][-1], stock, trade_ratio[stock], trade_style)
# else:
# __fun_tradeStock(context, h[stock][-1], stock, trade_ratio[stock], trade_style)
# def unpaused(self, stock_list, positions_list):
# current_data = get_current_data()
# tmpList = []
# for stock in stock_list:
# if not current_data[stock].paused or stock in positions_list:
# tmpList.append(stock)
# return tmpList
# def remove_st(self, stock_list, statsDate):
# current_data = get_current_data()
# return [s for s in stock_list if not current_data[s].is_st]
# def remove_limit_up(self, stock_list, positions_list):
# h = history(1, '1m', 'close', stock_list, df=False, skip_paused=False, fq='pre')
# h2 = history(1, '1m', 'high_limit', stock_list, df=False, skip_paused=False, fq='pre')
# tmpList = []
# for stock in stock_list:
# if h[stock][0] < h2[stock][0] or stock in positions_list:
# tmpList.append(stock)
# return tmpList
# def fun_get_bad_stock_list(self, statsDate):
# #0、剔除商誉占比 > 10% 的股票
# df = get_fundamentals(
# query(valuation.code, balance.good_will, balance.equities_parent_company_owners),
# date = statsDate
# )
# df = df.fillna(value = 0)
# df['good_will_ratio'] = 1.0*df['good_will'] / df['equities_parent_company_owners']
# list_good_will = list(df[df.good_will_ratio > 0.1].code)
# bad_stocks = list_good_will
# bad_stocks = list(set(bad_stocks))
# return bad_stocks
# def remove_bad_stocks(self, stock_list, bad_stock_list):
# tmpList = []
# for stock in stock_list:
# if stock not in bad_stock_list:
# tmpList.append(stock)
# return tmpList
# # 剔除上市时间较短的产品
# def fun_delNewShare(self, context, equity, deltaday):
# deltaDate = context.current_dt.date() - dt.timedelta(deltaday)
# tmpList = []
# for stock in equity:
# if get_security_info(stock).start_date < deltaDate:
# tmpList.append(stock)
# return tmpList
# def fun_trade(self, context, stock, value):
# self.fun_setCommission(context, stock)
# order_target_value(stock, value)
# def fun_setCommission(self, context, stock):
# if stock in context.moneyfund:
# set_order_cost(OrderCost(open_tax=0, close_tax=0, open_commission=0, close_commission=0, close_today_commission=0, min_commission=0), type='fund')
# else:
# set_order_cost(OrderCost(open_tax=0, close_tax=0.001, open_commission=0.0003, close_commission=0.0003, close_today_commission=0, min_commission=5), type='stock')
| [
"linbirg@gmail.com"
] | linbirg@gmail.com |
71202f0ef96f0e0d939d29cde45c942ddf8686b2 | 21a77ed3498e649ecc7446584edf46b62c361d59 | /orange/models/myf_admin_role.py | e25a779bb0567abbf2500c9ea81a84ee8f71e594 | [] | no_license | kejukeji/heart_counsel_py | e45419d9b2baf3fe392d64c5596a45e96f96a280 | 3fa2dbdad43b0c12da6130d6e634e6c7003fd1f0 | refs/heads/master | 2021-01-13T02:11:00.869579 | 2013-11-28T08:08:15 | 2013-11-28T08:08:15 | 14,770,940 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | # coding: utf-8
from sqlalchemy import Column, Integer, String, Boolean, DATETIME, ForeignKey, text
from .database import Base
myf_admin_role_table = 'myf_admin_role'
class Myf_admin_role(Base):
    """SQLAlchemy declarative model for the 'myf_admin_role' table
    (back-office admin roles)."""
    __tablename__ = myf_admin_role_table
    #__table_args__ = {
    #    'mysql_engine': 'InnoDB',
    #    'mysql_charset': 'utf8'
    #}
    id = Column(Integer, primary_key=True)  # surrogate primary key
    rolename = Column(String(50), nullable=False)  # display name of the role
    #description = Column(text(0), nullable=False)
    disabled = Column(Boolean, nullable=False)  # True when the role is deactivated
"250440083@qq.com"
] | 250440083@qq.com |
8d5fd4f01f3cee42d00bf6893c55b60cca121721 | 097185767372033caf61c95c9789452944f348ff | /app.py | 2d31ac3a604c40990f672ebf9400e253e359cf51 | [] | no_license | TejasBadhe/pythonwebapp | 76b019e333c110d21484d8cc9fe0d1ff911a2ade | 73d88affccfef313d08eefbd401f9a0cc6a27bb0 | refs/heads/main | 2023-09-01T23:18:46.839977 | 2021-11-16T10:07:57 | 2021-11-16T10:07:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def home():
    """Render the landing page (templates/index.html)."""
    return render_template("index.html")
@app.route("/about")
def about():
    """Render the about page (templates/about.html)."""
    return render_template("about.html")
# Start Flask's built-in development server when the module is executed
# directly (``python app.py``); debug=True enables the reloader and debugger.
if __name__ == "__main__":
    app.run(debug=True)
"godspowerstanley@gmail.com"
] | godspowerstanley@gmail.com |
259df650824f2ccc83d739757cbc5871912487ce | d521ab3b091306ac8531ba73287f8d8544527389 | /my_project/base/views.py | 915a4a54ae190a5a2cdbc5f58ee6a91361e3c92e | [] | no_license | vateiixeira/hotswap | 7a7222994b4abb74f82ddad40e572612bf4e2d62 | 4c7dabc24c4defc51c4fa80faac4e3a0e7282254 | refs/heads/master | 2023-04-18T07:18:41.981310 | 2023-04-04T19:28:36 | 2023-04-04T19:28:36 | 220,119,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,018 | py | from django.shortcuts import render,redirect
from . import import_circuito_dados,import_inauguracao, import_circuito_voz,import_lojas
import os
from .models import CentralTelefonica, CircuitoVoz, CircuitoDados,IpFixo, Ferias
from django.views.generic.edit import UpdateView,DeleteView
from my_project.core.utils import is_staff
from django.urls import reverse_lazy
from .forms import *
from django.contrib import messages
from my_project.core.utils import is_staff
from my_project.atendimento.models import Atendimento
from my_project.chamado.models import Chamado
class UpdateCentralTelefonica(UpdateView):
    """Edit form for a CentralTelefonica record.

    NOTE(review): no success_url/get_success_url is defined, so Django falls
    back to the model's get_absolute_url() — confirm CentralTelefonica
    defines it.
    """
    model = CentralTelefonica
    # fields = '__all__'
    template_name = 'update_central_telefonica.html'
    form_class = CentralTelefonicaForm
class DeleteCentralTelefonica(DeleteView):
    """Confirm-and-delete view for CentralTelefonica; returns to the list."""
    template_name = 'delete_central_telefonica.html'
    model = CentralTelefonica
    success_url = reverse_lazy('base:list_central_telefonica')
def list_central_telefonica(request):
    """List every CentralTelefonica record; flags staff users for the template."""
    registros = CentralTelefonica.objects.all()
    return render(
        request,
        'list_central_telefonica.html',
        {'chamado': registros, 'staff': is_staff(request.user)},
    )
def importar(request):
    """Render the CSV-import page.

    The actual import calls below are deliberately commented out and are
    enabled one at a time by hand when a data load is needed.
    """
    template = 'importar.html'
    # Import one file at a time so the runs don't get mixed up.
    #data = import_lojas.csv_to_list(os.path.join(os.path.dirname(os.path.dirname(__file__)),'temp/filial.csv'))
    #data = import_inauguracao.csv_to_list(os.path.join(os.path.dirname(os.path.dirname(__file__)),'temp/inauguracao.csv'))
    #data = import_circuito_voz.csv_to_list(os.path.join(os.path.dirname(os.path.dirname(__file__)),'temp/circuito_voz.csv'))
    #data = import_circuito_dados.csv_to_list(os.path.join(os.path.dirname(os.path.dirname(__file__)),'temp/circuito_dados.csv'))
    return render(request,template)
def list_circuito_voz(request):
    """List every voice-circuit (CircuitoVoz) record."""
    circuitos = CircuitoVoz.objects.all()
    contexto = {
        'staff': is_staff(request.user),
        'chamado': circuitos,
    }
    return render(request, 'list_circuito_voz.html', contexto)
class Update_Circuito_Voz(UpdateView):
    """Edit form for a CircuitoVoz record (template variable name: 'model')."""
    model = CircuitoVoz
    # fields = ('__all__')
    template_name = 'update_circuito_voz.html'
    success_url = reverse_lazy('base:list_circuito_voz')
    form_class = Circuito_VozForm
    def get_object(self, queryset=None):
        # Explicit pk lookup; equivalent to UpdateView's default behaviour.
        model = CircuitoVoz.objects.get(pk=self.kwargs['pk'])
        return model
    context_object_name = 'model'
class Delete_Circuito_Voz(DeleteView):
    """Confirm-and-delete view for CircuitoVoz.

    NOTE(review): reuses the generic 'delete_ferias.html' confirmation
    template — confirm that template is model-agnostic.
    """
    template_name = 'delete_ferias.html'
    model = CircuitoVoz
    success_url = reverse_lazy('base:list_circuito_voz')
def list_circuito_dados(request):
    """List every data-circuit (CircuitoDados) record."""
    circuitos = CircuitoDados.objects.all()
    contexto = {
        'staff': is_staff(request.user),
        'chamado': circuitos,
    }
    return render(request, 'list_circuito_dados.html', contexto)
class Update_Circuito_Dados(UpdateView):
    """Edit form for a CircuitoDados record (template variable name: 'model')."""
    model = CircuitoDados
    fields = ('__all__')  # parens don't make a tuple; this is the string '__all__', which Django accepts
    template_name = 'update_circuito_dados.html'
    success_url = reverse_lazy('base:list_circuito_dados')
    def get_object(self, queryset=None):
        # Explicit pk lookup; equivalent to UpdateView's default behaviour.
        model = CircuitoDados.objects.get(pk=self.kwargs['pk'])
        return model
    context_object_name = 'model'
class Delete_Circuito_Dados(DeleteView):
    """Confirm-and-delete view for CircuitoDados.

    NOTE(review): reuses the generic 'delete_ferias.html' confirmation
    template — confirm that template is model-agnostic.
    """
    template_name = 'delete_ferias.html'
    model = CircuitoDados
    success_url = reverse_lazy('base:list_circuito_dados')
def cadastro_incidente(request):
    """Create a HistoricoIncidente from IncidenteForm.

    On a valid POST the record is saved, a success message is queued and a
    fresh empty form is rendered.  Invalid POSTs re-render the bound form so
    its errors are shown.
    """
    if request.method == 'POST':
        form = IncidenteForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Incidente cadastrado com sucesso!')
            # Bug fix: the old code re-rendered the still-bound form after a
            # successful save, so a browser refresh re-posted and duplicated
            # the incident. Reset to an unbound form instead.
            # NOTE(review): a redirect (POST/redirect/GET) would be safer still.
            form = IncidenteForm()
    else:
        form = IncidenteForm()
    return render(request, 'cadastro_incidente.html', {'form': form})
def lista_incidente(request):
    """List every HistoricoIncidente record; flags staff users for the template."""
    incidentes = HistoricoIncidente.objects.all()
    return render(
        request,
        'lista_incidente.html',
        {'chamado': incidentes, 'staff': is_staff(request.user)},
    )
class DeleteIncidente(DeleteView):
    """Confirm-and-delete view for HistoricoIncidente; returns to the list."""
    template_name='delete_incidente.html'
    model = HistoricoIncidente
    success_url = reverse_lazy('base:lista_incidente')
def update_incidente(request, pk):
    """Edit an existing HistoricoIncidente; redirects to the list on success.

    NOTE: HistoricoIncidente.objects.get raises DoesNotExist for an unknown
    pk, surfacing as a 500 (same as the original implementation).
    """
    incidente = HistoricoIncidente.objects.get(pk=pk)
    if request.method == 'POST':
        form = IncidenteForm(request.POST, instance=incidente)
        if form.is_valid():
            form.save()
            return redirect('base:lista_incidente')
    else:
        form = IncidenteForm(instance=incidente)
    return render(request, 'update_incidente.html', {'form': form, 'model': incidente})
def lista_dt_inauguracao(request):
    """List every store opening date (DataInauguracao)."""
    return render(
        request,
        'list_dt_inauguracao.html',
        {'chamado': DataInauguracao.objects.all()},
    )
def ferias(request):
    """List vacation (Ferias) records; flags staff users for the template."""
    contexto = {
        'staff': is_staff(request.user),
        'chamado': Ferias.objects.all(),
    }
    return render(request, 'ferias.html', contexto)
class UpdateFerias(UpdateView):
    """Edit form for a Ferias record (template variable name: 'model')."""
    model = Ferias
    fields = ('__all__')  # parens don't make a tuple; this is the string '__all__', which Django accepts
    template_name = 'update_ferias.html'
    success_url = reverse_lazy('base:ferias')
    def get_object(self, queryset=None):
        # Explicit pk lookup; equivalent to UpdateView's default behaviour.
        model = Ferias.objects.get(pk=self.kwargs['pk'])
        return model
    context_object_name = 'model'
class DeleteFerias(DeleteView):
    """Confirm-and-delete view for Ferias; returns to the vacation list."""
    template_name = 'delete_ferias.html'
    model = Ferias
    success_url = reverse_lazy('base:ferias')
def camara_fria(request):
    """List fixed IPs (IpFixo) for cold rooms; flags staff users."""
    contexto = {
        'staff': is_staff(request.user),
        'chamado': IpFixo.objects.all(),
    }
    return render(request, 'camara_fria.html', contexto)
class UpdateCamara_Fria(UpdateView):
    """Edit form for an IpFixo (cold-room IP) record."""
    model = IpFixo
    fields = ('__all__')  # parens don't make a tuple; this is the string '__all__', which Django accepts
    template_name = 'update_camara_fria.html'
    success_url = reverse_lazy('base:camara_fria')
    def get_object(self, queryset=None):
        # Explicit pk lookup; equivalent to UpdateView's default behaviour.
        model = IpFixo.objects.get(pk=self.kwargs['pk'])
        return model
    context_object_name = 'model'
class DeleteCamara_Fria(DeleteView):
    """Confirm-and-delete view for an IpFixo (cold-room IP) record.

    Bug fix: success_url previously pointed at 'base:ferias' (copy-paste from
    DeleteFerias); after deleting an IpFixo the user should return to the
    camara_fria listing.
    NOTE(review): still reuses the 'delete_ferias.html' confirmation template
    — confirm it is model-agnostic.
    """
    template_name = 'delete_ferias.html'
    model = IpFixo
    success_url = reverse_lazy('base:camara_fria')
def lista_atendimento(request):
    """List Atendimento records with status 'r', newest first."""
    template= 'lista_atendimento_base.html'
    # NOTE(review): 'Atendimento.object' — Django's default manager is
    # 'objects'; this only works if the model declares a manager named
    # 'object'. Confirm against my_project.atendimento.models.
    envio = Atendimento.object.filter(status='r').order_by('-create_at')
    context = {
        "envio" : envio
    }
    return render(request,template,context)
def lista_chamado(request):
    """List every Chamado (ticket), newest first."""
    template='lista_chamado_base.html'
    # NOTE(review): 'Chamado.object' — Django's default manager is 'objects';
    # this only works if the model declares a manager named 'object'.
    # Confirm against my_project.chamado.models.
    chamado = Chamado.object.all().order_by('-create_at')
    context = {
        'chamado': chamado,
    }
    return render(request,template,context)
"vateiixeira@gmail.com"
] | vateiixeira@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.