blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a33605fc91c2d1ce22db31b4e9669455ca00773 | 8235076c125e5f69188917da520669b89dfdd350 | /user/migrations/0006_anfiteatro_arlivre_atividadehasmaterial_authgroup_authgrouppermissions_authpermission_authuser_authu.py | b15d71b7e5c59cc370d9b70ca5c39c80eab89621 | [] | no_license | guilhascorreia24/componente-Utilizador | 37b319daeb9fd7174db24d2616f6ed833963aafd | 3aae759e7a0961b95d8502e8163efef91e0471d4 | refs/heads/master | 2021-08-10T08:47:39.092791 | 2020-07-03T12:11:36 | 2020-07-03T12:11:36 | 247,350,427 | 0 | 1 | null | 2021-03-31T19:59:17 | 2020-03-14T20:43:53 | CSS | UTF-8 | Python | false | false | 15,937 | py | # Generated by Django 3.0.4 on 2020-04-27 22:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0005_atividade_dia_escola_espaco_inscricao_inscricaocoletiva_inscricaoindividual_tarefa'),
]
operations = [
migrations.CreateModel(
name='Anfiteatro',
fields=[
('edificio', models.CharField(max_length=45)),
('andar', models.CharField(max_length=45)),
('espaco_idespaco', models.OneToOneField(db_column='espaco_idespaco', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Espaco')),
],
options={
'db_table': 'anfiteatro',
'managed': False,
},
),
migrations.CreateModel(
name='Arlivre',
fields=[
('descricao', models.CharField(max_length=255)),
('espaco_idespaco', models.OneToOneField(db_column='espaco_idespaco', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Espaco')),
],
options={
'db_table': 'arlivre',
'managed': False,
},
),
migrations.CreateModel(
name='AtividadeHasMaterial',
fields=[
('atividade_idatividade', models.OneToOneField(db_column='Atividade_idAtividade', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Atividade')),
],
options={
'db_table': 'atividade_has_material',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, unique=True)),
],
options={
'db_table': 'auth_group',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroupPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_group_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='AuthPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('codename', models.CharField(max_length=100)),
],
options={
'db_table': 'auth_permission',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128)),
('last_login', models.DateTimeField(blank=True, null=True)),
('is_superuser', models.IntegerField()),
('username', models.CharField(max_length=150, unique=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=150)),
('email', models.CharField(max_length=254)),
('is_staff', models.IntegerField()),
('is_active', models.IntegerField()),
('date_joined', models.DateTimeField()),
],
options={
'db_table': 'auth_user',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserGroups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_groups',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserUserPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_user_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='ColaboradorHasHorario',
fields=[
('colaborador_has_horario_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'colaborador_has_horario',
'managed': False,
},
),
migrations.CreateModel(
name='ColaboradorHasUnidadeOrganica',
fields=[
('colaborador_has_unidade_organica_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'colaborador_has_unidade_organica',
'managed': False,
},
),
migrations.CreateModel(
name='CoordenadorHasDepartamento',
fields=[
('coordenador_utilizador_idutilizador', models.OneToOneField(db_column='Coordenador_Utilizador_idutilizador', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Coordenador')),
],
options={
'db_table': 'coordenador_has_departamento',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoAdminLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField()),
('object_id', models.TextField(blank=True, null=True)),
('object_repr', models.CharField(max_length=200)),
('action_flag', models.PositiveSmallIntegerField()),
('change_message', models.TextField()),
],
options={
'db_table': 'django_admin_log',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoContentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
],
options={
'db_table': 'django_content_type',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='Horario',
fields=[
('hora', models.TimeField(primary_key=True, serialize=False)),
],
options={
'db_table': 'horario',
'managed': False,
},
),
migrations.CreateModel(
name='HorarioHasDia',
fields=[
('id_dia_hora', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'horario_has_dia',
'managed': False,
},
),
migrations.CreateModel(
name='Idioma',
fields=[
('nome', models.CharField(max_length=255, primary_key=True, serialize=False)),
('sigla', models.CharField(max_length=45, unique=True)),
],
options={
'db_table': 'idioma',
'managed': False,
},
),
migrations.CreateModel(
name='InscricaoHasPrato',
fields=[
('inscricao_has_prato_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'inscricao_has_prato',
'managed': False,
},
),
migrations.CreateModel(
name='InscricaoHasSessao',
fields=[
('inscricao_has_sessao_id', models.AutoField(primary_key=True, serialize=False)),
('nr_inscritos', models.IntegerField()),
],
options={
'db_table': 'inscricao_has_sessao',
'managed': False,
},
),
migrations.CreateModel(
name='Material',
fields=[
('idmaterial', models.AutoField(db_column='idMaterial', primary_key=True, serialize=False)),
('descricao', models.CharField(max_length=255)),
],
options={
'db_table': 'material',
'managed': False,
},
),
migrations.CreateModel(
name='Menu',
fields=[
('idmenu', models.AutoField(db_column='idMenu', primary_key=True, serialize=False)),
('tipo', models.CharField(max_length=45)),
('menu', models.CharField(max_length=45)),
('nralmocosdisponiveis', models.IntegerField()),
],
options={
'db_table': 'menu',
'managed': False,
},
),
migrations.CreateModel(
name='Notificacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descricao', models.CharField(max_length=255)),
('criadoem', models.DateTimeField()),
('idutilizadorenvia', models.IntegerField()),
('utilizadorrecebe', models.IntegerField()),
],
options={
'db_table': 'notificacao',
'managed': False,
},
),
migrations.CreateModel(
name='Paragem',
fields=[
('paragem', models.CharField(max_length=45, primary_key=True, serialize=False)),
],
options={
'db_table': 'paragem',
'managed': False,
},
),
migrations.CreateModel(
name='Prato',
fields=[
('idprato', models.AutoField(db_column='idPrato', primary_key=True, serialize=False)),
('nralmocos', models.IntegerField()),
('descricao', models.CharField(max_length=125)),
],
options={
'db_table': 'prato',
'managed': False,
},
),
migrations.CreateModel(
name='Responsaveis',
fields=[
('idresponsavel', models.AutoField(primary_key=True, serialize=False)),
('nome', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('telefone', models.CharField(max_length=45)),
],
options={
'db_table': 'responsaveis',
'managed': False,
},
),
migrations.CreateModel(
name='Sala',
fields=[
('edificio', models.CharField(max_length=45)),
('andar', models.CharField(max_length=45)),
('gabinete', models.CharField(blank=True, max_length=45, null=True)),
('espaco_idespaco', models.OneToOneField(db_column='espaco_idespaco', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Espaco')),
],
options={
'db_table': 'sala',
'managed': False,
},
),
migrations.CreateModel(
name='Sessao',
fields=[
('idsessao', models.AutoField(primary_key=True, serialize=False)),
('nrinscritos', models.IntegerField()),
('vagas', models.IntegerField()),
],
options={
'db_table': 'sessao',
'managed': False,
},
),
migrations.CreateModel(
name='SessaoHasHorarioHasDia',
fields=[
('sessao_has_horario_has_dia_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'sessao_has_horario_has_dia',
'managed': False,
},
),
migrations.CreateModel(
name='Transporte',
fields=[
('idtransporte', models.AutoField(primary_key=True, serialize=False)),
('capacidade', models.IntegerField()),
('identificacao', models.CharField(max_length=255)),
],
options={
'db_table': 'transporte',
'managed': False,
},
),
migrations.CreateModel(
name='TransporteHasHorario',
fields=[
('id_transporte_has_horario', models.IntegerField(primary_key=True, serialize=False)),
],
options={
'db_table': 'transporte_has_horario',
'managed': False,
},
),
migrations.CreateModel(
name='TransporteHasInscricao',
fields=[
('transporte_has_inscricao_id', models.AutoField(primary_key=True, serialize=False)),
('numero_passageiros', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'transporte_has_inscricao',
'managed': False,
},
),
migrations.CreateModel(
name='UtilizadorHasNotificacao',
fields=[
('utilizador_has_notificacao_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'utilizador_has_notificacao',
'managed': False,
},
),
migrations.CreateModel(
name='TransportePessoal',
fields=[
('transporte_idtransporte', models.OneToOneField(db_column='transporte_idtransporte', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Transporte')),
],
options={
'db_table': 'transporte_pessoal',
'managed': False,
},
),
migrations.CreateModel(
name='TransporteUniversitario',
fields=[
('capacidade', models.IntegerField()),
('transporte_idtransporte', models.OneToOneField(db_column='transporte_idtransporte', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Transporte')),
],
options={
'db_table': 'transporte_universitario',
'managed': False,
},
),
]
| [
"brunosusana99@hotmail.com"
] | brunosusana99@hotmail.com |
be92341808644115b777719c2a4432641c542798 | 93211b441515263dce08cc01d98b4b42806d31dd | /kinship_analysis_allelic_dropout_dicts.py | eb8e9401f11728d70bc875fccd044a847aba8a1f | [
"MIT"
] | permissive | EdaEhler/Kinship_analysis | 24a15b845013e918d9f1090a2d7f7c8ddd87dbf2 | d64e53f1b3185d8b7f4c92bd095684337da36031 | refs/heads/master | 2021-01-13T08:57:31.774876 | 2016-09-25T10:03:32 | 2016-09-25T10:03:32 | 69,156,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,337 | py | ###############################
# Kinship analysis using SNPs #
# --------------------------- #
# Edvard Ehler, Ph.D., #
# Institute of Anthropology, #
# UAM Poznan, Poland, #
# 2016 #
# eda.ehler@seznam.cz #
###############################
# Based on Fung&Hu (2008) Statistical DNA Forensics: Theory, Methods and Computation
"""
For two persons X and Y, define the relatedness coefficients (k 0 ,2k 1 ,k 2 ) as
k0 = P (neither allele of X is identical by descent to alleles of Y);
k1 = P (one or the other of the alleles of X is ibd to one of the alleles of Y, but the second allele is not);
k2 = P (both alleles of X are ibd to those of Y).
"""
#--------------------
# IMPORTS
#--------
from collections import namedtuple # používám k volání příbuzenských koeficientů
from math import pow # cca 15% rychlejší než klasickej pow
from random import random
# VARIABLES
#----------
snp_list = []
# {'rs112' : {"G":0.3, "T":0, "C":0.7, "A":0}, 'rs2341': {"G":0.2, "T":0.8, "C":0, "A":0}}
freq_dict = {}
# table 3.13, chapter 3.6, page 43
relationship = namedtuple("rel_coefficients", "k0, k1, k2")
#----
parentChild = relationship(0, 1, 0)
fullSiblings = relationship(0.25, 0.5, 0.25)
halfSiblings = relationship(0.5, 0.5, 0)
grandparentGrandchild = relationship(0.5, 0.5, 0)
uncleNephew = relationship(0.5, 0.5, 0)
firstCousins = relationship(0.75, 0.25, 0)
secondCousins = relationship(0.9375, 0.0625, 0)
unrelated = relationship(1, 0, 0)
SNP_INFO = "SNP_allele_freqs.csv"
SAMPLES_GENOTYPES = "3samples_genotypes.csv"
# dle Anny - P(false positive) of homozygotes
# všechny homozygoty ve funkci divide_et_impera budu testovat, zda nejsou false positive,
# jestli jo, tak je budu brát jako heterozygoty
ALLELIC_DROPOUT = 0.00159
ALLELIC_DROPOUT_PROBS = "3samples_allelic_dropouts.csv"
# na vzorky
kz1 = {}
kz5 = {}
kz4 = {}
# na P(false homozygote) slovník (= allelic drop-out = ado)
kz1_ado = {}
kz5_ado = {}
kz4_ado = {}
snp_counter_nonzero = 0
snp_counter = 0
#-----------------
# LOADING SNP info + allel frequencies + samples genotypes
#-------------------------------------
with open(SNP_INFO, mode="r", encoding="utf-8") as snpIN:
# načti do dvou slovníků, v jednom budou pouze názvy rsxXX(asi tuple), druhý slovník bude odkazovat na jejich parametry
for radek in snpIN:
radek = radek.strip().split(";")
# jméno do snp_listu, abych po něm mohl pak cyklit
snp_list.append(radek[0])
# frekvence alel do freq_dict
# nejdříve však defaultní hodnoty
freq_dict[radek[0]] = {"G":0, "T":0, "C":0, "A":0}
freq_dict[radek[0]][radek[1]] = radek[3]
freq_dict[radek[0]][radek[2]] = radek[4]
with open(SAMPLES_GENOTYPES, mode="r", encoding="utf-8") as genoIN:
# načtu genotypy vzorků
for radek in genoIN:
# nechci vykomentované řádky a N genotypy
if not radek.startswith("#"):
if not "N" in radek:
radek = radek.strip().split(";")
#h2[radek[0]] = radek[1]
#h4[radek[0]] = radek[2]
kz1[radek[0]] = radek[1]
kz5[radek[0]] = radek[2]
kz4[radek[0]] = radek[3]
with open(ALLELIC_DROPOUT_PROBS, mode="r", encoding="utf-8") as adIN:
# načtu genotypy vzorků
for radek in adIN:
# nechci vykomentované řádky a N genotypy
if not radek.startswith("#"):
if not "N" in radek:
radek = radek.strip().split(";")
#h2[radek[0]] = radek[1]
#h4[radek[0]] = radek[2]
kz1_ado[radek[0]] = float(radek[1])
kz5_ado[radek[0]] = float(radek[2])
kz4_ado[radek[0]] = float(radek[3])
# FUNCTIONS
#----------
def divide_et_impera(snp, genotype1, genotype2, alleleCount, scenario=parentChild, jmeno1="prvni", jmeno2="druhy"):
# dle poměru alel v genotypech zavolá odpovídající funkci
# snp - name of SNP, string ('rs12345')
# genotype1 - first individual genotype, string ("CC")
# genotype2 - second individual genotype, string ("AC")
# alleleCount - number of alleles across all loci in the 2 individuals tested, int (124)
# scenario - relationship namedtuple defined earlier with relatedness coefficients (k0, k1(which is in fact 2k1, but naming problems made me to name it just k1), k2)
# jmeno1, jmeno2 - jmeno vzorku, dle toho zařídím slovník pro allelic dropout
global snp_counter, snp_counter_nonzero
#------------
# blok definice allelic drop-out slovníku (ado)
# dle toho, co přijde za jméno do funkce, volím slovník
ado1 = {}
ado2 = {}
if jmeno1.upper() == "KZ1":
ado1 = kz1_ado
elif jmeno1.upper() == "KZ4":
ado1 = kz4_ado
else:
print("jmeno1:", jmeno1)
raise NameError("jmeno1 has unknown value (not KZ1, KZ4).")
if jmeno2.upper() == "KZ4":
ado2 = kz4_ado
elif jmeno2.upper() == "KZ5":
ado2 = kz5_ado
else:
print("jmeno2:", jmeno2)
raise NameError("jmeno2 has unknown value (not KZ4, KZ5).")
#-------------------
# pomocná proměnná na testování rozřazovacího algoritmu
branch = ""
#-------------------
#Rozřazování dle genotypů:
# AA, AA
if (genotype1 == genotype2) and (genotype1[0] == genotype1[1]):
branch = "aaaa"
allele1 = genotype1[0]
allele2 = genotype1[0]
# rozstřel genotypů na allelic dropout
drop_out_roll = random()
if drop_out_roll <= ado1[snp] * ado2[snp]: # pravděpodobnost, že jsou oba false positive
funkce = ab_ab
elif drop_out_roll <= ado1[snp]: # pravděpodobnost, že je jeden false positive
funkce = aa_ab
elif drop_out_roll <= ado2[snp]: # pravděpodobnost, že je druhý false positive
funkce = aa_ab
else:
funkce = aa_aa
# AB, AB
elif (genotype1 == genotype2) and (genotype1[0] != genotype1[1]):
branch = "abab"
allele1 = genotype1[0]
allele2 = genotype1[1]
# rozstřel genotypů na allelic dropout
drop_out_roll = random()
# první možnost nedám - to by znamenalo, že se oba mohou změnit oběma směrama
#if drop_out_roll <= ado1[snp] * ado2[snp]: # pravděpodobnost, že jsou oba false positive
# funkce = ab_ab
if drop_out_roll <= ado1[snp]: # pravděpodobnost, že je jeden false positive
funkce = aa_ab
elif drop_out_roll <= ado2[snp]: # pravděpodobnost, že je druhý false positive
funkce = aa_ab
else:
funkce = ab_ab
# AA, BB
elif (genotype1 != genotype2) and (genotype1[0] == genotype1[1]) and (genotype2[0] == genotype2[1]):
branch = "aabb"
allele1 = genotype1[0]
allele2 = genotype2[0]
# rozstřel genotypů na allelic dropout
drop_out_roll = random()
if drop_out_roll <= ado1[snp] * ado2[snp]: # pravděpodobnost, že jsou oba false positive
funkce = ab_ab
elif drop_out_roll <= ado1[snp]: # pravděpodobnost, že je jeden false positive
funkce = aa_ab
elif drop_out_roll <= ado2[snp]: # pravděpodobnost, že je druhý false positive
funkce = aa_ab
else:
funkce = aa_bb
# AA, AB
elif (genotype1 != genotype2) and (genotype1[0] == genotype1[1]) and (genotype2[0] != genotype2[1]):
branch = "aaab"
allele1 = genotype1[0]
# nevím, jestli mi přijde genotype2 AB nebo BA
allele2 = genotype2[1] if genotype2[1] != genotype1[0] else genotype2[0]
# rozstřel genotypů na allelic dropout
drop_out_roll = random()
#if drop_out_roll <= ado1[snp] * ado2[snp]: # pravděpodobnost, že jsou oba false positive
# funkce = ab_ab
if drop_out_roll <= ado1[snp]: # pravděpodobnost, že je jeden false positive
funkce = ab_ab
#elif drop_out_roll <= ado2[snp]: # pravděpodobnost, že je druhý false positive
# funkce = aa_bb
else:
funkce = aa_ab
# AB, AA
elif (genotype1 != genotype2) and (genotype1[0] != genotype1[1]) and (genotype2[0] == genotype2[1]):
branch = "abaa"
allele1 = genotype2[0]
# nevím, jestli mi přijde genotype1 AB nebo BA
allele2 = genotype1[1] if genotype1[1] != genotype2[0] else genotype1[0]
# rozstřel genotypů na allelic dropout
drop_out_roll = random()
#if drop_out_roll <= ado1[snp] * ado2[snp]: # pravděpodobnost, že jsou oba false positive
# funkce = ab_ab
#elif drop_out_roll <= ado1[snp]: # pravděpodobnost, že je jeden false positive
# funkce = aa_ab
if drop_out_roll <= ado2[snp]: # pravděpodobnost, že je druhý false positive
funkce = ab_ab
else:
funkce = aa_ab
# frekvence alel ve srovnávací populaci (bráno z ensemblu GRCh37)
f1 = float(freq_dict[snp][allele1])
f2 = float(freq_dict[snp][allele2])
# test prints - byly špatné indexy v if-else bloku - už jsou OK
"""
print(branch)
print("genotyp:", genotype1, genotype2)
print("allele:", allele1, allele2)
print("pi,pj:", f1, f2)
print("P(ano):", funkce(f1, f2, koef=scenario))
print("P(ne):", funkce(f1, f2, koef=unrelated))
print("LR:", funkce(f1, f2, koef=scenario) / funkce(f1, f2, koef=unrelated))
input()
"""
likelihoodRatio = funkce(f1, f2, koef=scenario) / funkce(f1, f2, koef=unrelated)
snp_counter += 1
if likelihoodRatio == 0:
#print('zero', snp)
# děje se zejména při parent-child scénáři, když se neshodují genotypy
# pravděpodobnost mutací nebo silent alleles (Pinto et al. 2013, FSI:Genetics) -> 0.001-0.005
#----------------
# dle Borsting et al. 2011 (FSI:Genetics) počítá u rozdílných homozygotů jaby by se tam
# objevila "silent allele" (třeba nějaká technická chyba, že ji nenašli).
# pravděpodobnost silent allele je 1/(n+1), kde n = počet alel na všech lokusech u těchto dvou individuí
# vynásobeno konzervativním odhadem pravděpodobnosti mutace u SNPů = 10E-6
# print("+++zero+++")
#return 0.000001 * (1/(alleleCount + 1))
return 0
#print("uvnitr divide_et_impera:", jmeno1)
else:
snp_counter_nonzero += 1
return likelihoodRatio
# Fung&Hu 2008, table 5.1, page 80
# funkce, které počítají pravděpodobnost joint genotype probability za předpokladu HWE
# vstupují do nich frekvence alely 1 a 2 (f1, f2) a příbuzenské koeficienty, dle použitého scénáře
# výstup je P(Z|Y, H) - pravděpodobnost, že genotypy Z a Y mají alely identical-by-descend (ibd),
# za předpokladu hypotézy (scénáře) H (třeba že jsou siblings, nebo unrelated, nebo uncle-nephew...)
def aa_aa(f1, f2, koef):
return koef.k0 * pow(f1, 4) + koef.k1 * pow(f1, 3) + koef.k2 * pow(f1, 2)
def aa_ab(f1, f2, koef):
return 2 * koef.k0 * pow(f1, 3) * f2 + koef.k1 * pow(f1, 2) * f2
def aa_bb(f1, f2, koef):
return koef.k0 * pow(f1, 2) * pow(f2, 2)
def ab_ab(f1, f2, koef):
#print("abab:", (4 * koef.k0 * pow(f1, 2) * pow(f2, 2)) + (koef.k1 * pow(f1,2) * f2) + (koef.k1 * f1 * pow(f2, 2)) + (2 * koef.k2 * f1 * f2))
return (4 * koef.k0 * pow(f1, 2) * pow(f2, 2)) + (koef.k1 * pow(f1,2) * f2) + (koef.k1 * f1 * pow(f2, 2)) + (2 * koef.k2 * f1 * f2)
#---------------------
def allele_count(sample1, sample2):
allele_n = 0
for i in sample1:
geno1 = sample1[i]
geno2 = sample2[i]
# zanedbávám 3 a více alel, pouze bi-alelické lokusy
allele_n += 1 if geno1 == geno2 and geno1[0] == geno1[1] else 2
#print(i, allele_n)
return allele_n
#---------------------
# SKRIPT
#-------
# kz4 vs kz5
def run_kinship_analysis(sample1, sample2, hypothesis, hypothesis_name, alleleCount, name1='prvni', name2='druhy', repeats=100):
# přidán parametr name1,name2 - jméno pro výběr správného allelic drop-out slovníku
# pomocná funkce na projetí všech definovaných kombinací vstupních parametrů
# přidělal jsem možnost opakování výpočtu pro případ silent allele, allelic drop-in/drop-out
# idea je taková, že provedu výpočet 1000x-100 000x a vezmu průměr
# možná by byl lepší resampling??
global snp_counter, snp_counter_nonzero
snp_counter = 0
snp_counter_nonzero = 0
result = 1
result_list = []
# Opakování
#--------------
for _ in range(repeats):
result = 1 # při každé rundě si vynuluju vysledek
for i in sample1:
#print(i, 'result = ', result)
try:
result *= divide_et_impera(i, sample1[i], sample2[i], alleleCount, scenario=hypothesis, jmeno1=name1, jmeno2=name2)
except IndexError:
#print(i, kz1[i])
#print(i, kz5[i])
pass
#print("--")
result_list.append(result)
#--------------
# zprůměrování výsledku
result = sum(result_list)/repeats
print(name1 + " vs. " + name2)
print("Scenario:", hypothesis_name + ",", hypothesis)
print("Likelihood Ratio (p(scenario)/p(unrelated)):", result)
print("Bayes. estimate of probability of the scenario (prior probability = 0.5):", str(round((result/(result + 1))*100, 5)) + "%")
print("SNPs tried:", snp_counter/repeats)
print("SNPs with non-zero result:", snp_counter_nonzero/repeats)
print("----------------------------------------------------")
print()
#input()
scenarios_bag = (parentChild, fullSiblings, halfSiblings, grandparentGrandchild, uncleNephew, firstCousins, secondCousins)
scenarios_names = ('parent-child', 'full-siblings', 'half-siblings', 'grandparent-grandchild', 'uncle-nephew', 'first cousins', 'second cousins')
#----------
pocet_alel = allele_count(kz4, kz5)
print("Allele count:", pocet_alel)
for n, hypo in enumerate(scenarios_bag):
run_kinship_analysis(kz4, kz5, hypo, scenarios_names[n], pocet_alel, name1='KZ4', name2='KZ5', repeats=10000)
#input()
print("============================================")
print("Algorithm loops count: 10000")
print("Allelic drop-out check - using dictionary of P(false allele) unique for each SNP for each sample.")
print("No silent-allele correction, just return 0 in case of opposite homozygotes with no drop-out.")
print("********************************************")
| [
"noreply@github.com"
] | EdaEhler.noreply@github.com |
c33fb73c3f175c6bb75340690b33f4e382136492 | 97d51839b27ce11bd1302d593ffba330da3234d9 | /WeatherForecastApp/mysite/webapp/models.py | 9245aa65546550c6b4ed64172d5dc894a5a0e8ef | [] | no_license | adityagurram/CloudComputing | dd4015c71892062b470be9104eb9189b4c38ebf1 | 667e72cb53ddd721ecb69ca71d3804eb1a7ee94d | refs/heads/master | 2020-03-07T03:09:29.570179 | 2018-03-29T03:43:19 | 2018-03-29T03:43:19 | 127,227,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.db import models
class Climate(models.Model):
DATE= models.IntegerField(unique=True)
TMAX= models.FloatField(null=True, blank=True, default=None,)
TMIN=models.FloatField(null=True, blank=True, default=None) | [
"noreply@github.com"
] | adityagurram.noreply@github.com |
566a439b70fad999ee6c115c070e521142d7015a | ba5d4704dd8be5a17890cce41e8ac5e7523472ed | /archives/tests/test_model_domains.py | 166be7ae9a1cee70ef246aa5215239deff9c71c6 | [] | no_license | carogiu/cell-migration | 0fb0fdf0bff6ac5cec6cebcb60ef868ac6436574 | 0c90e14e426dfc1faa08ebba22487711dc199cf7 | refs/heads/master | 2020-12-01T11:25:18.023077 | 2020-08-10T09:28:54 | 2020-08-10T09:28:54 | 230,616,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | import unittest
import dolfin
from model.main import mesh_from_dim
from model.model_domains import BD_right, BD_left, BD_top_bottom, dom_and_bound
class TestModelDomains(unittest.TestCase):
def test_class_BD(self):
class_1 = BD_right(dim_x=2)
self.assertIsInstance(class_1, BD_right)
self.assertEqual(class_1.dim_x, 2)
class_2 = BD_left(dim_x=2)
self.assertIsInstance(class_2, BD_left)
self.assertEqual(class_2.dim_x, 2)
class_3 = BD_top_bottom(dim_y=2)
self.assertIsInstance(class_3, BD_top_bottom)
self.assertEqual(class_3.dim_y, 2)
def test_class_BD_inside(self):
class_2 = BD_right(dim_x=2)
result_inside = class_2.inside(x=[1+1e-13], on_boundary=False)
self.assertEqual(result_inside, False)
result_inside = class_2.inside(x=[1+1e-13], on_boundary=True)
self.assertEqual(result_inside, True)
result_inside = class_2.inside(x=[1+1e-12], on_boundary=True)
self.assertEqual(result_inside, True)
result_inside = class_2.inside(x=[1+1e-11], on_boundary=True)
self.assertEqual(result_inside, False)
def test_mesh_definition(self):
mesh = mesh_from_dim(nx=100, ny=100, dim_x=10, dim_y=10)
self.assertIsInstance(mesh, dolfin.RectangleMesh)
domain, boundaries = dom_and_bound(mesh, dim_x=10, dim_y=10)
if __name__ == '__main__':
unittest.main()
| [
"57912591+carogiu@users.noreply.github.com"
] | 57912591+carogiu@users.noreply.github.com |
3cc4baa6ce409ef2fef25d43ae16372d88412de4 | 25692e58dceec1f5be4c7930d353bacafd3ff7b0 | /binary/랜선.py | 428c52c11757090ed3d5b84ea1660cc38c993943 | [] | no_license | ub1n/Algorithm | a8617fc56d934e99370c367af364f308431423d6 | c9761941082b678a2882d04db8887afb0d664737 | refs/heads/master | 2023-06-11T11:11:52.573748 | 2021-07-02T13:32:09 | 2021-07-02T13:32:09 | 375,415,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | import sys
n=list(map(int,sys.stdin.readline().split()))
arr=[]
for i in range(n[0]):
m=int(sys.stdin.readline())
arr.append(m)
start=1
end=max(arr)
ans=[]
while(start<=end):
mid=(start+end)//2
temp=sum([i//mid for i in arr])
if temp>=n[1]:
ans.append(mid)
start=mid+1
else:
end=mid-1
print(max(ans)) | [
"bin951024@naver.com"
] | bin951024@naver.com |
f6969149986c94f6addf9e40a89a24a01d513ec8 | 84bcda4ff3a1c2c956c7814f3a308ba68d697563 | /python/GETDownload1.py | 3954d7b356b4b32c016bd413695f85aa213f5bf1 | [] | no_license | yijiyouyu/code | 7a9db849d3734169ba80f029ca74d6962ecd71b9 | f4bc6a4124243484c2d17fb3a574da5e7a31ca11 | refs/heads/master | 2021-09-17T22:54:13.967963 | 2018-07-06T08:26:17 | 2018-07-06T08:26:17 | 109,633,819 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | #coding:utf-8
from requests import get
from time import time
import sys
def Usage():
print '[Usage]:\nGETDownload1.py [URL]'
def getHTML(URL):
html = get(URL).text
return html
def getName():
name = str(time())
return name
def saveFile(Fname,Fdata):
f = open(Fname,'w')
f.write(Fdata)
f.close()
if __name__=='__main__':
try:
reload(sys)
sys.setdefaultencoding('utf8')
URL = sys.argv[1]
html = getHTML(URL)
name = getName()
saveFile(name+'.txt',html)
except:
Usage() | [
"1147121947@qq.com"
] | 1147121947@qq.com |
361bcd8554afe3ab13ba6067f3468a34e6a3fba4 | 15c86f80f0009118f8e1bd01d866cfdeeb00fbb4 | /assignment2/sgd.py | f2a753e685a8dad8799f774a1ba6127d4a616556 | [] | no_license | Baekyeongmin/2019_cs224n | 1680c67e399df69be3513b66f97d88b98a55831e | bed832a65dc3df0bb8b2f3cff41fe58ebdb12901 | refs/heads/master | 2020-05-05T03:30:44.290736 | 2019-06-09T05:24:13 | 2019-06-09T05:24:13 | 179,675,422 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | #!/usr/bin/env python
# Save parameters every a few SGD iterations as fail-safe
SAVE_PARAMS_EVERY = 5000
import pickle
import glob
import random
import numpy as np
import os.path as op
def load_saved_params():
"""
A helper function that loads previously saved parameters and resets
iteration start.
"""
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
params_file = "saved_params_%d.npy" % st
state_file = "saved_state_%d.pickle" % st
params = np.load(params_file)
with open(state_file, "rb") as f:
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
params_file = "saved_params_%d.npy" % iter
np.save(params_file, params)
with open("saved_state_%d.pickle" % iter, "wb") as f:
pickle.dump(random.getstate(), f)
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
        PRINT_EVERY=10):
    """ Stochastic Gradient Descent

    Implement the stochastic gradient descent method in this function.

    Arguments:
    f -- the function to optimize, it should take a single
         argument and yield two outputs, a loss and the gradient
         with respect to the arguments
    x0 -- the initial point to start SGD from
    step -- the step size for SGD
    iterations -- total iterations to run SGD for
    postprocessing -- postprocessing function for the parameters
                      if necessary. In the case of word2vec we will need
                      to normalize the word vectors to have unit length.
    useSaved -- resume from / save periodic checkpoints on disk
    PRINT_EVERY -- specifies how many iterations to output loss

    Return:
    x -- the parameter value after SGD finishes
    """
    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000

    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            # Replay the annealing the interrupted run already applied.
            step *= 0.5 ** (start_iter / ANNEAL_EVERY)
        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0
    if not postprocessing:
        postprocessing = lambda x: x

    # Exponentially smoothed loss, only for progress reporting.
    exploss = None

    # Loop variable renamed from `iter` to avoid shadowing the builtin.
    for it in range(start_iter + 1, iterations + 1):
        loss, grad = f(x)
        x -= step * grad
        x = postprocessing(x)

        if it % PRINT_EVERY == 0:
            # 0.95/0.05 exponential moving average keeps the output stable.
            if not exploss:
                exploss = loss
            else:
                exploss = .95 * exploss + .05 * loss
            print("iter %d: %f" % (it, exploss))

        # Test `useSaved` first so the checkpoint machinery is only
        # touched when checkpointing is actually enabled.
        if useSaved and it % SAVE_PARAMS_EVERY == 0:
            save_params(it, x)

        if it % ANNEAL_EVERY == 0:
            step *= 0.5

    return x
def sanity_check():
    """Run SGD on the quadratic f(x) = sum(x**2); each start must reach ~0."""
    # Loss is x**2, gradient is 2x, so SGD should converge to x == 0.
    quad = lambda x: (np.sum(x ** 2), x * 2)
    print("Running sanity checks...")
    t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
    print("test 1 result:", t1)
    assert abs(t1) <= 1e-6
    t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
    print("test 2 result:", t2)
    assert abs(t2) <= 1e-6
    t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
    print("test 3 result:", t3)
    assert abs(t3) <= 1e-6
    print("-" * 40)
    print("ALL TESTS PASSED")
    print("-" * 40)
# no need for any main
if __name__ == "__main__":
    sanity_check()
| [
"bym0313@dgist.ac.kr"
] | bym0313@dgist.ac.kr |
0cc1a592e15de782740aa4548d5a1da9c94b242a | 21b7670ce56d6cb41f609a09f26f460150cbbb29 | /scripts/antennaset.py | 773119bd18e95b90ea6eb507dfb468615b349e51 | [] | no_license | transientskp/old-aartfaac-imaging-pipeline | fd82c739b9b2670e3b2f6cf05f97f2ea168800e6 | 64456796a56cf5e667170e6336dbdcf9cd07f9ba | refs/heads/master | 2022-07-07T08:48:36.192471 | 2016-06-16T08:47:06 | 2016-06-16T08:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,250 | py | #!/usr/bin/env python
# Generate position files for different antennasets.
#
# The imaging pipeline needs to know the position of the AARTFAAC antennae.
#
# The LOFAR repository contains a per-station AntennaFieldCSXXX.conf file in
# the directory MAC/Deployment/data/StaticMetaData/AntennaFields. These
# provide information about the position of all LOFAR antennae. In particular,
# they contain a block that looks like:
#
# LBA
# 3 [ XXXXX.XXXXX YYYYY.YYYYY ZZZZZ.ZZZZZ]
# 96 x 2 x 3 [
# X.XXXX Y.YYYY Z.ZZZZ X.XXXX Y.YYYY Z.ZZZZ
# X.XXXX Y.YYYY Z.ZZZZ X.XXXX Y.YYYY Z.ZZZZ
# ...
# X.XXXX Y.YYYY Z.ZZZZ X.XXXX Y.YYYY Z.ZZZZ
# ]
#
# This tells us about all the LBA antennae in the station. The first three
# numbers provide the reference position of the station in IRTF2005. The
# subsequent array of 96 * 2 * 3 numbers provide per-antenna offsets from that
# reference. Each offset is repeated twice, for two polarizations, but the
# positions should be identical.
#
# Note that there are 96 antennae listed. The first 48 correspond to the
# LBA_INNER antennaset; the second 48 to LBA_OUTER. This is defined in
# MAC/Deployment/data/StaticMetaData/AntennaSets.conf; we take it as read for
# now.
#
# When the AARTFAAC correlator produces correlation matrices, it will order
# them such that we start with the first antenna being used in the CS002 file,
# and end with the last antenna in the CS007 file.
#
# The imaging pipeline requires a text file that lists a single IRTF2005
# X/Y/Z position per line. They should be ordered in the same way as the
# correlator output. That is, the first line contains the ITRF position of the
# first CS002 antenna in use, and the last line contains the position of the
# last CS007 antenna in use.
#
# This script processes the AntennaFieldCSXXX.conf files to generate output
# appropriate for AARTFAAC. Specify the type of antenna (LBA, HBA) and the
# range in use (0-48 for LBA_INNER, 48-96 for LBA_OUTER) on the command line,
# together with one or more AntennaField files. E.g.:
#
# $ python antennaset.py LBA 0 48 AntennaFieldCS002.conf AntennaFieldCS003.conf
import sys
class AntennaSet(object):
    """Absolute ITRF positions for a slice of one station's antennae.

    Parses the block named ``name`` (e.g. "LBA") from an open
    AntennaField*.conf file object and stores, in ``self.positions``,
    one [x, y, z] list per antenna in the half-open range
    ``start_ant..end_ant``: the station reference position plus the
    per-antenna offset.
    """

    def __init__(self, name, start_ant, end_ant, datafile):
        stripped = [raw.strip() for raw in datafile.readlines()]
        header = stripped.index(name)
        # Reference position: tokens 2..4 of the line right after the name.
        ref_x, ref_y, ref_z = (
            float(tok) for tok in stripped[header + 1].split()[2:5]
        )
        # Per-antenna offset rows start three lines below the block name.
        first_row = header + 3
        rows = stripped[first_row + start_ant:first_row + end_ant]
        # Each row repeats the offset per polarization; only the first
        # three tokens are needed.
        self.positions = [
            [ref_x + float(a), ref_y + float(b), ref_z + float(c)]
            for a, b, c in (row.split()[0:3] for row in rows)
        ]
# NOTE: this entry point is Python 2 (print statement on the last line).
# Usage: antennaset.py <LBA|HBA> <start_ant> <end_ant> <AntennaField files...>
if __name__ == "__main__":
    name = sys.argv[1] # LBA or HBA
    start_ant, end_ant = [int(x) for x in sys.argv[2:4]] # LBA_OUTER = 48,96
    antennasets = []
    # Remaining arguments are AntennaField files.
    for filename in sys.argv[4:]:
        with open(filename, "r") as f:
            antennasets.append(AntennaSet(name, start_ant, end_ant, f))
    # Emit one "X Y Z" line per antenna, in correlator order
    # (all antennae of the first station, then the next, ...).
    for antset in antennasets:
        for posn in antset.positions:
            print "%f %f %f" % (posn[0], posn[1], posn[2])
| [
"swinbank@transientskp.org"
] | swinbank@transientskp.org |
fff7ecd42a575a75bd8c70fd7c301b8cd7a6cf9c | 6b989e9ed854c9c8a04fdcf3e9df7ad8922cf856 | /chapter01/python3_str_types.py | d8d64e34ee572126a61d5e7817be131ea8f13b09 | [
"MIT"
] | permissive | PacktPublishing/Hands-On-Enterprise-Application-Development-with-Python | 6fea9321392328648a094bd10787a4cdb873a6b6 | a59c2ecb55ed43e5bad8c6ed9b687a3e6b610e9f | refs/heads/master | 2023-02-26T11:33:14.202552 | 2023-01-30T08:51:15 | 2023-01-30T08:51:15 | 140,812,464 | 38 | 30 | MIT | 2023-02-15T20:26:05 | 2018-07-13T07:24:55 | Python | UTF-8 | Python | false | false | 253 | py | #!/bin/python3
# Teaching demo: Python 3 separates text (str) from binary data (bytes).
str1 = 'I am a unicode string'
print("Type of str1 is " + str(type(str1)))
str2 = b"And I can't be concatenated to a byte string"
print("Type of str2 is " + str(type(str2)))
print("Trying to concatenate str1 and str2")
# Intentionally raises TypeError in Python 3: str + bytes is not allowed.
str3 = str1 + str2
| [
"sbadhwar@redhat.com"
] | sbadhwar@redhat.com |
8f0dd18ff0e2846a87a5f2ca82b2163c648938b6 | 2479345dafbf0ac1118f34fbd3471871a3ac5c11 | /demo/libdemo/list_countries.py | 9292611d6422dfbe06ee3e2c9b7058f6e10a215d | [] | no_license | srikanthpragada/PYTHON_06_MAY_2021 | e2fc4d32a38f085658f87d35f31df65ee837a440 | f30a3c4541e0fc15d157446721b514f791602919 | refs/heads/master | 2023-06-02T23:13:53.786444 | 2021-06-16T03:00:38 | 2021-06-16T03:00:38 | 365,402,518 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import requests
resp = requests.get("https://restcountries.eu/rest/v2/all")
if resp.status_code != 200:
print('Sorry! Could not get details!')
exit(1)
countries = resp.json()
for c in countries:
print(f"{c['name']:50} - {c['capital']}") | [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
bb363c5ddd3739e93a04900c1353f55c9f17c3ab | 923f9270a12be35fdd297d8f27e522c601e94eab | /src/decay/test/test_dc_nose.py | 00a9741044a433b8333c1da2f59dfc64f2536274 | [] | no_license | t-bltg/INF5620 | a06b6e06b6aba3bc35e933abd19c58cd78584c1f | d3e000462302839b49693cfe06a2f2df924c5027 | refs/heads/master | 2021-05-31T00:41:41.624838 | 2016-03-22T09:29:00 | 2016-03-22T09:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,328 | py | import nose.tools as nt
import sys, os
sys.path.insert(0, os.pardir)
import dc_mod_unittest as dc_mod
import numpy as np
def exact_discrete_solution(n, I, a, theta, dt):
"""Return exact discrete solution of the theta scheme."""
dt = float(dt) # avoid integer division
factor = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)
return I*factor**n
def test_against_discrete_solution():
"""
Compare result from solver against
formula for the discrete solution.
"""
theta = 0.8; a = 2; I = 0.1; dt = 0.8
N = int(8/dt) # no of steps
u, t = dc_mod.solver(I=I, a=a, T=N*dt, dt=dt, theta=theta)
u_de = np.array([exact_discrete_solution(n, I, a, theta, dt)
for n in range(N+1)])
diff = np.abs(u_de - u).max()
nt.assert_almost_equal(diff, 0, delta=1E-14)
def test_solver():
"""
Compare result from solver against
precomputed arrays for theta=0, 0.5, 1.
"""
I=0.8; a=1.2; T=4; dt=0.5 # fixed parameters
precomputed = {
't': np.array([ 0. , 0.5, 1. , 1.5, 2. , 2.5,
3. , 3.5, 4. ]),
0.5: np.array(
[ 0.8 , 0.43076923, 0.23195266, 0.12489759,
0.06725255, 0.03621291, 0.01949926, 0.0104996 ,
0.00565363]),
0: np.array(
[ 8.00000000e-01, 3.20000000e-01,
1.28000000e-01, 5.12000000e-02,
2.04800000e-02, 8.19200000e-03,
3.27680000e-03, 1.31072000e-03,
5.24288000e-04]),
1: np.array(
[ 0.8 , 0.5 , 0.3125 , 0.1953125 ,
0.12207031, 0.07629395, 0.04768372, 0.02980232,
0.01862645]),
}
for theta in 0, 0.5, 1:
u, t = dc_mod.solver(I, a, T, dt, theta=theta)
diff = np.abs(u - precomputed[theta]).max()
# Precomputed numbers are known to 8 decimal places
nt.assert_almost_equal(diff, 0, places=8,
msg='theta=%s' % theta)
def test_potential_integer_division():
"""Choose variables that can trigger integer division."""
theta = 1; a = 1; I = 1; dt = 2
N = 4
u, t = dc_mod.solver(I=I, a=a, T=N*dt, dt=dt, theta=theta)
u_de = np.array([exact_discrete_solution(n, I, a, theta, dt)
for n in range(N+1)])
diff = np.abs(u_de - u).max()
nt.assert_almost_equal(diff, 0, delta=1E-14)
def test_convergence_rates():
"""Compare empirical convergence rates to exact ones."""
# Set command-line arguments directly in sys.argv
sys.argv[1:] = '--I 0.8 --a 2.1 --T 5 '\
'--dt 0.4 0.2 0.1 0.05 0.025'.split()
# Suppress output from dc_mod.main()
stdout = sys.stdout # save standard output for later use
scratchfile = open('.tmp', 'w') # fake standard output
sys.stdout = scratchfile
r = dc_mod.main()
for theta in r:
nt.assert_true(r[theta]) # check for non-empty list
scratchfile.close()
sys.stdout = stdout # restore standard output
expected_rates = {0: 1, 1: 1, 0.5: 2}
for theta in r:
r_final = r[theta][-1]
# Compare to 1 decimal place
nt.assert_almost_equal(expected_rates[theta], r_final,
places=1, msg='theta=%s' % theta)
# no need for any main
| [
"hpl@simula.no"
] | hpl@simula.no |
c904e572df97233d9e65ac3224ef24e0694134a6 | 24faec36e3196fdc77837c45e5934a3f71426ff8 | /college system.py | 83c3c5843f1f04fb7ed197af94ae6ee69f3de32c | [] | no_license | MuhammadRasiMS/college-management | b1a147f2121c5d0733718171078b259e132f8400 | fc2187e567832c4af1d6a4ec4d83a23af8793b9d | refs/heads/master | 2023-08-08T04:49:07.517448 | 2021-09-11T04:39:26 | 2021-09-11T04:39:26 | 405,284,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,248 | py | import mysql.connector as mysql
db = mysql.connect(host="localhost", user="root", password="", database="college")
command_handler = db.cursor(buffered=True)
def teacher_session():
    """Interactive menu loop for a logged-in teacher.

    Option 1 marks attendance for every student for one date, option 2
    lists every attendance row, option 3 returns to the caller (logout).
    Uses the module-level `command_handler` cursor and `db` connection.
    """
    while 1:
        print("")
        print("Teacher's Menu")
        print("1. Mark student register")
        print("2. view register")
        print("3. Logout")
        user_option = input(str("Option : "))
        if user_option == "1":
            print("")
            print("Mark student register")
            command_handler.execute("SELECT username FROM users WHERE privilege = 'student'")
            records = command_handler.fetchall()
            date = input(str("Date : DD/MM/YYYY : "))
            for record in records:
                # fetchall() returns 1-tuples; index the tuple instead of
                # stripping quotes/parens out of its str() representation.
                username = record[0]
                # Present | Absent | Late
                status = input(str("Status for " + str(username) + "P/A/L : "))
                query_vals = (str(username), date, status)
                command_handler.execute("INSERT INTO attendance (username, date, status) VALUES(%s,%s,%s)", query_vals)
                db.commit()
                print(username + " Marked as " + status)
        elif user_option == "2":
            print("")
            print("Viewing all student registers")
            command_handler.execute("SELECT username, date, status FROM attendance")
            records = command_handler.fetchall()
            print("Displaying all registers")
            for record in records:
                print(record)
        elif user_option == "3":
            break
        else:
            print("No valid option was selected")
def student_session(username):
    """Interactive menu loop for a logged-in student.

    Option 1 prints the student's attendance rows, option 2 dumps them
    to register.txt, option 3 returns to the caller (logout).
    """
    while 1:
        print("")
        print("Student's Menu")
        print("")
        print("1. View Register")
        print("2. Download Register")
        print("3. Logout")
        user_option = input(str("Option : "))
        if user_option == "1":
            print("Displaying Register")
            # Bind the tuple to a separate name: rebinding `username`
            # itself broke every later menu action in the same session.
            query_vals = (str(username),)
            command_handler.execute("SELECT date, username, status FROM attendance WHERE username = %s", query_vals)
            records = command_handler.fetchall()
            for record in records:
                print(record)
        elif user_option == "2":
            print("Downloading Register")
            query_vals = (str(username),)
            command_handler.execute("SELECT date, username, status FROM attendance WHERE username = %s", query_vals)
            records = command_handler.fetchall()
            # Write the register once; the old per-record loop reopened
            # the file in "w" mode and rewrote the same data every pass.
            with open("register.txt", "w") as f:
                f.write(str(records) + "\n")
            print("All records saved")
        elif user_option == "3":
            break
        else:
            print("No valid option was selected")
def admin_session():
    """Interactive menu loop for the admin.

    Options 1/2 create student/teacher accounts, options 3/4 delete
    them, option 5 returns to the caller (logout).
    """
    while 1:
        print("")
        print("Admin Menu")
        print("1. Register new Student")
        print("2. Register new Teacher")
        # Menu items 3 and 4 previously both read "Register Existing
        # Student" even though they delete accounts; label them to match
        # the actions actually performed below.
        print("3. Delete Existing Student")
        print("4. Delete Existing Teacher")
        print("5. Logout")
        user_option = input(str("option : "))
        if user_option == "1":
            print("")
            print("Register New Student")
            username = input(str("Student username : "))
            password = input(str("Student password : "))
            query_vals = (username, password)
            command_handler.execute("INSERT INTO users (username,password,privilege) VALUES (%s,%s,'student')",
                                    query_vals)
            db.commit()
            print(username + " has been registered as a student")
        elif user_option == "2":
            print("")
            print("Register New Teacher")
            username = input(str("Teacher username : "))
            password = input(str("Teacher password : "))
            query_vals = (username, password)
            command_handler.execute("INSERT INTO users (username,password,privilege) VALUES (%s,%s,'teacher')",
                                    query_vals)
            db.commit()
            print(username + " has been registered as a teacher")
        elif user_option == "3":
            print("")
            print("Delete Existing Student Account")
            username = input(str("Student username : "))
            query_vals = (username, "student")
            command_handler.execute("DELETE FROM users WHERE username = %s AND privilege = %s ", query_vals)
            db.commit()
            # rowcount of the DELETE tells whether the account existed.
            if command_handler.rowcount < 1:
                print("User not found")
            else:
                print(username + " has been deleted")
        elif user_option == "4":
            print("")
            print("Delete Existing Teacher Account")
            username = input(str("Teacher username : "))
            query_vals = (username, "teacher")
            command_handler.execute("DELETE FROM users WHERE username = %s AND privilege = %s ", query_vals)
            db.commit()
            if command_handler.rowcount < 1:
                print("User not found")
            else:
                print(username + " has been deleted")
        elif user_option == "5":
            break
        else:
            print("No valid option selected")
def auth_student():
    """Prompt for student credentials; open a student session on success."""
    print("")
    print("Student's Login")
    print("")
    username = input(str("Username : "))
    password = input(str("Password : "))
    # NOTE(security): passwords are stored and compared in plaintext.
    query_vals = (username, password, "student")
    command_handler.execute("SELECT username FROM users WHERE username = %s AND password = %s AND privilege = %s",
                            query_vals)
    # rowcount is usable because the module creates a buffered cursor.
    if command_handler.rowcount <= 0:
        print("Invalid login details")
    else:
        student_session(username)
def auth_teacher():
    """Prompt for teacher credentials; open the teacher session on success."""
    print("")
    print("Teacher's Login")
    print("")
    username = input(str("Username : "))
    password = input(str("Password : "))
    # NOTE(security): passwords are stored and compared in plaintext.
    query_vals = (username, password)
    command_handler.execute("SELECT * FROM users WHERE username = %s AND password = %s AND privilege = 'teacher'",
                            query_vals)
    if command_handler.rowcount <= 0:
        print("Login not recognised")
    else:
        teacher_session()
def auth_admin():
    """Prompt for admin credentials; open the admin session on success."""
    print("")
    print("Admin Login")
    print("")
    username = input(str("Username : "))
    password = input(str("Password : "))
    # NOTE(security): admin credentials are hard-coded in source
    # ("admin" / "password") rather than checked against the database.
    if username == "admin":
        if password == "password":
            admin_session()
        else:
            print("Incorrect password !")
    else:
        print("Login details not recognised")
def main():
    """Top-level menu: dispatch to the student/teacher/admin login flows.

    Loops forever; each auth_* call returns here after logout or a
    failed login.
    """
    while 1:
        print("Welcome to the college system")
        print("")
        print("1. Login as student")
        print("2. Login as teacher")
        print("3. Login as admin")
        user_option = input(str("Option : "))
        if user_option == "1":
            auth_student()
        elif user_option == "2":
            auth_teacher()
        elif user_option == "3":
            auth_admin()
        else:
            print("No valid option was selected")
# Script entry point: runs immediately on import (no __main__ guard).
main()
| [
"muhammadrasi0@gmail.com"
] | muhammadrasi0@gmail.com |
e3ef029dcbb4f626217414ae65caf8a028b4de89 | 4a887a050564267fc26f9ccd318aa9acc1dd8bf9 | /WikigenderJsonParsing/CreateBootstrappedDatasets.py | c6dce349d732458e9bd06a06d6485aaa66bd7fc7 | [] | no_license | kp1302/Towards-Understanding-Gender-Bias-in-Neural-Relation-Extraction | e998ecab6a9aa233c7f63816546afca3af8d3993 | e3c243c2b50b21ae0b8dc12732dbf4448f545918 | refs/heads/master | 2022-12-19T06:53:14.521785 | 2020-07-06T06:05:14 | 2020-07-06T06:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | from Utility import *
import argparse
import os
import random
#from sklearn.utils import resample
WORD_EMBEDDING_DIRECTORY = '../WordEmbeddings/'
BOOTSTRAP_FRACTION = 0.9
def createBootstrappedDataset(dataset_name, args):
    '''
    Read a (possibly debiased) Wikigender dataset, optionally subsample its
    training split, and write the result back out.

    :param dataset_name: base name of the original dataset file, e.g.
        "JsonData/Wikigender.json"; the debiasing suffix is derived from args
    :param args: parsed command-line args; args.bootstrapped controls whether
        the train split is subsampled to BOOTSTRAP_FRACTION of its size
    :return: the (possibly subsampled) dataset dict
    '''
    # get the full name of the dataset!
    infile_names = dataset_name.split('.')
    # Temporarily clear args.bootstrapped so getNameSuffix() yields the
    # suffix of the *input* file (which is never the bootstrapped one).
    old_bs = args.bootstrapped
    args.bootstrapped = False
    infile_names[0] += getNameSuffix(args)
    args.bootstrapped = old_bs
    infile_name = infile_names[0] + "." + infile_names[1]
    # read the data
    data = readFromJsonFile(infile_name)
    print('BOOSTRAPPED? {}'.format(args.bootstrapped))
    if args.bootstrapped:
        infile_names[0] += "_bootstrapped"
        #data = random.sample(data, bootstrap_percentage * len(data))
        #data['train'] = resample(data['train'], replace=True, n_samples=None)
        # NOTE: random.sample draws WITHOUT replacement, so despite the name
        # this is a subsample rather than a classical bootstrap resample.
        data['train'] = random.sample(data['train'], int(BOOTSTRAP_FRACTION * len(data['train'])))
    # write the bootstrapped dataset to a file
    outfile_name = infile_names[0] + '.' + infile_names[1]
    print('creating {}'.format(outfile_name))
    writeToJsonFile(data, outfile_name)
    writeToJsonFile(data, os.path.join(WORD_EMBEDDING_DIRECTORY, outfile_name)) # also write it to the word embeddings directory
    return data
if __name__ == '__main__':
    os.chdir('./WikigenderJsonParsing/') #this is for running a script in the directory above this
    args = getCommandLineArgs()
    createBootstrappedDataset('JsonData/Wikigender.json', args)
    os.chdir('../') # return to original directory
os.chdir('../') # return to original directory
| [
"ajg@umail.ucsb.edu"
] | ajg@umail.ucsb.edu |
34e0d339fa61eb2fba8a107ea109b6b0c56efc1e | 743d4545702532c967efee2c12015d91853b6b80 | /orders/migrations/0001_initial.py | 50adf5b21efe66d7cf544e46d52e15ce62c1faa2 | [] | no_license | SOAD-Group-36/server | 81a7ced2149174fe4d9c1644ee2afd78054d7d29 | 5a5a1e2cd4a361cff8fff008600d65d6dc8edaab | refs/heads/main | 2023-02-03T06:44:36.041311 | 2020-12-12T10:45:21 | 2020-12-12T10:45:21 | 305,055,627 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # Generated by Django 3.1.2 on 2020-11-11 15:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # First migration of the orders app: creates the Order table linking a
    # user and a product with quantity, price and a fulfilment status.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('products', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=1)),
                ('price', models.DecimalField(decimal_places=2, max_digits=7)),
                ('placed_on', models.DateTimeField(auto_now_add=True)),
                # Two-letter status codes; new orders start as 'Pl' (Placed).
                ('status', models.CharField(choices=[('Pl', 'Placed'), ('Pr', 'Processed'), ('Pk', 'Packed'), ('Sh', 'Shipped'), ('Dl', 'Delivered'), ('Rj', 'Rejected'), ('Rt', 'Returned'), ('Rc', 'Received')], default='Pl', max_length=2)),
                # SET_NULL keeps the order row if the product is deleted.
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='products.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"masterashu@live.in"
] | masterashu@live.in |
403552e209068810af7e723ab196627c656e93e2 | 57d0789235d8ab014b584a285697b8db2763f1df | /day42.py | 50040391605109c6447c00500b14c0e0accf6259 | [] | no_license | Werefriend/CS112-Spring2012 | 8856ccde68c594f87932b96cc8bc41288095bfb5 | c79f1894876f97669f7628b446c6068b4bb5f4d0 | refs/heads/master | 2020-12-25T17:13:20.016669 | 2012-04-02T14:17:35 | 2012-04-02T14:17:35 | 3,266,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python
# NOTE: Python 2 tutorial script (print statements throughout).
color = (255, 10, 30)
people = {'Jonah' : "stupid", 'Alec' : "smelly",'Jack' : "ugly", 'Paul' : "awesome"}
matrix = ["hello", 2.0, 5, [10, 20]]
eng2sp = {}
eng2sp['one'] = 'uno'
eng2sp['two'] = 'dos'
# Iterate over key/value pairs of the English-to-Spanish dictionary.
for k,v in eng2sp.items():
	print k,v
print eng2sp['one']
#print matrix
#print matrix[0]
#print matrix[1]
#print matrix[2]
#print matrix[3][0]
#print matrix[3][1]
#where would I use a multidimensional array??
#imagine you have an image of rows and columns...
#for each tuple pixel, there are a red, green, and blue value
#TUPLES
#a tuple is any two or more things grouped together
#unlike a list, these are immutable, meaning not changeable
#DICTIONARIES
#like a list, but the list defines the dictionary by a key, not an order
print len(people)
print people.keys()
print people.values()
s = "Monty Python"
# Slicing: characters 6..11 of s, i.e. "Python".
print s[6:12]
| [
"reeves.sam@gmail.com"
] | reeves.sam@gmail.com |
26e2a1e09c016f6615d8caf36cb0155cbbab5dca | 262a761393d2f2de88a0ccaed96b2c4f06b56150 | /env/lib/python3.8/site-packages/isort/wrap.py | 872b096e7985b43f1ac53a7f482243b66c01bfdc | [
"MIT"
] | permissive | chaitraliv/mini-twitter | 58cac2bed3b645078422e069e7c305fd0d62e0cf | 99bbfca0b611744c829bca671300aa8dcc356ab1 | refs/heads/master | 2022-12-29T08:28:11.574173 | 2020-10-21T08:50:26 | 2020-10-21T08:50:26 | 292,632,756 | 0 | 0 | MIT | 2020-10-21T08:49:59 | 2020-09-03T17:12:40 | Python | UTF-8 | Python | false | false | 5,353 | py | import copy
import re
from typing import List, Optional, Sequence
from .settings import DEFAULT_CONFIG, Config
from .wrap_modes import WrapModes as Modes
from .wrap_modes import formatter_from_string
def import_statement(
    import_start: str,
    from_imports: List[str],
    comments: Sequence[str] = (),
    line_separator: str = "\n",
    config: Config = DEFAULT_CONFIG,
    multi_line_output: Optional[Modes] = None,
) -> str:
    """Returns a multi-line wrapped form of the provided from import statement."""
    # Explicit multi_line_output overrides the configured wrap mode.
    formatter = formatter_from_string((multi_line_output or config.multi_line_output).name)
    # Hanging indent aligned one column past the "from x import " prefix.
    dynamic_indent = " " * (len(import_start) + 1)
    indent = config.indent
    line_length = config.wrap_length or config.line_length
    statement = formatter(
        statement=import_start,
        imports=copy.copy(from_imports),
        white_space=dynamic_indent,
        indent=indent,
        line_length=line_length,
        comments=comments,
        line_separator=line_separator,
        comment_prefix=config.comment_prefix,
        include_trailing_comma=config.include_trailing_comma,
        remove_comments=config.ignore_comments,
    )
    if config.balanced_wrapping:
        # Shrink the allowed line length one column at a time until the last
        # wrapped line is no shorter than the others, stopping as soon as the
        # number of lines would change or the length would drop below 10.
        lines = statement.split(line_separator)
        line_count = len(lines)
        if len(lines) > 1:
            minimum_length = min(len(line) for line in lines[:-1])
        else:
            minimum_length = 0
        new_import_statement = statement
        while len(lines[-1]) < minimum_length and len(lines) == line_count and line_length > 10:
            statement = new_import_statement
            line_length -= 1
            new_import_statement = formatter(
                statement=import_start,
                imports=copy.copy(from_imports),
                white_space=dynamic_indent,
                indent=indent,
                line_length=line_length,
                comments=comments,
                line_separator=line_separator,
                comment_prefix=config.comment_prefix,
                include_trailing_comma=config.include_trailing_comma,
                remove_comments=config.ignore_comments,
            )
            lines = new_import_statement.split(line_separator)
    # If the formatter produced a single line, fall back to generic wrapping.
    if statement.count(line_separator) == 0:
        return _wrap_line(statement, line_separator, config)
    return statement
def line(content: str, line_separator: str, config: Config = DEFAULT_CONFIG) -> str:
    """Returns a line wrapped to the specified line-length, if possible."""
    wrap_mode = config.multi_line_output
    if len(content) > config.line_length and wrap_mode != Modes.NOQA: # type: ignore
        # Split off a trailing comment so splitting only considers code.
        line_without_comment = content
        comment = None
        if "#" in content:
            line_without_comment, comment = content.split("#", 1)
        # Try to break the line at "import ", ".", or "as " boundaries,
        # in that order of preference.
        for splitter in ("import ", ".", "as "):
            exp = r"\b" + re.escape(splitter) + r"\b"
            if re.search(exp, line_without_comment) and not line_without_comment.strip().startswith(
                splitter
            ):
                line_parts = re.split(exp, line_without_comment)
                if comment:
                    _comma_maybe = (
                        "," if (config.include_trailing_comma and config.use_parentheses) else ""
                    )
                    line_parts[-1] = f"{line_parts[-1].strip()}{_comma_maybe} #{comment}"
                next_line = []
                # Pop parts onto the continuation until the head fits.
                while (len(content) + 2) > (
                    config.wrap_length or config.line_length
                ) and line_parts:
                    next_line.append(line_parts.pop())
                    content = splitter.join(line_parts)
                if not content:
                    content = next_line.pop()
                # Recursively wrap the indented continuation line.
                cont_line = _wrap_line(
                    config.indent + splitter.join(next_line).lstrip(), line_separator, config
                )
                if config.use_parentheses:
                    if splitter == "as ":
                        output = f"{content}{splitter}{cont_line.lstrip()}"
                    else:
                        _comma = "," if config.include_trailing_comma and not comment else ""
                        if wrap_mode in (
                            Modes.VERTICAL_HANGING_INDENT, # type: ignore
                            Modes.VERTICAL_GRID_GROUPED, # type: ignore
                        ):
                            _separator = line_separator
                        else:
                            _separator = ""
                        output = (
                            f"{content}{splitter}({line_separator}{cont_line}{_comma}{_separator})"
                        )
                    lines = output.split(line_separator)
                    # Keep the closing ")" ahead of any trailing comment.
                    if config.comment_prefix in lines[-1] and lines[-1].endswith(")"):
                        content, comment = lines[-1].split(config.comment_prefix, 1)
                        lines[-1] = content + ")" + config.comment_prefix + comment[:-1]
                    return line_separator.join(lines)
                # Without parentheses, continue the line with a backslash.
                return f"{content}{splitter}\\{line_separator}{cont_line}"
    elif len(content) > config.line_length and wrap_mode == Modes.NOQA: # type: ignore
        if "# NOQA" not in content:
            return f"{content}{config.comment_prefix} NOQA"
    return content
# Alias used by import_statement() and by the recursive call above.
_wrap_line = line
| [
"chaitrali.vaidya@instazen.com"
] | chaitrali.vaidya@instazen.com |
0072cc0115f29b67a47d46881396394aa26d284e | 2f1d04677be2bff8983e2521eb0beb94b694a7a5 | /setup.py | 418e061de985d028c2e3e9e462f2f8c90763342e | [] | no_license | adisuissa/rh_img_access_layer | d510c40537385eab4332aa7ef0cf17ea39afd902 | 42a48f8ed10ef7addd7b1ce5e47f8a0022f80642 | refs/heads/master | 2020-07-09T21:44:34.760714 | 2019-12-31T14:46:40 | 2019-12-31T14:46:40 | 204,090,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | import setuptools
# Package metadata for rh_img_access_layer.
setuptools.setup(
    description="Image reading layer for the Rhoana pipeline",
    # install_requires=[
    #     "pyaml>=15.8.2"
    # ],
    name="rh_img_access_layer",
    packages=["rh_img_access_layer"],
    # NOTE(review): dependency_links is deprecated and ignored by modern
    # pip; this fork of gcsfs would need to be installed another way.
    dependency_links = ['http://github.com/adisuissa/gcsfs/tarball/master#egg=fs_gcsfs-0.4.1'],
    url="https://github.com/Rhoana/rh_img_access_layer",
    version="0.0.1"
)
| [
"adi.suissa@gmail.com"
] | adi.suissa@gmail.com |
6f916b447bc8946eb14222b33526f345a1cc0c4f | 21324be3146af56c524a332b7633d4bb20dfa594 | /rest/taskrouter/reservations/instance/get/example-1/example-1.py | 7912a4008771c3b02fa7515a95a8623362ce22c9 | [
"MIT"
] | permissive | mrphishxxx/api-snippets | c0a7967c6fced7413a1c4f695041cff2d85bcf6c | 34faf794971fadfab1d2666647d0322522f4a179 | refs/heads/master | 2021-01-22T15:00:40.502532 | 2016-05-13T23:11:33 | 2016-05-13T23:11:33 | 58,898,143 | 1 | 0 | null | 2016-05-16T02:37:58 | 2016-05-16T02:37:58 | null | UTF-8 | Python | false | false | 522 | py | # Download the Python helper library from twilio.com/docs/python/install
# NOTE: Python 2 example (print statements below).
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "{{ account_sid }}"
auth_token = "{{ auth_token }}"
workspace_sid = "{{ workspace_sid }}"
task_sid = "{{ task_sid }}"
client = TwilioTaskRouterClient(account_sid, auth_token)
# NOTE(review): reservation_sid is never defined in this snippet, so the
# next line raises NameError; it presumably needs a
# reservation_sid = "{{ reservation_sid }}" placeholder like the others.
reservation = client.reservations(workspace_sid, task_sid).get(reservation_sid)
print reservation.reservation_status
print reservation.worker_name | [
"eliecerhdz@gmail.com"
] | eliecerhdz@gmail.com |
d3f6ac276eb291409be7ee6ff5b98b09efd7223f | ea14dde57798cbf21446e98cb7d5f33587566f2b | /blog/migrations/0003_comment_approved_comment.py | b784d30deeb681420a713f0783f1847bacec8acb | [] | no_license | marceljorde/SE2 | fd3d821e5aa9662d863df2f12e3f6f846c198137 | b1cb85b0be2490c1ed498195aa0e943cc18d1736 | refs/heads/master | 2020-04-11T04:54:48.166034 | 2018-12-31T13:37:24 | 2018-12-31T13:37:24 | 161,531,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 2.0.9 on 2018-12-20 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_comment'),
]
operations = [
migrations.AddField(
model_name='comment',
name='approved_comment',
field=models.BooleanField(default=False),
),
]
| [
"marce@r119170.srs.swh.mhn.de"
] | marce@r119170.srs.swh.mhn.de |
00f52c6cf6c7645f0524b3ed9f86a1bf017a892b | c74db84433f8a5f9199678b52bc9770083c30f53 | /programing/dataStructure/heap/heap.py | ed6efe9f2567dc4135bc8f746c4d311c9f074718 | [] | no_license | wiseun/TIL | fd4708a4ec064d0d1b2f681caafdcea98e7fbf34 | f337a185a6911526263e2446519fa5d78de79dd3 | refs/heads/master | 2021-06-08T09:13:05.868110 | 2021-05-07T06:03:18 | 2021-05-07T06:03:18 | 94,679,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | #!/usr/bin/python3
import random
import sys
class MinHeap:
    """Array-backed binary min-heap.

    push() inserts a value; pop() removes and returns the smallest
    value, or 0 when the heap is empty (sentinel kept from the
    original API). The data list stores the heap in the usual implicit
    layout: children of index i live at 2*i+1 and 2*i+2.
    """

    def __init__(self):
        self.data = []

    def getSize(self):
        """Number of values currently stored."""
        return len(self.data)

    def reBuildingWhenPush(self):
        # Restore the heap property after an append: bubble the new
        # (last) element up while it is smaller than its parent.
        child = self.getSize() - 1
        while child > 0:
            parent = (child - 1) // 2
            if self.data[child] < self.data[parent]:
                self.data[parent], self.data[child] = (
                    self.data[child], self.data[parent]
                )
                child = parent
            else:
                break

    def reBuildingWhenPop(self):
        # Restore the heap property after the root was replaced: sift
        # the root down, always swapping with the smaller child.
        size = self.getSize()
        node = 0
        while True:
            left = 2 * node + 1
            right = left + 1
            smallest = node
            if left < size and self.data[left] < self.data[smallest]:
                smallest = left
            if right < size and self.data[right] < self.data[smallest]:
                smallest = right
            if smallest == node:
                break
            self.data[node], self.data[smallest] = (
                self.data[smallest], self.data[node]
            )
            node = smallest

    def push(self, value):
        """Insert `value` into the heap."""
        self.data.append(value)
        self.reBuildingWhenPush()

    def pop(self):
        """Remove and return the minimum, or 0 if the heap is empty."""
        if not self.data:
            return 0
        if len(self.data) > 1:
            # Move the last element to the root before removing the min.
            self.data[0], self.data[-1] = self.data[-1], self.data[0]
        value = self.data.pop()
        self.reBuildingWhenPop()
        return value
if __name__ == "__main__":
    # Exhaustive self-test: for every size 1..999, push a shuffled
    # permutation of 1..size and check the heap pops it back sorted.
    # The same heap instance is reused; each round empties it fully.
    minHeap = MinHeap()
    for j in range(1, 1000):
        # make test data
        testSize = j
        testSet = [i for i in range(1, 1 + testSize)]
        random.shuffle(testSet)
        for i in testSet:
            minHeap.push(i)
        if minHeap.getSize() != testSize:
            print(str(j) + ": Test is fail: MinHeap.getSize()")
            sys.exit(-1)
        # Popping must yield 1, 2, ..., testSize in order.
        for i in range(1, 1 + testSize):
            value = minHeap.pop()
            #print(str(i) + ", " + str(value))
            if i != value:
                print(str(j) + ": Test is fail: MinHeap")
                sys.exit(-1)
        print(str(j) + ": Test is pass")
| [
"dongheon.kim@lge.com"
] | dongheon.kim@lge.com |
a3fecf2b2639a499281789ccf1c9a980633503b5 | 8d91f8867fb5b72ca257d9e7152188914154ccd1 | /pune/service/deploy.py | 34f54c7777056940aca2674eb70c75a4be27b75b | [] | no_license | liwushuo/pune | c6420e9a3f65711cc7a6c578720122e5b7f53eb9 | 23eae59fc3d3515903700740fade1bce8b8d6e12 | refs/heads/master | 2021-01-10T08:10:41.056344 | 2016-04-18T08:45:01 | 2016-04-18T08:45:01 | 53,919,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,488 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from flask import current_app
from pune.core import celery
from pune.core import db
from pune.models import Deploy
class DeployService(object):
    """Data-access helpers around the Deploy model.

    All methods are static; SQLAlchemy-style session commits happen inside
    each mutator. Dict views (``to_dict``) are returned instead of model
    instances.
    """

    @staticmethod
    def get(deploy_id):
        """Return the deploy as a dict, or None when it does not exist."""
        deploy = Deploy.query.get(deploy_id)
        return deploy and deploy.to_dict()

    @staticmethod
    def add(name, project_id, environment_id, release_id, operator_id, task_id):
        """Create and persist a new deploy; returns its dict view."""
        deploy = Deploy(name=name, project_id=project_id,
                        environment_id=environment_id,
                        release_id=release_id, operator_id=operator_id,
                        task_id=task_id)
        db.session.add(deploy)
        db.session.commit()
        return deploy.to_dict()

    @staticmethod
    def list_by_environment(environment_id, offset, limit):
        """List an environment's deploys, newest first, paginated."""
        deploys = (Deploy.query.filter_by(environment_id=environment_id)
                   .order_by(Deploy.created_at.desc())
                   .offset(offset)
                   .limit(limit)
                   .all())
        return [deploy.to_dict() for deploy in deploys]

    # TODO: not safe at all... (original note: count-then-act is racy)
    @staticmethod
    def count_running_by_environment(environment_id):
        """Count deploys currently RUNNING in the environment."""
        count = Deploy.query.filter_by(environment_id=environment_id, status=Deploy.Status.RUNNING).count()
        return count

    @staticmethod
    def count_by_environment(environment_id):
        """Count all deploys ever made in the environment."""
        count = Deploy.query.filter_by(environment_id=environment_id).count()
        return count

    @staticmethod
    def mark_succeeded(deploy_id):
        """Transition a RUNNING deploy to SUCCEEDED and stamp finish time."""
        Deploy.query.filter_by(id=deploy_id, status=Deploy.Status.RUNNING).update({'status': Deploy.Status.SUCCEEDED, 'finished_at': datetime.utcnow()})
        db.session.commit()

    @staticmethod
    def mark_failed(deploy_id):
        """Transition a RUNNING deploy to FAILED and stamp finish time."""
        Deploy.query.filter_by(id=deploy_id, status=Deploy.Status.RUNNING).update({'status': Deploy.Status.FAILED, 'finished_at': datetime.utcnow()})
        db.session.commit()

    @staticmethod
    def mark_cancelled(deploy_id):
        """Transition a RUNNING deploy to CANCELLED and stamp finish time."""
        Deploy.query.filter_by(id=deploy_id, status=Deploy.Status.RUNNING).update({'status': Deploy.Status.CANCELLED, 'finished_at': datetime.utcnow()})
        db.session.commit()

    @staticmethod
    def cancel_task(deploy_id):
        """Revoke the deploy's Celery task (without terminating it) and
        mark the deploy CANCELLED.
        """
        deploy = Deploy.query.get(deploy_id)
        # (removed a leftover Python-2 debug `print deploy.task_id`)
        celery.control.revoke(deploy.task_id, terminate=False)
        DeployService.mark_cancelled(deploy_id)

    @staticmethod
    def update():
        # Not implemented yet; kept as a placeholder from the original.
        pass
| [
"maplevalley8@gmail.com"
] | maplevalley8@gmail.com |
c5ebdf4e4a222fa96d4d8a27ede2f428ab34f5f6 | 59f0ae12b81de3c9d5a29ce82425b9498fee2c1b | /tests/test_application.py | a89c2dcd0619356ab4b4fe39088ff5eea083d3e6 | [] | no_license | Cheongmin/VoiceReader-Rest | 9f99f14a60b97ccd8d97b74c6196a644a983684c | 599ffb8a552bab9433389eec671ea97cf4be67d1 | refs/heads/master | 2022-12-11T10:38:31.843698 | 2019-02-08T14:04:12 | 2019-02-08T14:04:12 | 155,198,951 | 0 | 0 | null | 2022-12-08T02:13:26 | 2018-10-29T11:16:16 | Python | UTF-8 | Python | false | false | 390 | py | from voicereader import application
def test_create(monkeypatch):
    """App factory smoke test: /api/ping answers 200 'pong'."""
    # Stub out middleware/JWT wiring so only routing is exercised.
    monkeypatch.setattr('voicereader.api_v1.middlewares.init_app', lambda app: None)
    monkeypatch.setattr('voicereader.api_v1.middlewares.jwt.init_api', lambda api: None)

    client = application.create().test_client()
    response = client.get('api/ping')

    assert response.status_code == 200
    assert response.get_data() == b'pong'
| [
"gyuhwan.a.kim@gmail.com"
] | gyuhwan.a.kim@gmail.com |
8b57c9efa4983527dbd55908cbb5b5acbd4edbeb | 20e3ee6642d20578e48756963798acfe307ac6b5 | /Miscellaneous/Python XML Parser/Example.py | ef7e6dc6952d02a5cb41a0c433b4bb1594c14bce | [] | no_license | sirinenisaikiran/Python | 538f64276767435de3233b720f547aac0bf4d511 | bdfef0d1c04c7f3b9fc91a164b5fd1789828176c | refs/heads/master | 2023-01-31T00:53:01.650916 | 2021-06-06T10:39:20 | 2021-06-06T10:39:20 | 237,744,104 | 0 | 0 | null | 2023-01-26T03:38:47 | 2020-02-02T08:58:49 | Python | UTF-8 | Python | false | false | 455 | py | import xml.etree.ElementTree as ET
# Parse the sample document and work from its root element.
mytree = ET.parse('Sample.xml')
myroot = mytree.getroot()

# (removed a block of commented-out exploration code: tag/attrib dumps)

# Print each <food> entry's item name and price.
for x in myroot.findall('food'):
    item = x.find('item').text
    price = x.find('price').text
    print(item, price)
"saikiran.sirneni@gmail.com"
] | saikiran.sirneni@gmail.com |
a199a85117918b1c8fe6769bfdcbff3be408262e | 5186cc912502f9f32948c3810b5adc2cd0f015d8 | /soybean/reactor.py | b9e91523fe64d36b907749d9656b9625adbdbb63 | [
"Apache-2.0"
] | permissive | lcgong/soybean | c0ef4f1a88191a653bfd1f70881a2f1e470943fd | 43fd891113b05c79419d7c0850145c8284e51206 | refs/heads/main | 2023-02-27T08:47:47.198713 | 2021-02-03T04:00:52 | 2021-02-03T04:00:52 | 334,369,214 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | import inspect
import asyncio
import logging
from rocketmq.client import PushConsumer, ConsumeStatus
from .utils import make_group_id, json_loads
from .event import OccupiedEvent
from .typing import HandlerType
from .exceptions import UnkownArgumentError
logger = logging.getLogger("soybean.reactor")
class Reactor:
    """Bridges a RocketMQ push consumer to an asyncio message handler.

    Messages arrive on a rocketmq-client worker thread; each callback is
    forwarded onto the asyncio event loop that called ``start()`` and the
    worker thread blocks until the handler coroutine finishes.
    """

    def __init__(self, channel, topic: str, expression: str,
                 handler: HandlerType, depth: int):
        self._channel = channel
        self._topic = topic
        self._expression = expression
        self._handler = handler
        # Stable consumer-group id derived from channel + handler identity.
        self._reactor_id = make_group_id(channel.name, handler, depth)
        self._consumer = None
        # Maps a raw message object to the handler's declared arguments.
        self._handler_argvals_getter = build_argvals_getter(handler)
        self._busy_event = None

    @property
    def reactor_id(self):
        return self._reactor_id

    async def start(self):
        import threading
        print(
            f"reacter-start thread: {threading.get_ident()}, loop: {id(asyncio.get_event_loop())}")

        consumer = PushConsumer(group_id=self._reactor_id)
        consumer.set_thread_count(1)
        consumer.set_name_server_address(self._channel.namesrv_addr)

        self._busy_event = OccupiedEvent()
        loop = asyncio.get_running_loop()

        def run_coroutine(coroutine):
            # Run the coroutine thread-safely on the event loop and block
            # this (non-loop) thread until it completes.
            future = asyncio.run_coroutine_threadsafe(coroutine, loop)
            # BUGFIX: the original returned the bound method `future.result`
            # without calling it, so nothing ever waited for completion —
            # the busy-event acquire/release and the handler itself were
            # only scheduled, never awaited, and handler errors could not
            # be caught below.
            return future.result()

        def _callback(msg):
            run_coroutine(self._busy_event.acquire())
            try:
                arg_values = self._handler_argvals_getter(msg)
                run_coroutine(self._handler(*arg_values))
                return ConsumeStatus.CONSUME_SUCCESS
            except Exception as exc:
                logger.error((f"caught an error in reactor "
                              f"'{self._reactor_id}': {exc}"),
                             exc_info=exc)
                return ConsumeStatus.RECONSUME_LATER
            finally:
                run_coroutine(self._busy_event.release())

        consumer.subscribe(self._topic, _callback, expression=self._expression)
        consumer.start()
        self._consumer = consumer

    async def stop(self):
        # Wait until no callback is mid-flight.
        await self._busy_event.wait_idle()
        # The current rocketmq-client-cpp implementation does not guarantee
        # its worker thread has finished before shutdown, which can deadlock
        # against asyncio. After the callback work settles, wait a little
        # longer so the client can wind down its consumer thread, then shut
        # the consumer down.
        await asyncio.sleep(0.5)
        if self._consumer:
            self._consumer.shutdown()
            self._consumer = None
def build_argvals_getter(handler):
    """Build a function mapping a raw message to the handler's argument values.

    Each parameter name of *handler* is looked up in ``_getter_factories``;
    raises UnkownArgumentError when any parameter has no known factory.
    """
    parameters = inspect.signature(handler).parameters

    getters = []
    unknowns = []
    for arg_name, arg_spec in parameters.items():
        factory = _getter_factories.get(arg_name)
        if factory is None:
            unknowns.append((arg_name, arg_spec))
        else:
            getters.append(factory(arg_spec))

    if unknowns:
        mod = handler.__module__
        func = handler.__qualname__
        args = ", ".join(f"'{name}'" for name, spec in unknowns)
        raise UnkownArgumentError(
            f"Unknown arguments: {args} of '{func}' in '{mod}'")

    def _getter(msgobj):
        # Lazy generator: values are produced as the handler consumes them.
        return (arg_getter(msgobj) for arg_getter in getters)

    return _getter
def getter_message(arg_spec):
    """Return a body extractor matching the parameter's type annotation.

    str -> UTF-8 decoded text, bytes -> raw body, anything else -> JSON.
    """
    annotation = arg_spec.annotation
    if annotation == str:
        return lambda msgobj: msgobj.body.decode("utf-8")
    if annotation == bytes:
        return lambda msgobj: msgobj.body
    return lambda msgobj: json_loads(msgobj.body.decode("utf-8"))
def getter_msg_id(arg_spec):
    """Extractor for the raw message id (returned as-is, no decoding)."""
    return lambda msgobj: msgobj.id
def getter_msg_topic(arg_spec):
    """Extractor decoding the message topic bytes to text.

    Fixes the original attribute-name typo ``"tpoic"``, which made every
    topic lookup raise AttributeError.
    """
    return lambda msgobj: getattr(msgobj, "topic").decode("utf-8")
def getter_msg_keys(arg_spec):
    """Extractor decoding the message keys bytes to text."""
    return lambda msgobj: msgobj.keys.decode("utf-8")
def getter_msg_tags(arg_spec):
    """Extractor decoding the message tags bytes to text."""
    return lambda msgobj: msgobj.tags.decode("utf-8")
# Maps a handler parameter name to the factory that builds its value getter.
# The long ("message_*") and short ("msg_*") aliases resolve to the same
# extractors; build_argvals_getter rejects any other parameter name.
_getter_factories = {
    "message": getter_message,
    "message_id": getter_msg_id,
    "message_topic": getter_msg_topic,
    "message_keys": getter_msg_keys,
    "message_tags": getter_msg_tags,
    "msg_id": getter_msg_id,
    "msg_topic": getter_msg_topic,
    "msg_keys": getter_msg_keys,
    "msg_tags": getter_msg_tags,
}
| [
"lcgong@gmail.com"
] | lcgong@gmail.com |
5a0dfba91d758caa2da4d972f8b603773eb86654 | 22fcb33a8d110630a4e090a9a3202618f52376d6 | /videos/migrations/0001_initial.py | 2f19862eba1dfa585ace0055705938d6b52090dd | [] | no_license | karandeepSJ/CVIT-UserStudyPortal | b5f08ef2833b23d26da5ab1ecfe2494ab26e4021 | a7ff3b81fea4a8333d83c1c89ebc56747ca541c8 | refs/heads/master | 2020-05-19T22:10:36.058553 | 2019-08-30T11:57:08 | 2019-08-30T11:57:08 | 185,241,744 | 0 | 1 | null | 2019-08-30T11:57:09 | 2019-05-06T17:28:15 | JavaScript | UTF-8 | Python | false | false | 545 | py | # Generated by Django 2.1.4 on 2019-04-26 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the BVH table (an action label plus the
    filesystem path of the motion-capture file)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='BVH',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action', models.CharField(max_length=100)),
                ('path', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"karan.jkps@gmail.com"
] | karan.jkps@gmail.com |
723900ac72e65ec5aa7c94b94924dc0e69cf8764 | c5effe7f4efe739df5f4567f64cfa7b76f843aee | /OCR++/myproject/myapp/urls.py | 38947087c61fc1a9fd0a2f57125ca42ef8a884f5 | [] | no_license | Kabongosalomon/ocrplusplus | 16180f8239fb2113dff4568c0c3b98930e050071 | 7dc3f225306a545b3768311eafea2fa56959d950 | refs/heads/master | 2021-09-07T15:34:33.738598 | 2018-02-25T04:47:37 | 2018-02-25T04:47:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from . import views
# URL routes for the myapp views (legacy Django `patterns()` syntax).
# BUGFIX: the original was missing the comma after the 'home' entry,
# which is a SyntaxError as soon as this module is imported.
urlpatterns = patterns('myproject.myapp.views',
    url(r'^$', 'home', name='home'),
    url(r'^list/$', 'list', name='list'),
    url(r'^list/runScript/$', 'runScript', name='runScript'),
    # url(r'^list/vote/$', 'vote', name='vote'),
    # url(r'^list/upload/$', 'upload', name='upload'),
)
| [
"ocrplusplus123@gmail.com"
] | ocrplusplus123@gmail.com |
6c3f8ad91c11294558986e5612928dcb59119e90 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/281/81893/submittedfiles/testes.py | 9d5ad8d30fc63ed816896c55f3d77b98a8e9722a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
# Read one integer from the user.
x=int(input('Digite um número:'))
# NOTE(review): x never changes inside the loop body, so any input in
# (0, 13] prints 'Ok' forever. Presumably an `if` was intended, or x
# should be re-read each iteration — confirm the intent before fixing.
while x>0 and x<=13:
    print('Ok')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
067a7abea5aa8ea89d7339cdb1ac2cad200418bb | 5fbf2adec8d7647b9aeefa51695aa3f13ee57810 | /server/load_backup_locally.py | 076c18cbae05647fcf9c789b079ff13e403dc7b7 | [] | no_license | angelacantfly/dancedeets-monorepo | 8bb6579f6f5d30e88c8d4c0e239c6c8fed678094 | 6b7a48d91d0737010acd9e08a89d99c2c982205a | refs/heads/master | 2021-01-20T09:14:22.613044 | 2017-08-26T21:48:14 | 2017-08-26T21:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | #!/usr/bin/python
"""
# App Engine import data from Datastore Backup to localhost
You can use this script to import large(ish) App Engine Datastore backups to your localohst dev server.
## Getting backup files
Follow instructions from Greg Bayer's awesome article to fetch the App Engine backups:
http://gbayer.com/big-data/app-engine-datastore-how-to-efficiently-export-your-data/
Basically, download and configure gsutil and run:
```
gsutil -m cp -R gs://your_bucket_name/your_path /local_target
```
## Reading data to your local (dev_appserver) application
Copy-paste this gist to your Interactive Console, set correct paths and press `Execute`.
(default: http://localhost:8000/console)
"""
import sys
sys.path.insert(0, '/usr/local/google_appengine')
print sys.path
from google.appengine.api.files import records
from google.appengine.datastore import entity_pb
from google.net.proto.ProtocolBuffer import ProtocolBufferDecodeError
from google.appengine.ext import ndb
from os.path import isfile
from os.path import join
from os import listdir
from events.eventdata import DBEvent
def run():
    """Import an App Engine Datastore backup into the local dev server.

    Reads every record file under `mypath`, decodes each record as an
    EntityProto, rewrites its app id to the local dev app name, and
    batch-puts the entities via ndb (flushing every 100 records).
    Python 2 / App Engine SDK code.
    """
    # Set your downloaded folder's path here (must be readable by dev_appserver)
    mypath = '/Users/lambert/Dropbox/dancedeets/data/datastore_backup_datastore_backup_2016_11_19_DBEvent/15700286559371541387849311E815D'
    # Se the class of the objects here
    cls = DBEvent
    # Set your app's name here
    appname = "dev~None"
    # Do the harlem shake
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    for file in onlyfiles:
        i = 0
        try:
            raw = open(mypath + "/" + file, 'r')
            reader = records.RecordsReader(raw)
            to_put = list()
            for record in reader:
                entity_proto = entity_pb.EntityProto(contents=record)
                # Retarget the entity's key to the local dev application.
                entity_proto.key_.app_ = appname
                obj = cls._from_pb(entity_proto)
                to_put.append(obj)
                i += 1
                if i % 100 == 0:
                    # Flush in batches of 100 to bound memory use.
                    print "Saved %d %ss" % (i, '') #entity.kind())
                    ndb.put_multi(to_put) # use_memcache=False)
                    to_put = list()
            # Flush the final partial batch for this file.
            ndb.put_multi(to_put) # use_memcache=False)
            to_put = list()
            print "Saved %d" % i
        except ProtocolBufferDecodeError:
            # Undecodable record files are skipped silently ("All good").
            """ All good """
run()
| [
"mlambert@gmail.com"
] | mlambert@gmail.com |
3a6f927241b180e157f7756d4833dee91440dfa9 | 7c8bd2e26fdabf1555e0150272ecf035f6c21bbd | /삼성기출/새로운 게임2.py | 3f7cacad987e8780f64a22bcecc01d30ec281fc1 | [] | no_license | hyeokjinson/algorithm | 44090c2895763a0c53d48ff4084a96bdfc77f953 | 46c04e0f583d4c6ec4f51a24f19a373b173b3d5c | refs/heads/master | 2021-07-21T10:18:43.918149 | 2021-03-27T12:27:56 | 2021-03-27T12:27:56 | 245,392,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,461 | py | from _collections import deque
# Number of pieces on the board: k (numbered 1..k).
# Move directions: up, down, left, right.
# White cell: move there; if pieces are already present, stack on top.
# Red cell: move there, then reverse the order of the moved stack.
# Blue cell: reverse the piece's direction and move one step; if that
# cell is also blue (or off-board), stay in place.
dx=[0,0,-1,1]
dy=[1,-1,0,0]
# Maps each direction index to its opposite (0<->1, 2<->3).
rev_direction={0:1,1:0,2:3,3:2}
def check():
    """Return True when any cell holds a stack of four or more pieces."""
    for row in range(n):
        for col in range(n):
            if len(start[row][col]) >= 4:
                return True
    return False
def solve():
    """Simulate turns until some cell stacks >= 4 pieces.

    Returns the 1-based turn number at which a stack of four forms, or -1
    when no such stack appears within 1000 turns.
    """
    turn=0
    # p flags that this piece just bounced off a blue cell/wall, so its
    # stored direction must be updated to the reversed direction nd after
    # the move resolves.
    p=0
    while True:
        turn+=1
        if turn>1000:
            return -1
        for number in range(1,k+1):
            x,y,d=horse[number]
            nx,ny=x+dx[d],y+dy[d]
            # Off-board or blue cell: reverse direction and try one step.
            if nx<0 or nx>=n or ny<0 or ny>=n or arr[nx][ny]==2:
                nd=rev_direction[d]
                nx,ny=x+dx[nd],y+dy[nd]
                if nx<0 or nx>=n or ny<0 or ny>=n or arr[nx][ny]==2:
                    # Both directions blocked: only flip the direction.
                    horse[number][2]=nd
                    continue
                p=1
            # White cell: carry this piece and everything above it.
            if arr[nx][ny]==0:
                left=start[x][y][:start[x][y].index(number)]
                right=start[x][y][start[x][y].index(number):]
                start[x][y]=left
                start[nx][ny].extend(right)
                if len(start[nx][ny])>=4:
                    return turn
                for i in right:
                    horse[i][0],horse[i][1]=nx,ny
                if p==1:
                    # NOTE(review): nd is only guaranteed bound when p was
                    # set in THIS iteration; the code relies on resetting p
                    # immediately below — confirm before restructuring.
                    horse[number][2]=nd
                    p=0
            # Red cell: same move but the carried stack is reversed.
            elif arr[nx][ny]==1:
                left = start[x][y][:start[x][y].index(number)]
                right = start[x][y][start[x][y].index(number):]
                start[x][y] = left
                right.reverse()
                start[nx][ny].extend(right)
                if len(start[nx][ny]) >= 4:
                    return turn
                for i in right:
                    horse[i][0], horse[i][1] = nx, ny
                if p == 1:
                    horse[number][2] = nd
                    p = 0
if __name__ == '__main__':
    # n: board size, k: number of pieces.
    n,k=map(int,input().split())
    # Cell colors — 0: white, 1: red, 2: blue.
    arr=[list(map(int,input().split()))for _ in range(n)]
    # n x n grid of piece stacks (bottom-to-top piece numbers).
    # NOTE(review): []*n is just [] — the *n is a no-op here.
    start=[[[]*n for _ in range(n)] for _ in range(n)]
    # horse[i] = [row, col, direction] for piece i (all converted to 0-based).
    horse=dict()
    for i in range(1,k+1):
        x,y,v=map(int,input().split())
        start[x-1][y-1].append(i)
        horse[i]=[x-1,y-1,v-1]
    print(solve())
| [
"hjson817@gmail.com"
] | hjson817@gmail.com |
85c8b2f42aed216a99f935dec957f601a6e4c545 | b2521e5fa0b0e59bddbdafd5b3b96d8ad3198379 | /GameOfThrones_partI.py | edb89eb1ecc78e0b6bda083a19fce97f5b5ee8ef | [] | no_license | llpyyz/HackerRank_Warmup | aa0db25cdce4fe9b4899033dc9fda295e7bddbb2 | b9628306e684aaed1673305a5256433e317c5cc0 | refs/heads/master | 2021-01-19T13:53:07.155099 | 2015-01-16T23:37:28 | 2015-01-16T23:37:28 | 29,372,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | """
David Schonberger
Hackerrank.com
Warmpup - Game of Thrones - I
1/4/2015
"""
#count occurence of c in s
def chr_count(c,s):
return len([ch for ch in s if ch == c])
# A string can be rearranged into a palindrome iff at most one character
# has an odd count (exactly zero odd counts for even length, exactly one
# for odd length). Python 2 script (raw_input).
string = raw_input()
chars = sorted(string)
distinct = set(chars)
odd_counts = sum(chr_count(c, chars) % 2 == 1 for c in distinct)

if len(chars) % 2 == 0:
    found = odd_counts == 0
else:
    found = odd_counts == 1

if found:
    print("YES")
else:
    print("NO")
| [
"llp_yyz@hotmail.com"
] | llp_yyz@hotmail.com |
845db2f47f763ae4e09097e253320bf541736141 | 53eee7eb899cb518983008532257037fb89def13 | /343.integer-break.py | e226facec72a5754c30be689c04e5eec6a509a9c | [] | no_license | chenxu0602/LeetCode | 0deb3041a66cb15e12ed4585bbe0fefce5dc6b26 | 3dc5af2bc870fcc8f2142130fcd2b7cab8733151 | refs/heads/master | 2023-07-05T19:26:21.608123 | 2023-07-02T08:35:35 | 2023-07-02T08:35:35 | 233,351,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #
# @lc app=leetcode id=343 lang=python3
#
# [343] Integer Break
#
# https://leetcode.com/problems/integer-break/description/
#
# algorithms
# Medium (50.19%)
# Likes: 1086
# Dislikes: 227
# Total Accepted: 110.4K
# Total Submissions: 219.2K
# Testcase Example: '2'
#
# Given a positive integer n, break it into the sum of at least two positive
# integers and maximize the product of those integers. Return the maximum
# product you can get.
#
# Example 1:
#
#
#
# Input: 2
# Output: 1
# Explanation: 2 = 1 + 1, 1 × 1 = 1.
#
#
# Example 2:
#
#
# Input: 10
# Output: 36
# Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
#
# Note: You may assume that n is not less than 2 and not larger than 58.
#
#
#
# @lc code=start
import math
class Solution:
    def integerBreak(self, n: int) -> int:
        """Return the max product of >= 2 positive integers summing to n.

        Closed form: use as many 3s as possible; a remainder of 1 is
        better spent as 2 + 2 (since 3 * 1 < 2 * 2), a remainder of 2
        stays as a single 2. Runs in O(log n) via exponentiation.
        Assumes 2 <= n <= 58 (problem constraint).

        Cleanup vs. original: removed the commented-out O(n) DP variant
        and replaced float math.pow round-trips with exact integer
        arithmetic.
        """
        if n == 2:
            return 1
        if n == 3:
            return 2
        quotient, remainder = divmod(n, 3)
        if remainder == 0:
            return 3 ** quotient
        if remainder == 1:
            # Trade one 3 plus the leftover 1 for 2 * 2.
            return 4 * 3 ** (quotient - 1)
        return 2 * 3 ** quotient
# @lc code=end
| [
"chenxu@Chens-iMac.local"
] | chenxu@Chens-iMac.local |
05ffc138a8dfcb6c084d4ff20b53ae4b7261b8b4 | 26a97032622f10c47e1961ded98023f2daf539d2 | /src/customers/forms.py | b550242183c540ec43a371955878f58d0ce823dc | [] | no_license | mycota/laundry_MS | 7ada777bc4a6cd746152b44b7257064db8465beb | ab41a70202717957b694152590b72a52d0fb1bff | refs/heads/master | 2023-06-02T15:10:43.466619 | 2021-06-22T02:25:49 | 2021-06-22T02:25:49 | 379,100,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | from django import forms
from django.contrib.auth.models import User
from .models import Customers
class AddCustomerForm(forms.ModelForm):
    """ModelForm for creating a Customers record.

    NOTE(review): 'Famale' is presumably a typo for 'Female', but it is a
    stored choice value — fixing it would require migrating existing data.
    """
    gend = (('Male', 'Male'), ('Famale', 'Famale'),)
    cust_name = forms.CharField(max_length=70)
    cust_phone = forms.CharField(max_length=10)
    cust_email = forms.CharField(max_length=100)
    cust_address = forms.CharField(widget=forms.Textarea,max_length=225)
    cust_gender = forms.ChoiceField(choices=gend)
    # balance = forms.FloatField()

    class Meta:
        model = Customers
        fields = ['cust_name', 'cust_phone', 'cust_email', 'cust_address', 'cust_gender']
class UpdateCustomerForm(forms.ModelForm):
    """ModelForm for editing an existing Customers record (same fields and
    widgets as the create form).

    NOTE(review): 'Famale' is presumably a typo for 'Female', but it is a
    stored choice value — fixing it would require migrating existing data.
    """
    gend = (('Male', 'Male'), ('Famale', 'Famale'),)
    cust_name = forms.CharField(max_length=70)
    cust_phone = forms.CharField(max_length=10)
    cust_email = forms.CharField(max_length=100)
    cust_address = forms.CharField(widget=forms.Textarea,max_length=225)
    cust_gender = forms.ChoiceField(choices=gend)
    # balance = forms.FloatField()

    class Meta:
        model = Customers
        fields = ['cust_name', 'cust_phone', 'cust_email', 'cust_address', 'cust_gender']
"universaltechsolutionsuts@gmail.com"
] | universaltechsolutionsuts@gmail.com |
d396b8340a6bf61e29cf5d053679b246a4c33040 | 689fe220a0f5b3adc40b19f7b63b571a6bf412bb | /present_absent_loci.py | 7f98cf35431124e36b99276326a6a2bb170683a2 | [] | no_license | NinhVu/filteringStacks | b97bb05fbf04f1490a5a6277e063063c2451732f | 19fb7f45ea78993e04afb1a55de743b5faa203cb | refs/heads/master | 2016-08-12T16:00:31.896619 | 2016-03-13T04:22:36 | 2016-03-13T04:22:36 | 53,767,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | #!/usr/bin/python3.4
# present_absent.py 3/12/16 by Ninh Vu
# This program will filter loci/stacks found in only individuals ask by user
import glob, sys, os
os.getcwd()
input_list = input("\nEnter individuals in catalog (uStacks id) you want to filter e.g. 106,121,112,120 : ")
# convert input list into list of integers and sort
user_list = input_list.split(",")
user_list = list(map(int, user_list))
user_list.sort()
print("\nOnly stacks with these individuals will be filtered:",user_list,"\n")
print("Takes a few seconds or minutes to filter depending on number of stacks/loci in catalog...")
# filter tags.tsv__________________________________________________________________________________________________________________________________________________
for file in glob.glob("*.tags.tsv"): # open ***.catalog.tags.tsv file in current directory
tags = open(file, 'r')
header = tags.readline()
data = tags.readline()
tags_tsv_loci=[]
while data:
# split row into list and define variables for loop below
catCount = 0
rowItems = data.split("\t")
# v2: split into oneList then create two lists: sampleID and catalogID. Convert both lists into integers, remove duplicate items and finally sort sampleID
for y in rowItems: # loop takes strings and convert into list of sample_catalogs
if catCount == 8:
samples_catalog = rowItems[8]
oneList = samples_catalog.split(",") # e.g. ['27_22319', '28_874']
catCount +=1
sampleID = [i.split('_')[0] for i in oneList] # split oneList and make sample list. [0] represents the first item of split item.
catalogID = [i.split('_')[1] for i in oneList] # split oneList and make catalog list. [1] represents the second item of the split item. Not necessary here.
sampleID, catalogID = list(map(int, sampleID)), list(map(int, catalogID))
sampleID = list(set(sampleID)) # REMOVE DUPLICATE B/C YOU WANT ALL STACKS EVEN ONES WITH MULTITPLE COPIES
sampleID.sort() # sort sampleID
if sampleID == user_list:
tags_tsv_loci.append(rowItems[2])
# read next line
data = tags.readline()
tags_tsv_loci = list(map(int, tags_tsv_loci)) # convert string list to int list
tags_tsv_loci = list(set(tags_tsv_loci)) # remove duplicate items
tags_tsv_loci.sort() # sort loci
tags.close()
# create whitelist.txt_____________________________________________________________________________________________________________________________________________
whitelist = open('present_absent_whitelist.txt', 'w')
whitelist.write('\n'.join('%s' % x for x in tags_tsv_loci)) # write whitelist with only locus
whitelist.write('\n')
print("\n\nYour present/absent stacks of whitelist file present_absent_whitelist.txt is ready.\n\n\n")
whitelist.close()
| [
"ninh.vu@idfg.idaho.gov"
] | ninh.vu@idfg.idaho.gov |
1fad6fbeeeb619735e591e2a715bef13c07b1e3b | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/generated_clients/apis/gkehub/v1alpha1/gkehub_v1alpha1_client.py | 45d59b19c56748c72896a2a2c8b5b7fce532c530 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 20,475 | py | """Generated client library for gkehub version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.generated_clients.apis.gkehub.v1alpha1 import gkehub_v1alpha1_messages as messages
class GkehubV1alpha1(base_api.BaseApiClient):
"""Generated client library for service gkehub version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://gkehub.googleapis.com/'
MTLS_BASE_URL = 'https://gkehub.mtls.googleapis.com/'
_PACKAGE = 'gkehub'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1alpha1'
_CLIENT_ID = 'CLIENT_ID'
_CLIENT_SECRET = 'CLIENT_SECRET'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'GkehubV1alpha1'
_URL_VERSION = 'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new gkehub handle."""
url = url or self.BASE_URL
super(GkehubV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_features = self.ProjectsLocationsFeaturesService(self)
self.projects_locations_global_features = self.ProjectsLocationsGlobalFeaturesService(self)
self.projects_locations_global = self.ProjectsLocationsGlobalService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsFeaturesService(base_api.BaseApiService):
"""Service class for the projects_locations_features resource."""
_NAME = 'projects_locations_features'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsFeaturesService, self).__init__(client)
self._upload_configs = {
}
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (GkehubProjectsLocationsFeaturesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/features/{featuresId}:getIamPolicy',
http_method='GET',
method_id='gkehub.projects.locations.features.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v1alpha1/{+resource}:getIamPolicy',
request_field='',
request_type_name='GkehubProjectsLocationsFeaturesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (GkehubProjectsLocationsFeaturesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/features/{featuresId}:setIamPolicy',
http_method='POST',
method_id='gkehub.projects.locations.features.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='GkehubProjectsLocationsFeaturesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (GkehubProjectsLocationsFeaturesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/features/{featuresId}:testIamPermissions',
http_method='POST',
method_id='gkehub.projects.locations.features.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='GkehubProjectsLocationsFeaturesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsGlobalFeaturesService(base_api.BaseApiService):
"""Service class for the projects_locations_global_features resource."""
_NAME = 'projects_locations_global_features'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsGlobalFeaturesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Adds a new Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features',
http_method='POST',
method_id='gkehub.projects.locations.global.features.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['featureId'],
relative_path='v1alpha1/{+parent}/features',
request_field='feature',
request_type_name='GkehubProjectsLocationsGlobalFeaturesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Removes a Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features/{featuresId}',
http_method='DELETE',
method_id='gkehub.projects.locations.global.features.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['force'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsGlobalFeaturesDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Feature) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features/{featuresId}',
http_method='GET',
method_id='gkehub.projects.locations.global.features.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsGlobalFeaturesGetRequest',
response_type_name='Feature',
supports_download=False,
)
def List(self, request, global_params=None):
    r"""Lists Features in a given project and location.

    Args:
      request: (GkehubProjectsLocationsGlobalFeaturesListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListFeaturesResponse) The response message.
    """
    # Dispatch through the base service using the descriptor registered below.
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

# Method descriptor; note the standard list filtering/paging query params.
List.method_config = lambda: base_api.ApiMethodInfo(
    flat_path='v1alpha1/projects/{projectsId}/locations/global/features',
    http_method='GET',
    method_id='gkehub.projects.locations.global.features.list',
    ordered_params=['parent'],
    path_params=['parent'],
    query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
    relative_path='v1alpha1/{+parent}/features',
    request_field='',
    request_type_name='GkehubProjectsLocationsGlobalFeaturesListRequest',
    response_type_name='ListFeaturesResponse',
    supports_download=False,
)
def Patch(self, request, global_params=None):
    r"""Updates an existing Feature.

    Args:
      request: (GkehubProjectsLocationsGlobalFeaturesPatchRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    # Dispatch through the base service using the descriptor registered below.
    config = self.GetMethodConfig('Patch')
    return self._RunMethod(
        config, request, global_params=global_params)

# Method descriptor; 'updateMask' selects which Feature fields to modify.
Patch.method_config = lambda: base_api.ApiMethodInfo(
    flat_path='v1alpha1/projects/{projectsId}/locations/global/features/{featuresId}',
    http_method='PATCH',
    method_id='gkehub.projects.locations.global.features.patch',
    ordered_params=['name'],
    path_params=['name'],
    query_params=['updateMask'],
    relative_path='v1alpha1/{+name}',
    request_field='feature',
    request_type_name='GkehubProjectsLocationsGlobalFeaturesPatchRequest',
    response_type_name='Operation',
    supports_download=False,
)
class ProjectsLocationsGlobalService(base_api.BaseApiService):
    """Service class for the projects_locations_global resource."""

    _NAME = 'projects_locations_global'

    def __init__(self, client):
        super(GkehubV1alpha1.ProjectsLocationsGlobalService, self).__init__(client)
        # Container resource only: no upload configurations and no methods
        # of its own (nested feature services are defined separately).
        self._upload_configs = {
        }
class ProjectsLocationsOperationsService(base_api.BaseApiService):
    """Service class for the projects_locations_operations resource.

    Wraps the standard google.longrunning Operations surface (cancel,
    delete, get, list) for GKE Hub.
    """

    _NAME = 'projects_locations_operations'

    def __init__(self, client):
        super(GkehubV1alpha1.ProjectsLocationsOperationsService, self).__init__(client)
        # No media-upload methods on this service.
        self._upload_configs = {
        }

    def Cancel(self, request, global_params=None):
        r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.

        Args:
          request: (GkehubProjectsLocationsOperationsCancelRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Empty) The response message.
        """
        config = self.GetMethodConfig('Cancel')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Lazily-evaluated method descriptor for Cancel.
    Cancel.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
        http_method='POST',
        method_id='gkehub.projects.locations.operations.cancel',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1alpha1/{+name}:cancel',
        request_field='cancelOperationRequest',
        request_type_name='GkehubProjectsLocationsOperationsCancelRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
        r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

        Args:
          request: (GkehubProjectsLocationsOperationsDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Empty) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Lazily-evaluated method descriptor for Delete.
    Delete.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
        http_method='DELETE',
        method_id='gkehub.projects.locations.operations.delete',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1alpha1/{+name}',
        request_field='',
        request_type_name='GkehubProjectsLocationsOperationsDeleteRequest',
        response_type_name='Empty',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
        r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

        Args:
          request: (GkehubProjectsLocationsOperationsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Lazily-evaluated method descriptor for Get.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
        http_method='GET',
        method_id='gkehub.projects.locations.operations.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1alpha1/{+name}',
        request_field='',
        request_type_name='GkehubProjectsLocationsOperationsGetRequest',
        response_type_name='Operation',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

        Args:
          request: (GkehubProjectsLocationsOperationsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ListOperationsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Lazily-evaluated method descriptor for List (supports paging/filtering).
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations',
        http_method='GET',
        method_id='gkehub.projects.locations.operations.list',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['filter', 'pageSize', 'pageToken'],
        relative_path='v1alpha1/{+name}/operations',
        request_field='',
        request_type_name='GkehubProjectsLocationsOperationsListRequest',
        response_type_name='ListOperationsResponse',
        supports_download=False,
    )
class ProjectsLocationsService(base_api.BaseApiService):
    """Service class for the projects_locations resource.

    Exposes Cloud Locations metadata (get one location / list locations).
    """

    _NAME = 'projects_locations'

    def __init__(self, client):
        super(GkehubV1alpha1.ProjectsLocationsService, self).__init__(client)
        # No media-upload methods on this service.
        self._upload_configs = {
        }

    def Get(self, request, global_params=None):
        r"""Gets information about a location.

        Args:
          request: (GkehubProjectsLocationsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Location) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Lazily-evaluated method descriptor for Get.
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}',
        http_method='GET',
        method_id='gkehub.projects.locations.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v1alpha1/{+name}',
        request_field='',
        request_type_name='GkehubProjectsLocationsGetRequest',
        response_type_name='Location',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""Lists information about the supported locations for this service.

        Args:
          request: (GkehubProjectsLocationsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ListLocationsResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    # Lazily-evaluated method descriptor for List.
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v1alpha1/projects/{projectsId}/locations',
        http_method='GET',
        method_id='gkehub.projects.locations.list',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['filter', 'includeUnrevealedLocations', 'pageSize', 'pageToken'],
        relative_path='v1alpha1/{+name}/locations',
        request_field='',
        request_type_name='GkehubProjectsLocationsListRequest',
        response_type_name='ListLocationsResponse',
        supports_download=False,
    )
class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""

    _NAME = 'projects'

    def __init__(self, client):
        super(GkehubV1alpha1.ProjectsService, self).__init__(client)
        # Container resource only: no upload configurations and no methods.
        self._upload_configs = {
        }
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
09c2e1bc21335613f5e925b52bd82f0b8f9d9309 | 741c5c70bf4a0adb05db6b0777c8d07e28eb9cf6 | /lib/python3.4/site-packages/IPython/core/profileapp.py | 2a412589ca0dcc1cdc77a98a58967352a4566bca | [] | no_license | andybp85/hyLittleSchemer | e686d2dc0f9067562367ea1173f275e8e2d2cb85 | af5cb6adf6a196cc346aa7d14d7f9509e084c414 | refs/heads/master | 2021-01-19T07:48:31.309949 | 2015-01-04T00:57:30 | 2015-01-04T00:57:30 | 28,496,304 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,967 | py | # encoding: utf-8
"""
An application for managing IPython profiles.
To be invoked as the `ipython profile` subcommand.
Authors:
* Min RK
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from IPython.config.application import Application
from IPython.core.application import (
BaseIPythonApplication, base_flags
)
from IPython.core.profiledir import ProfileDir
from IPython.utils.importstring import import_item
from IPython.utils.path import get_ipython_dir, get_ipython_package_dir
from IPython.utils import py3compat
from IPython.utils.traitlets import Unicode, Bool, Dict
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
create_help = """Create an IPython profile by name
Create an ipython profile directory by its name or
profile directory path. Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. Once created, you will
can edit the configuration files in the profile
directory to configure IPython. Most users will create a
profile directory by name,
`ipython profile create myprofile`, which will put the directory
in `<ipython_dir>/profile_myprofile`.
"""
list_help = """List available IPython profiles
List all available profiles, by profile location, that can
be found in the current working directly or in the ipython
directory. Profile directories are named using the convention
'profile_<profile>'.
"""
profile_help = """Manage IPython profiles
Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. You can create profiles
with `ipython profile create <name>`, or see the profiles you
already have with `ipython profile list`
To get started configuring IPython, simply do:
$> ipython profile create
and IPython will create the default profile in <ipython_dir>/profile_default,
where you can edit ipython_config.py to start configuring IPython.
"""
_list_examples = "ipython profile list # list all profiles"
_create_examples = """
ipython profile create foo # create profile foo w/ default config files
ipython profile create foo --reset # restage default config files over current
ipython profile create foo --parallel # also stage parallel config files
"""
_main_examples = """
ipython profile create -h # show the help string for the create subcommand
ipython profile list -h # show the help string for the list subcommand
ipython locate profile foo # print the path to the directory for profile 'foo'
"""
#-----------------------------------------------------------------------------
# Profile Application Class (for `ipython profile` subcommand)
#-----------------------------------------------------------------------------
def list_profiles_in(path):
    """Return the profile names found directly under *path*.

    A profile is any sub-directory named ``profile_<name>``; the leading
    ``profile_`` prefix is stripped from the returned names.
    """
    found = []
    for entry in os.listdir(path):
        try:
            entry_path = os.path.join(path, entry)
        except UnicodeError:
            # Skip entries whose names cannot be joined with the root path.
            continue
        if entry.startswith('profile_') and os.path.isdir(entry_path):
            # Only split on the first underscore so names may contain '_'.
            found.append(entry.split('_', 1)[-1])
    return found
def list_bundled_profiles():
    """Return the names of the profiles shipped inside the IPython package."""
    bundle_root = os.path.join(get_ipython_package_dir(), u'config', u'profile')
    # Every real sub-directory of the bundled profile folder is a profile,
    # except Python's byte-code cache directory.
    return [name for name in os.listdir(bundle_root)
            if os.path.isdir(os.path.join(bundle_root, name))
            and name != "__pycache__"]
class ProfileLocate(BaseIPythonApplication):
    """`ipython locate profile` subcommand: print a profile dir's path."""
    description = """print the path to an IPython profile dir"""

    def parse_command_line(self, argv=None):
        super(ProfileLocate, self).parse_command_line(argv)
        # The first positional argument, if any, names the profile to locate.
        if self.extra_args:
            self.profile = self.extra_args[0]

    def start(self):
        # profile_dir is resolved by the base application from self.profile.
        print(self.profile_dir.location)
class ProfileList(Application):
    """`ipython profile list` subcommand: show every discoverable profile."""
    name = u'ipython-profile'
    description = list_help
    examples = _list_examples

    # Command-line aliases / flags handled by the traitlets Application.
    aliases = Dict({
        'ipython-dir' : 'ProfileList.ipython_dir',
        'log-level' : 'Application.log_level',
    })
    flags = Dict(dict(
        debug = ({'Application' : {'log_level' : 0}},
            "Set Application.log_level to 0, maximizing log output."
        )
    ))

    ipython_dir = Unicode(get_ipython_dir(), config=True,
        help="""
        The name of the IPython directory. This directory is used for logging
        configuration (through profiles), history storage, etc. The default
        is usually $HOME/.ipython. This options can also be specified through
        the environment variable IPYTHONDIR.
        """
    )

    def _print_profiles(self, profiles):
        """print list of profiles, indented."""
        for profile in profiles:
            print(' %s' % profile)

    def list_profile_dirs(self):
        # Three sources, reported in order: bundled with IPython, the user's
        # IPython directory, and the current working directory.
        profiles = list_bundled_profiles()
        if profiles:
            print()
            print("Available profiles in IPython:")
            self._print_profiles(profiles)
            print()
            print(" The first request for a bundled profile will copy it")
            print(" into your IPython directory (%s)," % self.ipython_dir)
            print(" where you can customize it.")

        profiles = list_profiles_in(self.ipython_dir)
        if profiles:
            print()
            print("Available profiles in %s:" % self.ipython_dir)
            self._print_profiles(profiles)

        profiles = list_profiles_in(py3compat.getcwd())
        if profiles:
            print()
            print("Available profiles in current directory (%s):" % py3compat.getcwd())
            self._print_profiles(profiles)

        print()
        print("To use any of the above profiles, start IPython with:")
        print(" ipython --profile=<name>")
        print()

    def start(self):
        self.list_profile_dirs()
# Flags for `ipython profile create`: start from the shared base flags and
# add the create-specific ones.
create_flags = {}
create_flags.update(base_flags)
# don't include '--init' flag, which implies running profile create in other apps
create_flags.pop('init')
create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
                    "reset config files in this profile to the defaults.")
create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
                    "Include the config files for parallel "
                    "computing apps (ipengine, ipcontroller, etc.)")
class ProfileCreate(BaseIPythonApplication):
    """`ipython profile create` subcommand: stage default config files."""
    name = u'ipython-profile'
    description = create_help
    examples = _create_examples
    auto_create = Bool(True, config=False)

    def _log_format_default(self):
        # traitlets dynamic-default hook for log_format.
        return "[%(name)s] %(message)s"

    def _copy_config_files_default(self):
        # traitlets dynamic-default hook: always stage config files.
        return True

    parallel = Bool(False, config=True,
        help="whether to include parallel computing config files")

    def _parallel_changed(self, name, old, new):
        # traitlets change handler: keep config_files in sync with the
        # 'parallel' flag by adding/removing the ipyparallel config files.
        parallel_files = [ 'ipcontroller_config.py',
                            'ipengine_config.py',
                            'ipcluster_config.py'
                        ]
        if new:
            for cf in parallel_files:
                self.config_files.append(cf)
        else:
            for cf in parallel_files:
                if cf in self.config_files:
                    self.config_files.remove(cf)

    def parse_command_line(self, argv):
        super(ProfileCreate, self).parse_command_line(argv)
        # accept positional arg as profile name
        if self.extra_args:
            self.profile = self.extra_args[0]

    flags = Dict(create_flags)

    classes = [ProfileDir]

    def _import_app(self, app_path):
        """import an app class"""
        app = None
        name = app_path.rsplit('.', 1)[-1]
        try:
            app = import_item(app_path)
        except ImportError:
            # Optional front-ends may be missing; just skip their config.
            self.log.info("Couldn't import %s, config file will be excluded", name)
        except Exception:
            self.log.warn('Unexpected error importing %s', name, exc_info=True)
        return app

    def init_config_files(self):
        super(ProfileCreate, self).init_config_files()
        # use local imports, since these classes may import from here
        from IPython.terminal.ipapp import TerminalIPythonApp
        apps = [TerminalIPythonApp]
        for app_path in (
            'IPython.qt.console.qtconsoleapp.IPythonQtConsoleApp',
            'IPython.html.notebookapp.NotebookApp',
            'IPython.nbconvert.nbconvertapp.NbConvertApp',
        ):
            app = self._import_app(app_path)
            if app is not None:
                apps.append(app)
        if self.parallel:
            from IPython.parallel.apps.ipcontrollerapp import IPControllerApp
            from IPython.parallel.apps.ipengineapp import IPEngineApp
            from IPython.parallel.apps.ipclusterapp import IPClusterStart
            from IPython.parallel.apps.iploggerapp import IPLoggerApp
            apps.extend([
                IPControllerApp,
                IPEngineApp,
                IPClusterStart,
                IPLoggerApp,
            ])
        # Instantiate each app just to let it stage its own config files
        # into this profile's directory.
        for App in apps:
            app = App()
            app.config.update(self.config)
            app.log = self.log
            app.overwrite = self.overwrite
            app.copy_config_files=True
            app.ipython_dir=self.ipython_dir
            app.profile_dir=self.profile_dir
            app.init_config_files()

    def stage_default_config_file(self):
        # Intentionally a no-op: the per-app loop above stages everything.
        pass
class ProfileApp(Application):
    """`ipython profile` umbrella command dispatching to create/list/locate."""
    name = u'ipython-profile'
    description = profile_help
    examples = _main_examples

    subcommands = Dict(dict(
        create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
        list = (ProfileList, ProfileList.description.splitlines()[0]),
        locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
    ))

    def start(self):
        if self.subapp is None:
            # NOTE(review): on Python 3 this prints the repr of a dict_keys
            # view rather than a clean list of names.
            print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
            print()
            self.print_description()
            self.print_subcommands()
            self.exit(1)
        else:
            return self.subapp.start()
| [
"andy@youshallthrive.com"
] | andy@youshallthrive.com |
6668ad6d2a23a2b39e19b176c96af3cd8ff06f5b | 703926c99852ac67a4d4fa9009364ad26fe254d5 | /dices.py | a4c959a24f55476956d7d9000d6c3ea81927617c | [
"MIT"
] | permissive | mariamingallonMM/AI-ML-W4-normal-probability-distribution | e6196b3e6b752d8cb850a9b2d31d7ebf69c84752 | 95569929078b22555f870675f27aeca29f8ce487 | refs/heads/main | 2023-05-12T02:09:14.167027 | 2021-06-04T02:03:25 | 2021-06-04T02:03:25 | 336,903,772 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | import numpy as np
def probability_of_sum(total: int, dice1, dice2):
    """Return the probability that the two dice rolls sum to *total*.

    Each die is described by a sequence of its face values (e.g.
    ``np.linspace(1, 6, 6, dtype=int)`` for a fair six-sided die); every
    face is assumed equally likely.  Accepts any sized sequence (numpy
    array, list, tuple), so non-standard dice work too.

    The probability is printed as a percentage (original behaviour kept)
    and, as an improvement over the original (which returned ``None``),
    also returned as a float so callers can use the value.

    reference: https://statweb.stanford.edu/~susan/courses/s60/split/node65.html
    """
    combinations = len(dice1) * len(dice2)
    # Count ordered outcomes whose face sum equals the requested total.
    favorable = sum(1 for i in dice1 for j in dice2 if int(i + j) == total)
    prob = favorable / combinations
    print("{:.2%}".format(prob))
    return prob
# Define a fair six-sided die as the integer face values 1..6.
dice1 = np.linspace(1,6,6,dtype=int)
# Probability that two such dice sum to 6 (the function prints the result).
prob = probability_of_sum(6, dice1, dice1)
| [
"maria.mingallon@mottmac.com"
] | maria.mingallon@mottmac.com |
2578305f74225a3ce266d7e62e3ee9cd11303766 | 7ff9de453f53b658d13896bb0c376d67489145a7 | /python_basics/datatypes/strings.py | 67ce0ec20bd3b0a951eaa007570a45e52eb2a7a7 | [
"MIT"
] | permissive | danielkpodo/python-zero-to-mastery | aa4851fd0bfe1f0bfa4191fa141fa9551fd7c7a9 | d39468f48211bc82e4e2613745d9107d433e05af | refs/heads/master | 2022-11-21T10:55:54.776490 | 2020-07-19T15:07:35 | 2020-07-19T15:07:35 | 275,909,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | username = "narh"
last_name = "kpodo"
| [
"kpododanielnarh@gmail.com"
] | kpododanielnarh@gmail.com |
ebc0f24740813770b38a7fd3c48bc48a8611dd75 | 55b132bd206ddd4e84fa9de2f6c06ccf50385d2d | /flearn/models/Fmnist/mclr.py | 15d83ee2608d9d7fa88edd105ea44aad625afe53 | [] | no_license | XinJiang1994/HFmaml | 9b58fab7a1a1f3d153103ceb0cd964d5d49a1ed4 | 15e70293c896b78d054dd20901a1941d1a91d40d | refs/heads/master | 2023-02-01T15:37:38.882104 | 2020-12-17T06:51:42 | 2020-12-17T06:51:42 | 288,163,757 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | import numpy as np
import tensorflow as tf
from flearn.models.FedmamlBaseModel import BaseModel
from flearn.utils.model_utils import active_func
### This is an implementation of the Hessian-free MAML meta-learning algorithm proposed by Sheng Yue ####
class Model(BaseModel):
    """Fashion-MNIST MLP for the HF-MAML federated meta-learning framework.

    A three-layer fully connected network (784 -> 32 -> 64 -> num_classes)
    built on TensorFlow 1.x placeholders; the graph construction itself is
    driven by the project's BaseModel.
    """

    def __init__(self,params):
        # Number of output classes; read before the base class builds the
        # graph because construct_weights() uses it.
        self.num_classes=params['num_classes']
        super().__init__(params)

    def get_input(self):
        '''
        :return:the placeholders of input: features_train,labels_train,features_test,labels_test

        Features are flattened 28x28 images (784 floats); labels are
        one-hot vectors of length 10.
        '''
        features_train = tf.placeholder(tf.float32, shape=[None, 784], name='features_train')
        labels_train = tf.placeholder(tf.float32, shape=[None, 10], name='labels_train')
        features_test = tf.placeholder(tf.float32, shape=[None, 784], name='features_test')
        labels_test = tf.placeholder(tf.float32, shape=[None, 10], name='labels_test')
        return features_train,labels_train,features_test,labels_test

    def forward_func(self,inp, weights, w_names , reuse = False):
        '''
        :param inp: input batch of flattened images
        :param weights: list of weight tensors (theta), ordered as w_names
        :param w_names: names matching construct_weights()'s return order
        :param reuse: unused here; kept for the BaseModel interface
        :return: output logits (no final activation applied)

        when overload this function you should make w=dict(zip(w_names,weights))
        '''
        weights = dict(zip(w_names, weights))
        # Two hidden layers with the project-wide activation, then a
        # linear output layer (softmax presumably applied by the loss —
        # TODO confirm in BaseModel).
        hidden = tf.matmul(inp, weights['w1']) + weights['b1']
        hidden = active_func(hidden)
        hidden = tf.matmul(hidden, weights['w2']) + weights['b2']
        hidden = active_func(hidden)
        hidden = tf.matmul(hidden, weights['w3']) + weights['b3']
        return hidden

    def construct_weights(self):
        '''
        :return: list of trainable variables [w1, b1, w2, b2, w3, b3],
            in the order expected by forward_func's w_names mapping.
        '''
        # Truncated-normal init for weights, zeros for biases.
        w1 = tf.Variable(tf.truncated_normal([784, 32], stddev=0.01), name='w1')
        b1 = tf.Variable(tf.zeros([32]), name='b1')
        w2 = tf.Variable(tf.truncated_normal([32, 64], stddev=0.01), name='w2')
        b2 = tf.Variable(tf.zeros([64]), name='b2')
        w3 = tf.Variable(tf.truncated_normal([64, self.num_classes], stddev=0.01), name='w3')
        b3 = tf.Variable(tf.zeros([self.num_classes]), name='b3')
        return [w1, b1, w2, b2, w3, b3]
| [
"xinjiang@csu.edu.cn"
] | xinjiang@csu.edu.cn |
2c190be799017c52cc5a83639396080f5ef20ae9 | 82c54cab8e0c5b73e1fdb9615296613cc43929a0 | /authentication/forms.py | d3f7b622935250beef47f85ac1ec6f9ee9435405 | [] | no_license | creechcorbin/twitter_clone | e4146657bd13043544f846c48b34fe83e90e91da | bd075bd53fd9e5558cda85ade86ed9995f72118c | refs/heads/master | 2022-12-10T09:23:37.036180 | 2020-09-05T03:23:32 | 2020-09-05T03:23:32 | 292,993,852 | 0 | 0 | null | 2020-09-09T01:08:27 | 2020-09-05T03:22:43 | Python | UTF-8 | Python | false | false | 345 | py | from django import forms
class LoginForm(forms.Form):
    """Credentials form for signing an existing user in."""
    # max_length mirrors SignupForm.username so stored names always fit.
    username = forms.CharField(max_length=80)
    password = forms.CharField(widget=forms.PasswordInput)
class SignupForm(forms.Form):
    """Registration form: account name, public display name and password."""
    username = forms.CharField(max_length=80)
    displayname = forms.CharField(max_length=80)
    # Rendered as a password input; no confirmation field is collected.
    password = forms.CharField(widget=forms.PasswordInput)
| [
"creechcorbin@gmail.com"
] | creechcorbin@gmail.com |
855c082aa1c28384a3ca3f6688c7cd52583b2287 | 47e93b916a6b55871997bfa95bb2f69676416b00 | /landerdb.py | 0486a4742f580c46200c8342d154cb857fb29434 | [] | no_license | Inqre/Melody | dcc88acb83b23a3c0786ab5b9529b1dcd71f6ece | 84f298e5446f53c5f3fededd9f2920552db74c87 | refs/heads/master | 2020-05-15T22:32:28.959905 | 2013-11-08T02:45:06 | 2013-11-08T02:45:06 | 14,127,017 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | import json
import os
__version__ = "1.0.0"
class Connect:
    """Tiny JSON-file document store.

    On-disk layout: one JSON object mapping collection names to lists of
    records (dicts), e.g. ``{"users": [{"name": "a"}, ...]}``.

    Fixes over the original implementation:
    * files are opened in text mode — ``json.dump`` writes ``str``, so the
      original binary-mode ('rb'/'wb') handles raised TypeError on Python 3;
    * the ``stale`` flag is now actually cleared after a load/save, so the
      documented "don't re-read from disk unless something changed" cache
      works instead of reloading on every call;
    * ``find`` with a multi-key query matches records on ALL given keys and
      returns each record at most once (the original appended one duplicate
      per matching key and matched on ANY key);
    * the bare ``except:`` is narrowed to decode/IO errors.
    """

    def __init__(self, db_file):
        self.db = db_file
        self.json_data = {}
        # When True the in-memory cache may be out of date and must be
        # refreshed from disk before the next operation.
        self.stale = True
        if not os.path.exists(self.db):
            self._save()

    def _load(self):
        """Refresh the in-memory cache from disk, but only when stale."""
        if not self.stale:
            return
        try:
            with open(self.db, 'r') as fp:
                self.json_data = json.load(fp)
        except (ValueError, OSError):
            # Corrupt or unreadable file: rewrite it from the current
            # in-memory state (same best-effort recovery as the original).
            self._save()
        self.stale = False

    def _save(self):
        """Write the in-memory data to disk; memory becomes authoritative."""
        with open(self.db, 'w') as fp:
            json.dump(self.json_data, fp)
        self.stale = False

    def insert(self, collection, data):
        """Append one record to *collection* (created on first use)."""
        self._load()
        self.json_data.setdefault(collection, []).append(data)
        self._save()

    def remove(self, collection, data):
        """Delete the first record equal to *data*.

        Returns False when the collection does not exist; raises ValueError
        (from list.remove) when no record matches — both as before.
        """
        self._load()
        if collection not in self.json_data:
            return False
        self.json_data[collection].remove(data)  # deletes a single entry
        self._save()

    def find(self, collection, data):
        """Return the records matching *data*.

        Pass the string ``"all"`` to get every record in the collection;
        otherwise *data* is a dict and a record matches when every given
        key is present with an equal value. Returns False when the
        collection does not exist.
        """
        self._load()
        if collection not in self.json_data:
            return False
        records = self.json_data[collection]
        if data == "all":
            return list(records)
        return [
            record for record in records
            if all(key in record and record[key] == value
                   for key, value in data.items())
        ]
| [
"max00355@gmail.com"
] | max00355@gmail.com |
8074d9f48b99a19a25b95da45d02787fb65ed44d | 771247a4498d50745c5fbff09e7446ea9213ab19 | /Py8/export_openweather.py | a80a7c5c48213f7a13b051fcbfb593a6a75dd25e | [] | no_license | ostrowsky/Parcer | 42697f9a98f42c8220675d540e8dc2a95855783e | f953b7cbb6b948df894950ee7ed804fcd6b8e811 | refs/heads/master | 2021-01-21T06:39:46.184872 | 2017-06-23T16:07:15 | 2017-06-23T16:07:15 | 91,581,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,845 | py | """ OpenWeatherMap (экспорт)
Сделать скрипт, экспортирующий данные из базы данных погоды,
созданной скриптом openweather.py. Экспорт происходит в формате CSV или JSON.
Скрипт запускается из командной строки и получает на входе:
export_openweather.py --csv filename [<город>]
export_openweather.py --json filename [<город>]
export_openweather.py --html filename [<город>]
При выгрузке в html можно по коду погоды (weather.id) подтянуть
соответствующие картинки отсюда: http://openweathermap.org/weather-conditions
Экспорт происходит в файл filename.
Опционально можно задать в командной строке город. В этом случае
экспортируются только данные по указанному городу. Если города нет в базе -
выводится соответствующее сообщение.
"""
import sys
import sqlite3

# Database produced by the companion openweather.py script.
db_filename = 'db_weather.sqlite'

#sys.argv = ['export_openweather.py', 'weather.html', 'MX']
try:
    # argv[1] is the output file, argv[2] the country code; an optional
    # argv[3] (city name) is consumed further below.
    filename = sys.argv[1]
    country = sys.argv[2]
except IndexError:
    print("Задан неверный параметр. Файл должен быть запущен с указанием параметров: export_openweather.py filename [<город>]")
    print(sys.argv)
    # Fix: abort on a usage error. The original fell through and later
    # crashed with a NameError because filename/country were undefined.
    sys.exit(1)
# HTML skeleton for the report; one <tr> per database record is appended
# below, then the closing tags.
html_string = '''
<!DOCTYPE html>
<html>
<head>
<title>Weather</title>
</head>
<body>
<h1>Погода на момент актуализации базы данных</h1>
<table border = "1">
<tbody>
<tr>
<th align="center" width="auto">id_города</th>
<th align="center" width="auto">Город</th>
<th align="center" width="auto">Страна</th>
<th align="center" width="auto">Дата</th>
<th align="center" width="auto">Температура</th>
<th align="center" width="auto">id_погоды</th>
<th align="center" width="auto">Значок</th>
</tr>
'''

# Two CLI arguments: export every city of the given country.
if len(sys.argv) == 3:
    with sqlite3.connect(db_filename) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute('''
        select distinct id_города, Город, Страна, Дата, Температура, id_погоды, Значок
        from weather
        where Страна = ?''', (country,))
        db_rows = cur.fetchall()
    cities = list(db_rows)
    for city in cities:
        #print(list(city))
        if city:
            #print(city)
            #print(list(city))
            html_string += '\t<tr>\n'
            for k in list(city):
                # NOTE(review): the icon column is detected by comparing each
                # VALUE with the last column's value, not by position; any
                # other column holding the same string would also be rendered
                # as an image.
                if k == list(city)[-1]:
                    path = "http://openweathermap.org/img/w/" + str(k) + ".png"
                    html_string += '\t\t<td align="center" width="auto"><img src=' + path + '></td>\n'
                else:
                    html_string += '\t\t<td align="center" width="auto">' + str(k) + '</td>\n'
            html_string += '\t</tr>\n'
        else:
            # NOTE(review): this only fires for a falsy row object; an empty
            # result set skips the loop entirely and prints nothing.
            print("Города указанной страны отсутствуют в базе")
    html_string += '''
</tbody>
</table>
</body>
</html>'''
# Three CLI arguments: export a single city of the given country.
elif len(sys.argv) == 4:
    city = sys.argv[3]
    with sqlite3.connect(db_filename) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute('''
        select distinct id_города, Город, Страна, Дата, Температура, id_погоды, Значок
        from weather
        where Город = ? and Страна = ?''', (city, country,))
        db_rows = cur.fetchall()
    cities = list(db_rows)
    # NOTE(review): the loop variable shadows the 'city' CLI argument above;
    # this whole branch duplicates the len==3 branch except for the query.
    for city in cities:
        # print(list(city))
        if city:
            # print(city)
            # print(list(city))
            html_string += '\t<tr>\n'
            for k in list(city):
                if k == list(city)[-1]:
                    path = "http://openweathermap.org/img/w/" + str(k) + ".png"
                    html_string += '\t\t<td align="center" width="auto"><img src=' + path + '></td>\n'
                else:
                    html_string += '\t\t<td align="center" width="auto">' + str(k) + '</td>\n'
            html_string += '\t</tr>\n'
        else:
            print("Город отсутствует в базе")
    html_string += '''
</tbody>
</table>
</body>
</html>'''

# NOTE(review): encoded_str is computed but never used — the file is written
# through the text-mode handle below.
encoded_str = html_string.encode(encoding='UTF-8')
with open(filename, 'w', encoding='UTF-8') as f:
    f.write(html_string)
| [
"ostrowskyi@gmail.com"
] | ostrowskyi@gmail.com |
a435a2a71d4aaf93551b2f8952ededd0cb812d28 | 51ae004ddefa3e6c942e18348b4b14d95e8fdf0e | /src/__main__.py | 3d2b2baedc1de9302d48136b20547f5e2a8c2687 | [] | no_license | ewascent/python_file_sample | 10c6209b39f8c40605e7152b199f1cf6c5dcd0a1 | e0f1adc4be2e3b71aa583629cfa9b3748ca669c4 | refs/heads/master | 2020-03-30T22:49:18.364585 | 2018-10-08T07:08:50 | 2018-10-08T07:08:50 | 151,679,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | """main is main"""
import sys
from error import InsufficientArguments
from error import ArgumentTypeException
import filer
from utilities import setup_logging
__author__ = "ewascent"
__copyright__ = "ewascent"
__license__ = "mit"
def main(_args=None):
"""enter the dragon, is what I imagine the main method saying"""
try:
_logger = setup_logging('info')
if _args is None:
_args = sys.argv
files = _args
result_count = 100
for file in files:
if "__main__.py" not in file:
_logger.info(f"Recieved path to file: {file}")
results = filer.outputter(some_collection=filer.reader(file),
this_many_results=result_count)
print(f'Top {result_count} matches for file {file}')
for result in results:
print(result)
except InsufficientArguments:
_logger.error("Recieved no file input")
raise
except ArgumentTypeException:
_logger.error("Not a valid file path")
raise
except:
_logger.error("Unexpected error: %s", sys.exc_info()[0])
print("Unexpected error:", sys.exc_info()[0])
raise
if __name__ == "__main__":
main(sys.argv)
| [
"ewascent@gmail.com"
] | ewascent@gmail.com |
31b1631b1523aadad273e28fadb8ad1c54978cc0 | 951400a855a6f4af8d9dedfd533e4a19f243f1c7 | /tree.py | 66022eeec58704396df2b56fba968f6e0af905ff | [] | no_license | twangad/test | b698939f4a0033505c1fd1e1a5c2e8757683cf0b | d73bdf1554a520a6892f873777e2226fa09ed151 | refs/heads/master | 2020-07-02T10:17:13.132626 | 2016-09-11T09:17:37 | 2016-09-11T09:17:37 | 67,919,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | ###
class test():
    """Minimal demo class.

    Fixes over the original: both methods were missing the ``self``
    parameter (any call raised TypeError), and ``print "here"`` was
    Python-2-only syntax; ``print("here")`` behaves identically on both
    Python 2 and 3 for a single argument.
    """

    def __init__(self):
        # Nothing to initialise; defined explicitly for demonstration.
        pass

    def saysth(self):
        """Print a fixed message."""
        print("here")
"twangad@connect.ust.hk"
] | twangad@connect.ust.hk |
7f12cf4f8c2a9dbbd0be88734b98d0c8b28eca87 | e9bc070d1d9257c4a213bc1f33ca6269bbc37b43 | /tests/roots/test-ext-autosummary/conf.py | f4d696cc912bb3108db71ca0fb841c3d904f7427 | [
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"BSD-2-Clause"
] | permissive | GoodRx/sphinx | 99b33454afa06cf6a66d080c3c4019cc7ddde2f0 | c310c73baffa4892cf35fd74918193824c86309a | refs/heads/1.6.x-py-type-xref | 2021-01-01T06:02:33.415993 | 2017-07-16T03:12:58 | 2017-07-16T03:12:58 | 97,339,105 | 1 | 1 | null | 2017-07-16T03:12:58 | 2017-07-15T19:57:45 | Python | UTF-8 | Python | false | false | 184 | py | import sys, os
sys.path.insert(0, os.path.abspath('.'))
extensions = ['sphinx.ext.autosummary']
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
| [
"i.tkomiya@gmail.com"
] | i.tkomiya@gmail.com |
8d416110ae94969cba5ebcae29b1d1e4b9bf6b17 | a0d06a661fd760b57e671582189f1fb1cbee87f0 | /src/chat_take/web/handlers.py | 5f21c90045824bb3d64b2d7b5a245d9832c8d143 | [] | no_license | Tsvetov/chat | 6def42a334a044806b9a483d07edc05662a44438 | 8f1b064c29acdc3f17ad8d0a228a40552cdf4130 | refs/heads/master | 2021-08-26T07:25:38.898642 | 2017-11-22T06:30:59 | 2017-11-22T06:30:59 | 111,576,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import logging
from redis import StrictRedis
from tornado import web, websocket, escape
r = StrictRedis(db=1)
logger = logging.getLogger('handlers')
class PingHandler(web.RequestHandler):
def get(self):
self.write('ok') # pylint: disable=no-member
class LogoutHandler(web.RequestHandler):
@web.authenticated
def get(self):
self.clear_cookie('user')
self.redirect('/') | [
"ptsvetov@MacBook-Pro-Admin-33.local"
] | ptsvetov@MacBook-Pro-Admin-33.local |
a2a2275184e0dde13affe5fbe7484ad6d9b28750 | e3ecb87551f72c201fe6a9fbff772614cfb5ed4c | /mnist_qkeras2.py | ed5c2fb01e719d5efa76e2ecf5c08950db147fed | [
"MIT"
] | permissive | filipemlins/nas-hls4ml | 6cccdc7c061a2d1071e1328e5121aa4038b8fedd | b35afc4f684d803d352776c40f3a6cbbf47c4b1c | refs/heads/main | 2023-03-12T23:11:35.316667 | 2021-03-03T02:09:02 | 2021-03-03T02:09:02 | 343,616,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,792 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 19:57:08 2020
@author: filipe
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 16:44:49 2020
@author: filipe
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 15:05:57 2020
@author: filipe
"""
from tensorflow.keras.utils import to_categorical
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
##pre processing
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
Y_train1 = train[['label']]
X_train1 = train.drop(train.columns[[0]], axis=1)
X_test1 = test
X_train1 = np.array(X_train1)
X_test1 = np.array(X_test1)
#Reshape the training and test set
X_train1 = X_train1.reshape(X_train1.shape[0], 28, 28, 1)/255
X_test1 = X_test1.reshape(X_test1.shape[0], 28, 28, 1)/255
#Padding the images by 2 pixels since in the paper input images were 32x32
X_train1 = np.pad(X_train1, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_test1 = np.pad(X_test1, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_train, X_test, Y_train, Y_test = train_test_split(X_train1, Y_train1, test_size=0.2, random_state=42)
#Standardization
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
X_train = (X_train - mean_px)/(std_px)
#One-hot encoding the labels
Y_train = to_categorical(Y_train)
print(X_train.shape[0], "train samples")
print(X_test.shape[0], "test samples")
#scaler = StandardScaler().fit(X_train)
#X_train = scaler.transform(X_train)
#X_test = scaler.transform(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1
from callbacks import all_callbacks
from tensorflow.keras.layers import Activation, MaxPooling2D, Flatten
from qkeras.qlayers import QDense, QActivation
from qkeras.qconvolutional import QConv2D
from qkeras.quantizers import quantized_bits, quantized_relu
model = Sequential()
model.add(QConv2D(8, (4, 4), strides=(1,1), input_shape=(32,32, 1),
kernel_quantizer=quantized_bits(7,1),bias_quantizer=quantized_bits(7,1), name="conv2d_0_m"))
model.add(QActivation(activation=quantized_relu(7,1), name='relu1'))
model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2), name='max1'))
model.add(QConv2D(
16, (2, 2), strides=(1,1),
kernel_quantizer=quantized_bits(7,1),
bias_quantizer=quantized_bits(7,1),
name="conv2d_1_m"))
model.add(QActivation(activation=quantized_relu(7,1), name='relu2'))
model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2), name='max2'))
model.add(Flatten())
model.add(QDense(120, name='fc1',
kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1),
kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001)))
model.add(QActivation(activation=quantized_relu(7,1), name='relu3'))
model.add(QDense(84, name='fc2',
kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1),
kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001)))
model.add(QActivation(activation=quantized_relu(7,1), name='relu4'))
model.add(QDense(10, name='output',
kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1),
kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001)))
model.add(Activation(activation='softmax', name='softmax'))
#from tensorflow_model_optimization.python.core.sparsity.keras import prune, pruning_callbacks, pruning_schedule
#from tensorflow_model_optimization.sparsity.keras import strip_pruning
#pruning_params = {"pruning_schedule" : pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)}
#model = prune.prune_low_magnitude(model, **pruning_params)
train = True
import keras
if train:
adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
# callbacks = all_callbacks(stop_patience = 1000,
# lr_factor = 0.5,
# lr_patience = 10,
# lr_epsilon = 0.000001,
# lr_cooldown = 2,
# lr_minimum = 0.0000001,
# outputDir = 'model_3')
# callbacks.callbacks.append(pruning_callbacks.UpdatePruningStep())
model.fit(X_train, Y_train, batch_size=1024,
epochs=10, validation_split=0.25, shuffle=True)#, callbacks = callbacks.callbacks)
# Save the model again but with the pruning 'stripped' to use the regular layer types
# model = strip_pruning(model)
model.save('model_4/KERAS_check_best_model.h5')
else:
from tensorflow.keras.models import load_model
from qkeras.utils import _add_supported_quantized_objects
co = {}
_add_supported_quantized_objects(co)
model = load_model('model_4/KERAS_check_best_model.h5', custom_objects=co)
import hls4ml
hls4ml.model.optimizer.OutputRoundingSaturationMode.layers = ['Activation']
hls4ml.model.optimizer.OutputRoundingSaturationMode.rounding_mode = 'AP_RND'
hls4ml.model.optimizer.OutputRoundingSaturationMode.saturation_mode = 'AP_SAT'
config = hls4ml.utils.config_from_keras_model(model, granularity='name',
default_precision='ap_fixed<8,2,AP_RND,AP_SAT>', default_reuse_factor=30000)
config['LayerName']['softmax']['exp_table_t'] = 'ap_fixed<18,8>'
config['LayerName']['softmax']['inv_table_t'] = 'ap_fixed<18,4>'
print(config)
hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir='model_4/hls4ml_prj')
hls_model.compile()
import plotting
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import load_model
#model_ref = load_model('model_1/KERAS_check_best_model.h5')
print("Accuracy quantized: {}".format(accuracy_score(Y_test, np.argmax(model.predict(X_test), axis=1))))
z = np.argmax(hls_model.predict(X_test), axis=1)
print("Accuracy hls4ml: {}".format(accuracy_score(Y_test, z)))
#print("Accuracy unpruned: {}".format(accuracy_score(np.argmax(y_test, axis=1), np.argmax(model_ref.predict(X_test), axis=1))))
#plt.figure(figsize=(9, 9))
#_ = plotting.makeRoc(X_train, Y_train, le.classes_, model)
##plt.gca().set_prop_cycle(None) # reset the colors
##_ = plotting.makeRoc(X_test, y_test, le.classes_, model_ref, linestyle='--')
#plt.gca().set_prop_cycle(None) # reset the colors
#_ = plotting.makeRoc(X_train, Y_train, le.classes_, hls_model, linestyle=':')
#
#hls_model.build(synth=True)
#
#hls4ml.report.read_vivado_report('model_3/hls4ml_prj')
| [
"filipemlins@gmail.com"
] | filipemlins@gmail.com |
6770119ff8804eda04aeeb3fd19760c08c6849a5 | 2a37885d0b4cd6e5938e6d564f189a9ae7ade21f | /day8.py | 862379ce3c86b437a52f5d727431add939afce58 | [] | no_license | wilsonconley/advent-of-code-2020 | 189df901ba14212bd2f27c055be3feb99d17d4bb | 74e7903d8c6e8abc91f1ce72b5984d93cf828ff1 | refs/heads/master | 2023-02-05T23:06:50.935046 | 2020-12-29T01:32:22 | 2020-12-29T01:32:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | #!/usr/local/bin/python3
import os
import re
import string
import numpy as np
import copy
def read_file():
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "Inputs", os.path.basename(__file__).replace("py","txt"))
print("Loading File:")
print(filename)
data = list()
f = open(filename)
for x in f:
data.append(x.replace("\n",""))
f.close()
return data
def run_scenario(data):
# print("run scenario on: ")
# print(data)
valid = True
run = np.zeros(len(data))
instruction = 0
while (instruction < len(data) and run[instruction] == 0):
# print("\tLine " + str(instruction))
run[instruction] = 1
x = data[instruction]
if x[0:3] == "acc":
instruction += 1
elif x[0:3] == "jmp":
instruction += int(x[4:])
else:
instruction += 1
# print("\tNew instrction = " + str(instruction))
if instruction < len(data) and run[instruction] == 1:
# print("run[instruction] = " + str(run[instruction]))
valid = False
return valid
if __name__ == "__main__":
data = read_file()
# part 1
run = np.zeros(len(data))
count = 0
instruction = 0
while (run[instruction] == 0):
run[instruction] = 1
x = data[instruction]
if x[0:3] == "acc":
# print("adding: " + str(int(x[4:])))
count += int(x[4:])
instruction += 1
elif x[0:3] == "jmp":
instruction += int(x[4:])
else:
instruction += 1
print("count = " + str(count))
# part 2
fixed = False
count = 0
instruction = 0
run = np.zeros(len(data))
while instruction < len(data):
print("Line " + str(instruction))
if run[instruction] == 1:
print(data[instruction] + "already run")
break
run[instruction] = 1
x = data[instruction]
if x[0:3] == "acc":
count += int(x[4:])
instruction += 1
elif x[0:3] == "jmp":
if not fixed:
tmp = copy.deepcopy(data)
tmp[instruction] = tmp[instruction].replace("jmp", "nop")
if run_scenario(tmp):
print("changing line " + str(instruction) + " from " + data[instruction] + " to " + tmp[instruction])
data = tmp
fixed = True
instruction += 1
else:
instruction += int(x[4:])
else:
instruction += int(x[4:])
else:
if not fixed:
tmp = copy.deepcopy(data)
tmp[instruction] = tmp[instruction].replace("nop", "jmp")
if run_scenario(tmp):
print("changing line " + str(instruction) + " from " + data[instruction] + " to " + tmp[instruction])
data = tmp
fixed = True
instruction += int(x[4:])
else:
instruction += 1
else:
instruction += 1
print("count = " + str(count)) | [
"wilsonconley@Wilsons-MacBook-Pro.local"
] | wilsonconley@Wilsons-MacBook-Pro.local |
f8b8795f039a88f35ab0c597b1aa46c3a393ceb6 | 2557ba8bd6a8621fac5cee27cc7917d28f850ac8 | /tools/validation_rod2021.py | 51831606c7c0e3b1446bfea457fc10722cb0fe26 | [
"MIT"
] | permissive | WanxinT/Balanced-RODNet | 1993a4e5518cad074013a634fad06b1e108a8fd1 | f6c9c5b4696b697254698cce65a97ec2d92c7a3c | refs/heads/main | 2023-03-27T20:19:28.584615 | 2021-03-29T06:07:46 | 2021-03-29T06:07:46 | 352,520,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | # -*- coding:utf-8 -*-
"""
@author:Zehui Yu
@file: validation_rod2021.py
@time: 2021/01/31
"""
import sys
import os
from cruw import CRUW
from cruw.eval import evaluate_rod2021, evaluate_rod2021_APAR
import argparse
"python tools/validation_rod2021.py --config configs/my_config_rodnet_hg1_win16_lovasz_bs16_lr1e5_2020_2_11.py " \
" --checkpoint_name rodnet-hg1-win16-wobg-lovasz_bs16_lr1e5_2020_2_11-20210211-103511"
def parse_args():
parser = argparse.ArgumentParser(description='Test RODNet.')
parser.add_argument('--config', type=str, help='choose rodnet model configurations')
parser.add_argument('--checkpoint_name', type=str, default='./data/', help='directory to the prepared data')
args = parser.parse_args()
return args
def eval_rod2021_batch(config_file, checkpoint_name):
epoch_start, epoch_end = 1, 20
pkl_idx = list(range(epoch_start, epoch_end + 1))
for i in pkl_idx:
cmd = 'python tools/validation.py --config %s \
--data_dir /nfs/volume-95-8/ROD_Challenge/RODNet/data/zixiang_split/ \
--valid \
--checkpoint checkpoints/%s/epoch_%02d_final.pkl' % (config_file, checkpoint_name, i)
os.system(cmd)
data_root = "/nfs/volume-95-8/ROD_Challenge/src_dataset"
dataset = CRUW(data_root=data_root, sensor_config_name='sensor_config_rod2021')
submit_dir = '/nfs/volume-95-8/tianwanxin/RODNet/valid_results/%s' % checkpoint_name
truth_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/for_validation/gt_zixiang_split'
AP, AR = evaluate_rod2021_APAR(submit_dir, truth_dir, dataset)
# print('epoch: %d, AP: %.4f, AR: %.4f' % (i, AP, AR))
with open('/nfs/volume-95-8/tianwanxin/RODNet/valid_res/%s/valid_res.txt' % checkpoint_name, 'a') as f:
f.write('epoch: %d, AP: %.4f, AR: %.4f\n' % (i, AP, AR))
if __name__ == '__main__':
# data_root = "/nfs/volume-95-8/ROD_Challenge/src_dataset"
# dataset = CRUW(data_root=data_root, sensor_config_name='sensor_config_rod2021')
# submit_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/tools/valid_results/rodnet-hg1-win16-wobg-20210206-124028'
# truth_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/for_validation/gt_zixiang_split'
# ap, ar = evaluate_rod2021_APAR(submit_dir, truth_dir, dataset)
# print(ap, ar)
args = parse_args()
eval_rod2021_batch(args.config, args.checkpoint_name) | [
"noreply@github.com"
] | WanxinT.noreply@github.com |
06b23e46c8862a2a3ab779c2dfd4b094a8b55540 | 041b8daad5f4c72ae81a9706a2a3e5f56a36995f | /Python OOP/innerclass.py | 82fe749eec290ecd675c705d600c1f4ba2bab67b | [] | no_license | hashansl/dash-plotly-training | 7100e1fa55fb489d2713b68bc0ff9e8f0ecef18b | 987d845f476a6501c1c8673e2423d1c258fb4dbe | refs/heads/main | 2023-06-03T09:38:33.363588 | 2021-06-02T03:38:42 | 2021-06-02T03:38:42 | 371,782,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | #6
# you can create object of inner class inside the outer class
#OR
# you can create object of inner class outside the outer class provided you use outer class name to call it
class Student:
#Outer class
def __init__(self,name,rollno):
self.name=name
self.rollno=rollno
self.lap = self.Laptop()
def show(self):
print(self.name, self.rollno)
self.lap.show()
class Laptop:
#inner Class
def __init__(self):
self.brand = 'HP'
self.cpu = 'i5'
self.ram = 8
def show(self):
print(self.brand,self.cpu,self.ram)
s1 = Student('Hashan',2)
s2 = Student('Dananjaya',3)
s1.show()
lap1 = Student.Laptop()
| [
"hashan.dan@gmail.com"
] | hashan.dan@gmail.com |
c1b3876aae1a898188d4da189bd9db75e5afc8c6 | 41249d7d4ca9950b9c6fee89bf7e2c1929629767 | /results/lz_optimizations_20200507/script_lz_crab4freq_powell_bound10_constantFreqAndInitAmps_tf0-1.py | d14345a8c9437a041da7e650381b2b1114829de0 | [
"MIT"
] | permissive | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | f739b3baad1d2aadda576303bb0bbe9d48ec204a | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | refs/heads/master | 2022-11-22T00:44:09.998199 | 2020-07-21T08:35:28 | 2020-07-21T08:35:28 | 281,237,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
import src.protocol_ansatz as protocol_ansatz
from src.utils import autonumber_filename, basic_logger_configuration
output_file_name = os.path.basename(__file__)[7:-3] + '.csv'
output_file_name = autonumber_filename(output_file_name)
basic_logger_configuration(filename=output_file_name[:-3] + 'log')
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
num_frequencies = 4
protocol = protocol_ansatz.CRABProtocolAnsatz(num_frequencies=num_frequencies)
protocol.generate_rnd_frequencies_each_tf = False
for idx in range(num_frequencies):
protocol.hyperpars['nuk' + str(idx + 1)] = 0
protocol.fill_hyperpar_value(y0=-5, y1=0)
results = optimization.find_best_protocol(
problem_specification=dict(
model='lz',
model_parameters=dict(omega_0=1),
task=dict(initial_intensity=-5, final_intensity=0)
),
optimization_specs=dict(
protocol=protocol,
protocol_options=dict(num_frequencies=num_frequencies),
optimization_method='powell',
parameters_constraints=[-10, 10],
initial_parameters=[0] * (2 * num_frequencies)
),
other_options=dict(
scan_times=np.linspace(0.01, 1, 200)
)
)
# ------ save results to file
results.to_csv(output_file_name)
| [
"lukeinnocenti@gmail.com"
] | lukeinnocenti@gmail.com |
ced0baa0e9192cab080e7e0c0c749c9c7e56e9a1 | 1da91735d1a4d19e62b2d19826d9a1e85d88d690 | /dxpy/dxpy/task/model/tests/test_task.py | 32e1f9139b28e9e0836aef2a1a5c31a6253ebbf0 | [] | no_license | Hong-Xiang/dxl | 94229e4c20f0c97dfe21f8563889c991330df9c3 | 29aed778d1c699cc57d09666a20b4ca60196392f | refs/heads/master | 2021-01-02T22:49:20.298893 | 2018-05-22T13:42:20 | 2018-05-22T13:42:20 | 99,401,725 | 1 | 1 | null | 2018-05-22T13:42:21 | 2017-08-05T05:34:35 | Python | UTF-8 | Python | false | false | 3,063 | py | import json
import unittest
from dxpy.task.model import task
from dxpy.time.timestamps import TaskStamp
from dxpy.time.utils import strp
class TestTask(unittest.TestCase):
def test_to_json(self):
t = task.Task(tid=10, desc='test', workdir='/tmp/test',
worker=task.Worker.MultiThreading,
ttype=task.Type.Regular,
dependency=[1, 2, 3],
time_stamp=TaskStamp(create=strp(
"2017-09-22 12:57:44.036185")),
data={'sample': 42},
is_root=True)
s = t.to_json()
dct = json.loads(s)
self.assertEqual(dct['id'], 10)
self.assertEqual(dct['desc'], 'test')
self.assertEqual(dct['dependency'], [1, 2, 3])
self.assertEqual(dct['data'], {'sample': 42})
self.assertEqual(dct['type'], 'Regular')
self.assertEqual(dct['workdir'], '/tmp/test')
self.assertEqual(dct['worker'], 'MultiThreading')
self.assertEqual(dct['is_root'], True)
self.assertEqual(dct['time_stamp'], {
'create': "2017-09-22 12:57:44.036185", 'start': None, 'end': None})
self.assertEqual(dct['state'], 'BeforeSubmit')
def test_from_json(self):
dct = {
'__task__': True,
'id': 10,
'desc': 'test',
'workdir': '/tmp/test',
'worker': 'Slurm',
'type': 'Script',
'dependency': [1, 2, 3],
'data': {'sample': 42},
'is_root': True,
'time_stamp': {
'create': "2017-09-22 12:57:44.036185",
'start': None,
'end': None
},
'state': 'BeforeSubmit'
}
t = task.Task.from_json(json.dumps(dct))
self.assertEqual(t.id, 10)
self.assertEqual(t.desc, 'test')
self.assertEqual(t.workdir, '/tmp/test')
self.assertEqual(t.worker, task.Worker.Slurm)
self.assertEqual(t.type, task.Type.Script)
self.assertEqual(t.dependency, [1, 2, 3])
self.assertEqual(t.data, {'sample': 42})
self.assertEqual(t.is_root, True)
self.assertEqual(t.time_stamp.create, strp(
"2017-09-22 12:57:44.036185"))
self.assertEqual(t.state, task.State.BeforeSubmit)
def test_submit(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.submit(t)
self.assertEqual(t.state, task.State.Pending)
def test_start(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.start(t)
self.assertEqual(t.state, task.State.Runing)
def test_complete(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.complete(t)
self.assertEqual(t.state, task.State.Complete)
| [
"hx.hongxiang@gmail.com"
] | hx.hongxiang@gmail.com |
862349a3c1000ce89313a3022db4edc1e1f3cf78 | b9d6de31eeaf92f77ed8cb48039b7d82963f249f | /student_companion/comments/migrations/0003_auto_20170129_2333.py | cc053db6657814674415909260fcf4a8a2de111f | [
"BSD-2-Clause"
] | permissive | tm-kn/student-companion-backend | 7c1364033a6edaf2f0d57fd4e9305abf679c698b | 5c0b12aee357b7b2dbaf6a5bb8710b9ecb501019 | refs/heads/master | 2021-03-30T15:53:58.542072 | 2017-03-13T19:45:46 | 2017-03-13T19:45:46 | 71,128,845 | 0 | 0 | null | 2016-10-25T14:36:14 | 2016-10-17T10:58:49 | Python | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-29 23:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('comments', '0002_auto_20161206_1501'),
]
operations = [
migrations.AlterField(
model_name='placecomment',
name='place',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='place_comments', related_query_name='place_comment', to='places.Place', verbose_name='place'),
),
]
| [
"u1562595@unimail.hud.ac.uk"
] | u1562595@unimail.hud.ac.uk |
9547a9fc3daa754897d982f560d79f01d3208f12 | 51fc1cdba445ff736911e21e0c8a7fecb252dd67 | /generate_training_data.py | b59fed61337e20b0724a6bebc25975eac5c98df8 | [] | no_license | ZhengPeng0115/MTGNN | bda54d946581fb0c400e94d0a67045e70d37fb75 | b5558528e9840c50d177d6175ca1214d1bc9886e | refs/heads/master | 2022-12-31T18:35:41.981755 | 2020-10-15T15:50:54 | 2020-10-15T15:50:54 | 304,375,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,911 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
def generate_graph_seq2seq_io_data(
df, x_offsets, y_offsets, add_time_in_day=True, add_day_in_week=False, scaler=None
):
"""
Generate samples from
:param df:
:param x_offsets:
:param y_offsets:
:param add_time_in_day:
:param add_day_in_week:
:param scaler:
:return:
# x: (epoch_size, input_length, num_nodes, input_dim)
# y: (epoch_size, output_length, num_nodes, output_dim)
"""
num_samples, num_nodes = df.shape
data = np.expand_dims(df.values, axis=-1)
data_list = [data]
if add_time_in_day:
time_ind = (df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D")
time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
data_list.append(time_in_day)
if add_day_in_week:
day_in_week = np.zeros(shape=(num_samples, num_nodes, 7))
day_in_week[np.arange(num_samples), :, df.index.dayofweek] = 1
data_list.append(day_in_week)
data = np.concatenate(data_list, axis=-1)
# epoch_len = num_samples + min(x_offsets) - max(y_offsets)
x, y = [], []
# t is the index of the last observation.
min_t = abs(min(x_offsets))
max_t = abs(num_samples - abs(max(y_offsets))) # Exclusive
for t in range(min_t, max_t):
x_t = data[t + x_offsets, ...]
y_t = data[t + y_offsets, ...]
x.append(x_t)
y.append(y_t)
x = np.stack(x, axis=0)
y = np.stack(y, axis=0)
return x, y
def generate_train_val_test(args):
df = pd.read_hdf(args.traffic_df_filename)
# 0 is the latest observed sample.
x_offsets = np.sort(
# np.concatenate(([-week_size + 1, -day_size + 1], np.arange(-11, 1, 1)))
np.concatenate((np.arange(-11, 1, 1),))
)
# Predict the next one hour
y_offsets = np.sort(np.arange(1, 13, 1))
# x: (num_samples, input_length, num_nodes, input_dim)
# y: (num_samples, output_length, num_nodes, output_dim)
x, y = generate_graph_seq2seq_io_data(
df,
x_offsets=x_offsets,
y_offsets=y_offsets,
add_time_in_day=True,
add_day_in_week=False,
)
print("x shape: ", x.shape, ", y shape: ", y.shape)
# Write the data into npz file.
# num_test = 6831, using the last 6831 examples as testing.
# for the rest: 7/8 is used for training, and 1/8 is used for validation.
num_samples = x.shape[0]
num_test = round(num_samples * 0.2)
num_train = round(num_samples * 0.7)
num_val = num_samples - num_test - num_train
# train
x_train, y_train = x[:num_train], y[:num_train]
# val
x_val, y_val = (
x[num_train: num_train + num_val],
y[num_train: num_train + num_val],
)
# test
x_test, y_test = x[-num_test:], y[-num_test:]
for cat in ["train", "val", "test"]:
_x, _y = locals()["x_" + cat], locals()["y_" + cat]
print(cat, "x: ", _x.shape, "y:", _y.shape)
np.savez_compressed(
os.path.join(args.output_dir, "%s.npz" % cat),
x=_x,
y=_y,
x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]),
y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]),
)
def main(args):
print("Generating training data")
generate_train_val_test(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_dir", type=str, default="data/METR-LA", help="Output directory."
)
parser.add_argument(
"--traffic_df_filename",
type=str,
default="data/metr-la.h5",
help="Raw traffic readings.",
)
args = parser.parse_args()
main(args)
| [
"zpengsdu@gmail.com"
] | zpengsdu@gmail.com |
973be0558ece5d4ee643158fbb3ac967a41dac12 | a87294fad6d80d0cdae0d2871626acce67442115 | /TenderPost/apps.py | 9872dd730bd48f2a5d384ff116d6682574cbdb42 | [] | no_license | Sadat-Shahriyar/Amphitetris | a9f41b2880770f074348c49ff29ce444c4f0b10f | f3cb1cc370bce6f1a61ac9f6e70deb3710967da2 | refs/heads/master | 2022-12-23T23:37:52.837438 | 2020-09-27T16:40:00 | 2020-09-27T16:40:00 | 471,795,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class TenderpostConfig(AppConfig):
name = 'TenderPost'
| [
"jayantasadhu4557@gmail.com"
] | jayantasadhu4557@gmail.com |
9479f066756090388c2092129ef0059b3ebf32ea | cf14b6ee602bff94d3fc2d7e712b06458540eed7 | /gs105/gs105/settings.py | 422043aee64a923a3033927c1f8cb6ac0230c445 | [] | no_license | ManishShah120/Learning-Django | 8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27 | 8fe70723d18884e103359c745fb0de5498b8d594 | refs/heads/master | 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,073 | py | """
Django settings for gs105 project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ytgu6b45d)u!-fh@a_v#1d*#010=aih7p8o5juvr(v$ubumwn='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'school',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs105.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gs105.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"mkshah141@gmail.com"
] | mkshah141@gmail.com |
eee180705f38d0e11b8a5778069d77230bafec5f | 481452cd3b904af7a42bbeb71190a59c29e4775b | /python_batch_4/class2/typecasting2.py | deb323944f44ee751f0fd3988dc54191fb1697f1 | [] | no_license | rahusriv/python_tutorial | b09b54044f9df86ac603634ac1dd8d4ea6705e4a | 7de9b62a8e1e8ca1df5f2679ebf17d655f6b1b8e | refs/heads/master | 2020-03-28T11:24:16.468977 | 2019-05-12T06:51:32 | 2019-05-12T06:51:32 | 148,209,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | a = "20.99"
b = "30.89"
c = int(float(a)) +int(float(b))
print(type(c))
print(c) | [
"rahusr@gmail.com"
] | rahusr@gmail.com |
322904d370ccd9b2b0310c2f06e700406ee35483 | 3e201c6c7ac24b425c8e2f73b4f23e5681b0b6a7 | /My Phrases/new-tab.py | 00f3ef6e9823e426f26955afee51a8e72bf64922 | [
"MIT"
] | permissive | yasapurnama/autokey-osx-ify | 5e332b6110928710f3230d2df1816d5ea59e65a1 | a690c5892ff8124905cbf11399ab183d7804bc13 | refs/heads/master | 2020-12-20T14:19:34.697588 | 2020-02-28T13:44:37 | 2020-02-28T13:44:37 | 236,105,369 | 0 | 0 | MIT | 2020-01-25T00:15:27 | 2020-01-25T00:15:26 | null | UTF-8 | Python | false | false | 190 | py | import re
keys = "<ctrl>+t"
window = window.get_active_class()
is_terminal = re.search('term', window, re.IGNORECASE)
if is_terminal:
keys = "<shift>+" + keys
keyboard.send_keys(keys)
| [
"me@glenn-roberts.com"
] | me@glenn-roberts.com |
024133573c36b462e604a560f436aea52c5c3ff9 | 9de7a7a7474c655a12917927ab3a97be4383850f | /abricate.py | 1147fda84424234e85d39af4a058c545464c4f73 | [] | no_license | gopel/clonalpop | ca5fc1d03c8dfc575f5bc18404595c28f645c92b | 13b55d85858d783b3a04cbdcb41bfc5aa9b2a512 | refs/heads/master | 2020-05-04T19:04:55.302064 | 2019-04-04T15:33:39 | 2019-04-04T15:33:39 | 179,378,638 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 26,763 | py | # -*-coding:Latin-1 -*
import os
def abricate(output_path, element):
    """Screen one assembly against several Abricate databases.

    Reads the Prokka assembly ``<output_path>/<element>/Prokka/<element>.fna``
    and writes one tab-separated Abricate report per database into
    ``<output_path>/<element>/Abricate/`` (resistance: CARD, ResFinder, NCBI;
    virulence: ecoli_vf, VFDB; plasmids: PlasmidFinder).

    :param output_path: root directory holding one sub-directory per sample
    :param element: sample name (used both as directory and file basename)
    :return: None (side effects only: shell commands writing report files)
    """
    # os.makedirs(output_path + "/" + element + "/Abricate", exist_ok=True)
    # ^ directory creation disabled upstream; the Abricate/ folder is assumed
    #   to exist already -- TODO confirm before re-enabling.
    fna = output_path + "/" + element + "/Prokka/" + element + ".fna"
    out_prefix = output_path + "/" + element + "/Abricate/" + element
    # NOTE(review): only the CARD screen goes through Docker; every other
    # database uses a locally installed `abricate` -- confirm this mix is
    # intentional and that the docker container sees the mounted path.
    os.system("docker run replikation/abricate --db card " + fna
              + " > " + out_prefix + "_AntibioRes_CARD.txt")
    # (database, report-file suffix) pairs for the local abricate install.
    screens = [
        ("resfinder", "_AntimicRes_ResFinder.txt"),
        ("ncbi", "_AntimicRes_NCBI.txt"),
        ("ecoli_vf", "_Virulence_ECVF.txt"),
        ("vfdb", "_Virulence_VFDB.txt"),
        ("plasmidfinder", "_Plasmids_PlasmidFinder.txt"),
    ]
    for db, suffix in screens:
        os.system("abricate --db " + db + " " + fna + " > " + out_prefix + suffix)
#abricate(letter_illumina)
# Faire apparaitre les gines puis faireune matrice d'analyse presence absence
# 1 gros tableau bourrin avec toutes les infos comme prevu au debut
# Plusieurs slides apres av
def bacteria_resistance(file) :
    """Parse an Abricate CARD antibiotic-resistance report.

    Skips the 13 header lines and the trailing empty line, then reads one
    whitespace-separated hit per line: gene name in column 4, coverage and
    identity in columns 8-9, product description from column 12 onwards.

    Returns:
        (gene_result, protein_result, list_protein_result) where
        gene_result is a "\\n "-joined string of gene names,
        protein_result is a list of "Gene: X, Protein: Y" strings, and
        list_protein_result is a list of [gene, product] pairs.
    """
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # 'with' closes the handle even if a row fails to parse; the original
    # leaked the handle on any exception raised while parsing.
    with open(file, "r") as fichier:
        lines = fichier.read().split('\n')
    # lines[:13] is the report header; lines[-1] is the empty string left
    # by the final newline.  TODO confirm the 13-line header against the
    # abricate version in use.
    for row in lines[13:-1]:
        fields = row.split()
        gene = fields[4]
        # Validate that coverage and identity are numeric so malformed rows
        # still raise ValueError (the original computed an unused trust
        # coefficient from these columns).
        float(fields[8])
        float(fields[9])
        # Product description: every remaining column, each followed by a
        # single space (matches the original formatting exactly).
        product = "".join(f + " " for f in fields[12:])
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_virulence_ECVF(file) :
    """Parse an Abricate E. coli virulence-factor (ecoli_vf) report.

    Same line-oriented format as bacteria_resistance: 13 header lines, one
    whitespace-separated hit per line (gene in column 4, coverage/identity
    in columns 8-9, product from column 12 onwards).

    Returns:
        (gene_result, protein_result, list_protein_result) -- same shapes
        as bacteria_resistance.
    """
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # 'with' guarantees the handle is closed on parse errors (the original
    # leaked it).
    with open(file, "r") as fichier:
        lines = fichier.read().split('\n')
    # Drop the 13-line header and the trailing empty line from the final
    # newline.
    for row in lines[13:-1]:
        fields = row.split()
        gene = fields[4]
        # Numeric validation of coverage/identity, kept so malformed rows
        # still raise ValueError as before.
        float(fields[8])
        float(fields[9])
        product = "".join(f + " " for f in fields[12:])
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_virulence_VDFB(file) :
    """Parse an Abricate VFDB virulence report.

    Same line-oriented format as bacteria_resistance: 13 header lines, one
    whitespace-separated hit per line (gene in column 4, coverage/identity
    in columns 8-9, product from column 12 onwards).

    Returns:
        (gene_result, protein_result, list_protein_result) -- same shapes
        as bacteria_resistance.
    """
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # 'with' guarantees the handle is closed on parse errors (the original
    # leaked it).
    with open(file, "r") as fichier:
        lines = fichier.read().split('\n')
    # Drop the 13-line header and the trailing empty line from the final
    # newline.
    for row in lines[13:-1]:
        fields = row.split()
        gene = fields[4]
        # Numeric validation of coverage/identity, kept so malformed rows
        # still raise ValueError as before.
        float(fields[8])
        float(fields[9])
        product = "".join(f + " " for f in fields[12:])
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_PlasmidFinder(file) :
    """Parse an Abricate PlasmidFinder report (tab-separated, 13 columns).

    Flattens the report into individual tab-separated cells, drops the
    13-cell header row and the trailing empty cell, then walks the rest in
    13-cell records: gene in column 4, coverage/identity in columns 8-9,
    product in column 12.

    Returns:
        (gene_result, protein_result, list_protein_result) -- same shapes
        as the line-oriented parsers above.
    """
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # 'with' guarantees the handle is closed on parse errors (the original
    # leaked it).
    with open(file, "r") as fichier:
        contenu = fichier.read()
    cells = []
    for line in contenu.split('\n'):
        cells.extend(line.split('\t'))
    # Drop the header row's 13 cells and the empty cell produced by the
    # final newline.
    cells = cells[13:-1]
    # Walk complete 13-cell records only, as the original did (avoids the
    # original's shadowing of the loop variable 'k').
    for start in range(0, (len(cells) // 13) * 13, 13):
        record = cells[start:start + 13]
        gene = record[4]
        # Numeric validation of coverage/identity, kept so malformed rows
        # still raise ValueError as before.
        float(record[8])
        float(record[9])
        product = "".join(cell + " " for cell in record[12:])
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_AntimicRes_ResFinder(file) :
    """Parse an Abricate ResFinder report (tab-separated, 13 columns).

    Same flattened, 13-cell-record format as bacteria_PlasmidFinder.

    Returns:
        (gene_result, protein_result, list_protein_result) -- same shapes
        as the other parsers in this module.
    """
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # 'with' guarantees the handle is closed on parse errors (the original
    # leaked it).
    with open(file, "r") as fichier:
        contenu = fichier.read()
    cells = []
    for line in contenu.split('\n'):
        cells.extend(line.split('\t'))
    # Drop the header row's 13 cells and the empty trailing cell.
    cells = cells[13:-1]
    for start in range(0, (len(cells) // 13) * 13, 13):
        record = cells[start:start + 13]
        gene = record[4]
        # Numeric validation of coverage/identity, kept so malformed rows
        # still raise ValueError as before.
        float(record[8])
        float(record[9])
        product = "".join(cell + " " for cell in record[12:])
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_AntimicRes_NCBI(file) :
    """Parse an Abricate NCBI AMR report (tab-separated, 13 columns).

    Same flattened, 13-cell-record format as bacteria_PlasmidFinder.

    Returns:
        (gene_result, protein_result, list_protein_result) -- same shapes
        as the other parsers in this module.
    """
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # 'with' guarantees the handle is closed on parse errors (the original
    # leaked it).
    with open(file, "r") as fichier:
        contenu = fichier.read()
    cells = []
    for line in contenu.split('\n'):
        cells.extend(line.split('\t'))
    # Drop the header row's 13 cells and the empty trailing cell.
    cells = cells[13:-1]
    for start in range(0, (len(cells) // 13) * 13, 13):
        record = cells[start:start + 13]
        gene = record[4]
        # Numeric validation of coverage/identity, kept so malformed rows
        # still raise ValueError as before.
        float(record[8])
        float(record[9])
        product = "".join(cell + " " for cell in record[12:])
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
## REGLER CA AUSSI
def extracting_everything_abricate(output_path, acces_dossier_compare) :
    """Build a per-sample table of the gene-name strings returned by each
    Abricate parser.

    The first row is a header naming the six databases; each following row
    holds the six gene strings for one sample, in the same column order.
    """
    # One (parser, report-file suffix) pair per database, in header order.
    parsers = [
        (bacteria_resistance, "_AntibioRes_CARD.txt"),
        (bacteria_virulence_ECVF, "_Virulence_ECVF.txt"),
        (bacteria_virulence_VDFB, "_Virulence_VFDB.txt"),
        (bacteria_PlasmidFinder, "_Plasmids_PlasmidFinder.txt"),
        (bacteria_AntimicRes_ResFinder, "_AntimicRes_ResFinder.txt"),
        (bacteria_AntimicRes_NCBI, "_AntimicRes_NCBI.txt"),
    ]
    gene_list = [['resistance','virulence_ECVF','virulence_VDFB','PlasmidFinder','AntimicRes_ResFinder','AntimicRes_NCBI']]
    for element in acces_dossier_compare:
        base = output_path + "/" + element + "/Abricate/" + element
        # parse(...)[0] is the newline-joined string of gene names.
        gene_list.append([parse(base + suffix)[0] for parse, suffix in parsers])
    return gene_list
'''
# Feuille generale
#extracting_everything_abricate()
total_abricate = abricate.extracting_everything_abricate()
total_abricate = total_abricate[1:]
for element in letter_illumina:
sample_ID = ID + element
feuil3.write(k, 0, sample_ID, style_cells)
for element in total_abricate:
print(element)
#feuil3.write(k, 1, element[0], style_cells)
k+=1
'''
#print(extracting_everything_abricate())
# Liste avec tous les echantillons, dans chaque sous-liste 6 listes (on garde les 6)
# Liste interessante : list_protein_result, chaque element = une liste de deux elements (on garde la 2 (troisieme)
# [gene, product] peut etre garder juste gene et ajouter '\n' à chaque fois (on garde le 0 (premier))
### Extraire des donnes pour lindex des proteines (termine)
def extracting_data_for_protein_index(acces_dossier_compare, output_path):
    """Aggregate the "Gene: X, Protein: Y" description strings of every
    sample into one deduplicated string per Abricate database.

    Args:
        acces_dossier_compare: iterable of sample folder names.
        output_path: root folder holding one <sample>/Abricate/ directory
            per sample.

    Returns:
        Six strings, one per database, in the order: CARD, ECVF, VFDB,
        PlasmidFinder, ResFinder, NCBI.
    """
    # BUG FIX: the original parsed all six reports with bacteria_resistance
    # (the line-oriented CARD parser).  The commented-out code it replaced
    # shows each report was meant to go through its own parser, as
    # extracting_everything_abricate and extracting_data_for_protein_index_2
    # already do.
    parsers = [
        (bacteria_resistance, "_AntibioRes_CARD.txt"),
        (bacteria_virulence_ECVF, "_Virulence_ECVF.txt"),
        (bacteria_virulence_VDFB, "_Virulence_VFDB.txt"),
        (bacteria_PlasmidFinder, "_Plasmids_PlasmidFinder.txt"),
        (bacteria_AntimicRes_ResFinder, "_AntimicRes_ResFinder.txt"),
        (bacteria_AntimicRes_NCBI, "_AntimicRes_NCBI.txt"),
    ]
    aggregated = ["", "", "", "", "", ""]
    for element in acces_dossier_compare:
        base = output_path + "/" + element + "/Abricate/" + element
        for db_index, (parse, suffix) in enumerate(parsers):
            # parse(...)[1] is the list of "Gene: ..., Protein: ..." strings.
            for description in parse(base + suffix)[1]:
                # Substring membership test, exactly as the original: a
                # description is skipped if it already occurs anywhere in
                # the accumulated string.
                if description not in aggregated[db_index]:
                    aggregated[db_index] += description
    return (aggregated[0], aggregated[1], aggregated[2], aggregated[3],
            aggregated[4], aggregated[5])
#print(extracting_data_for_protein_index()[2])
def extracting_data_for_protein_index_2(output_path, acces_dossier_compare) :
    """Collect the unique [gene, product] pairs of every sample, one
    deduplicated list per Abricate database.

    Args:
        output_path: root folder holding one <sample>/Abricate/ directory.
        acces_dossier_compare: iterable of sample folder names.

    Returns:
        Six lists of [gene, product] pairs, in the order: CARD, ECVF,
        VFDB, PlasmidFinder, ResFinder, NCBI.
    """
    # One (parser, report-file suffix) pair per database, in output order.
    parsers = [
        (bacteria_resistance, "_AntibioRes_CARD.txt"),
        (bacteria_virulence_ECVF, "_Virulence_ECVF.txt"),
        (bacteria_virulence_VDFB, "_Virulence_VFDB.txt"),
        (bacteria_PlasmidFinder, "_Plasmids_PlasmidFinder.txt"),
        (bacteria_AntimicRes_ResFinder, "_AntimicRes_ResFinder.txt"),
        (bacteria_AntimicRes_NCBI, "_AntimicRes_NCBI.txt"),
    ]
    deduplicated = [[], [], [], [], [], []]
    for element in acces_dossier_compare:
        base = output_path + "/" + element + "/Abricate/" + element
        for db_index, (parse, suffix) in enumerate(parsers):
            # parse(...)[2] is the list of [gene, product] pairs.
            for pair in parse(base + suffix)[2]:
                # First occurrence wins; equality is on the whole pair.
                if pair not in deduplicated[db_index]:
                    deduplicated[db_index].append(pair)
    return (deduplicated[0], deduplicated[1], deduplicated[2],
            deduplicated[3], deduplicated[4], deduplicated[5])
'''
protein_list_AntibioRes_CARD , protein_list_Virulence_ECVF , protein_list_Virulence_VFDB, protein_list_Plasmids_PlasmidFinder,protein_list_AntimicRes_ResFinder,protein_list_AntimicRes_NCBI = extracting_data_for_protein_index_2()
### Combining reports across samples
def abricate_report_across_samples(letter_illumina) :
# for element in letter_illumina:
# sample_ID = ID + element
# try:
# os.mkdir("/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/")
# except OSError:
# pass
# os.system("abricate --db card /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntibioRes_CARD.tab")
# os.system("abricate --db resfinder /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_ResFinder.tab")
# os.system("abricate --db ncbi /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_NCBI.tab")
# os.system("abricate --db ecoli_vf /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_ECVF.tab")
# os.system("abricate --db vfdb /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_VFDB.tab")
# os.system("abricate --db plasmidfinder /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Plasmids_PlasmidFinder.tab")
sentence_AntibioRes_CARD =''
sentence_AntimicRes_ResFinder =''
sentence_AntimicRes_NCBI =''
sentence_Virulence_ECVF =''
sentence_Virulence_VFDB = ''
sentence_Plasmids_PlasmidFinder =''
for element in letter_illumina:
sample_ID = ID + element
sentence_AntibioRes_CARD += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntibioRes_CARD.tab "
sentence_AntimicRes_ResFinder += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_ResFinder.tab "
sentence_AntimicRes_NCBI += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_NCBI.tab "
sentence_Virulence_ECVF += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_ECVF.tab "
sentence_Virulence_VFDB += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_VFDB.tab "
sentence_Plasmids_PlasmidFinder += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Plasmids_PlasmidFinder.tab "
try:
os.mkdir("/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples")
except OSError:
pass
os.system("abricate --summary " + sentence_AntibioRes_CARD +" > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/AntibioRes_CARD_report_samples.txt")
os.system("abricate --summary " + sentence_AntimicRes_ResFinder + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/AntimicRes_ResFinder_report_samples.txt")
os.system("abricate --summary " + sentence_AntimicRes_NCBI + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/AntimicRes_NCBI_report_samples.txt")
os.system("abricate --summary " + sentence_Virulence_ECVF + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/Virulence_ECVF_report_samples.txt")
os.system("abricate --summary " + sentence_Virulence_VFDB + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/Virulence_VFDB_report_samples.txt")
os.system("abricate --summary " + sentence_Plasmids_PlasmidFinder + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/Plasmids_PlasmidFinder_report_samples.txt")
abricate_report_across_samples(letter_illumina)'''
# Extracting phrases report samples
def extracting_report_samples(file):
    """Split an Abricate summary report into rows of tab-separated cells.

    Args:
        file: path to an "abricate --summary" report.

    Returns:
        (rows, n) where rows holds one list of cells per line (including
        the header row and a trailing [''] from the final newline) and n
        is the number of columns in the first row.
    """
    # 'with' guarantees the handle is closed (the original never closed it).
    with open(file, "r") as fichier:
        contenu = fichier.read()
    rows = [line.split('\t') for line in contenu.split('\n')]
    return (rows, len(rows[0]))
"noreply@github.com"
] | gopel.noreply@github.com |
17f6cbf71b9e2f8f1a98abc1469ee319dbad1d40 | 3e1339020e63327db55716344a7e02c0d503d260 | /applications/Imaging/L1MIGRATIONwVP/results/segsalt/precooked/SConstruct | 18d42de777c5409c14338e19fe8c83b1f660e31f | [
"MIT"
] | permissive | 13299118606/SLIM-release-apps | 975287c6555fc4c6fe76dcea9f5feb9225a30449 | c286f07312289c7d50057ac9379d2da30eea760f | refs/heads/master | 2022-03-09T20:42:23.490954 | 2019-08-22T23:12:44 | 2019-08-22T23:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | import os
from rsf.proj import *
########################################################################
# RETRIEVE DATA
########################################################################
# define SLIM FTP server information
# Anonymous login to SLIM's FTP server (user "ftp", empty password).
FTPserver = {
        'server': 'ftp.slim.gatech.edu',
        'login': 'ftp',
        'password':''}
# Remote directory of the precooked results and the .mat files to retrieve.
loc = os.path.join('SoftwareRelease','Imaging','L1MIGRATIONwVP','results')
files = ['linear_RTM.mat','linear_trueQ_GaussianEnc2_denoised.mat','linear_wrongQ2_GaussianEnc2_denoised.mat','linear_estQ_GaussianEnc2.mat','linear_estQ_GaussianEnc2_denoised.mat','iwave_RTM.mat','iwave_finv_trueQ_GaussianEnc2_denoised.mat','iwave_finv_estQ_GaussianEnc2.mat','iwave_finv_estQ_GaussianEnc2_denoised.mat']
# fetch data from FTP server
for elm in files:
    Fetch(elm,loc,FTPserver)
| [
"henryk_modzelewski@mac.com"
] | henryk_modzelewski@mac.com | |
f698177fc305cd817e720633840d5cc143725037 | d86aef9f61d2cce156f67ac2da76d7f18b4b881e | /Logic Gates/logic_gates_nn2.py | a30507ca574476c9724550015c31e1c87bddc5b7 | [] | no_license | dhan0779/ai-neural-networks | 353b95ab0bebad132ecc428bcb9dae37d54dd810 | 64fee9aa3346e0fe23d55a4a8007e3a43fd6ed42 | refs/heads/master | 2020-12-01T23:42:32.657842 | 2019-12-29T23:54:10 | 2019-12-29T23:54:10 | 230,816,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,406 | py | import sys,math,time,random
def main():
    """Train a small feed-forward network (sigmoid activations) on the
    truth table read from the file named in sys.argv[1], then print the
    layer sizes, the learned weights and the final total error.
    """
    # Start time (recorded but never used afterwards).
    x = time.time()
    real = []
    # Each input line looks like "<int> <int> ... => <target>"; keep every
    # integer token and drop the "=>" separator.
    inpfile = open(sys.argv[1],"r")
    for line in inpfile:
        fakelist = []
        line = line.split(" ")
        for ch in line:
            if ch != "=>":
                fakelist.append(int(ch))
        real.append(fakelist)
    # Network inputs: each sample without its target value, plus a
    # constant 1 acting as the bias input.
    input = []
    for lis in real:
        hi = []
        for ch in lis:
            hi.append(ch)
        hi.pop()
        hi.append(1)
        input.append(hi)
    # Random initial weights in [-2, 2] for the three weight layers.
    # NOTE(review): list multiplication repeats a single random draw, so
    # every first-layer weight starts identical -- possibly unintended.
    weights = [[random.uniform(-2,2)]*(2*len(input[0])),[random.uniform(-2,2),random.uniform(-2,2)],[random.uniform(-2,2)]]
    # Learning rate for backpropagation.
    alpha = 0.3
    totalerror = 1
    iterations = 0
    it10 = 0
    # Online training: one forward/backward pass per sample per epoch,
    # until the summed squared error drops below the threshold.
    while totalerror > 0.0009:
        totalerror = 0
        for i in range(0,len(input)):
            ff = forwardfeed(input[i],weights,"T3")
            # ff[-1][0] is the network output; the target is the last
            # value of the original sample.
            errort = error(ff[len(ff)-1][0],real[i][len(real[i])-1])
            totalerror += errort
            weights = backpropagation(ff,weights,"T3",real[i][len(real[i])-1],alpha)
        iterations+=1
        # Every 10 epochs: if the error is still high but has stalled,
        # restart from fresh random weights (escape a plateau).
        if iterations != 0 and iterations%10 == 0 and totalerror > 0.1:
            if abs(totalerror-it10) < 0.0001:
                weights = [[random.uniform(-2, 2)] * (2 * len(input[0])),[random.uniform(-2, 2), random.uniform(-2, 2)], [random.uniform(-2, 2)]]
                iterations = 0
            # "+1-1" is a no-op; this just snapshots the current error.
            else: it10 = totalerror+1-1
    print("Layer cts:", [len(input[0]), 2, 1, 1])
    print("Weights:")
    print(weights[0])
    print(weights[1])
    print(weights[2])
    print(totalerror)
def error(ffval,actual):
    """Squared-error loss: half the squared difference between the network
    output *ffval* and the target *actual*."""
    diff = actual - ffval
    return (diff * diff) / 2
def transfer(input,x):
    """Apply the activation function named *input* to x.

    T1 = identity, T2 = ReLU, T3 = logistic sigmoid, T4 = sigmoid rescaled
    to the (-1, 1) range.  Unknown names fall through and return None, as
    before.
    """
    if input == "T1":
        return x
    if input == "T2":
        # ReLU: clamp negatives to zero.
        return 0 if x < 0 else x
    if input == "T3":
        # Logistic sigmoid (same power-based form as the original).
        return 1/(1+math.e**-x)
    if input == "T4":
        # Sigmoid stretched and shifted to (-1, 1).
        return (2 / (1 + math.exp(-1 * x))) - 1
def transfersderiv(input,x):
    """Derivative of the activation named *input*, expressed in terms of
    the activation's OUTPUT value x (required form for T3/T4).

    Unknown names fall through and return None, as before.
    """
    if input == "T1":
        # Identity has slope 1 everywhere.
        return 1
    if input == "T2":
        # ReLU: 0 on the negative side, 1 elsewhere.
        return 0 if x < 0 else 1
    if input == "T3":
        # Sigmoid: s'(z) = s(z) * (1 - s(z)), with x = s(z).
        return x*(1-x)
    if input == "T4":
        # Rescaled sigmoid: derivative is (1 - x^2) / 2.
        return (1-x**2)/2
def dot(list1,list2):
    """Dot product of two numeric sequences; extra trailing elements of
    the longer sequence are ignored (zip semantics)."""
    total = 0
    for a, b in zip(list1, list2):
        total += a * b
    return total
def forwardfeed(inputs,weights,transfers):
    """Run one forward pass through the network.

    Args:
        inputs: one sample (feature values plus the trailing bias 1).
        weights: per-layer flat weight lists, laid out as in main().
        transfers: activation name understood by transfer() ("T3" here).

    Returns:
        layerC: list of per-layer activation vectors, starting with the
        raw inputs and ending with the network output.

    NOTE(review): the tmp/fin/next bookkeeping below is intricate and kept
    verbatim; it assumes the exact weight layout built in main().
    """
    layerC = [inputs]
    tmp = []
    fin = []
    for i in range(len(weights)):
        current = weights[i]
        next = []
        if i != len(weights) - 1:
            # Hidden layer: gather len(inputs) weights per unit into tmp,
            # then dot them with the current activations.
            for j in range(len(current)):
                tmp.append(weights[i][j])
                if len(inputs) != 1 and j != 0 and (j+1) % (len(inputs)) == 0:
                    next.append(dot(tmp,inputs))
                    tmp = []
                if len(inputs) == 1:
                    next.append(dot(tmp, inputs))
                    tmp = []
            # Apply the activation function to every unit's weighted sum.
            fin = []
            for elem in next:
                fin.append(transfer(transfers,elem))
            inputs = fin
        else:
            # Output layer: elementwise scaling by the final weight(s),
            # with no activation applied.
            fin = []
            for z in range(len(inputs)):
                fin.append(inputs[z]*current[z])
            inputs = fin
        layerC.append(inputs)
    return layerC
def backpropagation(inputs,weight,transfersder,real,alpha):
    """One backpropagation step; returns the updated weights.

    Args:
        inputs: the layerC list produced by forwardfeed() (activations per
            layer, ending with the network output).
        weight: the current per-layer weight lists, laid out as in main().
        transfersder: activation name passed to transfersderiv().
        real: the target value for this sample.
        alpha: learning rate.

    Returns:
        newWeights: the three updated weight lists.

    NOTE(review): the layer indices below hard-code the 4-entry layerC /
    3-layer weight shape built by main() and forwardfeed(); kept verbatim.
    """
    newWeights = [[],[],[]]
    E_list = [[],[],[]]
    layer1 = []
    # Walk the layers from the output back towards the input.
    for i in range(len(inputs)-1,0,-1):
        if i == len(inputs)-2:
            # Middle layer: error scaled by the downstream weight and the
            # activation derivative at this layer's output.
            E_list[1].append((real-inputs[i][0])*weight[i][0]*transfersderiv(transfersder,inputs[i][0]))
            newWeights[1].append(E_list[1][0]*inputs[1][0]*alpha+weight[1][0])
            newWeights[1].append(E_list[1][0]*inputs[1][1]*alpha+weight[1][1])
        elif i == len(inputs)-1:
            # Output layer: plain (target - output) error term.
            E_list[2].append(real-inputs[i][0])
            newWeights[2].append(E_list[2][0]*inputs[2][0]*alpha+weight[2][0])
        else:
            # First hidden layer: propagate the middle-layer error through
            # each of its two units.
            E_list[0].append((weight[i][0]*E_list[1][0])*transfersderiv(transfersder,inputs[i][0]))
            E_list[0].append((weight[i][1]*E_list[1][0])*transfersderiv(transfersder,inputs[i][1]))
            # Interleave the updates for both units per input weight.
            for j in range(int(len(weight[0])/2)):
                layer1.append(E_list[0][0]*inputs[0][j]*alpha+weight[0][j])
                layer1.append(E_list[0][1]*inputs[0][j]*alpha+weight[0][j+int(len(weight[0])/2)])
    # De-interleave: even positions are the first unit's weights, odd
    # positions the second unit's, restoring the flat layer-0 layout.
    for i in range(0,len(layer1)):
        if i%2== 0: newWeights[0].append(layer1[i])
    for i in range(0,len(layer1)):
        if i%2== 1: newWeights[0].append(layer1[i])
    return newWeights
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
"noreply@github.com"
] | dhan0779.noreply@github.com |
aa6af7048c44cea9653dd669212be652afc07c82 | 960b3a17a4011264a001304e64bfb76d669b8ac5 | /mstrio/api/authentication.py | ee18f3ec2d1622d62b49c9697d82696d49d54468 | [
"Apache-2.0"
] | permissive | MicroStrategy/mstrio-py | 012d55df782a56dab3a32e0217b9cbfd0b59b8dd | c6cea33b15bcd876ded4de25138b3f5e5165cd6d | refs/heads/master | 2023-08-08T17:12:07.714614 | 2023-08-03T12:30:11 | 2023-08-03T12:30:11 | 138,627,591 | 84 | 60 | Apache-2.0 | 2023-07-31T06:43:33 | 2018-06-25T17:23:55 | Python | UTF-8 | Python | false | false | 5,218 | py | from mstrio.utils.error_handlers import ErrorHandler
@ErrorHandler(
    err_msg='Authentication error. Check user credentials or REST API URL and try again'
)
def login(connection):
    """Authenticate a user and open an HTTP session on the web server that
    stores the user's MicroStrategy sessions.

    The response carries the authorization token (X-MSTR-AuthToken) to be
    sent with subsequent requests.  The loginMode field selects the
    authentication mode (e.g. Standard=1, Anonymous=8, LDAP=16).

    Args:
        connection: MicroStrategy REST API connection object

    Returns:
        Complete HTTP response object.
    """
    credentials = {
        # Private attribute accessed through its name-mangled form.
        'username': connection.username,
        'password': connection._Connection__password,
        'loginMode': connection.login_mode,
        # NOTE(review): magic application-type code -- presumably the type
        # registered for this client library; confirm before changing.
        'applicationType': 35,
    }
    return connection.post(
        url=f'{connection.base_url}/api/auth/login',
        data=credentials,
        skip_expiration_check=True,
    )
@ErrorHandler(err_msg="Failed to logout.")
def logout(connection, error_msg=None, whitelist=None):
    """Close all existing sessions for the authenticated user.

    Args:
        connection: MicroStrategy REST API connection object

    Returns:
        Complete HTTP response object.
    """
    endpoint = f'{connection.base_url}/api/auth/logout'
    # The project header is explicitly cleared for this request.
    return connection.post(
        url=endpoint,
        headers={'X-MSTR-ProjectID': None},
        skip_expiration_check=True,
    )
def session_renew(connection):
    """Reset the HTTP and Intelligence Server session timeouts, keeping
    the session alive.

    Args:
        connection: MicroStrategy REST API connection object

    Returns:
        Complete HTTP response object.
    """
    endpoint = f'{connection.base_url}/api/sessions'
    # Keep-alive ping: the short timeout stops callers from hanging when
    # the server is unreachable.
    return connection.put(
        url=endpoint,
        headers={'X-MSTR-ProjectID': None},
        timeout=2.0,
        skip_expiration_check=True,
    )
def session_status(connection):
    """Check the Intelligence Server session status.

    Args:
        connection: MicroStrategy REST API connection object

    Returns:
        Complete HTTP response object.
    """
    endpoint = f'{connection.base_url}/api/sessions'
    return connection.get(
        url=endpoint,
        headers={'X-MSTR-ProjectID': None},
        skip_expiration_check=True,
    )
@ErrorHandler(err_msg='Could not get identity token.')
def identity_token(connection):
    """Create a new identity token.

    An identity token shares an existing session with another project,
    based on the authorization token of the existing session.

    Args:
        connection: MicroStrategy REST API connection object

    Returns:
        Complete HTTP response object.
    """
    endpoint = f'{connection.base_url}/api/auth/identityToken'
    return connection.post(url=endpoint)
def validate_identity_token(connection, identity_token):
    """Validate an identity token.

    Args:
        connection: MicroStrategy REST API connection object
        identity_token: Identity token

    Returns:
        Complete HTTP response object.
    """
    # The token under test travels in a dedicated request header.
    token_header = {'X-MSTR-IdentityToken': identity_token}
    return connection.get(
        url=f'{connection.base_url}/api/auth/identityToken',
        headers=token_header,
    )
@ErrorHandler(
    err_msg='Error creating a new Web server session that shares an existing IServer '
    'session.'
)
def delegate(connection, identity_token, whitelist=None):
    """Exchange an X-MSTR-IdentityToken for an authentication token and
    cookies (a new Web server session sharing the IServer session).

    Args:
        connection: MicroStrategy REST API connection object
        identity_token: Identity token
        whitelist: list of errors for which we skip printing error messages

    Returns:
        Complete HTTP response object.
    """
    payload = {'loginMode': "-1", 'identityToken': identity_token}
    return connection.post(
        url=f'{connection.base_url}/api/auth/delegate',
        json=payload,
        skip_expiration_check=True,
    )
@ErrorHandler(err_msg='Error getting privileges list.')
def user_privileges(connection):
    """Get the list of privileges for the authenticated user.

    The response includes the name, ID and description of each privilege
    and specifies which projects the privileges are valid for.

    Args:
        connection: MicroStrategy REST API connection object

    Returns:
        Complete HTTP response object.
    """
    endpoint = f"{connection.base_url}/api/sessions/privileges"
    return connection.get(url=endpoint)
@ErrorHandler(err_msg='Error getting info for authenticated user.')
def get_info_for_authenticated_user(connection, error_msg=None):
    """Get information for the authenticated user.

    Args:
        connection: MicroStrategy REST API connection object
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        Complete HTTP response object.
    """
    # NOTE(review): `error_msg` is unused in this body; it may be picked up
    # by the ErrorHandler decorator — confirm before removing.
    return connection.get(url=f'{connection.base_url}/api/sessions/userInfo')
| [
"noreply@github.com"
] | MicroStrategy.noreply@github.com |
c58f0a9f11b329810abbf4a905261ab209363ccb | efdab571b6273bbbcaddc49e1ca4978aa0625fa9 | /Fundamentals/Session1/Homework/converts.py | f8f1a8db4d349adddafe11fcc6a1610cac46df64 | [] | no_license | duongnt52/ngotungduong-fundamentals-c4e25 | 2f238b6a1bc7eeaf96617aca9e32cef28c1fbeb7 | 2f412cd370bbca4f826466e09d8f30de60b9c648 | refs/heads/master | 2020-04-13T07:49:04.366197 | 2019-02-27T12:50:28 | 2019-02-27T12:50:28 | 163,063,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | c = int(input("Enter the temperature in Celsirus? "))
f = (c * 1.8) + 32
print(c, "(C)", "=", f, "(F)") | [
"duongnt52"
] | duongnt52 |
bbac7a89c8fce26e9c0cc1f44ccffe97946ed723 | 9a4babfb4abca418f3985387742613305bbd1975 | /ouds/article/views.py | 5fb06a392bf3241578121ffe1d18e3e2f843b430 | [] | no_license | joskid/ChunCu | 37f324669146bc134e4719fb73471268d9f900eb | dbbdf00848962efb65306b3baaac4b7ecba42f15 | refs/heads/master | 2021-01-15T21:39:03.680848 | 2012-03-16T11:48:13 | 2012-03-16T11:48:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,726 | py | # -*- coding: UTF-8 -*-
#===============================================================================
# Author: 骛之
# File Name: gd/member/admin.py
# Revision: 0.1
# Date: 2007-2-5 19:15
# Description:
#===============================================================================
import datetime
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from ouds.settings import HOST_NAME, HOST_URL, ICON_SIZE, IMAGE_SIZE
from ouds.utils.comms import _md5_key
from ouds.article.models import Catalog, Tag, Topic, Entry, Comment
################################################
@cache_page(60 * 30)
def module(request, module, template_name = 'article/module.ouds'):
    """Render the landing page for a top-level article module (cached 30 min)."""
    context = {
        'user': request.user,
        'module': module,
        'catalog': None,
        'tag': None,
    }
    return render_to_response(template_name, context)
################################################
@cache_page(60 * 30)
def catalog(request, module, catalog, template_name = 'article/catalog_tag.ouds'):
    """Render a catalog page and bump its read counter (cached 30 min)."""
    cat = Catalog.objects.get(module__exact=module, name__exact=catalog)
    # Count this page view before rendering.
    cat.read_count += 1
    cat.save()
    context = {
        'user': request.user,
        'module': module,
        'catalog': cat.name,
        'tag': None,
    }
    return render_to_response(template_name, context)
################################################
@cache_page(60 * 30)
def tag(request, module, catalog, tag, template_name = 'article/catalog_tag.ouds'):
    """Render a tag page and bump its read counter (cached 30 min)."""
    tag_obj = Tag.objects.get(catalog__name__exact=catalog, name__exact=tag)
    # Count this page view before rendering.
    tag_obj.read_count += 1
    tag_obj.save()
    context = {
        'user': request.user,
        'module': module,
        'catalog': catalog,
        'tag': tag_obj.name,
    }
    return render_to_response(template_name, context)
##################################
from ouds.utils.consts import IMG_TYPE, AI_DIR
from ouds.article.forms import TopicForm
@login_required
def add_topic(request, topic_form = TopicForm, template_name = 'article/add_topic.ouds'):
    """Create a new topic (article) for the logged-in user.

    On POST: validates the submitted TopicForm, saves the topic, stores an
    optional icon upload, updates the catalog's post counter and creates or
    updates the associated tags, then redirects to the member page.
    On GET: renders an empty form.
    """
    user = request.user
    if request.method == "POST":
        data = request.POST
        # NOTE(review): request.POST is normally immutable in Django; this
        # in-place assignment presumably works in this project's setup — confirm.
        data['title'] = data['title'].strip()
        now = datetime.datetime.now()
        # Topic id is an MD5 key derived from timestamp + username.
        topic = Topic(id = _md5_key(now, user.username), profile = user.get_profile(), \
            edit_date = now, is_approved = True) # is_recommended = True
        topic_form = topic_form(data, instance = topic, auto_id = False)
        if topic_form.is_valid():
            topic = topic_form.save()
            if request.FILES:
                icon = request.FILES['icon']
                # Accept only uploads below the size cap whose last three
                # characters match a known image extension.
                if icon.size <= ICON_SIZE and (icon.name[-3:] in IMG_TYPE):
                    topic.icon.save(topic.id + icon.name[-4:], icon, save = True)
            # Update the catalog's post counter.
            catalog = topic.catalog
            catalog.post_count += 1
            catalog.save()
            # Tag handling: tags arrive as a whitespace-separated list.
            tags = data['tags'].strip().split()
            for tag in tags:
                # Create the tag if it does not yet exist in this catalog.
                if not Tag.objects.filter(catalog__exact = catalog, name__exact = tag).exists():
                    Tag(catalog = catalog, name = tag).save()
                # Update the tag counter and the topic-tag relation.
                tag = Tag.objects.get(catalog__exact = catalog, name__exact = tag)
                tag.post_count += 1
                tag.save()
                if not topic.tags.filter(name__exact = tag.name).exists():
                    topic.tags.add(tag)
        # Redirect happens whether or not the form validated.
        return HttpResponseRedirect('/member/%s' % user.username)
    else:
        topic_form = topic_form(auto_id = False)
    return render_to_response(
        template_name,
        {
            'user': user,
            'module': None,
            'topic_form': topic_form,
            },
        )
##################################
from ouds.article.forms import CommentForm
def topic(request, module, catalog, year, month, day, id, template_name = 'article/topic.ouds'):
    """Render a single topic page with its entries, neighbours and comments."""
    user = request.user
    current = Topic.objects.get(id__exact = id)
    if request.method == 'POST':
        entry_id = request.POST['entry_id']
    else:
        # Default to the most recent public entry, if any exist.
        public_entries = current.public_entries()
        entry_id = public_entries.latest('birth_date').id if public_entries else None
    try:
        next_topic = current.get_next_by_edit_date()
    except Topic.DoesNotExist:
        next_topic = None
    try:
        previous_topic = current.get_previous_by_edit_date()
    except Topic.DoesNotExist:
        previous_topic = None
    context = {
        'user': user,
        'host_name': HOST_NAME,
        'host_url': HOST_URL,
        'module': module,
        'catalog': catalog,
        'topic': current,
        'entry_id': entry_id,
        'next_topic': next_topic,
        'previous_topic': previous_topic,
        'comments': current.comments.all(),
        'comment_form': CommentForm(auto_id = False),
    }
    return render_to_response(template_name, context)
##################################
from ouds.article.forms import EntryForm
from ouds.utils.processimg import watermark
@login_required
def add_entry(request, topic_id, entry_form = EntryForm, template_name = 'article/add_entry.ouds'):
    """Add a new entry (chapter) to an existing topic.

    Redirects to the member page when the topic does not exist or after a
    POST; otherwise renders the entry form.  Uploaded images are size- and
    extension-checked, then watermarked in place on disk.
    """
    user = request.user
    if not Topic.objects.filter(id__exact = topic_id).exists():
        # Unknown topic: bounce back to the member page.
        return HttpResponseRedirect('/member/%s' % user.username)
    else:
        topic = Topic.objects.get(id__exact = topic_id)
    if request.method == "POST":
        data = request.POST
        # NOTE(review): request.POST is normally immutable in Django; confirm
        # this in-place strip works in this project's setup.
        data['title'] = data['title'].strip()
        # Entry id is an MD5 key derived from timestamp + username.
        entry = Entry(id = _md5_key(datetime.datetime.now(), user.username), topic = topic)
        entry_form = entry_form(data, instance = entry, auto_id = False)
        if entry_form.is_valid():
            entry = entry_form.save()
            if request.FILES:
                image = request.FILES['image']
                # Accept only images below the size cap with a known extension.
                if image.size <= IMAGE_SIZE and (image.name[-3:] in IMG_TYPE):
                    entry.image.save(entry.id + image.name[-4:], image, save = True)
                    # Watermark the stored image and overwrite it on disk.
                    watermark(AI_DIR + entry.id + image.name[-4:]).save(AI_DIR + entry.id + image.name[-4:], quality = 90)
        # Redirect happens whether or not the form validated.
        return HttpResponseRedirect('/member/%s' % user.username)
    else:
        entry_form = entry_form(auto_id = False)
    return render_to_response(
        template_name,
        {
            'user': user,
            'module': None,
            'entry_form': entry_form,
        },
    )
################################################
import random
from ouds.utils.consts import MODULE
def search(request, template_name = 'article/search.ouds'):
    """Search published topics whose title or description contains the keywords."""
    keywords = request.POST['keywords'].strip()
    matches = Topic.published.filter(
        Q(title__icontains = keywords) | Q(description__icontains = keywords)
    )
    # A random module name is picked purely for page decoration.
    context = {
        'user': request.user,
        'module': MODULE[random.randint(0, len(MODULE)-1)][0],
        'keywords': keywords,
        'topics': matches,
    }
    return render_to_response(template_name, context)
#######################################
def comment(request, topic_id, comment_form = CommentForm):
    """Post a comment on a topic, then redirect back to the topic page."""
    data = request.POST
    target = Topic.objects.get(id__exact = topic_id)
    new_comment = Comment(
        id = _md5_key(datetime.datetime.now(), data['author']),
        topic = target,
        ip = request.META['REMOTE_ADDR'],
    )
    bound_form = comment_form(data, instance = new_comment, auto_id = False)
    if bound_form.is_valid():
        # is_valid() populated the instance from cleaned data; persist it
        # and bump the topic's comment counter.
        new_comment.save()
        target.comment_count += 1
        target.save()
    return HttpResponseRedirect(data['topic_url'])
| [
"ouds.cg@gmail.com"
] | ouds.cg@gmail.com |
3317acbb6b1c8517bfce38ae6b51df5e4c04a897 | ed8126f7a19e4ed71a2a0c3b28f59e9a2787cf47 | /tests/lagrange.py | 4e4620af1e0eff87ca331dea9c6f8cde2753fb09 | [
"Apache-2.0"
] | permissive | acrovato/dg-flo | ccdb5dba706bfbfae76af98fb8aeb96d6a110061 | 759263f80c92984b2c1dada11a09e17235b529ce | refs/heads/main | 2023-02-22T09:29:57.677025 | 2021-01-17T19:02:01 | 2021-01-17T19:02:01 | 313,250,255 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | # -*- coding: utf8 -*-
# test encoding: à-é-è-ô-ï-€
# Copyright 2021 Adrien Crovato
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Lagrange shape functions test
# Adrien Crovato
#
# Test the Lagrange shape functions for order p (n = p+1)
import numpy as np
import fe.quadrature as quad
import fe.shapes as shp
import utils.testing as tst
from run import parse
def main():
    """Evaluate Lagrange shape functions of order p and optionally plot them."""
    p = 4  # polynomial order
    x = np.linspace(-1, 1, 100)  # evaluation points
    xi = quad.GaussLegendreLobatto(p).x  # interpolation (GLL) points
    # Build and display the shape functions
    shape = shp.Lagrange(x, xi)
    print(shape)
    # Plot the functions and their derivatives when the GUI flag is set
    if parse().gui:
        import matplotlib.pyplot as plt
        l = np.zeros((shape.n, len(x)))
        dl = np.zeros((shape.n, len(x)))
        for k in range(len(x)):
            l[:, k] = np.transpose(shape.sf[k])
            dl[:, k] = shape.dsf[k]
        plots = (
            (1, l, 'N_i', 'Shape functions of order {:d}'),
            (2, dl, 'dN_i/dx', 'Shape function derivatives of order {:d}'),
        )
        for fig, values, ylab, title in plots:
            plt.figure(fig)
            for i in range(shape.n):
                plt.plot(x, values[i, :])
                plt.plot(xi[i], 0, 'ko')
            plt.xlabel('x')
            plt.ylabel(ylab)
            plt.title(title.format(p))
        plt.show()
if __name__=="__main__":
    main()
| [
"39187559+acrovato@users.noreply.github.com"
] | 39187559+acrovato@users.noreply.github.com |
d59708d18019db8809fb41912e4fc24664b28503 | 13548d8c85e3f4b32181f1b54fb20bc81002491d | /lr_model.py | abe3e0ca3aecd0f13fdef9b45bb06135776e4efa | [] | no_license | sjtuprog/classification-models | ed9e5c32538566da3bf031dc4b4319821772b8e0 | 83b61569a26e232d0dec7db74035d1719412597b | refs/heads/master | 2021-07-11T13:56:37.064339 | 2017-10-15T05:09:45 | 2017-10-15T05:09:45 | 106,985,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | from preprocessor.loader import *
import numpy as np
from preprocessor.utils import metrics
from sklearn.linear_model import LogisticRegression, LinearRegression
import os
def data_to_vector(data):
    """Turn a (word-id list, label) pair into a bag-of-words vector and label."""
    words, label = data
    # One slot per vocabulary entry; count occurrences of each word id.
    vec = np.zeros(len(word_to_id))
    for wid in words:
        vec[wid] += 1
    return vec, label
# Load the corpus and build the vocabulary from the training split.
# NOTE(review): the dev split is loaded but never used below.
train, dev, test = load_file('corpus/example_data.json')
dico_words, word_to_id, id_to_word = word_mapping(train)
train_data = prepare_dataset(train, word_to_id)
test_data = prepare_dataset(test, word_to_id)
# Vectorize the train/test splits into bag-of-words features.
x_train = []
x_test = []
y_train = []
y_test = []
for t in train_data:
    v, y = data_to_vector(t)
    x_train.append(v)
    y_train.append(y)
for t in test_data:
    v, y = data_to_vector(t)
    x_test.append(v)
    y_test.append(y)
# Train a class-balanced L2 logistic regression and evaluate on the test split.
clf = LogisticRegression(C=1.0, dual=False, fit_intercept=True, intercept_scaling=1, class_weight='balanced',penalty='l2',n_jobs=4)
clf.fit(x_train,y_train)
y_predict = clf.predict(x_test)
a,p,r,f,auc = metrics(y_test, y_predict)
# Python 2 print statement: report accuracy/precision/recall/F1/AUC.
print 'Acc:%f, Prec:%f, Reca:%f, F1:%f, AUC:%f' %(a,p,r,f,auc)
| [
"sjtuprog@gmail.com"
] | sjtuprog@gmail.com |
2f62066c180ecaec7d3c36b4eb514313cea1f73a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03605/s666410011.py | ee1b3b217136d88dfc453fa50b5f4c38f78ab5b2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | N=input()
if N.count("9"):
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
74599c94d3dcaa1bce72b0d69b9593c7f982f3b9 | c3b058773f4ee1ab5e8010284c40e8af1d19e5e8 | /trexRun.py | 9ae0b520317d7297e4fee168435d5614d084ca46 | [] | no_license | cmtzco/steem_mm | 5485e5bf5827571c80934c0661c90ce327f5a46e | fcd791d703ce5b20d3bd609316742b638da767a4 | refs/heads/master | 2021-06-17T08:19:03.160676 | 2017-06-11T21:50:59 | 2017-06-11T21:50:59 | 93,909,995 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,735 | py | from gekko import Trex
import config as c
import urllib2
import logging
import random
import time
import sys
#0.47405412 BTC
# Log to file; the same messages are also printed to stdout (Python 2 script).
logging.basicConfig(filename='gekko.log',level=logging.INFO)
RUNNING = True
# Outer loop: rebuilds the exchange client after top-level HTTP errors.
while RUNNING:
    try:
        # Randomize the lot size slightly on each reconnect.
        c.lotSize = random.uniform(0.75, 1.25)
        b = Trex(c.TrexKey, c.TrexSecret)
        orders = b.getOpenOrders()
        # Inner loop: one market-making decision per iteration.
        while RUNNING:
            ticker = b.getCoinTicker()
            btc = b.getCoinBalance('BTC')
            steem = b.getCoinBalance('STEEM')
            steemRate = b.getBuyRate(ticker)
            try:
                orders = b.getOpenOrders()
                if b.checkMinBuyAmount(ticker):
                    # Enough BTC available: place a buy order and log state.
                    bid = b.getBid(ticker)
                    buy = b.makeBuyOrder(ticker)
                    btc_balance = b.getCoinBalance('BTC')
                    steem_balance = b.getCoinBalance('STEEM')
                    orders = b.getOpenOrders()
                    print "[INFO][TREX][MM][BUY] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(buy['result']['uuid'],
                        btc_balance,
                        steem_balance,
                        b.getNumOpenOrders(orders))
                    logging.info("[INFO][TREX][MM][BUY] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(buy['result']['uuid'],
                        btc_balance,
                        steem_balance,
                        b.getNumOpenOrders(orders)))
                elif steem > c.trexLotSize:
                    # Holding more STEEM than one lot: place a sell order.
                    ask = b.getAsk(ticker)
                    sell = b.makeSellOrder(ticker)
                    btc_balance = b.getCoinBalance('BTC')
                    steem_balance = b.getCoinBalance('STEEM')
                    orders = b.getOpenOrders()
                    print "[INFO][TREX][MM][SELL] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(sell['result']['uuid'],
                        btc_balance,
                        steem_balance,
                        b.getNumOpenOrders(orders))
                    logging.info(
                        "[INFO][TREX][MM][SELL] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(sell['result']['uuid'],
                        btc_balance,
                        steem_balance,
                        b.getNumOpenOrders(orders)))
                    # time.sleep(1)
                    # orders = b.getOpenOrders()
                    # for order in orders['result']:
                    #     print "[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(order['OrderUuid']))
                    #     logging.info("[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(order['OrderUuid'])))
                else:
                    # Nothing to trade: cancel the open order furthest from
                    # the last traded price to free up balance.
                    highscore = 0
                    ids = list()
                    for order in orders['result']:
                        ticker = b.getCoinTicker()
                        last = b.getLast(ticker)
                        furthestOrder = b.getFurthestOrderPercentage(order['limit'], last)
                        if furthestOrder > highscore:
                            highscore = furthestOrder
                            # NOTE(review): other code paths index orders as
                            # order['OrderUuid'] (see commented block above);
                            # order['result']['Uuid'] and order['limit'] look
                            # inconsistent with that shape — confirm against
                            # the Bittrex API response format.
                            ids.append(order['result']['Uuid'])
                    print "[INFO][TREX][MM][CANCEL] Cancelling the following order IDs: {}".format(ids)
                    logging.info("[INFO][TREX][MM][CANCEL] Cancelling the following order IDs: {}".format(ids))
                    for id in ids:
                        print "[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(id))
                        logging.info("[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(id)))
                    orders = b.getOpenOrders()
                    print "[INFO][TREX][MM][ORDERS] Total Orders Open After Cancel:{}".format(b.getNumOpenOrders(orders))
                    logging.info("[INFO][TREX][MM][ORDERS] Total Orders Open After Cancel:{}".format(b.getNumOpenOrders(orders)))
                    print "[INFO][TREX][MM][ORDERS] Waiting for opportunity to buy/sell"
            except urllib2.HTTPError as e:
                # Transient HTTP failure: back off and retry the inner loop.
                print "[ERROR][TREX][MM][WHILE][HTTP] {}".format(e)
                logging.error("[ERROR][TREX][MM][WHILE][HTTP] {}".format(e))
                time.sleep(20)
                continue
            except KeyError as e:
                # Treated as "order limit reached": wait for fills.
                print "[ERROR][TREX][MM][WHILE][KEY] {}".format(e)
                logging.error("[ERROR][TREX][MM][WHILE][KEY] {}".format(e))
                print "[ERROR][TREX][MM][WHILE][ORDERLIMIT]We've hit an order limit, waiting 20s to see if any orders fill{}".format(e)
                logging.error("[ERROR][TREX][MM][WHILE][ORDERLIMIT]We've hit an order limit, waiting 20s to see if any orders fill {}".format(e))
                time.sleep(20)
                pass
            except ValueError as e:
                print "[ERROR][TREX][MM][WHILE][VALUE] {}".format(e)
                logging.error("[ERROR][TREX][MM][WHILE][VALUE] {}".format(e))
                pass
            except TypeError as e:
                print "[ERROR][TREX][MM][WHILE][TYPE] {}".format(e)
                logging.error("[ERROR][TREX][MM][WHILE][TYPE] {}".format(e))
                pass
    except urllib2.HTTPError as e:
        # NOTE(review): RUNNING is re-set to True here, so the script never
        # terminates on its own — confirm this is intentional.
        print "[ERROR][TREX][MM][MAIN] {}".format(e)
        logging.error("[ERROR][TREX][MM][MAIN] {}".format(e))
        time.sleep(20)
        RUNNING = True
| [
"chris@cmtz.co"
] | chris@cmtz.co |
eca1d0906ca94f9f81f2da44e42483721dc46ee6 | f45295baf6a46bf09669c382270ad1b3213781dc | /Code/check_resources.py | eb4320d69106446508ddad6c1c036012f13128ec | [] | no_license | arjun1886/Ethnicity_detection_DL | 8d2ab1e776f0dda8f8162e8dc334c861cd92145d | c8868089fe13d3eced4bd65d72b93362167e77d1 | refs/heads/master | 2020-12-21T03:24:49.617975 | 2020-01-26T09:50:00 | 2020-01-26T09:50:00 | 236,289,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | __author__ = 'Douglas'
import bz2
import os
import shutil
import urllib.request
dlib_facial_landmark_model_url = "http://ufpr.dl.sourceforge.net/project/dclib/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2"
def download_file(url, dest):
    """Download *url* into directory *dest*, printing periodic progress.

    Args:
        url: HTTP(S) URL of the file to fetch; the URL's basename becomes
            the local file name.
        dest: existing directory to write the file into.
    """
    file_name = url.split('/')[-1]
    u = urllib.request.urlopen(url)
    f = open(dest + "/" + file_name, 'wb')
    try:
        # urllib.request responses expose headers as an email.message.Message;
        # the old Python 2 `meta.getheaders(...)` API no longer exists.
        # Content-Length may be absent, in which case percentages are skipped.
        meta = u.info()
        file_size = int(meta.get("Content-Length") or 0)
        # The original applied `%` to the *result* of print() (a TypeError);
        # format inside the call instead.
        print("Downloading: %s Size: %s (~%4.2fMB)" % (file_name, file_size, file_size / 1024. / 1024.))
        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            # Print a status line roughly every 5% of the download.
            if file_size and (file_size_dl * 100. / file_size) % 5 <= 0.01:
                print("%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size))
    finally:
        # Close both handles even if the transfer fails part-way.
        f.close()
        u.close()
    print("Download complete!")
def extract_bz2(fpath):
    """Decompress a .bz2 file next to itself.

    Args:
        fpath: path to a file ending in '.bz2'; the decompressed data is
            written to the same path with the 4-character suffix removed.
    """
    print("Extracting...")
    # Stream in chunks (no whole-file read) and use context managers so both
    # handles are always closed; the original leaked the BZ2File handle and
    # shadowed the `file` builtin.
    with bz2.BZ2File(fpath, 'rb') as src, open(fpath[:-4], 'wb') as dst:
        shutil.copyfileobj(src, dst)
    print("Done!")
def check_dlib_landmark_weights():
    """Ensure the dlib 68-landmark model exists locally, downloading it if needed."""
    folder = "dlib_models"
    dat_path = folder + "/shape_predictor_68_face_landmarks.dat"
    bz2_path = dat_path + ".bz2"
    if not os.path.isdir(folder):
        os.mkdir(folder)
    if not os.path.isfile(dat_path):
        # Fetch the compressed model first if it is not cached yet.
        if not os.path.isfile(bz2_path):
            download_file(dlib_facial_landmark_model_url, folder)
        extract_bz2(bz2_path)
"arjun.rajesh1886@gmail.com"
] | arjun.rajesh1886@gmail.com |
c241b675292580697fde86a8fafecba8ace59410 | 244856c712aaf0675aab546519a8f0552137f257 | /plantManagement/sensors/urls.py | aa124fb0603d9c1c3a05cdca68bae35eccfc8340 | [] | no_license | Sreepragnav16/Plant_Monitoring_System | 250199db9661cf000ef24c286a310262b9f6e056 | ac3990e1ff222c4f29aac0c7a4c2b1e9fb7300d7 | refs/heads/master | 2020-05-15T06:52:30.504201 | 2019-04-18T17:48:57 | 2019-04-18T17:48:57 | 182,131,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | from django.conf.urls import url
from . import views
app_name = 'sensors'
urlpatterns = [
# homepage
url(r'^$', views.index, name='index'),
#temperature
url(r'^temperature/$',views.temperature, name='temperature'),
#humidity
url(r'^humidity/$',views.humidity, name='humidity'),
#overhead tank
url(r'^OHT/$',views.OHT, name='OHT'),
#rain gauge
url(r'^rain/$',views.rain, name='rain'),
#weather station
url(r'^weather/$',views.weather, name='weather'),
#add new reading
url(r'^addreading/$', views.add_reading, name='addreading'),
#display particular plant info
url(r'^display/(?P<pid>[0-9]+)/$', views.display, name='display'),
url(r'^weather/display/(?P<pid>[0-9]+)/$', views.display, name='display'),
#soil moisture
url(r'^display/(?P<pid1>[0-9]+)/sm/(?P<pid>[0-9]+)/',views.sm, name='sm'),
url(r'^weather/display/(?P<pid1>[0-9]+)/sm/(?P<pid>[0-9]+)/',views.sm, name='sm'),
#add new plant
url(r'^addplant/$', views.addplant, name='addplant'),
#demo
url(r'^demo/$', views.demo, name='demo'),
#map
url(r'^map/',views.map, name='map'),
#about us
url(r'^about/$', views.about, name='about'),
# motorControl
url(r'^control/(?P<pid>[0-9]+)/$', views.motorControl, name='motorControl'),
]
| [
"noreply@github.com"
] | Sreepragnav16.noreply@github.com |
822472fe328593a9877481bba85b7d87cd7b60d3 | 642aff81fc7dcf253bd9d714234d6ad0b7f08e5f | /pymaginopolis/chunkyfile/common.py | bc0ea16a27a85a8436f019292d8d166d889a145a | [
"MIT"
] | permissive | frrabelo/pymaginopolis | b4a1d2707eb20eb942bb9c176614ab38136db491 | 022651ab9f6a809c754a5554114f5d1a3eca369b | refs/heads/master | 2023-03-18T01:40:32.747153 | 2021-02-28T03:57:31 | 2021-02-28T03:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,180 | py | import struct
from pymaginopolis.chunkyfile import model as model
from pymaginopolis.chunkyfile.model import Endianness, CharacterSet
# Size in bytes of the fixed GRPB group-block header (see parse_grpb_list).
GRPB_HEADER_SIZE = 20
# Maps chunky-file character-set ids to Python codec names.
CHARACTER_SETS = {
    model.CharacterSet.ANSI: "latin1",
    model.CharacterSet.UTF16LE: "utf-16le"
}
def get_string_size_format(characterset):
    """Return (struct format char, size-field byte width, bytes per character)
    for Pascal strings in the given character set."""
    # FUTURE: big endian
    is_utf16 = characterset in (model.CharacterSet.UTF16BE, model.CharacterSet.UTF16LE)
    return ("H", 2, 2) if is_utf16 else ("B", 1, 1)
def parse_pascal_string_with_encoding(data):
    """
    Read a 16-bit character-set id followed by a Pascal string.
    :param data: binary data starting with the character-set field
    :return: tuple containing string, number of bytes consumed and characterset
    """
    # The first two bytes select the character set used by the string.
    raw_charset = struct.unpack("<H", data[0:2])[0]
    charset = model.CharacterSet(raw_charset)
    text, consumed = parse_pascal_string(charset, data[2:])
    return text, consumed + 2, charset
def parse_pascal_string(characterset, data):
    """
    Read a Pascal string from a byte array using the given character set.
    :param characterset: Character set to use to decode the string
    :param data: binary data
    :return: tuple containing string and number of bytes consumed
    """
    size_fmt, size_width, char_width = get_string_size_format(characterset)
    if len(data) < size_width:
        raise FileParseException("String size truncated")
    # The length prefix counts characters; convert to a byte count.
    n_chars = struct.unpack("<" + size_fmt, data[0:size_width])[0]
    n_bytes = n_chars * char_width
    payload = data[size_width:size_width + n_bytes]
    return payload.decode(CHARACTER_SETS[characterset]), size_width + n_bytes
def generate_pascal_string(characterset, value):
    """Encode *value* as a Pascal string: character-count prefix + encoded text."""
    size_fmt, _, _ = get_string_size_format(characterset)
    prefix = struct.pack("<" + size_fmt, len(value))
    return prefix + value.encode(CHARACTER_SETS[characterset])
class FileParseException(Exception):
    """Raised when the chunky file is malformed or truncated."""
def check_size(expected, actual, desc):
    """Raise FileParseException if *actual* is smaller than *expected* bytes."""
    if actual >= expected:
        return
    raise FileParseException("%s truncated: expected 0x%x, got 0x%x" % (desc, expected, actual))
def parse_u24le(data):
    """Parse a 24-bit little-endian unsigned integer from the first 3 bytes.

    :param data: byte sequence of length >= 3 (raises IndexError otherwise,
        matching the previous hand-rolled implementation)
    :return: integer in [0, 2**24)
    """
    # Explicit length check preserves the old IndexError behaviour;
    # int.from_bytes replaces the manual shift/or combination.
    if len(data) < 3:
        raise IndexError("parse_u24le needs at least 3 bytes")
    return int.from_bytes(bytes(data[:3]), "little")
def parse_endianness_and_characterset(data):
    """Parse the paired 16-bit endianness and character-set fields."""
    check_size(4, len(data), "Endianness/characterset")
    raw_endian, raw_charset = struct.unpack("<2H", data)
    return model.Endianness(raw_endian), model.CharacterSet(raw_charset)
def tag_bytes_to_string(tag):
    """
    Convert the raw bytes for a tag into a string
    :param tag: bytes (eg. b'\x50\x4d\x42\x4d')
    :return: tag (eg. "MBMP")
    """
    # Decode with latin1 for consistency with CHARACTER_SETS (ANSI -> "latin1")
    # and for portability: the previous "ansi" codec alias exists only on
    # Windows builds of Python.  Tags are stored reversed and NUL-padded.
    return tag[::-1].decode("latin1").rstrip("\x00")
def parse_grpb_list(data):
    """
    Parse a GRPB chunk
    :param data: GRPB chunk
    :return: tuple containing endianness, characterset, index entry size, item index and item heap
    """
    # Fixed 20-byte header: endianness + charset (16-bit each), then index
    # entry size, entry count, heap size and an unknown field (32-bit each).
    endianness, characterset, index_entry_size, number_of_entries, heap_size, unk1 = struct.unpack("<2H4I", data[
        0:GRPB_HEADER_SIZE])
    endianness = Endianness(endianness)
    characterset = CharacterSet(characterset)
    # TODO: figure out what this is
    if unk1 != 0xFFFFFFFF:
        raise NotImplementedError("can't parse this GRPB because unknown1 isn't 0xFFFFFFFF")
    # Read heap: variable-length data referenced by the index entries.
    heap = data[GRPB_HEADER_SIZE:GRPB_HEADER_SIZE + heap_size]
    # Read index: fixed-size records laid out immediately after the heap.
    index_size = index_entry_size * number_of_entries
    index_data = data[GRPB_HEADER_SIZE + heap_size:GRPB_HEADER_SIZE + heap_size + index_size]
    # Split the raw index into one bytes object per entry.
    index_items = [index_data[i * index_entry_size:(i + 1) * index_entry_size] for i in range(0, number_of_entries)]
    return endianness, characterset, index_entry_size, index_items, heap
| [
"1490287+benstone@users.noreply.github.com"
] | 1490287+benstone@users.noreply.github.com |
ec9c0cd180f50fb23acae69744788f81a9bfa036 | 8ccf7e6a93256fd83fed2bb7bd4f8bbe13dc1f40 | /Assignment 3. Paxos/Simulation/Agents/Proposer.py | c35f8b2ea5e2ba44032b554a298ca176490310d9 | [
"MIT"
] | permissive | WailAbou/Distributed-Processing | 5e2b84edc86b6d709c2599d82434731c6fd64dd6 | 46a36f1fd51d6f8b35cc639eb8002d81d7e09f2b | refs/heads/main | 2023-05-28T05:52:39.790190 | 2021-06-14T00:57:08 | 2021-06-14T00:57:08 | 367,988,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | from Simulation.Agents import Agent
from Simulation.Message import Message, MessageTypes
class Proposer(Agent):
    """Paxos proposer: collects promises from acceptors and drives a value
    towards consensus."""
    # Class-wide counter: one greater than the highest agent id seen so far.
    max_id = 0

    def __init__(self, name, agent_id, value=None):
        super().__init__(name, agent_id, value)
        self.votes = 0              # promises received in the current round
        self.majority = False       # whether a majority of promises was reached
        self.suggested_value = None # the value this proposer originally suggested
        self.consensus = False      # set once an ACCEPTED message arrives
        # Keep the class-wide id counter ahead of every known agent id.
        Proposer.max_id = max(Proposer.max_id, agent_id + 1)

    # NOTE: "recieve" spelling kept — it is the public interface callers use.
    def recieve_promise(self, message, majority):
        """Handle a PROMISE; once a majority is reached (exactly once per
        round), return a factory producing ACCEPT messages per acceptor."""
        if message.source.value:
            # Adopt the highest previously-accepted value, per Paxos rules.
            self.value = max(self.value, message.source.value)

        self.votes += 1
        if self.votes >= majority and not self.majority:
            self.majority = True
            return lambda acceptor: Message(message.destination, acceptor, MessageTypes.ACCEPT)

    def recieve_accepted(self, message):
        """Handle an ACCEPTED message: consensus has been reached."""
        self.consensus = True

    def init_value(self, value):
        """Set both the working value and the remembered suggested value."""
        self.value = value
        self.suggested_value = value

    def reset(self):
        """Reset round state for a new proposal attempt.

        NOTE(review): every reset proposer receives the same
        Proposer.max_id as its agent_id — confirm ids are not required
        to stay unique after a reset.
        """
        self.votes = 0
        self.majority = False
        self.agent_id = Proposer.max_id
| [
"abou.w@hotmail.com"
] | abou.w@hotmail.com |
01094b667d366115bc4a518070a10f4ac74ffa80 | 3423eb1ee4654222fc5b3e83489d4ef571f69308 | /sparkit_me_data_checking/models/vrf_verification_wizard.py | 21f552a9d91e47fcbffc4d4083a198516dfc12da | [] | no_license | janvierb/Sparkit | bd1f8fbd27228d6f0fcab19bddcc060fec40baba | 5db9f11be93bbd5fd379a3c07263f377114f5b2f | refs/heads/master | 2020-09-26T00:16:40.018403 | 2019-12-05T15:39:00 | 2019-12-05T15:39:00 | 226,121,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # -*- coding: utf-8 -*-
from openerp import models, fields, api
from openerp import exceptions
class sparkit_me_data_checking(models.TransientModel):
    """Wizard letting an M&E assistant verify visit report forms in bulk."""
    _name = 'sparkit.vrf_verification_wizard'

    vrf_ids = fields.Many2many('sparkit.vrf', string="Visit Report Forms")
    verified = fields.Boolean(string="Visit Report Form Verified and Attendance Information Entered?")

    @api.multi
    def do_mass_update(self):
        """Approve every selected VRF when the verified box is ticked."""
        self.ensure_one()
        if self.verified:
            self.vrf_ids.write({'state': 'approved'})
        return True

    @api.multi
    def do_reopen_form(self):
        """Return an action that re-opens this wizard on the same record."""
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,  # this model
            'res_id': self.id,  # the current wizard record
            'view_type': 'form',
            'view_mode': 'form',
            'target': 'new',
        }

    @api.multi
    def do_populate_tasks(self):
        """Load all of the current user's pending VRFs, then re-open the form."""
        self.ensure_one()
        vrf_model = self.env['sparkit.vrf']
        pending = vrf_model.search([
            ('state', '!=', 'approved'),
            ('state', '!=', 'cancelled'),
            ('m_e_assistant_id', '=', self.env.uid),
        ])
        self.vrf_ids = pending
        # Re-open the wizard form on the same wizard record.
        return self.do_reopen_form()
"janvierb@sparkmicrogrants.org"
] | janvierb@sparkmicrogrants.org |
8e32eb64c0a7d5268003465a2906f21431987605 | c7713ed30e6edd751ccb811ad3fd48de30f94e33 | /WprimeToMuNu_M_2800_TuneCUETP8M1_13TeV_pythia8_cfi.py | e4a5272bebf0fe233280b78d8cc078d67e927a6c | [] | no_license | bdelacruz/usercode | 3be9fa8d3c761754c95a5c891c691dfd4baaa38d | 76cb706731cde5a4cfb0dec68c628ef39dc1408f | refs/heads/master | 2016-09-05T23:59:26.566827 | 2015-03-17T12:22:14 | 2015-03-17T12:22:14 | 32,382,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 generation of W' -> mu nu at sqrt(s) = 13 TeV with m(W') = 2.8 TeV.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),            # centre-of-mass energy [GeV]
    crossSection = cms.untracked.double(0.020),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'Main:timesAllowErrors = 10000',
            'ParticleDecays:limitTau0 = on',
            'ParticleDecays:tauMax = 10',
            'Tune:ee 3',
            'Tune:pp 5',
            # Enable ffbar -> W' production; id 34 below is the W' boson.
            'NewGaugeBoson:ffbar2Wprime = on',
            '34:m0 = 2800',                     # W' mass [GeV]
            '34:onMode = off',                  # switch off all decay channels...
            '34:onIfAny = 13,14',               # ...then allow only mu / nu_mu
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CUEP8M1Settings',
            'processParameters')
    )
)

ProductionFilterSequence = cms.Sequence(generator)
| [
"begona.delacruz@ciemat.es"
] | begona.delacruz@ciemat.es |
b12c14f2d187174e8f714e4790ec36839780011f | ac5d55e43eb2f1fb8c47d5d2a68336eda181d222 | /Reservoir Sampling/382. Linked List Random Node.py | 535508fa3eecbcc13bfe833e95712b6200c347d5 | [] | no_license | tinkle1129/Leetcode_Solution | 7a68b86faa37a3a8019626e947d86582549374b3 | 1520e1e9bb0c428797a3e5234e5b328110472c20 | refs/heads/master | 2021-01-11T22:06:45.260616 | 2018-05-28T03:10:50 | 2018-05-28T03:10:50 | 78,925,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py | # - * - coding:utf8 - * - -
###########################################
# Author: Tinkle
# E-mail: shutingnjupt@gmail.com
# Name: Linked List Random Node.py
# Creation Time: 2017/9/24
###########################################
'''
Given a singly linked list, return a random node's value from the linked list. Each node must have the same probability of being chosen.
Follow up:
What if the linked list is extremely large and its length is unknown to you? Could you solve this efficiently without using extra space?
Example:
// Init a singly linked list [1,2,3].
ListNode head = new ListNode(1);
head.next = new ListNode(2);
head.next.next = new ListNode(3);
Solution solution = new Solution(head);
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
solution.getRandom();
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
import random
class Solution(object):
    """Return a uniformly random node value from a singly linked list."""

    def __init__(self, head):
        """Store the head and pre-compute the list length.

        @param head The linked list's head.
        Note that the head is guaranteed to be not null, so it contains at least one node.
        :type head: ListNode
        """
        self.head = head
        self.length = 0
        node = head
        while node is not None:
            self.length += 1
            node = node.next

    def getRandom(self):
        """Return a random node's value, each node with equal probability.

        :rtype: int
        """
        # Pick a 0-based position, then walk that many links from the head.
        steps = random.randint(1, self.length) - 1
        node = self.head
        for _ in range(steps):
            node = node.next
        return node.val
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom() | [
"496047829@qq.com"
] | 496047829@qq.com |
76c78b98b9dca510bcb2a7cf815e747ee72e0281 | 6c5f20372604ade5153f54f55b29926e53f51ede | /CodiciSorgentiMButtu/cap6/myenum/06/test_myenum.py | acd32e2750030161dea599871164eab548d8d073 | [] | no_license | Johnny1809/Esercizi-Python | d38dd102c18134230ed9260f1a0739677b533ccc | f4a4d79d0518f0630a8631ba51591baa0b3ce552 | refs/heads/main | 2023-08-14T10:22:57.487917 | 2021-09-30T16:10:37 | 2021-09-30T16:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | import unittest
from myenum import *
class TestPasta(unittest.TestCase):
    """Unit tests for a MyEnum-based enumeration: member order, aliases,
    immutability, iteration and item access."""
    def setUp(self):
        # Fresh enum classes per test; member definition order is
        # significant and is asserted by test_membersOrder.
        class Pasta(MyEnum):
            spaghetti = 1
            lasagne = 2
            tagliatelle = 3
        self.Pasta = Pasta
        # tagliatelle reuses value 1, so it must become an alias of spaghetti.
        class PastaAlias(MyEnum):
            spaghetti = 1
            lasagne = 2
            tagliatelle = 1
        self.PastaAlias = PastaAlias
    def test_membersOrder(self):
        """Verify that members are ordered by definition order."""
        self.assertListEqual(['spaghetti', 'lasagne', 'tagliatelle'], list(self.Pasta.__members__))
    def test_isInstance(self):
        """Verify that members are instances of the Pasta class."""
        for member in self.Pasta.__members__.values():
            self.assertIsInstance(member, self.Pasta)
    def test_memberAttributes(self):
        """Verify that the name and value attributes of members are correct."""
        self.assertEqual(self.Pasta.spaghetti.name, 'spaghetti')
        self.assertEqual(self.Pasta.spaghetti.value, 1)
    def test_noHomonym(self):
        """Verify that two members cannot share the same name."""
        namespace = Namespace({'spaghetti': 1})
        self.assertRaises(KeyError, namespace.update, {'spaghetti': 1})
    def test_doNotChange(self):
        """Verify that members can be neither reassigned nor deleted."""
        self.assertRaises(AttributeError, setattr, self.Pasta, 'spaghetti', 2)
        self.assertRaises(AttributeError, delattr, self.Pasta, 'spaghetti')
    def test_aliases(self):
        """Verify that a member with the same value as an existing one is an alias."""
        self.assertIs(self.PastaAlias.spaghetti, self.PastaAlias.tagliatelle)
    def test_iterable(self):
        """Verify that enumerations are iterable objects."""
        self.assertCountEqual(self.Pasta.__members__.values(), list(self.Pasta))
    def test_aliasAndIterations(self):
        """Verify that aliases do not appear when iterating over the enumeration."""
        desired = [self.PastaAlias.spaghetti, self.PastaAlias.lasagne]
        self.assertListEqual(desired, list(self.PastaAlias))
    def test_getitem(self):
        """Verify that Pasta['member_name'] returns the member."""
        self.assertIs(self.Pasta.spaghetti, self.Pasta['spaghetti'])
if __name__ == '__main__':
    unittest.main()
| [
"89039573+Johnny1809@users.noreply.github.com"
] | 89039573+Johnny1809@users.noreply.github.com |
dde20ff95398266eb94923d9536dbe91b7e82d0c | 0b385cb36c601e483b77ba06f397c7dd66be9e70 | /day07/part1.py | ddb701bb43adc324b437e34a070ada479cb4cd7a | [] | no_license | Sebastian-/advent-of-code-2019 | 3cdddc8442a58c77e48d6e35e79ab5b7b38ec1d7 | 8adce696553f4c00c09de066ae67eed5e35fa4c0 | refs/heads/master | 2020-09-27T07:57:53.477125 | 2019-12-10T22:17:17 | 2019-12-10T22:17:17 | 226,469,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | import itertools
def getOpCode(i):
    """Extract the opcode: the last two decimal digits of an instruction."""
    digits = str(i)
    return int(digits[-2:])
def getParaModes(i):
    """Return the two parameter modes of an intcode instruction.

    Modes are the decimal digits in front of the two-digit opcode,
    left-padded with zeros so exactly two modes are always returned
    (a missing digit means mode 0, i.e. position mode).
    """
    # Direct int conversion instead of the previous map(lambda x: int(x), ...).
    modes = [int(digit) for digit in str(i)[:-2]]
    while len(modes) < 2:
        modes.insert(0, 0)
    return modes
def getOperand(program, addr, mode):
    """Fetch an operand in immediate (mode 1) or position (mode 0) mode.

    Returns None when the address is out of range, mirroring the
    speculative-fetch behaviour the VM loop relies on.
    """
    try:
        if mode == 1:
            return program[addr]
        return program[program[addr]]
    except IndexError:
        return None
def execute(program, inputs):
    """Run an intcode program, mutating *program* in place.

    *inputs* is a list consumed front-to-back by opcode 3.
    Returns None on halt (opcode 99); on the FIRST output instruction
    (opcode 4) it returns the output value and stops — this quirk is
    what execute_sequence relies on to chain amplifier stages.
    """
    pc = 0
    while True:
        op_code = getOpCode(program[pc])
        modes = getParaModes(program[pc])
        # Operands are fetched speculatively; getOperand returns None
        # when the instruction has fewer parameters than two.
        op1 = getOperand(program, pc + 1, modes[-1])
        op2 = getOperand(program, pc + 2, modes[-2])
        if op_code == 99:
            return
        # Add
        if op_code == 1:
            program[program[pc + 3]] = op1 + op2
            pc += 4
            continue
        # Multiply
        if op_code == 2:
            program[program[pc + 3]] = op1 * op2
            pc += 4
            continue
        # Input: pop the next queued input value
        if op_code == 3:
            #x = input('Input a single integer: ')
            x = inputs.pop(0)
            program[program[pc + 1]] = int(x)
            pc += 2
            continue
        # Output: return the value instead of continuing (see docstring)
        if op_code == 4:
            # print(op1)
            # pc += 2
            # continue
            return op1
        # Jump if true
        if op_code == 5:
            if op1 != 0:
                pc = op2
            else:
                pc += 3
            continue
        # Jump if false
        if op_code == 6:
            if op1 == 0:
                pc = op2
            else:
                pc += 3
            continue
        # Less than
        if op_code == 7:
            program[program[pc + 3]] = 1 if op1 < op2 else 0
            pc += 4
            continue
        # Equals
        if op_code == 8:
            program[program[pc + 3]] = 1 if op1 == op2 else 0
            pc += 4
            continue
def execute_sequence(program, inputs):
    """Chain amplifier stages: each phase setting in *inputs* runs a fresh
    copy of *program*, fed with (phase, previous stage's output).

    The first stage receives 0; *inputs* is consumed (emptied) by this call.
    Returns the final stage's output.
    """
    signal = 0
    while inputs:
        phase = inputs.pop(0)
        signal = execute(program.copy(), [phase, signal])
    return signal
def main():
    """Read the intcode program from input.txt and print the maximum
    thruster signal over all phase-setting permutations of 0..4."""
    with open('input.txt') as program_file:
        program = [int(token) for token in program_file.read().split(',')]
    print(program)
    max_thrust = 0
    # Try every ordering of the five phase settings.
    for perm in itertools.permutations([0, 1, 2, 3, 4]):
        max_thrust = max(max_thrust, execute_sequence(program, list(perm)))
    print(max_thrust)
if __name__ == "__main__":
    main()
| [
"hmurgu@hotmail.com"
] | hmurgu@hotmail.com |
2ac2a7e1feba3b14b0453aeba17abc1736994421 | 337065b21aead25e5b69b3932d63e667799d2b7d | /liga09/src/liga.py | 5d52fae7ed46d3a7df7597d1e22c1eeeedee6d92 | [] | no_license | jels/ple-ieslosenlaces | 998e982290e56500652e17ec12d353679530dcbe | 92ba27e34ab10dc2018be411829856ad2912adb9 | refs/heads/master | 2021-01-10T21:18:24.635244 | 2010-05-11T09:05:23 | 2010-05-11T09:05:23 | 37,066,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # -*- encoding: utf-8 -*-
'''
League-table script (Python 2): loads match data, prints team points in
two orders and plots the top six teams.

Created on 19/11/2009
@author: dai
'''
from auxliga import *
from auxgrafico import *
# Read the data and build the matrix.
# datos_liga --> matrix with the data from the file
datos_liga = crea_tabla(r'datos\liga09.csv')
# ex1. print datos_liga: team and points
# in alphabetical order
puntos_equipos(datos_liga)
# ex2. print datos_liga: team and points
# ordered as in the league standings table
datos_liga.sort(ordena_puntos) # sorts the matrix in place
print
print '*' * 20
print
puntos_equipos(datos_liga)
# print only the team names
nombres_equipos = nombres(datos_liga)
print sorted(nombres_equipos)
grafico(seis_primeros(datos_liga), 'Mejores equipos')
"morillas@02d82bf8-8a0b-11dd-8a42-c3ad82d480ce"
] | morillas@02d82bf8-8a0b-11dd-8a42-c3ad82d480ce |
91550b4f5fdd38d817fb48cbdf64b89d252cf433 | a42dc61014a8d81d93a7a3403b94dab0c48e3b4c | /IB/code/option_chain_example_1_tws.py | e5ff27d6a19cc261fe0d3d4fcca97c6693c877bd | [] | no_license | AndSemenoff/andsemenoff.github.io | 2e3ae881dd2ec93dc58f04a12e6b533fd857aca6 | 154ef0cb9f1d304631e90268e443ca9c0b81b696 | refs/heads/master | 2023-08-18T05:00:57.731584 | 2023-08-11T17:47:50 | 2023-08-11T17:47:50 | 41,863,663 | 0 | 2 | null | 2015-11-22T16:04:19 | 2015-09-03T14:16:33 | JavaScript | UTF-8 | Python | false | false | 1,171 | py | from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from threading import Timer
class TestApp(EWrapper, EClient):
    """Minimal Interactive Brokers API client that requests and prints the
    AAPL option chain for one expiry month.

    Acts as both the request side (EClient) and the callback side
    (EWrapper), passing itself as its own wrapper.
    """
    def __init__(self):
        EClient.__init__(self, self)
    def error(self, reqId, errorCode, errorString):
        # Callback: TWS error/status messages.
        print("Error: ", reqId, " ", errorCode, " ", errorString)
    def nextValidId(self, orderId):
        # Callback fired once the connection is ready; kick off the request.
        self.start()
    def contractDetails(self, reqId, contractDetails):
        # Callback: one matching option contract per invocation.
        print("contractDetails: ", reqId, " ", contractDetails, "\n")
    def contractDetailsEnd(self, reqId):
        # Callback: no more contracts for this request.
        print("\ncontractDetails End\n")
    def start(self):
        # Request every AAPL option (all strikes/rights) for the given month.
        contract = Contract()
        contract.symbol = "AAPL"
        contract.secType = "OPT"
        contract.exchange = "SMART"
        contract.currency = "USD"
        contract.lastTradeDateOrContractMonth = "202203" # March 2022 (YYYYMM)
        self.reqContractDetails(1, contract)
    def stop(self):
        # End the message loop and drop the TWS connection.
        self.done = True
        self.disconnect()
def main():
    """Connect to a locally running TWS/IB Gateway (paper-trading port
    7497), let the app stream contract details, and stop after 4 s."""
    app = TestApp()
    app.nextOrderId = 0
    app.connect("127.0.0.1", 7497, 0)
    # Schedule disconnection; app.run() below blocks in the message loop.
    Timer(4, app.stop).start()
    app.run()
if __name__ == "__main__":
    main()
"andsemenoff@yandex.ru"
] | andsemenoff@yandex.ru |
b9003cef7f46933dcddd21d28e39822b4d63acb2 | e3a61e3353b8f20f56fc3adbb3d84ea500f798da | /Code/dummyReduce.py | 38d553d56f27261cbf068d64fbe3004b53d13ec7 | [] | no_license | JamieThomson97/Cloud-Computing | 11522966f26b48a0b4c903c6a7b733fd480e440e | 5fd988e0f8f8e02524cc605943ddb52806e1bac0 | refs/heads/master | 2020-04-03T21:53:50.985201 | 2018-11-21T21:09:51 | 2018-11-21T21:09:51 | 155,585,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | #!/usr/bin/env python
# Hadoop-streaming reducer: groups words by their sorted-letter key and
# prints every group of anagrams that has at least two distinct words.
# Takes system input, where each line a Key Value pair of a lexicographically sorted word as the Key, and the actual word as the Value
# Outputs a list of every occurrence of every anagram in the input
import sys
# Dictionary that the Key Value pairs will be added to
anagram_pairs = {}
for line in sys.stdin:
    # For every line input, separate the string on the "tab" character
    # This will produce a list containing the Key as element 0 and Value as element 1
    words = line.split("\t")
    # Assign the Key and the Value
    key = words[0].strip("\r\n")
    value = words[1].strip("\r\n")
    # If the Key (word sorted lexicographically) is not in the anagram_pairs dictionary,
    # i.e. hasn't appeared in the input yet,
    # Add the Key as a new Key in the anagram_pairs dictionary
    if key not in anagram_pairs:
        anagram_pairs[key] = []
    # If the Value is not already in the current Key's values,
    # e.g. this anagram of the current word, has not appeared in input yet
    # Add the Value to the Key's values
    # (linear membership scan; a list is kept so output preserves first-seen order)
    if value not in anagram_pairs[key]:
        anagram_pairs[key].append(value)
# For every Key-Values set in anagram_pairs
for i in anagram_pairs:
    # If there is at least 2 words in the values, i.e. at least one pair of anagrams
    if len(anagram_pairs[i]) > 1:
        # Output the set of anagrams for that particular word
        print(str(anagram_pairs[i]))
| [
"j.thomson-15@student.lboro.ac.uk"
] | j.thomson-15@student.lboro.ac.uk |
9123cc72f71833fb8b4612137af9c487f54ecee8 | 3b63a9e4b00d69deb44a3e575f385217f936eedc | /prac_06/box_layout_demo.py | e208e4200afb0fa5c576746517b7bb9883327deb | [] | no_license | AbelLim/cp1404Practicals | 1e87ba6a76ea749fc54d51ec8fcc8435842190df | 0ebfa41176050ac8ef7f2ea5e2e33169ada15197 | refs/heads/master | 2020-03-06T22:40:18.154275 | 2018-05-31T10:03:40 | 2018-05-31T10:03:40 | 127,109,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | from kivy.app import App
from kivy.lang import Builder
class BoxLayoutDemo(App):
    """Kivy demo app: greets or clears a name typed into a BoxLayout UI
    defined in the companion box_layout.kv file."""
    def build(self):
        # Load the widget tree from the kv file and use it as the root.
        self.title = "Box Layout Demo"
        self.root = Builder.load_file('box_layout.kv')
        return self.root
    def handle_greet(self):
        # Show a greeting for the name currently in the text input.
        self.root.ids.output_label.text = "Hello {}".format(self.root.ids.input_name.text)
    def handle_clear(self):
        # Reset both the output label and the name input field.
        self.root.ids.output_label.text = ""
        self.root.ids.input_name.text= ""
# Instantiate and start the Kivy event loop (blocks until the window closes).
BoxLayoutDemo().run()
| [
"abel.lim@my.jcu.edu.au"
] | abel.lim@my.jcu.edu.au |
5ada83d5248851904d6558b3dd0fd921087c75a9 | e194614b5dea1a31e32059eaa2f0db2f8c553c63 | /worker.py | 190324061bb13228b03cfd533434d5bc7967509d | [] | no_license | DanielCatz/JobPostReader | 88782dfca05639fbd0ed6d8726877d0228fbcb5f | 3b2bf3d9e90d30916b00a364c0f822fa7364fe07 | refs/heads/master | 2022-12-21T08:52:56.911602 | 2017-08-30T22:54:24 | 2017-08-30T22:54:24 | 101,807,062 | 1 | 0 | null | 2022-12-08T00:35:43 | 2017-08-29T21:09:03 | CSS | UTF-8 | Python | false | false | 313 | py | import os
import redis
from rq import Worker, Queue, Connection
# Queue names this worker consumes from, in priority order.
listen = ['default']
# Redis connection URL: REDISTOGO_URL when deployed (e.g. Heroku),
# otherwise a local Redis instance.
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
    # Block forever, popping and executing jobs from the listed queues.
    with Connection(conn):
        worker = Worker(list(map(Queue, listen)))
        worker.work()
| [
"daniel.caterson@gmail.com"
] | daniel.caterson@gmail.com |
41124f0b638323fe0d56147e5d6b6fd13511885f | 2334ce5d9f1a151262ca6822e166ae5074f7e7b8 | /boj_lecture/dp/part1/boj11053.py | 806fa42aa23a6337c459809c32edb39aac068e07 | [] | no_license | passionCodingTest/Injeong | 6c9330360c7ef11d6dc05b1990db7d5b20bf3443 | b812f19b8733bc64e319ad81ee53edaf5290989f | refs/heads/main | 2023-06-22T16:33:22.509163 | 2021-07-27T12:55:31 | 2021-07-27T12:55:31 | 341,564,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import sys
input = sys.stdin.readline
# Longest Increasing Subsequence, O(n^2) DP (BOJ 11053):
# dp[i] = length of the longest strictly increasing subsequence that
# ends at index i (every element alone gives length 1).
n = int(input())
req = list(map(int, input().split()))
dp = [1 for _ in range(n)]
for i in range(n):
    for j in range(i):
        # Extend any earlier subsequence whose last element is smaller.
        if req[i] > req[j]:
            dp[i] = max(dp[i], dp[j] + 1)
print(max(dp))
print(max(dp)) | [
"injeong410@gmail.com"
] | injeong410@gmail.com |
68e09501a51d712d45387f738b12c0239a752984 | b4777bf27a6d10d0e5b1c51351f9ad14a049b5e7 | /results_discrete_paradigm_acc.py | 1f08f50c522ed31784d9ff4e831821666ace9b7e | [] | no_license | bioelectric-interfaces/cfir | 1216ba1b62935f99f8821ccce2577be9cf71c6b8 | 6034b5216352e5d933405bccbe9a67b9e89c4735 | refs/heads/master | 2022-07-12T10:45:17.758669 | 2020-03-10T13:34:10 | 2020-03-10T13:34:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,356 | py | """
Figure 5: Discrete paradigm accuracy for one subject with median SNR
"""
import pandas as pd
import pylab as plt
import numpy as np
import seaborn as sns
from filters import CFIRBandEnvelopeDetector, RectEnvDetector
from utils import magnitude_spectrum
from constants import FS, DELAY_RANGE
from sklearn.metrics import roc_auc_score, average_precision_score, balanced_accuracy_score
def get_classes(y, alpha, n_states=3):
y_pred = np.zeros(len(y))
if n_states == 3:
y_pred[y > np.percentile(y, alpha)] = 1
y_pred[y > np.percentile(y, 100 - alpha)] = 2
if n_states == 2:
y_pred[y > np.percentile(y, 100 - alpha)] = 1
return y_pred
dataset = 8
eeg_df = pd.read_pickle('data/train_test_data.pkl').query('subj_id=={}'.format(dataset))
envelope = eeg_df['an_signal'].abs().values
band = eeg_df[['band_left', 'band_right']].values[0]
magnitude_spectrum_train = {}
_, weights = magnitude_spectrum(eeg_df['eeg'].values, FS)
stats_df = pd.read_pickle('results/stats.pkl').query('subj_id=={}'.format(dataset))
flatui = {'cfir':'#0099d8', 'acfir': '#84BCDA', 'wcfir':'#FE4A49', 'rect':'#A2A79E'}
alpha=5
#DELAY_RANGE = np.linspace(-50, 100, 51, dtype=int)
acc = np.zeros(len(DELAY_RANGE))
acc_rand = np.zeros(len(DELAY_RANGE))
fig, axes = plt.subplots(2, 2, sharey='col', figsize=(6,6))
plt.subplots_adjust(hspace=0.4, wspace=0.4)
for j_n_states, n_states in enumerate([2, 3]):
y_true = get_classes(envelope, alpha, n_states)
for method_name, method_class in zip(
['cfir', 'rect', 'wcfir'],
[CFIRBandEnvelopeDetector, RectEnvDetector, CFIRBandEnvelopeDetector]):
acc = np.zeros(len(DELAY_RANGE))*np.nan
for d, DELAY in enumerate(DELAY_RANGE):
if method_name == 'rect' and DELAY <0: continue
params = stats_df.query('method=="{}" & metric=="corr" & delay=="{}"'.format(method_name, DELAY*2))['params'].values[0]
params['weights'] = weights if method_name == 'wcfir' else None
env_det = method_class(band=band, fs=FS, delay=DELAY, **params)
envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
# params = stats_df.query('method=="rect" & metric=="corr"')['params'].values[0]
# env_det = WHilbertFilter(band=band, fs=FS, delay=DELAY, **params)
# envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
#
# params = stats_df.query('method=="whilbert" & metric=="corr"')['params'].values[0]
# env_det = WHilbertFilter(band=band, fs=FS, **params)
# envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
#
# params = stats_df.query('method=="ffiltar" & metric=="corr"')['params'].values[0]
# env_det = RectEnvDetector(band, FS, params['n_taps'], DELAY)
# env_det = WHilbertFilter(band=band, fs=FS, **params)
y_pred = get_classes(envelope_pred, alpha, n_states)
acc[d] = balanced_accuracy_score(y_true, y_pred) if (method_name in ['cfir', 'wcfir'] or DELAY>=0) else np.nan
axes[j_n_states, 1].plot(DELAY_RANGE*2, acc*100, '.-', label=method_name, color=flatui[method_name])
axes[j_n_states, 1].plot(DELAY_RANGE*2, DELAY_RANGE*0 + balanced_accuracy_score(y_true, y_true*0)*100, '.-', color='k', label='all-high')
# [ax.set_xlabel('Delay, ms') for ax in axes[:, 1]]
axes[1, 1].set_xlabel('Delay, ms')
axes[1, 1].legend()
axes[0, 1].set_ylabel('Balanced accuracy score, %')
axes[1, 1].set_ylabel('Balanced accuracy score, %')
axes[0, 0].set_title('A. High/Other\n', x = 0)
axes[1, 0].set_title('B. High/Middle/Low\n', ha='right')
[ax.axvline(0, color='k', linestyle='--', alpha=0.5, zorder=-1000) for ax in axes[:, 1]]
# plt.plot(envelope0ms)
# plt.plot(envelope)
#
# sns.kdeplot(envelope, envelope0ms)
# plt.savefig('results/viz/res-classification.png', dpi=500)
ax = axes
# fig, ax = plt.subplots(2, figsize=(6, 6))
up = np.percentile(envelope*1e6, 100-alpha)
low = np.percentile(envelope*1e6, alpha)
t = np.arange(len(envelope))/500
ax[0, 0].plot(t-58, envelope*1e6, color='k')
ax[0, 0].axhline(np.percentile(envelope*1e6, 100-alpha), color='k', linestyle='--')
ax[0, 0].text(8.5, up+4, 'High', ha='center')
ax[0, 0].text(8.5, up-3, 'Other', ha='center')
# plt.axhspan(np.percentile(envelope*1e6, alpha), np.percentile(envelope*1e6, 100-alpha), color=flatui['cfir'], alpha=0.5)
# plt.axhspan(np.percentile(envelope*1e6, alpha), -1000, color=flatui['wcfir'], alpha=0.5)
ax[0, 0].set_ylim(-7, 20)
ax[0, 0].set_xlim(0, 10)
ax[0, 0].set_ylabel('Envelope, $uV$')
ax[1, 0].plot(t-58, envelope*1e6, color='k')
ax[1, 0].axhline(np.percentile(envelope*1e6, 100-alpha), color='k', linestyle='--')
ax[1, 0].axhline(np.percentile(envelope*1e6, alpha), color='k', linestyle='--')
ax[1, 0].text(8.5, up+4, 'High', ha='center')
ax[1, 0].text(8.5, up-3, 'Middle', ha='center')
ax[1, 0].text(8.5, low-5, 'Low', ha='center')
# plt.axhspan(np.percentile(envelope*1e6, alpha), np.percentile(envelope*1e6, 100-alpha), color=flatui['cfir'], alpha=0.5)
# plt.axhspan(np.percentile(envelope*1e6, alpha), -1000, color=flatui['wcfir'], alpha=0.5)
ax[1, 0].set_ylim(-7, 20)
ax[1, 0].set_xlim(0, 10)
ax[1, 0].set_ylabel('Envelope, $uV$')
ax[1, 0].set_xlabel('Time, s')
# plt.savefig('results/viz/res-classification-explained.png', dpi=500) | [
"n.m.smetanin@gmail.com"
] | n.m.smetanin@gmail.com |
7aa41765cd6860e2540b6f799c4551cd82d47f48 | 34148545a20f0b9fe07860d1107e6aab2ec1f75d | /info_spider/Scrapy_History_Hanchao_V1_01/build/lib/Scrapy_History_Hanchao_V1_01/spiders/Zhuixue_01.py | 139bef56439c9928931b6c7045a6f1948b1c9a0b | [] | no_license | tangzhutao/chf | 9bb9fa9b6ad75f1b587364e1005922c5bdddb4ca | 4b249aee9689d3669306bbf020ad7fbb7e6b92bc | refs/heads/master | 2022-12-03T03:55:17.308231 | 2020-08-21T09:57:47 | 2020-08-21T09:57:47 | 288,969,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | # -*- coding: utf-8 -*-
import scrapy, time, re
from scrapy.utils import request
from Scrapy_History_Hanchao_V1_01.items import InfoItem
import requests
from urllib3 import encode_multipart_formdata
from Scrapy_History_Hanchao_V1_01.ApolloConfig import IMAGES_STORE, SPIDER_NAME, UPLOADURL
class Zhuixue01Spider(scrapy.Spider):
name = 'Zhuixue_01'
base_url = 'http://lishi.zhuixue.net'
url_name = '追学网'
def start_requests(self):
for i in range(3):
url = f'http://lishi.zhuixue.net/hanchao/list_43_{i + 1}.html'
req = scrapy.Request(url=url, callback=self.parse, dont_filter=True)
yield req
def parse(self, response):
get_info = response.xpath('//div[@class="list1"]/li/a/@href').extract()
for info in get_info:
url = self.base_url + info
req = scrapy.Request(url=url, callback=self.detail_parse, dont_filter=True)
news_id = request.request_fingerprint(req)
req.meta.update({'news_id': news_id})
yield req
def detail_parse(self, response):
headers = {}
for k, v in response.request.headers.items():
headers[k.decode()] = v[0].decode()
title = response.xpath('//ul[@class="lisbt"]/li[1]/span/h1/text()').extract_first()
try:
issue_time = re.findall(r'\d+-\d+-\d+ \d+:\d+', response.text)[0].split(' ')[0]
except IndexError:
issue_time = None
content = response.xpath('//ul[@class="lisnr"]').extract_first()
images_url = response.xpath('//ul[@class="lisnr"]//img/@src').extract()
item = InfoItem()
images = []
if images_url:
for image_url in images_url:
if 'http' in image_url:
link = image_url
else:
link = self.base_url + image_url
res = self.download_img(link, headers)
if res['success']:
self.logger.info({'图片下载完成': link})
images.append(res['data']['url'])
else:
self.logger.info({'图片下载失败': link})
item['images'] = ','.join(images) if images else None
item['category'] = '汉朝'
item['content_url'] = response.url
item['title'] = title
item['issue_time'] = issue_time if issue_time else None
item['information_source'] = '历史追学网'
item['sign'] = '19'
item['news_id'] = response.meta['news_id']
item['content'] = content
item['author'] = None
item['title_image'] = None
item['attachments'] = None
item['area'] = None
item['address'] = None
item['tags'] = None
item['update_time'] = str(int(time.time() * 1000))
item['source'] = None
if content:
yield item
self.logger.info({'title': title, 'issue_time': issue_time})
def download_img(self, url, headers):
resp = requests.get(url, headers=headers)
file_name = url.split('/')[-1]
file = {
'file': (file_name, resp.content)
}
send_url = UPLOADURL + SPIDER_NAME
encode_data = encode_multipart_formdata(file)
file_data = encode_data[0]
headers_from_data = {
"Content-Type": encode_data[1]
}
response = requests.post(url=send_url, headers=headers_from_data, data=file_data).json()
return response
if __name__ == '__main__':
from scrapy import cmdline
cmdline.execute(['scrapy', 'crawl', 'Zhuixue_01'])
| [
"18819492919@163.com"
] | 18819492919@163.com |
a5e90c758d9db85ca4fb26d6193b20b07ffc150c | b75c24fe09dfcf2ab544f4209e282c6bd43b0a23 | /salalql/salalql/schema.py | 744ee165d584c34b787666cbc4beef802bb45bde | [] | no_license | Majdi-evet/GraphQL | 8786f5bd00f74f68ac5d7ab083dae0594eaff40f | 936c18bde356238be3094dc8fc0d0e2e018cb657 | refs/heads/master | 2021-06-16T23:16:08.418710 | 2019-09-10T12:22:39 | 2019-09-10T12:22:39 | 207,552,320 | 0 | 0 | null | 2021-06-09T18:25:07 | 2019-09-10T12:23:18 | Python | UTF-8 | Python | false | false | 273 | py | import graphene
import salgql.schema
class Query(salgql.schema.Query, graphene.ObjectType):
    """Root GraphQL query type, aggregating the app-level queries."""
    pass
class Mutation(salgql.schema.Mutation, graphene.ObjectType):
    """Root GraphQL mutation type, aggregating the app-level mutations."""
    pass
# Build the schema once with both roots.  The previous version first
# created a query-only Schema that was immediately overwritten (dead code).
schema = graphene.Schema(query=Query, mutation=Mutation)
"majdi.mohammad.git@gmail.com"
] | majdi.mohammad.git@gmail.com |
9f38297ffcb415afd27671f80d18b3c3ccc487db | cb57a9ea4622b94207d12ea90eab9dd5b13e9e29 | /lc/python/1768_merge_strings_alternately.py | 32222174bc34d1567b034641491b8b2e157d8c7a | [] | no_license | boknowswiki/mytraning | b59585e1e255a7a47c2b28bf2e591aef4af2f09a | 5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a | refs/heads/master | 2023-08-16T03:28:51.881848 | 2023-08-10T04:28:54 | 2023-08-10T04:28:54 | 124,834,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | # string and two pointers
# time O(max(m,n))
# space O(1)
class Solution:
    def mergeAlternately(self, word1: str, word2: str) -> str:
        """Interleave word1 and word2 one character at a time.

        When one word is exhausted, the remainder of the other is
        appended.  O(m + n) time and space.
        """
        # Fast paths: nothing to interleave.
        if not word1:
            return word2
        if not word2:
            return word1
        pieces = []
        i = j = 0
        turn = 0
        # Alternate: even turns take from word1, odd turns from word2.
        while i < len(word1) and j < len(word2):
            if turn % 2 == 0:
                pieces.append(word1[i])
                i += 1
            else:
                pieces.append(word2[j])
                j += 1
            turn += 1
        # Append whatever is left of the longer word.
        if i == len(word1):
            pieces.extend(word2[j:])
        if j == len(word2):
            pieces.extend(word1[i:])
        return "".join(pieces)
class Solution(object):
    def mergeAlternately(self, word1, word2):
        """Merge word1 and word2 alternately using two lockstep cursors.

        Each loop turn appends at most one character from each word,
        so the tail of the longer word falls out naturally.
        """
        out = []
        i = j = 0
        while i < len(word1) or j < len(word2):
            if i < len(word1):
                out.append(word1[i])
                i += 1
            if j < len(word2):
                out.append(word2[j])
                j += 1
        return "".join(out)
class Solution(object):
    def mergeAlternately(self, word1, word2):
        """Alternate characters from both words, position by position."""
        longest = max(len(word1), len(word2))
        merged = []
        for idx in range(longest):
            # Each index contributes up to two characters, one per word.
            if idx < len(word1):
                merged.append(word1[idx])
            if idx < len(word2):
                merged.append(word2[idx])
        return "".join(merged)
| [
"noreply@github.com"
] | boknowswiki.noreply@github.com |
4018b0516499f17330c42c31ae7da61a6d32fc32 | 32ecfb8792c3ddbf44263e69d5ab4432f4072a8c | /rootfs_wifi/root/lwm2m.py | 5aafd5aa2fae6cbc7a6b05c66835afa4de422ca6 | [] | no_license | scw-92/151_wifi_rootfs | 314ac7f22fa06e3d348143b26025178870c19f6f | 7575fb33d6623cd2fbee56366671a710770f7db6 | refs/heads/master | 2020-04-17T07:24:20.660142 | 2019-01-18T08:16:06 | 2019-01-18T08:16:06 | 166,367,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,238 | py | # -*- coding: utf-8 -*-
import serial
import os
from time import sleep
class IotLwm2m(object):
"""IotLwm2m 使用方法 """
def __init__(self, name="aplex",serial_name = "/dev/ttyUSB0" ):
self.name = name #公司的名字
self.Power_on_init_list = ["at+miplver?","AT+MIPLCREATE","AT+MIPLADDOBJ=0,3303,2,11,6,1","AT+MIPLADDOBJ=0,3306,1,1,5,0","AT+MIPLOPEN=0,3600,30"] #
self.ack_read_list = [] #记录着OneNet平台下发的read请求的报文编号
self.ack_write_list = [] #记录着OneNet平台下发的read请求的报文编号
self.ack_execture_list = [] #记录着OneNet平台下发的read请求的报文编号
self.ack_look_list = [] #记录着OneNet平台下发的read请求的报文编号
self.serial_name = serial_name
self.serial = ""
def __str__(self):
return "%s 公司的提供的iot通过lwm2m协议接入Onenet的方法" % (self.name, )
def power_iot(self):
#os.system('echo 19 > /sys/class/gpio/export')
#os.system('echo out > /sys/class/gpio/gpio19/direction')
os.system('echo 1 > /sys/class/gpio/gpio19/value')
os.system('sleep 1')
os.system('echo 0 > /sys/class/gpio/gpio19/value')
sleep (2)
os.system('echo 1 > /sys/class/gpio/gpio19/value')
os.system('sleep 1')
os.system('echo 0 > /sys/class/gpio/gpio19/value')
sleep(8)
def setup_serial(self,speed = 9600,readtimeout = 1):
self.serial = serial.Serial(self.serial_name, speed,timeout = readtimeout)
if self.serial.isOpen():
print("open success")
else:
print("open failed")
def auto_connect(self):
for list in self.Power_on_init_list:
cmd_iot = list + "\r\n"
self.serial.write(cmd_iot.encode())
data = self.serial.read_all().decode()
print (data)
sleep (1)
def ack_iot(self): #iot终端向onenet平台的回复信息
sleep(0.5)
data = self.serial.read_all().decode()
send_data = ""
#print("recv:"+data)
#ack_data[1]表示当前来自云平台的报文id
if "+MIPLOBSERVE:" in data: #询问有没有这个实例
ack_data = data.split(',')
#print(ack_data)
send_data = "AT+MIPLOBSERVERSP=0,%s,1\r\n" % (ack_data[1],)
self.serial.write(send_data.encode())
sleep(0.5)
elif "+MIPLDISCOVER:" in data: #询问类型的成员
ack_data = data.split(',')
#'+MIPLDISCOVER: 0',
#'61245',
#'3303\r\n'
ack_data[2] = ack_data[2][0:4]
#print(ack_data)
#在这里根据对象类型的文档将对象类型的结构提前定义好,这里以3303对象类型为例
if ack_data[2] == "3303":
send_data = 'AT+MIPLDISCOVERRSP=0,%s,1,34,"5700;5701;5601;5602;5603;5604;5605"\r\n' % (ack_data[1],)
elif ack_data[2] == "3306":
send_data = 'AT+MIPLDISCOVERRSP=0,%s,1,24,"5850;5851;5852;5853;5750"\r\n' % (ack_data[1],)
#print(send_data)
self.serial.write(send_data.encode())
sleep(0.5)
elif "+MIPLREAD:" in data: #通知读取结果
print("recv:"+data)
ack_data = data.split(',')
#recv:+MIPLREAD: 0,
#4932,
#3303,
#0,
#5700
print(ack_data)
ack_data[4] = ack_data[4][0:4]
print (ack_data[2],ack_data[4])
if ack_data[2] == "3303" :
if ack_data[4] == "5700" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,20.123,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
elif ack_data[4] == "5701" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,1,5,aplex,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
elif ack_data[4] == "5601" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,20.135,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
elif ack_data[4] == "5602" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,80.123,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
elif ack_data[4] == "5603" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,44.55,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
elif ack_data[4] == "5604" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,55.66,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
elif ack_data[4] == "5605" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,2,3,zwd,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
if ack_data[2] == "3306" :
if ack_data[4] == "5853" :
send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,1,5,aplex,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
self.serial.write(send_data.encode())
sleep(0.5)
elif "+MIPLWRITERSP:" in data: #通知写入的消息结果
print("recv:"+data)
ack_data = data.split(',')
send_data = 'AT+MIPLREADRSP=0,%s,1,3303,0,5700,4,4,20.123,0,0' % (ack_data[1],)
print(send_data)
self.serial.write(send_data.encode())
elif "+MIPLEXECUTERSP:" in data: #通知执行操作果
print("recv:"+data)
ack_data = data.split(',')
send_data = 'AT+MIPLREADRSP=0,%s,1,3303,0,5700,4,4,20.123,0,0' % (ack_data[1],)
print(send_data)
self.serial.write(send_data.encode())
elif "+MIPLOBSERVERSP:" in data: #通知观测指令是否有效
print("recv:"+data)
ack_data = data.split(',')
send_data = 'AT+MIPLREADRSP=0,%s,1,3303,0,5700,4,4,20.123,0,0' % (ack_data[1],)
print(send_data)
self.serial.write(send_data.encode())
else:
print(data)
#上报云平台对象的内部结构
'''
data = serial.read_all().decode()
print(observe_list[0])
send_data = "AT+MIPLNOTIFY=0,%s,3303,0,5700,4,2,34,0,0,0\r\n" % (observe_list[0],)
print(send_data)
serial.write(send_data.encode())
data = serial.read_all().decode()
'''
if __name__ == '__main__':
    # Power-cycle the modem, open the serial port, register with OneNet,
    # then service platform requests forever.
    iot_lwm2m = IotLwm2m()
    iot_lwm2m.power_iot()
    iot_lwm2m.setup_serial()
    iot_lwm2m.auto_connect()
    while True:
        iot_lwm2m.ack_iot()
"1142344150@qq.com"
] | 1142344150@qq.com |
ca674d56b645b5721ff9210287a3026a3c86b84d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2801/58758/256072.py | 829cc7621c561a24efea43b99bb9b2ba608d94f2 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | n = int(input())
nums = [int(x) for x in input().split()]
# After sorting, it suffices to test adjacent triples: if any
# nums[i] + nums[i+1] > nums[i+2] holds, those three lengths form a
# valid (non-degenerate) triangle.
nums.sort()
flag = False
for i in range(0, len(nums)-2):
    if nums[i] + nums[i+1] > nums[i+2]:
        flag = True
        break
if flag:
    print('YES')
else:
    print('NO')
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
4db543d1def850a08bae32fd819afa667f24aa08 | 31ac8f22185155729c66f0197ad619291a75c577 | /physics.py | 9a25fd307452791a1aebf3d4d4e518d1278c6462 | [] | no_license | RebeccaEEMartin/hackathongame | 9408dcba178104adc92a86e679e864ca21346698 | a02bf47b30d3040d8dcdc3517215283e4c6dffb1 | refs/heads/master | 2021-07-22T05:20:10.744194 | 2017-10-29T13:45:46 | 2017-10-29T13:45:46 | 108,660,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,217 | py | from math import pi
import random
import pygame
import PyParticles
(width, height) = (400, 400)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Springs')
# Physics world with gravity-like acceleration and air drag.
universe = PyParticles.Environment((width, height))
universe.colour = (255,255,255)
universe.addFunctions(['move', 'bounce', 'collide', 'drag', 'accelerate'])
# (angle, magnitude) pair — assumed to point "down" (angle pi);
# TODO confirm against the PyParticles API.
universe.acceleration = (pi, 0.01)
universe.mass_of_air = 0.02
# Two chains of three particles each; the first particle of each chain
# (indices 0 and 3) is fixed in place as an anchor.
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=True, x=175, y=100)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=True, x=225, y=100)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
# Link each chain with two springs of rest length 50.
universe.addSpring(0,1, length=50, strength=1)
universe.addSpring(1,2, length=50, strength=1)
universe.addSpring(3,4, length=50, strength=1)
universe.addSpring(4,5, length=50, strength=1)
selected_particle = None
paused = False
running = True
# Main loop: handle input, step the simulation, redraw.
while running:
    #print pygame.mouse.get_pos()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Toggle pause (tuple-index trick: False->True, True->False).
                paused = (True, False)[paused]
        elif event.type == pygame.MOUSEBUTTONDOWN:
            # Grab the particle under the cursor (if any) for dragging.
            selected_particle = universe.findParticle(pygame.mouse.get_pos())
        elif event.type == pygame.MOUSEBUTTONUP:
            selected_particle = None
    # Drag the grabbed particle towards the current mouse position.
    if selected_particle:
        selected_particle.mouseMove(pygame.mouse.get_pos())
    if not paused:
        universe.update()
    # Redraw: particles as filled circles, springs as anti-aliased lines.
    screen.fill(universe.colour)
    for p in universe.particles:
        pygame.draw.circle(screen, p.colour, (int(p.x), int(p.y)), p.size, 0)
    for s in universe.springs:
        pygame.draw.aaline(screen, (0,0,0), (int(s.p1.x), int(s.p1.y)), (int(s.p2.x), int(s.p2.y)))
    pygame.display.flip()
"kelvinfowler168@gmail.com"
] | kelvinfowler168@gmail.com |
19a7c46c69e57295cfca3ac8ae09ffd075ac82a6 | c005eb04da66147c2e7e7de7e5d106ad6bb114c2 | /codes/exercise.py | a30dad2f78d8fd1cb7b1044806fbc1096b114586 | [] | no_license | maydaycha/thesis | 9bc9875599827ab421f6dc9349cb9f698161115b | 2a5b2c33d8c4b0dc18bf18a846c5b291b4d1fa11 | refs/heads/master | 2021-05-03T10:02:23.620563 | 2015-07-23T18:16:08 | 2015-07-23T18:16:08 | 32,448,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm

# Load iris, drop class 0 and keep only the first two features so the
# decision boundary can be plotted in 2-D.
iris = datasets.load_iris()
X = iris.data
y = iris.target

X = X[y != 0, :2]
y = y[y != 0]

n_sample = len(X)

# Deterministic shuffle before the train/test split.
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
# FIX: np.float was removed in NumPy 1.24; the builtin float is the
# equivalent dtype.
y = y[order].astype(float)

# 90/10 train/test split.  FIX: slice indices must be integers -- the
# original `X[:.9 * n_sample]` float slices raise TypeError on Python 3 /
# modern NumPy.
n_train = int(0.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]

# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
    # FIX: Python 2 print statements converted to print() calls (the rest of
    # the script already uses the Python 3 form).
    print("fig_num: %d" % fig_num)
    print("kernel" + kernel)
    clf = svm.SVC(kernel=kernel, gamma=10)
    clf.fit(X_train, y_train)

    plt.figure(fig_num)
    plt.clf()
    plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)

    # Circle out the test data
    plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)

    plt.axis('tight')
    x_min = X[:, 0].min()
    x_max = X[:, 0].max()
    y_min = X[:, 1].min()
    y_max = X[:, 1].max()

    # Evaluate the decision function on a 200x200 grid covering the data.
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)

    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
                levels=[-.5, 0, .5])

    plt.title(kernel)
plt.show()
| [
"maydaychaaaa@gmail.com"
] | maydaychaaaa@gmail.com |
a671c11a8cb33a56a991823ae0f4142848d9c74a | 003718246d9bf247b76242ce419adf430f9da3f6 | /VMtranslator/CodeWriter.py | 43152f7c3f48137d04f2e145fee0d9b1a968a199 | [] | no_license | talashaked/Nand | 029a3ecca29df2e2aeb49b62bca5fc84f09288d7 | 1b7d37deb024939e27be86617b942f55169ec743 | refs/heads/main | 2023-03-26T06:30:10.265729 | 2021-03-24T10:45:03 | 2021-03-24T10:45:03 | 351,038,856 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,751 | py | import os
class CodeWriter:
    """Translates parsed Hack VM commands into Hack assembly text and writes
    the result to a single output ``.asm`` file.  One instance is shared for a
    whole translation run: the label counters and current-function name
    persist across input files."""

    def __init__(self, ofileStr):
        """Open the output file and initialise translation state.

        :param ofileStr: path of the .asm file to create/overwrite
        """
        self.ofile = open(ofileStr, 'w')
        # The nine VM arithmetic/logical command names; writeArithmetic()
        # compares against entries of this list by index.
        self.ArithmeticCommand = ['add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not']
        self.count = 0  ## for the labels in the arithmetic functions
        # Counter used to make each call's return label unique.
        self.countCall = 0
        # Name of the function currently being translated; prefixes labels.
        self.curFuncName = ""

    def setFileName(self, fileName):
        """
        writes the current filename in the output file
        :param fileName: path of the .vm file now being translated; its
            basename (without extension) namespaces static variables
        :return:
        """
        self.ofile.write("//"+fileName+"\n")
        self.cur_fileName = os.path.basename(fileName).split(".")[0]

    def writeArithmetic(self, s):
        """
        writes the matching arithmetic command in the output
        :param s: the arithmetic command given
        :return:
        """
        # `count` makes the jump labels emitted by eq/gt/lt unique for each
        # comparison written into the .asm file.
        count = self.count
        if s == self.ArithmeticCommand[0]:
            # add: pop y, pop x, push x + y
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=D+M\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[1]:
            # sub: pop y, pop x, push x - y
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=M-D\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[2]:
            # neg: negate the top of the stack in place
            self.ofile.write("@SP\nA=M\nA=A-1\nM=-M\n")
        elif s == self.ArithmeticCommand[3]:
            # eq: the operands' signs are branched on first, so x - y is only
            # computed when both operands share a sign (mixed signs can never
            # be equal and get 0/false directly).  Pushes -1 (true) or 0.
            self.ofile.write("@SP\nA=M-1\nA=A-1\nD=M\n@FIRSTNONNEG"+str(count)+"\nD;JGE\n@FIRSTNEG"+str(count)+"\n0;JMP\n(FIRSTNONNEG"+str(count)+")\n"
                             "@SP\nA=M-1\nD=M\n@SAMESIGN"+str(count)+"\nD;JGE\n@SECONDNEGFIRSTNONNEG"+str(count)+"\n0;JMP\n(FIRSTNEG"+str(count)+")\n@SP\n"
                             "A=M-1\nD=M\n@SECONDNONNEGFIRSTNEG"+str(count)+"\nD;JGE\n@SAMESIGN"+str(count)+"\n0;JMP\n(SAMESIGN"+str(count)+")\n@SP\nA=M-1\n"
                             "D=M\nA=A-1\nD=M-D\n@TEMP\nM=-1\n@FINISH"+str(count)+"\nD;JEQ\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n"
                             "(SECONDNEGFIRSTNONNEG"+str(count)+")\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n(SECONDNONNEGFIRSTNEG"+str(count)+")\n@TEMP\nM=0\n"
                             "@FINISH"+str(count)+"\n0;JMP\n(FINISH"+str(count)+")\n@TEMP\nD=M\n@SP\nA=M\nA=A-1\nA=A-1\nM=D\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[4]:
            # gt: same sign-splitting scheme as eq, with D;JGT for the
            # same-sign subtraction; mixed-sign results are decided directly.
            self.ofile.write("@SP\nA=M-1\nA=A-1\nD=M\n@FIRSTNONNEG"+str(count)+"\nD;JGE\n@FIRSTNEG"+str(count)+"\n0;JMP\n(FIRSTNONNEG"+str(count)+")\n"
                             "@SP\nA=M-1\nD=M\n@SAMESIGN"+str(count)+"\nD;JGE\n@SECONDNEGFIRSTNONNEG"+str(count)+"\n0;JMP\n(FIRSTNEG"+str(count)+")\n@SP\n"
                             "A=M-1\nD=M\n@SECONDNONNEGFIRSTNEG"+str(count)+"\nD;JGE\n@SAMESIGN"+str(count)+"\n0;JMP\n(SAMESIGN"+str(count)+")\n@SP\nA=M-1\n"
                             "D=M\nA=A-1\nD=M-D\n@TEMP\nM=-1\n@FINISH"+str(count)+"\nD;JGT\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n"
                             "(SECONDNEGFIRSTNONNEG"+str(count)+")\n@TEMP\nM=-1\n@FINISH"+str(count)+"\n0;JMP\n(SECONDNONNEGFIRSTNEG"+str(count)+")\n@TEMP\nM=0\n"
                             "@FINISH"+str(count)+"\n0;JMP\n(FINISH"+str(count)+")\n@TEMP\nD=M\n@SP\nA=M\nA=A-1\nA=A-1\nM=D\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[5]:
            # lt: mirror image of gt (D;JLT, mixed-sign results inverted).
            self.ofile.write("@SP\nA=M-1\nA=A-1\nD=M\n@FIRSTNONNEG"+str(count)+"\nD;JGE\n@FIRSTNEG"+str(count)+"\n0;JMP\n(FIRSTNONNEG"+str(count)+")\n"
                             "@SP\nA=M-1\nD=M\n@SAMESIGN"+str(count)+"\nD;JGE\n@SECONDNEGFIRSTNONNEG"+str(count)+"\n0;JMP\n(FIRSTNEG"+str(count)+")\n@SP\n"
                             "A=M-1\nD=M\n@SECONDNONNEGFIRSTNEG"+str(count)+"\nD;JGE\n@SAMESIGN"+str(count)+"\n0;JMP\n(SAMESIGN"+str(count)+")\n@SP\nA=M-1\n"
                             "D=M\nA=A-1\nD=M-D\n@TEMP\nM=-1\n@FINISH"+str(count)+"\nD;JLT\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n"
                             "(SECONDNEGFIRSTNONNEG"+str(count)+")\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n(SECONDNONNEGFIRSTNEG"+str(count)+")\n@TEMP\nM=-1\n"
                             "@FINISH"+str(count)+"\n0;JMP\n(FINISH"+str(count)+")\n@TEMP\nD=M\n@SP\nA=M\nA=A-1\nA=A-1\nM=D\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[6]:
            # and: bitwise AND of the two topmost values
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=D&M\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[7]:
            # or: bitwise OR of the two topmost values
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=D|M\n@SP\nM=M-1\n")
        elif s == self.ArithmeticCommand[8]:
            # not: bitwise NOT of the top of the stack, in place
            self.ofile.write("@SP\nA=M\nA=A-1\nM=!M\n")
        # Advance so the next comparison gets fresh labels.
        self.count += 1

    def WritePushPop(self, command, segment, index):
        """
        if the commmand is push or pop, this function is called and writes the code
        :param command: push/pop
        :param segment: where to or from where
        :param index: which index of the segment
        :return:
        """
        if command == 'C_PUSH':
            self.push(segment, index)
        elif command == 'C_POP' and segment != 'const':
            self.pop(segment, index)

    def pop(self, segment, index):
        """
        writes the pop command to the output file
        :param segment: where to or from where
        :param index: which index of the segment
        :return:
        """
        if segment == 'static':
            # Static variables are named <FileName>.<index>.
            self.ofile.write("@SP\nA=M-1\nD=M\n@" + str(self.cur_fileName)+"."+str(index)+"\nM=D\n@SP\nM=M-1\n")
        elif segment == 'local':
            # Compute LCL+index into the scratch symbol TMP, then store the
            # popped value there.
            self.ofile.write("@"+str(index)+"\nD=A\n@LCL\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'argument':
            self.ofile.write("@"+str(index)+"\nD=A\n@ARG\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'this':
            self.ofile.write("@"+str(index)+"\nD=A\n@THIS\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'that':
            self.ofile.write("@"+str(index)+"\nD=A\n@THAT\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'temp':
            # temp segment lives at fixed RAM addresses 5..12.
            # NOTE(review): this emits "@+<n>" (with a leading '+'), unlike
            # push() which emits "@<n>"; some Hack assemblers may reject the
            # '+' -- confirm against the assembler in use.
            mem = int(index) + 5
            self.ofile.write("@SP\nA=M-1\nD=M\n@+" + str(mem) + "\nM=D\n@SP\nM=M-1\n")
        elif segment == 'pointer':
            # pointer 0/1 map to RAM[3]/RAM[4] (THIS/THAT).
            # NOTE(review): same "@+<n>" oddity as the temp branch above.
            mem = int(index) + 3
            self.ofile.write("@SP\nA=M-1\nD=M\n@+" + str(mem) + "\nM=D\n@SP\nM=M-1\n")

    def push(self, segment, index):
        """
        writes the push command to the output file
        :param segment: where to or from where
        :param index: which index of the segment
        :return:
        """
        if segment == 'static':
            self.ofile.write("@" + str(self.cur_fileName)+"."+str(index)+"\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'constant':
            # Push the literal value itself.
            self.ofile.write("@" + str(index) + "\nD=A\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'local':
            self.ofile.write("@" + str(index) + "\nD=A\n@LCL\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'argument':
            self.ofile.write("@" + str(index) + "\nD=A\n@ARG\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'this':
            self.ofile.write("@" + str(index) + "\nD=A\n@THIS\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'that':
            self.ofile.write("@" + str(index) + "\nD=A\n@THAT\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'temp':
            # temp segment lives at fixed RAM addresses 5..12.
            mem = int(index) + 5
            self.ofile.write("@" + str(mem) + "\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'pointer':
            # pointer 0/1 map to RAM[3]/RAM[4] (THIS/THAT).
            mem = int(index) + 3
            self.ofile.write("@" + str(mem) + "\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")

    def writeGoto(self, label):
        """
        writes the goto command
        :param label: the label to jump to (scoped by the current function)
        :return:
        """
        self.ofile.write("@" + self.curFuncName+"$"+label + "\n0;JMP\n")

    def writeIf(self, label):
        """
        writes the if command
        :param label: the label to jump to; jumps when the popped value != 0
        :return:
        """
        self.ofile.write("@SP\nM=M-1\nA=M\nD=M\n@" + self.curFuncName+"$"+label + "\nD;JNE\n")

    def writeLabel(self, label):
        """
        writes the label command
        :param label: the label itself (emitted as <function>$<label>)
        :return:
        """
        self.ofile.write("("+self.curFuncName+"$"+label + ")\n")

    def writeInit(self):
        """
        writes the bootstrap code: SP = 256, then transfer control to
        Sys.init via the standard calling convention
        :return:
        """
        self.ofile.write("@256\nD=A\n@SP\nM=D\n")
        self.writeCall("Sys.init", "0")

    def writeCall(self, functionName, numArgs):
        """
        writes the call command to the output file
        :param functionName: the func name
        :param numArgs: number of args the function expects to get
        :return:
        """
        # Push the return address, then the caller's LCL/ARG/THIS/THAT frame.
        self.ofile.write("@return$"+functionName+"."+str(self.countCall)+"\nD=A\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@LCL\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@ARG\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@THIS\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@THAT\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        # ARG = SP - numArgs - 5 (points at the first pushed argument).
        i = int(numArgs)+5
        self.ofile.write("@"+str(i)+"\nD=A\n@SP\nD=M-D\n@ARG\nM=D\n")
        # LCL = SP, then jump to the callee and plant the return label.
        self.ofile.write("@SP\nD=M\n@LCL\nM=D\n")
        self.ofile.write("@"+functionName+"\n0;JMP\n")
        self.ofile.write("(return$"+functionName+"."+str(self.countCall)+")\n")
        self.countCall += 1

    def writeReturn(self):
        """
        writes the commands that should be written while 'return' is invoked
        :return:
        """
        # FRAME = LCL; RET = *(FRAME - 5) (saved return address).
        self.ofile.write("@LCL\nD=M\n@FRAME\nM=D\n")
        self.ofile.write("@5\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@RET\nM=D\n")
        # *ARG = pop() (place the return value for the caller), SP = ARG + 1.
        self.ofile.write("@SP\nM=M-1\n@SP\nA=M\nD=M\n@ARG\nA=M\nM=D\n")
        self.ofile.write("@ARG\nD=M+1\n@SP\nM=D\n")
        # Restore THAT, THIS, ARG, LCL from the caller's saved frame.
        self.ofile.write("@1\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@THAT\nM=D\n")
        self.ofile.write("@2\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@THIS\nM=D\n")
        self.ofile.write("@3\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@ARG\nM=D\n")
        self.ofile.write("@4\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@LCL\nM=D\n")
        # goto RET
        self.ofile.write("@RET\nA=M\n0;JMP\n")

    def writeFunction(self, f, k):
        """
        writes the function command when a function label is shown
        :param f: the function name
        :param k: number of local variables to initialise to 0
        :return:
        """
        self.ofile.write("(" + f + ")\n")
        self.curFuncName = f
        # Initialise the k local variables by pushing k zeros.
        for i in range(int(k)):
            self.push('constant', 0)

    def close(self):
        """
        closes the main output file
        :return:
        """
        self.ofile.close()
| [
"noreply@github.com"
] | talashaked.noreply@github.com |
a930b53c0f8ebd9f8fefa2ec7b113c3b4b1fd605 | 152782c6c30fd7723204e1458546f8bc56a4f04c | /nvtabular/loader/tensorflow.py | 479f6ecc57998c12671a286700f1730d36e95563 | [
"Apache-2.0"
] | permissive | yingcanw/NVTabular | c09a6cecb84d97be094ad8ecbba3c9331cc03bb9 | 372e3bb1c8057aa497f8971466642170630571a4 | refs/heads/main | 2023-03-30T23:49:42.102664 | 2021-03-24T23:06:32 | 2021-03-24T23:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,674 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import tensorflow as tf
from nvtabular.io.dataset import Dataset
from nvtabular.loader.backend import DataLoader
from nvtabular.loader.tf_utils import configure_tensorflow, get_dataset_schema_from_feature_columns
from nvtabular.ops import _get_embedding_order
from_dlpack = configure_tensorflow()
def _validate_dataset(paths_or_dataset, batch_size, buffer_size, engine, reader_kwargs):
    """Normalize ``paths_or_dataset`` into an NVTabular ``Dataset``.

    A ``Dataset`` instance is passed through untouched; a string is treated
    as a glob pattern and a non-string iterable as an explicit file list.
    The ``buffer_size`` value is folded into ``reader_kwargs`` either as a
    row count (``batch_size``) or as a memory fraction
    (``part_mem_fraction``).

    TODO: put this in parent class and allow torch dataset to leverage as well?
    """
    # Already a Dataset: nothing to validate.
    if isinstance(paths_or_dataset, Dataset):
        return paths_or_dataset

    if isinstance(paths_or_dataset, str):
        files = tf.io.gfile.glob(paths_or_dataset)
        empty_msg = "Couldn't find file pattern {} in directory {}".format(
            *os.path.split(paths_or_dataset)
        )
    else:
        # TODO: some checking around attribute error here?
        files = list(paths_or_dataset)
        empty_msg = "paths_or_dataset list must contain at least one filename"

    assert isinstance(files, list)
    if not files:
        raise ValueError(empty_msg)

    # Fold the buffer size into the reader kwargs.  Note the caller's dict
    # (when non-empty) is mutated on purpose, matching historical behavior.
    # TODO: IMPORTANT -- should we divide everything by 3 to account for
    # extra copies laying around due to asynchronicity?
    reader_kwargs = reader_kwargs or {}
    if buffer_size >= 1:
        # buffer_size is a row count (possibly a multiple of batch_size).
        reader_kwargs["batch_size"] = (
            int(batch_size * buffer_size) if buffer_size < batch_size else buffer_size
        )
    else:
        # buffer_size < 1 is a fraction of GPU memory per partition.
        reader_kwargs["part_mem_fraction"] = buffer_size
    return Dataset(files, engine=engine, **reader_kwargs)
def _validate_schema(feature_columns, cat_names, cont_names):
_uses_feature_columns = feature_columns is not None
_uses_explicit_schema = (cat_names is not None) or (cont_names is not None)
if _uses_feature_columns and _uses_explicit_schema:
raise ValueError(
"Passed `feature_column`s and explicit column names, must be one or the other"
)
elif _uses_feature_columns:
return get_dataset_schema_from_feature_columns(feature_columns)
elif _uses_explicit_schema:
cat_names = cat_names or []
cont_names = cont_names or []
return cat_names, cont_names
else:
raise ValueError(
"Must either pass a list of TensorFlow `feature_column`s "
"or explicit `cat_name` and `cont_name` column name lists."
)
class KerasSequenceLoader(tf.keras.utils.Sequence, DataLoader):
    """
    Infinite generator used to asynchronously iterate through CSV or Parquet
    dataframes on GPU by leveraging an NVTabular `Dataset`. Applies preprocessing
    via NVTabular `Workflow` objects and outputs tabular dictionaries of TensorFlow
    Tensors via `dlpack <https://github.com/dmlc/dlpack>`_. Useful for training tabular models
    built in Keras and trained via
    `tf.keras.Model.fit <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`_.

    The data loading scheme is implemented by loading, preprocessing, and
    batching data in an asynchronous thread. The amount of randomness in
    shuffling is controlled by the `buffer_size` and `parts_per_chunk`
    kwargs. At load time, sub-chunks of data with size controlled by
    `buffer_size` are loaded from random partitions in the dataset,
    and `parts_per_chunk` of them are concatenated into a single chunk,
    shuffled, and split into batches. This means that each chunk has
    `buffer_size*parts_per_chunk` rows, and due to the asynchronous
    nature of the dataloader that means there are, including the batch
    being processed by your network, `3*buffer_size*parts_per_chunk`
    rows of data in GPU memory at any given time. This means that
    for a fixed memory budget, using more `parts_per_chunk` will
    come at the expense of smaller `buffer_size`, increasing the number
    of reads and reducing throughput. The goal should be to maximize the
    total amount of memory utilized at once without going OOM and with
    the fewest number of reads to meet your epoch-level randomness needs.

    An important thing to note is that TensorFlow's default behavior
    is to claim all GPU memory for itself at initialization time,
    which leaves none for NVTabular to load or preprocess data.
    As such, we attempt to configure TensorFlow to restrict
    its memory allocation on a given GPU using the environment variables
    `TF_MEMORY_ALLOCATION` and `TF_VISIBLE_DEVICE`. If `TF_MEMORY_ALLOCATION < 1`,
    it will be assumed that this refers to a fraction of free GPU
    memory on the given device. Otherwise, it will refer to an explicit
    allocation amount in MB. `TF_VISIBLE_DEVICE` should be an integer GPU
    index.

    Iterator output is of the form `(dict(features), list(labels))`,
    where each element of the features dict is a
    `feature_name: feature_tensor` and each element of the labels
    list is a tensor, and all tensors are of shape `(batch_size, 1)`.
    Note that this means vectorized continuous and multi-hot categorical
    features are not currently supported.
    The underlying NVTabular `Dataset` object is stored in the `data`
    attribute, and should be used for updating NVTabular `Workflow`
    statistics::

        workflow = nvt.Workflow(...)
        dataset = KerasSequenceLoader(...)
        workflow.update_stats(dataset.data.to_iter(), record_stats=True)

    Parameters
    -------------
    - paths_or_dataset: str or list(str)
        Either a string representing a file pattern (see `tf.glob` for
        pattern rules), a list of filenames to be iterated through, or
        a Dataset object, in which case `buffer_size`, `engine`, and
        `reader_kwargs` will be ignored
    - batch_size: int
        Number of samples to yield at each iteration
    - label_names: list(str)
        Column name of the target variable in the dataframe specified by
        `paths_or_dataset`
    - feature_columns: list(tf.feature_column) or None
        A list of TensorFlow feature columns representing the inputs
        exposed to the model to be trained. Columns with parent columns
        will climb the parent tree, and the names of the columns in the
        unique set of terminal columns will be used as the column names.
        If left as None, must specify `cat_names` and `cont_names`
    - cat_names: list(str) or None
        List of categorical column names. Ignored if `feature_columns` is
        specified
    - cont_names: list(str) or None
        List of continuous column names. Ignored if `feature_columns` is
        specified
    - engine: {'csv', 'parquet', None}, default None
        String specifying the type of read engine to use. If left as `None`,
        will try to infer the engine type from the file extension.
    - shuffle: bool, default True
        Whether to shuffle chunks of batches before iterating through them.
    - buffer_size: float or int
        If `0 < buffer_size < 1`, `buffer_size` will refer to the fraction of
        total GPU memory to occupy with a buffered chunk. If `1 < buffer_size <
        batch_size`, the number of rows read for a buffered chunk will
        be equal to `int(buffer_size*batch_size)`. Otherwise, if `buffer_size >
        batch_size`, `buffer_size` rows will be read in each chunk (except for
        the last chunk in a dataset, which will, in general, be smaller).
        Larger chunk sizes will lead to more efficiency and randomness,
        but require more memory.
    - devices: None
        Which GPU devices to load from. Ignored for now
    - parts_per_chunk: int
        Number of dataset partitions with size dictated by `buffer_size`
        to load and concatenate asynchronously. More partitions leads to
        better epoch-level randomness but can negatively impact throughput
    - reader_kwargs: dict
        extra kwargs to pass when instantiating the underlying
        `nvtabular.Dataset`
    """

    # Consumed by the DataLoader backend; presumably enables per-row nnz
    # handling for list (multi-hot) columns -- confirm against backend.py.
    _use_nnz = True

    def __init__(
        self,
        paths_or_dataset,
        batch_size,
        label_names,
        feature_columns=None,
        cat_names=None,
        cont_names=None,
        engine=None,
        shuffle=True,
        buffer_size=0.1,
        devices=None,
        parts_per_chunk=1,
        reader_kwargs=None,
    ):
        dataset = _validate_dataset(
            paths_or_dataset, batch_size, buffer_size, engine, reader_kwargs
        )
        cat_names, cont_names = _validate_schema(feature_columns, cat_names, cont_names)

        # sort the columns to avoid getting incorrect output
        # (https://github.com/NVIDIA/NVTabular/issues/412)
        cat_names = _get_embedding_order(cat_names)
        cont_names = _get_embedding_order(cont_names)

        assert devices is None or len(devices) == 1  # TODO: figure out multi-gpu support
        devices = devices or [0]
        DataLoader.__init__(
            self,
            dataset,
            cat_names,
            cont_names,
            label_names,
            batch_size,
            shuffle,
            parts_per_chunk=parts_per_chunk,
            devices=devices,
        )

    def __len__(self):
        """
        recreating since otherwise Keras yells at you
        """
        # TODO: what's a better way to do this inheritance
        # of the appropriate methods? A Metaclass?
        return DataLoader.__len__(self)

    def __getitem__(self, idx):
        """
        implemented exclusively for consistency
        with Keras model.fit. Does not leverage
        passed idx in any way
        """
        try:
            return DataLoader.__next__(self)
        except StopIteration:
            # TODO: I would like to do a check for idx == 0
            # here, but that requires that tf.keras.Model.fit
            # be called with shuffle=False, and that seems
            # small enough that it would be too easy to miss
            # for many users. That said, blind reinitialization
            # is probably irresponsible, so worth thinking
            # of something better here
            DataLoader.__iter__(self)
            return DataLoader.__next__(self)

    @contextlib.contextmanager
    def _get_device_ctx(self, dev):
        # Intentionally a no-op context manager (yields the device id as-is):
        # with tf.device("/device:GPU:{}".format(dev)) as tf_device:
        #     # tf.device changes the cupy cuda device, which breaks us on multigpu
        #     # force cupy to still use the device we expect
        #     cupy.cuda.Device(dev).use()
        #     yield tf_device
        # commenting out since device statements cause
        # RuntimeErrors when exiting if two dataloaders
        # are running at once (e.g. train and validation)
        yield dev

    def _split_fn(self, tensor, idx, axis=0):
        # Thin wrapper so the backend can split tensors framework-agnostically.
        return tf.split(tensor, idx, axis=axis)

    @property
    def _LONG_DTYPE(self):
        # Integer dtype the backend should use for categorical columns.
        return tf.int64

    @property
    def _FLOAT32_DTYPE(self):
        # Float dtype the backend should use for continuous columns.
        return tf.float32

    def _to_tensor(self, gdf, dtype=None):
        """Convert a (cu)dataframe to a TF tensor via dlpack, zero-copy where
        possible.  Returns None for an empty frame."""
        if gdf.empty:
            return
        # checks necessary because of this bug
        # https://github.com/tensorflow/tensorflow/issues/42660
        if len(gdf.shape) == 1 or gdf.shape[1] == 1:
            dlpack = gdf.to_dlpack()
        elif gdf.shape[0] == 1:
            dlpack = gdf.values[0].toDlpack()
        else:
            # Transposed to work around the TF bug above; untransposed below.
            dlpack = gdf.values.T.toDlpack()

        # catch error caused by tf eager context
        # not being initialized
        try:
            x = from_dlpack(dlpack)
        except AssertionError:
            # Touch TF once to force eager-context initialization, then retry.
            tf.random.uniform((1,))
            x = from_dlpack(dlpack)

        if gdf.shape[0] == 1:
            # batch size 1 so got squashed to a vector
            x = tf.expand_dims(x, 0)
        elif len(gdf.shape) == 1 or len(x.shape) == 1:
            # sort of a generic check for any other
            # len(shape)==1 case, could probably
            # be more specific
            x = tf.expand_dims(x, -1)
        elif gdf.shape[1] > 1:
            # matrix which means we had to transpose
            # for the bug above, so untranspose
            x = tf.transpose(x)
        return x

    def _handle_tensors(self, cats, conts, labels):
        """Assemble the backend's raw tensors into the `(features, labels)`
        structure Keras expects: one dict entry per column, with list
        columns split into `<name>__values` / `<name>__nnzs` pairs."""
        X = {}
        for tensor, names in zip([cats, conts], [self.cat_names, self.cont_names]):
            lists = {}
            if isinstance(tensor, tuple):
                # A tuple means (dense tensor, dict of list-column tensors).
                tensor, lists = tensor
            names = [i for i in names if i not in lists]

            # break list tuples into two keys, with postfixes
            # TODO: better choices for naming?
            list_columns = [i for i in lists.keys()]
            for column in list_columns:
                values, nnzs = lists.pop(column)
                lists[column + "__values"] = values
                lists[column + "__nnzs"] = nnzs

            # now add in any scalar tensors
            if len(names) > 1:
                tensors = tf.split(tensor, len(names), axis=1)
                lists.update({name: x for name, x in zip(names, tensors)})
            elif len(names) == 1:
                lists[names[0]] = tensor
            X.update(lists)

        # TODO: use dict for labels as well?
        # would require output layers to match naming
        if len(self.label_names) > 1:
            labels = tf.split(labels, len(self.label_names), axis=1)
        return X, labels
class KerasSequenceValidater(tf.keras.callbacks.Callback):
    """Keras callback that runs a full pass over ``dataloader`` at the end of
    every epoch and records each model metric under a ``val_``-prefixed key
    in ``logs``."""

    _supports_tf_logs = True

    def __init__(self, dataloader):
        """
        :param dataloader: loader yielding ``(features, labels)`` validation
            batches (e.g. a KerasSequenceLoader)
        """
        self.dataloader = dataloader

    def on_epoch_end(self, epoch, logs=None):
        """Update and return ``logs`` with ``val_<metric>`` entries.

        FIX: the original signature used a mutable default (``logs={}``);
        since the dict is mutated below, it was shared across epochs and
        instances, accumulating stale values.  ``None`` plus an in-body
        default avoids that and matches the Keras Callback convention.
        """
        if logs is None:
            logs = {}
        for X, y_true in self.dataloader:
            y_pred = self.model(X)

            # TODO: how do we want to handle the multi-output case?
            for metric in self.model.metrics:
                metric.update_state(y_true, y_pred)

        for metric in self.model.metrics:
            logs["val_" + metric.name] = metric.result().numpy()
        return logs
| [
"noreply@github.com"
] | yingcanw.noreply@github.com |
754f3df17792c7911d0f110efed7a7832bb5de48 | f4b2d9a0de1f7a26a8fd5afe25446e62dfa0fdb5 | /Python/base_algorithm/base_sum.py | b3db43265b69011967ccd5ef53c5613268a1b43e | [] | no_license | Alexanderklau/LeetCode | e675425cca0b4e2e6f94d8c1ce6df92bbec32ac7 | 6090fa602ab29aef40d41661e473058eaaec490d | refs/heads/master | 2021-06-23T17:41:53.309882 | 2020-12-01T14:36:00 | 2020-12-01T14:36:00 | 148,267,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # coding: utf-8
__author__ = "lau.wenbo"
"""
高斯解法
"""
def sum_of_n(n):
the_sum = 0
for i in range(1, n+1):
the_sum = the_sum + i
return the_sum
print(sum_of_n(100)) | [
"429095816@qq.com"
] | 429095816@qq.com |
2611064c93404e81e4be7e68a57d13a328ad1024 | 948d3b8c03e2fecc4f852cd8b4120e1b3378bfaf | /API/PYTHON/django/blogTest/blogTest/settings.py | e3f303742bd651ae7b04c05744ae3c35b65c17f0 | [] | no_license | ezhuo/ezhuo.github.io | e370abb4bfbbfcc5750a5f9fafa2b995bb1d7d48 | 977f3ecdd5dee4eb0f10a42572aaecb335145313 | refs/heads/master | 2021-05-05T20:13:35.446537 | 2019-01-26T08:39:26 | 2019-01-26T08:39:26 | 115,300,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for blogTest project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key (and the database credentials below) are committed
# to the repository; rotate them and load from the environment before any
# production deployment.
SECRET_KEY = 'wua53do$*nz_0wof1gyk(1)=^+$9*_puhmz#s!e54*hpddki#('

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project app of this blog
    'news'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'blogTest.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Templates are looked up in <project>/templates in addition to each
        # app's own templates directory (APP_DIRS=True).
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'blogTest.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# MySQL backend with inline credentials -- see the security note above.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'django-blog',
        'USER': 'admin',
        'PASSWORD': 'dxinfo*dxinfo',
        'HOST': 'localhost',
        'PORT': '3306',
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
# Simplified-Chinese UI, Shanghai timezone.

LANGUAGE_CODE = 'zh-hans'

TIME_ZONE = 'Asia/Shanghai'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = '/static/'
"hi371@qq.com"
] | hi371@qq.com |
d9363243e94bbffc5dd60c4a6cb2b671408379da | 0b8f70df4468a24e3ab18b642d47772fbb0d5f10 | /Lista1/l1q26.py | cc6a05e918d9f3b9b91a5ed429402ebbe1e44592 | [] | no_license | lucasebs/EstruturaDeDados2017.1 | e2ab55e6dfd86d7da8b196e2bd1e22f08c2af737 | 9a8f8a0cd2a881db2705e6484efd7837d0f3e947 | refs/heads/master | 2020-12-13T12:53:28.453845 | 2017-10-02T15:53:11 | 2017-10-02T15:53:11 | 95,600,142 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from l1q25 import Pessoa
# Collect two Pessoa records from the console, then print them.
# NOTE(review): this script is Python 2 (`raw_input`); under Python 2 the
# bare `input()` calls eval() the typed text, so they only work for numeric
# entries -- confirm the intended interpreter.
pessoas = [None]*2
for i in range(0, len(pessoas)):
    # Prompts are Portuguese: first name, surname, CPF (Brazilian tax id),
    # year of birth.
    pNome = raw_input("Primeiro Nome: ")
    sNome = raw_input("Sobrenome: ")
    cpf = int(input("CPF: "))
    anoNasc = int(input("Ano de Nascimento: "))
    # Populate a fresh Pessoa field by field and store it in the list.
    pessoaTemp = Pessoa()
    pessoaTemp.primeiroNome = pNome
    pessoaTemp.sobrenome = sNome
    pessoaTemp.cpf = cpf
    pessoaTemp.anoNascimento = anoNasc
    pessoas[i] = pessoaTemp
# Print every collected record, numbered from 1.
for i in range(0, len(pessoas)):
    print(' ---- ' + str(i+1) + ' Pessoa' + ' ---- ')
    print(pessoas[i])
| [
"noreply@github.com"
] | lucasebs.noreply@github.com |
0598b8fd9500c32a0495c33197d6df04676bd050 | fe771c763cfad64820b6954f63999b325525d003 | /app/models.py | 8c8b83fe60c7167e30011de961e387c6654af341 | [
"MIT"
] | permissive | plenario/plenario | 69c5c1f87ce398a6c501a1aab8797bf539c9f0a6 | 0808cd90b88c37f11a40445bd200d4740dd4dfa9 | refs/heads/master | 2021-11-13T07:42:34.499848 | 2021-11-11T02:54:26 | 2021-11-11T02:54:26 | 97,568,258 | 68 | 14 | MIT | 2020-05-06T01:09:15 | 2017-07-18T07:33:59 | HTML | UTF-8 | Python | false | false | 1,273 | py | from app import db
from sqlalchemy.dialects.postgresql import JSON
import enum
class VoteType(enum.Enum):
    """The possible outcomes of a senator's vote (labels in Portuguese)."""
    # NOTE(review): `__tablename__` has no effect on a plain Enum -- it is
    # only meaningful on SQLAlchemy models; confirm whether it can be removed.
    __tablename__ = 'votetype'
    positive = "A favor"      # in favor
    negative = "Contra"       # against
    absence = "Ausência"      # absent
    abstention = "Abstenção"  # abstained
class Senator(db.Model):
    """A senator, with party/state affiliation and social-media handles."""
    __tablename__ = 'senator'
    id = db.Column(db.Integer, primary_key=True)
    # Full name; unique and indexed for fast lookup.
    name = db.Column(db.String(120), index=True, unique=True)
    party = db.Column(db.String(30), index=True)
    state = db.Column(db.String(5), index=True)
    description = db.Column(db.Text)
    # Presumably where this record's data came from -- confirm semantics.
    source = db.Column(db.String(120))
    # Social-media handles; each unique across senators.
    twitter = db.Column(db.String(120), unique=True)
    facebook = db.Column(db.String(120), unique=True)
    instagram = db.Column(db.String(120), unique=True)
class Proposition(db.Model):
    """A proposition that senators vote on."""
    __tablename__ = 'proposition'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), index=True, unique=True)
    description = db.Column(db.Text)
    # Presumably the date the proposition was voted on -- confirm semantics.
    date = db.Column(db.DateTime)
class Vote(db.Model):
    """One senator's vote on one proposition."""
    __tablename__ = 'vote'
    id = db.Column(db.Integer, primary_key=True)
    vote = db.Column(db.Enum(VoteType))
    # Foreign keys to the voting senator and the proposition voted on.
    senator = db.Column(db.Integer, db.ForeignKey('senator.id'))
    proposition = db.Column(db.Integer, db.ForeignKey('proposition.id'))
| [
"schwendler@gmail.com"
] | schwendler@gmail.com |
57869410b2cbb065c033061ca98fbdb7a1a23ac8 | e0034258aa9d279edf2a1d61a9cb7734f3b785d8 | /data_gui.py | eb26a8f11eaec80e24ea1fb20eb13ed70b8a4aef | [] | no_license | bernduwiesner/GenLotteryQt5 | b9aaaaa6f5bbdc2ad500e53178bd5e0fab1b96ff | 35a25b9b53f1543f5d66f6d05710965186934ece | refs/heads/master | 2020-06-24T16:23:49.233158 | 2019-07-26T12:44:09 | 2019-07-26T12:44:09 | 199,014,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | from PyQt5.QtWidgets import (QDesktopWidget,
QDialog,
QLabel,
QLayout,
QVBoxLayout,
QPushButton,
)
from PyQt5.QtCore import Qt
from common import ResultsData
class ResultsWindow(QDialog):
    """Modal dialog that displays generated or stored lottery numbers.

    Builds itself from a ResultsData object: a header line, one label per
    line of results, and an OK button that closes the dialog.
    """
    def __init__(self, parent, results):
        """Create the dialog and populate it from *results*.

        :param parent: parent widget for the dialog
        :param results: ResultsData holding the numbers to display
        """
        QDialog.__init__(self, parent)
        self.base_layout = QVBoxLayout(self)
        # NOTE(review): sizeConstraint is normally set via
        # setSizeConstraint(); plain attribute assignment may have no
        # effect on the layout -- confirm against the Qt bindings in use.
        self.base_layout.sizeConstraint = QLayout.SetDefaultConstraint
        self.base_layout.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        self.init_ui(results)

    def init_ui(self, results: ResultsData):
        """Create the controls on the frame.

        :param results: the data to display
        :return: None
        """
        def unwrap(some: str) -> str:
            """Remove the characters "[", "]" and "'" from a string.

            :param some: the string to process
            :return: *some* with the bracket/quote characters stripped
            """
            return some.translate({ord(i): None for i in "[]'"})
        row: int = 0
        action: str = "Generated " if results.generated else "Stored "
        label: str = action + results.lottery_type_name + " Lottery numbers"
        type_lbl = QLabel(label)
        # NOTE(review): the second positional argument of
        # QBoxLayout.addWidget is the *stretch factor*, not a row index --
        # these `row`/`line + 1` values look intended as indices; confirm.
        self.base_layout.addWidget(type_lbl, row)
        row += 1
        for line in range(results.get_data_length()):
            data_item = unwrap(results.get_data_item(line))
            # Lines are numbered from 1 for display, zero-padded to 2 digits.
            label = QLabel(f"Line {line + 1:02d}: " + data_item)
            self.base_layout.addWidget(label, line + 1)
        row += results.get_data_length()
        ok_btn = QPushButton("OK", self)
        ok_btn.resize(ok_btn.sizeHint())
        ok_btn.clicked.connect(self.close_window)
        self.base_layout.addWidget(ok_btn, row)
        # MainWindow
        self.setLayout(self.base_layout)
        # Geometry is set to zeros and then immediately re-centred below.
        win_x, win_y, win_width, win_height = (0, 0, 0, 0)
        self.setGeometry(win_x, win_y, win_width, win_height)
        self.setWindowTitle("Generated Results")
        self.centre()
        self.show()

    def centre(self) -> None:
        """Centre the window on the screen.

        :return: None
        """
        geometry = self.frameGeometry()
        centre = QDesktopWidget().availableGeometry().center()
        geometry.moveCenter(centre)
        self.move(geometry.topLeft())

    def close_window(self, event) -> None:
        """Close the dialog (connected to the OK button's clicked signal).

        :param event: not used
        :return: None
        """
        self.close()
| [
"bernduwiesner@yahoo.co.uk"
] | bernduwiesner@yahoo.co.uk |
234615d0dfa6ec1b4bb50bbc470a76d507001e80 | 58be8fc8996b98b624fb9784527b2dc588d4587c | /pybamm/models/submodels/active_material/stress_driven_active_material.py | 61fbe41ec0392883bec8138e4988b5b026f60706 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | gwhite09/PyBaMM | b9f7b6b06bb37b6819e306356f5b8e90df8affff | 033ad6384582a3e5d29ad48eeaa7fe92b98e2a29 | refs/heads/main | 2023-08-22T19:49:26.112089 | 2021-09-17T17:02:34 | 2021-09-17T17:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,401 | py | #
# Class for varying active material volume fraction, driven by stress
#
import pybamm
from .base_active_material import BaseModel
class StressDriven(BaseModel):
    """Submodel for varying active material volume fraction, driven by stress, from
    [1]_ and [2]_.

    Parameters
    ----------
    param : parameter class
        The parameters to use for this submodel
    domain : str
        The domain of the model either 'Negative' or 'Positive'
    options : dict
        Additional options to pass to the model
    x_average : bool
        Whether to use x-averaged variables (SPM, SPMe, etc) or full variables (DFN)

    **Extends:** :class:`pybamm.active_material.BaseModel`

    References
    ----------
    .. [1] Ai, W., Kraft, L., Sturm, J., Jossen, A., & Wu, B. (2019). Electrochemical
           Thermal-Mechanical Modelling of Stress Inhomogeneity in Lithium-Ion Pouch
           Cells. Journal of The Electrochemical Society, 167(1), 013512.
    .. [2] Reniers, J. M., Mulder, G., & Howey, D. A. (2019). Review and performance
           comparison of mechanical-chemical degradation models for lithium-ion
           batteries. Journal of The Electrochemical Society, 166(14), A3189.
    """

    def __init__(self, param, domain, options, x_average):
        super().__init__(param, domain, options=options)
        pybamm.citations.register("Reniers2019")
        self.x_average = x_average

    def get_fundamental_variables(self):
        """Define the active material volume fraction as a state variable.

        X-averaged models carry a single value per current-collector point,
        broadcast over the electrode; full models carry a field over the
        electrode domain.
        """
        domain = self.domain.lower() + " electrode"
        if self.x_average is True:
            eps_solid_xav = pybamm.Variable(
                "X-averaged " + domain + " active material volume fraction",
                domain="current collector",
            )
            eps_solid = pybamm.PrimaryBroadcast(eps_solid_xav, domain)
        else:
            eps_solid = pybamm.Variable(
                self.domain + " electrode active material volume fraction",
                domain=domain,
                auxiliary_domains={"secondary": "current collector"},
            )
        variables = self._get_standard_active_material_variables(eps_solid)
        return variables

    def get_coupled_variables(self, variables):
        """Compute the rate of change of active material volume fraction.

        Loss of active material (LAM) is driven by the hydrostatic stress at
        the particle surface, using the tangential and radial surface
        stresses already present in ``variables``.
        """
        # obtain the rate of loss of active materials (LAM) by stress
        # This is loss of active material model by mechanical effects
        if self.x_average is True:
            stress_t_surf = variables[
                "X-averaged "
                + self.domain.lower()
                + " particle surface tangential stress"
            ]
            stress_r_surf = variables[
                "X-averaged " + self.domain.lower() + " particle surface radial stress"
            ]
        else:
            stress_t_surf = variables[
                self.domain + " particle surface tangential stress"
            ]
            stress_r_surf = variables[self.domain + " particle surface radial stress"]
        # Domain-specific LAM parameters (proportionality, critical stress,
        # and exponent).
        if self.domain == "Negative":
            beta_LAM = self.param.beta_LAM_n
            stress_critical = self.param.stress_critical_n
            m_LAM = self.param.m_LAM_n
        else:
            beta_LAM = self.param.beta_LAM_p
            stress_critical = self.param.stress_critical_p
            m_LAM = self.param.m_LAM_p
        # Hydrostatic surface stress: mean of the three principal stresses
        # (one radial, two tangential).
        stress_h_surf = (stress_r_surf + 2 * stress_t_surf) / 3
        # Compressive stress makes no contribution (zero out negative values).
        stress_h_surf *= stress_h_surf > 0
        # assuming the minimum hydrostatic stress is zero for full cycles
        stress_h_surf_min = stress_h_surf * 0
        j_stress_LAM = (
            -(beta_LAM / self.param.t0_cr)
            * ((stress_h_surf - stress_h_surf_min) / stress_critical) ** m_LAM
        )
        deps_solid_dt = j_stress_LAM
        variables.update(
            self._get_standard_active_material_change_variables(deps_solid_dt)
        )
        return variables

    def set_rhs(self, variables):
        """ODE: d(eps_solid)/dt equals the LAM rate computed above."""
        Domain = self.domain + " electrode"
        if self.x_average is True:
            eps_solid = variables[
                "X-averaged " + Domain.lower() + " active material volume fraction"
            ]
            deps_solid_dt = variables[
                "X-averaged "
                + Domain.lower()
                + " active material volume fraction change"
            ]
        else:
            eps_solid = variables[Domain + " active material volume fraction"]
            deps_solid_dt = variables[
                Domain + " active material volume fraction change"
            ]
        self.rhs = {eps_solid: deps_solid_dt}

    def set_initial_conditions(self, variables):
        """Initialise from the parameter set's electrode volume fraction."""
        # NOTE(review): if self.domain were neither 'Negative' nor 'Positive'
        # eps_solid_init would be unbound (NameError); presumably the base
        # class restricts domain to those two values -- confirm.
        if self.domain == "Negative":
            x_n = pybamm.standard_spatial_vars.x_n
            eps_solid_init = self.param.epsilon_s_n(x_n)
        elif self.domain == "Positive":
            x_p = pybamm.standard_spatial_vars.x_p
            eps_solid_init = self.param.epsilon_s_p(x_p)
        if self.x_average is True:
            eps_solid_xav = variables[
                "X-averaged "
                + self.domain.lower()
                + " electrode active material volume fraction"
            ]
            self.initial_conditions = {eps_solid_xav: pybamm.x_average(eps_solid_init)}
        else:
            eps_solid = variables[
                self.domain + " electrode active material volume fraction"
            ]
            self.initial_conditions = {eps_solid: eps_solid_init}
| [
"valentinsulzer@hotmail.com"
] | valentinsulzer@hotmail.com |
d2244420117df6de6ae154e421a909ba1ed92d7f | 2132a3c68624e545858e343bb11768e69cf84e28 | /battlesystem2.py | 22dec3fc23dea9d40b3e6ecba18a16001836ef26 | [] | no_license | tovoispovo/Text-based-RPG-combat-system | cbbf6678ade4e8663a993616fc8d739c647bb2e6 | 5614312727c7faa69240b7b7fb79770ece0b8a6f | refs/heads/master | 2022-01-02T08:41:08.299705 | 2018-02-20T05:10:51 | 2018-02-20T05:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,709 | py | import random
class Dice:
    """Dice roller backed by the module-level `random` generator."""

    def d10(self):
        """Roll a ten-sided die; returns an int in [0, 9] (zero-based)."""
        return random.randint(0, 9)

    def d20(self):
        """Roll a twenty-sided die; returns an int in [1, 20]."""
        return random.randint(1, 20)
class Player:
    """A combatant (the player or an enemy goblin).

    Attributes mirror the constructor arguments:
        name:      display name.
        health:    current hit points.
        attack:    base attack damage.
        heal:      HP restored by one heal.  This is a *numeric attribute*,
                   not a method -- the game script reads it directly
                   (e.g. ``player.heal`` in damage/heal arithmetic).
        maxhealth: upper bound for ``health``.
        healed:    True once the once-per-combat heal has been used.
        enemy:     True for computer-controlled combatants.
    """

    def __init__(self, name, health, attack, heal, maxhealth, healed, enemy):
        # Bug fix: the original constructor ignored every argument except
        # `name` and hard-coded the player's stats (health=100, attack=15,
        # heal=50, ...), forcing callers to overwrite attributes after
        # construction.  Honor the arguments instead; existing call sites
        # already pass the intended values.
        self.name = name
        self.health = health
        self.attack = attack
        self.heal = heal
        self.maxhealth = maxhealth
        self.healed = healed
        self.enemy = enemy

    def apply_heal(self):
        """Restore ``self.heal`` HP, capped at ``maxhealth``, and mark the
        heal as used.

        Replaces the original ``heal`` method, which was shadowed by the
        numeric ``self.heal`` instance attribute (and therefore never
        callable) and mistakenly operated on the global ``player`` instead
        of ``self``.

        :return: the combatant's health after healing.
        """
        if self.health < self.maxhealth:
            self.health = min(self.health + self.heal, self.maxhealth)
            self.healed = True
        return self.health
# --- Character and encounter setup -----------------------------------------
name = input("What is your charaters name? ")
roll = Dice()
# NOTE(review): Player.__init__ (above) ignores every argument except `name`
# and hard-codes the hero's stats, so the positional values passed for the
# goblins have no effect -- the attribute re-assignments after each goblin
# are what actually give it its stats.  Keep both in sync.
player = Player(
    name,
    100,
    15,
    50,
    100,
    False,
    False
)
goblinone = Player(
    'Trogd0r',
    25,
    20,
    10,
    25,
    False,
    True
)
# Overwrite the hard-coded constructor defaults with this goblin's stats.
goblinone.health = 25
goblinone.attack = 20
goblinone.maxhealth = 25
goblintwo = Player(
    'Termy Nator',
    50,
    5,
    10,
    50,
    False,
    True
)
goblintwo.health = 50
goblintwo.attack = 5
goblintwo.maxhealth = 50
goblinthree = Player(
    'St3v3 J0bzzz',
    75,
    10,
    10,
    75,
    False,
    True
)
goblinthree.health = 75
goblinthree.attack = 10
goblinthree.maxhealth = 75
goblinfour = Player(
    'Cassy Nova',
    100,
    8,
    50,
    100,
    False,
    True
)
goblinfour.health = 100
goblinfour.attack = 8
goblinfour.maxhealth = 100
#Greets player
print("Nice to meet you, " + player.name + ".")
print("4 wild goblins appear to take the sanctity of your holes away from you!!!")
print("IT'S TIME TO RUMBLE, MOTHERCLUCKER!")
# Combat-state flags: the loop runs until all goblins or the player die.
combatactive = True
healcriteria = True
#Start of the combat loop
#
# Fixes versus the original:
#   * Choosing "y" to heal now actually restores HP.  The original executed
#     the bare expression `player.heal` (an attribute access with no effect)
#     and printed an uncapped `health + heal` total without applying it.
#   * Invalid attack input no longer hangs: the prompt was outside the retry
#     loop, so any input not in '1'-'4' printed "invalid syntax" forever.
#   * The four copy-pasted goblin branches are collapsed into loops over a
#     list; printed output is unchanged.
goblins = [goblinone, goblintwo, goblinthree, goblinfour]
while combatactive == True:
    print("Current HP: " + str(player.health) + "/" + str(player.maxhealth))
    # Once-per-combat heal prompt (skipped after the heal has been used).
    while player.healed == False and healcriteria == True:
        heal = input("Would yee like to heal? Caution: may only be used once during combat. y/n :")
        if heal == "y":
            # Apply the heal, capped at maxhealth, and report the new total.
            player.health = min(player.health + player.heal, player.maxhealth)
            player.healed = True
            print(player.name + " healed for " + str(player.heal) + "HP for a total of " + str(player.health) + "HP.")
        elif heal == "n":
            player.healed = False
            break
        else:
            print("Invalid syntax. y/n only.")
    healcriteria = True
    # Show each goblin's status and menu number.
    for number, goblin in enumerate(goblins, start=1):
        if goblin.health > 0:
            print("(" + str(number) + ") for " + goblin.name + " " + str(goblin.health) + "/" + str(goblin.maxhealth) + "HP")
        else:
            print(goblin.name + " is dead.")
    # Player attack: re-prompt until a recognised target number is entered.
    # Attacking a corpse still consumes the turn (matching the original).
    attackable = ['1', '2', '3', '4']
    attackcomplete = False
    while attackcomplete == False:
        attack = input("Which foul beast shall yee attack? ")
        if attack not in attackable:
            print("invalid syntax. Please type 1, 2, 3 or 4.")
            continue
        target = goblins[int(attack) - 1]
        if target.health <= 0:
            print("You've attacked a corpse!")
        else:
            # Damage is base attack plus a d20 roll.
            smash = player.attack + roll.d20()
            target.health = target.health - smash
            print("Youve attacked " + target.name + " for " + str(smash) + "HP.")
        attackcomplete = True
    # Each surviving goblin attacks; a d20 roll above 10 hits.
    for goblin in goblins:
        if goblin.health > 0:
            print(goblin.name + " attacks!")
            x = roll.d20()
            if x > 10:
                print(goblin.name + " hits for " + str(goblin.attack) + " damage.")
                player.health = player.health - goblin.attack
            else:
                print(goblin.name + " misses.")
    # End combat if every goblin is dead ...
    if all(goblin.health <= 0 for goblin in goblins):
        combatactive = False
        print("YOU BEAT THE GOBLINS, FAM!")
    else:
        combatactive = True
    # ... or if the player is dead.
    if player.health <= 0:
        print("Oh heck, you freakin died")
        combatactive = False
| [
"noreply@github.com"
] | tovoispovo.noreply@github.com |
f14308e3fd66781d5cbdd827da378221a727e027 | bccbb5244947574c63992dc812b5ef44519ec161 | /tests/test_command_runner.py | fcb536ca809e16f5103fd66573f5e2e7dd3eeea3 | [] | no_license | hal1932/pysvn | d4fab12dbb07838d947292146ca49e9a31119deb | a579744543765b574655377a2e1ada5be961e8d8 | refs/heads/master | 2020-03-14T06:35:46.835307 | 2018-05-01T16:17:10 | 2018-05-01T16:17:10 | 131,487,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # coding: utf-8
from __future__ import print_function, unicode_literals
import unittest as ut
import xml.etree.ElementTree as et
from svn.command_runner import CommandRunner
class TestCommandRunner(ut.TestCase):
    """Tests for svn.command_runner.CommandRunner.

    NOTE(review): test_run shells out to `svn` against a hard-coded,
    machine-specific working copy, hence the skip decorator.
    """
    def setUp(self):
        # Point the runner at a local Subversion working copy.
        # NOTE(review): absolute Windows path -- only valid on the original
        # author's machine.
        self.__runner = CommandRunner()
        self.__runner.current_directory = 'C:/Users/yuta/Desktop/subversion/trunk'

    def tearDown(self):
        pass

    # NOTE(review): unittest.skip is documented as taking a reason string
    # (@ut.skip("reason")); confirm this bare usage skips as intended on the
    # targeted Python version.
    @ut.skip
    def test_run(self):
        """`svn info --xml` succeeds and reports the expected URL and root."""
        result, out, err = self.__runner.run('info', ['--xml'])
        self.assertEqual(result, 0)
        self.assertEqual(err, '')
        root = et.fromstring(out)
        self.assertEqual(root.tag, 'info')
        entry = root.find('entry')
        self.assertEqual(entry.find('url').text, 'https://svn.apache.org/repos/asf/subversion/trunk')
        self.assertEqual(entry.find('wc-info/wcroot-abspath').text, 'C:/Users/yuta/Desktop/subversion/trunk')
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    ut.main()
| [
"yu.arai.19@gmail.com"
] | yu.arai.19@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.