Dataset schema (field, dtype, observed range):

| field | dtype | observed range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
---
repo_name: emersonccf/projeto_mozilla | path: /catalog/models/__init__.py | branch_name: refs/heads/main
blob_id: d06decf7dcf5e33ddd34594bdf89f8b32d5e6822 | directory_id: fd9c2b959724d65567876dec0abe93aaed7b7958
content_id: 088604bb05997808a69063f26a643f86eeb5527a | snapshot_id: a5a3010dbaf942f03d1abed77706c6a1e34d53cc
revision_id: aefe3d25439ecd0a2c3f6beb120da37cdfd1c9e4
detected_licenses: [] | license_type: no_license
visit_date: 2023-06-05T23:59:18.984438 | revision_date: 2021-06-25T03:29:30 | committer_date: 2021-06-25T03:29:30
github_id: 371171187 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 523 | extension: py
content:
from django.db import models
from django.utils import timezone  # unused import
from django.contrib.auth.models import User
from django.urls import reverse  # Used to generate URLs by reversing the URL patterns in Book
import uuid  # Used in BookInstance
LOAN_STATUS = (
('m', 'Manutenção'),
('e', 'Emprestado'),
('d', 'Disponível'),
('r', 'Reservado'),
)
from .genre import Genre
from .book import Book
from .bookinstance import BookInstance
from .author import Author
from .language import Language
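Editorial note: the `uuid` import and `LOAN_STATUS` above are consumed by the bookinstance module imported at the bottom. A minimal, hypothetical sketch of how such a model typically wires them in, reconstructed from the imports and comments, not taken from the repository:

```python
# Hypothetical sketch of catalog/models/bookinstance.py, inferred from the
# imports and comments above; the repository's actual file may differ.
class BookInstance(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4,
                          help_text='Unique ID for this particular copy')
    status = models.CharField(max_length=1, choices=LOAN_STATUS,
                              blank=True, default='m')
```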
authors: ["emecatarino@yahoo.com.br"] | author_id: emecatarino@yahoo.com.br

---
repo_name: modohyoung/madgraph-auto-model | path: /modelrepo/heftNLO/vertices.py | branch_name: refs/heads/master
blob_id: b270c02c44e1a61c3fe90af2a5606b7401d9980c | directory_id: 5f4a0e5ae73d1ec5701dbd5e1cddf673a838000a
content_id: ad39f23a52b81fdbc12fcd8d2e532faf9a9cf5fe | snapshot_id: e610569fe054b54bb2774ade91d3ff44dfc6ca04
revision_id: 796a8e6c2c6d628c5c971022595c31c18b406406
detected_licenses: [] | license_type: no_license
visit_date: 2020-12-11T05:47:22.054902 | revision_date: 2015-05-08T16:25:04 | committer_date: 2015-05-08T16:25:04
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 31128 | extension: py
content:
# This file was automatically created by FeynRules 1.7.55
# Mathematica version: 8.0 for Mac OS X x86 (64-bit) (October 6, 2011)
# Date: Wed 8 Aug 2012 14:16:24
from object_library import all_vertices, Vertex
import particles as P
import couplings as C
import lorentz as L
V_1 = Vertex(name = 'V_1',
particles = [ P.H, P.H, P.H, P.H ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_23})
V_2 = Vertex(name = 'V_2',
particles = [ P.H, P.H, P.phi0, P.phi0 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_21})
V_3 = Vertex(name = 'V_3',
particles = [ P.phi0, P.phi0, P.phi0, P.phi0 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_23})
V_4 = Vertex(name = 'V_4',
particles = [ P.H, P.H, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_21})
V_5 = Vertex(name = 'V_5',
particles = [ P.phi0, P.phi0, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_21})
V_6 = Vertex(name = 'V_6',
particles = [ P.phi__minus__, P.phi__minus__, P.phi__plus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_22})
V_7 = Vertex(name = 'V_7',
particles = [ P.H, P.H, P.H ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_69})
V_8 = Vertex(name = 'V_8',
particles = [ P.H, P.phi0, P.phi0 ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_68})
V_9 = Vertex(name = 'V_9',
particles = [ P.H, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_68})
V_10 = Vertex(name = 'V_10',
particles = [ P.A, P.A, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_6})
V_11 = Vertex(name = 'V_11',
particles = [ P.A, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_4})
V_12 = Vertex(name = 'V_12',
particles = [ P.A, P.A, P.H ],
color = [ '1' ],
lorentz = [ L.VVS3 ],
couplings = {(0,0):C.GC_1})
#V_13 = Vertex(name = 'V_13',
# particles = [ P.G, P.G, P.H ],
# color = [ 'Identity(1,2)' ],
# lorentz = [ L.VVS3 ],
# couplings = {(0,0):C.GC_13})
V_13 = Vertex(name = 'V_13',
particles = [ P.G, P.G, P.H ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.VVS4 ],
couplings = {(0,0):C.GC_13})
V_14 = Vertex(name = 'V_14',
particles = [ P.ghA, P.ghWm__tilde__, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_4})
V_15 = Vertex(name = 'V_15',
particles = [ P.ghA, P.ghWp__tilde__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_5})
V_16 = Vertex(name = 'V_16',
particles = [ P.ghWm, P.ghA__tilde__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_25})
V_17 = Vertex(name = 'V_17',
particles = [ P.ghWm, P.ghA__tilde__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_4})
V_18 = Vertex(name = 'V_18',
particles = [ P.ghWm, P.ghWm__tilde__, P.H ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_48})
V_19 = Vertex(name = 'V_19',
particles = [ P.ghWm, P.ghWm__tilde__, P.phi0 ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_47})
V_20 = Vertex(name = 'V_20',
particles = [ P.ghWm, P.ghWm__tilde__, P.A ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_5})
V_21 = Vertex(name = 'V_21',
particles = [ P.ghWm, P.ghWm__tilde__, P.Z ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_43})
V_22 = Vertex(name = 'V_22',
particles = [ P.ghWm, P.ghZ__tilde__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_62})
V_23 = Vertex(name = 'V_23',
particles = [ P.ghWm, P.ghZ__tilde__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_42})
V_24 = Vertex(name = 'V_24',
particles = [ P.ghWp, P.ghA__tilde__, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_24})
V_25 = Vertex(name = 'V_25',
particles = [ P.ghWp, P.ghA__tilde__, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_5})
V_26 = Vertex(name = 'V_26',
particles = [ P.ghWp, P.ghWp__tilde__, P.H ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_48})
V_27 = Vertex(name = 'V_27',
particles = [ P.ghWp, P.ghWp__tilde__, P.phi0 ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_49})
V_28 = Vertex(name = 'V_28',
particles = [ P.ghWp, P.ghWp__tilde__, P.A ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_4})
V_29 = Vertex(name = 'V_29',
particles = [ P.ghWp, P.ghWp__tilde__, P.Z ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_42})
V_30 = Vertex(name = 'V_30',
particles = [ P.ghWp, P.ghZ__tilde__, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_63})
V_31 = Vertex(name = 'V_31',
particles = [ P.ghWp, P.ghZ__tilde__, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_43})
V_32 = Vertex(name = 'V_32',
particles = [ P.ghZ, P.ghWm__tilde__, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_51})
V_33 = Vertex(name = 'V_33',
particles = [ P.ghZ, P.ghWm__tilde__, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_42})
V_34 = Vertex(name = 'V_34',
particles = [ P.ghZ, P.ghWp__tilde__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_50})
V_35 = Vertex(name = 'V_35',
particles = [ P.ghZ, P.ghWp__tilde__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_43})
V_36 = Vertex(name = 'V_36',
particles = [ P.ghZ, P.ghZ__tilde__, P.H ],
color = [ '1' ],
lorentz = [ L.UUS1 ],
couplings = {(0,0):C.GC_52})
V_37 = Vertex(name = 'V_37',
particles = [ P.G, P.G, P.h1 ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.VVS1 ],
couplings = {(0,0):C.GC_16})
V_38 = Vertex(name = 'V_38',
particles = [ P.ghG, P.ghG__tilde__, P.G ],
color = [ 'f(1,2,3)' ],
lorentz = [ L.UUV1 ],
couplings = {(0,0):C.GC_10})
V_39 = Vertex(name = 'V_39',
particles = [ P.G, P.G, P.G ],
color = [ 'f(1,2,3)' ],
lorentz = [ L.VVV1 ],
couplings = {(0,0):C.GC_10})
V_40 = Vertex(name = 'V_40',
particles = [ P.G, P.G, P.G, P.G ],
color = [ 'f(-1,1,2)*f(3,4,-1)', 'f(-1,1,3)*f(2,4,-1)', 'f(-1,1,4)*f(2,3,-1)' ],
lorentz = [ L.VVVV1, L.VVVV3, L.VVVV4 ],
couplings = {(1,1):C.GC_12,(0,0):C.GC_12,(2,2):C.GC_12})
V_41 = Vertex(name = 'V_41',
particles = [ P.G, P.G, P.G, P.H ],
color = [ 'f(1,2,3)' ],
lorentz = [ L.VVVS2 ],
couplings = {(0,0):C.GC_14})
V_42 = Vertex(name = 'V_42',
particles = [ P.G, P.G, P.G, P.G, P.H ],
color = [ 'f(-1,1,2)*f(3,4,-1)', 'f(-1,1,3)*f(2,4,-1)', 'f(-1,1,4)*f(2,3,-1)' ],
lorentz = [ L.VVVVS1, L.VVVVS2, L.VVVVS3 ],
couplings = {(1,1):C.GC_15,(0,0):C.GC_15,(2,2):C.GC_15})
V_43 = Vertex(name = 'V_43',
particles = [ P.G, P.G, P.G, P.h1 ],
color = [ 'f(1,2,3)' ],
lorentz = [ L.VVVS1 ],
couplings = {(0,0):C.GC_17})
V_44 = Vertex(name = 'V_44',
particles = [ P.A, P.W__minus__, P.H, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_44})
V_45 = Vertex(name = 'V_45',
particles = [ P.A, P.W__minus__, P.phi0, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_45})
V_46 = Vertex(name = 'V_46',
particles = [ P.A, P.W__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVS2 ],
couplings = {(0,0):C.GC_71})
V_47 = Vertex(name = 'V_47',
particles = [ P.W__minus__, P.H, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_29})
V_48 = Vertex(name = 'V_48',
particles = [ P.W__minus__, P.phi0, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_28})
V_49 = Vertex(name = 'V_49',
particles = [ P.A, P.W__minus__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.VVV1 ],
couplings = {(0,0):C.GC_55})
V_50 = Vertex(name = 'V_50',
particles = [ P.A, P.W__plus__, P.H, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_46})
V_51 = Vertex(name = 'V_51',
particles = [ P.A, P.W__plus__, P.phi0, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_45})
V_52 = Vertex(name = 'V_52',
particles = [ P.A, P.W__plus__, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VVS2 ],
couplings = {(0,0):C.GC_72})
V_53 = Vertex(name = 'V_53',
particles = [ P.W__plus__, P.H, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_29})
V_54 = Vertex(name = 'V_54',
particles = [ P.W__plus__, P.phi0, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_27})
V_55 = Vertex(name = 'V_55',
particles = [ P.W__minus__, P.W__plus__, P.H, P.H ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_26})
V_56 = Vertex(name = 'V_56',
particles = [ P.W__minus__, P.W__plus__, P.phi0, P.phi0 ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_26})
V_57 = Vertex(name = 'V_57',
particles = [ P.W__minus__, P.W__plus__, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_26})
V_58 = Vertex(name = 'V_58',
particles = [ P.W__minus__, P.W__plus__, P.H ],
color = [ '1' ],
lorentz = [ L.VVS2 ],
couplings = {(0,0):C.GC_70})
V_59 = Vertex(name = 'V_59',
particles = [ P.A, P.A, P.W__minus__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.VVVV2 ],
couplings = {(0,0):C.GC_57})
V_60 = Vertex(name = 'V_60',
particles = [ P.W__minus__, P.W__plus__, P.Z ],
color = [ '1' ],
lorentz = [ L.VVV1 ],
couplings = {(0,0):C.GC_18})
V_61 = Vertex(name = 'V_61',
particles = [ P.W__minus__, P.W__minus__, P.W__plus__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.VVVV2 ],
couplings = {(0,0):C.GC_19})
V_62 = Vertex(name = 'V_62',
particles = [ P.A, P.Z, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_61})
V_63 = Vertex(name = 'V_63',
particles = [ P.Z, P.H, P.phi0 ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_60})
V_64 = Vertex(name = 'V_64',
particles = [ P.Z, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VSS1 ],
couplings = {(0,0):C.GC_58})
V_65 = Vertex(name = 'V_65',
particles = [ P.W__minus__, P.Z, P.H, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_9})
V_66 = Vertex(name = 'V_66',
particles = [ P.W__minus__, P.Z, P.phi0, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_8})
V_67 = Vertex(name = 'V_67',
particles = [ P.W__minus__, P.Z, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVS2 ],
couplings = {(0,0):C.GC_67})
V_68 = Vertex(name = 'V_68',
particles = [ P.W__plus__, P.Z, P.H, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_7})
V_69 = Vertex(name = 'V_69',
particles = [ P.W__plus__, P.Z, P.phi0, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_8})
V_70 = Vertex(name = 'V_70',
particles = [ P.W__plus__, P.Z, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.VVS2 ],
couplings = {(0,0):C.GC_66})
V_71 = Vertex(name = 'V_71',
particles = [ P.A, P.W__minus__, P.W__plus__, P.Z ],
color = [ '1' ],
lorentz = [ L.VVVV5 ],
couplings = {(0,0):C.GC_56})
V_72 = Vertex(name = 'V_72',
particles = [ P.Z, P.Z, P.H, P.H ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_65})
V_73 = Vertex(name = 'V_73',
particles = [ P.Z, P.Z, P.phi0, P.phi0 ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_65})
V_74 = Vertex(name = 'V_74',
particles = [ P.Z, P.Z, P.phi__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_64})
V_75 = Vertex(name = 'V_75',
particles = [ P.Z, P.Z, P.H ],
color = [ '1' ],
lorentz = [ L.VVS2 ],
couplings = {(0,0):C.GC_73})
V_76 = Vertex(name = 'V_76',
particles = [ P.W__minus__, P.W__plus__, P.Z, P.Z ],
color = [ '1' ],
lorentz = [ L.VVVV2 ],
couplings = {(0,0):C.GC_20})
V_77 = Vertex(name = 'V_77',
particles = [ P.d__tilde__, P.d, P.A ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_2})
V_78 = Vertex(name = 'V_78',
particles = [ P.s__tilde__, P.s, P.A ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_2})
V_79 = Vertex(name = 'V_79',
particles = [ P.b__tilde__, P.b, P.A ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_2})
V_80 = Vertex(name = 'V_80',
particles = [ P.d__tilde__, P.d, P.G ],
color = [ 'T(3,2,1)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_11})
V_81 = Vertex(name = 'V_81',
particles = [ P.s__tilde__, P.s, P.G ],
color = [ 'T(3,2,1)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_11})
V_82 = Vertex(name = 'V_82',
particles = [ P.b__tilde__, P.b, P.G ],
color = [ 'T(3,2,1)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_11})
V_83 = Vertex(name = 'V_83',
particles = [ P.b__tilde__, P.b, P.H ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS2 ],
couplings = {(0,0):C.GC_74})
V_84 = Vertex(name = 'V_84',
particles = [ P.b__tilde__, P.b, P.phi0 ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS1 ],
couplings = {(0,0):C.GC_75})
V_85 = Vertex(name = 'V_85',
particles = [ P.d__tilde__, P.d, P.Z ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2, L.FFV3 ],
couplings = {(0,0):C.GC_40,(0,1):C.GC_53})
V_86 = Vertex(name = 'V_86',
particles = [ P.s__tilde__, P.s, P.Z ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2, L.FFV3 ],
couplings = {(0,0):C.GC_40,(0,1):C.GC_53})
V_87 = Vertex(name = 'V_87',
particles = [ P.b__tilde__, P.b, P.Z ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2, L.FFV3 ],
couplings = {(0,1):C.GC_53,(0,0):C.GC_40})
V_88 = Vertex(name = 'V_88',
particles = [ P.t__tilde__, P.d, P.phi__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_97})
V_89 = Vertex(name = 'V_89',
particles = [ P.t__tilde__, P.s, P.phi__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_99})
V_90 = Vertex(name = 'V_90',
particles = [ P.u__tilde__, P.b, P.phi__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_91})
V_91 = Vertex(name = 'V_91',
particles = [ P.c__tilde__, P.b, P.phi__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_95})
V_92 = Vertex(name = 'V_92',
particles = [ P.t__tilde__, P.b, P.phi__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_101,(0,1):C.GC_102})
V_93 = Vertex(name = 'V_93',
particles = [ P.u__tilde__, P.d, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_88})
V_94 = Vertex(name = 'V_94',
particles = [ P.c__tilde__, P.d, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_92})
V_95 = Vertex(name = 'V_95',
particles = [ P.t__tilde__, P.d, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_96})
V_96 = Vertex(name = 'V_96',
particles = [ P.u__tilde__, P.s, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_89})
V_97 = Vertex(name = 'V_97',
particles = [ P.c__tilde__, P.s, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_93})
V_98 = Vertex(name = 'V_98',
particles = [ P.t__tilde__, P.s, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_98})
V_99 = Vertex(name = 'V_99',
particles = [ P.u__tilde__, P.b, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_90})
V_100 = Vertex(name = 'V_100',
particles = [ P.c__tilde__, P.b, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_94})
V_101 = Vertex(name = 'V_101',
particles = [ P.t__tilde__, P.b, P.W__plus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_100})
V_102 = Vertex(name = 'V_102',
particles = [ P.e__plus__, P.e__minus__, P.A ],
color = [ '1' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_4})
V_103 = Vertex(name = 'V_103',
particles = [ P.m__plus__, P.m__minus__, P.A ],
color = [ '1' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_4})
V_104 = Vertex(name = 'V_104',
particles = [ P.tt__plus__, P.tt__minus__, P.A ],
color = [ '1' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_4})
V_105 = Vertex(name = 'V_105',
particles = [ P.tt__plus__, P.tt__minus__, P.H ],
color = [ '1' ],
lorentz = [ L.FFS2 ],
couplings = {(0,0):C.GC_86})
V_106 = Vertex(name = 'V_106',
particles = [ P.tt__plus__, P.tt__minus__, P.phi0 ],
color = [ '1' ],
lorentz = [ L.FFS1 ],
couplings = {(0,0):C.GC_87})
V_107 = Vertex(name = 'V_107',
particles = [ P.e__plus__, P.e__minus__, P.Z ],
color = [ '1' ],
lorentz = [ L.FFV2, L.FFV4 ],
couplings = {(0,0):C.GC_40,(0,1):C.GC_54})
V_108 = Vertex(name = 'V_108',
particles = [ P.m__plus__, P.m__minus__, P.Z ],
color = [ '1' ],
lorentz = [ L.FFV2, L.FFV4 ],
couplings = {(0,0):C.GC_40,(0,1):C.GC_54})
V_109 = Vertex(name = 'V_109',
particles = [ P.tt__plus__, P.tt__minus__, P.Z ],
color = [ '1' ],
lorentz = [ L.FFV2, L.FFV4 ],
couplings = {(0,0):C.GC_40,(0,1):C.GC_54})
V_110 = Vertex(name = 'V_110',
particles = [ P.vt__tilde__, P.tt__minus__, P.phi__plus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_85})
V_111 = Vertex(name = 'V_111',
particles = [ P.ve__tilde__, P.e__minus__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_30})
V_112 = Vertex(name = 'V_112',
particles = [ P.vm__tilde__, P.m__minus__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_30})
V_113 = Vertex(name = 'V_113',
particles = [ P.vt__tilde__, P.tt__minus__, P.W__plus__ ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_30})
V_114 = Vertex(name = 'V_114',
particles = [ P.b__tilde__, P.u, P.phi__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_76})
V_115 = Vertex(name = 'V_115',
particles = [ P.b__tilde__, P.c, P.phi__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_77})
V_116 = Vertex(name = 'V_116',
particles = [ P.d__tilde__, P.t, P.phi__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_81})
V_117 = Vertex(name = 'V_117',
particles = [ P.s__tilde__, P.t, P.phi__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_82})
V_118 = Vertex(name = 'V_118',
particles = [ P.b__tilde__, P.t, P.phi__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,0):C.GC_83,(0,1):C.GC_78})
V_119 = Vertex(name = 'V_119',
particles = [ P.d__tilde__, P.u, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_31})
V_120 = Vertex(name = 'V_120',
particles = [ P.s__tilde__, P.u, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_32})
V_121 = Vertex(name = 'V_121',
particles = [ P.b__tilde__, P.u, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_33})
V_122 = Vertex(name = 'V_122',
particles = [ P.d__tilde__, P.c, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_34})
V_123 = Vertex(name = 'V_123',
particles = [ P.s__tilde__, P.c, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_35})
V_124 = Vertex(name = 'V_124',
particles = [ P.b__tilde__, P.c, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_36})
V_125 = Vertex(name = 'V_125',
particles = [ P.d__tilde__, P.t, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_37})
V_126 = Vertex(name = 'V_126',
particles = [ P.s__tilde__, P.t, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_38})
V_127 = Vertex(name = 'V_127',
particles = [ P.b__tilde__, P.t, P.W__minus__ ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_39})
V_128 = Vertex(name = 'V_128',
particles = [ P.u__tilde__, P.u, P.A ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_3})
V_129 = Vertex(name = 'V_129',
particles = [ P.c__tilde__, P.c, P.A ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_3})
V_130 = Vertex(name = 'V_130',
particles = [ P.t__tilde__, P.t, P.A ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_3})
V_131 = Vertex(name = 'V_131',
particles = [ P.u__tilde__, P.u, P.G ],
color = [ 'T(3,2,1)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_11})
V_132 = Vertex(name = 'V_132',
particles = [ P.c__tilde__, P.c, P.G ],
color = [ 'T(3,2,1)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_11})
V_133 = Vertex(name = 'V_133',
particles = [ P.t__tilde__, P.t, P.G ],
color = [ 'T(3,2,1)' ],
lorentz = [ L.FFV1 ],
couplings = {(0,0):C.GC_11})
V_134 = Vertex(name = 'V_134',
particles = [ P.t__tilde__, P.t, P.H ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS2 ],
couplings = {(0,0):C.GC_80})
V_135 = Vertex(name = 'V_135',
particles = [ P.t__tilde__, P.t, P.phi0 ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFS1 ],
couplings = {(0,0):C.GC_79})
V_136 = Vertex(name = 'V_136',
particles = [ P.u__tilde__, P.u, P.Z ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2, L.FFV5 ],
couplings = {(0,0):C.GC_41,(0,1):C.GC_53})
V_137 = Vertex(name = 'V_137',
particles = [ P.c__tilde__, P.c, P.Z ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2, L.FFV5 ],
couplings = {(0,0):C.GC_41,(0,1):C.GC_53})
V_138 = Vertex(name = 'V_138',
particles = [ P.t__tilde__, P.t, P.Z ],
color = [ 'Identity(1,2)' ],
lorentz = [ L.FFV2, L.FFV5 ],
couplings = {(0,0):C.GC_41,(0,1):C.GC_53})
V_139 = Vertex(name = 'V_139',
particles = [ P.tt__plus__, P.vt, P.phi__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_84})
V_140 = Vertex(name = 'V_140',
particles = [ P.e__plus__, P.ve, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_30})
V_141 = Vertex(name = 'V_141',
particles = [ P.m__plus__, P.vm, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_30})
V_142 = Vertex(name = 'V_142',
particles = [ P.tt__plus__, P.vt, P.W__minus__ ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_30})
V_143 = Vertex(name = 'V_143',
particles = [ P.ve__tilde__, P.ve, P.Z ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_59})
V_144 = Vertex(name = 'V_144',
particles = [ P.vm__tilde__, P.vm, P.Z ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_59})
V_145 = Vertex(name = 'V_145',
particles = [ P.vt__tilde__, P.vt, P.Z ],
color = [ '1' ],
lorentz = [ L.FFV2 ],
couplings = {(0,0):C.GC_59})
authors: ["ianwookim@gmail.com"] | author_id: ianwookim@gmail.com

---
repo_name: garabek/Django_BootcampSite | path: /src/newsletter/migrations/0001_initial.py | branch_name: refs/heads/master
blob_id: a0afd01311fc3c8b2e58fd920285130338e86b2d | directory_id: 62c11667bc780b8fb80b69a069c5e4135a40ac8a
content_id: 77ec77167df437d057a369a632f89115ed37d047 | snapshot_id: 39b8bc976730c0776d733536f020a043d2f89370
revision_id: 8752cd7f2c469e2e4c9cf639e357c51cd05b5c4d
detected_licenses: [] | license_type: no_license
visit_date: 2021-07-01T12:09:57.557274 | revision_date: 2017-09-21T23:07:01 | committer_date: 2017-09-21T23:07:01
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 710 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SignUp',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=254)),
('full_name', models.CharField(max_length=100, null=True, blank=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
),
]
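For reference, a model that would produce this migration; this is a reconstruction from the CreateModel fields above, not code from the repository:

```python
# Hypothetical newsletter/models.py matching the migration above.
from django.db import models

class SignUp(models.Model):
    email = models.EmailField(max_length=254)
    full_name = models.CharField(max_length=100, null=True, blank=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
```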
authors: ["bekiyev@gmail.com"] | author_id: bekiyev@gmail.com

---
repo_name: azhar316/ecommerce-store | path: /accounts/models.py | branch_name: refs/heads/master
blob_id: b0120bfacbc1a15617c0e7d5efcaae38eaa5e7f0 | directory_id: d587a6d66a498eb23aa760117928ac012ee50f4d
content_id: 749ccb4895b121276809f8b2340742f776fd0443 | snapshot_id: f0eff37565f9ae873dfc09c119b14f6a647b98ca
revision_id: 6a674a797e0daff63907fd2c8423b62bc8340d5d
detected_licenses: [] | license_type: no_license
visit_date: 2022-05-04T16:59:08.216063 | revision_date: 2020-04-27T11:59:48 | committer_date: 2020-04-27T11:59:48
github_id: 253781116 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-04-22T23:11:35 | gha_created_at: 2020-04-07T12:06:29 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2051 | extension: py
content:
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.utils import timezone
class CustomUserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
if not email:
raise ValueError('Users must have an email address')
if not password:
raise ValueError('Users must have password')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save()
return user
def create_superuser(self, email, password):
user = self.create_user(email, password, staff=True, admin=True, is_superuser=True)
return user
def create_staffuser(self, email, password):
user = self.create_user(email, password, staff=True)
return user
class CustomUser(AbstractBaseUser, PermissionsMixin):
    full_name = models.CharField(max_length=250, blank=True)
email = models.EmailField(unique=True)
is_active = models.BooleanField(default=True)
staff = models.BooleanField(default=False)
admin = models.BooleanField(default=False)
date_joined = models.DateTimeField(default=timezone.now)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
def get_full_name(self):
if self.full_name:
return self.full_name
return self.email
def get_email(self):
return self.email
@property
def is_staff(self):
if self.admin:
return True
return self.staff
@property
def is_admin(self):
return self.admin
    # Override PermissionsMixin hooks so that staff users are allowed to
    # perform all the admin tasks.
def has_perm(self, perm, obj=None):
return self.staff
def has_perms(self, perm_list, obj=None):
return self.staff
def has_module_perms(self, app_label):
return self.staff
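A minimal usage sketch of the manager above, assuming the app is installed and AUTH_USER_MODEL points at CustomUser; the emails and passwords are placeholders:

```python
# e.g. from a Django shell (python manage.py shell)
user = CustomUser.objects.create_user('alice@example.com', 's3cret')
staff = CustomUser.objects.create_staffuser('bob@example.com', 's3cret')
admin = CustomUser.objects.create_superuser('root@example.com', 's3cret')
assert staff.is_staff and not staff.is_admin
assert admin.is_staff and admin.is_admin
```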
authors: ["sayed.azharudin@gmail.com"] | author_id: sayed.azharudin@gmail.com

---
repo_name: DaniloBP/Python-3-Bootcamp | path: /modules_autopep1.py | branch_name: refs/heads/master
blob_id: cd9ee5abd198f4ea6763f1fc758b8522cdfa424b | directory_id: c3fafc358ab1bd71e67d67a7598b137193cb2d2e
content_id: 4e1c12480a8df3120c2ee2d83bed7ce047f4b17a | snapshot_id: 0010bb62432423d7ec76f87aa55d6cd016eac6e8
revision_id: 0594f3fcced17caa8bc00752aa6944b42f6224da
detected_licenses: [] | license_type: no_license
visit_date: 2020-04-26T00:55:51.174671 | revision_date: 2019-06-03T04:38:04 | committer_date: 2019-06-03T04:38:04
github_id: 173191957 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1194 | extension: py
content:
import math
import sys
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple = (1, 2, 3, 'a')
some_variable = {
'long': 'Long code lines should be wrapped within 79 characters.',
'other': [math.pi, 100, 200, 300, 9876543210, 'This is a long string that goes on'],
'more': {
'inner': 'This whole logical line should be wrapped.',
some_tuple: [1, 20, 300,
40000,
500000000,
60000000000000000]}}
is_cat_owner = True
if is_cat_owner:
print("MEOW!")
return (some_tuple, some_variable)
def example2(): return (
'' in {'f': 2}) in {'has_key() is deprecated': True}
class Example3(object):
def __init__(self, bar):
# Comments should have a space after the hash.
if bar == True:
bar += 1
bar = bar * bar
return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
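This file is deliberately unformatted practice input for autopep8 (per its name), so the style violations above are left as-is. A sketch of cleaning it programmatically, assuming autopep8 is installed:

```python
# autopep8.fix_code is part of autopep8's public API (pip install autopep8).
import autopep8

with open('modules_autopep1.py') as src:
    print(autopep8.fix_code(src.read(), options={'aggressive': 1}))
```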
authors: ["pereira.b.danilo@gmail.com"] | author_id: pereira.b.danilo@gmail.com

---
repo_name: sk929/MLLearning | path: /DecisionTree.py | branch_name: refs/heads/master
blob_id: 155fa4c41fd0a7c40be7e863303ef3b568645e29 | directory_id: faae5e2e431cc55089324510715b5bc91732ff42
content_id: b874cf8d205050881abc97fc35a0b050a75094f3 | snapshot_id: ac9e84d9bbf0c8dfa7ad23b8941925320ed8c083
revision_id: ca5a9992b1fc40105a722b447ded6da20db32238
detected_licenses: [] | license_type: no_license
visit_date: 2022-07-20T01:29:05.298884 | revision_date: 2020-05-26T13:38:25 | committer_date: 2020-05-26T13:38:25
github_id: 266844907 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2174 | extension: py
content:
# Code you have previously used to load data
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
home_data = pd.read_csv(iowa_file_path)
# Create target object and call it y
y = home_data.SalePrice
# Create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Specify Model
iowa_model = DecisionTreeRegressor(random_state=1)
# Fit Model
iowa_model.fit(train_X, train_y)
# Make validation predictions and calculate mean absolute error
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print("Validation MAE: {:,.0f}".format(val_mae))
def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
model.fit(train_X, train_y)
preds_val = model.predict(val_X)
mae = mean_absolute_error(val_y, preds_val)
return(mae)
candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
# Write loop to find the ideal tree size from candidate_max_leaf_nodes
node = {}
for leaf_nodes in candidate_max_leaf_nodes:
node[leaf_nodes]= get_mae(leaf_nodes, train_X, val_X, train_y, val_y)
# Store the best value of max_leaf_nodes (it will be either 5, 25, 50, 100, 250 or 500)
best_tree_size = min(node,key = node.get)
# Fill in argument to make optimal size and uncomment
final_model = DecisionTreeRegressor(max_leaf_nodes = best_tree_size , random_state=1)
# fit the final model and uncomment the next two lines
final_model.fit(X, y)
# Random Forest
from sklearn.ensemble import RandomForestRegressor
forest_model = RandomForestRegressor(random_state=1)
forest_model.fit(train_X, train_y)
melb_preds = forest_model.predict(val_X)
print(mean_absolute_error(val_y, melb_preds))
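The dictionary-plus-min pattern above can be collapsed; an equivalent one-liner for choosing the tree size, using only names already defined in this script:

```python
# Same result as the loop followed by min(node, key=node.get) above.
best_tree_size = min(candidate_max_leaf_nodes,
                     key=lambda n: get_mae(n, train_X, val_X, train_y, val_y))
```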
authors: ["noreply@github.com"] | author_id: sk929.noreply@github.com

---
repo_name: JDHINCAMAN/Python_examples | path: /juego con tortuga 8.py | branch_name: refs/heads/main
blob_id: 4f07499c2074eb8c88a885caeb77b365c77adf2b | directory_id: 60fdc04010f1de5ed8017ae6f9d455feab94c33a
content_id: 5bf62bb0d7652da17d3342d7d845031f68dbc925 | snapshot_id: af7ef6c4c6df196dd15bf602c967cc56ec088b27
revision_id: 010b2b415fc9c61a4dcfd7728d3d7a7231b531c8
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-23T12:09:38.245610 | revision_date: 2021-03-23T14:05:55 | committer_date: 2021-03-23T14:05:55
github_id: 350734987 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 110 | extension: py
content:
import turtle
t = turtle.Pen()
t.reset()
for x in range(1,38):
t.forward(100)
t.left(175)
authors: ["noreply@github.com"] | author_id: JDHINCAMAN.noreply@github.com

---
repo_name: charmquark1/cmdb-lab | path: /Week1/05-aa-nt-converter.py | branch_name: refs/heads/master
blob_id: 14968dd918294b12c09c5485726652af73463636 | directory_id: 580d8c8ee860ea8d6c522fd943b37f37a6a31712
content_id: 563953c5a4c3f48005902efabb19f344c9fc58f3 | snapshot_id: 4e485f973eec2f5473760722dc1bd0401485fb76
revision_id: 3a82fe708f42f9d13493cb99f580a5587881a125
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-11T19:34:54.462104 | revision_date: 2016-12-19T14:54:42 | committer_date: 2016-12-19T14:54:42
github_id: 68961566 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1335 | extension: py
content:
#!/usr/bin/env python
"""
Parse amino acid sequence in FASTA file.
Convert amino acid to 3 nucleotide seq, append to variable 'new'
When you see a -, replace with ---
Print gene id (ident), converted seq, new line \n, print new seq
Usage: xxx.py 04-AA.fa 02-nt.fa
"""
import fasta_fixed
import sys
import itertools
#inputs: 1) amino acid FASTA, 2) original nt FASTA
AA_query = open(sys.argv[1])
nt_query = open(sys.argv[2])
#prepare inputs for parallel parsing
AA_seq = []
nt_seq = []
for ident, sequence in fasta_fixed.FASTAReader(AA_query):
AA_seq.append(sequence)
for ident1, sequence in fasta_fixed.FASTAReader(nt_query):
nt_seq.append(sequence)
# parse parallel
# read ith element of aa sequence. If not "-", then take three first elements from nt_seq file and add to empty string, new
# at the end of the gene, append string new to list. Then restart for loop for next gene.
# I made list to make it easier to format for later.
converted = []  # renamed from 'list', which shadowed the builtin
for aa, nt in itertools.izip(AA_seq, nt_seq):
new = ''
nt_pos = 0
for i in range(0, len(aa)):
if aa[i] == '-':
new = new + ("---")
else:
codon = nt[nt_pos:nt_pos+3] #take 3 characters
new = new + codon
nt_pos = nt_pos + 3
#print new
    converted.append(new)
print ">x\n" + "\n>x\n".join(converted)
authors: ["ninarao42@gmail.com"] | author_id: ninarao42@gmail.com

---
repo_name: Seal125/binary_tree_basic | path: /exercises.py | branch_name: refs/heads/master
blob_id: caf70606137c0215e3fb64625ec643e1dc3b2668 | directory_id: 4ee3f1ce9d06815fbefa6c674d1e00fda7c1dec1
content_id: 7faa60fb61740354ef8009359c125e8b8c5f7807 | snapshot_id: 79bce998c34c56bbe938784f2be8048da66206d6
revision_id: 957df46130f8c0e68bb8cf7145d0d01aee60e34f
detected_licenses: [] | license_type: no_license
visit_date: 2021-04-17T11:05:05.827069 | revision_date: 2020-03-23T14:47:25 | committer_date: 2020-03-23T14:47:25
github_id: 249440206 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-03-23T13:35:43 | gha_created_at: 2020-03-23T13:35:43 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 606 | extension: py
content:
class Node:
def __init__(self, value = None):
self.value = value
self.left = None
self.right = None
def inorder(root):
    values = []
    def add(node):
        if node:
            add(node.left)
            values.append(node.value)  # was list.append, which targeted the builtin type
            add(node.right)
    add(root)
    return values

def is_unival_tree(tree):
    value = tree.value
    is_unival = True
    def add(node):
        nonlocal is_unival  # needed so the flag set here is visible outside
        if node:
            add(node.left)
            if node.value != value:
                is_unival = False
            add(node.right)
    add(tree)
    return is_unival
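A quick check of the two helpers on a three-node tree (not part of the original exercise file):

```python
root = Node(1)
root.left = Node(1)
root.right = Node(1)
assert inorder(root) == [1, 1, 1]
assert is_unival_tree(root)
root.right.value = 2
assert not is_unival_tree(root)
```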
authors: ["stephaniesmith12514@gmail.com"] | author_id: stephaniesmith12514@gmail.com

---
repo_name: IreneYIN7/Map-Tracer | path: /workspace_pc/catkin_ws/build_isolated/hector_slam/catkin_generated/stamps/hector_slam/_setup_util.py.stamp | branch_name: refs/heads/master
blob_id: feb967e768de780f768c67ee8e6bc478974aa13b | directory_id: 7e90a1f8280618b97729d0b49b80c6814d0466e2
content_id: c51cc0942a19064fe6e7239c5adffd9ad95290b7 | snapshot_id: 91909f4649a8b65afed56ae3803f0c0602dd89ff
revision_id: cbbe9acf067757116ec74c3aebdd672fd3df62ed
detected_licenses: [] | license_type: no_license
visit_date: 2022-04-02T09:53:15.650365 | revision_date: 2019-12-19T07:31:31 | committer_date: 2019-12-19T07:31:31
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 13976 | extension: stamp
content:
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = '/home/gse5/catkin_ws/devel_isolated/hector_map_server;/home/gse5/catkin_ws/devel_isolated/hector_geotiff_plugins;/home/gse5/catkin_ws/devel_isolated/hector_geotiff;/home/gse5/catkin_ws/devel_isolated/hector_nav_msgs;/home/gse5/catkin_ws/devel_isolated/hector_marker_drawing;/home/gse5/catkin_ws/devel_isolated/hector_mapping;/home/gse5/catkin_ws/devel_isolated/hector_compressed_map_transport;/home/gse5/catkin_ws/devel_isolated/hector_map_tools;/home/gse5/catkin_ws/devel_isolated/hector_imu_tools;/home/gse5/catkin_ws/devel_isolated/hector_imu_attitude_to_tf;/home/gse5/catkin_ws/devel_isolated/rplidar_ros;/home/gse5/catkin_ws/devel_isolated/cartographer_rviz;/home/gse5/catkin_ws/devel_isolated/cartographer_ros;/home/gse5/catkin_ws/devel_isolated/cartographer_ros_msgs;/home/gse5/catkin_ws/devel_isolated/beginner_tutorials;/home/gse5/catkin_ws/install_isolated;/opt/ros/melodic;/home/gse5/catkin_ws/devel_isolated/cartographer;/home/gse5/catkin_ws/devel_isolated/ceres-solver'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
authors: ["sh9339@outlook.com"] | author_id: sh9339@outlook.com

---
repo_name: Zacros7164/unit1 | path: /Week 1/Day1Practice/madlib.py | branch_name: refs/heads/master
blob_id: 3166d19669d179ae390fe0176d83516606e617ba | directory_id: 9c991a8b7bbdda40d9115d685122cf63627a1ace
content_id: c5e85cfff4c6a4833517dcdcabfcb373a663a619 | snapshot_id: 321844820178e16909df52f5620319e1aeeb0d4a
revision_id: 76d737067a685af110f6ec00ee315136c3cad51a
detected_licenses: [] | license_type: no_license
visit_date: 2020-04-06T11:26:00.023411 | revision_date: 2019-02-12T14:06:00 | committer_date: 2019-02-12T14:06:00
github_id: 157416741 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 183 | extension: py
content:
print "Madlibs!"
name = raw_input("Please give me a name. ")
subject = raw_input("Please give me a school subject. ")
print name + "'s favorite subject in school is " + subject + "."
authors: ["Zacros7164@gmail.com"] | author_id: Zacros7164@gmail.com

---
repo_name: AstroJames/anisoReconstruct | path: /PowerSpectrumFunctions.py | branch_name: refs/heads/master
blob_id: 8a961656b96feb84ed09e52341f6db90ae7faeee | directory_id: 30272f4069293049848369f674ff7a8e88e30ac9
content_id: 8e19c74085abafde3847571c977825abf8d71e79 | snapshot_id: e40121fdfdf0e90575c210c20a3ae471a46fa558
revision_id: 9b79fd78eb47e44592a5487bd1df9b81658dd31f
detected_licenses: [] | license_type: no_license
visit_date: 2022-04-06T23:47:07.820106 | revision_date: 2020-03-02T00:38:31 | committer_date: 2020-03-02T00:38:31
github_id: 235967999 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5628 | extension: py
content:
from header import *
def PowerSpectrum(data,type=None,variance=None):
"""
Calculate the power spectrum.
INPUTS:
----------
data - The 2D image data
type - either '2D' or 'aziAverage' for 2D or averaged power spectrum calculations.
variance - passed to the azimuthal averaging if you want to calculate the variance of
the average.
OUTPUTS:
----------
Pspec - the 2D power spectrum
k - the 2D k vector array
"""
data = data.astype(float) # make sure the data is float type
# This removes the k = 0 wavevalue and makes integrating easier.
data = data/data.mean() - 1 # Pre-processing following Federrath et al. 2016
ft = (1./ (data.shape[0]*data.shape[1] ) )*fftpack.fft2(data) # 2D fourier transform
ft_c = fftpack.fftshift(ft) # center the transform so k = (0,0) is in the center
PSpec = np.abs(ft_c*np.conjugate(ft_c)) # take the power spectrum
    # Take the azimuthal average of the power spectrum, if required.
    if type == 'aziAverage':
        if not variance:
            PSpec = azimuthalAverage(PSpec)
            return PSpec
else:
PSpec, var = azimuthalAverage(PSpec,variance=True)
return PSpec, var
# Create the kx and ky vector components.
kx = np.round(np.linspace( -( PSpec.shape[0] + 1 )/2, ( PSpec.shape[0] + 1 )/2, PSpec.shape[0]))
ky = np.round(np.linspace( -( PSpec.shape[1] + 1 )/2, ( PSpec.shape[1] + 1 )/2, PSpec.shape[1]))
kx, ky = np.meshgrid(kx,ky,indexing="xy")
k = np.hypot(kx,ky)
return PSpec, k
def PowerSpectrumAveraging(files,densOrderd,run):
"""
this functions averages over the power spectrums and returns a dictionary with the averaged
power spectrums in it.
INPUTS:
----------
files - all of the file names for each simulations
densOrdered - the density dictionary ordered by plot order
run - if the function needs to be rerun for recompiling of the density plots
OUTPUTS:
----------
PSpecAver - the average power spectrum as a dictionary, for each of the simulations
PSpecVar - the variance power spectrum as a dictionary, for each of the simulations
"""
if run == 0: # if the power spectrum need to be recompiled.
PSpecAver = {}
PSpecVar = {}
fileCounter = 0
# Average the power spectrum, from 5T to 10T
for iter in xrange(50,101):
#print("Currently on iteration {}".format(iter))
# Load the density files
try:
density = LoadPickles(files,iter)
except IndexError:
#print("Index error, I'm going to break the loop.")
break
plotCounter = 0
for i in xrange(0,5):
for j in xrange(0,4):
if fileCounter == 0:
dens = density[densOrderd[plotCounter]] # column density
PSpec, k = PowerSpectrum(dens) # the power spectrum and wavevector
PSpecAver[densOrderd[plotCounter]] = PSpec # add a new power spectrum to the dictionary
PSpecVar[densOrderd[plotCounter]] = PSpec**2 # for constructing the variance
else:
dens = density[densOrderd[plotCounter]] # column density
PSpec, k = PowerSpectrum(dens) # the power spectrum and wavevector
PSpecAver[densOrderd[plotCounter]] += PSpec # add the power spectrum together
PSpecVar[densOrderd[plotCounter]] += PSpec**2 # for constructing the variance
plotCounter +=1 #update the plot
fileCounter +=1 #update the file
# Average the power spectrum and take the log10 transform
for key in PSpecAver.keys():
PSpecAver[key] = PSpecAver[key]/fileCounter
PSpecVar[key] = (PSpecVar[key]/fileCounter - PSpecAver[key]**2)**0.5
save_obj(PSpecAver,"AveragePSpec")
save_obj(PSpecVar,"StdPSpec")
else:
PSpecAver = load_obj("AveragePSpec.pkl")
PSpecVar = load_obj("StdPSpec.pkl")
return PSpecAver, PSpecVar
def calculateIsoVar(PowerSpectrum,k,var2D):
"""
Assuming isotropy of the kz, this function calculates R = sigma^2_2 / sigma^2_3
INPUTS:
------------------------------------------------------------------------------------------
PowerSpectrum - the 2D power spectrum.
k - the k wavevector as a 2D grid.
    var2D - the variance of the 2D column density.
OUTPUTS:
------------------------------------------------------------------------------------------
R - the ratio between the 2D and 3D variance
var3D - the estimated 3D variance
"""
# Differentials for integration
dkx = k[0,0]-k[0,1]
dky = k[0,0]-k[1,0]
dk = np.hypot(dkx,dky)
# Calculate the integrals over the 2D and 3D power spectrum, assuming isotropy
P2D = 2* np.pi* sum( sum( PowerSpectrum ) ) * dk
P3D = 4* np.pi* sum( sum( PowerSpectrum * k ) ) * dk
# Calculate R from Brunt et al. 2010, and the 3D variance.
R = P2D / P3D
var3D = var2D / R
return R, var3D
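A minimal usage sketch on synthetic data; it assumes `from header import *` provides numpy as np and scipy's fftpack, as the function bodies imply:

```python
rng = np.random.RandomState(0)
field = rng.lognormal(size=(128, 128))      # stand-in for a column density map
PSpec, k = PowerSpectrum(field)             # 2D power spectrum and |k| grid
R, var3D = calculateIsoVar(PSpec, k, var2D=field.var())
```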
authors: ["jamesbeattie@James-MacBook-Pro.local"] | author_id: jamesbeattie@James-MacBook-Pro.local

---
repo_name: ugabiga/flask-boilerplate | path: /app/http/responses/__init__.py | branch_name: refs/heads/master
blob_id: 8b06643905de8fc715a65a1df5347cc97d12961b | directory_id: dfcddf4ed51bc48c4bd6288e3517fd8629000fbd
content_id: cb1798211886d0480004cb258bb6cdb3ead1bd94 | snapshot_id: 508548d1f713c9f4412e43c68dd59d9a6210882d
revision_id: 5a317a80295aacf9bfc8c7c1a5736d2d5b22fc98
detected_licenses: [] | license_type: no_license
visit_date: 2022-08-30T16:28:58.332410 | revision_date: 2022-08-23T12:03:34 | committer_date: 2022-08-23T12:03:34
github_id: 208466604 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-08-23T12:03:35 | gha_created_at: 2019-09-14T16:12:18 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1376 | extension: py
content:
from typing import Any, Tuple, Type
import marshmallow as ma
from flask import jsonify
from flask.wrappers import Response
from core.use_cases.output import Failure, Output
def build_success_output_with_schema(
output: Output, schema_class: Type[ma.Schema], many: bool = None
) -> Tuple[Response, int]:
output_schema = schema_class().dump(output.get_data(), many=many)
return build_success_response(output_schema, output.get_meta())
def build_success_response(data: Any, meta: dict = None) -> Tuple[Response, int]:
response = {"data": data}
if meta is not None:
response["meta"] = meta
return jsonify(response), 200
def build_failure_response(output: Failure) -> Tuple[Response, int]:
return jsonify(error=output.get_type(), error_message=output.get_message()), 400
def build_response(
output: Output, schema_class: Type[ma.Schema] = None, many: bool = None
) -> Tuple[Response, int]:
if output.is_success() and schema_class is not None:
return build_success_output_with_schema(output, schema_class, many)
if output.is_success():
return build_success_response(output.get_data(), output.get_meta())
if isinstance(output, Failure):
return build_failure_response(output)
return build_failure_response(
Failure.build_empty_internal_response_error("in_response_builder")
)
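A hypothetical view showing how these builders are meant to be called; `app`, `UserSchema`, and `get_user_output` are placeholders, not part of this module:

```python
@app.route("/users/<int:user_id>")
def show_user(user_id):
    output = get_user_output(user_id)  # a use case returning Output or Failure
    return build_response(output, schema_class=UserSchema)
```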
authors: ["ugabiga@gmail.com"] | author_id: ugabiga@gmail.com

---
repo_name: tramlam-ng/QuestionAnsweringSystem | path: /question_set.py | branch_name: refs/heads/master
blob_id: 0d72f76083eab3990a6815596501ba6a7019de76 | directory_id: ebb081aea082ea8964c6de96d8ee4063e2660eba
content_id: fbbaeb74231f4b8bbcc8727ace37848045609470 | snapshot_id: 8298f79764917e09e9ae34510cbedaf3b87f0d94
revision_id: ca28ef59fe8eaf7136bf9c71a2d88c2b63ffac74
detected_licenses: [] | license_type: no_license
visit_date: 2022-01-14T15:47:13.364148 | revision_date: 2019-01-12T13:57:25 | committer_date: 2019-01-12T13:57:25
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 521 | extension: py
content:
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
data1=pd.read_csv('WikiQA-train.tsv',delimiter='\t',encoding='utf-8')
data2=pd.read_csv('WikiQA-test.tsv',delimiter='\t',encoding ='utf-8')
data = pd.concat([data1, data2], ignore_index=True)  # DataFrame.append is deprecated in newer pandas
#Extracting the unique questions along with their questionID
def extract_questions(data):
new_data=data.drop(['DocumentID','DocumentTitle','Label'],axis=1)
d=new_data.drop_duplicates()
return d
d=extract_questions(data)
d.to_csv('questions.csv',index=False)
|
[
"noreply@github.com"
] |
tramlam-ng.noreply@github.com
|
8cbeb7315d0f6c9e820555d49e344399fd8269ca
|
992c31a3bda2467e9d90ec8989f15a4cd38bae2b
|
/drone.py
|
e51fef99bd7c3de22cda760187dc7caf67aed65a
|
[] |
no_license
|
aleksandarnikov/dwm
|
a35a83f720e75e85d23039a091d280675d716797
|
3de3f0795955fd30056e4b71cd1b92ef33950ccd
|
refs/heads/main
| 2023-01-06T16:17:42.826991
| 2020-11-14T12:11:44
| 2020-11-14T12:11:44
| 312,095,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import paho.mqtt.client as mqtt
import time
import random
import sys
name = sys.argv[1]
client = mqtt.Client(name)
client.connect("localhost")
def on_publish(client, userdata, result):
print("data published \n")
pass
client.on_publish = on_publish
# ret = client.publish("dwm/node/abc1/uplink/location", '{"position":{"x":1.3936733,"y":1.174517,"z":-0.26708269,"quality":81},"superFrameNumber":136}')
x = 5
y = 4
dx = 0.06
dy = 0.05
while True:
ddx = x + dx
ddy = y + dy
if ddx >= 10 or ddx < 0:
dx = -dx
continue
if ddy >= 10 or ddy < 0:
dy = -dy
continue
x = ddx
y = ddy
ret = client.publish("dwm/node/" + name + "/uplink/location", '{"position":{"x":' + str(x) + ',"y":' + str(y) + ',"z":-0.26708269,"quality":81},"superFrameNumber":136}')
print(x, y)
time.sleep(0.01)
# ret2 = client.publish("abc", "xyz")
# NOTE: unreachable; the `while True` loop above never exits
client.loop_start()  # start the network loop
time.sleep(10)
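# --- Hedged companion sketch (not in the original script): a minimal paho-mqtt
# subscriber for the locations published above. The client name is arbitrary;
# the '+' wildcard matches any node id.
# def on_message(client, userdata, message):
#     print(message.topic, message.payload.decode())
# sub = mqtt.Client("location-listener")
# sub.connect("localhost")
# sub.subscribe("dwm/node/+/uplink/location")
# sub.on_message = on_message
# sub.loop_forever()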
|
[
"aleksandar.nikov@netcetera.com"
] |
aleksandar.nikov@netcetera.com
|
31519fa2a14b4aedde98b2f3a8defd664bd00223
|
69ef0b99e5b2a1fde4780501e87725a618c7889f
|
/abc/python3/hello.py
|
346ba43f5dd4dcecc557910820aefe3bf7f003ce
|
[] |
no_license
|
wsz-/real_hub
|
350f5133ec55fb0357a1c76e72ac6f93757352cb
|
f1b4d3140bc8c723076bba79fbaf8c0495592314
|
refs/heads/master
| 2021-01-10T21:14:13.724398
| 2012-11-02T04:42:25
| 2012-11-02T04:42:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import sys
argvs=sys.argv
#print(len(argvs))
base="Hello"
err='''Usage:
hello
hello -p
hello -p $str
(case-insensitive)'''
arg_len=len(argvs)
if arg_len==1:
    print(base,'world!')
elif arg_len==2:
    if argvs[1].lower()=='-p':
        print(base,'world!')
    else:
        print(err)
elif arg_len==3:
    if argvs[1].lower()=='-p':
        print(base,argvs[2])
    else:
        print(err)
else:
    print(err)
|
[
"cisir92@gmail.com"
] |
cisir92@gmail.com
|
21dad83cf27d3b9f8a2e6cff7584c09f606351a6
|
aff5cc92f38213a45323d7dede291dd918e96519
|
/simulation/crystal_mode_code/plane_transistion_plot.py
|
7c7ad320310ff374257a8f598b9f15e2ec976c37
|
[] |
no_license
|
nistpenning/calc
|
bd475b75a36ba93e74356a37529d0f9dac30a083
|
15d651bcc5c067032041b5ad9cf0be38169bb750
|
refs/heads/master
| 2021-01-18T22:59:31.619436
| 2015-11-03T23:44:05
| 2015-11-03T23:44:05
| 32,483,830
| 3
| 1
| null | 2015-06-17T16:58:16
| 2015-03-18T20:54:43
|
Matlab
|
UTF-8
|
Python
| false
| false
| 4,645
|
py
|
__author__ = 'sbt'
"""
Makes a plot of the rotation frequency of the
2-1 plane transition for a given configuration of the ion trap.
"""
from mode_analysis_code import ModeAnalysis
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
if __name__ == "__main__":
# Select the trapping and wall potentials which will be used
# for all future runs
trappotential = (0.0, -873, -1000)
wallpotential = 0.1
precision_solving = True
# Determines the number of ions to find the transition frequency for.
nionlist = [19, 20, 26, 37, 50, 61, 75, 91, 110, 127, 130, 169, 190, 217, 231, 300, 331]
currentfrequency = 93
    transitionfrequencies = []
# Iterate through number of ions to test for stability
for N in nionlist:
if N > 100:
# Set to false to decrease run time for the biggest crystals
# and run with un-perturbed crystals
# (potentially not global energy minimum)
precision_solving = True
# Instantiate a crystal and see if it is stable
crystal = ModeAnalysis(N=N, Vtrap=trappotential, Ctrap=1.0, ionmass=None,
B=4.4588, frot=currentfrequency, Vwall=wallpotential,
wall_order=2, quiet=False, precision_solving=precision_solving)
crystal.run()
# Increase the frequency until stability is lost- most important for the first
# crystal tested
while crystal.is_plane_stable():
print("Crystal of", N, "is currently at", currentfrequency,
"increasing to ", currentfrequency + 1)
currentfrequency += 1
crystal = ModeAnalysis(N=N, Vtrap=trappotential, Ctrap=1.0, ionmass=None,
B=4.4588, frot=currentfrequency, Vwall=wallpotential,
wall_order=2,
quiet=False, precision_solving=precision_solving)
crystal.run()
# When frequency is lost, reduce to find when it resumes
while not crystal.is_plane_stable():
print("Found turning point: reducing frequency from", currentfrequency, "to ",
currentfrequency - 1)
currentfrequency -= 1
crystal = ModeAnalysis(N=N, Vtrap=trappotential, Ctrap=1.0, ionmass=None,
B=4.4588, frot=currentfrequency, Vwall=wallpotential,
wall_order=2,
quiet=False, precision_solving=precision_solving)
crystal.run()
# Once stability has resumed the lowest frequency at which 1->2 transition occurs is stored
print("Transistion frequency is", currentfrequency + 1, " for number of ions", crystal.Nion)
transistionfrequencies.append(currentfrequency + 1)
print("Transitions found:")
print("nions:", nionlist)
print("frequencies", transistionfrequencies)
#########################################
    transfreq = transitionfrequencies
nions=nionlist
shells=[1,2,3,4,5,6,7,8,9,10]
shelln=[7,19,37,61,91,127,169,217,271,331]
def func(x, a, b, c):
return a * np.exp(-b * x) + c
fig = plt.figure(figsize=(14, 12))
plt.rcParams['font.size'] = 16
ax = fig.add_subplot(1,1,1)
for i in range(len(transfreq)):
if nions[i] in shelln:
plt.plot(transfreq[i],nions[i],"o",color='red')
else:
plt.plot(transfreq[i],nions[i],"o",color='blue')
plt.title("1-2 Plane Transistion for $V_{Mid}=-.873, \ V_{Center}=-1.0 \ (kV) V_{Wall} =1 V$", y=1.02)
plt.xlabel("Transistion Frequency (kHz)")
plt.ylabel("Number of Ions")
major_ticks = np.arange(min(transfreq),max(transfreq),2)
minor_ticks = np.arange(min(transfreq),max(transfreq),.5)
print(major_ticks)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
yticks=np.arange(0,400,25)
yticksmin=np.arange(0,400,5)
ax.set_yticks(yticks)
ax.set_yticks(yticksmin, minor=True)
    plt.grid(True)
    plt.xlim([min(transfreq)*.99,max(transfreq)*1.01])
popt, pcov = curve_fit(func, transfreq, nions,p0=[127,.1,122])
print(popt)
x=np.linspace(min(transfreq)*.99,max(transfreq)*1.01,200)
    plt.plot(x, func(x, *popt), '-', color="black", label="Fitted Curve")
plt.legend(loc=1)
for N in shelln:
plt.plot([min(transfreq)*.99,max(transfreq)*1.01],[N,N],"--",color='black')
for N in shells:
plt.text(max(transfreq)*1.013,shelln[N-1],"%d" %N)
plt.show()
|
[
"storrisi@u.rochester.edu"
] |
storrisi@u.rochester.edu
|
d2e18daba5039bfa0fe53bdc30e97c234ded7ec8
|
bbfa9cdfd5f09c833ab9190cd4ad5a46e7a515e7
|
/effective-python/2020-05/item_61.py
|
863a8f8f00e61d939277ee2b82426ba026599225
|
[] |
no_license
|
alexchonglian/readings
|
775204e013a2301f08fee96c5e8b116842faebcb
|
03cb6cb266d8d2376db411e9b12e9b6cd1f2b33b
|
refs/heads/master
| 2022-12-02T13:56:56.878477
| 2021-06-18T05:53:14
| 2021-06-18T05:53:14
| 218,573,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,393
|
py
|
import random
random.seed(1234)
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Write all output to a temporary directory
import atexit
import gc
import io
import os
import tempfile
TEST_DIR = tempfile.TemporaryDirectory()
atexit.register(TEST_DIR.cleanup)
# Make sure Windows processes exit cleanly
OLD_CWD = os.getcwd()
atexit.register(lambda: os.chdir(OLD_CWD))
os.chdir(TEST_DIR.name)
def close_open_files():
everything = gc.get_objects()
for obj in everything:
if isinstance(obj, io.IOBase):
obj.close()
atexit.register(close_open_files)
def example(i): print(f'\n==== Example {i} ====')
example(1)
class EOFError(Exception):
pass
class ConnectionBase:
def __init__(self, connection):
self.connection = connection
self.file = connection.makefile('rb')
def send(self, command):
line = command + '\n'
data = line.encode()
self.connection.send(data)
def receive(self):
line = self.file.readline()
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
example(2)
import random
WARMER = 'Warmer'
COLDER = 'Colder'
UNSURE = 'Unsure'
CORRECT = 'Correct'
class UnknownCommandError(Exception):
pass
example(3)
example(4)
example(5)
example(6)
class Session(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state(None, None)
def _clear_state(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
def loop(self):
while command := self.receive():
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
self.send_number()
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_state(lower, upper)
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
def send_number(self):
guess = self.next_guess()
self.guesses.append(guess)
self.send(format(guess))
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
example(7)
example(8)
example(9)
example(10)
import contextlib
import math
class Client(ConnectionBase):
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
@contextlib.contextmanager
def session(self, lower, upper, secret):
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
self.send(f'PARAMS {lower} {upper}')
try:
yield
finally:
self._clear_state()
self.send('PARAMS 0 -1')
def request_numbers(self, count):
for _ in range(count):
self.send('NUMBER')
data = self.receive()
yield int(data)
if self.last_distance == 0:
return
def report_outcome(self, number):
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
self.send(f'REPORT {decision}')
return decision
example(11)
import socket
from threading import Thread
def handle_connection(connection):
with connection:
session = Session(connection)
try:
session.loop()
except EOFError:
pass
def run_server(address):
with socket.socket() as listener:
# Allow the port to be reused
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(address)
listener.listen()
while True:
connection, _ = listener.accept()
thread = Thread(target=handle_connection,
args=(connection,),
daemon=True)
thread.start()
example(12)
def run_client(address):
with socket.create_connection(address) as connection:
client = Client(connection)
with client.session(1, 5, 3):
results = [(x, client.report_outcome(x))
for x in client.request_numbers(5)]
with client.session(10, 15, 12):
for number in client.request_numbers(5):
outcome = client.report_outcome(number)
results.append((number, outcome))
return results
example(13)
def main():
address = ('127.0.0.1', 1234)
server_thread = Thread(
target=run_server, args=(address,), daemon=True)
server_thread.start()
results = run_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
main()
example(14)
class AsyncConnectionBase:
def __init__(self, reader, writer): # Changed
self.reader = reader # Changed
self.writer = writer # Changed
async def send(self, command):
line = command + '\n'
data = line.encode()
self.writer.write(data) # Changed
await self.writer.drain() # Changed
async def receive(self):
line = await self.reader.readline() # Changed
if not line:
raise EOFError('Connection closed')
return line[:-1].decode()
example(15)
example(16)
example(17)
example(18)
example(19)
class AsyncSession(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_values(None, None)
def _clear_values(self, lower, upper):
self.lower = lower
self.upper = upper
self.secret = None
self.guesses = []
async def loop(self): # Changed
while command := await self.receive(): # Changed
parts = command.split(' ')
if parts[0] == 'PARAMS':
self.set_params(parts)
elif parts[0] == 'NUMBER':
await self.send_number() # Changed
elif parts[0] == 'REPORT':
self.receive_report(parts)
else:
raise UnknownCommandError(command)
def set_params(self, parts):
assert len(parts) == 3
lower = int(parts[1])
upper = int(parts[2])
self._clear_values(lower, upper)
def next_guess(self):
if self.secret is not None:
return self.secret
while True:
guess = random.randint(self.lower, self.upper)
if guess not in self.guesses:
return guess
async def send_number(self): # Changed
guess = self.next_guess()
self.guesses.append(guess)
await self.send(format(guess)) # Changed
def receive_report(self, parts):
assert len(parts) == 2
decision = parts[1]
last = self.guesses[-1]
if decision == CORRECT:
self.secret = last
print(f'Server: {last} is {decision}')
example(20)
example(21)
example(22)
example(23)
class AsyncClient(AsyncConnectionBase): # Changed
def __init__(self, *args):
super().__init__(*args)
self._clear_state()
def _clear_state(self):
self.secret = None
self.last_distance = None
@contextlib.asynccontextmanager # Changed
async def session(self, lower, upper, secret): # Changed
print(f'Guess a number between {lower} and {upper}!'
f' Shhhhh, it\'s {secret}.')
self.secret = secret
await self.send(f'PARAMS {lower} {upper}') # Changed
try:
yield
finally:
self._clear_state()
await self.send('PARAMS 0 -1') # Changed
async def request_numbers(self, count): # Changed
for _ in range(count):
await self.send('NUMBER') # Changed
data = await self.receive() # Changed
yield int(data)
if self.last_distance == 0:
return
async def report_outcome(self, number): # Changed
new_distance = math.fabs(number - self.secret)
decision = UNSURE
if new_distance == 0:
decision = CORRECT
elif self.last_distance is None:
pass
elif new_distance < self.last_distance:
decision = WARMER
elif new_distance > self.last_distance:
decision = COLDER
self.last_distance = new_distance
await self.send(f'REPORT {decision}') # Changed
# Make it so the output printing is in
# the same order as the threaded version.
await asyncio.sleep(0.01)
return decision
example(24)
import asyncio
async def handle_async_connection(reader, writer):
session = AsyncSession(reader, writer)
try:
await session.loop()
except EOFError:
pass
async def run_async_server(address):
server = await asyncio.start_server(
handle_async_connection, *address)
async with server:
await server.serve_forever()
example(25)
async def run_async_client(address):
# Wait for the server to listen before trying to connect
await asyncio.sleep(0.1)
streams = await asyncio.open_connection(*address) # New
client = AsyncClient(*streams) # New
async with client.session(1, 5, 3):
results = [(x, await client.report_outcome(x))
async for x in client.request_numbers(5)]
async with client.session(10, 15, 12):
async for number in client.request_numbers(5):
outcome = await client.report_outcome(number)
results.append((number, outcome))
_, writer = streams # New
writer.close() # New
await writer.wait_closed() # New
return results
example(26)
async def main_async():
address = ('127.0.0.1', 4321)
server = run_async_server(address)
asyncio.create_task(server)
results = await run_async_client(address)
for number, outcome in results:
print(f'Client: {number} is {outcome}')
logging.getLogger().setLevel(logging.ERROR)
asyncio.run(main_async())
logging.getLogger().setLevel(logging.DEBUG)
|
[
"alexchonglian@gmail.com"
] |
alexchonglian@gmail.com
|
969d035c63ace1f7b4c413e93f06400bb2d2bf34
|
119437adb7830659307c18b79a9cc3f6bfc6fe40
|
/transformers_learning/english_sequence_labeling/torch_model_train.py
|
234011630b2febd960451887847252ee4bdd95c0
|
[] |
no_license
|
percent4/PyTorch_Learning
|
478bec35422cdc66bf41b4258e29fbcb6d24f60c
|
24184d49032c9c9a68142aff89dabe33adc17b52
|
refs/heads/master
| 2023-03-31T03:01:19.372830
| 2023-03-17T17:02:39
| 2023-03-17T17:02:39
| 171,400,828
| 16
| 7
| null | 2023-09-02T08:53:26
| 2019-02-19T03:47:41
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,513
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/1/31 15:01
# @Author : Jclian91
# @File : torch_model_train.py
# @Place : Yangpu, Shanghai
import json
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import BertForTokenClassification, BertTokenizer, BertConfig
from util import event_type, train_file_path, test_file_path
from util import MAX_LEN, BERT_MODEL_DIR, TRAIN_BATCH_SIZE, VALID_BATCH_SIZE, EPOCHS, LEARNING_RATE
from load_data import read_data
# tokenizer and label_2_id_dict
with open("{}_label2id.json".format(event_type), "r", encoding="utf-8") as f:
tag2idx = json.loads(f.read())
idx2tag = {v: k for k, v in tag2idx.items()}
class CustomDataset(Dataset):
def __init__(self, tokenizer, sentences, labels, max_len):
self.len = len(sentences)
self.sentences = sentences
self.labels = labels
self.tokenizer = tokenizer
self.max_len = max_len
def __getitem__(self, index):
sentence = str(self.sentences[index])
inputs = self.tokenizer.encode_plus(
sentence,
None,
add_special_tokens=True,
max_length=self.max_len,
truncation=True,
padding="max_length",
# pad_to_max_length=True,
return_token_type_ids=True
)
ids = inputs['input_ids']
mask = inputs['attention_mask']
        # pad with 0 and truncate a copy; the original list.extend mutated
        # self.labels[index] in place, growing the stored labels every epoch
        label = self.labels[index] + [0] * MAX_LEN
        label = label[:MAX_LEN]
return {
'ids': torch.tensor(ids, dtype=torch.long),
'mask': torch.tensor(mask, dtype=torch.long),
'tags': torch.tensor(label, dtype=torch.long)
}
def __len__(self):
return self.len
# Creating the customized model
class BERTClass(torch.nn.Module):
def __init__(self):
super(BERTClass, self).__init__()
config = BertConfig.from_pretrained("./bert-base-uncased", num_labels=len(list(tag2idx.keys())))
self.l1 = BertForTokenClassification.from_pretrained('./bert-base-uncased', config=config)
# self.l2 = torch.nn.Dropout(0.3)
# self.l3 = torch.nn.Linear(768, 200)
def forward(self, ids, mask, labels):
output_1 = self.l1(ids, mask, labels=labels)
# output_2 = self.l2(output_1[0])
# output = self.l3(output_2)
return output_1
def flat_accuracy(preds, labels):
flat_preds = np.argmax(preds, axis=2).flatten()
flat_labels = labels.flatten()
return np.sum(flat_preds == flat_labels)/len(flat_labels)
def valid(model, testing_loader):
model.eval()
eval_loss = 0; eval_accuracy = 0
nb_eval_steps, nb_eval_examples = 0, 0
with torch.no_grad():
for _, data in enumerate(testing_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
output = model(ids, mask, labels=targets)
loss, logits = output[:2]
logits = logits.detach().cpu().numpy()
label_ids = targets.to('cpu').numpy()
accuracy = flat_accuracy(logits, label_ids)
eval_loss += loss.mean().item()
eval_accuracy += accuracy
nb_eval_examples += ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
if __name__ == '__main__':
# Preparing for CPU or GPU usage
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tokenizer = BertTokenizer.from_pretrained('./{}'.format(BERT_MODEL_DIR))
# Creating the Dataset and DataLoader for the neural network
train_sentences, train_labels = read_data(train_file_path)
train_labels = [[tag2idx.get(l) for l in lab] for lab in train_labels]
test_sentences, test_labels = read_data(test_file_path)
test_labels = [[tag2idx.get(l) for l in lab] for lab in test_labels]
print("TRAIN Dataset: {}".format(len(train_sentences)))
print("TEST Dataset: {}".format(len(test_sentences)))
training_set = CustomDataset(tokenizer, train_sentences, train_labels, MAX_LEN)
testing_set = CustomDataset(tokenizer, test_sentences, test_labels, MAX_LEN)
train_params = {'batch_size': TRAIN_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
test_params = {'batch_size': VALID_BATCH_SIZE, 'shuffle': True, 'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# train the model
model = BERTClass()
model.to(dev)
optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
for epoch in range(EPOCHS):
model.train()
for _, data in enumerate(training_loader):
ids = data['ids'].to(dev, dtype=torch.long)
mask = data['mask'].to(dev, dtype=torch.long)
targets = data['tags'].to(dev, dtype=torch.long)
loss = model(ids, mask, labels=targets)[0]
# optimizer.zero_grad()
if _ % 50 == 0:
print(f'Epoch: {epoch}, Batch: {_}, Loss: {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
# model evaluate
valid(model, testing_loader)
torch.save(model.state_dict(), '{}_ner.pth'.format(event_type))
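    # --- Hedged inference sketch (not in the original script): decode predicted
    # tag ids back to labels with idx2tag for one test batch. transformers'
    # BertForTokenClassification returns (loss, logits) when labels are passed.
    # with torch.no_grad():
    #     batch = next(iter(testing_loader))
    #     logits = model(batch['ids'].to(dev), batch['mask'].to(dev),
    #                    labels=batch['tags'].to(dev))[1]
    #     pred_ids = logits.argmax(dim=2)[0].cpu().tolist()
    #     print([idx2tag[i] for i in pred_ids])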
|
[
"1137061634@qq.com"
] |
1137061634@qq.com
|
1d851a0b72fbdf9725b48f0991a89504fbb6cf55
|
e3d6acf088991d776ed17b61e464ef128b83e6da
|
/src/enums/type.py
|
ce89b3f44f25b45100f5afe9e4030269c107e187
|
[
"Apache-2.0"
] |
permissive
|
antamb/google-personal-assistant
|
407c6a0e420d667810571bcb5b58a5a3130bde1b
|
a81d1e65cd5d42e963bd359482a0ba7e3879a1d5
|
refs/heads/master
| 2020-12-03T02:09:26.805036
| 2017-07-01T09:12:06
| 2017-07-01T09:12:06
| 95,910,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from enum import Enum
class Type(Enum):
OTHER = 1
EVENT = 2
PERSON = 3
UNKNOWN = 4
LOCATION = 5
WORK_OF_ART = 6
ORGANIZATION = 7
CONSUMER_GOOD = 8
entities_type = {
Type.EVENT: 'Event',
Type.PERSON: 'Person',
Type.UNKNOWN: 'Unknown',
Type.OTHER: 'Other types',
Type.LOCATION: 'Location',
    Type.WORK_OF_ART: 'Work of art',
Type.ORGANIZATION: 'Organization',
Type.CONSUMER_GOOD: 'Consumer goods',
}
def get_type_from_value(value):
value_type = Type.UNKNOWN
for t in Type:
if entities_type[t] == value:
value_type = t
return value_type
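# --- Hedged usage sketch (not from the original source): round-tripping a
# display label to its enum member and back.
if __name__ == "__main__":
    assert get_type_from_value('Person') is Type.PERSON
    assert get_type_from_value('no such label') is Type.UNKNOWN
    print(entities_type[get_type_from_value('Location')])  # prints: Location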
|
[
"anta.aidara@gmail.com"
] |
anta.aidara@gmail.com
|
4a4c5276c3bf38dc20522b4f06a995c51f55462c
|
79d471c012ec9220836cf529d6062803c6fadb03
|
/localizer.py
|
9e8eb0782ede0b517412d026a4c69f2c0423f56f
|
[] |
no_license
|
Knevari/histogram-filter
|
4e4c6604258478f14c1d1bd0604faed3d7a56859
|
47930837ff816769dbc9d6cb7f9ccc19d75040d3
|
refs/heads/master
| 2020-12-09T21:37:18.329609
| 2020-01-17T14:22:27
| 2020-01-17T14:22:27
| 233,422,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
from helpers import normalize, blur
def initialize_beliefs(grid):
height = len(grid)
width = len(grid[0])
area = height * width
belief_per_cell = 1.0 / area
beliefs = []
    for i in range(height):
        row = []
        for j in range(width):
            row.append(belief_per_cell)
        beliefs.append(row)
return beliefs
def sense(color, grid, beliefs, p_hit, p_miss):
new_beliefs = []
height = len(grid)
width = len(grid[0])
    for i in range(height):
        row = []
        for j in range(width):
            hit = (grid[i][j] == color)
            row.append(beliefs[i][j] * (hit * p_hit + (1-hit) * p_miss))
        new_beliefs.append(row)
return normalize(new_beliefs)
def move(dy, dx, beliefs, blurring):
height = len(beliefs)
width = len(beliefs[0])
new_G = [[0.0 for i in range(width)] for j in range(height)]
for i, row in enumerate(beliefs):
for j, cell in enumerate(row):
new_i = (i + dy) % height
new_j = (j + dx) % width
new_G[int(new_i)][int(new_j)] = cell
return blur(new_G, blurring)
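# --- Hedged usage sketch (not part of the original module): one sense/move
# cycle on a tiny two-colour grid. normalize and blur come from this repo's
# helpers module, so this only runs where that module is importable.
if __name__ == "__main__":
    grid = [['r', 'g', 'g'],
            ['g', 'r', 'g']]
    beliefs = initialize_beliefs(grid)
    beliefs = sense('r', grid, beliefs, p_hit=0.9, p_miss=0.1)
    beliefs = move(0, 1, beliefs, blurring=0.1)
    for row in beliefs:
        print(row)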
|
[
"mateus7319@gmail.com"
] |
mateus7319@gmail.com
|
8ad21205f4c323d5f5949973e2286fd410352fdf
|
24b4dcd555dd3e644467aec13edd671afdd3f49c
|
/SU2/opt/UQ.py
|
fe95153422af7b38c4541bf5320368b4442f680a
|
[] |
no_license
|
garcgutierrez/adj_sto_su2
|
2c8294b65dcef8faf4bf1f453a413bca429a6751
|
22ec37839ed0a08f5dbe1935d18205f085b28a70
|
refs/heads/master
| 2022-11-16T22:13:59.666403
| 2020-07-14T15:38:22
| 2020-07-14T15:38:22
| 279,776,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
from pylab import *
import chaospy as cp
class UQ(object):
def __init__(self):
self.alpha_dist = cp.Uniform(-0.5,0.5)
self.Ma_dist = cp.Uniform(0.1,0.2)
self.T = cp.Uniform(273, 274)
self.distribution = cp.J(self.alpha_dist, self.Ma_dist)
self.computeQuadrature()
def computeQuadrature(self, nOrder=2, ruleN='C'):
self.absissas, self.weights = cp.generate_quadrature(
order = nOrder, dist=self.distribution, rule=ruleN)
self.Machs = around(array(self.absissas)[1,:],2)
self.AOAs = around(array(self.absissas)[0,:],3)
self.Nquadrature = len(self.Machs)
self.polynomial_expansion = cp.orth_ttr(nOrder, self.distribution)
def computeProperties(self, numArray, debug=True):
if(debug):
print('shape: {}'.format(shape(numArray)))
print('Nq:{}'.format(self.Nquadrature))
print('Variables:{}'.format(numArray))
self.poly_approx = cp.fit_quadrature(
self.polynomial_expansion, self.absissas,
self.weights, numArray)
mean = cp.E(self.poly_approx, self.distribution)
sigma = cp.Std(self.poly_approx, self.distribution)
return mean, sigma
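# --- Hedged usage sketch (not from the original source): push a toy model
# through the quadrature nodes and recover mean/std. The linear "model" is an
# arbitrary stand-in for SU2 evaluations; the chaospy calls above assume an
# older chaospy API, and this sketch inherits that assumption.
if __name__ == "__main__":
    uq = UQ()
    # one model evaluation per quadrature node (AOA, Mach)
    evals = array([aoa + 10.0 * ma for aoa, ma in zip(uq.AOAs, uq.Machs)])
    mean, sigma = uq.computeProperties(evals, debug=False)
    print('mean = {}, sigma = {}'.format(mean, sigma))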
|
[
"garcgutierrez@gmail.com"
] |
garcgutierrez@gmail.com
|
b5fd5e255e2b4a38a8967b95ec48bf042b24c2d1
|
939e8a8838ff66f72655a7c103bf79b31ccd6966
|
/MyApp/models.py
|
94b7a286c938b6311a14ab45019c9aed1b7cf375
|
[] |
no_license
|
github653224/ApiTest
|
3647292471fe11d8a124e0bd41061a2de3add5ed
|
9c1fc9c05dce38a4e2618c43943f8f44090ab4f2
|
refs/heads/master
| 2023-02-03T16:54:21.599640
| 2020-12-18T10:17:01
| 2020-12-18T10:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,410
|
py
|
from django.db import models
# Create your models here.
class DB_tucao(models.Model):
    user = models.CharField(max_length=30,null=True) # name of the person posting the feedback
    text = models.CharField(max_length=1000,null=True) # feedback text
    ctime = models.DateTimeField(auto_now=True) # creation time
    def __str__(self):
        return self.text + str(self.ctime)
class DB_home_href(models.Model):
    name = models.CharField(max_length=30,null=True) # hyperlink name
    href = models.CharField(max_length=2000,null=True) # hyperlink target
    def __str__(self):
        return self.name
class DB_project(models.Model):
    name = models.CharField(max_length=100,null=True) # project name
    remark = models.CharField(max_length=1000,null=True) # project notes
    user = models.CharField(max_length=15,null=True) # name of the project creator
    other_user = models.CharField(max_length=200,null=True) # other project members
    def __str__(self):
        return self.name
class DB_apis(models.Model):
    project_id = models.CharField(max_length=10,null=True) # project id
    name = models.CharField(max_length=100,null=True) # API name
    api_method = models.CharField(max_length=10,null=True) # request method
    api_url = models.CharField(max_length=1000,null=True) # url
    api_header = models.CharField(max_length=1000,null=True) # request headers
    api_login = models.CharField(max_length=10,null=True) # whether to send a login session
    api_host = models.CharField(max_length=100,null=True) # host
    des = models.CharField(max_length=100,null=True) # description
    body_method = models.CharField(max_length=20,null=True) # request body encoding
    api_body = models.CharField(max_length=1000,null=True) # request body
    result = models.TextField(null=True) # response body; stored as a text field because it can be very large
    sign = models.CharField(max_length=10,null=True) # whether to verify the signature
    file_key = models.CharField(max_length=50,null=True) # file key
    file_name = models.CharField(max_length=50,null=True) # file name
    public_header = models.CharField(max_length=1000,null=True) # global variable: request headers
    last_body_method = models.CharField(max_length=20,null=True) # previous request body encoding
    last_api_body = models.CharField(max_length=1000,null=True) # previous request body
    def __str__(self):
        return self.name
class DB_apis_log(models.Model):
    user_id = models.CharField(max_length=10,null=True) # owning user id
    api_method = models.CharField(max_length=10,null=True) # request method
    api_url = models.CharField(max_length=1000,null=True) # url
    api_header = models.CharField(max_length=1000,null=True) # request headers
    api_login = models.CharField(max_length=10,null=True) # whether to send a login session
    api_host = models.CharField(max_length=100,null=True) # host
    body_method = models.CharField(max_length=20,null=True) # request body encoding
    api_body = models.CharField(max_length=1000,null=True) # request body
    sign = models.CharField(max_length=10,null=True) # whether to verify the signature
    file_key = models.CharField(max_length=50,null=True) # file key
    file_name = models.CharField(max_length=50,null=True) # file name
    def __str__(self):
        return self.api_url
class DB_cases(models.Model):
    project_id = models.CharField(max_length=10,null=True) # owning project id
    name = models.CharField(max_length=50,null=True) # test case name
    def __str__(self):
        return self.name
class DB_step(models.Model):
    Case_id = models.CharField(max_length=10,null=True) # owning case id
    name = models.CharField(max_length=50,null=True) # step name
    index = models.IntegerField(null=True) # execution order
    api_method = models.CharField(max_length=10,null=True) # request method
    api_url = models.CharField(max_length=1000,null=True) # url
    api_host = models.CharField(max_length=100,null=True) # host
    api_header = models.CharField(max_length=1000,null=True) # request headers
    api_body_method = models.CharField(max_length=10,null=True) # request body encoding
    api_body = models.CharField(max_length=10,null=True) # request body
    get_path = models.CharField(max_length=500,null=True) # extract from response: path expression
    get_zz = models.CharField(max_length=500,null=True) # extract from response: regex
    assert_zz = models.CharField(max_length=500,null=True) # assert on response: regex
    assert_qz = models.CharField(max_length=500,null=True) # assert on response: full-text match
    assert_path = models.CharField(max_length=500,null=True) # assert on response: path expression
    mock_res = models.CharField(max_length=1000,null=True) # mock response
    public_header = models.CharField(max_length=1000,null=True) # global variable: request headers
    def __str__(self):
        return self.name
class DB_project_header(models.Model):
    project_id = models.CharField(max_length=10,null=True) # owning project id
    name = models.CharField(max_length=20,null=True) # header variable name
    key = models.CharField(max_length=20,null=True) # header key
    value = models.TextField(null=True) # header value; a text field because cookies can reach thousands of characters
    def __str__(self):
        return self.name
class DB_host(models.Model):
    host = models.CharField(max_length=100,null=True) # host value
    des = models.CharField(max_length=100,null=True) # host description
    def __str__(self):
        return self.host
|
[
"wangzijia@xiaozhu.com"
] |
wangzijia@xiaozhu.com
|
1981adb6d51f44d042af9407d1b2ef43e248447e
|
6784941fe6b67b5531a6154becc9d9a641cd64d9
|
/ActualizaDDBB.py
|
c330d03d7fe9392fa24943b1577a64adb505fd50
|
[] |
no_license
|
alexistdk/todo-list
|
07ba52926d94b2c05b8cca0854549cebed6e335b
|
62da1526d57fccc9f8d2c7d255efc1bd7dfe0fe8
|
refs/heads/main
| 2022-12-30T06:15:12.107125
| 2020-10-22T02:05:17
| 2020-10-22T02:05:17
| 213,752,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,411
|
py
|
from datetime import date
from ConectarDDBB import *
class ActualizaDDBB(ConectarDDBB):
@classmethod
def crear_tarea(cls, titulo, descripcion, id_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
fecha = date.today()
cursor.execute(cls.insertar_tarea(), (fecha, titulo, descripcion, 0, id_usuario))
except Error:
print("Error ", Error)
finally:
db.commit()
@classmethod
def existe_tarea(cls, titulo, id_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
            cursor.execute(cls.busca_tarea(), (titulo, id_usuario))
return True
except Error:
print("Error ", Error)
@classmethod
def actualizar_tarea(cls, id_tarea):
try:
db = cls.conexion()
cursor = db.cursor()
            descripcion = input("New description: ")
cursor.execute(cls.actualizar_descripcion(), (descripcion, id_tarea))
except Error:
print("No existe la tarea!", Error)
finally:
db.commit()
@classmethod
def cambiar_estado(cls, id_tarea):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.actualizar_estado(), (id_tarea, ))
except Error:
print("Error ", Error)
finally:
db.commit()
@classmethod
def listar_tareas(cls, id_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.seleccionar_tareas(), (id_usuario, ))
records = cursor.fetchall()
print("\nLista de tareas\n ")
for row in records:
print("ID = ", row[0])
print("Fecha = ", row[1])
print("Título = ", row[2])
print("Descripción = ", row[3])
print("Estado = ", row[4], "\n")
except Error:
print("Error al leer la lista de tareas", Error)
@classmethod
def eliminar_tarea(cls, id_tarea):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.borrar_tarea(), (id_tarea,))
except Error:
print("Error al eliminar la tarea", Error)
finally:
db.commit()
@classmethod
def registrar_usuario(cls, nombre_usuario, email, contrasenia):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.registrarusuario(), (nombre_usuario, email, contrasenia))
except Error:
print("Error", Error)
finally:
db.commit()
@classmethod
def loguear_usuario(cls, nombre_usuario, contrasenia):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.existe_usuario(), (nombre_usuario, contrasenia))
return cursor.fetchone()[0]
except Error:
print("Error", Error)
finally:
db.commit()
@classmethod
def id_usuario(cls, nombre_usuario):
try:
db = cls.conexion()
cursor = db.cursor()
cursor.execute(cls.retorna_id_usuario(), (nombre_usuario, ))
return cursor.fetchone()[0]
except Error:
print("Error", Error)
|
[
"alexisndelgado@gmail.com"
] |
alexisndelgado@gmail.com
|
70c3c06f681b066ac0388b0d3c1198b4074e9724
|
7f24023d365e013ec0924844c1a872edfb0c75b4
|
/tests/trac/trac-0186/check.py
|
08b3119a43dd3dd72dd22febf93509b88bca7eca
|
[
"Python-2.0",
"MIT",
"Apache-2.0"
] |
permissive
|
pabigot/pyxb
|
cd42c024607572c6363682d389e9296caf3f2857
|
5ee5ba54c9f702dc9c9efc2731ee547ecd4dae4a
|
refs/heads/next
| 2023-05-11T03:23:19.599756
| 2023-04-29T20:38:15
| 2023-04-29T20:45:13
| 20,547,850
| 130
| 63
|
Apache-2.0
| 2021-08-19T16:52:18
| 2014-06-06T01:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 493
|
py
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.utils.domutils
import resources
import unittest
class ExternalTrac0186 (unittest.TestCase):
def testXBIngress (self):
instance = resources.XBIngress(match='all', action1='none', digits1='', action2='none', digits2='')
def testXBMatch (self):
instance = resources.XBMatch('all')
if '__main__' == __name__:
unittest.main()
|
[
"pab@pabigot.com"
] |
pab@pabigot.com
|
88c0d4f7001e4d7f2d2a994d979b9b99a1ed7d08
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/icml2020/hand/buffers/pen1.py
|
c92cde36156496ccf82fa584986ffbc35a17a452
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,576
|
py
|
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy, BinnedGMMPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
buffer_policy_class=BinnedGMMPolicy,
buffer_policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
num_gaussians=11,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
use_validation_buffer=True,
)
search_space = {
'env': ["pen-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.5, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [11, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
074e39bc74b5205dfecb5d90f2cd5a25847b0312
|
bb93b0907ed8f7c8c0e2bed23dcf2fe948c39b8d
|
/08-tuples.py
|
34abd356c6c615e4a40e1344285aeda269431484
|
[] |
no_license
|
hue113/complete-python
|
103b0e8b2c74a6a85a0c69227790fa17cada7e19
|
c82ba9dd9a8c7ef2b84e2e6b8b33ba44f3974049
|
refs/heads/master
| 2023-03-21T17:30:30.292050
| 2021-03-14T22:40:16
| 2021-03-14T22:40:16
| 347,771,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31
|
py
|
# Tuple: like an immutable list
|
[
"huepham113@gmail.com"
] |
huepham113@gmail.com
|
54316a4f35c167022b648ae75bf34184134084ad
|
b0c0706e4c4f41a729ec235e31ba90385eb44845
|
/coinlist/migrations/0002_auto_20180502_1107.py
|
77d0be8b066d8e33d3c6253e4f0c6ef73b7a80a7
|
[] |
no_license
|
kupreeva/TopCoin
|
d7a6a56e6df869c0f978024c9e34351c75a0a580
|
babe9e306a38ab4dbd457b6c3e579fa0c3cf86f4
|
refs/heads/master
| 2020-03-14T23:22:51.481252
| 2018-05-02T15:40:09
| 2018-05-02T15:40:09
| 131,843,712
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-05-02 11:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coinlist', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='coin',
name='coins_daily',
field=models.FloatField(),
),
]
|
[
"kristine.kupreeva@gmail.com"
] |
kristine.kupreeva@gmail.com
|
0fc64ab80d0fe321eccbc84cf5dfdc3c647f3803
|
966b8ce654c67bbabd4c5166e7bb6e2a7086d172
|
/xml_read2.py
|
8c0a0ffc82d17475694707f66371dacbfe122d34
|
[] |
no_license
|
muzklj/learn_code
|
c4a316fcdd4d8348fb7959b66194a60d9f89b010
|
7dde268175391c2d4a2911fd40074ada5e7016a4
|
refs/heads/main
| 2023-08-08T16:25:37.540950
| 2023-07-24T00:31:42
| 2023-07-24T00:31:42
| 396,725,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
'''
Author: MuZonghan
Date: 2021-07-23 15:20:07
LastEditTime: 2021-08-19 15:14:07
Description: count the number of object categories in the xml files
FilePath: /4pcodes/learncodes/xml_read2.py
'''
import os
import xml.dom.minidom
xml_path = '/home/trunk/muzklj/5datasets/bigdata/img-txt2/sec-all1-xml/'
files = os.listdir(xml_path)
gt_dict = {}
if __name__ == '__main__':
for xm in files:
xmlfile = xml_path + xm
        dom = xml.dom.minidom.parse(xmlfile)  # parse the xml document
        root = dom.documentElement  # get the document element object
filenamelist = root.getElementsByTagName("filename")
filename = filenamelist[0].childNodes[0].data
objectlist = root.getElementsByTagName("object")
for objects in objectlist:
namelist = objects.getElementsByTagName("name")
objectname = namelist[0].childNodes[0].data
if objectname == '-':
print(filename)
if objectname in gt_dict:
gt_dict[objectname] += 1
else:
gt_dict[objectname] = 1
dic = sorted(gt_dict.items(), key=lambda d: d[1], reverse=True)
print(dic)
# print(len(dic))
|
[
"“muzklj@163.com”"
] |
“muzklj@163.com”
|
a6247ca012289c8bc806e6836e82eb8bd9df5793
|
9b01f09991618b13deeb75044c66a721253eba52
|
/Baysim.py
|
f066c87bfbada580e68b3787a8cd3935c53a8ec3
|
[] |
no_license
|
BlackDragonBayliss/question-bank-app
|
2f0c5e1fb87395c1e607064639637029a219c154
|
2a8d3c05cf554b092981c44b7f05271e83bdf4ae
|
refs/heads/master
| 2020-04-27T09:04:30.279021
| 2019-06-21T21:08:38
| 2019-06-21T21:08:38
| 174,199,735
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
from StateStoreComposite import StateStoreComposite
def main():
instanceStateStoreComposite = StateStoreComposite()
if __name__ == "__main__": main()
|
[
"wallacecarr4@gmail.com"
] |
wallacecarr4@gmail.com
|
cd154db704763f331c942f98c0e560adc5f97522
|
96681aca57fa55e82aeb7d9ca56041f20498bf37
|
/account/forms.py
|
fb3724a6e226c77ad07cd4721c0a98f1a1b1666d
|
[] |
no_license
|
karyshev63rus/docent63
|
191c57ae6310df91b5e7a5657ffab2f3fdb2249f
|
67c4312db1be3c79c287814fda6d91b039520cfe
|
refs/heads/master
| 2023-07-10T23:44:23.209061
| 2021-08-14T18:44:50
| 2021-08-14T18:44:50
| 373,348,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,044
|
py
|
from django import forms
from django.contrib.auth.models import User
from .models import Profile
class UserRegistrationForm(forms.ModelForm):
    password = forms.CharField(label='Password',
                               widget=forms.PasswordInput)
    password2 = forms.CharField(label='Repeat password',
                                widget=forms.PasswordInput)
    class Meta:
        model = User
        fields = ('username', 'first_name', 'email')
        labels = {
            'username': 'Username',
            'first_name': 'First name',
            'email': 'Email address',
        }
    def clean_password2(self):
        cd = self.cleaned_data
        if cd['password'] != cd['password2']:
            raise forms.ValidationError("Passwords do not match")
        return cd['password2']
class UpdateUserForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
widgets = {
'first_name': forms.TextInput(
attrs={'class': 'form-control'}
),
'last_name': forms.TextInput(
attrs={'class': 'form-control'}
),
'email': forms.EmailInput(
attrs={'class': 'form-control'}
),
}
class UpdateProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('phone_number', 'address', 'postal_code', 'city', 'country')
widgets = {
'phone_number': forms.TextInput(
attrs={'class': 'form-control'}
),
'address': forms.TextInput(
attrs={'class': 'form-control'}
),
'postal_code': forms.TextInput(
attrs={'class': 'form-control'}
),
'city': forms.TextInput(
attrs={'class': 'form-control'}
),
'country': forms.TextInput(
attrs={'class': 'form-control'}
)
}
|
[
"karyshev63rus@gmail.com"
] |
karyshev63rus@gmail.com
|
a666a99db10c5f01012215a5c6ee570d7c03bffa
|
09e4bd1f19806b0ed223066be6fa381fb2b65598
|
/monitor/task.py
|
9ba0e801e38687e93741f9f85cf61d276d4c6df7
|
[] |
no_license
|
icellus/shell_scripts
|
f220a90f37a8070b04302a3be80ef03a58517134
|
7dc4d85b5b7fcd6ff98ebc6bdfa6ae4d3df55c48
|
refs/heads/master
| 2021-08-27T16:43:12.397195
| 2021-08-23T02:40:07
| 2021-08-23T02:40:07
| 143,691,816
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/05/24 00:00
# @Desc : Scheduled task: run a command at the required time interval
# @File : task.py
# @Software: PyCharm
import time, os
def roll_back(cmd, inc = 60):
    while True:
        # run the given command; the original called task() through a broken
        # self-import and never used cmd, so os.system matches the stated intent
        os.system(cmd)
        time.sleep(inc)
roll_back("echo %time%", 30)
|
[
"2283411628@qq.com"
] |
2283411628@qq.com
|
efa28e9d986d4fe70e7cfe524ef2a44c04fde8b2
|
38ce870a1a4a9862b5d054aca31f5c0337c82ead
|
/arduino/libraries/ledtable/documentation/pixelorder_double_spiral.py
|
533bf98979abf1d143249c85d3f17016b55d2896
|
[
"MIT"
] |
permissive
|
pmerlin/ledtable
|
6a4cde37f6987be1a2ae6567aece1ec48c5bc60b
|
a94d276f8a06e0f7f05f5cc704018c899e56bd9f
|
refs/heads/master
| 2020-04-05T18:38:14.014614
| 2017-01-10T17:15:26
| 2017-01-10T17:15:26
| 157,106,815
| 1
| 0
|
MIT
| 2018-11-11T18:10:14
| 2018-11-11T18:10:14
| null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
def s(x, y, w, h):
if y == 0: return x
return s(y - 1, w - 1 - x, h - 1, w) + w
def s2(x, y, w, h):
    m = min(x, y, w-1-x, h-1-y)
    # closed form of the total length of the m outer rings:
    # sum((w - m_*2) * 2 + (h - m_*2) * 2 - 4 for m_ in range(m))
    outer = m * 2 * (w + h - 2*m)
    _x = x - m
    _y = y - m
    _w = w - 2 * m
    _h = h - 2 * m
    if _y == 0: return outer + _x
    elif _x == _w - 1: return outer + _w + _y - 1
    elif _y == _h - 1: return outer + _w + _h + _w - 3 - _x
    elif _x == 0: return outer + _w + _h + _w + _h - 4 - _y
    else: return "!{}{}".format(x, y)
w = 10
h = 14
for y in range(h):
print(*(s(x, y, w, h) for x in range(w)), sep = "\t")
print('--------------')
for y in range(h):
print(*(s2(x, y, w, h) for x in range(w)), sep = "\t")
|
[
"niccokunzmann@rambler.ru"
] |
niccokunzmann@rambler.ru
|
796df0bd81da274209df3eab5785899295b1efb8
|
143fa4b592ca6cbd420d78ceb6991ecce58370cb
|
/src/anpocs44.py
|
9920404c4f039c612ad78e4676f3c7ed73642beb
|
[
"MIT"
] |
permissive
|
vmussa/anpocs-scraper
|
3b07d9f861275404acc870910682aa79604a20b2
|
dd042f3765bea7e699b77bcf323738e761e70b17
|
refs/heads/main
| 2023-04-28T23:12:01.330071
| 2021-05-17T23:29:11
| 2021-05-17T23:29:11
| 356,652,869
| 3
| 3
|
MIT
| 2021-05-15T00:41:53
| 2021-04-10T17:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,128
|
py
|
"""Código para a aquisição dos dados dos Encontros Anuais da ANPOCS."""
from bs4 import BeautifulSoup
import pandas as pd
import re
from tqdm import tqdm
import sys
from os import mkdir, sep
from os.path import abspath, dirname, exists
import requests
from helium import (
start_chrome, click, get_driver, kill_browser, find_all, S
)
EVENT_ID = 44
BASE_URLS = [
"https://www.anpocs2020.sinteseeventos.com.br/atividade/hub/gt",
"https://www.anpocs2020.sinteseeventos.com.br/atividade/hub/simposioposgraduada"
]
def get_page_source(url):
"""Obtém soup object para páginas não interativas."""
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
return soup
def get_urls(base_urls):
"""Obtém todos os URLs das páginas a serem raspadas."""
urls = []
for base_url in base_urls:
soup = get_page_source(base_url)
urls_sources = soup.select("h5 > a")
urls += [a['href'] for a in urls_sources]
return urls
def get_interactive_page_source(url):
    """Get the fully rendered source of an interactive page."""
    # start Chrome to render the page source
    try:
        start_chrome(url, headless=True)
    except Exception:
        print(
            "Error: you need to install Google Chrome and ChromeDriver "
            "to run this scraper."
        )
        sys.exit(1)
    driver = get_driver()
    # click every "Veja mais!" button to reveal the abstract data
    print(f"Scraping page \"{driver.title}\". This may take a few seconds...")
    buttons = find_all(S("//span[@onClick]"))
    for _ in tqdm(range(len(buttons))):
        click("Veja mais!")
    print('Finished scraping the page.')
    # build a soup object from the source rendered by helium
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    # close Chrome
    kill_browser()
    return soup
def get_page_data(soup):
    """Get data on the papers presented in a session."""
    # extract text data via each field's CSS selectors
    authors = [autor.text for autor in soup.select('i')]
    titles = [titulo.text for titulo in soup.select('li > b')]
    abstract_source = soup.find_all('div', id=re.compile('^resumoFull'))
    abstracts = [abstract.text.strip() for abstract in abstract_source]
    session = soup.select_one('h3.first').text.strip()
    # build a dict with the collected data
    data = {
        'autores': authors,
        'titulo': titles,
        'resumo': abstracts,
        'sessao': session,
        'id_evento': EVENT_ID
    }
    return data
def export_all_pages_data(urls):
"""Obtém e exporta para CSV dados de trabalhos de todas as sessões."""
for url in urls:
soup = get_interactive_page_source(url)
data = get_page_data(soup)
df = pd.DataFrame(data)
output_path = f"{dirname(dirname(abspath(__file__)))}{sep}output{sep}"
filename = "resumos_anpocs44.csv"
if exists(output_path+filename):
df.to_csv(
output_path + filename,
mode='a',
index=False,
header=False
)
else:
try:
mkdir(output_path)
df.to_csv(output_path + filename, index=False)
except FileExistsError:
df.to_csv(output_path + filename, index=False)
def main():
    print(
        "Loading some information. Scraping of the 44th ANPOCS Annual "
        "Meeting will start shortly..."
    )
    urls = get_urls(BASE_URLS)
    # check whether a previous scrape already left files in the output folder
    output_path = f"{dirname(dirname(abspath(__file__)))}{sep}output{sep}"
    filename = "resumos_anpocs44.csv"
    if exists(output_path+filename):
        raise Exception(
            "The scraped data is already in the output folder. "
            "Remove it before running the scraper."
        )
    export_all_pages_data(urls)
    print("The 44th Meeting was scraped successfully.")
if __name__ == "__main__":
    main()
|
[
"vtrmussa@gmail.com"
] |
vtrmussa@gmail.com
|
58b16c0c8049c70a9146960656d3f2f7323cab0e
|
b251a605a8f4cf62970df3d7c2e75a46fc2445b2
|
/sva.py
|
aac2a7ae497d54e722449331dc4c7be943a41429
|
[] |
no_license
|
wheatfields/Q
|
f9fefed09cc598ab3feb872bc87f8dda27c166e1
|
a5dd5593b559c2ceae1d6d41337af944f5000e6f
|
refs/heads/main
| 2023-08-03T22:59:08.843170
| 2021-07-02T05:30:59
| 2021-07-02T05:30:59
| 376,154,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,030
|
py
|
# -*- coding: utf-8 -*-
"""
@author: adamw
"""
import pandas as pd
class sva:
"""
Initialise with a path to the document & a sheet name.
"""
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
# initiate nested classes
self.dlr_parameters = self.dlr_parameters(path, sheet_name)
self.termination_rates = self.termination_rates(path, sheet_name)
self.stress_margins = self.stress_margins(path, sheet_name)
# =============================================================================
@classmethod
def table_import(cls, path,
sheet_name,
columns,
row_start, row_end,
header_row,
clear_first_n_rows = None,
index_col=None,
trim_column_names = None,
trim_index_name = None):
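        """Read a fixed rectangular block of an Excel sheet into a DataFrame.

        row_start/row_end and header_row are 1-indexed spreadsheet rows; the
        trim_* flags strip the '.N' suffixes pandas appends to duplicated
        column or index names on import.
        """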
rows = row_end - row_start
if header_row is not None:
if isinstance(header_row, list)==False:
header = header_row - 1
else:
header = header_row
else:
header = None
# [Will always be reference 0]
table = pd.DataFrame(pd.read_excel(path,
sheet_name = sheet_name,
header = header,
usecols = columns,
nrows = rows,
index_col = index_col)
)
# SVA sometimes has a blank row between header and the start of the data
if clear_first_n_rows is not None:
table = table.iloc[clear_first_n_rows:]
# The way read_excel works means that if the header has already been 'seen'
# in previous columns, it will add a trailing '.[number]'. This removes it.
if trim_column_names is not None:
table.columns = table.columns.map(str)
table.columns = table.columns.str.replace(r'\.\d+$', '')
if trim_index_name is not None:
table.index.name = table.index.name.split('.')[0]
return table
# =============================================================================
# 1
def claims_reporting_delay(self):
"""
"""
claims_reporting_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'B:J',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claims_reporting_delay
def claim_delay_factors(self):
"""
"""
claim_delay_factors = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'L:T',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claim_delay_factors
# =============================================================================
# 2
def claims_expense_reserve(self):
"""
"""
claims_expense_reserve = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'W:Z',
row_start = 11, row_end = 18,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claims_expense_reserve
def operating_expense_perc_premium(self):
"""
"""
operating_expense_perc_premium = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AB:AE',
row_start = 11, row_end = 18,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return operating_expense_perc_premium
def budgeted_trustee_expense(self):
"""
"""
budgeted_trustee_expense = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AG:AI',
row_start = 11, row_end = 23,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return budgeted_trustee_expense
def projected_trustee_expense(self):
"""
"""
projected_trustee_expense = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AK:AM',
row_start = 11, row_end = 21,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return projected_trustee_expense
# =============================================================================
# 3
def ip_continuance_rates(self):
"""
"""
ip_continuance_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AP:AT',
row_start = 11, row_end = 52,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
# Manually renaming index here.
ip_continuance_rates.index.rename('Month', inplace=True)
return ip_continuance_rates
class dlr_parameters:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def salary_replacement_ratio(self):
"""
"""
salary_replacement_ratio = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return salary_replacement_ratio
def continuing_retirement_benefit(self):
"""
"""
continuing_retirement_benefit = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 13,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return continuing_retirement_benefit
def assumed_avg_age_at_disability(self):
"""
"""
assumed_avg_age_at_disability = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 14,
header_row = 11,
clear_first_n_rows = 2,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return assumed_avg_age_at_disability
def assumed_default_salary(self):
"""
"""
assumed_default_salary = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = 3,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return assumed_default_salary
def payment_ratio(self):
"""
"""
payment_ratio = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 16,
header_row = 11,
clear_first_n_rows = 4,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return payment_ratio
def reopened_claims_reserves_loading(self):
"""
"""
reopened_claims_reserves_loading = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 17,
header_row = 11,
clear_first_n_rows = 5,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return reopened_claims_reserves_loading
def claim_index_rate(self):
"""
"""
claim_index_rate = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 18,
header_row = 11,
clear_first_n_rows = 6,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return claim_index_rate
def benefit_indexation_month(self):
"""
"""
benefit_indexation_month = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AV:AW',
row_start = 11, row_end = 19,
header_row = 11,
clear_first_n_rows = 7,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return benefit_indexation_month
def ip_ibnr_adjustment(self):
"""
"""
ip_ibnr_adjustment = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'AY:AZ',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return ip_ibnr_adjustment
# =============================================================================
# 4
def appeals_reserve_assumptions(self):
"""
"""
appeals_reserve_assumptions = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BC:BE',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return appeals_reserve_assumptions
def perc_of_appealed_claims_accepted(self):
"""
"""
        perc_of_appealed_claims_accepted = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BC:BE',
row_start = 11, row_end = 17,
header_row = 11,
clear_first_n_rows = 5,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
perc_of_appealed_claims_accepted.rename(index={0:'GOV', 1:'NONGOV'}, inplace=True)
return perc_of_appealed_claims_accepted
# =============================================================================
# 5
def decline_rate(self):
"""
"""
decline_rate = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BH:BK',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return decline_rate
def decline_rate_delay(self):
"""
"""
decline_rate_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BH:BI',
row_start = 14, row_end = 21,
header_row = 14,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return decline_rate_delay
def simultaneous_ip_tpd_decline(self):
"""
"""
simultaneous_ip_tpd_decline = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BK:BM',
row_start = 14, row_end = 22,
header_row = 14,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return simultaneous_ip_tpd_decline
# =============================================================================
# 6
def expected_loss_ratio_gov(self):
"""
"""
expected_loss_ratio_gov = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BP:BS',
row_start = 11, row_end = 84,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return expected_loss_ratio_gov
def expected_loss_ratio_nongov(self):
"""
"""
expected_loss_ratio_nongov = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'BU:BX',
row_start = 11, row_end = 84,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return expected_loss_ratio_nongov
# =============================================================================
# 7
def payment_delay_factors(self):
"""
"""
payment_delay_factors = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CA:CG',
row_start = 11, row_end = 35,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return payment_delay_factors
# 7
def payment_delay_factors_discrete(self):
"""
"""
payment_delay_factors_discrete = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CI:CO',
row_start = 11, row_end = 35,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return payment_delay_factors_discrete
# =============================================================================
# 8
def average_claim_size(self):
"""
"""
average_claim_size = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CR:DA',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return average_claim_size
def acs_ip_linked_tpd(self):
"""
"""
acs_ip_linked_tpd = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CR:CV',
row_start = 20, row_end = 32,
header_row = 20,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return acs_ip_linked_tpd
def acs_by_notification_delay_q(self):
"""
"""
acs_by_notification_delay_q = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CW:CY',
row_start = 20, row_end = 85,
header_row = 20,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return acs_by_notification_delay_q
def perc_si_at_ip_doe(self):
"""
"""
perc_si_at_ip_doe = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CZ:DA',
row_start = 19, row_end = 20,
header_row = 19,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = None,
trim_index_name = None)
return perc_si_at_ip_doe
def tpd_si_scales_by_age(self):
"""
"""
tpd_si_scales_by_age = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'CZ:DA',
row_start = 22, row_end = 76,
header_row = 22,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return tpd_si_scales_by_age
# =============================================================================
# 9
class termination_rates:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def age_rates(self):
"""
"""
age_rates = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'DD:DF',
row_start = 11, row_end = 57,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = True)
return age_rates
def duration_of_claim_g_wp_oc(self):
"""
"""
duration_of_claim_g_wp_oc = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'DH:EF',
row_start = 10, row_end = 134,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
# Data adjustments here to correctly index table.
# Note: Consider 'melting' multi-index tables for use in models.
df = duration_of_claim_g_wp_oc.copy()
# info = duration_of_claim_g_wp_oc[1].copy()
index = df[0:4]
index = index.fillna(method='ffill', axis=1)
df = df[4:]
df.columns = pd.MultiIndex.from_arrays(index.values)
df.index.name = 'Duration of Claim (months)'
# duration_of_claim_g_wp_oc = tuple([df, info])
duration_of_claim_g_wp_oc = df
return duration_of_claim_g_wp_oc
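        # A minimal sketch of the 'melting' suggested in the note above,
        # assuming a frame like the one returned here (a two-level column
        # MultiIndex); the 'rate' label is an illustrative assumption:
        #     long_df = (duration_of_claim_g_wp_oc
        #                .stack(level=[0, 1])
        #                .rename('rate')
        #                .reset_index())
        # stack() moves the column levels into the row index, giving one
        # observation per row -- the long format downstream models usually want.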
def smoker_status(self):
"""
"""
smoker_status = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'EH:EI',
row_start = 10, row_end = 12,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
smoker_status.rename(columns={smoker_status.columns[0]: "smoker_status" }, inplace = True)
return smoker_status
def benefit_type(self):
"""
"""
benefit_type = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'EK:EL',
row_start = 10, row_end = 12,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
benefit_type.rename(columns={benefit_type.columns[0]: "benefit_type" }, inplace = True)
return benefit_type
def policy_duration_factor(self):
"""
"""
policy_duration_factor = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'EN:ER',
row_start = 10, row_end = 23,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
# Data adjustments here to correctly index table.
# Note: Consider 'melting' multi-index tables for use in models.
df = policy_duration_factor.copy()
# info = policy_duration_factor[1].copy()
index = df[0:2]
index = index.fillna(method='ffill', axis=1)
df = df[2:]
df.columns = pd.MultiIndex.from_arrays(index.values)
df.index.name = 'Curtate Policy Year'
# policy_duration_factor = tuple([df, info])
policy_duration_factor = df
return policy_duration_factor
# =============================================================================
# 10
class stress_margins:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
self.random = self.random(path, sheet_name)
self.future = self.future(path, sheet_name)
class random:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def random_all(self):
random_all = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 16, row_end = 26,
header_row = 16,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return random_all
def death(self):
death = self.random_all().iloc[0,0]
return death
def death_ibnr(self):
death_ibnr = self.random_all().iloc[1,0]
return death_ibnr
def death_rbna(self):
death_rbna = self.random_all().iloc[2,0]
return death_rbna
def tpd(self):
tpd = self.random_all().iloc[3,0]
return tpd
def tpd_ibnr(self):
tpd_ibnr = self.random_all().iloc[4,0]
return tpd_ibnr
def tpd_rbna(self):
tpd_rbna = self.random_all().iloc[5,0]
return tpd_rbna
def ip(self):
ip = self.random_all().iloc[6,0]
return ip
def ip_dlr(self):
ip_dlr = self.random_all().iloc[7,0]
return ip_dlr
def ip_ibnr(self):
ip_ibnr = self.random_all().iloc[8,0]
return ip_ibnr
def ip_rbna(self):
ip_rbna = self.random_all().iloc[9,0]
return ip_rbna
class future:
def __init__(self, path, sheet_name):
self.path = path
self.sheet_name = sheet_name
def future_all(self):
future_all = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 27, row_end = 37,
header_row = 27,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return future_all
def death(self):
death = self.future_all().iloc[0,0]
return death
def death_ibnr(self):
death_ibnr = self.future_all().iloc[1,0]
return death_ibnr
def death_rbna(self):
death_rbna = self.future_all().iloc[2,0]
return death_rbna
def tpd(self):
tpd = self.future_all().iloc[3,0]
return tpd
def tpd_ibnr(self):
tpd_ibnr = self.future_all().iloc[4,0]
return tpd_ibnr
def tpd_rbna(self):
tpd_rbna = self.future_all().iloc[5,0]
return tpd_rbna
def ip(self):
ip = self.future_all().iloc[6,0]
return ip
def ip_dlr(self):
ip_dlr = self.future_all().iloc[7,0]
return ip_dlr
def ip_ibnr(self):
ip_ibnr = self.future_all().iloc[8,0]
return ip_ibnr
def ip_rbna(self):
ip_rbna = self.future_all().iloc[9,0]
return ip_rbna
def time_to_react_future(self):
time_to_react_future = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 39, row_end = 40,
header_row = 39,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return time_to_react_future
def event_pandemic_death(self):
event_pandemic_death = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return event_pandemic_death
def event_pandemic_tpd(self):
event_pandemic_tpd = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1,0]
return event_pandemic_tpd
def event_pandemic_ip(self):
event_pandemic_ip = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[2,0]
return event_pandemic_ip
def prop_disabled_after_wp(self):
prop_disabled_after_wp = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 42, row_end = 46,
header_row = 42,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[3,0]
return prop_disabled_after_wp
def lapse_stress(self):
lapse_stress = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 48, row_end = 50,
header_row = 48,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return lapse_stress
def servicing_expense_stress(self):
servicing_expense_stress = sva.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FL:FM',
row_start = 48, row_end = 50,
header_row = 48,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1,0]
return servicing_expense_stress
# =============================================================================
# 11
def reinsurance(self):
reinsurance = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FT:FY',
row_start = 11, row_end = 14,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return reinsurance
def catastrophe_pl(self):
catastrophe_pl = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FT:FY',
row_start = 21, row_end = 23,
header_row = 21,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0, 4]
return catastrophe_pl
def catastrophe_capital(self):
catastrophe_capital = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'FT:FY',
row_start = 21, row_end = 23,
header_row = 21,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1, 4]
return catastrophe_capital
# =============================================================================
# 12
def par_loadings(self):
par_loadings = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GB:GC',
row_start = 10, row_end = 11,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return par_loadings
def stamp_duty(self):
stamp_duty = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GB:GC',
row_start = 13, row_end = 15,
header_row = 13,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return stamp_duty
def investment_earnings_b0(self):
investment_earnings_b0 = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GB:GC',
row_start = 16, row_end = 17,
header_row = 16,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return investment_earnings_b0
# =============================================================================
# 13
def contingency_margin_start(self):
contingency_margin_start = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GF:GG',
row_start = 10, row_end = 11,
header_row = 10,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return contingency_margin_start
def contingency_margin(self):
contingency_margin = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GF:GH',
row_start = 13, row_end = 14,
header_row = 13,
clear_first_n_rows = None,
index_col = None,
trim_column_names = True,
trim_index_name = None)
return contingency_margin
# =============================================================================
# 14
def notification_delay(self):
notification_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GK:GM',
row_start = 11, row_end = 12,
header_row = 11,
clear_first_n_rows = None,
index_col = None,
trim_column_names = True,
trim_index_name = None)
return notification_delay
# =============================================================================
# 15
def cmm_impact_termination_rates_start(self):
cmm_impact_termination_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GP:GQ',
row_start = 11, row_end = 13,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[0,0]
return cmm_impact_termination_rates
def cmm_impact_termination_rates_perc(self):
cmm_impact_termination_rates_perc = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GP:GQ',
row_start = 11, row_end = 13,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None).iloc[1,0]
return cmm_impact_termination_rates_perc
# =============================================================================
# 16
def covid19_impact_termination_rates(self):
covid19_impact_termination_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GS:GT',
row_start = 11, row_end = 16,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return covid19_impact_termination_rates
# =============================================================================
# 17
def covid19_adjustment_ip_dlr(self):
covid19_adjustment_ip_dlr = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GV:GW',
row_start = 11, row_end = 27,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return covid19_adjustment_ip_dlr
# =============================================================================
# 18
def expected_lr_combined_capital(self):
expected_lr_combined_capital = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'GY:HB',
row_start = 11, row_end = 90,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return expected_lr_combined_capital
# =============================================================================
# 19
def gov_tpd_linked_to_ip(self):
gov_tpd_linked_to_ip = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HD:HF',
row_start = 11, row_end = 23,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return gov_tpd_linked_to_ip
def tpd_linked_reporting_delay(self):
tpd_linked_reporting_delay = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HH:HI',
row_start = 11, row_end = 65,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return tpd_linked_reporting_delay
def conversion_rates(self):
conversion_rates = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HK:HM',
row_start = 11, row_end = 26,
header_row = 11,
clear_first_n_rows = None,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return conversion_rates
# =============================================================================
# 20
def claims_reporting_delay_tpd_ip(self):
claims_reporting_delay_tpd_ip = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HO:HQ',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return claims_reporting_delay_tpd_ip
def claims_delay_factors_tpd_ip(self):
claims_delay_factors_tpd_ip = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HS:HU',
row_start = 11, row_end = 305,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return claims_delay_factors_tpd_ip
# =============================================================================
# 21
def missing_subcase_reserve(self):
missing_subcase_reserve = self.table_import(path = self.path,
sheet_name = self.sheet_name,
columns = 'HW:HX',
row_start = 11, row_end = 15,
header_row = 11,
clear_first_n_rows = 1,
index_col = 0,
trim_column_names = True,
trim_index_name = None)
return missing_subcase_reserve
# =============================================================================
|
[
"68405635+wheatfields@users.noreply.github.com"
] |
68405635+wheatfields@users.noreply.github.com
|
1ef85ed8de5af85610939be3fd8aaef0b637de4c
|
2127976c32452664cbe5bc46e858f6c1059300fc
|
/spotify.py
|
f1e603f01287548e6f8b5a843597b714959fb8f9
|
[] |
no_license
|
Luis199/spotify
|
8c536680652d99b5b63c85859eb6b0e626107057
|
961fd0970305ee8bab8ed9105dad3c07a646a297
|
refs/heads/master
| 2023-02-09T00:57:01.231526
| 2021-01-05T19:59:16
| 2021-01-05T19:59:16
| 282,363,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
birdy_uri = 'spotify:artist:2WX2uTcsvV5OnS0inACecP'
spotify = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
results = spotify.artist_albums(birdy_uri, album_type='album')
albums = results['items']
while results['next']:
results = spotify.next(results)
albums.extend(results['items'])
for album in albums:
print(album['name'])
# export SPOTIPY_CLIENT_ID='3750a2d0a4494d3385dbbda87871bab2'
# export SPOTIPY_CLIENT_SECRET='81acd20ff18642b9b9c941d811dfa2de'
# export SPOTIPY_REDIRECT_URI='your-app-redirect-url'
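# An explicit alternative to the environment variables above (spotipy also
# accepts the credentials directly; the values here are placeholders, not
# real keys):
# client_credentials_manager = SpotifyClientCredentials(
#     client_id='your-client-id', client_secret='your-client-secret')
# spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)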
|
[
"luiscasado620@gmail.com"
] |
luiscasado620@gmail.com
|
b1918d70a960ef445232d6b1b21ffd44d9848c48
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/Phys/Urania/examples/KsPiZeroMM_angularPDF.py
|
a83417211276319e5a15c72d57e48769a1b46477
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970
| 2017-12-15T14:42:04
| 2017-12-15T14:42:04
| 251,259,622
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,684
|
py
|
from Urania.Helicity import *
from Urania.SympyBasic import *
from os import *
DiLeptonSpins = [0,1,2] ## DMS: I doubt we'll need 2, probably we'll only
## have Pwave (J=1) from the photon, plus maybe some S-wave (J=0)
### transAmp=1 : Changes to transversity amplitude basis
A = doKsPizeroMuMu(DiLeptonSpins ) ## This is now in Urania.Helicity
### massage a bit the expression to make it more suitable for fitting
pdf_split = DecomposeAmplitudes(A,TransAmplitudes.values())
phys = 0
for key in pdf_split: phys += StrongPhases(key)*pdf_split[key]
### change the free variables to cosines
x = USymbol("helcosthetaK","c\\theta_{K}",real = True)
y = USymbol("helcosthetaL", "c\\theta_{l}", real = True)
z = USymbol("helphi" , "\\phi", real = True)
CThL = Cos(ThetaL)
CThK = Cos(ThetaK)
def changeFreeVars(function):
### Phi now as in DTT !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
function = function.subs( Sin(2*ThetaK), 2*Sin(ThetaK)*Cos(ThetaK))
function = function.subs( Sin(2*ThetaL), 2*Sin(ThetaL)*Cos(ThetaL))
function = function.subs( Cos(2*ThetaK), 2*Cos(ThetaK)**2 - 1)
function = function.subs( Cos(2*ThetaL), 2*Cos(ThetaL)**2 - 1)
function = function.subs( Sin(ThetaK), Sqrt(1-Cos(ThetaK)**2))
function = function.subs( Sin(ThetaL), Sqrt(1-Cos(ThetaL)**2))
function = function.subs([(CThK,x),(CThL,y), (Phi, -z)])
return function
func = changeFreeVars(phys)
### Print out to a latex document
from Urania.LatexFunctions import *
flatex = file("Kspizeromm_PDF.tex","w")
begintex(flatex)
begin_multline(flatex)
i = 0
for key in pdf_split.keys():
if i > 20:
i = 0
multline_break(flatex)
if pdf_split[key]:
flatex.write(Ulatex(key) + "\t" + Ulatex(pdf_split[key]) + "\\\\" + "\n")
i += 1
end_multline(flatex)
flatex.write("\\end{document}\n")
flatex.close()
system("pdflatex " + "Kspizeromm_PDF")
print "angular function saved in Kspizeromm_PDF.pdf"
print "Now making RooFit class as well"
##BREAK
##### Generate and compile a fitting class corresponding to "A"
### Trial 1, w/o analytical integrals
from Urania.RooInterfaces import *
potential_list = [x,y,z]+TransAmpModuli.values() + TransAmpPhases.values()
final_list = []
for thing in potential_list:
if thing in func.atoms(): final_list.append(thing)
op = RooClassGenerator(func, final_list ,"RooKspizeroMM")
### Define intermediate variables to be calculated once
op.makePdf(integrable = 1)
op.doIntegral(1,(y,-1,1))#,(y,-1,1),(z,-Pi,Pi))
##op.doIntegral(2,(x,-1,1),(y,-1,1))
##op.doIntegral(3,(x,-1,1),(z,-Pi,Pi))
##op.doIntegral(4,(y,-1,1),(z,-Pi,Pi))
op.overwrite()
op.invoke()
|
[
"liblhcb@cern.ch"
] |
liblhcb@cern.ch
|
bf42c98bb55e0b61192663d6f96ce710d0f07d01
|
eecc738c416a9ed5ccac250cb1d676a7f104d2fe
|
/landmarkEmo/dontuse/test.py
|
256c616a63876e779034b5636e4b4a24dc3e1020
|
[] |
no_license
|
cosmic119/CNN
|
b331d35e048fd24ad73dcbd5d0481220314d89c2
|
a1016323ef2f89020d793fe66e0d4db850a0359a
|
refs/heads/master
| 2021-04-15T03:33:22.834015
| 2018-03-23T02:22:36
| 2018-03-23T02:22:36
| 126,423,542
| 0
| 0
| null | 2018-03-23T02:44:01
| 2018-03-23T02:44:01
| null |
UTF-8
|
Python
| false
| false
| 10,279
|
py
|
# -*- coding: utf-8 -*-
"""
niektemme/tensorflow-mnist-predict 를 참조하였음
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download the data the test program needs from the repository
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
"""
모델 생성에 필요한 데이터 정의
x : 인풋레이어에 사용할 변수 정의
y : 아웃풋레이어에 사용할 변수 정의
w : 784 X 10 개의 초기값 0을 갖는 메트릭스 생성
b : 10개짜리 배열 생성
y = x * w + b
x (784) * w(784*10) = x*w(10)
x*w(10) + b(10) = y(10)
위에처럼 메트릭스 연산이 수행되기 때문에 위와 같이 데이터 사이즈를 잡은 것이다.
"""
x = tf.placeholder(tf.float32, [None, 784])
y_= tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Method that creates initial values of the desired matrix shape and returns them
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# Returns the desired shape with the initial values set to 0.1
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
"""
필터에 대해서 설명하고자 하면 CNN 의 동작 원리를 설명해야만한다.
[5, 5, 1, 32] 는 5X5 사이즈의 단위 필터를 사용해서 인풋데이터
(여기서는 28 X 28 사이즈 메트릭스)를 CNN연산을 하겠다는 것이다.
Stride 가 [1,1] 이라고 하면 28X28크기 행렬을 5X5 사이즈의
메트릭스로가로세로 한칸씩 이동하면서 필터에 연산하겠다는 의미가 된다.
결과적으로 아웃풋은 24X24 사이즈가 된다. 왜냐하면 5X5 사이즈의
메트릭스로 이동할 수 있는 한계가 있기 때문이다.
(메트릭스 끝부분 까지 이동할 수 없음)
이러한 경우 패딩 옵션을 사용하여 0으로 태두리를 채워넣어 메특릭스
사이즈를 동일하게 유지할 수도 있다
참조:http://deeplearning4j.org/convolutionalnets.html
"""
def conv2d(x, W):
# tf.nn.conv2d(input, filter, strides, padding, use_cudnn
# _on_gpu=None, data_format=None, name=None)
    # strides = [1, stride, stride, 1]: the step size the filter mask moves by
    # padding='SAME': add padding so the matrix does not shrink at the next level
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
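# A small illustration of the output-size arithmetic in the docstring above
# (this helper is an addition for clarity, not part of the original script):
# out = (in - filter + 2*pad) // stride + 1, so a 28x28 input with a 5x5
# filter, stride 1 and no padding gives 24x24, while padding='SAME' keeps 28x28.
def conv_output_size(in_size, filter_size, stride=1, pad=0):
    return (in_size - filter_size + 2 * pad) // stride + 1
assert conv_output_size(28, 5) == 24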
"""
보통은 이렇게 생성한 메트릭스를 max pooling 을 사용하여 다시 한번 간소화한다.
위에서 필터링이 마스크에 대한 & 연산이었다면, max Pooling은 메트릭스에서 가장
큰 값 하나만 뽑아서 사용하는 방법이다. 아래와 같은 max pooling 정의
(mask [2,2] , stride[2,2] )를 4X4 메트릭스에 적용하면 2X2 메트릭스가 될 것이다
"""
# x : [batch, height, width, channels]
# Extract the largest value from each 2x2 window, moving 2 steps at a time
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# [filter_height, filter_width, in_channels, out_channels]
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
"""
Layer 1
아래의 3줄로써 인풋 레이어에 대한 정의는 완료된다. 28X28 행렬 하나를 넣고
28X28행렬(원래는 24X24가 되지만 Padding 적용) 32개를 만들고 다시 max pool
(2,2)를 사용하여 14X14 메트릭스 32개를 리턴하는 레이어를 정의하였다
메트릭스 단위로 정리하면 인풋 1개, 아웃풋 32개 이다 트
"""
# Reshape the input data matrix: multiple samples of 784 values are reshaped
# to the form [-1, 28, 28, 1] -- as many samples as there are (-1), each a
# 28x28 matrix whose pixels hold a single value rather than RGB data.
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
"""
Layer 2
1번 레이어에서 아웃풋을 32개를 전달하였음으로 2번 레이어의 인풋은
14X14 메트릭스 32개 그리고 아웃풋은 동일한 max pool 을 적용하여 8x8 메트릭스
64개를 출력한다. 정리하면 인풋 32개(14X14) 아웃풋 64개(7X7) 이 된다
"""
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
"""
Layer 3
현재 최종 데이터의 수는 7 X 7 X 64 = 3136 개 이지만 1024 개 를 사용한다
1024는 임의의 선택 값이다
"""
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
"""
Drop Out
Dropout 은 데이터 간의 연과 관계가 큰 데이터들을 제거함으로써 과적합 문제를
해결하는 기법의 하나이다.
"""
# Placeholder to hold the keep probability for the dropout operation
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
"""
Out put Layer
마지막으로 1024개의 노드에서 10개의 (0~9까지 숫자)에 대한 확률을 Soft Max 를
이용하여 도출할 수 있도록 한다
"""
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Define loss and optimizer
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
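# Note (an aside, not in the original): taking softmax and then log like this
# can be numerically unstable; TensorFlow also provides
# tf.nn.softmax_cross_entropy_with_logits, which fuses both steps on raw logits.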
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
"""
Train & Save Model
"""
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
# Train repeatedly: 20000 iterations, 50 samples at a time
for i in range(20000):
batch = mnist.train.next_batch(50)
    # Test model accuracy once every 100 iterations
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
    # batch[0]: 28x28 images, batch[1]: digit labels, keep_prob: dropout keep ratio
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# Save all of the variable values used in the model
save_path = saver.save(sess, "model2.ckpt")
print ("Model saved in file: ", save_path)
# Finally, check the model's accuracy on the test data
print("test accuracy %g"%accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
sess.close()
|
[
"gksthf3178@naver.com"
] |
gksthf3178@naver.com
|
89eb8d3c440b20fc430683ddb303868c4dfccc4a
|
480459352928aa307317bac9d7c8f0efe427023c
|
/getting_started/config.py
|
e7586e40cad89565f5e9aa9ede9c5d8236c38670
|
[
"MIT-0"
] |
permissive
|
seeq12/amazon-lookout-for-equipment
|
031a265095d7c153d086af6c2b97c17c2bbf835b
|
cb760aa0f9e2dad8fce13ed7c50282a10e320b40
|
refs/heads/main
| 2023-06-28T20:30:55.880074
| 2021-07-27T15:00:00
| 2021-07-27T15:00:00
| 390,020,084
| 0
| 0
| null | 2021-07-27T14:45:26
| 2021-07-27T14:45:25
| null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
# Update the name of the bucket you want to use
# to store the intermediate results of this getting
# started:
BUCKET = '<<YOUR_BUCKET>>'
# You can leave these other parameters to these
# default values:
PREFIX_TRAINING = 'getting_started/training-data/'
PREFIX_LABEL = 'getting_started/label-data/'
PREFIX_INFERENCE = 'getting_started/inference-data'
DATASET_NAME = 'getting-started-pump'
MODEL_NAME = f'{DATASET_NAME}-model'
INFERENCE_SCHEDULER_NAME = f'{DATASET_NAME}-scheduler'
|
[
"michoara@amazon.fr"
] |
michoara@amazon.fr
|
d8c137dda1852fc28941eac7e6a8c8a76905993e
|
9bde6cafb4273d721229448d115853ff2f5994a6
|
/myblog/blog/models.py
|
29739ca1865621b4e4224bca3f600e41f915a179
|
[] |
no_license
|
davejonesbkk/myblog
|
11eb30b4d75270b3e99f172f27f05ce31e318f93
|
4a5cbeb47154004ef239b16e63155997b1c9afe6
|
refs/heads/master
| 2021-01-17T17:43:28.465235
| 2016-05-31T02:02:07
| 2016-05-31T02:02:07
| 59,930,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
from django.db import models
from django_markdown.models import MarkdownField
from django.core.urlresolvers import reverse
class EntryQuerySet(models.QuerySet):
def published(self):
return self.filter(publish=True)
class Entry(models.Model):
title = models.CharField(max_length=200)
body = models.TextField()
slug = models.SlugField(max_length=200, unique=True)
publish = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = EntryQuerySet.as_manager()
def __str__(self):
return self.title
class Meta:
verbose_name = 'Blog Entry'
verbose_name_plural = 'Blog Entries'
ordering = ["-created"]
|
[
"davejonesbkk@gmail.com"
] |
davejonesbkk@gmail.com
|
447a75ff7f1e949a3c268918e94f8ab08d58da0f
|
68cd659b44f57adf266dd37789bd1da31f61670d
|
/2020-01/python/18188_다오의데이트.py
|
7c55c44e597a14f68e338a66b4a4458c5ab95c41
|
[] |
no_license
|
01090841589/solved_problem
|
c0c6f5a46e4d48860dccb3b0288aa5b56868fbca
|
bbea2f31e5fe36cad100bc514eacd83545fb25b1
|
refs/heads/master
| 2023-07-02T23:55:51.631478
| 2021-08-04T13:57:00
| 2021-08-04T13:57:00
| 197,157,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
import sys
sys.stdin = open("다오의데이트.txt")
DIR = [[-1, 0], [0, 1], [1, 0], [0, -1]]
def go_dao(y, x, k, route):
global result, rts
if result:
return
if k >= A:
return
flag = 1
for i in range(4):
if can[k][i]:
Y = y+DIR[i][0]
X = x+DIR[i][1]
if 0 <= Y < H and 0 <= X < W:
if MAP[Y][X] != '@':
if MAP[Y][X] == 'Z':
rts = route+arr[i]
result = 1
return
flag = 0
go_dao(Y, X, k+1, route+arr[i])
H, W = map(int, input().split())
MAP = [list(input()) for _ in range(H)]
for h in range(H):
for w in range(W):
if MAP[h][w] == 'D':
y = h
x = w
result = 0
rts = ''
A = int(input())
arr = ['W', 'D', 'S', 'A']
can = [[0, 0, 0, 0] for _ in range(A)]
for i in range(A):
B, C = map(str, input().split())
can[i][arr.index(B)] = 1
can[i][arr.index(C)] = 1
go_dao(y, x, 0, '')
if result:
print("YES")
print(rts)
else:
print("NO")
|
[
"chanchanhwan@naver.com"
] |
chanchanhwan@naver.com
|
26afa536ebd4f0faec9f6c755154abae49230382
|
df0f8bc85e3855c37034ce571f5f0ded8c4ebb90
|
/Day_11/AoC_2016_11_2.py
|
6a10cac4796753a46068f92565ebb3116ac4fa7e
|
[] |
no_license
|
jshales4/aoc2016
|
67592f3e40fc631b1d7ae132c70b144d74095ef8
|
3bd8b42dd4363dfec71973cff9e8b19178abb3a1
|
refs/heads/master
| 2021-01-11T20:18:39.566972
| 2017-02-23T06:42:58
| 2017-02-23T06:42:58
| 77,817,897
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,553
|
py
|
##AoC_2016_11.py
import itertools
import sys
from copy import deepcopy
from datetime import datetime
def main():
print datetime.now().strftime('%Y-%m-%d %H:%M:%S')
move_tracker = {}
move_watch = True
#Example case
#floors = [['SG', 'SM', 'PG', 'PM'], ['TG','RG','RM','CG','CM'],['TM'],[]]
floors = [['EG', 'EM', 'DG', 'DM', 'SG', 'SM', 'PG', 'PM'], ['TG','RG','RM','CG','CM'],['TM'],[]] #This runs in two hours without tree cleaning
#floors = [['HM', 'LM'], ['HG'], ['LG'], []]
#floors = [['HM', 'HG'], [], [], []]
elevator = 0
ini_state = Game_State(floors, elevator, 0)
move_tracker[hash(''.join(ini_state.current_setup[0])+ '_' + ''.join(ini_state.current_setup[1]) + '_' +''.join(ini_state.current_setup[2])+ '_' +''.join(ini_state.current_setup[3]) + ''.join(str(elevator)))] = 1
while (move_watch ==True):
moves1 = len(move_tracker)
move_tracker = climb_tree(ini_state, move_tracker)
clean_tree(ini_state)
if moves1==len(move_tracker):
move_watch = False
#make_moves(ini_state, move_tracker)
print_levels(ini_state, 0)
print datetime.now().strftime('%Y-%m-%d %H:%M:%S')
class Game_State:
def __init__(self, current_setup, elevator_pos, moves_made):
self.current_setup = current_setup
self.elevator_pos = elevator_pos
self.moves_made = moves_made
self.move_options = []
self.moves_remain = True
self.solution_flag = False
def add_move (self, new_game_state):
self.move_options.append(new_game_state)
def climb_tree(game_state, move_tracker):
if game_state.solution_flag == True:
return move_tracker
elif len(game_state.move_options)>0 and game_state.moves_remain == True:
for n in game_state.move_options:
move_tracker = climb_tree(n, move_tracker)
return move_tracker
elif game_state.moves_remain == True:
move_tracker = make_moves_eff(game_state, move_tracker)
return move_tracker
else:
game_state.moves_remain = False
return move_tracker
def clean_tree(game_state):
for n in game_state.move_options:
if n.moves_remain == False:
game_state.move_options.remove(n)
for p in game_state.move_options:
clean_tree(p)
def iterate_levels(game_state, move_tracker):
results = []
no_changes = True
    if len(game_state.move_options)>0 and game_state.moves_remain == True: # was len(moves_made), an int -- len() would crash
for n in game_state.move_options:
results.append(iterate_levels(n, move_tracker)[0])
elif game_state.moves_remain == False:
return True, move_tracker
else:
make_moves_eff(game_state, move_tracker)
return False, move_tracker
def print_levels(game_state, levels_traveled):
if validate_solutions(game_state.current_setup) == True:
print 'Solved', levels_traveled
else:
for n in game_state.move_options:
print_levels(n, levels_traveled + 1)
# def find_depths(game_state, move_tracker):
# if game_state.moves_remain = False:
# next
# elif
def make_moves_eff(game_state, move_tracker):
move_set = decide_movers(game_state.current_setup, game_state.elevator_pos)
move_track = move_tracker
for n in range(len(move_set)):
for p in [-1, 1]:
new_move = attempt_move(deepcopy(game_state.current_setup), move_set[n], deepcopy(game_state.elevator_pos), int(game_state.elevator_pos) + p, deepcopy(game_state.moves_made), move_tracker)
move_track = new_move[1]
if new_move[0] != False:
discovered_move = Game_State(new_move[0].current_setup, new_move[0].elevator_pos, new_move[0].moves_made)
if validate_solutions(new_move[0].current_setup) == True:
discovered_move.solution_flag=True
#print 'Move added to log', discovered_move
game_state.add_move(discovered_move)
if len(game_state.move_options)==0:
game_state.moves_remain = False
return move_tracker
else:
return move_tracker
def make_moves(game_state, move_tracker):
move_set = decide_movers(game_state.current_setup, game_state.elevator_pos)
move_track = move_tracker
for n in range(len(move_set)):
for p in [-1, 1]:
#print 'Current Gamestate: ', game_state
new_move = attempt_move(deepcopy(game_state.current_setup), move_set[n], deepcopy(game_state.elevator_pos), int(game_state.elevator_pos) + p, deepcopy(game_state.moves_made), move_tracker)
move_track = new_move[1]
if new_move[0] != False:
discovered_move = Game_State(new_move[0].current_setup, new_move[0].elevator_pos, new_move[0].moves_made)
#print 'Move added to log', discovered_move
game_state.add_move(discovered_move)
if validate_solutions(new_move[0].current_setup) == True:
print new_move[0].moves_made
if len(game_state.move_options)>0:
print 'New Node.'
for r in range(len(game_state.move_options)):
print 'Options to move from here are', game_state.move_options
make_moves(game_state.move_options[r], move_tracker)
else: print game_state.move_options
def attempt_move(gamestate_setup, moving_pieces, elevator_start, elevator_new, moves_made, move_tracker):
if elevator_new > 3 or elevator_new < 0:
return False, move_tracker
elif validate_move(deepcopy(gamestate_setup[elevator_new]), deepcopy(gamestate_setup[elevator_start]), moving_pieces, elevator_new) == True:
#print 'Setup before move being attempted:', gamestate_setup
#print 'Here is what will be moved:', moving_pieces
#print 'The elevator will be moved to floor ', elevator_new, 'from floor ', elevator_start
#move_tracker.append(hash(frozenset())
new_node = Game_State(gamestate_setup, elevator_new, moves_made + 1)
if len(''.join(moving_pieces)) > 2:
new_node.current_setup[elevator_new].extend(moving_pieces)
else: new_node.current_setup[elevator_new].append(moving_pieces)
new_node.current_setup[elevator_new].sort()
new_node.current_setup[elevator_start] = [x for x in new_node.current_setup[elevator_start] if x not in moving_pieces]
new_node.current_setup[elevator_start].sort()
#setup_new[elevator_new].append(elevator_new)
if validate_solutions(new_node.current_setup) == True:
#print 'Puzzle Solved! ', new_node.moves_made
return new_node, move_tracker
elif hash(''.join(new_node.current_setup[0])+ '_' + ''.join(new_node.current_setup[1]) + '_' +''.join(new_node.current_setup[2])+ '_' +''.join(new_node.current_setup[3]) + ''.join(str(elevator_new))) in move_tracker and move_tracker[hash(''.join(new_node.current_setup[0])+ '_' + ''.join(new_node.current_setup[1]) + '_' +''.join(new_node.current_setup[2])+ '_' +''.join(new_node.current_setup[3]) + ''.join(str(elevator_new)))]<=moves_made+1:
#print "We've already tried this move."
return False, move_tracker
else:
move_tracker[hash(''.join(new_node.current_setup[0])+ '_' + ''.join(new_node.current_setup[1]) + '_' +''.join(new_node.current_setup[2])+ '_' +''.join(new_node.current_setup[3]) + ''.join(str(elevator_new)))] = moves_made + 1
return new_node, move_tracker
else:
#print 'Move Invalid'
return False, move_tracker
def valid_floor(proposed_floor):
microchip_only = True
for n in range(len(proposed_floor)):
if proposed_floor[n][1] == 'G':
microchip_only = False
for n in range(len(proposed_floor)):
if proposed_floor[n][1] == 'M':
if proposed_floor[n][0] + 'G' not in proposed_floor and microchip_only == False:
return False
return True
def validate_move(proposed_floor, old_floor, elevator_passengers, elevator_pos):
old_floor_moved = [x for x in old_floor if x not in elevator_passengers]
if len(''.join(elevator_passengers)) > 2:
if elevator_passengers[0][1] == 'G' and elevator_passengers[1][1] == 'M' and elevator_passengers[0][0] != elevator_passengers[1][0]:
return False
elif elevator_passengers[1][1] == 'G' and elevator_passengers[0][1] == 'M' and elevator_passengers[0][0] != elevator_passengers[1][0]:
return False
else:
proposed_floor.extend(elevator_passengers)
return valid_floor(proposed_floor) * valid_floor(old_floor_moved)
else:
proposed_floor.append(elevator_passengers)
return valid_floor(proposed_floor) * valid_floor(old_floor_moved)
def decide_movers(setup, elevator_pos):
possible_movers = []
possible_movers = list(itertools.combinations(setup[elevator_pos], 2)) + setup[elevator_pos]
return possible_movers
def validate_solutions(setup):
if len(setup[0]) + len(setup[1]) + len(setup[2]) == 0:
return True
else: return False
if __name__=='__main__':
main()
##Just general pseudo-code thoughts: We basically want to take each current setup and determine all possible moves from that setup. We then want to check that move against
##a hash table to make sure we haven't tried making it before, then we can make a new branch of the tree containing all possible moves from that point. Then we can return the amount of moves it took to get there to find the min.
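##A minimal sketch of the hash-table dedup described above (helper names are
##illustrative, not taken from the solver): canonicalise each state once and
##only branch when the state is new, or reached in fewer moves than before.
def state_key(setup, elevator):
    return '_'.join(''.join(sorted(floor)) for floor in setup) + str(elevator)
def should_branch(move_tracker, setup, elevator, moves_made):
    key = state_key(setup, elevator)
    if key in move_tracker and move_tracker[key] <= moves_made:
        return False # already reached this state at least as cheaply
    move_tracker[key] = moves_made
    return True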
|
[
"jshales46@gmail.com"
] |
jshales46@gmail.com
|
c1af50c6c4bae299368467230953b197828dfb68
|
8f4b481b2e92d4a29822d7ea4756d9d51af8ed10
|
/RDF/single_frame/rdf_drug_initial.py
|
47dac791467bd6943960044a52681100c84319f0
|
[] |
no_license
|
Zilu-Zhang/MD-simulation-data-analysis
|
fbe4d4b94ea3506dfa0fe084e7279ad364f0f108
|
21da1d96418a89f80fd827aef0f0206934046543
|
refs/heads/main
| 2023-05-30T16:16:09.314265
| 2021-06-08T13:20:03
| 2021-06-08T13:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
import mdtraj as md
import numpy as np
import os
import os.path
import pandas as pd
import openpyxl as pxl
from statistics import mean
from math import sqrt
def dis(ref, tag):
x = ref[0] - tag[0]
y = ref[1] - tag[1]
z = ref[2] - tag[2]
return sqrt(x**2 + y**2 + z**2)
n_frames = 1
for filename in os.listdir('./'):
if filename.endswith('.pdb'):
excipient_name = filename[17:-4]
traj = md.load(filename)
top = traj.topology
ori = 0
total = np.empty(12 * n_frames)
i = 0 # changable
start = 0
position = np.zeros((13,3))
for j in range(12):
res = top.residue(j)
length = res.n_atoms
x = mean(traj.xyz[i, start:start + length, 0])
y = mean(traj.xyz[i, start:start + length, 1])
z = mean(traj.xyz[i, start:start + length, 2])
position[j][:] = x, y, z
start += length
        # Centre of mass of the 12 residues: average each coordinate column
        # (the original indexed rows instead of columns, which averaged the
        # wrong values).
        position[-1][:] = position[:-1, 0].mean(), position[:-1, 1].mean(), position[:-1, 2].mean()
distance = np.zeros(12)
for h in range(12):
distance[h] = dis(position[-1], position[h])
total[ori:ori + 12] = distance
ori += 12
r_range = np.array([0, 5])
bin_width = 0.05
n_bins = int((r_range[1] - r_range[0]) / bin_width)
g_r, edges = np.histogram(total, range=r_range, bins=n_bins)
g_r = g_r / (12 * n_frames)
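        # Note: g_r at this point is a raw count per residue per frame. A
        # textbook RDF would additionally divide each bin by its spherical
        # shell volume (an extra step, not part of the original analysis):
        #     shell_vol = 4.0 / 3.0 * np.pi * (edges[1:]**3 - edges[:-1]**3)
        #     g_r = g_r / shell_vol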
r = 0.5 * (edges[1:] + edges[:-1])
df = pd.DataFrame({'r': r, 'g_r': g_r})
if not os.path.isfile('rdf_drug_0.xlsx'):
df.to_excel('rdf_drug_0.xlsx', '%s' % excipient_name, index = True)
else:
excel_book = pxl.load_workbook('rdf_drug_0.xlsx')
with pd.ExcelWriter('rdf_drug_0.xlsx', engine = 'openpyxl') as writer:
writer.book = excel_book
writer.sheets = {worksheet.title: worksheet for worksheet in excel_book.worksheets}
df.to_excel(writer, '%s' % excipient_name, index = True)
writer.save()
|
[
"noreply@github.com"
] |
Zilu-Zhang.noreply@github.com
|
ba96f4130d8855366f57a4652b61e6a6af74ad00
|
b7dec7dcffc5290e8f7856baccdb42d84e9e11e8
|
/thesis/urls.py
|
bcb9bc2faf6e5143e317aaa599f36e9f70626730
|
[] |
no_license
|
lthebe/cp_thesis
|
406fd5441f7e0944ebf9e0439c9ce3a16cd7df63
|
573f9c339e57f33895e9924b04f3792ceb50e9e1
|
refs/heads/master
| 2021-06-14T13:27:18.702167
| 2017-01-31T13:56:14
| 2017-01-31T13:56:14
| 80,521,434
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
"""thesis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from support.views import SupportOrderView, SupportFinalCheckoutView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('social.apps.django_app.urls', namespace='social')),
url(r'^community/', include('community.urls')),
url(r'', include('people.urls', namespace='people')),
url(r'^posts/', include("posts.urls", namespace="posts")),
url(r'^(?P<pk>\d+)/support/', SupportOrderView.as_view(), name='sponser'),
url(r'^finalize-support/', SupportFinalCheckoutView.as_view(), name="finalize-support"),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"laxmi.thebe@gmail.com"
] |
laxmi.thebe@gmail.com
|
f56222a598de1c4002c0712cef364ba7722e2078
|
6ddfd7082e9126a88ce9357250c96137af5228e5
|
/PIplot.py
|
cb18ffd6cc8c8676f6a12389cc7671e112066070
|
[] |
no_license
|
ryantro/ICE-Rb-Cell-Absorption-Spectrum-Plot
|
861ef2f970273c1b546319daf345d87be5e094e4
|
aca2f7d2145e18b9a8c9bec6c88fdf18d0b320f5
|
refs/heads/master
| 2022-11-25T13:28:47.851825
| 2020-07-31T22:07:35
| 2020-07-31T22:07:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,821
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 16:57:41 2020
@author: ryan.robinson
"""
import time
import os
from serial import serialwin32 as serial
import numpy as np
import sys, string,subprocess
import nidaqmx
class ICE:
def __init__(self,BoxNum,SlotNum):
self.BoxNum = int(BoxNum)
self.SlotNum = int(SlotNum)
return None
IceTimeout = .1 #Communication Timeout (seconds)
IceByteRead = 256 #Number of bytes to read on ser.read()
IceDelay = .01 #Delay in seconds after sending Ice Command to ensure execution
###Functions###
def setSlot(self,SlotNum):
self.SlotNum = SlotNum
print('Changed Slot To: '+str(SlotNum))
return None
def wait(self,num):
'''Forces program to wait num seconds.
Note: Shortest Delay--> 1ms'''
time.sleep(num)
return None
def IceSend(self, CommandInput):
'''Function that sends a serial string command to ICE Box
Input: ICE Box Number[int], ICE Slot Number[int], CommandInput[str]
Output: None (unless print line uncommented)/Read buffer always emptied!
Note 1: Enter a slot number outside range(1-8) and function sends command directly
to master board (ex. '#PowerOff' Command)
Note 2: COM Port is opened/closed each time funciton is run'''
#Open Port w/ ICE COM Default Settings
IceSer = serial.Serial(port='COM'+str(int(self.BoxNum)),baudrate=115200,timeout=self.IceTimeout,parity='N',stopbits=1,bytesize=8)
self.wait(.001)
#Define Command and Send (perform read after each command to maintain synchronicity)
if int(self.SlotNum) in range(1,9): #If a Valid Slot Number is input, send command to slot num
#Define Commands
MasterCommand = str('#slave ' + str(int(self.SlotNum)) + '\r\n')
SlaveCommand = str(str(CommandInput) + '\r\n')
#Send Commands/Close Port
IceSer.write(MasterCommand.encode())
self.wait(self.IceDelay)
IceOutputSlave = IceSer.read(self.IceByteRead).decode() #Read Buffer
self.wait(self.IceDelay)
IceSer.write(SlaveCommand.encode())
self.wait(self.IceDelay)
IceOutputReturn = IceSer.read(self.IceByteRead).decode() #Read Buffer
self.wait(self.IceDelay)
IceSer.close() #Close COM Port
            #Print and Return Output
            print( ' ')
            print( 'Master Board Return: ', IceOutputSlave)
            print( 'Slave Board Return: ', IceOutputReturn)
            return IceOutputReturn
else: #Command sent only to Master Board (preceding '#', no slot num to specify)
#Define Command
MasterCommand = str('#' + str(CommandInput) + '\r\n')
#Send Commands/Close Port
            IceSer.write(MasterCommand.encode())
            self.wait(self.IceDelay)
            IceOutputReturn = IceSer.read(self.IceByteRead).decode() #Read Buffer
            self.wait(self.IceDelay)
            IceSer.close() #Close COM Port
            #Print and Return Output
            print( ' ')
            print( 'Master Board Return: ', IceOutputReturn)
            return IceOutputReturn
# GET DATA FROM NI-DAQmx
def nidaxgrab():
with nidaqmx.Task() as task:
task.ai_channels.add_ai_voltage_chan("Dev1/ai0")
data = task.read(number_of_samples_per_channel=1)
power = ' '.join([str(elem) for elem in data])
return power
def CurrentSet(IB,current):
    return IB.IceSend('CurrSet '+str(current)) # IceSend takes only the command string; box/slot live on the instance
def makefolder(newpath):
if not os.path.exists(newpath):
os.makedirs(newpath)
return newpath
def loggingLoops(IB,iArray):
'''
Creates a directory and logs laser current and laser power.
The purpose of this is to find at which current mode-hops occur by seeing a sharp change in power
'''
logDir = makefolder(os.getcwd()+'\\testlogging\\'+time.strftime("%Y-%m-%d_%H-%M-%S"))
    print('Log Directory: %s' % logDir)
IB.IceSend('CurrLim 125')
### OPEN FILE ###
PIData = open(logDir+'\\PIData.csv', 'a+')
### LOGGING LOOPS ###
for i in iArray:
setCurrent = IB.IceSend('CurrSet '+str(i))
time.sleep(1) #Maybe this needs to be greater
line = str(setCurrent)+','+str(nidaxgrab())
print(line)
        PIData.write(line + '\n') # newline so each current/power pair lands on its own CSV row
### CLOSE FILE ###
PIData.close()
return None
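# A minimal sketch of the mode-hop detection described in the loggingLoops
# docstring (illustrative assumptions: power readings are floats and a fixed
# jump threshold is good enough to flag a hop):
def find_mode_hops(currents, powers, threshold=0.1):
    # A mode hop shows up as a sharp jump in power between adjacent current
    # steps; return the currents right after each jump above the threshold.
    jumps = np.abs(np.diff(np.asarray(powers, dtype=float)))
    return [currents[i + 1] for i in np.where(jumps > threshold)[0]]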
def main():
BoxNum = input('Box Num: ')
SlotNum = input('Slot Num of CS1 Board: ')
IB = ICE(BoxNum,SlotNum)
iArray = np.linspace(0,100,100)
iArray = np.round(iArray,1)
loggingLoops(IB,iArray)
return None
if(__name__=="__main__"):
main()
|
[
"ryan.robinson@Vescent.local"
] |
ryan.robinson@Vescent.local
|
9f079bd4933ce55dbaf3592ee17fa1c5dcd75ac5
|
de788449bd7433bbfc7c0574a0d81fd9dd24649f
|
/geoportal/geoportailv3_geoportal/static-ngeo/ngeo/buildtools/test-eof-newline
|
6872c85514ca17a1a053dbfa72b1aa9ce77eb0ca
|
[
"MIT"
] |
permissive
|
Geoportail-Luxembourg/geoportailv3
|
6ab27bed755ff4f933c2f9700e2d6086ae8f5b68
|
45722f46bd5e4650ed3b01b1920de3732f848186
|
refs/heads/master
| 2023-08-18T21:02:45.652482
| 2023-08-02T14:12:56
| 2023-08-02T14:12:56
| 24,669,372
| 25
| 17
|
MIT
| 2023-08-25T13:39:08
| 2014-10-01T07:23:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,302
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2017, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import os
import subprocess
exit_code = 0
FNULL = open(os.devnull, 'w')
for filename in subprocess.check_output(["git", "ls-files"]).decode("utf-8").split("\n"):
if os.path.isfile(filename):
if subprocess.call(
"git check-attr -a '{}' | grep ' text: set'".format(filename),
shell=True, stdout=FNULL) == 0:
size = os.stat(filename).st_size
if size != 0:
with open(filename) as f:
f.seek(size - 1)
if ord(f.read()) != ord("\n"):
print("No new line at end of '{}' file.".format(filename))
exit_code = 2
exit(exit_code)
|
[
"antoine@abt.im"
] |
antoine@abt.im
|
|
d165a083b3e3a41120522e9b4cd22520c188909d
|
029fa717816e977e736100128168c1c66161541d
|
/aries_cloudagent/wallet/tests/test_key_pair.py
|
b81062d8563ac7d8651bf77dad80875a2f3da169
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
estrehle/aries-cloudagent-python
|
5cd0ac23851268d435b9eafe6b59e6efdb26ad90
|
1460b2d32c933944b4677cf25a78c4ace07346c8
|
refs/heads/main
| 2023-09-04T10:31:36.141037
| 2021-11-10T12:16:16
| 2021-11-10T12:16:16
| 424,557,794
| 1
| 0
|
Apache-2.0
| 2021-11-04T10:41:01
| 2021-11-04T10:41:01
| null |
UTF-8
|
Python
| false
| false
| 3,954
|
py
|
from asynctest import TestCase as AsyncTestCase
import json
from ...storage.error import StorageNotFoundError
from ..util import bytes_to_b58
from ..key_type import KeyType
from ...core.in_memory import InMemoryProfile
from ...storage.in_memory import InMemoryStorage
from ..key_pair import KeyPairStorageManager, KEY_PAIR_STORAGE_TYPE
class TestKeyPairStorageManager(AsyncTestCase):
test_public_key = b"somepublickeybytes"
test_secret = b"verysecretkey"
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.store = InMemoryStorage(self.profile)
self.key_pair_mgr = KeyPairStorageManager(self.store)
async def test_create_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert record.tags == {"verkey": verkey, "key_type": KeyType.ED25519.key_type}
assert value["verkey"] == verkey
assert value["secret_key"] == bytes_to_b58(self.test_secret)
assert value["metadata"] == {}
assert value["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
key_pair = await self.key_pair_mgr.get_key_pair(verkey)
assert key_pair["verkey"] == verkey
assert key_pair["secret_key"] == bytes_to_b58(self.test_secret)
assert key_pair["metadata"] == {}
assert key_pair["key_type"] == KeyType.ED25519.key_type
async def test_get_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.get_key_pair("not_existing_verkey")
async def test_delete_key_pair(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
await self.key_pair_mgr.delete_key_pair(verkey)
# should be deleted now
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair(verkey)
async def test_delete_key_pair_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.delete_key_pair("non_existing_verkey")
async def test_update_key_pair_metadata(self):
await self.key_pair_mgr.store_key_pair(
public_key=self.test_public_key,
secret_key=self.test_secret,
key_type=KeyType.ED25519,
metadata={"some": "data"},
)
verkey = bytes_to_b58(self.test_public_key)
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some": "data"}
await self.key_pair_mgr.update_key_pair_metadata(verkey, {"some_other": "data"})
record = await self.store.find_record(KEY_PAIR_STORAGE_TYPE, {"verkey": verkey})
assert record
value = json.loads(record.value)
assert value["metadata"] == {"some_other": "data"}
async def test_update_key_pair_metadata_x_not_found(self):
with self.assertRaises(StorageNotFoundError):
await self.key_pair_mgr.update_key_pair_metadata("non_existing_verkey", {})
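# Minimal usage sketch of the API exercised above (assumes the same in-memory
# profile/storage wiring as setUp; every call shown is taken from the tests):
# profile = InMemoryProfile.test_profile()
# mgr = KeyPairStorageManager(InMemoryStorage(profile))
# await mgr.store_key_pair(public_key=b"pk", secret_key=b"sk", key_type=KeyType.ED25519)
# key_pair = await mgr.get_key_pair(bytes_to_b58(b"pk"))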
|
[
"timo@animo.id"
] |
timo@animo.id
|
3e35560a675840b2ed59a45d39e280ce612af5c6
|
4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5
|
/suning/api/union/UnionInfomationGetRequest.py
|
5a52d242f32e5e4c7c3d65d8e1872c3832f9291a
|
[] |
no_license
|
shijingyu/sunningAPI
|
241f33b0660dc84635ce39688fed499f5c57a5da
|
4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5
|
refs/heads/master
| 2020-04-24T22:15:11.584028
| 2019-02-24T06:41:20
| 2019-02-24T06:41:20
| 172,305,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-1-27
@author: suning
'''
from suning.api.abstract import AbstractApi
class UnionInfomationGetRequest(AbstractApi):
'''
'''
def __init__(self):
AbstractApi.__init__(self)
self.goodsCode = None
self.setParamRule({
'goodsCode':{'allow_empty':False}
})
def getApiBizName(self):
return 'getUnionInfomation'
def getApiMethod(self):
return 'suning.netalliance.unioninfomation.get'
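# Hypothetical usage sketch (the request/response plumbing lives in AbstractApi
# elsewhere in this SDK and is assumed here, not shown; goodsCode value is made up):
# req = UnionInfomationGetRequest()
# req.goodsCode = '10000123456' # must be non-empty per the param rule above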
|
[
"945090896@qq.com"
] |
945090896@qq.com
|
2916961de45167313f15922e1456df4053e14745
|
afe57be84b5dde07967be0e23f677ed85ab8d4da
|
/posts/urls.py
|
a1c411fb775d8e4ef1bf807f6402c87b547e111e
|
[] |
no_license
|
furgot100/CarHub
|
0c3fdab1529c589e04eabe94c615ce953d4501b1
|
417de07dce488b50971396d200c721e0869382a2
|
refs/heads/master
| 2022-04-28T10:27:47.879441
| 2020-04-22T18:24:54
| 2020-04-22T18:24:54
| 247,791,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
from django.urls import path
from .views import PostCreateView, PostDetailView, PostListView, HomeView, PostDeleteView, ProductListView, ProductDetailView, ProductCreateView, EventListView, EventDetailView, EventCreateView
from django.conf import settings
from django.conf.urls.static import static
app_name = 'posts'
urlpatterns = [
path('', HomeView.as_view(), name="home"),
path('blog/', PostListView.as_view(), name='post-list-page'),
path('new/', PostCreateView.as_view(), name='post-new-page' ),
path('blog/<str:slug>/', PostDetailView.as_view(), name='post-details-page'),
# path('<slug>/delete', PostDeleteView.as_view(), name='post-delete-page')
path('store/', ProductListView.as_view(), name="store-list"),
path('store/<str:slug>/', ProductDetailView.as_view(), name='store-item'),
path('store/new', ProductCreateView.as_view(), name='store-new'),
path('event/', EventListView.as_view(), name="event-list"),
path('event/<str:slug>/', EventDetailView.as_view(), name="event-detail"),
path('event/new', EventCreateView.as_view(), name='event-new'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"frtsang40@gmail.com"
] |
frtsang40@gmail.com
|
e03e0083b6c860f813b2cae42fbca20c5014d738
|
6ba1da25bb624c8bf74f1899f64b450602f12ff4
|
/Example/PY/django/TestDemo/test_function.py
|
257991ced039e50cdddaa45a0e0b660d0048ea62
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-free-unknown",
"FSFAP",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Zoey-dot/pinpoint-c-agent
|
1dd58ce89610a7aafcdda842145a764cebe3f783
|
c76f9e41d8f2a9fdd8b0c90d52bb30e08bbd634d
|
refs/heads/master
| 2021-04-04T06:58:19.145805
| 2020-08-05T02:14:02
| 2020-08-05T02:14:02
| 263,580,530
| 1
| 0
|
Apache-2.0
| 2020-07-01T07:13:35
| 2020-05-13T09:11:23
| null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from pinpoint.plugins.PinpointCommonPlugin import PinpointCommonPlugin
@PinpointCommonPlugin("", __name__)
def test_func1(arg1, arg2):
return "this is test_func1: arg1=%s, arg2=%s"%(arg1, arg2)
class TestUserFunc1(object):
def __init__(self, name, score):
self.name = name
self.score = score
@PinpointCommonPlugin("TestUserFunc1", __name__)
def test_func2(self):
return "%s\'s score is : %s"%(self.name, self.score)
|
[
"su.wei@navercorp.com"
] |
su.wei@navercorp.com
|
a0f37bf8594ae4e002a3cbda9f0f4fb8efd4c144
|
038dc1f463fba1889264de89369791d7359b4f86
|
/requested_events/views.py
|
c8f249e07f7a2491ed5370300a6202ac0483e330
|
[] |
no_license
|
paishrikrishna/BE-Project
|
079d979fd1a2b158dadc8f9d72d1153f8c17aa21
|
e0949c2523b8fc3d0f0edfd86eaf8717ff824a60
|
refs/heads/master
| 2023-04-13T11:53:31.312949
| 2021-05-04T13:19:34
| 2021-05-04T13:19:34
| 313,569,032
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,337
|
py
|
from django.shortcuts import render
from calander.form import events_form
from calander.models import events
from .form import req_events_form
from .models import req_events
from login_page.login_form import login_form
from login_page.models import login_model
from new_users.models import new_login_model
# Create your views here.
def req_events_index_page(request,user,auth):
if request.method=="POST":
if request.POST['action']=="Add Event":
try:
events_form().save()
except:
obj = events.objects.get(organizer='n/a')
obj.organizer = request.POST['Organizer']
obj.content = request.POST['Agenda']
obj.date = request.POST['New_date']
obj.save()
obj = req_events.objects.get(id=int(request.POST['row']))
obj.delete()
elif request.POST['action']=="Delete Event":
obj = req_events.objects.get(id=int(request.POST['row']))
obj.delete()
elif request.POST['action']=="Add User":
try:
login_form().save()
except:
obj = login_model.objects.get(username='n/a')
obj.username = request.POST['username']
obj.password = request.POST['password']
obj.auth = "member"
obj.link = request.POST['link']
obj.email = request.POST['email']
obj.wing = request.POST['wing']
obj.floor = request.POST['floor']
obj.flat = request.POST['flat']
obj.save()
obj = new_login_model.objects.get(email=(request.POST['email']))
obj.delete()
elif request.POST['action']=="Delete User":
obj = new_login_model.objects.get(email=(request.POST['email']))
obj.delete()
obj = list(req_events.objects.all())
organizer , content , date ,ID= [],[],[],[]
for i in obj:
organizer.append(i.organizer)
content.append(i.content)
date.append(i.date)
ID.append(i.id)
obj = list(new_login_model.objects.all())
username,password ,user_ID,floor,wing,link,pswd,ID= [],[],[],[],[],[],[],[]
for i in obj:
username.append(i.username)
password.append(i.email)
user_ID.append(i.flat)
ID.append(i.id)
floor.append(i.floor)
wing.append(i.wing)
link.append(i.link)
pswd.append(i.password)
return render(request,"requested_events.html",{"ID":ID,"floor":floor,"pswd":pswd,"wing":wing,"link":link,"organizer":organizer,"event_dates":date,"content":content,"user":user,"username":username,"password":password,"user_ID":user_ID,"auth":auth})
|
[
"2017.shrikrishna.pai@ves.ac.in"
] |
2017.shrikrishna.pai@ves.ac.in
|
0ae55acd20bb59d6c3f499e32e0f526820a351d7
|
822d3cd484b54f0531fc205520c765a8321c0613
|
/pyFile/8.面向对象/2.类的属性/9.类方法和静态方法.py
|
a0ccbf84964d8f9059c7feb1ae5efeedb1a3e65a
|
[] |
no_license
|
mghxy123/learnPython
|
31d1cc18deeed5a89864ca0333fe488e0dbf08b4
|
00740e87d55a4dffd78773deaff8689485df31e8
|
refs/heads/master
| 2021-07-21T14:31:02.421788
| 2020-06-27T11:28:01
| 2020-06-27T11:28:01
| 187,751,182
| 0
| 0
| null | 2020-06-07T05:14:05
| 2019-05-21T02:58:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 9.类方法和静态方法.py
# Author: HuXianyong
# Mail: mghxy123@163.com
# Date : 2019/5/16 0016
# Ordinary functions inside a class
# class Person:
# def normal_method(): # Is this allowed? Yes: there is no syntax problem and it runs fine, it is just not conventional to write it this way
# print('normal')
#
# # How do we call it?
# Person.normal_method() # Allowed? Yes, because this simply calls the function directly through the class
# # Person().normal_method() # Allowed? No: calling through an instance passes the instance as the first argument, but the function declares no self to receive it, so this raises an error
# print(Person.__dict__)
# # Static method
# class Person:
# @staticmethod
# def class_method():
# print('this is staticMethod')
# Person.class_method()
# Person().class_method()
# Class method and static method
class Person:
@classmethod
def class_method(cls): # what is cls? It is the class the method was called on
print('this is class method')
print('class = {0.__name__}({0})'.format(cls))
cls.HEIGHT = 170
@staticmethod
def static_method():
print('this is staticMethod')
Person.class_method()
print(Person.__dict__)
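# A minimal demo of the difference (sketch): a classmethod receives the class
# (or subclass) it is called on; a staticmethod receives nothing implicitly.
class Student(Person):
    pass
Student.class_method() # prints "class = Student(...)" -- cls is the subclass here
Student.static_method() # no implicit first argument at all
print(Student.HEIGHT) # 170, set on Student by class_method via cls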
|
[
"mghxy123@163.com"
] |
mghxy123@163.com
|
c603c10469c17a5fe10f107b6cfe4f567d52bce1
|
e294a32686c46c520186326be47a48861aaacdad
|
/final number while(終極密碼).py
|
219dcd95b9b4f7a4fc730498539f43d897995102
|
[] |
no_license
|
goodgood9897/python-2020-8
|
6381894fb2e68f35fe3d583aec6761b32e22149c
|
375969d7c457340659b35d1a9fb41479e0b05c09
|
refs/heads/master
| 2022-11-30T06:35:24.609085
| 2020-08-07T09:03:37
| 2020-08-07T09:03:37
| 284,589,524
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
import random
a = 1
b = 100
number = random.randint(1,100)
while True:
print('Now %d-%d' % (a, b))
answer = int(input('Please enter number:'))
if answer<a or answer>b:
print('Please enter again.')
elif answer>number:
b=answer
elif answer<number:
a=answer
elif answer==number:
print('correct~~~!')
break
|
[
"noreply@github.com"
] |
goodgood9897.noreply@github.com
|
d7336abe08b51fb335e57cf3d53ee20b79886453
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/insights/v20160301/_inputs.py
|
5910733c44e6efa9bc7563418d54942acbf6f519
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,307
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = [
'LocationThresholdRuleConditionArgs',
'ManagementEventAggregationConditionArgs',
'ManagementEventRuleConditionArgs',
'RetentionPolicyArgs',
'RuleEmailActionArgs',
'RuleManagementEventClaimsDataSourceArgs',
'RuleManagementEventDataSourceArgs',
'RuleMetricDataSourceArgs',
'RuleWebhookActionArgs',
'ThresholdRuleConditionArgs',
]
@pulumi.input_type
class LocationThresholdRuleConditionArgs:
def __init__(__self__, *,
failed_location_count: pulumi.Input[int],
odata_type: pulumi.Input[str],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a certain number of locations failing.
:param pulumi.Input[int] failed_location_count: the number of locations that must fail to activate the alert.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "failed_location_count", failed_location_count)
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition')
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="failedLocationCount")
def failed_location_count(self) -> pulumi.Input[int]:
"""
the number of locations that must fail to activate the alert.
"""
return pulumi.get(self, "failed_location_count")
@failed_location_count.setter
def failed_location_count(self, value: pulumi.Input[int]):
pulumi.set(self, "failed_location_count", value)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventAggregationConditionArgs:
def __init__(__self__, *,
operator: Optional[pulumi.Input['ConditionOperator']] = None,
threshold: Optional[pulumi.Input[float]] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
How the data that is collected should be combined over time.
:param pulumi.Input['ConditionOperator'] operator: the condition operator.
:param pulumi.Input[float] threshold: The threshold value that activates the alert.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
if operator is not None:
pulumi.set(__self__, "operator", operator)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter
def operator(self) -> Optional[pulumi.Input['ConditionOperator']]:
"""
the condition operator.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: Optional[pulumi.Input['ConditionOperator']]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> Optional[pulumi.Input[float]]:
"""
The threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
@pulumi.input_type
class ManagementEventRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
aggregation: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']] = None,
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None):
"""
A management event rule condition.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
:param pulumi.Input['ManagementEventAggregationConditionArgs'] aggregation: How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition')
if aggregation is not None:
pulumi.set(__self__, "aggregation", aggregation)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def aggregation(self) -> Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]:
"""
How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
"""
return pulumi.get(self, "aggregation")
@aggregation.setter
def aggregation(self, value: Optional[pulumi.Input['ManagementEventAggregationConditionArgs']]):
pulumi.set(self, "aggregation", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@pulumi.input_type
class RetentionPolicyArgs:
def __init__(__self__, *,
days: pulumi.Input[int],
enabled: pulumi.Input[bool]):
"""
Specifies the retention policy for the log.
:param pulumi.Input[int] days: the number of days for the retention in days. A value of 0 will retain the events indefinitely.
:param pulumi.Input[bool] enabled: a value indicating whether the retention policy is enabled.
"""
pulumi.set(__self__, "days", days)
pulumi.set(__self__, "enabled", enabled)
@property
@pulumi.getter
def days(self) -> pulumi.Input[int]:
"""
the number of days for the retention in days. A value of 0 will retain the events indefinitely.
"""
return pulumi.get(self, "days")
@days.setter
def days(self, value: pulumi.Input[int]):
pulumi.set(self, "days", value)
@property
@pulumi.getter
def enabled(self) -> pulumi.Input[bool]:
"""
a value indicating whether the retention policy is enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "enabled", value)
@pulumi.input_type
class RuleEmailActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
custom_emails: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
send_to_service_owners: Optional[pulumi.Input[bool]] = None):
"""
Specifies the action to send email when the rule condition is evaluated. The discriminator is always RuleEmailAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
:param pulumi.Input[Sequence[pulumi.Input[str]]] custom_emails: the list of administrator's custom email addresses to notify of the activation of the alert.
:param pulumi.Input[bool] send_to_service_owners: Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction')
if custom_emails is not None:
pulumi.set(__self__, "custom_emails", custom_emails)
if send_to_service_owners is not None:
pulumi.set(__self__, "send_to_service_owners", send_to_service_owners)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleEmailAction'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="customEmails")
def custom_emails(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
the list of administrator's custom email addresses to notify of the activation of the alert.
"""
return pulumi.get(self, "custom_emails")
@custom_emails.setter
def custom_emails(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "custom_emails", value)
@property
@pulumi.getter(name="sendToServiceOwners")
def send_to_service_owners(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
"""
return pulumi.get(self, "send_to_service_owners")
@send_to_service_owners.setter
def send_to_service_owners(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "send_to_service_owners", value)
@pulumi.input_type
class RuleManagementEventClaimsDataSourceArgs:
def __init__(__self__, *,
email_address: Optional[pulumi.Input[str]] = None):
"""
The claims for a rule management event data source.
:param pulumi.Input[str] email_address: the email address.
"""
if email_address is not None:
pulumi.set(__self__, "email_address", email_address)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> Optional[pulumi.Input[str]]:
"""
the email address.
"""
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "email_address", value)
@pulumi.input_type
class RuleManagementEventDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
claims: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']] = None,
event_name: Optional[pulumi.Input[str]] = None,
event_source: Optional[pulumi.Input[str]] = None,
legacy_resource_id: Optional[pulumi.Input[str]] = None,
level: Optional[pulumi.Input[str]] = None,
metric_namespace: Optional[pulumi.Input[str]] = None,
operation_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_location: Optional[pulumi.Input[str]] = None,
resource_provider_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
sub_status: Optional[pulumi.Input[str]] = None):
"""
A rule management event data source. The discriminator field is always RuleManagementEventDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
:param pulumi.Input['RuleManagementEventClaimsDataSourceArgs'] claims: the claims.
:param pulumi.Input[str] event_name: the event name.
:param pulumi.Input[str] event_source: the event source.
:param pulumi.Input[str] legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] level: the level.
:param pulumi.Input[str] metric_namespace: the namespace of the metric.
:param pulumi.Input[str] operation_name: The name of the operation that should be checked for. If no name is provided, any operation will match.
:param pulumi.Input[str] resource_group_name: the resource group name.
:param pulumi.Input[str] resource_location: the location of the resource.
:param pulumi.Input[str] resource_provider_name: the resource provider name.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] status: The status of the operation that should be checked for. If no status is provided, any status will match.
:param pulumi.Input[str] sub_status: the substatus.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource')
if claims is not None:
pulumi.set(__self__, "claims", claims)
if event_name is not None:
pulumi.set(__self__, "event_name", event_name)
if event_source is not None:
pulumi.set(__self__, "event_source", event_source)
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if level is not None:
pulumi.set(__self__, "level", level)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if operation_name is not None:
pulumi.set(__self__, "operation_name", operation_name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_provider_name is not None:
pulumi.set(__self__, "resource_provider_name", resource_provider_name)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
if status is not None:
pulumi.set(__self__, "status", status)
if sub_status is not None:
pulumi.set(__self__, "sub_status", sub_status)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def claims(self) -> Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]:
"""
the claims.
"""
return pulumi.get(self, "claims")
@claims.setter
def claims(self, value: Optional[pulumi.Input['RuleManagementEventClaimsDataSourceArgs']]):
pulumi.set(self, "claims", value)
@property
@pulumi.getter(name="eventName")
def event_name(self) -> Optional[pulumi.Input[str]]:
"""
the event name.
"""
return pulumi.get(self, "event_name")
@event_name.setter
def event_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_name", value)
@property
@pulumi.getter(name="eventSource")
def event_source(self) -> Optional[pulumi.Input[str]]:
"""
the event source.
"""
return pulumi.get(self, "event_source")
@event_source.setter
def event_source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "event_source", value)
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@legacy_resource_id.setter
def legacy_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "legacy_resource_id", value)
@property
@pulumi.getter
def level(self) -> Optional[pulumi.Input[str]]:
"""
the level.
"""
return pulumi.get(self, "level")
@level.setter
def level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "level", value)
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[pulumi.Input[str]]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@metric_namespace.setter
def metric_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_namespace", value)
@property
@pulumi.getter(name="operationName")
def operation_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the operation that should be checked for. If no name is provided, any operation will match.
"""
return pulumi.get(self, "operation_name")
@operation_name.setter
def operation_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "operation_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@resource_location.setter
def resource_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_location", value)
@property
@pulumi.getter(name="resourceProviderName")
def resource_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
the resource provider name.
"""
return pulumi.get(self, "resource_provider_name")
@resource_provider_name.setter
def resource_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_provider_name", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the operation that should be checked for. If no status is provided, any status will match.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter(name="subStatus")
def sub_status(self) -> Optional[pulumi.Input[str]]:
"""
the substatus.
"""
return pulumi.get(self, "sub_status")
@sub_status.setter
def sub_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_status", value)
@pulumi.input_type
class RuleMetricDataSourceArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
legacy_resource_id: Optional[pulumi.Input[str]] = None,
metric_name: Optional[pulumi.Input[str]] = None,
metric_namespace: Optional[pulumi.Input[str]] = None,
resource_location: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None):
"""
A rule metric data source. The discriminator value is always RuleMetricDataSource in this case.
:param pulumi.Input[str] odata_type: specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
:param pulumi.Input[str] legacy_resource_id: the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
:param pulumi.Input[str] metric_name: the name of the metric that defines what the rule monitors.
:param pulumi.Input[str] metric_namespace: the namespace of the metric.
:param pulumi.Input[str] resource_location: the location of the resource.
:param pulumi.Input[str] resource_uri: the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource')
if legacy_resource_id is not None:
pulumi.set(__self__, "legacy_resource_id", legacy_resource_id)
if metric_name is not None:
pulumi.set(__self__, "metric_name", metric_name)
if metric_namespace is not None:
pulumi.set(__self__, "metric_namespace", metric_namespace)
if resource_location is not None:
pulumi.set(__self__, "resource_location", resource_location)
if resource_uri is not None:
pulumi.set(__self__, "resource_uri", resource_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of data source. There are two types of rule data sources: RuleMetricDataSource and RuleManagementEventDataSource
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter(name="legacyResourceId")
def legacy_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
the legacy resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "legacy_resource_id")
@legacy_resource_id.setter
def legacy_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "legacy_resource_id", value)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> Optional[pulumi.Input[str]]:
"""
the name of the metric that defines what the rule monitors.
"""
return pulumi.get(self, "metric_name")
@metric_name.setter
def metric_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_name", value)
@property
@pulumi.getter(name="metricNamespace")
def metric_namespace(self) -> Optional[pulumi.Input[str]]:
"""
the namespace of the metric.
"""
return pulumi.get(self, "metric_namespace")
@metric_namespace.setter
def metric_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "metric_namespace", value)
@property
@pulumi.getter(name="resourceLocation")
def resource_location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the resource.
"""
return pulumi.get(self, "resource_location")
@resource_location.setter
def resource_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_location", value)
@property
@pulumi.getter(name="resourceUri")
def resource_uri(self) -> Optional[pulumi.Input[str]]:
"""
the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule.
"""
return pulumi.get(self, "resource_uri")
@resource_uri.setter
def resource_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_uri", value)
@pulumi.input_type
class RuleWebhookActionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_uri: Optional[pulumi.Input[str]] = None):
"""
Specifies the action to post to service when the rule condition is evaluated. The discriminator is always RuleWebhookAction in this case.
:param pulumi.Input[str] odata_type: specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
:param pulumi.Input[str] service_uri: the service uri to Post the notification when the alert activates or resolves.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction')
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_uri is not None:
pulumi.set(__self__, "service_uri", service_uri)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of the action. There are two types of actions: RuleEmailAction and RuleWebhookAction.
Expected value is 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="serviceUri")
def service_uri(self) -> Optional[pulumi.Input[str]]:
"""
the service uri to Post the notification when the alert activates or resolves.
"""
return pulumi.get(self, "service_uri")
@service_uri.setter
def service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_uri", value)
@pulumi.input_type
class ThresholdRuleConditionArgs:
def __init__(__self__, *,
odata_type: pulumi.Input[str],
operator: pulumi.Input['ConditionOperator'],
threshold: pulumi.Input[float],
data_source: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]] = None,
time_aggregation: Optional[pulumi.Input['TimeAggregationOperator']] = None,
window_size: Optional[pulumi.Input[str]] = None):
"""
A rule condition based on a metric crossing a threshold.
:param pulumi.Input[str] odata_type: specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
:param pulumi.Input['ConditionOperator'] operator: the operator used to compare the data and the threshold.
:param pulumi.Input[float] threshold: the threshold value that activates the alert.
:param pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']] data_source: the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
:param pulumi.Input['TimeAggregationOperator'] time_aggregation: the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
:param pulumi.Input[str] window_size: the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition')
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "threshold", threshold)
if data_source is not None:
pulumi.set(__self__, "data_source", data_source)
if time_aggregation is not None:
pulumi.set(__self__, "time_aggregation", time_aggregation)
if window_size is not None:
pulumi.set(__self__, "window_size", window_size)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> pulumi.Input[str]:
"""
specifies the type of condition. This can be one of three types: ManagementEventRuleCondition (occurrences of management events), LocationThresholdRuleCondition (based on the number of failures of a web test), and ThresholdRuleCondition (based on the threshold of a metric).
Expected value is 'Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition'.
"""
return pulumi.get(self, "odata_type")
@odata_type.setter
def odata_type(self, value: pulumi.Input[str]):
pulumi.set(self, "odata_type", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input['ConditionOperator']:
"""
the operator used to compare the data and the threshold.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input['ConditionOperator']):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def threshold(self) -> pulumi.Input[float]:
"""
the threshold value that activates the alert.
"""
return pulumi.get(self, "threshold")
@threshold.setter
def threshold(self, value: pulumi.Input[float]):
pulumi.set(self, "threshold", value)
@property
@pulumi.getter(name="dataSource")
def data_source(self) -> Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]:
"""
the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
"""
return pulumi.get(self, "data_source")
@data_source.setter
def data_source(self, value: Optional[pulumi.Input[Union['RuleManagementEventDataSourceArgs', 'RuleMetricDataSourceArgs']]]):
pulumi.set(self, "data_source", value)
@property
@pulumi.getter(name="timeAggregation")
def time_aggregation(self) -> Optional[pulumi.Input['TimeAggregationOperator']]:
"""
the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric.
"""
return pulumi.get(self, "time_aggregation")
@time_aggregation.setter
def time_aggregation(self, value: Optional[pulumi.Input['TimeAggregationOperator']]):
pulumi.set(self, "time_aggregation", value)
@property
@pulumi.getter(name="windowSize")
def window_size(self) -> Optional[pulumi.Input[str]]:
"""
the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
"""
return pulumi.get(self, "window_size")
@window_size.setter
def window_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "window_size", value)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
213fc24bf448ac094d3843b30e9c24e1aaa77fcc
|
402b45344b310c76c37f354c30a82d1934667735
|
/crawl_push.py
|
2435ece7d58e2e06518fb2919706a8d32263b4d1
|
[] |
no_license
|
yuktmitash21/Crawler
|
5cba755cba669a55d9e38d5e95166760cc0417cd
|
d9fb5710f5ca3dea9c3516e0ed7b963d4a07ff83
|
refs/heads/main
| 2023-03-13T23:25:45.524622
| 2021-02-07T13:24:53
| 2021-02-07T13:24:53
| 336,657,238
| 0
| 0
| null | 2021-02-07T13:24:54
| 2021-02-06T23:20:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
import pandas as pd
import requests # Pushshift accesses Reddit via a URL, so this is needed
import json #JSON manipulation
import csv #To Convert final table into a csv file to save to your machine
import time
import datetime
def getPushshiftData(query, after, before, limit):
url = 'https://api.pushshift.io/reddit/search/submission/?&before=' + before + '&after=' + after + '&q=' + query + '&sort_type=score&sort=desc&subreddit=wallstreetbets&size=' + limit
#Print URL to show user
print(url)
#Request URL
r = requests.get(url)
#Load JSON data from webpage into data variable
data = json.loads(r.text)
#return the data element which contains all the submissions data
return data['data']
tickers = ["GME", "SPY", "AMC", "BB", "TSLA", "PLNTR", "CRSR", "NOK", "AAPL", "SNAP"]
before = datetime.datetime(2021, 1, 5)
later = before + datetime.timedelta(days=1)
map = {}
allData = []
for ticker in tickers:
for i in range(0, 30):
data = getPushshiftData(ticker, str(int(before.timestamp())), str(int(later.timestamp())), str(1000))
print (len(data), ticker)
for dat in data:
allData.append({ 'score': dat.get('score') or 0, 'num_comments': dat.get('num_comments') or 0, 'created': dat.get('created_utc') or 0, 'title': dat.get('title') or '', 'body': dat.get('selftext') or '', 'upvote_ratio': dat.get('upvote_ratio') or ''})
before = later
later += datetime.timedelta(days=1)
time.sleep(2)
map[ticker] = allData
allData = []
before = datetime.datetime(2021, 1, 5)
later = before + datetime.timedelta(days=1)
json_string = json.dumps(map)
with open('data-gme.json', 'w') as f:
json.dump(json_string, f)
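# Reading the dump back (sketch): the file holds a JSON *string*, because the
# output of json.dumps is passed to json.dump above, so it must be decoded twice:
# with open('data-gme.json') as f:
# tickers_map = json.loads(json.load(f))
# print(len(tickers_map['GME']))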
|
[
"ymitash3@gatech.edu"
] |
ymitash3@gatech.edu
|
987828b08e77fc4ed6a670121d87f280fc0aed0b
|
5e0a7d90b3fd5d16bbc52eb0c8a118b835c17bad
|
/test/maxicode.py
|
e8ab9114db9fee3362ec2c70d7acb04ecd94aac4
|
[
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"BSD-2-Clause"
] |
permissive
|
ehpale/elaphe
|
424abd206ce2af95d6e7de49758ca96cd6f797c8
|
0a0c51ee8627cccc57d557330ba6c2f2c5266960
|
refs/heads/master
| 2022-03-06T19:43:08.986519
| 2022-02-25T18:53:46
| 2022-02-25T18:53:46
| 59,607,180
| 11
| 5
|
NOASSERTION
| 2022-02-25T19:00:33
| 2016-05-24T20:40:54
|
PostScript
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
symbology = 'maxicode'
cases = [
('001.png', 'This is MaxiCode'),
('002.png', 'This is Maxi^067ode', dict(parse=True)),
('003.png', ('152382802^029840^029001^0291Z00004951^029UPSN^02906X610'
'^029159^0291234567^0291/1^029^029Y^029634 ALPHA DR^029P'
'ITTSBURGH^029PA^029^004'), dict(mode=2, parse=True)),
('004.png', ('ABC123^029840^029001^0291Z00004951^029UPSN^02906X610^029'
'159^0291234567^0291/1^029^029Y^029634 ALPHA DR^029PITTSB'
'URGH^029PA^029^004'), dict(mode=3, parse=True)),
('005.png', ('[)>^03001^02996152382802^029840^029001^0291Z00004951^029'
'UPSN^02906X610^029159^0291234567^0291/1^029^029Y^029634 '
'ALPHA DR^029PITTSBURGH^029PA^029^004'), dict(mode=2, parse=True)),
]
|
[
"whosaysni@gmail.com"
] |
whosaysni@gmail.com
|
f29fc6830528398b792fd60578b01a78f12aa4e7
|
41ede4fd3bfba1bff0166bca7aee80dcf21434c6
|
/ayhanyalcinsoy/Desktop/lxde/base/libfm/actions.py
|
ad79cdbb6f0b2d887aa5244a18b52080cbb19379
|
[] |
no_license
|
pisilinux/playground
|
a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c
|
e4e12fff8a847ba210befc8db7e2af8556c3adf7
|
refs/heads/master
| 2022-08-12T23:03:27.609506
| 2022-08-11T18:28:19
| 2022-08-11T18:28:19
| 8,429,459
| 16
| 22
| null | 2022-08-11T18:28:20
| 2013-02-26T09:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 811
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "libfm-%s" % (get.srcVERSION())
def setup():
autotools.configure("--disable-static \
--sysconfdir=/etc \
--enable-debug \
--enable-udisks \
--enable-demo")
pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
autotools.make()
def install():
pisitools.dosed("data/libfm.conf", "xarchiver", "file-roller")
autotools.install()
pisitools.dodoc("AUTHORS", "COPYING", "TODO")
|
[
"ayhanyalcinsoy@gmail.com"
] |
ayhanyalcinsoy@gmail.com
|
555920b473ecc5e50b86552eb52b4dc9e1a29a9c
|
522303c2fc1840bd3288b1be2ed1787b77ceff7d
|
/279.py
|
b09688cc59bec97c14657835b17bc17c9e976e62
|
[] |
no_license
|
RickyLiTHU/codePractice
|
0b0fc66fc32a651c5288645c98d0a58acdd6f7a1
|
74988e6d02968acb5fe8da811df6c1e706f2b125
|
refs/heads/master
| 2020-03-23T04:36:42.256827
| 2018-08-30T08:15:39
| 2018-08-30T08:15:39
| 141,093,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
import math
class Solution(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
edges = []
for i in range(1, int(math.ceil(math.sqrt(n)))+1):
edges.append(i*i)
depth = 1
nodes = set([n])
while nodes:
nextLevel = set()
for node in nodes:
for e in edges:
if node - e == 0:
return depth
elif node - e > 0:
nextLevel.add(node-e)
else:
break
depth += 1
nodes = nextLevel
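# Usage sketch (relies on the math import at the top of the file):
if __name__ == '__main__':
    print(Solution().numSquares(12)) # 3, since 12 = 4 + 4 + 4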
|
[
"noreply@github.com"
] |
RickyLiTHU.noreply@github.com
|
9275404a0fb19e0fa17944bb3c32530ebb0cca93
|
5fdbd06b033464fdd5bc5be7a181422a92e5fc3c
|
/RandomForestWithGPs/GPPython/gp.py
|
3d3681d4d33dcd90192f3a3b1c7e12bd86a8cf17
|
[] |
no_license
|
themasterlink/RandomForestWithGPs
|
02ab4b4473caef734c7234348163b973c03f73df
|
fcbd294b381ecba570ad34aca9eda1e70bf4e95e
|
refs/heads/master
| 2021-01-17T12:49:05.304383
| 2017-09-11T14:40:17
| 2017-09-11T14:40:17
| 59,106,215
| 2
| 2
| null | 2017-06-12T14:07:05
| 2016-05-18T10:34:39
|
C++
|
UTF-8
|
Python
| false
| false
| 6,865
|
py
|
#!/Users/Max/anaconda/bin/python
import numpy as np
import json
from pprint import pprint
import math
import scipy
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib.cm as cm
with open("init.json") as data_file:
data = json.load(data_file)
class GaussianProccess:
def __init__(self, fileName):
lines = open(fileName, "r").read().split("\n")
self.data = []
self.lSquared = float(data["GP"]["l"]) * float(data["GP"]["l"])
self.sigmaNSquared = float(data["GP"]["sigmaN"]) * float(data["GP"]["sigmaN"])
self.labels = []
for line in lines:
if len(line) > 3:
ele = line.split(",")
point = np.array([float(ele[0]), float(ele[1])])
self.data.append(point)
self.labels.append(-1 if int(ele[2]) == 0 else 1)
self.labels = np.asarray(self.labels)
self.dataPoints = len(self.data)
self.K = np.empty([self.dataPoints, self.dataPoints], dtype=float)
for i in range(0, self.dataPoints):
self.K[i][i] = self.sigmaNSquared
for j in range(i + 1, self.dataPoints):
temp = self.kernelOf(self.data[i], self.data[j])
self.K[i][j] = temp
self.K[j][i] = temp
def updatePis(self):
for i in range(0, self.dataPoints):
self.pis[i] = 1.0 / (1.0 + math.exp(-self.labels[i] * self.f[i]))
self.dPis[i] = self.t[i] - self.pis[i]
self.ddPis[i] = -(-self.pis[i] * (1 - self.pis[i])) # - to get minus dd Pi
self.sqrtDDPis[i] = math.sqrt(self.ddPis[i])
def trainF(self):
self.f = np.zeros(self.dataPoints)
self.pis = np.empty(self.dataPoints)
self.dPis = np.empty(self.dataPoints)
self.ddPis = np.empty(self.dataPoints)
self.sqrtDDPis = np.empty(self.dataPoints)
self.t = (self.labels + np.ones(self.dataPoints)) * 0.5
converge = False
eye = np.eye(self.dataPoints)
lastObject = 1e100;
while(not converge):
self.updatePis()
self.W = np.diag(self.ddPis)
self.WSqrt = np.diag(self.sqrtDDPis)
C = eye + np.dot(np.dot(self.WSqrt, self.K), self.WSqrt)
print("K:\n"+str(self.K))
print("inner:\n"+str(C))
self.L = scipy.linalg.cho_factor(C, lower = True)
self.U = scipy.linalg.cho_factor(C, lower = False)
b = np.dot(self.W, self.f) + self.dPis;
nenner = scipy.linalg.cho_solve(self.L, (np.dot(self.WSqrt,np.dot(self.K,b))))
self.a = b - np.dot(self.WSqrt, scipy.linalg.cho_solve(self.U, nenner))
self.f = np.dot(self.K, self.a)
prob = 1.0 / (1.0 + math.exp(-np.dot(self.labels,self.f)))
objective = -0.5 * np.dot(self.f, self.a) + math.log(max(min(prob,1-1e-7),1e-7));
print(objective)
if math.fabs(objective / lastObject - 1.0) < 1e-5:
converge = True
lastObject = objective
print("Trained")
return
def train(self):
converge = False
eye = np.eye(self.dataPoints)
lastObject = 1e100
while(not converge):
self.trainF()
logZ = -0.5 * np.dot(self.a, self.f) + (-math.log(1 + math.exp(-np.dot(self.labels, self.f)))) - np.log(self.L[0].diagonal()).sum()
R = np.dot(self.WSqrt, scipy.linalg.cho_solve(self.U, scipy.linalg.cho_solve(self.L, self.WSqrt)))
C = scipy.linalg.cho_solve(self.L, np.dot(self.WSqrt, self.K))
dddPis = np.empty(self.dataPoints)
for i in range(0, self.dataPoints):
ddPis = -self.ddPis[i];
dddPis[i] = - ddPis * (1-self.pis[i]) - self.pis[i] * (1 - ddPis)
s2 = -0.5 * (self.K.diagonal() - np.dot(C.T, C).diagonal()) * dddPis
#for i in range(0,3):
#C =
self.W = np.diag(self.ddPis)
self.WSqrt = np.diag(self.sqrtDDPis)
C = eye + np.dot(np.dot(self.WSqrt, self.K), self.WSqrt)
self.L = scipy.linalg.cho_factor(C, lower = True)
self.U = scipy.linalg.cho_factor(C, lower = False)
b = np.dot(self.W, self.f) + self.dPis;
nenner = scipy.linalg.cho_solve(self.L, (np.dot(self.WSqrt,np.dot(self.K,b))))
a = b - np.dot(self.WSqrt, scipy.linalg.cho_solve(self.U, nenner))
self.f = np.dot(self.K, a)
prob = 1.0 / (1.0 + math.exp(-np.dot(self.labels,self.f)))
objective = -0.5 * np.dot(self.f, a) + math.log(prob if prob > 1e-7 and prob < 1 - 1e-7 else 1e-7 if prob <= 1e-7 else 1 - 1e-7);
print(objective)
#if math.fabs(objective / lastObject - 1.0) < 1e-5:
converge = True
lastObject = objective
print("Trained")
return
def predict(self, newPoint):
kXStar = np.empty(self.dataPoints)
for i in range(0, self.dataPoints):
kXStar[i] = self.kernelOf(newPoint, self.data[i])
fStar = np.dot(kXStar, self.dPis)
v = scipy.linalg.cho_solve(self.L, np.dot(self.WSqrt,kXStar))
vFStar = math.fabs(self.sigmaNSquared + 1 - np.dot(v,v))
start = fStar - vFStar * 1.5
end = fStar + vFStar * 1.5
stepSize = (end - start) / float(data["GP"]["samplingAmount"])
prob = 0.0
for p in np.arange(start,end,stepSize):
gaussRand = np.random.normal(fStar, vFStar)
height = 1.0 / (1.0 + math.exp(p)) * gaussRand
prob += height * stepSize;
return max(min(prob,1), 0)
def plot(self):
plt.figure(0)
min = np.min(self.data)
max = np.max(self.data)
min -= (max-min) * 0.2
max += (max-min) * 0.2
stepSize = (max - min) / float(data["GP"]["plotRes"]);
listGrid = []
i = 0
for x in np.arange(min,max, stepSize):
print("Done: " + str(float(i) / float(data["GP"]["plotRes"]) * 100) + "%")
i += 1
newList = []
for y in np.arange(min,max, stepSize):
newPoint = [y,x]
prob = self.predict(newPoint)
newList.append(prob)
listGrid.append(newList)
plt.imshow(listGrid, extent=(max, min, min, max), interpolation='nearest', cmap=cm.rainbow)
plt.gca().invert_xaxis()
plt.gca().set_ylim([min, max])
plt.gca().set_xlim([min, max])
for i in range(0,self.dataPoints):
plt.plot(self.data[i][0],self.data[i][1], 'bo' if self.labels[i] == 1 else 'ro')
print("Finished plotting")
plt.show()
def kernelOf(self, x, y):
diff = x - y
return math.exp(- 0.5 / self.lSquared * diff.dot(diff));
gp = GaussianProccess(data["Training"]["path"])
gp.trainF()
gp.plot()
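# Note on the cho_factor/cho_solve pattern used throughout (minimal sketch):
# cho_factor returns a (matrix, lower) pair that cho_solve consumes directly.
# A = np.array([[4.0, 1.0], [1.0, 3.0]])
# c_and_lower = scipy.linalg.cho_factor(A, lower=True)
# x = scipy.linalg.cho_solve(c_and_lower, np.array([1.0, 2.0])) # solves A x = b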
|
[
"themasterlink93@googlemail.com"
] |
themasterlink93@googlemail.com
|
8886c4eff59f8379795b40f8995408fb237f04c7
|
c6221e1163b7c1cdb0a1bc6e29da2dcbec04d1b8
|
/Core/game.py
|
4de0c1df275a0f209ceae7e11870ed60d7e2d01a
|
[] |
no_license
|
Dexton/Tesla-V.-Edison-Demo-Prototype
|
674e7620908b2920fde776444756823138580a32
|
7cebdbc24a6c78bdfc460c17a8d62596593cfe82
|
refs/heads/master
| 2021-01-24T03:38:25.794850
| 2011-10-09T00:37:09
| 2011-10-09T00:37:09
| 2,540,805
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
import pyglet
from game_batch import GameBatch
class GameStates:
MAIN_MENU = 0
PLAYING = 1
PAUSED = 2
GAME_OVER = 3
class GameWindow(pyglet.window.Window):
def __init__(self, *args, **kwargs):
""" Creates necesary items and displays the menu """
super(GameWindow, self).__init__(1024, 768, *args, **kwargs)
self.game_state = GameStates.MAIN_MENU
#self.main_menu_batch = MainMenu(self, self.width, self.height)
#self.pause_menu_batch = PauseMenu(self, self.width, self.height)
self.game_batch = GameBatch(self, self.width, self.height)
# this next line makes pyglet call self.update at 120Hz
# this has to be the last line in __init__
pyglet.clock.schedule_interval(self.update, 1/120.0)
def update(self, dt):
""" Update game information
dt: time delta, the change in time
"""
def on_key_press(self, symbol, modifiers):
""" Key Press Event Handler
symbol: the symbol(key) pressed
modifiers: the extra keys pressed (ex. Ctrl or Alt)
"""
if self.game_state == GameStates.MAIN_MENU:
self.main_menu_batch.on_key_press(symbol, modifiers)
if self.game_state == GameStates.PLAYING:
self.game_batch.on_key_press(symbol, modifiers)
if self.game_state == GameStates.PAUSED:
self.pause_menu_batch.on_key_press(symbol, modifiers)
def on_draw(self):
""" Draw Screen Event Handler """
self.clear()
if self.game_state == GameStates.MAIN_MENU:
self.main_menu_batch.draw()
if self.game_state == GameStates.PLAYING:
self.game_batch.draw()
if self.game_state == GameStates.PAUSED:
self.pause_menu_batch.draw()
|
[
"loktacar@gmail.com"
] |
loktacar@gmail.com
|
e8492bd500e419e50fa3815209d4889eb2e4e971
|
c761f3fbce728e61cbcf5179f1d3f27e1e5625cd
|
/register_key.py
|
1328baddc2fe4d7e5f91b2052b07daa49e53649f
|
[] |
no_license
|
philopon/usermon
|
16033d41436efe2cf4971bcd3b25f99cf82de318
|
7f97db09a65466e2133d4304f9fe5ba212299598
|
refs/heads/master
| 2021-01-18T16:51:56.457593
| 2017-04-21T13:06:12
| 2017-04-21T13:06:12
| 86,775,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#!/usr/bin/env python3
def main():
import sys
import os
import pwd
import pamela
pw = pwd.getpwuid(os.getuid())
ssh_dir = os.path.join(pw.pw_dir, '.ssh')
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
os.makedirs(ssh_dir, mode=0o700, exist_ok=True)
with open(auth_keys, 'a') as f:
for key in sys.stdin:
print(key.strip(), file=f)
os.chmod(auth_keys, 0o600)
if __name__ == '__main__':
main()
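# Usage sketch (an assumption about invocation, not part of the script itself):
#   cat ~/.ssh/id_ed25519.pub | python3 register_key.py
# Every line read from stdin is appended to ~/.ssh/authorized_keys (mode 0600).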
|
[
"philopon.dependence@gmail.com"
] |
philopon.dependence@gmail.com
|
da850d8841ddddfdccfc6bde153467956b91789c
|
78e60a7d8a67ed76244004e8a3ed573fbf396e41
|
/samples/get_zip_codes.py
|
a89c105f5ec1a635d350ba870418f9f735a0bb60
|
[
"MIT"
] |
permissive
|
Crivez/apiclient-python
|
837a9f7cc0453ccd3121311adc7920b5fe6b3e33
|
860fc054f546152a101e29b1af388c381075ac47
|
refs/heads/master
| 2023-06-08T13:24:09.249704
| 2021-06-17T12:16:35
| 2021-06-17T12:16:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Search for zip codes in Germany.
COUNTRY_CODE = "DE"
COUNT = 1
try:
res = voxapi.get_zip_codes(COUNTRY_CODE,
count=COUNT)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
[
"andrey@voximplant.com"
] |
andrey@voximplant.com
|
ec76be96a998db58443e1d0b6cf215fe81c6c74e
|
386cff3bff62a6fb76ba22fd41e3c4f112bae6ba
|
/marathon/subscriber.py
|
c5605c7b54352bef497888ef5530763635bb8f99
|
[] |
no_license
|
davidbliu/scaffolding
|
7c960acdc39528be5d9bed5068809c2b5f02bbc4
|
ff921b669f171075c2f06d195f455fa521b25f50
|
refs/heads/master
| 2016-08-07T21:15:12.797544
| 2014-07-03T15:48:58
| 2014-07-03T15:48:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,792
|
py
|
#!/usr/bin/env python
import argparse
import atexit
import sys
import urlparse
from flask import Flask, request, jsonify
import marathon
from stores import InMemoryStore, SyslogUdpStore
app = Flask(__name__)
# re-initialize later
events = None
event_store = None
def on_exit(marathon_client, callback_url):
marathon_client.delete_event_subscription(callback_url)
@app.route('/events', methods=['POST'])
def event_receiver():
print 'hello'
# event = request.get_json()
# event_store.save(event)
return ''  # a Flask view must return a response, not None
@app.route('/events', methods=['GET'])
def list_events():
print 'i have arrived here'
# return jsonify({'events': event_store.list()})
return ''  # placeholder response while the event store is stubbed out
@app.route('/callback', methods=['GET', 'POST'])
def callback():
print 'callback'
try:
event = request.get_json()
print event
except:
print 'no event'
return jsonify(result={"status": 200})
@app.route('/marathon', methods=['GET'])
def marathon_register():
print 'marathon stuff happening here'
marathon_url = 'http://localhost:8080'  # include the scheme, as in __main__ below
callback_url = 'http://localhost:5000/callback'
m = marathon.MarathonClient(marathon_url)
m.create_event_subscription(callback_url)
atexit.register(on_exit, m, callback_url)
return jsonify(result={"status": 200})
if __name__ == '__main__':
print 'cool stuff dude'
# parser = argparse.ArgumentParser(description='Marathon Logging Service')
# parser.add_argument('-m', '--marathon-url', required=True, help='Marathon server URL (http[s]://<host>:<port>[<path>])')
# parser.add_argument('-c', '--callback-url', required=True, help='callback URL for this service (http[s]://<host>:<port>[<path>]/events')
# parser.add_argument('-e', '--event-store', default='in-memory://localhost/', help='event store connection string (default: in-memory://localhost/)')
# parser.add_argument('-p', '--port', type=int, default=5000, help='Port to listen on (default: 5000)')
# parser.add_argument('-i', '--ip', default='0.0.0.0', help='IP to listen on (default: 0.0.0.0)')
# args = parser.parse_args()
# event_store_url = urlparse.urlparse(args.event_store)
# if event_store_url.scheme == 'in-memory':
# event_store = InMemoryStore(event_store_url)
# elif event_store_url.scheme == 'syslog':
# event_store = SyslogUdpStore(event_store_url)
# else:
# print 'Invalid event store type: "{scheme}" (from "{url}")'.format(scheme=event_store_url.scheme, url=args.event_store)
# sys.exit(1)
marathon_url = 'http://localhost:8080'
callback_url = 'http://localhost:5000/callback'
m = marathon.MarathonClient(marathon_url)
m.create_event_subscription(callback_url)
atexit.register(on_exit, m, callback_url)
app.run(port=5000, host='localhost')
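# Manual smoke test (assumes Marathon on localhost:8080 and this service running):
#   curl -X POST http://localhost:5000/callback \
#        -H 'Content-Type: application/json' -d '{"eventType": "status_update_event"}'
# The /callback view prints the posted JSON and answers {"result": {"status": 200}}.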
|
[
"david.liu@autodesk.com"
] |
david.liu@autodesk.com
|
8d5168f30b7e5f51483fcba73a2d034e20b80ae8
|
fe822705c38caf70c8a72433291acb3a729a0539
|
/backend/delivery_app/services/logdata.py
|
2c1474d24434b68bc2bbd67c691a705c785fe72f
|
[] |
no_license
|
tanficial/delivery-food-fighter
|
138db44dbfee33d0f9f4ecd4ea832436910d8878
|
a73a4df208ef94537f6f4374b1a7aa476bc23d3c
|
refs/heads/main
| 2023-09-02T10:54:05.089348
| 2021-11-02T23:56:18
| 2021-11-02T23:56:18
| 421,084,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
from delivery_app.models.logdata import Logdata, db
def add_logdata(id):
"""
log 이벤트 생성 시 logdata DB에 저장
"""
try:
new_logdata = Logdata(post_id = id)
db.session.add(new_logdata)
db.session.commit()
return new_logdata
except Exception:
db.session.rollback()
raise
|
[
"tanficial9574@gmail.com"
] |
tanficial9574@gmail.com
|
a7571ea7181658d263514690d7191439a399b264
|
8c1b60dbbdbc84ae8cbd34f7679540036b04df84
|
/m5.py
|
97a59fdf765e7d91673b005f279dc849831f19c2
|
[] |
no_license
|
KatyaPinich/ECG_classification_project
|
fd654fceaf0df99338a5d083545f0898030be998
|
37c1a21b9fc425be0f86b81272fdecebe96ce327
|
refs/heads/master
| 2023-07-25T09:37:50.177846
| 2020-03-05T12:11:45
| 2020-03-05T12:11:45
| 242,001,325
| 0
| 0
| null | 2023-07-06T21:55:49
| 2020-02-20T22:13:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
import torch.nn as nn
class M5(nn.Module):
def __init__(self, num_classes):
super(M5, self).__init__()
self.conv_block1 = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=128, kernel_size=80, stride=4),
nn.BatchNorm1d(num_features=128),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.conv_block2 = nn.Sequential(
nn.Conv1d(in_channels=128, out_channels=128, kernel_size=3, stride=1),
nn.BatchNorm1d(num_features=128),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.conv_block3 = nn.Sequential(
nn.Conv1d(in_channels=128, out_channels=256, kernel_size=3, stride=1),
nn.BatchNorm1d(num_features=256),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.conv_block4 = nn.Sequential(
nn.Conv1d(in_channels=256, out_channels=512, kernel_size=3, stride=1),
nn.BatchNorm1d(num_features=512),
nn.ReLU(),
nn.MaxPool1d(kernel_size=4)
)
self.avg_pool = nn.AvgPool1d(8)
self.softmax_layer = nn.Linear(512, num_classes)
def forward(self, x):
x = self.conv_block1(x)
x = self.conv_block2(x)
x = self.conv_block3(x)
x = self.conv_block4(x)
# Global avg pooling
x = self.avg_pool(x) # [batch_size, 512, 1] (assumes the time axis collapses to 1)
# Dense
x = x.view(x.size(0), -1) # [batch_size, 512*1=512]
x = self.softmax_layer(x) # [batch_size, num_classes]
return x
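# Shape flow through forward (a sketch; T is the input length, B the batch size):
#   [B, 1, T] -> conv_block1 (k=80, s=4, pool 4) -> [B, 128, ~T/16]
#   -> blocks 2-4 -> [B, 512, T'] -> AvgPool1d(8) -> [B, 512, T'//8]
#   -> view -> [B, 512 * (T'//8)] -> Linear -> [B, num_classes]
# Note: Linear(512, num_classes) implicitly assumes T'//8 == 1, so the input
# length must be chosen so the final feature map is a single time step.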
|
[
"katyapinich@gmail.com"
] |
katyapinich@gmail.com
|
5a82456358fe6bffb55775bcea1ef64c6a01c840
|
9f72ad0c091df885df5953286003d23f25216602
|
/Tarefa5/testClient2.py
|
c2396d0f28ac570b6764acb221eabd7f6bccef6c
|
[] |
no_license
|
SD-CC-UFG/felipe.gemmal.sd.ufg
|
0d96f50a34d5052df42454d8a6c7965fa8a2a035
|
472d38137c641570278cee2ae9f957fbdfc81188
|
refs/heads/master
| 2020-03-27T11:50:06.795260
| 2018-12-13T10:26:56
| 2018-12-13T10:26:56
| 146,510,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
# Basic indirect-access client by Felipe Gemmal
# -*- coding: utf-8 -*-
import os
import socket, string
import sys
nameServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip = 'localhost'
porta = 12388
print("Conectando")
nameServer.connect((ip,porta))
print("Enviando requisicao")
#tipo de servico requisitado, colocado aqui por conta do tempo de recv do dns
nameServer.send("getAddress")
print("Recebendo resposta")
resposta = str(nameServer.recv(1024).decode('utf-8'))
print(resposta)
nameServer.close()
|
[
"lipegemmal@hotmail.com"
] |
lipegemmal@hotmail.com
|
5a573494952b197ef81f13cde9b7c7b8ce088c5c
|
234d650ff5d906c2e3ce8da37c7b725c694791a0
|
/dxy/items.py
|
6b256d68ace5de8eb111d8b72fffeefaa5badeb1
|
[] |
no_license
|
IvanQin/dxy_spider
|
7846a0aeb96f8725e091be09db20c198c559b36c
|
ed1e73e09986f2397151d369a08586cc7e6574da
|
refs/heads/master
| 2021-01-25T09:21:02.082754
| 2017-06-09T04:15:43
| 2017-06-09T04:15:43
| 93,814,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DxyItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
link = scrapy.Field()
page = scrapy.Field()
|
[
"yifan.qing@datarx.cn"
] |
yifan.qing@datarx.cn
|
3ae1533fbdd3e8eab796faa6ec41d76f5cbed112
|
39800224358654c8225aefa25a0daf26e489c33f
|
/reviews/reviews/urls.py
|
78ca205221d204726b1df8f89f1278efb47a3986
|
[] |
no_license
|
dprestsde/Review_system
|
59e5f73716a6ab02e7cecd140519b4505cf1c278
|
b636a568b51189c9f78f874461e6eaa323317868
|
refs/heads/master
| 2021-09-22T21:49:20.164221
| 2018-09-17T12:22:35
| 2018-09-17T12:22:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
"""reviews URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('guestbook.urls'))
]
|
[
"noreply@github.com"
] |
dprestsde.noreply@github.com
|
a44db705bdc58cdcecdcd4b8200bf85a3d08fc83
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/group/_get_group_join_request_public_v2.py
|
32ba9735f4911a02f803f73dab69c4e7a260ec52
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Group Service (2.18.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.group import (
get_group_join_request_public_v2 as get_group_join_request_public_v2_internal,
)
from accelbyte_py_sdk.api.group.models import ModelsGetMemberRequestsListResponseV1
from accelbyte_py_sdk.api.group.models import ResponseErrorResponse
@click.command()
@click.argument("group_id", type=str)
@click.option("--limit", "limit", type=int)
@click.option("--offset", "offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def get_group_join_request_public_v2(
group_id: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(get_group_join_request_public_v2_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = get_group_join_request_public_v2_internal(
group_id=group_id,
limit=limit,
offset=offset,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"getGroupJoinRequestPublicV2 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
get_group_join_request_public_v2.operation_id = "getGroupJoinRequestPublicV2"
get_group_join_request_public_v2.is_deprecated = False
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
c2d9305312002748edb2d0e5470f541784c71352
|
3fc00c49c6b5a5d3edb4f5a97a86ecc8f59a3035
|
/shared_models/test/test_api.py
|
ae9465bb6b3b41416d097c202b1034470650a378
|
[] |
no_license
|
yc-hu/dm_apps
|
9e640ef08da8ecefcd7008ee2d4f8f268ec9062e
|
483f855b19876fd60c0017a270df74e076aa0d8b
|
refs/heads/master
| 2023-04-07T13:13:55.999058
| 2021-04-12T10:19:21
| 2021-04-12T10:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,264
|
py
|
from django.test import tag
from django.urls import reverse
from rest_framework import status
from shared_models.test import SharedModelsFactoryFloor as FactoryFloor
from shared_models.test.common_tests import CommonTest
class TestUserAPIListView(CommonTest):
def setUp(self):
super().setUp()
self.user = self.get_and_login_user()
self.test_url = reverse("user-list", args=None)
@tag("api", 'user')
def test_url(self):
self.assert_correct_url("user-list", test_url_args=None, expected_url_path=f"/api/shared/users/")
@tag("api", 'user')
def test_get(self):
# PERMISSIONS
# authenticated users
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# unauthenticated users
self.client.logout()
response = self.client.get(self.test_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# TODO: build up this test!
# # RESPONSE DATA
# valid_user = None
# self.get_and_login_user(user=None)
# response = self.client.get(self.test_url)
# self.assertEqual(len(response.data), 1)
# self.assertEqual(response.data[0]["id"], self.instance.id)
# # or, for lists with pagination...
# self.assertEqual(len(data["results"]), 1)
# self.assertEqual(data["results"][0]["id"], self.instance.id)
#
# # check query params
# object = FactoryFloor.UserFactory()
# data = self.client.get(self.test_url+f"?={object.id}").data
# keys.extend([
# "",
# ])
# self.assert_dict_has_keys(data, keys)
@tag("api", 'user')
def test_unallowed_methods_only(self):
restricted_statuses = [status.HTTP_405_METHOD_NOT_ALLOWED, status.HTTP_403_FORBIDDEN]
self.assertIn(self.client.put(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.delete(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.post(self.test_url, data=None).status_code, restricted_statuses)
self.assertIn(self.client.patch(self.test_url, data=None).status_code, restricted_statuses)
|
[
"davjfish@gmail.com"
] |
davjfish@gmail.com
|
32965056a1b7a8f68e29a888ddf16692219f8202
|
6f2675eee55b7ebc5adf9c2176ced8cb59fc64d4
|
/dataInterKingdee/interDebug.py
|
f5873ce9a0c97db0f8dd05bed388d20b019fdced
|
[] |
no_license
|
wildmanwang/proDataInter
|
8c2b65fa96ad45b21165d997b1769a28e12fc42a
|
f5a1f1fb195c66bf586bd999465c7e3b16453369
|
refs/heads/master
| 2023-06-07T11:57:16.763251
| 2023-06-03T08:54:56
| 2023-06-03T08:54:56
| 157,559,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# -*- coding:utf-8 -*-
"""
"""
__author__ = "Cliff.wang"
import os
from interConfig import Settings
#from interProcess import InterProcess
from interControl import InterControl
if __name__ == "__main__":
try:
path = os.path.abspath(os.path.dirname(__file__))
sett = Settings(path, "config")
inter = InterControl(sett)
inter.interInit()
if 1 == 2:
# transfer master data and business data
inter.interBusiData()
elif 1 == 2:
# fetch department IDs and user IDs
pass
except Exception as e:
print(str(e))
|
[
"cliff.w@qq.com"
] |
cliff.w@qq.com
|
270875ed2be025781a975375972379cf8f211f80
|
dfad28a2e1a0199c0117e551fd1e31804804d5b9
|
/app/auth/views.py
|
d2df7a97666207276aa6648ef9f85af4a25d98bc
|
[
"MIT"
] |
permissive
|
wilbrone/Pitches
|
c33d60b142b43de9ccf60a86cf59acbc262c6711
|
b20d234fd930a6551f26d9cf863c6d1631b62bc2
|
refs/heads/master
| 2022-12-09T08:02:08.631177
| 2019-11-25T23:47:13
| 2019-11-25T23:47:13
| 223,405,696
| 0
| 0
|
MIT
| 2022-12-08T06:55:48
| 2019-11-22T13:09:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
from flask import render_template,redirect,url_for, flash,request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm,RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login',methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
title = "One Minute Perfect Pitch login"
return render_template('auth/login.html',login_form = login_form,title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,full_name= form.full_name.data,password = form.password.data)
# saving the data
db.session.add(user)
db.session.commit()
mail_message("Welcome to One Minute Perfect Pitch","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html',registration_form = form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
|
[
"wilbroneokoth@gmail.com"
] |
wilbroneokoth@gmail.com
|
a7dcd151d0dd3ea4bc81bb4a0fca9c6818c60ec5
|
f03a0d77c4f5524e8958263962ddb04a120ed6d6
|
/Lab8/wordladder5.py
|
1a1c82c4ebcd985f50f3bbdd129ff28bd4f5c4bc
|
[] |
no_license
|
b3rton/OpenSourceBlog
|
0a54566a6d542a41e2e8018287faef705a66fc35
|
4185c7b46629ac054903229d9a5a027110d5d662
|
refs/heads/master
| 2021-05-30T10:09:43.183994
| 2015-11-13T20:00:52
| 2015-11-13T20:00:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
"""
Words/Ladder Graph
------------------
Generate an undirected graph over the 5757 5-letter words in the
datafile words_dat.txt.gz. Two words are connected by an edge
if they differ in one letter, resulting in 14,135 edges. This example
is described in Section 1.1 in Knuth's book [1]_,[2]_.
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Brendt Wohlberg',
'hughdbrown@yahoo.com'])
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
#-------------------------------------------------------------------
# The Words/Ladder graph of Section 1.1
#-------------------------------------------------------------------
def generate_graph(words):
from string import ascii_lowercase as lowercase
G = nx.Graph(name="words")
lookup = dict((c,lowercase.index(c)) for c in lowercase)
def edit_distance_one(word):
for i in range(len(word)):
left, c, right = word[0:i], word[i], word[i+1:]
j = lookup[c] # lowercase.index(c)
for cc in lowercase[j+1:]:
yield left + cc + right
candgen = ((word, cand) for word in sorted(words)
for cand in edit_distance_one(word) if cand in words)
G.add_nodes_from(words)
for word, cand in candgen:
G.add_edge(word, cand)
return G
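# Note on edge generation: edit_distance_one only yields substitutions that come
# later in the alphabet at the changed position, so each unordered word pair is
# produced exactly once before being added as an undirected edge. For example,
# the pair ('chaos', 'chats') is generated from 'chaos' (since o < t), never
# from 'chats'.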
def words_graph():
"""Return the words example graph from the Stanford GraphBase"""
import gzip
fh=gzip.open('words_dat.txt.gz','r') #5 words
#fh=gzip.open('words4_dat.txt.gz','r') #4 words
words=set()
for line in fh.readlines():
line = line.decode()
if line.startswith('*'):
continue
w=str(line[0:5])
#w=str(line[0:4])
words.add(w)
return generate_graph(words)
if __name__ == '__main__':
from networkx import *
G=words_graph()
print("Loaded words_dat.txt containing 5757 five-letter English words.")
print("Two words are connected if they differ in one letter.")
print("Graph has %d nodes with %d edges"
%(number_of_nodes(G),number_of_edges(G)))
print("%d connected components" % number_connected_components(G))
fiveWordsT = [('chaos','order'),('nodes','graph'),('moron','smart'),('pound','marks')]
fourWordsT = [('cold','warm'),('love','hate')]
test = fiveWordsT
for (source,target) in test:
print("Shortest path between %s and %s is"%(source,target))
try:
sp=shortest_path(G, source, target)
for n in sp:
print(n)
except nx.NetworkXNoPath:
print("None")
|
[
"nathan.spero.berton@gmail.com"
] |
nathan.spero.berton@gmail.com
|
62ec86a4fa3abd1261e1c0a8452250ff222b6759
|
dedbf1f67bc741203f685745ecfde3d00f3f3d87
|
/src/simpleseq/encodings.py
|
b5629cc5604c31b327f2eb6875bc5d37b3b73f34
|
[] |
no_license
|
ambrosejcarr/simpleseq
|
1bee31b806dc19b7801ed52d73c47a5482db7d96
|
a9760db8470ccd578e6b82837bed12187389dbb8
|
refs/heads/master
| 2016-08-12T05:58:12.885835
| 2016-02-17T16:33:14
| 2016-02-17T16:33:14
| 50,446,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
class DNA3Bit:
"""
Compact encoding scheme for sequence data.
"""
_str2bindict = {65: 0b100, 67: 0b110, 71: 0b101, 84: 0b011, 78: 0b111,
97: 0b100, 99: 0b110, 103: 0b101, 116: 0b011, 110: 0b111}
_bin2strdict = {0b100: b'A', 0b110: b'C', 0b101: b'G', 0b011: b'T', 0b111: b'N'}
bin_nums = [0b100, 0b110, 0b101, 0b011]
@classmethod
def encode(cls, s: bytes) -> int:
"""Convert string nucleotide sequence into binary, note: string is reversed so
that the first nucleotide is in the LSB position"""
res = 0
for c in s:
res <<= 3
res += cls._str2bindict[c]
return res
@classmethod
def decode(cls, i: int) -> bytes:
"""Convert binary nucleotide sequence into string"""
if i < 0:
message = 'i must be an unsigned (positive) integer, not {0!s}'.format(i)
raise ValueError(message)
r = b''
while i > 0:
r = cls._bin2strdict[i & 0b111] + r
i >>= 3
return r
@staticmethod
def gc_content(i: int) -> float:
"""calculate percentage of i that is G or C"""
gc = 0
length = 0
while i > 0:
length += 1
masked = i & 0b111
if masked == 0b101 or masked == 0b110:  # G or C
gc += 1
i >>= 3
return gc / length
@staticmethod
def seq_len(i: int) -> int:
"""Return the length of a sequence based on its binary representation"""
l = 0
while i > 0:
l += 1
i >>= 3
return l
@staticmethod
def contains(s: int, char: int) -> bool:
"""
return true if the char (bin representation) is contained in seq (binary
representation)
"""
while s > 0:
if char == (s & 0b111):
return True
s >>= 3
return False
@staticmethod
def bitlength(i: int) -> int:
"""return the bitlength of the sequence"""
bitlen = i.bit_length()
# correct for leading T-nucleotide (011) whose leading 0 gets trimmed
if bitlen % 3:
bitlen += 1
return bitlen
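# Round-trip sketch (values follow from the encoding table above):
#   code = DNA3Bit.encode(b'ACGT')   # 3 bits per base, 'A' in the high bits
#   DNA3Bit.decode(code)             # -> b'ACGT'
#   DNA3Bit.seq_len(code)            # -> 4
#   DNA3Bit.contains(code, 0b101)    # -> True ('G' is present)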
|
[
"mail@ambrosejcarr.com"
] |
mail@ambrosejcarr.com
|
4b5730763abcb86812d2a804110e3fc6c15f7c6c
|
27de78beab7b46b08be620e06f8805d14de155d1
|
/Q3_BP.py
|
4cc4d42282a78fcef65f4b5fefb25989a5c01e7a
|
[] |
no_license
|
kzil88/Quant
|
c3f517cf507cbb97774738c152087a660dc59e31
|
711800349a065bd9534f323337147b494c91c156
|
refs/heads/master
| 2021-04-15T14:41:17.189825
| 2018-12-04T08:40:42
| 2018-12-04T08:40:42
| 126,697,511
| 4
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
import DC
from keras.layers import Flatten
import numpy as np
import keras
import datetime
import pymysql
if __name__ == '__main__':
time_temp = datetime.datetime.now() - datetime.timedelta(days=90)
date_seq_start = time_temp.strftime('%Y-%m-%d')
end_dt = (datetime.datetime.now() -datetime.timedelta(days=1)) .strftime('%Y-%m-%d')
# set up the data connection and the backtest time window
dc = DC.data_collect2('000725',date_seq_start,end_dt)
score_list = []
resu_list = []
train = dc.data_train
target = dc.data_target
model = Sequential()
model.add(Dense(64, activation='linear', input_dim=14))
model.add(Dropout(0.5))
model.add(Dense(64, activation='sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='relu'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='logcosh', optimizer=sgd, metrics=['accuracy'])
for i in range(5):
model.fit(train, target, epochs=2000)
score = model.evaluate(train, target, batch_size=128)
print('SCORE:' + str(score[0]))
test_case = np.array([dc.test_case])
ans2 = model.predict(test_case)
resu_list.append(ans2[0][0])
score_list.append(score)
print('RESU '+str(i+1)+' : '+str(ans2[0][0]))
dc.refreshDATA(ans2[0][0])
train = dc.data_train
target = dc.data_target
print(score_list)
print(resu_list)
print(date_seq_start)
print(end_dt)
|
[
"noreply@github.com"
] |
kzil88.noreply@github.com
|
a0cb0e3618382fd03b6ca832ea20a7034a40057c
|
cc7dcbc2d2b85c4769ab4bfb5f92bbe6f158b1bc
|
/Competitions/Comp4/start_sound.py
|
87e6432903e6abe11567cff4177b2484faedd6c1
|
[] |
no_license
|
MandyMeindersma/Robotics
|
c091e5b248bb067db4631e2de481d18417996933
|
f58916bb293d68c176847363a25eb7270a304965
|
refs/heads/master
| 2023-01-08T07:34:51.223767
| 2023-01-01T05:40:12
| 2023-01-01T05:40:12
| 118,049,272
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
#!/usr/bin/env python
from sound_play.libsoundplay import SoundClient
# from sound_play.msg import SoundRequest
import rospy
import time
rospy.init_node('sound')
soundthing = SoundClient()
time.sleep(1)
# soundthing.play(SoundRequest.NEEDS_UNPLUGGING)
# soundthing.voiceSound("Testing the new A P I")
soundthing.playWave("/home/mandy/winter18/Robotics/Competitions/Comp4/meow.ogg")
print("meow sound started")
time.sleep(3)
soundthing.playWave("/home/mandy/winter18/Robotics/Competitions/Comp4/moo.ogg")
print("woof sound started")
|
[
"meinders@ualberta.ca"
] |
meinders@ualberta.ca
|
8a35692c001a9c87e06840d701a8da708dedcbb2
|
8186a0b52da5692178c72e865ab05a08d133a412
|
/MachineLearning.py
|
29e2c182c8e664d6888629192f033295d5bcbf63
|
[] |
no_license
|
DanWertheimer/COS802
|
c4e7d8d3a06f04efef998daaa0a57bdbc6232ed0
|
656e3ca62e44f8fda1967af0ba4b5e38120f2e8b
|
refs/heads/master
| 2021-07-21T15:38:31.895643
| 2017-10-30T08:14:31
| 2017-10-30T08:14:31
| 108,819,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,813
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 29 17:23:39 2017
@author: danwertheimer
"""
import pandas as pd
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import Normalizer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline
Data = pd.read_csv("CleanData2.csv",index_col = 0)
Fields =['Insured_First_Name','Insured_Last_Name','Client_ID','Other_Party_Name',\
'Other_Party_Last_Name','Fraudulent_Claim_Reason',\
'Policy_Holder_Street',\
'Policy_Holder_State',\
'Policy_Holder_City',\
'Policy_Holder_Area',\
'Policy_Holder_Postal_Code',\
'Loss_Street',\
'Loss_State',\
'Loss_City',\
'Loss_Area',\
'Loss_Postal_Code']
Data = Data.drop(Fields,axis = 1)
ScaledVariables = ['Amount_Paid','Sum_Insured','Total_Policies_Revenue']
mms = preprocessing.MinMaxScaler()
Normalize = preprocessing.Normalizer()
Data[ScaledVariables] = Normalize.fit_transform(Data[ScaledVariables])
Test1 = Data[Data['Fraudulent_Claim_Indicator'] == 0].sample(n = 10000 )
Test2 = Data[Data['Fraudulent_Claim_Indicator'] == 1]
New = pd.concat([Test1,Test2], axis = 0)
DataX = New[New.columns.difference(['Fraudulent_Claim_Indicator','Date_Of_Birth',\
'Date_Of_Loss','Policy_Start_Date',\
'Policy_End_Date'])]
DataY = New['Fraudulent_Claim_Indicator']
X_train, X_test, y_train, y_test = train_test_split(\
DataX, DataY, test_size=0.8, random_state=48)
glm = LogisticRegression()
glm.fit(X_train,y_train)
glm.score(X_test,y_test)
glmcv = cross_val_score(glm, DataX, DataY, cv=10,scoring = 'roc_auc')
clf = svm.SVC(kernel='linear', C=2).fit(X_train, y_train)
clf.score(X_test, y_test)
clfcv = cross_val_score(clf, DataX, DataY, cv=10,scoring = 'roc_auc')
NNet = MLPClassifier(solver='lbfgs', alpha=1e-5,\
hidden_layer_sizes=(3, 2), random_state=47)
NNet.fit(X_train,y_train);
NNet.score(X_test, y_test)
NNetcv = cross_val_score(NNet, DataX, DataY, cv=10,scoring = 'roc_auc')
###############################################################################
FeatureData = Data
DateFeatures = ['Date_Of_Birth','Date_Of_Loss','Policy_Start_Date',\
'Policy_End_Date']
FeatureData[DateFeatures] = FeatureData[DateFeatures].astype(str)
for i in DateFeatures:
FeatureData[i] = pd.to_datetime(FeatureData[i])
# Creating feature for days between policy start and loss
FeatureData['Days_Between_Policy_Loss'] = FeatureData['Date_Of_Loss'] - FeatureData['Policy_Start_Date']
FeatureData['Days_Between_Policy_Loss'] = FeatureData['Days_Between_Policy_Loss'].apply(lambda x:x.days)
# Creating feature for days between policy loss and policy end
FeatureData['Days_Before_Policy_End_Loss'] = FeatureData['Policy_End_Date'] - FeatureData['Date_Of_Loss']
FeatureData['Days_Before_Policy_End_Loss'] = FeatureData['Days_Before_Policy_End_Loss'].apply(lambda x:x.days)
FeatureData['Number_Of_Claims'] = FeatureData.groupby(['Date_Of_Birth','Policy_Start_Date',\
'Policy_End_Date']).cumcount()+1
# Rescaling New Features
NewFeatures = ['Days_Between_Policy_Loss','Days_Before_Policy_End_Loss','Number_Of_Claims']
FeatureData[NewFeatures] = Normalize.fit_transform(FeatureData[NewFeatures])
###############################################################################
# Retraining Models
Test1 = FeatureData[FeatureData['Fraudulent_Claim_Indicator'] == 0].sample(n = 10000 )
Test2 = FeatureData[FeatureData['Fraudulent_Claim_Indicator'] == 1]
NewFeatureData = pd.concat([Test1,Test2], axis = 0)
DataX = NewFeatureData[NewFeatureData.columns.difference(['Fraudulent_Claim_Indicator','Date_Of_Birth',\
'Date_Of_Loss','Policy_Start_Date',\
'Policy_End_Date'])]
DataY = NewFeatureData['Fraudulent_Claim_Indicator']
# Checking Variable Importance
Tree = ExtraTreesClassifier()
TreeC = Tree.fit(DataX,DataY)
TreeC.feature_importances_
model = SelectFromModel(TreeC, prefit=True)
X_new = model.transform(DataX)
X_train_newfeature, X_test_newfeature, y_train_newfeature, y_test_newfeature = train_test_split(\
X_new, DataY, test_size=0.8, random_state=48)
glm_newfeature = LogisticRegression()
glm_newfeature.fit(X_train_newfeature,y_train_newfeature)
glm_newfeature.score(X_test_newfeature,y_test_newfeature)
glmcv2 = cross_val_score(glm_newfeature, X_new, DataY, cv=10, scoring = 'roc_auc')
clf_newfeature = svm.SVC(kernel='linear', C=1).fit(X_train_newfeature, y_train_newfeature)
clf_newfeature.score(X_test_newfeature, y_test_newfeature)
clfcv2 = cross_val_score(clf_newfeature, X_new, DataY, cv=10, scoring = 'roc_auc')
NNet_newfeature = MLPClassifier(solver='lbfgs', alpha=1e-5,\
hidden_layer_sizes=(3, 2), random_state=47)
NNet_newfeature.fit(X_train_newfeature,y_train_newfeature);
NNet_newfeature.score(X_test_newfeature, y_test_newfeature)
NNetcv2 = cross_val_score(NNet_newfeature, X_new, DataY, cv=10, scoring = 'roc_auc')
Q = Pipeline([
('feature_selection', SelectFromModel(LinearSVC())),
('classification', RandomForestClassifier())
])
Q.fit(X_train_newfeature,y_train_newfeature)
Q.score(X_test_newfeature, y_test_newfeature)
|
[
"noreply@github.com"
] |
DanWertheimer.noreply@github.com
|
23e10462cf68f0d4848893ca60ea2362f183a88f
|
16da6040330dd1e8f88478b31e958dba88d96cbf
|
/ddpg_agent.py
|
46bb0ccd5e835eb29f8695de83ac4e17026128ab
|
[] |
no_license
|
vgudapati/DRLND_Continuous_Control
|
0c5c5098a167b44f0f2a1f957ab3080e28e55265
|
e55f5df74d4489821b322754570a26e552a2da59
|
refs/heads/master
| 2020-04-16T01:52:14.500904
| 2019-01-12T01:54:21
| 2019-01-12T01:54:21
| 165,188,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,194
|
py
|
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
'''
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-4 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
'''
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class DDPGAgent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size = 4, random_seed = 0,
BUFFER_SIZE = int(1e5),
BATCH_SIZE = 128,
GAMMA = 0.99,
TAU = 1e-3,
LR_ACTOR = 1e-4,
LR_CRITIC = 1e-3,
WEIGHT_DECAY = 0):
"""
Initialize an Agent object.
Params
======
state_size (int) : dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
BUFFER_SIZE (int): replay buffer size
BATCH_SIZE (int): minibatch size
GAMMA (float): discount factor
TAU (float): for soft update of target parameters
LR_ACTOR (float): learning rate for critic
LR_CRITIC (float): learning rate for critic
WEIGHT_DECAY (float): L2 weight decay
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
self.batch_size = BATCH_SIZE
self.gamma = GAMMA
self.tau = TAU
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Time step
self.timestep = 0
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
def step(self, state, action, reward, next_state, done):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
for s, a, r, ns, d in zip(state, action, reward, next_state, done):
self.memory.add(s, a, r, ns, d)
#self.memory.add(state, action, reward, next_state, done)
'''
self.timestep = (self.timestep + 1) % 2
# Learn every 2 time steps
if self.timestep == 0:
# if enough samples are available in memory
'''
if len(self.memory) > self.batch_size:
experiences = self.memory.sample()
self.learn(experiences, self.gamma)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
#print(experiences)
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
actions_next = self.actor_target(next_states)  # target actor, matching the docstring formula
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
#print(rewards.shape)
#print(Q_targets_next.shape)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
torch.nn.utils.clip_grad_norm_(self.actor_local.parameters(), 1)
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, self.tau)
self.soft_update(self.actor_local, self.actor_target, self.tau)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
"""Initialize parameters and noise process."""
self.size = size
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.size)
self.state = x + dx
return self.state
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size) # internal memory (deque)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
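# Minimal wiring sketch (the 33-dim state is an assumption borrowed from the
# Reacher task this repo targets; any sizes work):
#   agent = DDPGAgent(state_size=33, action_size=4, random_seed=0)
#   actions = agent.act(states)                        # noisy actions clipped to [-1, 1]
#   agent.step(states, actions, rewards, next_states, dones)   # store experience + learn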
|
[
"noreply@github.com"
] |
vgudapati.noreply@github.com
|
9f04557904bdeeb5a5b0b9e265605429682ff434
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/G_11_RemoveKthNode.py
|
408aa2a8a0bdec884c65ff5c410cb79045ed72b6
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403
| 2021-03-28T07:13:08
| 2021-03-28T07:13:08
| 280,783,785
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
import __main__ as main
from Helper.TimerLogger import CodeTimeLogging
fileName = main.__file__
fileName = fileName.split('\\')[-1]
CodeTimeLogging(Flag='F', filename=fileName, Tag='Linked-List', Difficult='Medium')
from Datastruct.masterLinkedList import l
arr = [1, 2, 3, 4, 5, 6]
# arr = [1, 2]
for i in arr:
l.insertStart(i)
# l.traverseList()
def removeKNodeFromEnd(head, k):
print(f'Removed node {k} from the end: ', end='')
first = head
second = head
count = 1
while count <= k and second is not None:
second = second.nextNode
count += 1
if second is None:
head.data = first.nextNode.data
head.nextNode = first.nextNode.nextNode
l.traverseList()
return
while second.nextNode is not None:
second = second.nextNode
first = first.nextNode
first.nextNode = first.nextNode.nextNode
l.traverseList()
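# Worked example (assuming insertStart prepends): the loop above builds
# 6 -> 5 -> 4 -> 3 -> 2 -> 1, so the call below removes the 3rd node from the
# end (value 3), leaving 6 -> 5 -> 4 -> 2 -> 1.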
removeKNodeFromEnd(l.getHead(), 3)
|
[
"omkarjoshi4031@live.com"
] |
omkarjoshi4031@live.com
|
5be0edf09990b940847ed51efb8d7cc5cde7d449
|
70ead0a39a0217c3c1bc6b48f902987c883c0868
|
/templatemail/backends/locmem.py
|
87fd0941385de853fc14caa37e8ac9140c79ae53
|
[
"MIT"
] |
permissive
|
timdownsisarealboy/django-template-mail
|
a5f369fff8f3d147f63196705490c1782a9b99bb
|
64ab909da41d1a90c14969687cfd97512eaedc60
|
refs/heads/master
| 2021-01-20T19:06:20.796790
| 2013-07-12T17:18:28
| 2013-07-12T17:18:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from django.core.mail.backends import locmem
from base import BaseEmailBackend
class EmailBackend(locmem.EmailBackend, BaseEmailBackend):
def send_messages(self, email_messages):
email_messages = self.render_messages(email_messages)
return super(EmailBackend, self).send_messages(email_messages)  # propagate the sent-message count
|
[
"bar.benoit@gmail.com"
] |
bar.benoit@gmail.com
|
8a55a174178d00541f365a08542d4d792b52fcc5
|
7f456f36ecb35b2f898f3257a45ec79cf248f4e0
|
/project/source/DQN_old.py
|
92afa0878256b031c322a5ea8741476c00ad77b8
|
[] |
no_license
|
Akihiro-Nishihara/ActionGameAI
|
0dcbd511bf54837dd145ae548452c2e7d1986ffe
|
d3c9e91cb84f1eb6125588338ea2a6e1567def3b
|
refs/heads/master
| 2022-12-08T15:53:13.160680
| 2020-09-16T17:23:12
| 2020-09-16T17:23:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,436
|
py
|
import os
import numpy as np
import datetime
import math
import sys
import shutil
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D
from keras.optimizers import Adam
from keras.utils import plot_model
from collections import deque
from keras import backend as K  # Keras delegates tensor math to a backend; needed for the custom loss
import tensorflow as tf
import pygame
from project.source import myenv, header as h
FUNC_REWARD = 1 # reward setting for the reinforcement learning task
LEARNING_RATE = 0.1 # learning rate of the Q-network
# LEARNING_RATE = 0.01 # learning rate of the Q-network
OBS_LEFT = 0
OBS_TOP = -1
OBS_RIGHT = 3
OBS_BOTTOM = 2
SIZE_STATE = (OBS_RIGHT - OBS_LEFT) * (OBS_BOTTOM - OBS_TOP) - 1 + 2 # observed cells (excluding the character's own) + in-cell coordinates
SIZE_ACTION = 8
SIZE_HIDDEN = 32
SEED = 1
NUM_EPISODES = 19 # total number of episodes
SIZE_LOOP = 1000
GAMMA = 0.99 # discount factor
# memory_size = 10000 # size of the buffer memory
MEMORY_SIZE = 1000 # size of the buffer memory
BATCH_SIZE = 32 # size of the batch used to update the Q-network
# MODE PARAMETER
OBSERVE_PLAYER = 'RIGHT'
DQN_MODE = 1 # 1 for DQN, 0 for DDQN
LENDER_MODE = 0 # 0: no rendering even after training, 1: render once training ends
# definition of the loss function (Huber loss)
def huberloss(_y_true, _y_pred):
EPSILON = 1.0
err = _y_true - _y_pred
condition = K.abs(err) < EPSILON
L2 = K.square(err) / 2
L1 = EPSILON * (K.abs(err) - EPSILON / 2)
loss = tf.where(condition, L2, L1)
return K.mean(loss)
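# Piecewise form of the loss above (EPSILON = 1):
#   loss(err) = err^2 / 2     if |err| < 1
#             = |err| - 1/2   otherwise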
# define the Q-function as a deep-learning network class
class QNetwork:
def __init__(self, _learning_rate=LEARNING_RATE, _state_size=SIZE_STATE, _action_size=SIZE_ACTION,
_hidden_size=SIZE_HIDDEN):
self.model = Sequential()
self.model.add(Dense(_hidden_size, activation='relu', input_dim=_state_size))
self.model.add(Dense(_hidden_size, activation='relu'))
self.model.add(Dense(_hidden_size, activation='relu'))
self.model.add(Dense(_action_size, activation='linear'))
self.optimizer = Adam(lr=_learning_rate)
self.model.compile(loss=huberloss, optimizer=self.optimizer)
# CNN structure (unfinished)
# self.model = Sequential()
# self.model.add(Conv2D(16, (3, 3), padding='same', input_shape=(5, 5), activation='relu'))
# self.model.add(MaxPool2D(2, 2))
# self.model.add(Flatten())
# self.model.add(Dense(SIZE_HIDDEN, activation='relu'))
# self.model.add(Dense(_action_size, activation='linear'))
# self.optimizer = Adam(lr=_learning_rate)
# self.model.compile(loss=huberloss, optimizer=self.optimizer)
# weight training; _memory holds (state, action, reward, next_state) tuples
def replay(self, _memory, _batch_size, _gamma, _targetQN):
inputs = np.zeros((_batch_size, SIZE_STATE))
targets = np.zeros((_batch_size, SIZE_ACTION))
mini_batch = _memory.sample(_batch_size)
# build the training inputs and targets
for i, (state_b, action_b, reward_b, next_state_b) in enumerate(mini_batch):
inputs[i:i + 1] = state_b
target = reward_b
if not (next_state_b == np.zeros(state_b.shape)).all(axis=1):
# value computation
retmainQs = self.model.predict(next_state_b)[0]
next_action = np.argmax(retmainQs) # index of the largest element
target = reward_b + _gamma * _targetQN.model.predict(next_state_b)[0][next_action]
targets[i] = self.model.predict(state_b) # output of the Q-network
int_action_b = 1 * action_b['right'] + 2 * action_b['left'] + 4 * action_b['space']
targets[i][int_action_b] = target # teacher signal (training target)
self.model.fit(inputs, targets, epochs=1, verbose=0)
def save_network(self, _path_dir, _name_network):
string_json_model = self.model.to_json()
fp_model = open(_path_dir + '/' + _name_network + '_model.json', 'w')
fp_model.write(string_json_model)
self.model.save_weights(_path_dir + '/' + _name_network + '_weights.hdf5')
# memory class implementing experience replay and the fixed target Q-network
class Memory:
def __init__(self, _max_size=1000):
self.buffer = deque(maxlen=_max_size)
def add(self, _experience):
self.buffer.append(_experience)
def sample(self, _batch_size):
# draw indices from the buffer without replacement (replace=False)
idx = np.random.choice(np.arange(len(self.buffer)), size=_batch_size, replace=False)
return [self.buffer[ii] for ii in idx]
def len(self):
return len(self.buffer)
# class that decides the action given the current state
class Actor:
# with probability 1 - epsilon, return the action with the highest predicted value
def get_action(self, _state, _episode, _mainQN):
# epsilon-greedy with a decaying epsilon:
# as epsilon shrinks, the greedy (optimal) action is chosen more and more often
# epsilon = 0.001 + 0.9 / (1.0 + _episode)
epsilon = 1.0 - (_episode / NUM_EPISODES)
if epsilon <= np.random.uniform(0, 1):
list_return_target_Qs = _mainQN.model.predict(_state)[0] # predicted value for each action
action = np.argmax(list_return_target_Qs)
else:
action = np.random.choice(list(range(0, SIZE_ACTION)))
dict_action = get_dict_action(action)
return dict_action
def get_dict_action(_int_act):
if _int_act not in range(0, SIZE_ACTION):
print('Error: _int_act in get_list_bin_action is out of range', file=sys.stderr)
os.system('PAUSE')
exit(-1)
# express the action as a binary string
str_bin_action = format(_int_act, 'b')
for i in range(int(math.log2(SIZE_ACTION)) - len(str_bin_action)):
str_bin_action = '0' + str_bin_action
list_str_bin_action = list(str_bin_action)
key_right = int(list_str_bin_action[2])
key_left = int(list_str_bin_action[1])
key_space = int(list_str_bin_action[0])
dict_pressed_key = {'right': key_right, 'left': key_left, 'space': key_space}
return dict_pressed_key
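# Worked example: _int_act = 5 -> format(5, 'b') = '101', so key_space = 1,
# key_left = 0, key_right = 1, i.e. press space (presumably jump) while moving right.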
# main function
def main():
# env = gym.make('CartPole-v0')
# env = wrappers.Monitor(env, './movie/cartpoleDDQN', video_callable=(lambda ep: ep % 100 == 0)) # when saving videos
# original environment
os.environ['PYTHONHASHSEED'] = str(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
# rn.seed(SEED)
pygame.init()
pygame.display.set_caption("Action Game AI")
screen = pygame.display.set_mode((h.SCREEN_WIDTH, h.SCREEN_HEIGHT))
screen_sub1 = pygame.display.set_mode((h.SCREEN_WIDTH, h.SCREEN_HEIGHT))
screen_sub2 = pygame.display.set_mode((h.SCREEN_WIDTH, h.SCREEN_HEIGHT))
# env = myenv.MyEnv(_path_file_stage='./stage_sample.txt', _screen=screen)
env = myenv.MyEnv(_path_file_stage='./stage_sample.txt', _screen=screen)
env_sub1 = myenv.MyEnv(_path_file_stage='./stage_sub1.txt', _screen=screen_sub1)
env_sub2 = myenv.MyEnv(_path_file_stage='./stage_sub2.txt', _screen=screen_sub2)
islearned = 0 # flag: training finished
isrender = 0 # rendering flag
# ---
# create the networks, the memory, and the actor
mainQN = QNetwork(_hidden_size=SIZE_HIDDEN, _learning_rate=LEARNING_RATE)
targetQN = QNetwork(_hidden_size=SIZE_HIDDEN, _learning_rate=LEARNING_RATE)
memory = Memory(_max_size=MEMORY_SIZE)
actor = Actor()
# main routine
for episode in range(NUM_EPISODES):
env.reset()
act_ini = env.action_space.sample()
action = {'right': act_ini[0], 'left': act_ini[1], 'space': act_ini[2]}
state, reward, is_done, _ = env.step(action) # observation, reward, done flag and info after executing action a_t
state = np.reshape(state, [1, SIZE_STATE])
env_sub1.reset()
state_sub1, reward_sub1, is_done_sub1, _ = env_sub1.step(action) # observation, reward, done flag and info after executing action a_t
state_sub1 = np.reshape(state_sub1, [1, SIZE_STATE])
env_sub2.reset()
state_sub2, reward_sub2, is_done_sub2, _ = env_sub2.step(action) # observation, reward, done flag and info after executing action a_t
state_sub2 = np.reshape(state_sub2, [1, SIZE_STATE])
targetQN.model.set_weights(mainQN.model.get_weights())
# loop over one episode
list_reward = []
count_loop = 0
is_train_sub1 = False
is_train_sub2 = False
# for count_loop in range(SIZE_LOOP):
# print(str(count))
while not is_done:
count_loop += 1
# if (islearned == 1) and LENDER_MODE: # render the environment once training has finished
# env.render()
# time.sleep(0.1)
# print(state[0, 0])
action = actor.get_action(state, episode, mainQN) # decide the action at time t
if count_loop % 20 == 0:
print(action)
# (main game) observation, reward, done flag and info after executing action a_t
next_state, reward, is_done, info = env.step(action)
next_state = np.reshape(next_state, [1, SIZE_STATE])
memory.add((state, action, reward, next_state)) # memory update
state = next_state # state update
list_reward.append(reward)
# termination check
if is_done:
if info['GAMEOVER']:
if info['TIME'] == 0:
print('MAIN {0}/{1}: TIME OVER'.format(episode + 1, NUM_EPISODES))
else:
print('MAIN {0}/{1}: FALL GROUND'.format(episode + 1, NUM_EPISODES))
elif info['CLEAR']:
print('MAIN {0}/{1}: CLEAR!'.format(episode + 1, NUM_EPISODES))
else:
print('Error: Wrong information of main stage', file=sys.stderr)
os.system('PAUSE')
exit(-1)
next_state = np.zeros(state.shape)
next_state_sub1 = np.zeros(state_sub1.shape)
next_state_sub2 = np.zeros(state_sub2.shape)
break
if is_train_sub1:
action_sub1 = actor.get_action(state_sub1, episode, mainQN) # decide the action at time t
# (sub game) observation, reward, done flag and info after executing action a_t
next_state_sub1, reward_sub1, is_done_sub1, info_sub1 = env_sub1.step(action_sub1)
next_state_sub1 = np.reshape(next_state_sub1, [1, SIZE_STATE])
memory.add((state_sub1, action_sub1, reward_sub1, next_state_sub1)) # memory update
state_sub1 = next_state_sub1
# once the sub stage reaches its goal, basic training is deemed sufficient and this sub stage stops training for the episode
if is_done_sub1:
if info_sub1['GAMEOVER']:
if info_sub1['TIME'] == 0:
print('sub1 {0}/{1}: TIME OVER'.format(episode + 1, NUM_EPISODES))
else:
print('sub1 {0}/{1}: FALL GROUND'.format(episode + 1, NUM_EPISODES))
elif info_sub1['CLEAR']:
print('sub1 {0}/{1}: CLEAR!'.format(episode + 1, NUM_EPISODES))
else:
print('Error: Wrong information of sub1 stage', file=sys.stderr)
os.system('PAUSE')
exit(-1)
is_train_sub1 = False
if is_train_sub2:
action_sub2 = actor.get_action(state_sub2, episode, mainQN) # decide the action at time t
# (sub game) observation, reward, done flag and info after executing action a_t
next_state_sub2, reward_sub2, is_done_sub2, info_sub2 = env_sub2.step(action_sub2)
next_state_sub2 = np.reshape(next_state_sub2, [1, SIZE_STATE])
memory.add((state_sub2, action_sub2, reward_sub2, next_state_sub2)) # memory update
state_sub2 = next_state_sub2
# once the sub stage reaches its goal, basic training is deemed sufficient and this sub stage stops training for the episode
if is_done_sub2:
if info_sub2['GAMEOVER']:
if info_sub2['TIME'] == 0:
print('sub2 {0}/{1}: TIME OVER'.format(episode + 1, NUM_EPISODES))
else:
print('sub2 {0}/{1}: FALL GROUND'.format(episode + 1, NUM_EPISODES))
elif info_sub2['CLEAR']:
print('sub2 {0}/{1}: CLEAR!'.format(episode + 1, NUM_EPISODES))
else:
print('Error: Wrong information of sub2 stage', file=sys.stderr)
os.system('PAUSE')
exit(-1)
is_train_sub2 = False
# train and update the Q-network weights
if (memory.len() > BATCH_SIZE) and not is_done:
mainQN.replay(memory, BATCH_SIZE, GAMMA, targetQN)
if DQN_MODE:
targetQN.model.set_weights(mainQN.model.get_weights())
print('{0}/{1}: {2}'.format(episode + 1, NUM_EPISODES, sum(list_reward) / len(list_reward)))
# print(count_loop)
dt_now = datetime.datetime.now()
str_time = dt_now.strftime('%Y-%m-%d_%H-%M-%S')
path_dirs = '../network/model_{0}'.format(str_time)
os.makedirs(path_dirs, exist_ok=True)
mainQN.save_network(_path_dir=path_dirs, _name_network='mainQN')
plot_model(mainQN.model, to_file=path_dirs + '/Qnetwork.png', show_shapes=True) # visualise the Q-network
shutil.copy('./stage_sample.txt', path_dirs)
if __name__ == '__main__':
main()
|
[
"ocean90light@gmail.com"
] |
ocean90light@gmail.com
|
b9e795b45a5b99bd04447a64e926dfb936b8a89e
|
4308886d6562c87b9fff3f5bc3696dd4968209b5
|
/Whats Your Name.py
|
79103203d37c6605d8e1e9fcdd6b7b7e5b911152
|
[] |
no_license
|
rivalTj7/Primera_Tarea_Python
|
e3f10d8f372e55078b30a835851e3f12a5607db1
|
a74ce4af39f0de46e831adc568a2c0bbf61909fb
|
refs/heads/master
| 2023-03-01T17:50:22.619024
| 2021-02-07T07:29:01
| 2021-02-07T07:29:01
| 336,726,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
#11- What's Your Name?
def print_full_name(a, b):
print("Hello "+a+" " +b+"! You just delved into python.")
first_name = 'Ross'
last_name = 'Taylor'
print_full_name(first_name, last_name)
|
[
"rival.alex7@gmail.com"
] |
rival.alex7@gmail.com
|
4cc39e7bddd75222d0771f991900ed2d1d80c680
|
fe1d902383ec4d9884bbc0438461b6960c15bb7d
|
/models/farkas.py
|
6bf3a06d42dec68e0b7ff7aeaf76b2b682f1a936
|
[] |
no_license
|
APooladian/FarkasLayers
|
63f40d58f7965a0094672fbf3ce866407e3b77a3
|
85710800a7dd959c7bb82e97210bec2afc4a426b
|
refs/heads/master
| 2020-07-07T15:36:55.298600
| 2019-11-12T03:49:33
| 2019-11-12T03:49:33
| 203,391,987
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,912
|
py
|
import math
import numpy as np
import torch as th
from torch import nn
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from .utils import View, Avg2d
from .blocks import Conv
class FarkasLinear(nn.Module):
def __init__(self, in_dim, out_dim, bn=True, nonlinear=True, dropout=0.,
init_type='standard',**kwargs):
"""A linear block, with guaranteed non-zero gradient. The linear layer
is followed by batch normalization (if active) and a ReLU (again, if
active)
Args:
in_dim: number of input dimensions
out_dim: number of output dimensions
bn (bool, optional): turn on batch norm (default: True)
"""
super().__init__()
self.weight = nn.Parameter(th.randn(out_dim-1, in_dim))
self.bias = nn.Parameter(th.randn(out_dim))
self.out_dim = out_dim
self.in_dim = in_dim
self.nonlinear=nonlinear
if bn:
self.bn = nn.BatchNorm1d(out_dim, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = init_type
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
        elif self.init_type == 'kaiming':
            nn.init.kaiming_normal_(self.weight.data, mode='fan_in', nonlinearity='relu')
        elif self.init_type == 'zero_init':
            # the learned matrix is (out_dim-1) x in_dim, not out_dim x in_dim
            self.weight.data.zero_()
def reset_parameters(self):
n = self.in_dim
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
x = self.dropout(x)
y = F.linear(x, self.weight, None)
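        # Farkas trick (see class docstring): only out_dim-1 rows are learned;
        # the extra output appended below is the negative mean of the others and
        # the last bias is clamped from below, which is what prevents the
        # gradient from vanishing. FarkasConv and the blocks below do the same.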
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max(-(self.bias[0:-1]).mean(),self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_dim)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_dim}, {out_dim}')
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasConv(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, padding=None,
kernel_size=(3,3), bn=True, nonlinear=True, dropout=0.,
init_type='standard',**kwargs):
"""A 2d convolution block, with guaranteed non-zero gradient. The
convolution is followed by batch normalization (if active).
Args:
in_channels: number of input channels
out_channels: number of output channels
stride (int, optional): stride of the convolutions (default: 1)
kernel_size (tuple, optional): kernel shape (default: 3)
            bn (bool, optional): turn on batch norm (default: True)
"""
super().__init__()
        if out_channels < 2:
            raise ValueError('need out_channels>=2')
self.weight = nn.Parameter(th.randn(out_channels-1, in_channels, *kernel_size))
self.bias = nn.Parameter(th.randn(out_channels))
self.stride = stride
self.out_channels = out_channels
self.in_channels = in_channels
self.kernel_size=_pair(kernel_size)
if padding is None:
self.padding = tuple([k//2 for k in kernel_size])
else:
self.padding = _pair(padding)
self.nonlinear = nonlinear
if bn:
self.bn = nn.BatchNorm2d(out_channels, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = init_type
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
        elif self.init_type == 'kaiming':
            nn.init.kaiming_normal_(self.weight.data, mode='fan_in', nonlinearity='relu')
        elif self.init_type == 'zero_init':
            self.weight.data.zero_()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
x = self.dropout(x)
y = F.conv2d(x, self.weight, None, self.stride, self.padding,
1, 1)
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max( - (self.bias[0:-1]).mean() , self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_channels,1,1)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasBlock(nn.Module):
def __init__(self, channels, kernel_size=(3,3), bn=True, nonlinear=True,
dropout = 0., residual=True, weight_init='standard',zero_last=False,**kwargs):
"""A basic 2d ResNet block, with modifications on original ResNet paper
[1]. Every convolution is followed by batch normalization (if active).
The gradient is guaranteed to be non-zero.
Args:
channels: number of input and output channels
kernel_size (tuple, optional): kernel shape (default: 3)
bn (bool, optional): turn on batch norm (default: False)
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun, 2016.
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
super().__init__()
self.in_channels = channels
self.out_channels = channels+1
self.kernel_size = _pair(kernel_size)
self.nonlinear=nonlinear
self.residual = residual
self.conv0 = FarkasConv(channels, channels,
kernel_size=kernel_size, bn=bn, nonlinear=nonlinear, init_type=weight_init)
if zero_last:
self.weight = nn.Parameter(th.zeros(channels,channels,*kernel_size))
self.bias=nn.Parameter(th.zeros(channels+1))
else:
self.weight = nn.Parameter(th.randn(channels, channels, *kernel_size))
self.bias = nn.Parameter(th.randn(channels+1))
self.padding = tuple([k//2 for k in kernel_size])
if bn:
self.bn = nn.BatchNorm2d(channels+1, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = weight_init
if not zero_last:
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
            elif self.init_type == 'kaiming':
                nn.init.kaiming_normal_(self.weight.data, mode='fan_in', nonlinearity='relu')
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
y = self.dropout(x)
else:
y=x
y = self.conv0(y)
if self.dropout:
y = self.dropout(y)
y = F.conv2d(y, self.weight, None, 1, self.padding,
1, 1)
if self.residual:
ybar = (-x-y).mean(dim=1,keepdim=True)
y = th.cat([x+y,ybar],dim=1)
else:
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max( - (self.bias[0:-1]).mean(),self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_channels,1,1)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}')
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasBottleneck(nn.Module):
def __init__(self, channels, kernel_size=(3,3), bn=True, nonlinear=True,
dropout = 0., residual=True, weight_init='standard',zero_last=False,**kwargs):
"""A basic 2d ResNet block, with modifications on original ResNet paper
[1]. Every convolution is followed by batch normalization (if active).
The gradient is guaranteed to be non-zero.
Args:
channels: number of input and output channels
kernel_size (tuple, optional): kernel shape (default: 3)
bn (bool, optional): turn on batch norm (default: False)
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun, 2016.
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
super().__init__()
self.in_channels = channels
self.out_channels = channels+1
self.kernel_size = _pair(kernel_size)
self.nonlinear = nonlinear
self.residual = residual
self.conv0 = FarkasConv(channels, channels//4,
kernel_size=(1,1), bn=bn,
nonlinear=nonlinear, init_type=weight_init)
self.conv1 = FarkasConv(channels//4, channels//4,
kernel_size=kernel_size, bn=bn,
nonlinear=nonlinear,init_type=weight_init)
if zero_last:
self.weight = nn.Parameter(th.zeros(channels,channels//4, 1,1))
self.bias = nn.Parameter(th.zeros(channels+1))
else:
self.weight = nn.Parameter(th.randn(channels, channels//4, 1,1))
self.bias = nn.Parameter(th.randn(channels+1))
if bn:
self.bn = nn.BatchNorm2d(channels+1, affine=False)
else:
self.bn = False
if dropout>0.:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.init_type = weight_init
if self.init_type == 'standard':
self.reset_parameters()
elif self.init_type == 'xavier':
nn.init.xavier_normal_(self.weight.data)
        elif self.init_type == 'kaiming':
            nn.init.kaiming_normal_(self.weight.data, mode='fan_in', nonlinearity='relu')
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
if self.dropout:
y = self.dropout(x)
else:
y=x
y = self.conv0(y)
if self.dropout:
y = self.dropout(y)
y = self.conv1(y)
if self.dropout:
y = self.dropout(y)
y = F.conv2d(y, self.weight, None, 1, 0,
1, 1)
if self.residual:
ybar = (-x - y).mean(dim=1,keepdim=True)
y = th.cat([x+y,ybar],dim=1)
else:
ybar = (-y).mean(dim=1,keepdim=True)
y = th.cat([y,ybar],dim=1)
bbar = th.max(-(self.bias[0:-1]).mean(),self.bias[-1])
b = th.cat([self.bias[0:-1],bbar.unsqueeze(0)],dim=0)
y = y + b.view(1,self.out_channels,1,1)
if self.nonlinear=='leaky_relu':
y = F.leaky_relu(y)
elif self.nonlinear=='selu':
y = F.selu(y)
elif self.nonlinear=='elu':
y = F.elu(y)
elif self.nonlinear:
y = F.relu(y)
if self.bn:
y = self.bn(y)
return y
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}')
if self.bn:
s += ', batchnorm=True'
else:
s += ', batchnorm=False'
return s.format(**self.__dict__)
class FarkasNet(nn.Module):
def __init__(self, layers, block=FarkasBlock, in_channels=3,
classes=10, kernel_size=(3,3), nonlinear=True,
conv0_kwargs = {'kernel_size':(3,3), 'stride':1},
conv0_pool=None, downsample_pool=nn.AvgPool2d,
last_layer_nonlinear=False, last_layer_bn=None,
dropout=0.,weight_init='standard',zero_last=False,
bn=True, base_channels=16, **kwargs):
if last_layer_bn is None:
last_layer_bn=bn
super().__init__()
kernel_size = _pair(kernel_size)
def make_layer(n, block, in_channels, out_channels, stride):
sublayers = []
if not in_channels==out_channels:
conv = FarkasConv
sublayers.append(conv(in_channels, out_channels, kernel_size=(1,1),
nonlinear=True, dropout=dropout, bn=bn,init_type=weight_init))
if stride>1:
sublayers.append(downsample_pool(stride))
for k in range(n):
u = k
sublayers.append(block(out_channels+u, kernel_size=kernel_size, dropout=dropout,
bn=bn, nonlinear=nonlinear, weight_init=weight_init,zero_last=zero_last,**kwargs))
return nn.Sequential(*sublayers)
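        # Note (added comment): each Farkas block widens its input by one
        # channel, so the `u` offsets in make_layer above and in the stage loop
        # below account for the channels added by the previous stage's blocks.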
conv = FarkasConv
pdsz = [k//2 for k in conv0_kwargs['kernel_size'] ]
self.layer0 = conv(in_channels, base_channels, padding=pdsz,
**conv0_kwargs, dropout=dropout, bn=bn, nonlinear=nonlinear,weight_init=weight_init)
if conv0_pool:
self.maxpool = conv0_pool
else:
self.maxpool = False
_layers = []
for i, n in enumerate(layers):
if i==0:
_layers.append(make_layer(n, block, base_channels,
base_channels, 1))
else:
u = layers[i-1]
_layers.append(make_layer(n, block, base_channels*(2**(i-1))+u,
base_channels*(2**i), 2))
self.layers = nn.Sequential(*_layers)
self.pool = Avg2d()
u = layers[-1]
self.view = View((2**i)*base_channels+u)
if dropout>0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = False
self.fc = nn.Linear((2**i)*base_channels+u,classes)
self.nonlinear=nonlinear
self.bn = bn
@property
def num_parameters(self):
return sum([w.numel() for w in self.parameters()])
def forward(self, x):
x = self.layer0(x)
if self.maxpool:
x = self.maxpool(x)
x = self.layers(x)
x = self.pool(x)
x = self.view(x)
if self.dropout:
x = self.dropout(x)
x = self.fc(x)
return x
def FarkasNet18(**kwargs):
m = FarkasNet([3,3,3],block=FarkasBlock,**kwargs)
return m
def FarkasNet50(**kwargs):
m = FarkasNet([3,4,6,3],base_channels=64,block=FarkasBottleneck,**kwargs)
return m
def FarkasNet101(**kwargs):
m = FarkasNet([3,4,23,3],base_channels=64,block=FarkasBottleneck,**kwargs)
return m
def FarkasNet110(**kwargs):
    m = FarkasNet([18,18,18],block=FarkasBlock,**kwargs)
    return m
def FarkasNet34(**kwargs):
m = FarkasNet([5,5,5],block=FarkasBlock,**kwargs)
return m
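# Minimal smoke-test sketch (illustrative addition, not part of the original
# file; assumes a working PyTorch install):
if __name__ == '__main__':
    net = FarkasNet18()
    x = th.randn(2, 3, 32, 32)
    print(net(x).shape)  # expected: torch.Size([2, 10])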
|
[
"aram-alexandre.pooladian@mail.mcgill.ca"
] |
aram-alexandre.pooladian@mail.mcgill.ca
|
ab7c71a677644efe5b14cfcd69d86aae4be88786
|
20766840efca8977b1246c2c8ad05a15388e826c
|
/모듈/insa2.py
|
0b6ccc8964acb9f3544b984470b2522ce8237833
|
[] |
no_license
|
Chaenini/Programing-Python-
|
0780c7880b2d15b7a210f11975a7c851b56a1d3f
|
a4aa9f7b021bae02677815f1a8b74d2420637958
|
refs/heads/master
| 2020-07-10T20:41:25.957058
| 2019-12-09T00:55:30
| 2019-12-09T00:55:30
| 204,366,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6
|
py
|
#139p
|
[
"s2018w16@e-mirim.hs.kr"
] |
s2018w16@e-mirim.hs.kr
|
2866adf3865f8ad42fe7d0810cf0266c2c3ec479
|
77ef4019ee6ce45abf3b5e21f2b33f3998620cd1
|
/base/message.py
|
9b4845620508555ad671bf3bd1d942c5554df6fe
|
[
"MIT"
] |
permissive
|
kevinrpb/rfid-protocols
|
243ef09a248c8b3229f60d93784e13d372baa3f3
|
01543f995f17d92fab0b159cf1c85f4ff65cd402
|
refs/heads/main
| 2023-02-16T03:21:56.322256
| 2021-01-19T17:41:59
| 2021-01-19T17:41:59
| 318,453,516
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
from enum import Enum
from bitarray import bitarray
class MessageKind(Enum):
READER_TO_TAG = 0
TAG_TO_READER = 1
def __str__(self) -> str:
if self == MessageKind.READER_TO_TAG:
return 'READER_TO_TAG'
elif self == MessageKind.TAG_TO_READER:
return 'TAG_TO_READER'
else:
return ''
class Message(object):
def __init__(self, label: str, kind: MessageKind, content: bitarray):
self.label = label
self.kind = kind
self.content = content
def size(self) -> int:
        return len(self.content)
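# Usage sketch (illustrative addition, not part of the original file):
#   msg = Message('REQUEST', MessageKind.READER_TO_TAG, bitarray('10110'))
#   msg.size()     -> 5
#   str(msg.kind)  -> 'READER_TO_TAG'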
|
[
"kevinrpb@hotmail.com"
] |
kevinrpb@hotmail.com
|
747403576f24d62c684e4cad16f2b82581d8a8fb
|
bb048e7cc8ffd76a1c0a5b041b2ec5ea23fe95b8
|
/conftest.py
|
2f442eac6695282b90be09a6bf59a08ffce8a8b9
|
[] |
no_license
|
Carling-Kody/pura_demo
|
af68f17fc3b1424cddaf63ede793df064dea3a14
|
4d7870995cc88b34c36db00173c6510dadc69186
|
refs/heads/main
| 2023-08-13T18:46:39.230402
| 2021-07-08T22:40:09
| 2021-07-08T22:40:09
| 381,835,920
| 0
| 0
| null | 2021-07-08T22:40:10
| 2021-06-30T21:22:41
|
Python
|
UTF-8
|
Python
| false
| false
| 9,084
|
py
|
"""
`conftest.py` and `pylenium.json` files should stay at your Workspace Root.
conftest.py
Although this file is editable, you should only change its contents if you know what you are doing.
Instead, you can create your own conftest.py file in the folder where you store your ui_tests.
pylenium.json
You can change the values, but DO NOT touch the keys or you will break the schema.
py
The only fixture you really need from this is `py`. This is the instance of Pylenium for each test.
Just pass py into your test and you're ready to go!
Examples:
def test_go_to_google(py):
py.visit('https://google.com')
assert 'Google' in py.title()
"""
import json
import logging
import os
import shutil
import sys
from pathlib import Path
import pytest
import requests
from faker import Faker
from pytest_reportportal import RPLogger, RPLogHandler
from pylenium.driver import Pylenium
from pylenium.config import PyleniumConfig, TestCase
from pylenium.a11y import PyleniumAxe
@pytest.fixture(scope='function')
def fake() -> Faker:
"""A basic instance of Faker to make test data."""
return Faker()
@pytest.fixture(scope='function')
def api():
"""A basic instance of Requests to make HTTP API calls."""
return requests
@pytest.fixture(scope="session")
def rp_logger(request):
"""Report Portal Logger"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Create handler for Report Portal if the service has been
# configured and started.
if hasattr(request.node.config, 'py_test_service'):
# Import Report Portal logger and handler to the test module.
logging.setLoggerClass(RPLogger)
rp_handler = RPLogHandler(request.node.config.py_test_service)
# Add additional handlers if it is necessary
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
else:
rp_handler = logging.StreamHandler(sys.stdout)
# Set INFO level for Report Portal handler.
rp_handler.setLevel(logging.INFO)
return logger
@pytest.fixture(scope='session', autouse=True)
def project_root() -> str:
"""The Project (or Workspace) root as a filepath.
* This conftest.py file should be in the Project Root if not already.
"""
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='session', autouse=True)
def test_run(project_root, request) -> str:
"""Creates the `/test_results` directory to store the results of the Test Run.
Returns:
The `/test_results` directory as a filepath (str).
"""
session = request.node
test_results_dir = f'{project_root}/test_results'
if os.path.exists(test_results_dir):
# delete /test_results from previous Test Run
shutil.rmtree(test_results_dir, ignore_errors=True)
try:
# race condition can occur between checking file existence and
# creating the file when using pytest with multiple workers
Path(test_results_dir).mkdir(parents=True, exist_ok=True)
except FileExistsError:
pass
for test in session.items:
try:
# make the test_result directory for each test
Path(f'{test_results_dir}/{test.name}').mkdir(parents=True, exist_ok=True)
except FileExistsError:
pass
return test_results_dir
@pytest.fixture(scope='session')
def py_config(project_root, request) -> PyleniumConfig:
"""Initialize a PyleniumConfig for each test
1. This starts by deserializing the user-created pylenium.json from the Project Root.
2. If that file is not found, then proceed with Pylenium Defaults.
3. Then any CLI arguments override their respective key/values.
"""
try:
# 1. Load pylenium.json in Project Root, if available
with open(f'{project_root}/pylenium.json') as file:
_json = json.load(file)
config = PyleniumConfig(**_json)
except FileNotFoundError:
# 2. pylenium.json not found, proceed with defaults
config = PyleniumConfig()
# 3. Override with any CLI args/options
# Driver Settings
cli_remote_url = request.config.getoption('--remote_url')
if cli_remote_url:
config.driver.remote_url = cli_remote_url
cli_browser_options = request.config.getoption('--options')
if cli_browser_options:
config.driver.options = [option.strip() for option in cli_browser_options.split(',')]
cli_browser = request.config.getoption('--browser')
if cli_browser:
config.driver.browser = cli_browser
cli_capabilities = request.config.getoption('--caps')
if cli_capabilities:
# --caps must be in '{"name": "value", "boolean": true}' format
# with double quotes around each key. booleans are lowercase.
config.driver.capabilities = json.loads(cli_capabilities)
cli_page_wait_time = request.config.getoption('--page_load_wait_time')
if cli_page_wait_time and cli_page_wait_time.isdigit():
config.driver.page_load_wait_time = int(cli_page_wait_time)
# Logging Settings
cli_pylog_level = request.config.getoption('--pylog_level')
if cli_pylog_level:
config.logging.pylog_level = cli_pylog_level
cli_screenshots_on = request.config.getoption('--screenshots_on')
if cli_screenshots_on:
        shots_on = cli_screenshots_on.lower() == 'true'
config.logging.screenshots_on = shots_on
cli_extensions = request.config.getoption('--extensions')
if cli_extensions:
config.driver.extension_paths = [ext.strip() for ext in cli_extensions.split(',')]
return config
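# Example run overriding pylenium.json from the CLI (illustrative; these flags
# are registered in pytest_addoption at the bottom of this file):
#   pytest --browser=firefox --options="headless, incognito" --screenshots_on=true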
@pytest.fixture(scope='function')
def test_case(test_run, py_config, request) -> TestCase:
"""Manages data pertaining to the currently running Test Function or Case.
* Creates the test-specific logger.
Args:
test_run: The Test Run (or Session) this test is connected to.
Returns:
An instance of TestCase.
"""
test_name = request.node.name
test_result_path = f'{test_run}/{test_name}'
py_config.driver.capabilities.update({'name': test_name})
return TestCase(name=test_name, file_path=test_result_path)
@pytest.fixture(scope='function')
def py(test_case, py_config, request, rp_logger):
"""Initialize a Pylenium driver for each test.
Pass in this `py` fixture into the test function.
Examples:
def test_go_to_google(py):
py.visit('https://google.com')
assert 'Google' in py.title()
"""
py = Pylenium(py_config)
yield py
try:
if request.node.report.failed:
# if the test failed, execute code in this block
if py_config.logging.screenshots_on:
screenshot = py.screenshot(f'{test_case.file_path}/test_failed.png')
with open(screenshot, "rb") as image_file:
rp_logger.info(
"Test Failed - Attaching Screenshot",
attachment={"name": "test_failed.png", "data": image_file, "mime": "image/png"},
)
except AttributeError:
rp_logger.error('Unable to access request.node.report.failed, unable to take screenshot.')
except TypeError:
rp_logger.info('Report Portal is not connected to this test run.')
py.quit()
@pytest.fixture(scope='function')
def axe(py) -> PyleniumAxe:
"""The aXe A11y audit tool as a fixture."""
return PyleniumAxe(py.webdriver)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Yield each test's outcome so we can handle it in other fixtures."""
outcome = yield
report = outcome.get_result()
if report.when == 'call':
setattr(item, "report", report)
return report
def pytest_addoption(parser):
parser.addoption('--browser', action='store', default='', help='The lowercase browser name: chrome | firefox')
parser.addoption('--remote_url', action='store', default='', help='Grid URL to connect ui_tests to.')
parser.addoption('--screenshots_on', action='store', default='', help="Should screenshots be saved? true | false")
parser.addoption('--pylog_level', action='store', default='', help="Set the pylog_level: 'off' | 'info' | 'debug'")
parser.addoption(
'--options',
action='store',
default='',
help='Comma-separated list of Browser Options. Ex. "headless, incognito"',
)
parser.addoption(
'--caps',
action='store',
default='',
help='List of key-value pairs. Ex. \'{"name": "value", "boolean": true}\'',
)
parser.addoption(
'--page_load_wait_time',
action='store',
default='',
help='The amount of time to wait for a page load before raising an error. Default is 0.',
)
parser.addoption(
'--extensions', action='store', default='', help='Comma-separated list of extension paths. Ex. "*.crx, *.crx"'
)
|
[
"kodycarling19@gmail.com"
] |
kodycarling19@gmail.com
|
4bbf47389bde47d911e2861fb4f2fc9e2599284a
|
8ebca2bcb8c73daecc912f00fffb5fea8d918c32
|
/Lib/site-packages/tensorflow/contrib/summary/summary_test_util.py
|
9ad53269d8398a006219e01c4ebdc2491fc707b4
|
[] |
no_license
|
YujunLiao/tensorFlowLearing
|
510ed61689a72dcb53347bd3e4653470893ecc4a
|
1a383b5183a409e017657001eda4dc68e4a6bcf9
|
refs/heads/master
| 2022-12-07T16:01:26.942238
| 2019-06-04T16:34:46
| 2019-06-04T16:34:46
| 177,408,903
| 0
| 0
| null | 2022-11-21T21:21:36
| 2019-03-24T12:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to code summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import sqlite3
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.platform import gfile
class SummaryDbTest(test_util.TensorFlowTestCase):
"""Helper for summary database testing."""
def setUp(self):
super(SummaryDbTest, self).setUp()
self.db_path = os.path.join(self.get_temp_dir(), 'DbTest.sqlite')
if os.path.exists(self.db_path):
os.unlink(self.db_path)
self.db = sqlite3.connect(self.db_path)
self.create_db_writer = functools.partial(
summary_ops.create_db_writer,
db_uri=self.db_path,
experiment_name='experiment',
run_name='run',
user_name='user')
def tearDown(self):
self.db.close()
super(SummaryDbTest, self).tearDown()
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
def get_one(db, q, *p):
return db.execute(q, p).fetchone()[0]
def get_all(db, q, *p):
return unroll(db.execute(q, p).fetchall())
def unroll(list_of_tuples):
return sum(list_of_tuples, ())
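# Note (added comment): unroll flattens the list of row tuples returned by
# sqlite's fetchall(), e.g. unroll([(1,), (2, 3)]) == (1, 2, 3).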
|
[
"18916108830@163.com"
] |
18916108830@163.com
|
2fa4ab95d64e2940ff958f0cf6fc45151207da79
|
67819ca1c5030d936413ddbaa08ed245b7b9358d
|
/app/backend/hiStoryBackend/hiStoryBackend/wsgi.py
|
dd4233b7ffe0739198a5a1c86e59932b7f74728a
|
[] |
no_license
|
bounswe/bounswe2018group7
|
9ac94fb93113571fdd43c2e9b91ea2ba318cce9c
|
9c56cb2f28f189853f4aacdb587b85544f25b2c3
|
refs/heads/master
| 2023-03-05T09:18:43.445698
| 2022-04-23T19:13:44
| 2022-04-23T19:13:44
| 120,274,361
| 12
| 3
| null | 2023-03-03T15:20:37
| 2018-02-05T08:12:49
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hiStoryBackend.settings')
application = get_wsgi_application()
|
[
"cburakaygun@gmail.com"
] |
cburakaygun@gmail.com
|
b695146b5baec03e2372b136427fca9502a8b6e6
|
6a47b50684e9a0dcbf145acea402bd97e298c89d
|
/Python Programs/helloAll.py
|
10f799d276892bb3a0572f7e1c3782922396aac3
|
[
"MIT"
] |
permissive
|
AkshayPradeep6152/letshack
|
a37e132c408aa68a2232cbab7eadaafb58267e26
|
f820e438921c6706fb2565379db6681184676698
|
refs/heads/main
| 2023-08-13T10:38:11.495481
| 2021-10-03T05:05:01
| 2021-10-03T05:05:01
| 300,655,139
| 8
| 96
|
MIT
| 2021-10-03T05:05:02
| 2020-10-02T15:17:34
|
Java
|
UTF-8
|
Python
| false
| false
| 17
|
py
|
print("helloAll")
|
[
"noreply@github.com"
] |
AkshayPradeep6152.noreply@github.com
|
c119687b11afe9b22fca389be33ff9b8a804cf22
|
9322c270beaf1019328bf14c836d167145d45946
|
/raoteh/sampler/tests/test_graph_transform.py
|
af315325cddb45fdc81619cf995488fd53736710
|
[] |
no_license
|
argriffing/raoteh
|
13d198665a7a3968aad8d41ddad12c08d36d57b4
|
cdc9cce8fdad0a79dbd90dfcdec6feece8fc931f
|
refs/heads/master
| 2021-01-22T19:41:25.828133
| 2014-03-10T22:25:48
| 2014-03-10T22:25:48
| 10,087,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,511
|
py
|
"""Test graph algorithms relevant to Rao-Teh sampling.
"""
from __future__ import division, print_function, absolute_import
import itertools
from collections import defaultdict
import networkx as nx
from numpy.testing import (run_module_suite, TestCase,
assert_equal, assert_allclose, assert_, assert_raises)
from raoteh.sampler._graph_transform import (
get_edge_bisected_graph,
get_node_to_state,
remove_redundant_nodes,
get_redundant_degree_two_nodes,
get_chunk_tree,
add_trajectories,
)
# This is an official itertools recipe.
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s)+1))
class TestGraphTransform(TestCase):
def test_get_edge_bisected_graph(self):
# Create an example from the networkx documentation.
G = nx.Graph()
G.add_weighted_edges_from([
(1, 2, 0.125),
(1, 3, 0.75),
(2, 4, 1.2),
(3, 4, 0.375)])
# Create a new graph by bisecting the edges of the old graph.
H = get_edge_bisected_graph(G)
# The edge-bisected graph has twice as many edges.
assert_equal(len(G.edges()) * 2, len(H.edges()))
assert_equal(G.size()*2, H.size())
# The sum of edge weights is unchanged.
assert_allclose(G.size(weight='weight'), H.size(weight='weight'))
# The node set of the edge-bisected graph includes that of the original.
assert_(set(G) <= set(H))
# The added nodes are each greater than each original node.
assert_(max(G) < min(set(H) - set(G)))
def test_get_chunk_tree(self):
# Define the original tree and its event nodes.
# This is taken from a doodle in my notebook,
# and it is not particularly cleverly chosen.
tree_edges = (
(0, 1),
(1, 2),
(3, 4),
(4, 2),
(2, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(7, 10),
(10, 11),
(11, 12),
(12, 13),
(13, 14),
(13, 15),
(15, 16),
(16, 17),
)
event_nodes = {1, 4, 5, 6, 8, 10, 11, 12, 15, 16}
# Create a tree by specifying the edges.
T = nx.Graph()
T.add_edges_from(tree_edges)
# Run tests, using all possible roots and also a default root.
potential_roots = list(T) + [None]
for root in potential_roots:
# Construct the chunk tree and its associated node maps.
results = get_chunk_tree(T, event_nodes)
chunk_tree, non_event_map, event_map = results
# The nodes pointed to by the non_event_map
            # should be nodes of the original tree.
assert_(set(non_event_map.values()) <= set(T))
# The output tree should have 11 nodes and 10 edges.
assert_equal(len(chunk_tree), 11)
assert_equal(len(chunk_tree.edges()), 10)
# The 8 non-event nodes should map to 7 unique chunk nodes.
assert_equal(len(non_event_map), 8)
assert_equal(len(set(non_event_map.values())), 7)
# The non-event nodes 13 and 14 should map to the same chunk.
assert_equal(non_event_map[13], non_event_map[14])
def test_remove_redundant_nodes_short_path(self):
# Define a short path with one redundant
# and one non-redundant internal node.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1)
T.add_edge(1, 2, state=0, weight=1)
T.add_edge(2, 3, state=1, weight=1)
# Try removing a redundant node.
redundant_nodes = {1}
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_equal(T_out[0][2]['weight'], 2)
# Fail at removing a non-redundant node.
redundant_nodes = {2}
assert_raises(
Exception,
remove_redundant_nodes,
T, redundant_nodes)
def test_remove_redundant_nodes_long_path(self):
# Define a path with multiple consecutive redundant internal nodes.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1.1)
T.add_edge(1, 2, state=0, weight=1.2)
T.add_edge(2, 3, state=1, weight=1.3)
T.add_edge(3, 4, state=1, weight=1.4)
T.add_edge(4, 5, state=1, weight=1.5)
T.add_edge(5, 6, state=1, weight=1.6)
T.add_edge(6, 7, state=1, weight=1.7)
# Get the original weighted size.
# This is the sum of weights of all edges.
original_size = T.size(weight='weight')
# Check the set of redundant nodes.
all_redundant_nodes = {1, 3, 4, 5, 6}
obs_nodes = get_redundant_degree_two_nodes(T)
assert_equal(all_redundant_nodes, obs_nodes)
# Try removing all valid combinations of redundant nodes.
for redundant_node_tuple in powerset(all_redundant_nodes):
redundant_nodes = set(redundant_node_tuple)
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_allclose(T_out.size(weight='weight'), original_size)
def test_remove_redundant_nodes_small_tree(self):
# Define a short path with one redundant
# and one non-redundant internal node.
T = nx.Graph()
T.add_edge(0, 1, state=0, weight=1)
T.add_edge(0, 2, state=0, weight=1)
T.add_edge(0, 3, state=0, weight=1)
# None of the nodes are considered redundant in the current
# implementation, because each node is of degree 1 or 3.
for redundant_nodes in ({0}, {1}, {2}, {3}):
assert_raises(
Exception,
remove_redundant_nodes,
T, redundant_nodes)
def test_remove_redundant_nodes_medium_tree(self):
# Define a tree.
T = nx.Graph()
T.add_edge(0, 10, state=0, weight=1.1)
T.add_edge(0, 20, state=0, weight=1.2)
T.add_edge(0, 30, state=0, weight=1.3)
T.add_edge(20, 21, state=0, weight=1.4)
T.add_edge(30, 31, state=0, weight=1.5)
T.add_edge(31, 32, state=0, weight=1.6)
# Get the original weighted size.
# This is the sum of weights of all edges.
original_size = T.size(weight='weight')
# Try removing all valid combinations of redundant nodes.
for redundant_node_tuple in powerset((20, 30, 31)):
redundant_nodes = set(redundant_node_tuple)
T_out = remove_redundant_nodes(T, redundant_nodes)
assert_equal(set(T_out), set(T) - redundant_nodes)
assert_allclose(T_out.size(weight='weight'), original_size)
class TestAddTrajectories(TestCase):
def test_compatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
T_traj = nx.Graph()
T_traj.add_edge(0, 1, state=0, weight=0.1)
T_traj.add_edge(0, 20, state=0, weight=0.05)
T_traj.add_edge(20, 2, state=0, weight=0.05)
T_traj.add_edge(0, 3, state=0, weight=0.1)
root = 0
T_merged, dummy_nodes = add_trajectories(T_base, root, [T_traj])
# There should not be any dummy nodes.
assert_equal(dummy_nodes, set())
# The merged tree should have four edges.
assert_equal(T_base.size(), 3)
assert_equal(T_merged.size(), 4)
# The total weight of the merged tree
# should be the same as the total weight of the base tree.
assert_allclose(
T_merged.size(weight='weight'),
T_base.size(weight='weight'))
def test_incompatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
root = 0
# Define a trajectory that is bad because it adds a high degree node.
traj = nx.Graph()
traj.add_edge(0, 4, state=0, weight=0.1)
traj.add_edge(4, 20, state=0, weight=0.05)
traj.add_edge(20, 2, state=0, weight=0.05)
traj.add_edge(4, 3, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
# Define a trajectory that is bad because it adds a leaf node.
traj = nx.Graph()
traj.add_edge(0, 1, state=0, weight=0.1)
traj.add_edge(0, 20, state=0, weight=0.05)
traj.add_edge(20, 2, state=0, weight=0.05)
traj.add_edge(0, 3, state=0, weight=0.05)
traj.add_edge(3, 4, state=0, weight=0.05)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
# Define a trajectory that is bad
# because it flips around the nodes in a way that is incompatible
# with the original tree topology.
traj = nx.Graph()
traj.add_edge(1, 0, state=0, weight=0.1)
traj.add_edge(1, 2, state=0, weight=0.1)
traj.add_edge(1, 3, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
def test_complicated_incompatible_trees(self):
T_base = nx.Graph()
T_base.add_edge(0, 1, weight=0.1)
T_base.add_edge(0, 2, weight=0.1)
T_base.add_edge(0, 3, weight=0.1)
T_base.add_edge(3, 4, weight=0.1)
T_base.add_edge(3, 5, weight=0.1)
root = 0
# Define a trajectory that is bad
# because the topology is different in a way that cannot be detected
# by checking the degrees of the nodes.
traj = nx.Graph()
traj.add_edge(3, 1, state=0, weight=0.1)
traj.add_edge(3, 2, state=0, weight=0.1)
traj.add_edge(3, 0, state=0, weight=0.1)
traj.add_edge(0, 4, state=0, weight=0.1)
traj.add_edge(0, 5, state=0, weight=0.1)
assert_raises(ValueError, add_trajectories,
T_base, root, [traj])
def test_edge_to_event_times(self):
# The merged tree will look like the following,
# where 'x' is a node in the original tree,
# and 'a' is a node introduced by trajectory merging,
# and 'o' is an event node.
#
# x
# /|\
# / | \
# | | |
# o o x
# | | |
# x | | (0, 0)
# x |
# x
# /| (0, 0)
# / a
# / | (0, 10)
# | a
# x | (5, 10)
# a
# | (5, 0)
# o
# | (5, 0)
# a
# | (0, 0)
# x
#
T = nx.Graph()
T.add_edge(0, 1, weight=0.1)
T.add_edge(0, 2, weight=0.1)
T.add_edge(0, 3, weight=0.1)
T.add_edge(3, 4, weight=0.1)
T.add_edge(3, 5, weight=0.1)
T.add_edge(4, 6, weight=0.1)
root = 0
# Define a trajectory with an extra segment along one edge.
traj_a = nx.Graph()
traj_a.add_edge(0, 1, weight=0.1, state=0)
traj_a.add_edge(0, 2, weight=0.1, state=0)
traj_a.add_edge(0, 3, weight=0.1, state=0)
traj_a.add_edge(3, 4, weight=0.1, state=0)
traj_a.add_edge(3, 5, weight=0.1, state=0)
traj_a.add_edge(4, 10, weight=0.025, state=0)
traj_a.add_edge(10, 11, weight=0.05, state=5)
traj_a.add_edge(11, 6, weight=0.025, state=0)
# Define a trajectory with an interleaving segment.
traj_b = nx.Graph()
traj_b.add_edge(0, 1, weight=0.1, state=0)
traj_b.add_edge(0, 2, weight=0.1, state=0)
traj_b.add_edge(0, 3, weight=0.1, state=0)
traj_b.add_edge(3, 4, weight=0.1, state=0)
traj_b.add_edge(3, 5, weight=0.1, state=0)
traj_b.add_edge(4, 20, weight=0.02, state=0)
traj_b.add_edge(20, 21, weight=0.02, state=10)
traj_b.add_edge(21, 6, weight=0.06, state=0)
# Define a few event times along directed edges,
# where the edge direction radiates away from the root.
edge_to_event_times = {
(0, 1) : {0.06},
(0, 2) : {0.02},
(4, 6) : {0.045},
}
# Construct the merged tree.
T_merged, event_nodes = add_trajectories(
T, root,
[traj_a, traj_b],
edge_to_event_times=edge_to_event_times)
# After this point are some tests.
# Check the total number of nodes in the merged tree.
assert_equal(len(T_merged.edges()), 13)
# Check the multiset of edge state pairs in the merged tree.
state_pair_to_count = defaultdict(int)
for edge in nx.bfs_edges(T_merged, root):
na, nb = edge
states = T_merged[na][nb]['states']
state_pair = tuple(states)
assert_equal(len(state_pair), 2)
state_pair_to_count[state_pair] += 1
assert_equal(state_pair_to_count[(0, 10)], 1)
assert_equal(state_pair_to_count[(5, 10)], 1)
assert_equal(state_pair_to_count[(5, 0)], 2)
expected_state_pairs = set([(0, 0), (0, 10), (5, 10), (5, 0)])
assert_equal(set(state_pair_to_count), expected_state_pairs)
# Check that the number of event nodes is correct.
assert_equal(len(edge_to_event_times), len(event_nodes))
# The merged tree must contain all of the nodes of the original tree.
missing_nodes = set(T) - set(T_merged)
assert_equal(missing_nodes, set())
# The base tree, the two trajectories, and the merged tree
# should all have the same weighted size.
weighted_size = T.size(weight='weight')
assert_allclose(traj_a.size(weight='weight'), weighted_size)
assert_allclose(traj_b.size(weight='weight'), weighted_size)
assert_allclose(T_merged.size(weight='weight'), weighted_size)
# Each event node must be adjacent to exactly two edges
# in the merged tree, and both of these edges
# must be annotated with the same sequence of state values.
for node in event_nodes:
assert_equal(T_merged.degree(node), 2)
na, nb = T_merged[node]
na_states = T_merged[node][na]['states']
nb_states = T_merged[node][nb]['states']
assert_equal(na_states, nb_states)
# Print the edges of the merged tree.
"""
print()
print('--- add_trajectories test output ---')
print(event_nodes)
for edge in nx.bfs_edges(T_merged, root):
na, nb = edge
weight = T_merged[na][nb]['weight']
states = T_merged[na][nb]['states']
print(na, nb, weight, states)
print()
"""
"""
0 8 0.02 [0, 0]
0 3 0.1 [0, 0]
0 7 0.06 [0, 0]
8 2 0.08 [0, 0]
3 4 0.1 [0, 0]
3 5 0.1 [0, 0]
7 1 0.04 [0, 0]
4 9 0.02 [0, 0]
9 10 0.005 [0, 10]
10 11 0.015 [5, 10]
11 12 0.005 [5, 0]
12 13 0.03 [5, 0]
13 6 0.025 [0, 0]
"""
class TestGetNodeToState(TestCase):
def test_get_node_to_state_simple_tree_identical_states(self):
T = nx.Graph()
T.add_edge(0, 1, state=42)
T.add_edge(1, 2, state=42)
all_query_nodes = {0, 1, 2}
for query_nodes in powerset(all_query_nodes):
nnodes = len(query_nodes)
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(set(node_to_state), set(query_nodes))
assert_equal(set(node_to_state.values()), set([42]*nnodes))
def test_get_node_to_state_simple_tree_different_states(self):
T = nx.Graph()
T.add_edge(0, 1, state=42)
T.add_edge(1, 2, state=42)
T.add_edge(2, 3, state=99)
# Some of the nodes have defined states.
query_nodes = {0, 1, 3}
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(node_to_state, {0:42, 1:42, 3:99})
# But node 2 does not have a defined state
# because it represents a state transition.
query_nodes = {0, 1, 2, 3}
assert_raises(ValueError, get_node_to_state, T, query_nodes)
def test_complicated_tree(self):
T = nx.Graph()
T.add_edge(0, 1, state=2)
T.add_edge(0, 2, state=2)
T.add_edge(0, 3, state=2)
T.add_edge(3, 4, state=10)
T.add_edge(4, 5, state=10)
T.add_edge(4, 6, state=10)
# Most of the nodes have defined states.
query_nodes = {0, 1, 2, 4, 5, 6}
expected_node_to_state = {0:2, 1:2, 2:2, 4:10, 5:10, 6:10}
node_to_state = get_node_to_state(T, query_nodes)
assert_equal(node_to_state, expected_node_to_state)
# One of the nodes is a transition without a defined state.
query_nodes = {0, 1, 2, 3, 4, 5, 6}
assert_raises(ValueError, get_node_to_state, T, query_nodes)
if __name__ == '__main__':
run_module_suite()
|
[
"argriffi@ncsu.edu"
] |
argriffi@ncsu.edu
|
3bdd06f837466e17a98dd8946a3ad205b882c0ee
|
8c801a9606722a3ed960c0472c85987254beaab9
|
/VirtEnv2/bin/html2text
|
ef10b835a3d4cb89975973af434d23204ccf1837
|
[] |
no_license
|
boyleconnor/MacWorld
|
0377f24417b09e952edee4b4983ac17eb53be806
|
89fb982a23d5965f452f7c0594fdde16185b966e
|
refs/heads/master
| 2022-07-09T00:28:55.856046
| 2014-07-25T02:06:25
| 2014-07-25T02:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
#!/Users/connor/PycharmProjects/MacWorld/VirtEnv2/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'html2text==2014.7.3','console_scripts','html2text'
__requires__ = 'html2text==2014.7.3'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('html2text==2014.7.3', 'console_scripts', 'html2text')()
)
|
[
"cboyle@macalester.edu"
] |
cboyle@macalester.edu
|
|
68d3e5ce7d725d753fe712d6b84fc9b1056b3b77
|
a50badcad45aa17cac0148470a165e89e4d9f352
|
/errors.py
|
56af8f4bb2358b465d314887db56a16ef688abc1
|
[] |
no_license
|
deemoowoor/employee-stats
|
bf53e15e3af7e52a6a6828e8e539a5d945782dee
|
5b29f103c3327fe18ea1998777141b610589e6af
|
refs/heads/master
| 2022-12-12T08:12:28.088128
| 2020-05-20T08:44:47
| 2020-05-20T08:44:47
| 265,500,108
| 0
| 0
| null | 2022-12-08T09:57:35
| 2020-05-20T08:27:27
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
# Application errors should subclass Exception (not BaseException) so that
# ordinary `except Exception` handlers can catch them.
class ApiError(Exception):
def __init__(self, message):
self._message = message
def __str__(self):
return self._message
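# Usage sketch (illustrative addition): str(ApiError('rate limited')) == 'rate limited'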
|
[
"andrei.sosnin@gmail.com"
] |
andrei.sosnin@gmail.com
|
48a3c15283ec705f100a9181029b8e252e62f99e
|
a58689339cf11a04280cb6f627da442d2e6d2128
|
/detector.py
|
e101ec893dc4d28dedc51a5a11c210fd2a101bee
|
[] |
no_license
|
thuyngch/CISDL-DMAC
|
d1928fa7023986220d4d7b21d0e8eb73991a98fd
|
4a4e24051dedb4e534291a71ec32571b07ba7217
|
refs/heads/master
| 2020-05-25T12:10:26.804658
| 2019-06-02T14:38:16
| 2019-06-02T14:38:16
| 187,793,220
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: liuyaqi
"""
import torch
import torch.nn as nn
import math
affine_par = True
class Detector(nn.Module):
def __init__(self,pool_stride):
super(Detector, self).__init__()
        # The pooling of images needs to be researched.
self.img_pool = nn.AvgPool2d(pool_stride,stride=pool_stride)
self.input_dim = 3
        # Feature extraction blocks.
self.conv = nn.Sequential(
nn.Conv2d(self.input_dim, 16, 3, 1, 1),
nn.BatchNorm2d(16,affine = affine_par),
nn.ReLU(inplace=True),
nn.Conv2d(16, 32, 3, 1, 1),
nn.BatchNorm2d(32,affine = affine_par),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(32, 64, 3, 1, 1),
nn.BatchNorm2d(64,affine = affine_par),
nn.ReLU(inplace=True),
nn.Conv2d(64, 128, 3, 1, 1),
nn.BatchNorm2d(128,affine = affine_par),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
)
        # Detection branch.
self.classifier_det = nn.Sequential(
nn.Linear(128*8*8,1024),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(1024,2),
)
self._initialize_weights()
def forward(self,x1,x2,m1,m2):
x1 = self.img_pool(x1)
x2 = self.img_pool(x2)
x1 = torch.mul(x1,m1)
x2 = torch.mul(x2,m2)
x1 = self.conv(x1)
x2 = self.conv(x2)
x1 = x1.view(x1.size(0),-1)
x2 = x2.view(x2.size(0),-1)
x12_abs = torch.abs(x1-x2)
x_det = self.classifier_det(x12_abs)
return x_det
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
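# Minimal shape sketch (illustrative addition, not part of the original file;
# with pool_stride=4 and 128x128 inputs the two stride-2 max pools leave 8x8
# feature maps, matching the 128*8*8 flatten in classifier_det):
if __name__ == '__main__':
    det = Detector(pool_stride=4)
    x1 = torch.randn(1, 3, 128, 128)
    m1 = torch.ones(1, 3, 32, 32)  # masks are applied after the 4x image pooling
    print(det(x1, x1, m1, m1).shape)  # expected: torch.Size([1, 2])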
|
[
"noreply@github.com"
] |
thuyngch.noreply@github.com
|
ea37b89193a3166b30ca50a361245e3dbc673ab7
|
5184ea2fd27e01467d5b027864d14b2d4638ba1b
|
/Number theory/526/526.py
|
849f1678e4527667650f1c3ea2b0b7b3558b4af8
|
[] |
no_license
|
zc1001/leetcode
|
1587f951e52dd08cfcbb2c6b3f5cbff5cb9b3a67
|
45b20789ae00bb3713ab30159ac4f4af7eea55fa
|
refs/heads/master
| 2021-07-11T19:26:24.647433
| 2020-09-27T08:21:01
| 2020-09-27T08:21:01
| 203,901,667
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,614
|
py
|
import math
import copy
import operator
# Precomputed lookup table: the answers are generated offline (see c() below)
# and hard-coded in countArrangement.
# Runtime: 16 ms, beats 100.00% of Python submissions
# Memory: 11.7 MB, beats 18.75% of Python submissions
class Solution(object):
def perm(self,data):
        if len(data) == 1:  # like factorial, the recursion needs a base case
return [data]
r = []
for i in range(len(data)):
            s = data[:i] + data[i + 1:]  # drop the i-th element and recurse on the rest
p = self.perm(s)
for x in p:
                r.append(data[i:i + 1] + x)  # keep accumulating prefixed permutations
return r
def c(self):
r = []
r.append(0)
r.append(1)
r.append(2)
for i in range(3,17):
sum = 0
o = []
for j in range(1,i+1):
o.append(j)
d = self.perm(o)
for k in range(len(d)):
n1 = 0
                for j in range(len(d[k])):  # every permutation of [1..i] has length i
                    if d[k][j] % (j + 1) == 0 or (j + 1) % d[k][j] == 0:
                        n1 += 1
                if n1 == len(d[k]):  # all positions satisfy the divisibility rule
sum += 1
r.append(sum)
print(r)
def countArrangement(self, N):
"""
:type N: int
:rtype: int
"""
b = [0,1,2,3,8,10,36,41,132,250,700,750,4010,4237,10680,24679]
return b[N]
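# Illustrative check (added, not in the original file):
#   Solution().countArrangement(4) -> 8  (b[4] in the precomputed table)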
if __name__ == '__main__':
#n2 = [177,112,74,197,90,16,4,61,103,133,198,4,121,143,55,138,47,167,165,159,93,85,53,118,127,171,137,65,135,45,151,64,109,25,61,152,194,65,165,97,199,163,53,72,58,108,10,105,27,127,64,120,164,70,190,91,41,127,109,176,172,12,193,34,38,54,138,184,120,103,33,71,66,86,143,125,146,105,182,173,184,199,46,148,69,36,192,110,116,53,38,40,65,31,74,103,86,12,39,158]
#n = [488,8584,8144,7414,6649,3463,3453,8665,8006,1313,3815,7404,6969,7759,3643,8530,9792,1815,2480,6996,1151,2849,3432,1198,6125,1666,978,930,1678,3348,5223,2167,1932,3367,5933,4933,6830,9386,9951,1188,7051,118,8593,373,7877,2236,5255,7669,4051,7735,1018,1554,584,4450,9105,3061,2468,83,3992,498,9784,5567,2665,8068,8935,4951,3002,5765,4337,9305,3306,7741,9423,1899,8114,7301,487,3369,4970,890,7456,2340,8797,4392,6790,7902,3805,5610,7985,4149,6109,4121,9717,5126,2190,8652,77,1544,769,767,849,4075,8508,272,2326,2974,7573,9165,2695,8896,56,6503,1236,8853,7247,4379,6755,1052,9989,1092,5202,6098,5214,4919,7577,3756,9923,7654,5300,7044,8421,6149,1120,3281,6421,9798,2607,347,8964,5302,9243,5372,1805,479,4225,9052,1210,7332,6457,1200,8424,8011,3650,9990,9282,1227,3746,9205,5234,9046,6249,7,1547,3721,3289,4321,9872,5896,4668,8836,1199,3911,560,9356,742,4785,4761,1953,8469,1218,9505,3245,5581,9507,3236,4863,7087,3334,4068,2321,8733,6669,2328,280,391,1969,4601,6615,7866,9269,1803,5417,9532,2363,1125,6627,7148,5886,4932,1969,3456,4437,5214,9037,296,4802,252,7383,8137,4320,9704,6870,7342,8385,3502,4085,354,8104,700,4572,3725,2503,9989,9610,4866,7467,6237,8366,9705,1169,335,9514,1958,1901,4903,2254,6704,5156,9638,1193,9476,5694,8063,3170,4079,7917,7255,4434,2373,7955,9006,6099,458,5348,2061,1676,2815,8298,42,2520,5819,6729,2034,7777,8631,6938,31,5335,8446,6021,6528,7922,1716,943,1093,5795,8860,4700,6581,7586,2656,1940,3685,3114,3640,5746,4791,2807,396,1185,1679,6215,7915,3714,3992,6546,7004,375,8233,5450,6397,1113,9724,8113,8408,7169,260,3620,1870,3194,1206,4526,5134,4891,3992,5126,6989,5135,7933,3737,2673,9612,9952,588,9678,296,3486,3034,672,8071,4836,3421,9184,4561,1534,7592,8082,8146,7564,9952,1340,8771,830,2826,14,1175,7952,3356,2662,2237,7093,5335,1850,3398,2275,7880,3694,2113,915,718,184,5751,491,5720,6664,2025,3312,4747,6524,877,1051,7864,6000,8234,7043,6014,5761,1347,9370,8423,3585,6464,111,1787,6214,8738,9667,6260,852,6934,6979,1036,2686,3822,3109,5702,5848,6421,449,8724,3650,7853,6588,6002,2439,9983,2017,8200,1331,7739,2975,4916,555,9438,3055,6769,8177,9074,3030,5381,6009,6361,6417,5047,183,5878,749,2383,2300,7551,1107,2302,5404,4048,4657,7843,4031,6674,2395,1714,4413,5370,6630,4969,4809,6037,1738,9338,5112,1120,4719,1121,7481,7488,6168,4017,3367,6917,6400,2019,4468,7508,673,6224,7908,5330,419,8291,2004,2814,6,2770,8185,2988,4091,9346,9026,2181,8684,4138,3302,3403,1611,7135,891,7779,1152,4258,1048,7553,6277,1869,1413,6951,4445,5673,2281,4865,3964,638,7679,3970,3408,5864,6959,3851,5210,5985,6032,3894,6475,5686,3649,8086,2822,4541,5865,326,8799,3265,4231,5077,5134,5644,8380,9580,7669,661,797,1634,7651,8476,5604,7411,693,8915,1262,5903,4900,3647,6150,7727,9333,9799,5813,2155,692,8030,8834,9492,1296,3065,921,2782,5062,5653,8714,2731,2666,9511,4365,318,4340,6322,7729,5033,5237,8992,7288,6490,8991,9790,4217,8324,9590,31,6832,6634,4413,5666,6126,5709,8731,3399,4844,3793,9052,9910,6525,1719,9422,7242,8389,114,3564,6118,5147,8802,1462,2435,1644,453,2226,5861,8778,8168,2244,1962,4802,6658,7628,7281,8719,6359,7032,9915,153,6085,9826,3030,4156,5600,272,2545,5714,3837,5015,861,8991,6478,9648,6987,3283,8226,2848,8413,6394,1445,375,7549,4455,8003,1182,3174,715,8214,3090,7220,651,9268,250,1159,4868,6874,3704,582,7063,5072,7795,6054,1550,7443,3041,1185,5670,2242,9599,8416,39,6326,2317,4494,4330,3499,4020,1397,8066,3462,8617,5069,2730,5219,6229,7598,2093,6285,8180,9157,1357,5975,1563,9259,9771,957,445,5441,3199,6396,209,3238,2722,2527,4084,3404,2378,8104,4801,6796,1567,3418,1866,4297,4989,8095,8248,7083,732,6428,
2592,2090,8756,4155,1349,8527,1464,1794,3968,4663,8190,529,4253,7265,9408,4689,669,8139,2794,5471,4935,713,5241,3153,1362,6583,7600,9610,3666,8333,6039,2610,423,1147,3117,1772,9674,4582,9919,9994,5597,4461,523,6203,1726,9932,892,8748,4423,38,571,9358,7103,2164,8864,8466,8747,6464,8076,8765,1149,467,1375,1572,1614,4493,9697,7640,5427,9616,7634,1024,429,4510,7227,8508,794,4472,7256,5217,4510,4179,927,1614,6343,9791,80,1443,2608,4508,208,3757,1328,1584,5330,9294,2429,1379,6935,7856,7347,921,8880,7776,5431,2460,2636,6225,6932,6244,7794,7794,423,8722,9408,3119,4865,5840,4562,7473,6701,4770,1231,4381,2706,2913,3675,5135,4292,6962,9343,1639,7884,8224,5767,9667,7036,8404,2245,3968,4648,39,1762,1424,5113,7523,4543,9979,9715,9105,7452,6416,227,8683,797,2934,1596,825,8069,2240,7787,3765,3879,2023,1989,9647,8043,5377,4403,288,5697,9051,327,3811,475,1793,1334,1370,1772,1050,475,9224,3818,703,4260,4616,9989,2208,5441,8058,4449,9580,8175,4680,7956,164,679,5999,1893,5082,6287,7590,486,2966,1402,7313,4759,2736,8684,6531,138,9159,2108,3957,6214,2720,4925,6203,4928,6718,614,5729,6298,8789,6762,4254,5306,7441,6605,3551,8876,2892,1142,9362,2211,2544,6675,6970,1632,5359,9854,8123,871,8314,2080,3437,1034,3357,5993,2314,75,2959,4396,2725,1748,1158,3332,3406,4951,9937,6958,3827,2830,4452,3189,5041,6996,6217,8363,4980,7928,4569,3103,8799,2883,1535,8589,269,4892,4582,8936,4967,7541,3332,7693,5641,842,1025,5400,5793,962,2358,9621,144,6810,9162,1537,158,5379,6253,1490,9660,822,4594,8459,58,6129,7048,327,7374,7982,5615,2341,5523,5299,6386,7517,6141,3763,2917,8287,1078,1627,4260,7574,4789,3422,9112,4947,5154,5365,2789,4814,2539,7383,9625,2597,9865,3026,9277,7239,1008,4892,5932,2884,192,8671,6753,2685,2434,9670,972,3512,7649,5232,1087,2438,5007,6551,3737,6513,8268,6526,1327,807,3910,7304,9757,127,330,9034,3718,7691,278,9650,6927,6822,8321,32,5860,7108,9702,6832,6972,7351,8417,8059,6141,3424,962,9878,9937,9230,6404,7616,6390,6666,1272,6147,3145,7955,1533,6863,1998,8163,2866,5277,4986,1187,5309,846,4647,1363,4030,1620,5066,2447,6031,1207,2223,6994,1085,8512,2576,3841,2480,8966,6860,3753,1465,5,1708,2998,6869,3706,7514,9735,8983,2500,7274,4292,9698,1922,2007,80,3542,7073,2528,9573,8280,1103,2919,5717,9616,5496,9558,2096,814,6418,2201,2280,6424,3909,1630,9645,3967,9144,9380,9302,7996,6654,9946,4046,4928,1953,4127,8470,9026,3007,4396,7306,462,7315,9375,6430,9163,8934,8527,9978,1704,7080,8610,4480,7342,240,4125,1309,5737,3505,612,85,6512,6910,4132,1440,8864,4611,6263,7890,3970,659,1549,4432,4326,7276,863,3490,6210,5742,9820,4267,2822,8430,5099,164,5022,9225,7826,759,9082,4790,7197,1946,1700,7681,3387,6916,2292,6002,1159,2614,6661,9060,7046,7339,6336,4261,829,8899,6355,7001,3166,5530,5431,4617,2046,454,3842,9872,7565,9277,1014,4762,7575,2715,2443,7314,5983,1087,3316,7142,3701,9977,6202,7100,3669,2539,1361,850,1438,4069,7852,956,9599,3283,5573,1645,3737,5768,1518,1303,1397,2532,2417,8972,1599,4861,6287,3935,5948,9603,1077,9650,5933,7280,6750,9602,6171,4463,452,3961,8532,8304,4917,4483,7940,6842,6129,8029,2610,3999,5684,4007,2883,8102,9332,4483,2963,5619,4770,5263,1574,5847,4913,7507,9479,8015,3461,5650,8831,266,9611,7363,4922,880,1847,2862,7723,4328,7244,6685,8327,2928,7045,1210,1030,6377,2045,345,8348,6815,5609,9922,2663,6874,3782,8494,4890,3595,4145,3721,3861,108,7436,8784,7341,5635,7998,1416,9963,5242,8101,4642,4523,5146,5853,1905,7875,4250,2251,2575,1066,4212,8850,81,1086,2632,8575,2328,2579,9072,6049,6441,5533,9838,1577,2874,1825,5927,4290,8141,1170,8743,2783,2045,242,4988,3950,4469,9239,6201,7045,305,6765,5895,6738,7852,487
9,5313,180,7458,738,2582,251,6271,8772,8180,5497,597,4108,6139,5090,1630,4882,4226,3675,1476,9214,7625,5946,8453,179,2991,5110,6944,5238,1848,4796,6469,3514,1329,279,4252,263,6883,6875,9035,5063,2372,5984,9171,4863,1075,801,9745,5301,828,1222,867,8454,3520,5673,8633,2863,783,1929,8101,8984,6726,922,8850,4407,1201,9454,4670,8084,6329,3705,3148,5053,6041,8671,9916,7116,5825,6013,8769,6653,7235,9637,5107,7107,1662,92,9970,8797,2022,4423,7781,5100,5345,2983,9507,2899,2437,529,7335,8766,4234,483,171,275,5507,87,3744,1332,6101,8865,4337,9688,4854,9445,6796,6516,5889,6766,5314,4263,1190,9447,9363,2887,2431,5222,5786,4868,5751,3122,9987,6337,9957,158,2965,1816,6598,6709,3148,2699,1926,7486,8739,6781,3283,5535,9649,5524,8654,4963,9788,6196,4411,5503,9083,3194,726,1222,4414,2829,4344,753,9167,653,7264,2132,2470,3862,5193,1970,2913,7119,5808,1652,252,9091,3540,9902,968,2194,1217,7108,4742,1980,2611,3825,5174,9689,5047,5941,8871,5743,6694,8038,2749,3958,6522,5219,7820,8067,7189,7085,1538,9350,5090,1791,8441,8630,8045,5761,7176,9262,2869,1918,1243,5481,2095,2769,1522,3495,8710,393,9238,5405,4783,8339,9363,7657,3558,3536,5724,7100,6973,7263,6450,2063,5406,1243,7045,3451,7005,4221,9065,6226,2491,308,8059,4587,3078,9582,8082,8140,6327,3672,3545,1111,2012,9261,5120,1922,9149,845,9022,6122,4460,1824,4538,9866,3068,1583,9669,6425,5805,8734,9003,4648,5395,7063,9235,8473,2997,3669,2965,9324,3694,6511,6787,5706,2124,1908,3980,1273,9105,3003,3747,3565,1179,8285,9783,599,9869,9452,3376,2026,4538,2380,6674,9933,9443,2262,4758,8792,5931,7724,8116,9625,587,1256,1683,2711,9516,5664,3984,8621,5019,4083,2186,6198,2369,8321,3150,8590,4125,6526,616,8663,5258,3642,4949,4701,5904,6059,9845,8188,3783,7962,4165,722,5570,2201,3433,5086,7865,3769,3707,9236,7853,2245,1786,222,566,4936,8812,4691,7815,5780,9706,3073,5774,4655,4127,1679,7067,3972,6219,7202,8286,6736,7925,3856,8937,1358,8942,3154,1480,9001,2390,9333,1246,4177,5907,8164,5465,1071,2855,3280,6851,8914,2706,2625,9921,6833,656,6988,805,3227,4191,9092,9964,2116,9300,5253,9826,8243,8408,1306,3596,798,6991,4843,1327,9250,3007,3145,321,2215,2777,3524,7481,5483,2502,7402,2316,9510,4391,9474,2738,4934,4918,9054,7050,4218,4307,3228,8813,9067,887,2410,6218,7878,3605,7545,7129,2964,7042,3802,1531,9820,7327,9012,1655,9829,6415,324,9339,7158,9798,8429,2092,1068,3835,5494,5286,8143,5074,452,7210,5961,9214,3428,192,9171,7326,3673,2135,720,7475,19,540,1154,9031,2196,7335,1798,2520,6675,5308,8670,1456,7400,9738,5292,9246,1376,9787,4321,8180,3349,6634,7394,3130,6826,2917,456,499,1405,1176,4327,1424,8069,5481,6807,6617,2817,4958,9137,5844,266,4159,7300,4019,249,8944,3265,1625,8731,3938,6158,2081,573,9904,5211,7399,2822,2019,4251,4227,9547,8578,2003,7616,4059,8810,4233,3228,3768,9722,9072,387,233,2725,4406,482,1669,4023,8460,6753,4314,4618,8834,4887,4522,397,8638,3696,2416,2889,4275,8315,7819,6278,2284,1879,5089,2869,1459,5209,2592,532,1948,9177,3257,6354,9660,4926,6730,4472,1679,1044,9090,6865,5931,9964,3614,921,3661,2382,163,7936,698,7982,567,2982,6213,2008,5851,7673,3569,4795,8205,5518,3973,7814,8224,9985,9092,4954,4457,7124,5998,9899,3989,8281,6215,7604,5555,6228,6338,5718,517,7036,3700,1084,18,6266,3092,2222,3939,6661,7017,8496,2179,7342,6310,404,3679,5402,1710,4488,2526,4061,4387,2868,2342,6955,6824,4249,3183,3162,9967,3700,199,20,4784,6569,6286,4228,5143,225,890,8513,8721,9421,5855,5031,6177,5887,6785,4240,375,5664,8301,1115,8532,6995,8070,1708,1245,1253,4870,1212,1306,1421,1232,6090,4343,7518,6671,9486,4095,3913,7999,2816,9686,207,4199,5864,6094,7337,104,2821,3001,4757,3936,7885,1
752,8358,9593,2997,5964,815,562,7270,2237,1794,9712,6580,5665,6383,2418,6112,296,418,5281,9983,625,5832,2199,3071,3169,8655,2244,2522,3412,2533,407,5164,891,0,4514,6855,7168,5076,477,9405,3222,190,2337,5239,2925,1107,1352,3222,1525,6633,9557,8502,2465,1756,7925,1987,6763,170,4509,175,2703,1269,1691,3594,7621,2557,6802,4789,7633,3631,546,7208,173,2883,8799,3099,343,151,2673,1868,3136,2230,6723,1954,338,4648,3941,7101,1170,4802,3628,3873,6071,1671,3820,3693,4229,6974,8482,8214,605,5381,5422,7131,4616,4222,230,4959,725,2903,3180,214,5133,9903,2168,1823,903,2461,8924,2074,7263,8904,2299,9687,575,2471,9732,4804,9445,4566,9371,6403,9947,1145,3534,4564,1719,116,9523,2445,3019,2703,2659,4504,8958,1179,2679,6214,3640,1603,4640,7255,507,6939,3294,7434,9411,3026,8591,5208,7593,7962,7963,7540,9107,1497,8456,827,7965,7980,9624,984,7035,2283,1840,5994,9814,4519,2208,3454,2474,6848,7061,9333,139,356,6768,5902,3382,1711,1111,7327,9673,9074,1220,8780,6924,9676,9607,4889,4008,9231,2226,1044,7866,418,3390,7680,1290,1950,7486,116,5150,4548,9450,1641,1256,2570,7544,990,4281,8655,8318,306,4081,9538,9086,7357,5566,5046,8599,9575,629,825,6971,4848,7595,361,2528,8885,8663,6367,9002,3813,7267,4804,5454,8523,3726,2998,9513,4359,8005,4183,4665,8439,3721,103,5796,5640,5149,4395,5215,5779,1572,2186,627,9168,8899,9507,4405,7562,5874,9759,1375,9493,915,3181,8016,993,2532,3882,5352,537,8065,6369,5328,8139,6473,1125,3779,1622,1872,5346,3753,3445,7532,4380,8965,2783,240,3370,345,2466,9482,8072,1960,397,1253,6328,1391,137,6562,3095,675,4628,9465,2355,9119,5938,9832,9250,3912,1705,4596,7666,1502,8480,8398,467,1263,8638,189,7960,7457,9671,6032,9417,6421,3637,2097,4164,3775,8660,7259,802,9640,3076,3157,8759,9014,2990,8009,9279,1047,8957,6945,2549,7437,5343,9368,5052,334,9557,3012,7791,5581,5396,3560,8354,9033,5657,2518,9160,669,6129,9962,309,9206,9472,9068,4572,8814,3429,3851,9861,8738,7148,8762,6175,2492,8130,7579,9178,7687,6943,3321,9620,2339,6881,7974,7725,8890,492,3237,9560,2974,9552,9869,8532,9024,5290,3104,4190,5071,3308,4051,3810,456,9165,6337,9300,7295,3917,4830,4982,860,8151,955,9552,5032,8929,3629,275,5774,6866,9835,8748,6418,9704,7280,1794,1346,6736,5984,6418,44,6387,228,6853,5552,6565,2505,9199,6834,7336,534,7695,5487,1489,3599,6872,418,3580,7147,6192,446,6982,4940,3217,3038,8572,1363,737,5309,3700,7155,1705,87,3735,4910,1992,300,7416,1191,7135,4752,1725,1182,6591,9566,1133,9815,9985,4713,6962,2529,1511,296,7470,1080,9687,2394,2444,424,4055,6144,3931,5761,2583,7666,671,927,4318,4439,8471,7805,5543,6548,5339,2135,6115,6472,8302,6100,7537,1617,8629,5401,1913,2451,6481,7952,1198,5277,4728,5253,7773,8659,1014,357,2677,8038,1284,6996,2477,6107,4801,8021,2656,141,6508,8771,2965,4810,1223,6855,6427,9852,2256,4693,8656,8737,8997,9854,367,3726,5107,8140,8737,2474,8497,1415,6864,6134,8411,5693,2241,9564,3714,1249,6057,6574,20,5375,7737,1243,2230,516,7448,4486,1561,2456,9575,559,2310,9942,4285,3769,4435,3022,2595,9284,789,9459,5418,9200,5153,4012,5117,5219,5261,1174,8146,1634,6549,5883,2877,5131,2751,6677,9617,4313,9133,5545,1224,7795,5487,5509,7917,9922,4883,512,9207,5673,6324,977,1225,7829,1341,6342,9400,2955,3869,7546,4589,6770,9781,3818,1902,8885,496,1519,3198,9629,7064,4422,7425,8904,6283,5342,5178,7518,2206,737,9543,4882,1715,769,2711,9408,3463,2112,2363,7332,6010,3304,455,2144,7123,2357,1029,7619,228,579,3600,3645,1353,7377,8901,3988,2719,4079,1506,1278,4817,1050,6160,2884,8171,8872,8644,1634,7336,7360,5319,9698,664,2126,8194,7787,4483,9223,1758,1063,6154,1711,1060,7507,9088,9961,7847,8160,393,5706,9438,1562,6756,5598,79
8,1279,822,9442,9265,4510,6802,936,4209,7467,3062,2403,1606,3897,7979,9717,1313,4133,1428,2373,7993,516,2335,2192,8676,9080,7898,4466,642,1006,65,1440,2285,7239,882,7903,1750,7685,8839,2311,1504,8254,1066,9462,2151,9045,9179,9816,9531,607,2190,3876,7476,877,6068,2504,9957,3967,6971,6951,4973,3388,8391,3611,627,9273,1514,8729,3310,353,1040,1166,4959,2107,629,3463,7504,6160,3279,7035,6768,1821,7263,596,2698,3332,3100,9007,3651,6423,5958,4976,9811,701,8587,439,6327,101,9168,5989,6807,6561,7156,1766,8668,4137,5229,2524,297,4861,5912,3417,6682,3175,365,5733,2859,3466,1092,2862,9889,3403,7839,6053,4104,6426,6492,431,2880,2012,6421,9687,4925,9929,7805,9945,418,3035,2470,7067,7896,8382,485,930,7909,7202,6663,7121,668,7756,9983,6910,1159,4174,2963,5263,601,5807,2047,9833,4171,4820,9520,9097,1101,7325,9042,1519,6712,7864,4938,960,2598,1775,1891,508,8978,4906,3981,9646,2662,3964,2908,173,4491,5871,1789,5092,8030,3836,4925,2202,5008,797,7651,6109,4474,3045,3980,7539,910,8918,8499,3508,7046,6742,368,6024,1649,701,5670,4311,1018,4931,837,5509,802,2626,601,5185,2814,1878,7387,7822,9027,1390,283,9853,4435,615,7392,5345,5885,5892,5206,2931,8986,1926,8955,635,2628,978,1299,3646,5909,2136,9155,3063,4762,6108,8248,3928,4338,1987,1750,9717,3377,8385,9570,7813,5352,6963,9510,1237,9207,1068,4169,8193,2995,9476,5181,1975,454,6480,1973,2715,8616,1128,5779,9730,3588,379,10,4278,2367,1760,3995,2096,6497,9917,6261,1849,6880,5772,3086,2439,3192,3607,6985,2539,9436,2166,4514,9890,8646,6487,8958,3614,3967,4737,9696,3907,1468,9706,8185,187,7818,8532,2284,667,8450,8545,2516,1682,669,1954,4122,3862,1914,7459,2753,1350,9625,7268,7592,4623,107,6550,4589,427,7639,4285,4334,5460,343,8872,5647,8161,3756,4283,5180,2206,9181,7696,241,9850,6002,715,64,7916,8174,2818,5618,4151,6438,3211,8774,2897,6113,3363,3324,3753,4000,4011,9213,4343,9235,1212,8856,2991,5496,4036,1550,4677,8084,1791,879,4086,2506,944,8355,7032,114,3973,1183,2904,7184,9957,5801,9650,9672,5478,3403,3672,9489,8968,4367,5076,6532,3223,8067,2028,3611,5969,6705,1695,7760,3937,2133,6618,1233,488,3650,1347,4462,1185,603,7998,7494,6404,7648,7166,1882,7403,838,7723,6371,1557,2799,9256,4780,867,1284,4743,3188,4342,6438,949,8279,8572,3919,9512,9060,3922,7211,9874,5107,4166,7873,2602,570,5521,6120,8805,2925,3311,6528,5648,4868,9328,4904,9649,6547,2541,4392,9735,3235,7183,7036,1514,2107,7308,7378,7519,1230,941,7394,2689,5107,1619,1643,2029,7140,7764,834,6417,1075,3715,8418,5943,9395,9674,1944,2294,2215,2689,8381,5450,9872,5418,3316,8331,2726,7046,5850,308,7987,9596,2997,9446,1215,4641,7828,4708,8757,5014,7477,9832,8729,2247,5775,4476,1922,4072,6770,489,6761,5152,5940,2985,6922,5608,1316,9648,2655,3518,6308,6994,9467,5657,2793,7034,6650,621,1742,5407,5635,5572,5239,4365,7819,7367,8841,9741,1439,1964,231,8200,7116,2523,7537,4038,8131,5205,38,7138,8723,2698,4133,4542,8355,6926,1577,5006,7547,9671,413,9534,5243,2005,251,9415,9372,5445,9156,7163,7409,5739,1715,4525,4614,9252,4915,9098,4457,1305,6236,9532,4003,369,4075,2358,3647,5652,3716,7546,5323,482,7081,6919,2487,7332,6334,1859,2777,1842,5374,6538,7582,7089,7415,8548,6341,2330,7646,7150,9987,3883,3034,3990,604,7109,2701,604,9113,6417,8150,789,6899,1583,7708,5738,5268,4042,3949,4397,5884,9323,936,9818,6412,8351,8367,9105,7034,2365,6255,7021,2600,5642,7364,9557,2751,6417,161,8217,2834,4663,9006,6086,6247,6714,1824,7867,7108,2126,2264,9344,1449,3200,9163,7862,7904,3882,3319,1290,2599,5927,4663,5200,1569,8379,4757,672,4796,1270,8889,3983,5933,7895,69,8532,961,8245,6399,4421,371,5016,3766,8173,4568,9281,6035,8824,9515,5706,114,2114,16
33,4778,3666,3202,9509,8423,3875,4306,9693,9116,8289,1979,3364,4710,511,4325,9307,3263,5099,6031,8279,8865,4204,2847,4498,6591,1672,4013,2297,8138,2479,3931,9268,6146,3485,8778,4569,3712,3084,615,2829,7725,2594,6193,2435,9457,6870,1742,2720,1969,4125,7351,7186,8329,6551,8036,4920,4575,2049,3570,2713,881,3853,8334,7027,3690,7112,7948,7403,6548,4915,232,4273,3861,2777,3060,3319,5999,4802,2391,7969,8928,9743,1507,3609,2646,9544,4882,7221,7945,8452,6286,8826,8657,4620,2205,2347,1732,6506,9750,4632,1421,6334,8905,5283,9111,1965,4954,5111,3120,7345,9432,8400,3440,7291,8361,6086,6835,3243,9659,4781,8047,5946,9959,6704,566,8517,9052,8651,5023,8802,3283,2796,5137,8541,4431,600,6858,9385,2063,6330,3083,7847,1082,6523,5139,9444,8962,8326,2687,8621,9459,7087,4567,9419,3791,1486,4288,2843,137,9311,7998,9772,2107,3135,8313,6539,87,5172,2276,8503,7854,5359,6350,8937,8235,7841,4733,7197,6168,3772,2170,5627,859,3090,1398,4651,4576,5686,7494,4713,1349,1844,4485,3457,1331,9151,6348,1419,675,4976,6274,8529,6688,8976,3818,4923,6818,8551,8472,2986,2324,642,4965,3183,3732,6364,7834,8308,8402,1681,9373,9752,3525,211,9561,1209,9362,2261,2628,37,7237,5254,8566,3925,4230,2385,5200,1048,936,3672,386,3260,667,1704,2796,751,8068,6982,5412,2822,5015,4785,2574,4893,4996,2135,6102,4358,4396,5082,747,7986,336,9314,1911,4566,8051,7112,1967,5339,7136,2353,4952,7803,4057,7748,8555,8477,4730,3967,1300,6098,5104,3874,991,6453,2362,7093,7163,6758,2175,7911,4744,2511,3577,3008,3429,1628,6472,5396,3319,3608,7750,8271,7764,8159,2371,6319,2989,3454,6638,4289,9552,8094,4515,543,4547,6877,7636,8063,9988,6163,5974,1084,8674,5903,4092,2103,3883,6916,3852,7202,525,1602,1826,8289,6113,4197,960,9102,7651,3950,9743,7203,8396,611,4098,9296,7488,1734,7359,3828,4249,9685,4913,9275,5588,5357,7731,9471,2274,1583,3025,9151,9537,4851,3792,5650,9049,1104,1105,6700,1406,848,256,9802,1459,4354,9098,5300,2441,6457,5480,6690,6142,6745,5966,1730,2103,3697,7553,729,5280,579,6232,4817,5430,6376,6819,4479,7480,7924,7532,8886,5125,7788,8688,2936,8494,4139,4588,935,596,69,7626,3091,6814,9944,1173,5269,9993,8727,2350,1625,5658,4934,6442,1088,1310,9613,1920,5142,3890,5804,4028,9015,9944,9069,8303,8438,3208,2892,5726,156,9313,3352,3247,2479,9648,4421,7749,9641,9500,6451,1266,5158,1386,4060,2598,9048,3673,4518,4191,3915,6674,4571,2930,6618,3640,7586,1409,3200,6830,7135,3357,6143,6839,6604,8622,6487,7377,2723,2480,6877,9175,98,8387,6913,4158,986,2313,4183,1856,6504,8099,8531,1076,7381,1501,1068,4967,2910,4269,1797,45,7626,4292,3236,582,2915,9723,4312,1990,8555,7541,7517,8653,5929,782,9163,6915,3096,3347,5123,9600,7798,3654,7028,1531,1508,8097,6499,4418,8718,4648,816,2696,5293,4052,3278,4560,128,3942,6550,8683,1484,4068,3689,7413,4850,2852,680,4298,2551,5803,3899,349,5810,7279,1881,7318,5376,4732,8088,446,5732,5256,3142,1025,9309,2773,5585,5789,6715,8488,824,8199,8908,4513,5612,110,7366,2644,4409,9917,8448,4660,6619,610,1939,4852,7928,3668,5936,2368,4114,8020,7625,7257,9046,3286,30,983,9075,6745,5823,6251,1297,4731,765,6909,4842,4483,5906,9251,752,4354,3911,7371,4964,2202,8575,9244,5870,863,1612,9985,8884,5589,7242,4282,8875,3624,1617,4302,369,7441,554,8018,2172,7671,1280,3366,2154,7186,8969,2906,7892,9232,278,2856,1435,5205,8452,7305,6069,6416,7290,4953,2006,884,5587,7233,4508,7204,1536,1230,4645,8442,9248,3170,6113,528,6536,8267,7714,1858,1173,1958,1090,7803,4814,2525,9361,9618,9831,5430,6035,3473,6735,4393,4358,2322,1626,5218,9526,3162,2800,524,7956,8401,3694,4069,5281,6582,8688,2996,4792,6214,1306,5883,369,6121,4760,9730,2091,4591,1512,8126,4417,8247,8871,5127,
6921,498,345,2800,3660,9498,3324,7969,7899,3370,2038,3180,6304,727,2528,1097,3293,3835,3332,3662,6308,8092,3393,8399,9036,4905,2878,3453,9505,1749,8580,2778,8599,5277,5578,2260,4775,8902,229,9026,8624,8619,8559,4929,5698,1087,2378,8991,1274,5710,2654,7582,3802,2399,2334,2838,3656,1564,6291,3161,9665,1223,5940,8265,6501,7870,6877,7628,3125,3458,3007,1749,8429,1566,3030,4128,9005,5408,9471,280,1118,8477,4214,1273,876,2900,4111,4533,816,6755,4046,482,7978,6338,5099,831,4209,8328,4812,7334,1786,7819,5435,215,5737,8466,695,4742,226,6519,5022,1345,4996,5589,8970,2225,8489,3081,6758,5658,6188,7156,6140,4167,3495,7591,1350,4056,5919,6162,1390,4057,333,6825,624,6070,1643,7672,813,1870,4191,2187,9567,9187,7776,8537,7764,2618,7970,874,8276,4159,8031,768,4678,7878,4711,6028,1934,630,8543,9676,4687,5228,2853,5311,1299,4497,2983,8464,6367,3526,7003,5934,9066,1132,823,6830,3750,8793,7705,8378,2952,2088,5498,3982,9966,209,6363,8252,839,4906,7928,1878,6486,781,3541,7785,5278,2877,2601,7997,6403,9605,283,5469,737,1106,8652,839,9900,6357,5569,9204,8445,1067,9539,4763,7628,5902,3015,4819,7160,943,6697,3646,8076,6590,7784,9707,9467,385,7704,2223,6342,7988,4044,7079,5446,9048,4270,1698,5405,9839,7255,202,7258,6794,1317,4886,2696,4332,9705,9856,1627,2754,3502,6056,9345,7638,5763,5164,8024,3467,3739,4366,7807,4136,7798,9606,3184,2068,1304,8590,8260,4911,5144,5518,1705,2814,405,753,7146,110,609,5126,2865,464,7534,8562,8102,3297,3726,2478,3116,3818,3197,7276,7954,995,6882,1138,9415,4538,6080,7675,9450,1225,3194,7507,391,3599,8261,7537,61,5222,9015,9278,5686,6549,7840,141,6198,1567,8971,9315,5385,2168,2943,9691,9515,9825,829,8931,715,6910,6606,6517,4487,6152,4025,4878,6103,2286,8767,6165,7508,4135,5443,9547,7036,3284,6040,3235,1203,5011,2550,2940,7180,5493,2631,6695,1670,9812,1978,8737,6722,8585,5255,1209,1089,9280,2439,7193,7918,1207,9710,1778,5342,1505,7677,2378,4789,3717,1965,2344,8729,867,1636,2261,2712,619,5308,4382,432,7287,9472,3506,2224,4727,1068,3313,359,3507,6858,4629,1066,6568,6407,6408,8074,437,5139,9215,4154,7104,7912,9235,4324,5900,7848,7036,6520,3157,7771,3304,444,7243,6810,2668,1970,7878,2333,8681,7738,9192,3310,8804,2112,6069,1565,6538,6506,6704,5754,7013,160,18,6248,836,5918,449,7873,2438,3606,5644,2094,402,2887,8905,9422,1209,3135,1755,9890,873,7299,3200,9678,9412,9269,1243,2302,2128,4299,8056,9141,811,4426,1741,1648,345,2190,9521,9135,2148,1517,1230,2550,756,6487,1972,1965,9622,80,8207,496,7379,7759,6526,3143,3380,4121,5446,5508,8420,9854,1001,5583,633,2743,7231,978,4933,3104,6465,3434,4621,4047,5984,5377,534,4309,3694,157,4389,8253,7005,8120,2364,9883,7616,5745,4004,9414,7605,2424,5620,8607,8007,6253,7702,1591,3583,8987,4695,6401,2421,5669,448,8406,7398,983,9067,7445,7492,9808,5698,849,7928,8063,732,1896,160,4736,7662,4117,3512,3283,2724,7871,5888,6778,9462,5824,5766,510,2225,8187,6179,2673,2945,9929,8,2012,7374,7500,1820,3073,4701,6101,7488,5433,4349,4000,169,2012,8117,33,1647,7194,7905,7535,3972,7367,9711,9738,4229,1936,4278,408,962,7223,338,970,9236,4064,4823,7408,7137,9524,9861,977,4958,4211,1329,1479,2575,5799,1513,4222,2993,5770,8109,3317,3137,7821,9408]
n = [1,2,1]
#print(len(n))
s = "anagram"
t = "nagaram"
#print(max(n[0:2]))
p = Solution()
a = p.c()
#a = p.charge(28)
print(a)
#p.compare(A,B)
# [73, 74, 75, 71, 69, 72, 76, 73]
#print(s)
#print(test.lastSubstring("abab"))
|
[
"jlbczhangchao0800@126.com"
] |
jlbczhangchao0800@126.com
|
723e222cf53b147363a4bf4988a03938b087787c
|
ee341e95e484a8594f9942c920bf739b11b70658
|
/Job1/MapReduce/mapper.py
|
8be43816fbde16505668fa1db7d9d4d617c8bf12
|
[] |
no_license
|
FedericoCialini/Progetto1BigDataRomaTre
|
acbd7ac78c10448197abb85a59ccc6c73fb03477
|
84313d1d4613763c1276f33186eadbad950ddb19
|
refs/heads/master
| 2022-09-09T13:10:04.974717
| 2020-05-18T20:57:47
| 2020-05-18T20:57:47
| 258,604,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
#!/home/federico/anaconda3/bin/python
import sys


def mapping():
    # Hadoop Streaming mapper: read the CSV from stdin, drop the header row,
    # and emit tab-separated (Ticker, CloseValue, Volume, Date) for 2008 onwards.
    lines = sys.stdin.readlines()
    prices = lines[1:]  # skip the CSV header line
    for line in prices:
        Ticker, OpenValue, CloseValue, Adj_close, LowThe, HighThe, Volume, Date = line.strip().split(",")
        year = Date.split("-")[0]
        if year >= '2008':  # lexicographic comparison is safe for four-digit years
            print(Ticker, CloseValue, Volume, Date, sep='\t')


if __name__ == '__main__':
    mapping()
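# A quick local smoke test (illustrative only, not part of the original job): pipe a
# tiny CSV through stdin, e.g.
#   printf 'header\nAAPL,90,92,91,89,93,100000,2009-01-02\n' | python mapper.py
# which should emit one tab-separated record for the 2009 row.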
|
[
"federicocialini@gmail.com"
] |
federicocialini@gmail.com
|
88267b9d5edb8a48d3ceb3ce7f9c307f1a46e175
|
55965f592cb7e915cd68bd371ee1a6ad2a6e0247
|
/libmngmtsys.py
|
79288d746d1e8cdb428259f150297c49244931cb
|
[] |
no_license
|
Upasna4/Training
|
2b5b57fc3e5229304860f153db93d912a44472bf
|
33c6eeb565c422e40ea88d50af787f58b9f0da6d
|
refs/heads/master
| 2020-08-05T03:50:36.280910
| 2019-10-02T16:36:09
| 2019-10-02T16:36:09
| 212,383,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,733
|
py
|
memberData = {}   # member id -> member name
bookData = {}     # book id -> [book name, quantity]
borrowData = {}   # member id -> borrowed book id
m_id = 101
b_id = 201

print("Library Management System\n"
      "1.Add Member\n"
      "2.Add Book\n"
      "3.Book Borrowing\n"
      "4.Book Returning\n"
      "5.Member Status\n"
      "6.Book Status\n"
      "7.Exit")

while True:
    choice = int(input("Enter Choice: "))
    if choice == 1:
        print("Add Member Program")
        loop1 = True
        while loop1:
            name = input("Member Name: ")
            memberData.update({m_id: name})  # register the member under the next free id
            print("Member Added. Member id is: ", m_id)
            m_id += 1  # advance the member id counter
            while True:
                choice = input("Add more member (Y/N): ").lower().strip()
                if choice == 'y':
                    break
                elif choice == 'n':
                    loop1 = False
                    break
                else:
                    print("invalid choice")  # re-prompt until we get y or n
    elif choice == 2:
        print("Add Book Program")
        while True:
            name = input("Book Name: ")
            qty = int(input("Enter quantity: "))
            bookData.update({b_id: [name, qty]})  # store name and stock under the next book id
            print("Book Added. Book id is: ", b_id)
            b_id += 1
            choice = input("Add more book (Y/N): ").lower().strip()
            if choice == 'y':
                continue
            elif choice == 'n':
                break
    elif choice == 3:
        print("Book Borrowing Program")
        while True:
            mem_id = int(input("Member id: "))  # local name, so the m_id counter is untouched
            if mem_id in memberData:  # the member id must be present in memberData
                b_name = input("Book Name: ")
                for book_id, b_name_qty in bookData.items():  # iterate over key and value together
                    if b_name_qty[0] == b_name:  # index [0] of the value list is the name
                        if b_name_qty[1] > 0:  # index [1] is the quantity; lend only if stock remains
                            borrowData.update({mem_id: book_id})  # record the loan
                            bookData[book_id][1] -= 1  # one fewer copy on the shelf
                        else:
                            print("Book out of stock")
                        break
                else:
                    print("Book not present")  # for/else: no book matched the name
            choice = input("Borrow more books (Y/N): ").lower().strip()
            if choice == 'y':
                continue
            elif choice == 'n':
                break
    elif choice == 4:
        print("Book Returning Program")
        while True:
            mem_id = int(input("Member Id: "))
            name = input("Book Name: ")
            for book_id, b_name_qty in bookData.items():
                if b_name_qty[0] == name:
                    bookData[book_id][1] += 1  # the returned copy goes back into stock
                    borrowData.pop(mem_id, None)  # clear the member's loan record
                    break
            else:
                print("Book not present")
            choice = input("Return more books (Y/N): ").lower().strip()
            if choice == 'y':
                continue
            elif choice == 'n':
                break
    elif choice == 5:
        print("Member Status Program")
        mem_id = int(input("Member Id: "))
        if mem_id in memberData:  # the member must exist...
            if mem_id in borrowData:  # ...and currently hold a book
                book_id = borrowData[mem_id]  # look the borrowed book id up in the dict
                print("Member Name: ", memberData[mem_id])
                print("Borrowed Book Name: ", bookData[book_id][0])
    elif choice == 6:
        print("Book Status Program")
        book_id = int(input("Book Id: "))
        for mem_id, borrowed_id in borrowData.items():  # find the member who holds this book
            if borrowed_id == book_id:
                print("Member name:", memberData[mem_id])
                print("Book name:", bookData[book_id][0])
                print("Book issued to user:", memberData[mem_id])
    elif choice == 7:
        break
    else:
        print("invalid choice")
|
[
"upasnabhat17@gmail.com"
] |
upasnabhat17@gmail.com
|
cd83a748401283dfbf2bddb5137bb34063e8eb43
|
1825283527f5a479204708feeaf55f4ab6d1290b
|
/leetcode/python/50/50.powx-n.py
|
c24eb3b7c7bcc033fb5286680caebed06bbe3c0f
|
[] |
no_license
|
frankieliu/problems
|
b82c61d3328ffcc1da2cbc95712563355f5d44b5
|
911c6622448a4be041834bcab25051dd0f9209b2
|
refs/heads/master
| 2023-01-06T14:41:58.044871
| 2019-11-24T03:47:22
| 2019-11-24T03:47:22
| 115,065,956
| 1
| 0
| null | 2023-01-04T07:25:52
| 2017-12-22T02:06:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 802
|
py
|
#
# @lc app=leetcode id=50 lang=python3
#
# [50] Pow(x, n)
#
# https://leetcode.com/problems/powx-n/description/
#
# algorithms
# Medium (27.38%)
# Total Accepted: 281K
# Total Submissions: 1M
# Testcase Example: '2.00000\n10'
#
# Implement pow(x, n), which calculates x raised to the power n (x^n).
#
# Example 1:
#
#
# Input: 2.00000, 10
# Output: 1024.00000
#
#
# Example 2:
#
#
# Input: 2.10000, 3
# Output: 9.26100
#
#
# Example 3:
#
#
# Input: 2.00000, -2
# Output: 0.25000
# Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
#
#
# Note:
#
#
# -100.0 < x < 100.0
# n is a 32-bit signed integer, within the range [−2^31, 2^31 − 1]
#
#
#
class Solution:
    def myPow(self, x, n):
        """
        :type x: float
        :type n: int
        :rtype: float
        """
        # Binary (fast) exponentiation: square the base and halve the exponent,
        # O(log n) multiplications. A negative n inverts the base first.
        if n < 0:
            x = 1 / x
            n = -n
        result = 1.0
        while n:
            if n & 1:        # low bit set -> fold this power of x into the result
                result *= x
            x *= x           # square the base
            n >>= 1          # move to the next bit of the exponent
        return result
|
[
"frankie.y.liu@gmail.com"
] |
frankie.y.liu@gmail.com
|
7b5a81f5531be906c6c75c6ea6ee45ae41407e10
|
188950fb7b1fce4840b41e1e9454f0133a8d75ce
|
/src/Server/Controller/guess_controller.py
|
a2518f5c1fdefce113aeaec0371319b7b16a82fa
|
[] |
no_license
|
cloew/WordGuessAngular
|
3f5c6a1e0e14f6e905ec78a618b606ff3cb3e798
|
0d889cd3bb9cafe35a6e7e2ccba97914a26825b9
|
refs/heads/master
| 2021-01-01T05:53:26.776161
| 2014-09-01T14:55:39
| 2014-09-01T14:55:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
from Server.game_wrapper import GameWrapper
from kao_flask.controllers.json_controller import JSONController


class GuessController(JSONController):
    """ Controller to allow a player to guess the word for the current Round """

    def performWithJSON(self, gameId):
        game = GameWrapper(id=gameId)
        game.guess(self.json['guesses'])  # apply the player's guesses to the current round
        return game.toJSON()  # respond with the updated game state
|
[
"cloew123@gmail.com"
] |
cloew123@gmail.com
|
b7c80a21298c1316985aac7e42d5886a612a3783
|
a95f9fb15eccaf4c8a25549aeb52fb1bb517f8ce
|
/label_extractor.py
|
98bffcf2ee33da1d7d0999b0f1516d714e3bf091
|
[] |
no_license
|
Kirich2323/ml
|
441144b26eac19f10c0b773e6c9aff82fa58246d
|
5ced175d0e5ffedeb56edb07c809decc77e1154f
|
refs/heads/master
| 2020-03-11T18:27:33.204495
| 2018-04-26T01:10:47
| 2018-04-26T01:10:47
| 130,177,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
import xml.etree.ElementTree as ET
import re


class BaseLabelExtractor:
    def __init__(self, *args, **kwargs):
        pass

    def get_labels(self, data):
        # Map extract_label over every item in the input.
        ans = []
        for f in data:
            ans.append(self.extract_label(f))
        return ans


class ProblemExtractor(BaseLabelExtractor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def extract_label(self, item):
        # Item names look like '<team>-<problem>-<attempt>...'; the label is the problem.
        r = r'(.+)-(.+)-(\d+).*'
        m = re.search(r, item)
        return m.group(2)


class VerdictExtractor(BaseLabelExtractor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.xml = kwargs.get("xml", "")
        self.root = ET.parse(self.xml).getroot()
        self.teams = {}
        # Build teams[alias] = list of tasks, each a list of 'accepted' flags per solution.
        for session in self.root[0][1:]:
            #print(session.attrib['alias'])
            tasks = []
            for problem in session:
                task = []
                for solution in problem:
                    task.append(solution.attrib['accepted'])
                tasks.append(task)
            self.teams[session.attrib["alias"]] = tasks

    def extract_label(self, item):
        r = r'(.+)-(.+)-(\d+)\..*'
        m = re.search(r, item)
        print(item)
        print(m.group(1))
        print(m.group(2))
        print(m.group(3))
        print(self.teams[m.group(1)])
        print(self.teams[m.group(1)][ord(m.group(2)) - ord('a')])
        print(self.teams[m.group(1)][ord(m.group(2)) - ord('a')][int(m.group(3)) - 1])
        print('-' * 40)
        return self.teams[m.group(1)][ord(m.group(2)) - ord('a')][int(m.group(3)) - 1]
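# Hypothetical usage sketch (the XML path and file names are assumptions, not from the repo):
#   extractor = VerdictExtractor(xml="standings.xml")
#   labels = extractor.get_labels(["team1-a-1.cpp", "team1-b-2.cpp"])
# Each label is the 'accepted' attribute of the matching solution node in the standings.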
|
[
"ivadik2323@gmail.com"
] |
ivadik2323@gmail.com
|
b49df1cc1b4948c46ef6cec8398200ea89ae37fa
|
67048c855300ffc1fa192eee1da241d7f8e85682
|
/pizza.py
|
97cdb1c8caa56c039d6815b7062e06cb161ab1b7
|
[] |
no_license
|
JennymarBerroteran/Umbrella
|
07e4f286f46da749c04d59769bcef4cf763f9a95
|
c1271e156bf7657179e0f209353d77babe2a06ff
|
refs/heads/master
| 2020-09-08T08:25:09.823352
| 2019-11-24T22:27:57
| 2019-11-24T22:27:57
| 221,077,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
fav_pizza = ['pepperoni', 'cheeze', 'margarita']
for pizza in fav_pizza:
    print(f'I like {pizza} pizza \n')
print('I really love pizza')
|
[
"noreply@github.com"
] |
JennymarBerroteran.noreply@github.com
|