| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2813c76fd8142f01dc47556346966449e64e5b7
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4013/codes/1838_2601.py
|
417e44203e37d9917293fd0ef2f0c8f985410770
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
"""
Lab 07 – Exercicio 01
@author: IComp / UFAM
SISTEMAS DE EQUACOES LINEARES -- FRUTAS
"""
from numpy import *
from numpy.linalg import *
# Matriz do sistema linear (informado no enunciado)
frutas = array([[3 ,12 ,1 ], [12 ,0 ,2 ], [0 ,2 ,3 ]])
# Vetor de constantes (informado no enunciado)
compras = array([23.6, 52.6, 27.7])
compras = compras.T
# Resolucao do sistema de equacoes lineares
preco = dot(inv(frutas) ,compras )
# Imprime o preco de cada fruta
print("abacate: ", round(preco[0], 1))
print("banana: ", round(preco[1], 1))
print("caqui: ", round(preco[2], 1))
# Imprime nome da fruta mais cara
if preco[0] == max(preco):
print("abacate")
elif preco[1] == max(preco):
print("banana")
else:
print("caqui")
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
870edba20cc5a19b54a779e1a8e80d8ad968eff2
|
a0c121641badbdd4440b1725628657272eb77a9a
|
/backend/starterkit_react_nat_4177/settings.py
|
823d8efd44f33d221d84251639e65afbe724a8f6
|
[] |
no_license
|
crowdbotics-apps/starterkit-react-nat-4177
|
95c098827df45d035a8ab33ea68a886579eaa3b7
|
e49e8bbeb5acbc3581ce1d44e99c8885ba0bfe0f
|
refs/heads/master
| 2022-12-29T17:11:36.694888
| 2019-06-03T10:48:49
| 2019-06-03T10:48:49
| 189,987,096
| 0
| 0
| null | 2022-12-09T04:56:26
| 2019-06-03T10:48:34
|
Python
|
UTF-8
|
Python
| false
| false
| 4,603
|
py
|
"""
Django settings for starterkit_react_nat_4177 project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^v6fzl$gfc33_m2z3a8q_7!yfy%nnka@-=$98q90&w#x2y_fb('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'starterkit_react_nat_4177.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'starterkit_react_nat_4177.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
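
The settings above read `DEBUG` and `DATABASE_URL` through `django-environ`. A minimal sketch of how those values are parsed, with placeholder values that are purely illustrative and not taken from this repository:

```python
import os
import environ

# Hypothetical values for illustration only; a real deployment would set
# these in the process environment or an .env file.
os.environ.setdefault("DEBUG", "False")
os.environ.setdefault("DATABASE_URL", "postgres://user:password@localhost:5432/appdb")

env = environ.Env()
print(env.bool("DEBUG", default=False))  # -> False (string parsed to bool)
print(env.db("DATABASE_URL"))            # -> dict suitable for DATABASES['default']
```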
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
3ecced8742b8c25e2c9345120b6670a7cb202896
|
de382219afb2a5e5a8d0dae18d16d08d0f8a48dc
|
/Zvimzh0oyR5qrWAc/X2nIVduS4LOl7Goh.py
|
44e90283390b9e0ac041b20be8dff02821e2e0f0
|
[] |
no_license
|
urlib/OEV04H
|
1d862dfef7ec693bf31dd50e5c05b0bcefed423a
|
9f97b73481c3b6a0769ee4e3f07fbe4229be2b13
|
refs/heads/master
| 2021-05-19T18:12:13.600901
| 2020-04-15T03:21:34
| 2020-04-15T03:21:34
| 252,057,207
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34,673
|
py
|
[content omitted: 34,673 bytes of garbled, non-textual characters that could not be recovered]
|
[
"45290401+vmlankub@users.noreply.github.com"
] |
45290401+vmlankub@users.noreply.github.com
|
b430db917abd39393d7a014d363f4c1942deecfc
|
1e12a6f1957dc47c50845a39d626ea9a1a541268
|
/backend/articles_find/apps.py
|
c4222de14623a6ac8722d1b83e53a1c3fd53e12b
|
[] |
no_license
|
sungguenja/fincat-findog
|
6e7d276bcd8853300916987f70b0d159ba5cff4d
|
c62d17f64f4f1e8d86a982feb4842d3729b587c5
|
refs/heads/master
| 2023-01-03T02:11:56.612927
| 2020-10-24T16:47:48
| 2020-10-24T16:47:48
| 306,927,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
from django.apps import AppConfig
class ArticlesFindConfig(AppConfig):
name = 'articles_find'
|
[
"59605197+sungguenja@users.noreply.github.com"
] |
59605197+sungguenja@users.noreply.github.com
|
2774ca931c4fb36f4fd5bba0927a1f855829e46d
|
5deac2929f33b8f59d1a24cfc9935e129af03d55
|
/client/views.py
|
2c53f41ed4788386cd46326c3250701a9b12bb5f
|
[] |
no_license
|
limchyo/my-baemin-project
|
7f7d2ba4c2f3ea21021f0178b329fc9277da2dff
|
8ca367f93862cd68665823be97e0a939abe8eca5
|
refs/heads/master
| 2020-04-02T05:21:33.089407
| 2019-05-06T02:57:48
| 2019-05-06T02:57:48
| 154,070,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,367
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login as auth_login
from django.contrib.auth.models import User, Group
from partner.models import Partner, Menu
from .models import Client, Order, OrderItem
# Create your views here.
def index(request):
partner_list = Partner.objects.all()
ctx = {
"partner_list" : partner_list
}
return render(request, "main.html", ctx)
def login(request):
ctx = {"is_client":True}
return common_login(request, ctx, "client")
def signup(request):
ctx = {"is_client":True}
return common_signup(request, ctx, "client")
def common_signup(request, ctx, group):
if request.method == "GET":
pass
elif request.method == "POST":
username = request.POST.get("username")
email = request.POST.get("email")
password = request.POST.get("password")
user = User.objects.create_user(username, email, password)
target_group = Group.objects.get(name=group)
user.groups.add(target_group)
if group == "client":
Client.objects.create(user=user, name=username)
return render(request, "signup.html", ctx)
def common_login(request, ctx, group):
if request.method == "GET":
pass
elif request.method == "POST":
username = request.POST.get("username")
password = request.POST.get("password")
user = authenticate(username=username, password=password)
if user is not None:
if group not in [group.name for group in user.groups.all()]:
ctx.update({"error" : "접근 권한이 없습니다."})
# for group in user.groups.all():
# print("group:", group)
else:
auth_login(request, user)
next_value = request.GET.get("next")
if next_value:
return redirect(next_value)
else:
if group == "partner":
return redirect("/partner/")
else:
return redirect("/")
else:
ctx.update({"error" : "사용자가 없습니다."})
return render(request, "login.html", ctx)
def order(request, partner_id):
ctx = {}
# if request.user.is_anonymous or request.user.partner is None:
# return redirect("/partner/")
partner = Partner.objects.get(id=partner_id)
menu_list = Menu.objects.filter(partner=partner)
if request.method == "GET":
ctx.update({
"partner" : partner,
"menu_list" : menu_list,
})
elif request.method == "POST":
# menu_dict = {}
order = Order.objects.create(
client=request.user.client,
address="test",
)
for menu in menu_list:
menu_count = request.POST.get(str(menu.id))
# if int(menu_count) > 0:
# menu_dict.update({ str(menu.id): menu })
menu_count = int(menu_count)
if menu_count > 0:
item = OrderItem.objects.create(
order=order,
menu=menu,
count=menu_count
)
# order.items.add(menu)
return redirect("/")
return render(request, "order_menu_list.html", ctx)
|
[
"limchyo"
] |
limchyo
|
f52ee5a4aaa0914d04d664c4dcb82b28a89d86dc
|
88de1855cddc294bf7e23e000738b97e2ce8fe5d
|
/peek_core_user/tuples/UserListItemTuple.py
|
5a752e215948693949e21ff7c1bb1a8f70d8c6ef
|
[] |
no_license
|
Synerty/peek-core-user
|
cea121a5bc37552055eff7d9c25e621531435631
|
89c9b782a9f5c7ae042a1498062c30cc07efa8c8
|
refs/heads/master
| 2020-03-18T17:07:18.765974
| 2020-02-24T03:32:40
| 2020-02-24T03:32:40
| 135,007,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
import logging
from peek_core_user._private.PluginNames import userPluginTuplePrefix
from vortex.Tuple import addTupleType, Tuple, TupleField
logger = logging.getLogger(__name__)
@addTupleType
class UserListItemTuple(Tuple):
__tupleType__ = userPluginTuplePrefix + "UserListItemTuple"
#: The unique ID of the user
userId: str = TupleField()
#: The nice name of the user
displayName: str = TupleField()
@property
def userName(self) -> str:
return self.userId
@property
def userTitle(self) -> str:
return self.displayName
|
[
"jarrod.chesney@synerty.com"
] |
jarrod.chesney@synerty.com
|
673b373ff9bf06ec5086e31be8791ea1234a373d
|
e42e74393abeea9bd0a5ec8ee9dc7eb160495105
|
/videos/urls.py
|
445b39f6fcf6458dff8cd6ad434e6140979fbb6b
|
[] |
no_license
|
Raymond26/corsaclub
|
73850fcd23ec3231bfa6a5df86d9d521dbee69c2
|
54e3c4043208f9cf43500e16ce288a284a9cea4f
|
refs/heads/master
| 2022-12-16T03:28:44.043449
| 2017-10-30T00:06:38
| 2017-10-30T00:06:38
| 101,708,689
| 0
| 0
| null | 2022-12-08T00:40:25
| 2017-08-29T02:17:00
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
from django.conf.urls import url
from . import views
app_name = 'videos'
urlpatterns = [
url(r'^$', views.VideosIndexView.as_view(), name='index'),
]
|
[
"raymond.lau.ca@gmail.com"
] |
raymond.lau.ca@gmail.com
|
9b4b1d5db36bd08721a63faf53a5395392982993
|
33c929360ed609f8b636b449b6c9ae58187c9145
|
/shop/migrations/0001_initial.py
|
fd36d086aba2a0b3c5faa81a3b51e07c065b5186
|
[] |
no_license
|
moktan5/sample1
|
3a7fd5e030fd04368970aa8b5046636c40c59200
|
f551d033c1fef16cc47bc27dcbbbc2f249bcf6ff
|
refs/heads/master
| 2020-11-27T10:06:04.452986
| 2019-12-23T03:16:57
| 2019-12-23T03:16:57
| 229,394,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# Generated by Django 3.0 on 2019-12-18 10:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Department_name', models.CharField(max_length=30)),
],
),
]
|
[
"moktanroshan05@gmail.com"
] |
moktanroshan05@gmail.com
|
60a24d1ccad8ea503a5ddf179a3aed4655bdf401
|
4da168049a9a9e088f16d5b90b252f8f0e42a832
|
/deepfillv2-grayscale/utils.py
|
12d546a73cdd2d8c389544f3ffecb279955aab81
|
[] |
no_license
|
Fourier-Times/deepfillv2
|
72899d59cf2b1f960531a3850bb2e957f3689bd3
|
84a13c0cc06b98627a5c6060efe9fe852e010036
|
refs/heads/master
| 2023-05-10T07:47:24.458925
| 2021-06-20T14:46:48
| 2021-06-20T14:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,964
|
py
|
import os
import numpy as np
import cv2
import skimage.measure  # needed for compare_ssim() used in ssim() below (scikit-image < 0.18 API)
import torch
import torch.nn as nn
import torchvision as tv
import network
import dataset
# ----------------------------------------
# Network
# ----------------------------------------
def create_generator(opt):
# Initialize the networks
generator = network.GrayInpaintingNet(opt)
print('Generator is created!')
# Init the networks
if opt.finetune_path:
pretrained_net = torch.load(opt.finetune_path)
generator = load_dict(generator, pretrained_net)
print('Load generator with %s' % opt.finetune_path)
else:
network.weights_init(generator, init_type = opt.init_type, init_gain = opt.init_gain)
print('Initialize generator with %s type' % opt.init_type)
return generator
def create_discriminator(opt):
# Initialize the networks
discriminator = network.PatchDiscriminator(opt)
print('Discriminator is created!')
# Init the networks
network.weights_init(discriminator, init_type = opt.init_type, init_gain = opt.init_gain)
print('Initialize discriminator with %s type' % opt.init_type)
return discriminator
def create_perceptualnet():
# Pre-trained VGG-16
vgg16 = torch.load('vgg16_pretrained.pth')
# Get the first 16 layers of vgg16, which is conv3_3
perceptualnet = network.PerceptualNet()
# Update the parameters
load_dict(perceptualnet, vgg16)
# It does not gradient
for param in perceptualnet.parameters():
param.requires_grad = False
return perceptualnet
def load_dict(process_net, pretrained_net):
# Get the dict from pre-trained network
pretrained_dict = pretrained_net
# Get the dict from processing network
process_dict = process_net.state_dict()
# Delete the extra keys of pretrained_dict that do not belong to process_dict
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in process_dict}
# Update process_dict using pretrained_dict
process_dict.update(pretrained_dict)
# Load the updated dict to processing network
process_net.load_state_dict(process_dict)
return process_net
# ----------------------------------------
# PATH processing
# ----------------------------------------
def text_readlines(filename):
    # Try to read a txt file and return a list. Return [] if there was a mistake.
try:
file = open(filename, 'r')
except IOError:
error = []
return error
content = file.readlines()
# This for loop deletes the EOF (like \n)
for i in range(len(content)):
content[i] = content[i][:len(content[i])-1]
file.close()
return content
def savetxt(name, loss_log):
np_loss_log = np.array(loss_log)
np.savetxt(name, np_loss_log)
def get_files(path):
# read a folder, return the complete path
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
ret.append(os.path.join(root, filespath))
return ret
def get_jpgs(path):
# read a folder, return the image name
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
ret.append(filespath)
return ret
def text_save(content, filename, mode = 'a'):
# save a list to a txt
# Try to save a list variable in txt file.
file = open(filename, mode)
for i in range(len(content)):
file.write(str(content[i]) + '\n')
file.close()
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
# ----------------------------------------
# Validation and Sample at training
# ----------------------------------------
def sample(grayscale, mask, out, save_folder, epoch):
# to cpu
grayscale = grayscale[0, :, :, :].data.cpu().numpy().transpose(1, 2, 0) # 256 * 256 * 1
mask = mask[0, :, :, :].data.cpu().numpy().transpose(1, 2, 0) # 256 * 256 * 1
out = out[0, :, :, :].data.cpu().numpy().transpose(1, 2, 0) # 256 * 256 * 1
# process
masked_img = grayscale * (1 - mask) + mask # 256 * 256 * 1
masked_img = np.concatenate((masked_img, masked_img, masked_img), axis = 2) # 256 * 256 * 3 (√)
masked_img = (masked_img * 255).astype(np.uint8)
grayscale = np.concatenate((grayscale, grayscale, grayscale), axis = 2) # 256 * 256 * 3 (√)
grayscale = (grayscale * 255).astype(np.uint8)
mask = np.concatenate((mask, mask, mask), axis = 2) # 256 * 256 * 3 (√)
mask = (mask * 255).astype(np.uint8)
out = np.concatenate((out, out, out), axis = 2) # 256 * 256 * 3 (√)
out = (out * 255).astype(np.uint8)
# save
img = np.concatenate((grayscale, mask, masked_img, out), axis = 1)
imgname = os.path.join(save_folder, str(epoch) + '.png')
cv2.imwrite(imgname, img)
def psnr(pred, target, pixel_max_cnt = 255):
mse = torch.mul(target - pred, target - pred)
rmse_avg = (torch.mean(mse).item()) ** 0.5
p = 20 * np.log10(pixel_max_cnt / rmse_avg)
return p
def grey_psnr(pred, target, pixel_max_cnt = 255):
pred = torch.sum(pred, dim = 0)
target = torch.sum(target, dim = 0)
mse = torch.mul(target - pred, target - pred)
rmse_avg = (torch.mean(mse).item()) ** 0.5
p = 20 * np.log10(pixel_max_cnt * 3 / rmse_avg)
return p
def ssim(pred, target):
pred = pred.clone().data.permute(0, 2, 3, 1).cpu().numpy()
target = target.clone().data.permute(0, 2, 3, 1).cpu().numpy()
target = target[0]
pred = pred[0]
ssim = skimage.measure.compare_ssim(target, pred, multichannel = True)
return ssim
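
A small usage sketch for the `psnr` helper above, assuming the functions in this module are in scope and that both inputs are image tensors in the 0-255 range (random data here, purely for illustration):

```python
import torch

pred = torch.rand(1, 1, 256, 256) * 255    # fake prediction, 0-255 range
target = torch.rand(1, 1, 256, 256) * 255  # fake ground truth
print('PSNR (dB):', psnr(pred, target))    # higher means closer to the target
```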
|
[
"noreply@github.com"
] |
Fourier-Times.noreply@github.com
|
084d4d5e10e126fa20374f8005d8e413b52c1756
|
699d97058692216d3db3f6ac69da443baf79a6e0
|
/setup.py
|
d1755392be67b0068934093a3c81fe3cc2c734a4
|
[] |
no_license
|
TheHeadlessSourceMan/imageTools
|
137d7cab124226fa039d51e65076c60999abce10
|
7fc9cc388b677401b78514897bef4c88ce8a190a
|
refs/heads/master
| 2022-10-09T00:55:35.787079
| 2020-06-10T02:26:40
| 2020-06-10T02:26:40
| 271,157,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,765
|
py
|
# This is the setup info for the python installer.
# You probably don't need to do anything with it directly.
# Just run make and it will be used to create a distributable package
# for more info on how this works, see:
# http://wheel.readthedocs.org/en/latest/
# and/or
# http://pythonwheels.com
from setuptools import setup, Distribution
class BinaryDistribution(Distribution):
def is_pure(self):
        return True  # return False if there are OS-specific files
def cmdline(args):
"""
Run the command line
:param args: command line arguments (WITHOUT the filename)
"""
import os
here=os.path.dirname(os.path.realpath( __file__ ))
name='imgTools'
# See also: https://setuptools.readthedocs.io/en/latest/setuptools.html
setup(
name=name,
version='1.0',
description='Power tools for working with images',
long_description='Really, the funniest around.',
classifiers=[ # http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
#'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7', # written for python version
# 'Topic :: ', # file this under a topic
],
#url='http://myproduct.com',
#author='me',
#author_email='x@y.com',
#license='MIT',
packages=[name],
package_dir={name:here},
package_data={ # add extra files for a package
name:[]
},
distclass=BinaryDistribution,
install_requires=[], # add dependencies from pypi
dependency_links=[], # add dependency urls (not in pypi)
)
if __name__=='__main__':
import sys
cmdline(sys.argv[1:])
|
[
"theheadlesssourceman@gmail.com"
] |
theheadlesssourceman@gmail.com
|
1fe8813a734b75cd63eb54ad938df203434fced9
|
1a61ff08d483266b0a5cceed38271db710ab7352
|
/tests/type-inference/success/unit/OperatorEqualA.dhall
|
18bc42d87b1f78ab5cd29bbf13400d2470a88253
|
[
"BSD-2-Clause"
] |
permissive
|
dhall-lang/dhall-lang
|
663ea77bcccf9507bc0f20ef8deec6f0518ab4f9
|
f42d5552b6d20095df8e6e639658edfc24d16c8b
|
refs/heads/master
| 2023-09-03T20:31:00.311747
| 2023-08-29T15:04:57
| 2023-08-29T15:04:57
| 102,303,588
| 4,086
| 213
|
BSD-3-Clause
| 2023-09-10T18:57:54
| 2017-09-04T00:41:19
|
Dhall
|
UTF-8
|
Python
| false
| false
| 14
|
dhall
|
True == False
|
[
"noreply@github.com"
] |
dhall-lang.noreply@github.com
|
ed13a3278d74c0c75a3e185334500165d46be84a
|
1f74b25dd293694f61c2b2636efca4f3969263cc
|
/imd_weather/history.py
|
72f88def21913ebf93b1293a6d562b290dcd0f3d
|
[
"MIT"
] |
permissive
|
itzmeanjan/imd_weather
|
45da54d85d40119533e62534019149dcd44c389b
|
f58d63ef519dfa20a7168020ea988b79f49d14d7
|
refs/heads/master
| 2023-06-17T03:13:48.058019
| 2023-05-25T14:04:01
| 2023-05-25T14:04:01
| 162,822,714
| 0
| 2
|
MIT
| 2023-05-22T22:17:11
| 2018-12-22T15:38:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,842
|
py
|
#!/usr/bin/python3
from __future__ import annotations
from re import compile as reg_compile
from typing import Dict, Any
from datetime import time, datetime
class History(object):
def __init__(self, _max: str, departFromMax: str, _min: str, departFromMin: str, rainfall: str, relativeHumidityFirst: str, relativeHumidityFinal: str, sunset: str, sunrise: str, moonset: str, moonrise: str):
self.timestamp = datetime.now().timestamp()
reg = reg_compile(r'^(-?\d*\.?\d{1,})$')
tmp = reg.search(_max)
self.max = float(tmp.group()) if tmp else None
tmp = reg.search(_min)
self.min = float(tmp.group()) if tmp else None
tmp = reg.search(departFromMax)
self.departFromMax = float(tmp.group()) if tmp else None
tmp = reg.search(departFromMin)
self.departFromMin = float(tmp.group()) if tmp else None
tmp = reg.search(rainfall)
self.rainfall = float(tmp.group()) if tmp else None
tmp = reg.search(relativeHumidityFirst)
self.relativeHumidityAt08_30 = float(tmp.group()) if tmp else None
tmp = reg.search(relativeHumidityFinal)
self.relativeHumidityAt17_30 = float(tmp.group()) if tmp else None
self.sunset = time(*[int(i.strip(), base=10)
for i in sunset.split(':')])
self.sunrise = time(*[int(i.strip(), base=10)
for i in sunrise.split(':')])
self.moonset = time(*[int(i.strip(), base=10)
for i in moonset.split(':')])
self.moonrise = time(*[int(i.strip(), base=10)
for i in moonrise.split(':')])
def toJSON(self) -> Dict[str, Any]:
return {
'timestamp': self.timestamp,
'max': self.max,
'departFromMax': self.departFromMax,
'min': self.min,
'departFromMin': self.departFromMin,
'rainfall': self.rainfall,
'relativeHumidityAt08:30': self.relativeHumidityAt08_30,
'relativeHumidityAt17:30': self.relativeHumidityAt17_30,
'sunset': str(self.sunset),
'sunrise': str(self.sunrise),
'moonset': str(self.moonset),
'moonrise': str(self.moonrise)
}
@staticmethod
def fromJSON(data: Dict[str, Any]) -> History:
_hist = History(data.get('max'), data.get('departFromMax'), data.get('min'), data.get('departFromMin'), data.get('rainfall'), data.get(
'relativeHumidityAt08:30'), data.get('relativeHumidityAt17:30'), data.get('sunset'), data.get('sunrise'), data.get('moonset'), data.get('moonrise'))
_hist.timestamp = data.get('timestamp')
return _hist
if __name__ == '__main__':
print('[!]This module is designed to be used as a backend handler')
exit(0)
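
A small usage sketch for the `History` class above, with made-up readings (every value below is illustrative, not real IMD data); it assumes the module is importable as `imd_weather.history`, matching the path shown for this row:

```python
from imd_weather.history import History

hist = History(
    _max='32.5', departFromMax='1.2',
    _min='24.0', departFromMin='-0.5',
    rainfall='0.0',
    relativeHumidityFirst='78', relativeHumidityFinal='65',
    sunset='18:45', sunrise='05:30',
    moonset='20:10', moonrise='07:15',
)
print(hist.toJSON())  # numeric fields parsed to float, times to datetime.time
```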
|
[
"anjanroy@yandex.com"
] |
anjanroy@yandex.com
|
af7dcd0f4b002260e71a2aad5f674a804fdb83f0
|
5687800e74bffe1bba4dea31028db8815790572e
|
/Python/nextBiggerInt.py
|
eab090c4ea115d53461e09f2c1a61ccabb7c22ef
|
[] |
no_license
|
CutiePi/CodeWars
|
9fe7661e6c458f13654c12bf517bd9a3cb371c7d
|
7a876a2984a89873d0342500117d7b9a59152755
|
refs/heads/master
| 2022-05-30T04:42:33.396831
| 2022-05-20T13:16:44
| 2022-05-20T13:16:44
| 73,415,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
import math
# Find biggest number from right to left then promote once 1234567890
# 59884848459853 59884848498535 should equal 59884848483559
# algo to do
# find when next is smaller
# then take smallest previous that is bigger and replace
# take rest and order asc
def next_bigger(n):
number = list(str(n))
histo = []
for x in range(len(number) - 1, 0, -1):
histo.append(number[x])
if int(number[x]) > int(number[x - 1]):
for z in range(len(histo)):
if int(histo[z]) > int(number[x - 1]):
temp = number[x - 1]
number[x - 1] = histo[z]
histo[z] = temp
histo.sort()
number = int(''.join(number[0:x]) + ''.join(histo))
return -1 if number <= n else number
return -1
print(next_bigger(9))
print(next_bigger(111))
print(next_bigger(531))
print(next_bigger(144)) # 414
print(next_bigger(891))
print(next_bigger(12)); # 21
print(next_bigger(513)); # 531
print(next_bigger(2017)); # 2071
print(next_bigger(1234567890)) # 1234567908
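
For comparison, a minimal sketch of the classic next-permutation approach to the same "next bigger number with the same digits" problem (an independent reference implementation, not part of the original solution):

```python
def next_bigger_ref(n):
    digits = list(str(n))
    # 1. find the rightmost position i where digits[i] < digits[i + 1]
    i = len(digits) - 2
    while i >= 0 and digits[i] >= digits[i + 1]:
        i -= 1
    if i < 0:
        return -1  # digits are in descending order: no bigger permutation exists
    # 2. swap digits[i] with the smallest larger digit to its right
    j = len(digits) - 1
    while digits[j] <= digits[i]:
        j -= 1
    digits[i], digits[j] = digits[j], digits[i]
    # 3. sort the suffix ascending to get the smallest such number
    digits[i + 1:] = sorted(digits[i + 1:])
    return int(''.join(digits))

assert next_bigger_ref(2017) == 2071
assert next_bigger_ref(144) == 414
assert next_bigger_ref(531) == -1
```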
|
[
"jasmin.lapointe@hotmail.ca"
] |
jasmin.lapointe@hotmail.ca
|
da68b8b757d462db2ee1c64692840a2c100d8c73
|
1111d9c9c1e6013d4c669f2cacee54c25cb0019a
|
/utils/utils.py
|
b3eb9412583cb573b7303dc392087f84d2aee9ce
|
[] |
no_license
|
patconrey/18-660-project
|
c35fef61ade051ef505600cdb7a477ffbdd9ddd6
|
023c9ff4cce343a8311bb9542a2d8a1713b6a485
|
refs/heads/main
| 2023-05-03T08:10:20.285572
| 2021-05-13T17:28:20
| 2021-05-13T17:28:20
| 359,492,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
import sys
from functools import wraps
import logging
import os
import random
import time
from contextlib import contextmanager
from typing import Union
import numpy as np
import torch
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
@contextmanager
def timer(name: str, logger: Union[logging.Logger, None] = None):
t0 = time.time()
yield
msg = f'[{name}] done in {time.time()-t0:.3f} s'
if logger:
logger.info(msg)
else:
print(msg)
def tail_recursive(func):
self_func = [func]
self_firstcall = [True]
self_CONTINUE = [object()]
self_argskwd = [None]
@wraps(func)
def _tail_recursive(*args, **kwd):
if self_firstcall[0] == True:
func = self_func[0]
CONTINUE = self_CONTINUE
self_firstcall[0] = False
try:
while True:
result = func(*args, **kwd)
if result is CONTINUE: # update arguments
args, kwd = self_argskwd[0]
else: # last call
return result
finally:
self_firstcall[0] = True
else: # return the arguments of the tail call
self_argskwd[0] = args, kwd
return self_CONTINUE
return _tail_recursive
def get_experiment_id_from_cfg(cfg):
dataset=cfg.dataset
model=cfg.model.type
scheme=cfg.fed.type
heteroE = cfg.client_heterogeneity.should_use_heterogeneous_E
heteroD = cfg.client_heterogeneity.should_use_heterogeneous_data
iid = cfg.client_heterogeneity.iid
folder_to_save = './output/results/{}'.format(model)
if not os.path.exists(folder_to_save):
os.makedirs(folder_to_save, exist_ok=True)
id = '{}/model={}_scheme={}_heteroE={}_heteroD={}_iid={}_dataset={}'.format(folder_to_save, model, scheme, heteroE, heteroD, iid, dataset)
return id
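
A brief usage sketch for two of the helpers above, `seed_everything` and the `timer` context manager, assuming they are in scope; the seed and sleep duration are arbitrary:

```python
import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

seed_everything(seed=42)            # make random / numpy / torch reproducible
with timer('dummy work', logger):   # logs "[dummy work] done in X.XXX s"
    time.sleep(0.1)
```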
|
[
"patrickconrey@gmail.com"
] |
patrickconrey@gmail.com
|
87da12e139d6fe7b99ace7e9feadfaf2cff2a968
|
d50685a3f3d612349b1f6627ed8b807f0eec3095
|
/db/MysqlManage.py
|
6b710d45df57033dcd184a6d37d4b0cde7742ed0
|
[] |
no_license
|
Erich6917/python_littlespider
|
b312c5d018bce17d1c45769e59243c9490e46c63
|
062206f0858e797945ce50fb019a1dad200cccb4
|
refs/heads/master
| 2023-02-12T23:22:27.352262
| 2021-01-05T06:21:20
| 2021-01-05T06:21:20
| 113,631,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,075
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2017/9/18
# @Author : LIYUAN134
# @Site :
# @File : MysqlManage.py
# @Commment: Mysql数据操作管理
# 1、执行带参数的SQL时,请先用sql语句指定需要输入的条件列表,然后再用tuple/list进行条件批配
# 2、在格式SQL中不需要使用引号指定数据类型,系统会根据输入参数自动识别
# 3、在输入的值中不需要使用转意函数,系统会自动处理
# import Config
import sys
import MySQLdb
import psycopg2
from DBUtils.PooledDB import PooledDB
# reload(sys)
# sys.setdefaultencoding('utf-8')
"""
Config是一些数据库的配置文件
"""
class Mysql(object):
"""
MYSQL数据库对象,负责产生数据库连接 , 此类中的连接采用连接池实现获取连接对象:conn = Mysql.getConn()
释放连接对象;conn.close()或del conn
"""
# 连接池对象
__pool = None
@staticmethod
def __getConn():
"""
@summary: 静态方法,从连接池中取出连接
@return MySQLdb.connection
"""
# if Mysql.__pool is None:
# # mysql 连接
# __pool = PooledDB(creator=MySQLdb, mincached=1, maxcached=20,
# host="112.25.233.123", # Config.DBHOST ,
# port=6980, # Config.DBPORT ,
# user="root", # Config.DBUSER ,
# passwd="aaaaa888", # Config.DBPWD ,
# db="pavoice", # Config.DBNAME,
# charset="utf8", # Config.DBCHAR,
# )
# return __pool.connection()
# conn = psycopg2.connect(
# database='jadebloom',
# user='bloomopr',
# password='pg123',
# host='localhost',
# port='5432'
# )
conn = psycopg2.connect(
database='jadebloom',
user='bloomopr',
password='pg123',
host='192.168.1.132',
port='5432'
)
return conn
# return __pool.connection()
# use_unicode=False,
# cursorclass=DictCursor
def __init__(self):
# 数据库构造函数,从连接池中取出连接,并生成操作游标
try:
self._conn = Mysql.__getConn()
self._cursor = self._conn.cursor()
except Exception, e:
error = 'Connect failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
sys.exit()
# 针对读操作返回结果集
def _exeCute(self, sql=''):
try:
self._cursor.execute(sql)
records = self._cursor.fetchall()
return records
except MySQLdb.Error, e:
error = 'MySQL execute failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
# 针对更新,删除,事务等操作失败时回滚
def _exeCuteCommit(self, sql='', arg=None):
try:
if arg is None:
self._cursor.execute(sql)
else:
self._cursor.execute(sql, arg)
self._conn.commit()
except MySQLdb.Error, e:
self._conn.rollback()
error = 'MySQL execute failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
# sys.exit()
# 创建表
# tablename:表名称,attr_dict:属性键值对,constraint:主外键约束
# attr_dict:{'book_name':'varchar(200) NOT NULL'...}
# constraint:PRIMARY KEY(`id`)
def _createTable(self, table, attr_dict, constraint):
sql = ''
# sql_mid = '`row_id` bigint(11) NOT NULL AUTO_INCREMENT,'
sql_mid = ''
for attr, value in attr_dict.items():
sql_mid = sql_mid + '`' + attr + '`' + ' ' + value + ','
sql = sql + 'CREATE TABLE IF NOT EXISTS %s (' % table
sql = sql + sql_mid
sql = sql + constraint
sql = sql + ') ENGINE=InnoDB DEFAULT CHARSET=utf8'
print '_createTable:' + sql
self._exeCuteCommit(sql)
def insertOne(self, sql, value=None):
"""
@summary: 向数据表插入一条记录
@param sql:要插入的SQL格式
@param value:要插入的记录数据tuple/list
@return: insertId 受影响的行数
"""
self._exeCuteCommit(sql, value)
# return self.__getInsertId()
def _insert(self, table, attrs, value):
"""
@summary: 向数据表插入一条记录
@param attrs = [] :要插入的属性
@param value = [] :要插入的数据值
"""
# values_sql = ['%s' for v in attrs]
attrs_sql = '(' + ','.join(attrs) + ')'
value_str = self._transferContent(value)
values_sql = ' values(' + value_str + ')'
sql = 'insert into %s' % table
sql = sql + attrs_sql + values_sql
print '_insert:' + sql
self._exeCuteCommit(sql)
def _insertDic(self, table, attrs):
"""
@summary: 向数据表插入一条记录
@param attrs = {"colNmae:value"} :要插入的属性:数据值
"""
attrs_sql = '(' + ','.join(attrs.keys()) + ')'
value_str = self._transferContent(attrs.values()) # ','.join(attrs.values())
values_sql = ' values(' + value_str + ')'
sql = 'insert into %s' % table
sql = sql + attrs_sql + values_sql
print '_insert:' + sql
self._exeCuteCommit(sql)
# 将list转为字符串
def _transferContent(self, content):
if content is None:
return None
else:
Strtmp = ""
for col in content:
if Strtmp == "":
Strtmp = "\"" + col + "\""
else:
Strtmp += "," + "\"" + col + "\""
return Strtmp
def _insertMany(self, table, attrs, values):
"""
@summary: 向数据表插入多条数据
@param attrs = [id,name,...] :要插入的属性
@param values = [[1,'jack'],[2,'rose']] :要插入的数据值
"""
values_sql = ['%s' for v in attrs]
attrs_sql = '(' + ','.join(attrs) + ')'
values_sql = ' values(' + ','.join(values_sql) + ')'
sql = 'insert into %s' % table
sql = sql + attrs_sql + values_sql
print '_insertMany:' + sql
try:
for i in range(0, len(values), 20000):
self._cursor.executemany(sql, values[i:i + 20000])
self._conn.commit()
except MySQLdb.Error, e:
self._conn.rollback()
error = '_insertMany executemany failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
range(error)
# sys.exit()
def insertMany(self, sql, values=None):
"""
@summary: 向数据表插入多条记录
@param sql:要插入的SQL格式
@param values:要插入的记录数据tuple(tuple)/list[list]
@return: count 受影响的行数
"""
try:
if values is None:
count = self._cursor.executemany(sql)
else:
count = self._cursor.executemany(sql, values)
self._conn.commit()
except MySQLdb.Error, e:
self._conn.rollback()
error = 'MySQL execute failed! ERROR (%s): %s' % (e.args[0], e.args[1])
print error
# sys.exit()
return count
def _select(self, table, cond_dict='', order=''):
"""
@summary: 执行条件查询,并取出所有结果集
@cond_dict:{'name':'xiaoming'...}
@order:'order by id desc'
@return: result ({"col":"val","":""},{})
"""
consql = ' '
if cond_dict != '':
for k, v in cond_dict.items():
consql = consql + k + '=' + v + ' and'
consql = consql + ' 1=1 '
sql = 'select * from %s where ' % table
sql = sql + consql + order
print '_select:' + sql
return self._exeCute(sql)
def __getInsertId(self):
"""
获取当前连接最后一次插入操作生成的id,如果没有则为0
"""
self._cursor.execute("SELECT @@IDENTITY AS id")
result = self._cursor.fetchall()
return result[0]['id']
def __query(self, sql, param=None):
if param is None:
count = self._cursor.execute(sql)
else:
count = self._cursor.execute(sql, param)
return count
def getAll(self, sql, param=None):
"""
@summary: 执行查询,并取出所有结果集
@param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来
@param param: 可选参数,条件列表值(元组/列表)
@return: result list(字典对象)/boolean 查询到的结果集
"""
if param is None:
count = self._cursor.execute(sql)
else:
count = self._cursor.execute(sql, param)
if count > 0:
result = self._cursor.fetchall()
else:
result = False
return result
def getOne(self, sql, param=None):
"""
@summary: 执行查询,并取出第一条
@param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来
@param param: 可选参数,条件列表值(元组/列表)
@return: result list/boolean 查询到的结果集
"""
# pg 写法
self._cursor.execute(sql, param)
result = self._cursor.fetchone()
# mysql写法
if param is None:
count = self._cursor.execute(sql)
else:
count = self._cursor.execute(sql, param)
if count > 0:
result = self._cursor.fetchone()
else:
result = False
return result
def getMany(self, sql, num, param=None):
"""
@summary: 执行查询,并取出num条结果
@param sql:查询SQL,如果有查询条件,请只指定条件列表,并将条件值使用参数[param]传递进来
@param num:取得的结果条数
@param param: 可选参数,条件列表值(元组/列表)
@return: result list/boolean 查询到的结果集
"""
count = self.__query(sql, param)
if count > 0:
result = self._cursor.fetchmany(num)
else:
result = False
return result
def update(self, sql, param=None):
"""
@summary: 更新数据表记录
@param sql: SQL格式及条件,使用(%s,%s)
@param param: 要更新的 值 tuple/list
@return: count 受影响的行数
"""
return self._exeCuteCommit(sql, param)
def delete(self, sql, param=None):
"""
@summary: 删除数据表记录
@param sql: SQL格式及条件,使用(%s,%s)
@param param: 要删除的条件 值 tuple/list
@return: count 受影响的行数
"""
return self._exeCuteCommit(sql, param)
def begin(self):
"""
@summary: 开启事务
"""
self._conn.autocommit(0)
def end(self, option='commit'):
"""
@summary: 结束事务
"""
if option == 'commit':
self._conn.commit()
else:
self._conn.rollback()
def dispose(self, isEnd=1):
"""
@summary: 释放连接池资源
"""
if isEnd == 1:
self.end('commit')
else:
self.end('rollback')
self._cursor.close()
self._conn.close()
|
[
"1065120559@qq.com"
] |
1065120559@qq.com
|
0c8caff3e208a8d269df9db73e7151532349c43c
|
e2f275b4177e421fdaaabf9d3752bdc0c12c7e68
|
/df_user/models.py
|
c313cb3ddcb3e8f1021923e738cb40ee99fdc7d6
|
[] |
no_license
|
qcuser-y/dailyfreash
|
20cd081e796e2d869b39d16ced6d5c8e87df3f6f
|
d467eb26f3c2b981efc22694dab70dbf677be2f1
|
refs/heads/master
| 2020-04-13T14:30:03.235494
| 2018-12-27T08:20:50
| 2018-12-27T08:20:50
| 163,265,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
from django.db import models
class UserInfo(models.Model):
uname = models.CharField(max_length=20)
upwd = models.CharField(max_length=40)
uemail = models.CharField(max_length=30)
ushou = models.CharField(max_length=20, default='')
uaddress = models.CharField(max_length=100, default='')
uyoubian = models.CharField(max_length=6, default='')
uphone = models.CharField(max_length=11, default='')
|
[
"root@host.localdomain"
] |
root@host.localdomain
|
a7d86d144ebf9a64bf7cce02b2d1eb5c1da7e4c8
|
67084751337f327092049a61b6d8c630cd3767de
|
/scrape-Wikipedia-using-speech-recognition/Scrape_Wikipedia_using_speech_recognition.py
|
f5803f26b01f00a937caf05176433f64a4d04db5
|
[
"MIT"
] |
permissive
|
habereet/awesomeScripts
|
0f0a33d9a35de8b4449c1263db4407beaf0178fa
|
2d77f3619314aa7840fed57e0cf451fe5c1a70a9
|
refs/heads/master
| 2023-01-05T01:29:30.063154
| 2020-10-31T00:36:16
| 2020-10-31T00:36:16
| 303,189,908
| 0
| 0
|
MIT
| 2020-10-11T18:48:07
| 2020-10-11T18:48:06
| null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
import pyttsx3
import speech_recognition as SR
import wikipedia
import sys
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# for voice in voices:
# print(voice.id)
# select voice among the available options
# engine.setProperty('voice', voices[1].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def obey_command():
# It takes input from the microphone and returns output as a string
mic = SR.Recognizer()
with SR.Microphone() as source:
print("Listening...")
mic.pause_threshold = 1
audio = mic.listen(source)
try:
print("Recognizing...")
query = mic.recognize_google(audio, language='en-in')
print(query)
except Exception as e:
print(e)
print("Say that again please...")
return "None"
return query
if __name__ == "__main__":
query = obey_command().lower()
if 'wikipedia' in query:
speak("Searching wikipedia")
query = query.replace("wikipedia", "")
result = wikipedia.summary(query, sentences=2)
speak("According to wikipedia")
speak(result)
sys.exit()
|
[
"noreply@github.com"
] |
habereet.noreply@github.com
|
2ae864419014faa9e9d23903848f3729be4c63cd
|
f0b7e4eaccd93cc734afeb161eba4cadc9d8cbc5
|
/mgl2d/graphics/sprite.py
|
8eae23feb724be6b41eeb7004ee1cafd860e5cfd
|
[
"MIT"
] |
permissive
|
cklein/mgl2d
|
0f85cbf16df629cec98f637d4692e81ce6fa7f9a
|
04a951f5070b6d1d2412b8853f80ab8a39f3b662
|
refs/heads/master
| 2021-04-06T20:08:21.143777
| 2018-03-13T21:28:59
| 2018-03-13T21:28:59
| 125,385,684
| 0
| 0
|
MIT
| 2018-03-15T15:13:15
| 2018-03-15T15:13:15
| null |
UTF-8
|
Python
| false
| false
| 6,747
|
py
|
from mgl2d.graphics.frames_store import FramesStore
from mgl2d.graphics.quad_drawable import QuadDrawable
from mgl2d.math.vector2 import Vector2
class Sprite:
DEBUG = False
def __init__(self, frames_store):
self._frames_store = frames_store
self._x = 0
self._y = 0
self._flags = 0
self._angle = 0
self._scale = Vector2(1, 1)
# Collision detection
self._attack_box = None
self._hit_box = None
# Frames and animations
self._frame = None
self._animation = None
self._animation_name = None
self._animation_frame_index = None
self._animation_frame_delay = 0
self._animation_speed = 1
self._animating = False
# Drawing
self._drawable = QuadDrawable()
def draw(self, screen):
if self._frame is None:
return
self._drawable.pos = Vector2(self._x, self._y) # - camera.offset.x, self._y - camera.offset.y)
self._drawable.draw(screen)
# DEBUG boxes
if Sprite.DEBUG:
# TODO: !!!
pass
# anchor_x = self.frame.rect['x'] + self.frame.anchor['x'] - window_x
# anchor_y = self.frame.rect['y'] + self.frame.anchor['y'] - window_y
# pygame.draw.rect(surface, (255, 255, 255), pygame.Rect(anchor_x, anchor_y, 1, 1), 1)
# if self.hit_box and self.hit_box.w > 0 and self.hit_box.h > 0:
# pygame.draw.rect(surface, (0, 200, 0), self.hit_box.move(-window_x, -window_y), 1)
# if self.attack_box and self.attack_box.w > 0 and self.attack_box.h > 0:
# pygame.draw.rect(surface, (200, 0, 0), self.attack_box.move(-window_x, -window_y), 1)
def set_frame(self, frame_name):
self.stop_animation()
self._animation = None
self._frame = self._frames_store.get_frame(frame_name)
def stop_animation(self):
self._animation_frame_delay = 0
self._animation_frame_index = 0
self._animating = False
def play_animation(self, animation_name, flags=0, speed=1):
if (self._flags & FramesStore.FLAG_LOOP_ANIMATION) > 0 and \
self._flags == flags and animation_name == self._animation_name:
return
self._animating = True
self._animation_speed = speed
self._animation_name = animation_name
self._flags = flags
self._set_animation_frame(0)
def skip_to_last_animation_frame(self):
if not self._animating:
return
self._animating = False
self._set_animation_frame(len(self._animation.frames) - 1)
def update(self, game_speed):
self._update_collision_boxes()
if not self._animating:
return
if self._animation_frame_delay <= 0:
self.next_animation_frame()
return
else:
self._animation_frame_delay -= game_speed * self._animation_speed
return
def next_animation_frame(self):
new_animation_frame_index = self._animation_frame_index + 1
if new_animation_frame_index > len(self._animation.frames) - 1:
if not (self._flags & FramesStore.FLAG_LOOP_ANIMATION) > 0:
self._animating = False
return
else:
new_animation_frame_index = 0
self._set_animation_frame(new_animation_frame_index)
def previous_animation_frame(self):
new_animation_frame_index = self._animation_frame_index - 1
if new_animation_frame_index < 0:
new_animation_frame_index = len(self._animation.frames) - 1
self._set_animation_frame(new_animation_frame_index)
def _set_animation_frame(self, frame_index):
self._animation = self._frames_store.get_animation(self._animation_name)
self._animation_frame_index = frame_index
self.animation_frame = self._animation.frames[self._animation_frame_index]
new_frame = self._animation.frames[frame_index]
self._animation_frame_delay = new_frame.delay
self._frame = self._frames_store.get_frame(new_frame.frame_name)
# Override animation flip if the frame is also flipped
flags = self._flags
if self.animation_frame.flip_x:
flags |= FramesStore.FLAG_FLIP_X
if self.animation_frame.flip_y:
flags |= FramesStore.FLAG_FLIP_Y
# Updates the drawable
self._drawable.texture = self._frame.image
self._drawable.scale = Vector2(self._frame.rect.w, self._frame.rect.h).dot(self._scale)
self._drawable.anchor = self._frame.anchor.dot(self._scale)
self._drawable.flip_x = (flags & FramesStore.FLAG_FLIP_X > 0)
self._drawable.flip_y = (flags & FramesStore.FLAG_FLIP_Y > 0)
def _update_collision_boxes(self):
if not self._animating:
self._attack_box = None
self._hit_box = None
# TODO: flip_y should be handled as well
animation_frame = self._animation.frames[self._animation_frame_index]
flip_x = ((self._flags & FramesStore.FLAG_FLIP_X) > 0) ^ animation_frame.flip_x
if self._frame.hit_box:
self._hit_box = self._frame.hit_box.copy()
if flip_x:
self._hit_box.x = - self._hit_box.x - self._hit_box.width
self._hit_box.move_ip(self._x, self._y)
else:
self._hit_box = None
if self._frame.attack_box:
self._attack_box = self._frame.attack_box.copy()
if flip_x:
self._attack_box.x = - self._attack_box.x - self._attack_box.width
self._attack_box.move_ip(self._x, self._y)
else:
self._attack_box = None
@property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self, value):
self._y = value
@property
def angle(self):
return self._angle
@angle.setter
def angle(self, value):
self._angle = value
self._drawable.angle = value
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
self._scale = value
self._drawable.scale *= value
@property
def hit_box(self):
return self._hit_box
@property
def attack_box(self):
return self._attack_box
@property
def animating(self):
return self._animating
@property
def shader(self):
return self._drawable.shader
@shader.setter
def shader(self, shader):
self._drawable.shader = shader
|
[
"massimiliano.pesce@gmail.com"
] |
massimiliano.pesce@gmail.com
|
5c202747363960a6969c0c91b02ef30d046a2936
|
0b53affd1830ce1a9732d8d74900ef3711327f81
|
/rango/forms.py
|
63887e5209b733bb07ac7256ebaff8a6f51c1b06
|
[] |
no_license
|
Ross-Gardiner/tango_with_django_project
|
cee52b71425f91a02195096820561cb4c85104c0
|
4a8e7f1eaf6979473530dc468dff96322315ae4a
|
refs/heads/master
| 2021-05-09T23:48:57.023274
| 2018-02-08T23:09:58
| 2018-02-08T23:09:58
| 117,265,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,175
|
py
|
from django import forms
from rango.models import Page,Category,UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128,
help_text="Please enter the category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
#an inline class to provide additional information on the form.
class Meta:
#provide an association between the ModelForm and a model
model = Category
fields = ('name',)
class PageForm(forms.ModelForm):
title = forms.CharField(max_length=128,
help_text="Please enter the title of the page.")
url = forms.URLField(max_length=200,
help_text="Please enter the URL of the page.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
def clean(self):
cleaned_data = self.cleaned_data
url = cleaned_data.get('url')
#If url is not empty and doesn't start with 'http://',
#then prepend 'http://'.
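        # e.g. 'www.example.com' becomes 'http://www.example.com';
        # a URL that already starts with 'http://' is left unchanged.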
if url and not url.startswith('http://'):
url = 'http://' + url
cleaned_data['url'] = url
return cleaned_data
#An inline class to provide additional information on the form.
class Meta:
#provide an association between the ModelForm and a model
model = Page
#fields = ('title', 'url', 'views')
#What fields do we want to include in our form?
# This way we don't need every field in the model present.
# Some fields may allow NULL values, so we may not want to include them.
# Here, we are hiding the foreign key.
# we can either exclude the category field from the form,
exclude = ('category',)
# or specify the fields to include (i.e. not include the category field)
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
        fields = ('website', 'picture')
|
[
"2190583g@student.gla.ac.uk"
] |
2190583g@student.gla.ac.uk
|
e0292b98e66842b45678a1d8ec2d8c935ded43c4
|
33ed1cdd93ff1c2676a6fa7d08dc44ed74686348
|
/build.py
|
50e7783ea8f082207eb919db1ec53fad967f79e3
|
[
"Apache-2.0"
] |
permissive
|
CrackerCat/sspanel-mining
|
c623e77a8984dc92188f07acea96f7b8904fcc0f
|
3d7f3788d79a142b901effb6be3dc8e8be4887da
|
refs/heads/main
| 2023-08-19T20:28:57.117188
| 2021-10-21T01:44:41
| 2021-10-21T01:44:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,814
|
py
|
# -*- coding: utf-8 -*-
# Time : 2021/10/6 18:38
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import os
import shlex
import requests
from bs4 import BeautifulSoup
THIS_WALK = "."
CHROMEDRIVER_UNZIP_PATH = "./chromedriver"
def shell_echo(cmd: str, mode="default"):
"""
为了输出安全做的协调函数
:param cmd:
:param mode:
:return:
"""
if mode == "default":
return os.system(cmd)
if mode == "safe":
return os.system(shlex.quote(cmd))
def set_google_chrome():
# Google-chrome already exists in the current environment
if shell_echo("google-chrome --version") == 0:
# uninstall command
# os.system("sudo rpm -e google-chrome-stable")
return True
# installing Google Chrome on CentOS7
shell_echo("wget https://dl.google.com/linux/direct/google-chrome-stable_current_x86_64.rpm >/dev/null")
shell_echo("sudo apt localinstall google-chrome-stable_current_x86_64.rpm >/dev/null")
def set_chromedriver(unzip_path=None):
    # Directory where chromedriver will be installed after unzipping
unzip_path = "/usr/bin/chromedriver" if unzip_path is None else unzip_path
    # Read the installed google-chrome release version, e.g. 89.0.4389.23
chrome_version = "".join(os.popen("google-chrome --version").readlines()).strip().split(' ')[-1]
    # Query the chromedriver mirror
res = requests.get("http://npm.taobao.org/mirrors/chromedriver")
soup = BeautifulSoup(res.text, 'html.parser')
    # Filter the listed file names to find the download path of the matching version
options = [i.split('/')[0] for i in soup.text.split('\n') if i.startswith(chrome_version[:5])]
if len(options) == 1:
chromedriver_version = options[0]
else:
chromedriver_version = max(options)
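    # Example (illustrative): for google-chrome 89.0.4389.90 the prefix "89.0." is used,
    # so mirror entries such as 89.0.4389.23/ match and the highest matching version is taken.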
    # Download chromedriver
shell_echo(f"wget http://npm.taobao.org/mirrors/chromedriver/{chromedriver_version}"
"/chromedriver_linux64.zip >/dev/null")
    # Unzip chromedriver
shell_echo("unzip chromedriver_linux64.zip >/dev/null")
    # Busy-wait until the unzip has finished
while True:
if "chromedriver" not in list(os.walk(THIS_WALK))[0][-1]:
pass
else:
break
    # Make chromedriver executable
shell_echo("chmod +x chromedriver >/dev/null")
    # Move chromedriver to the preset install directory
shell_echo(f"mv -f chromedriver {unzip_path} >/dev/null")
def init_project():
print("---> Remove irrelevant information")
shell_echo("rm -rf chromedriver_linux64.zip")
shell_echo("rm -rf google-chrome-stable_current_x86_64.rpm")
shell_echo("clear")
def run():
set_google_chrome()
set_chromedriver(CHROMEDRIVER_UNZIP_PATH)
    # Clean up the downloaded archives
init_project()
if __name__ == '__main__':
run()
|
[
"62018067+QIN2DIM@users.noreply.github.com"
] |
62018067+QIN2DIM@users.noreply.github.com
|
5e5b2a57235da9ce4d1a85ac129dfbfba8fcc7f7
|
fd11d84e89814e94f781a307e59c3cf6d1de9f3c
|
/nomadgram/images/migrations/0005_auto_20181026_1930.py
|
cb286addd1bf3149c00342c34fe108bda32ba0d8
|
[
"MIT"
] |
permissive
|
eshellster/Cloning-instagram
|
089d268214f892d999628fb81975e4d55631bb9c
|
da6c7037629697238948ce22545c8299f25698ba
|
refs/heads/master
| 2021-11-20T09:33:21.523454
| 2018-12-27T07:10:44
| 2018-12-27T07:10:44
| 140,681,852
| 0
| 0
|
MIT
| 2021-09-08T00:09:06
| 2018-07-12T08:04:57
|
Python
|
UTF-8
|
Python
| false
| false
| 876
|
py
|
# Generated by Django 2.0.7 on 2018-10-26 10:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('images', '0004_auto_20180829_0720'),
]
operations = [
migrations.AddField(
model_name='image',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AlterField(
model_name='image',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='images', to=settings.AUTH_USER_MODEL),
),
]
|
[
"eshellster@gmail.com"
] |
eshellster@gmail.com
|
43f932da55f2fccf7ae22373fefcc94660462820
|
c51010ae8096aa515a4881e4d6344b75004c0d8c
|
/lobbyists/models.py
|
47a2916f744cec6697a984b647a210443d1cd02f
|
[
"BSD-2-Clause"
] |
permissive
|
nirfuzz/Open-Knesset
|
f4c74b0b3c838c0921ec024294124fb71884337d
|
5927f4f0b72f18a980571f3f93f423d45eff6280
|
refs/heads/master
| 2020-12-24T15:22:16.199979
| 2014-06-30T18:31:57
| 2014-06-30T18:31:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,739
|
py
|
# encoding: utf-8
from django.db import models
from django.core.cache import cache
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
class LobbyistHistoryManager(models.Manager):
def latest(self):
return self.filter(scrape_time__isnull=False).latest('scrape_time')
class LobbyistHistory(models.Model):
"""
this model allows to see an overview over time of the lobbyists in the knesset
to get the latest lobbyist history object, use LobbyistHistory.objects.latest
"""
scrape_time = models.DateTimeField(blank=True, null=True)
lobbyists = models.ManyToManyField('lobbyists.Lobbyist', related_name='histories')
objects = LobbyistHistoryManager()
@property
def corporations(self):
"""
Returns all the corporations associated with this point in time of the lobbyist history
Because it executes a lot of queries - it is cached for 1 day
TODO: optimize it
"""
corporation_ids = cache.get('LobbyistHistory_%d_corporation_ids' % self.id)
if not corporation_ids:
corporation_ids = []
for lobbyist in self.lobbyists.all():
corporation_id = lobbyist.cached_data['latest_corporation']['id']
if corporation_id not in corporation_ids:
corporation_ids.append(corporation_id)
cache.set('LobbyistHistory_%d_corporation_ids' % self.id, corporation_ids, 86400)
return LobbyistCorporation.objects.filter(id__in=corporation_ids)
@property
def main_corporations(self):
"""
Returns all the main corporations (e.g. without alias corporations and without 1 lobbyist corporations)
"""
alias_corporation_ids = [ca.alias_corporation.id for ca in LobbyistCorporationAlias.objects.all()]
return self.corporations.exclude(id__in = alias_corporation_ids)
def clear_corporations_cache(self):
cache.delete('LobbyistHistory_%d_corporation_ids' % self.id)
class Lobbyist(models.Model):
"""
this model represents a single lobbyist and is connected to the LobbyistHistory.lobbyists field
the lobbyist is connected to a single person
and has a source_id which is the id we get from the knesset
the actual lobbyist's details are stored in the LobbyistData model and related in the data field
the LobbyistData model allows to see changes in lobbyist data over time
to get just the latest data use - lobbyist.latest_data
"""
person = models.ForeignKey('persons.Person', blank=True, null=True, related_name='lobbyist')
source_id = models.CharField(blank=True, null=True, max_length=20)
@cached_property
def latest_data(self):
return self.data.filter(scrape_time__isnull=False).latest('scrape_time')
@cached_property
def latest_corporation(self):
return self.lobbyistcorporationdata_set.filter(scrape_time__isnull=False).latest('scrape_time').corporation
@cached_property
def cached_data(self):
data = cache.get('Lobbyist_cached_data_%s' % self.id)
if not data:
data = {
'id': self.id,
'display_name': unicode(self.person),
'latest_data': {
'profession': self.latest_data.profession,
'faction_member': self.latest_data.faction_member,
'faction_name': self.latest_data.faction_name,
'permit_type': self.latest_data.permit_type,
'scrape_time': self.latest_data.scrape_time,
},
'latest_corporation': {
'name': self.latest_corporation.name,
'id': self.latest_corporation.id,
},
}
cache.set('Lobbyist_cached_data_%s' % self.id, data, 86400)
return data
    def __unicode__(self):
        return unicode(self.person)
class LobbyistDataManager(models.Manager):
def latest_lobbyist_corporation(self, corporation_id):
return self.filter(corporation_id = corporation_id, scrape_time__isnull=False).latest('scrape_time')
def get_corporation_lobbyists(self, corporation_id):
lobbyists = []
for lobbyist in LobbyistHistory.objects.latest().lobbyists.all():
            if lobbyist.latest_data.corporation_id == corporation_id:
                lobbyists.append(lobbyist)
return lobbyists
class LobbyistData(models.Model):
"""
this model represents the data of a lobbyist in a certain point of time
it allows to see changes in a lobbyist details over time
if you just want the latest data from a lobbyist - get the latest record according to scrape_time
scrape_time might be null - that means the record is not fully scraped yet
"""
lobbyist = models.ForeignKey('lobbyists.Lobbyist', blank=True, null=True, related_name='data')
scrape_time = models.DateTimeField(blank=True, null=True)
source_id = models.CharField(blank=True, null=True, max_length=20)
first_name = models.CharField(blank=True, null=True, max_length=100)
family_name = models.CharField(blank=True, null=True, max_length=100)
profession = models.CharField(blank=True, null=True, max_length=100)
corporation_name = models.CharField(blank=True, null=True, max_length=100)
corporation_id = models.CharField(blank=True, null=True, max_length=20)
faction_member = models.CharField(blank=True, null=True, max_length=100)
faction_name = models.CharField(blank=True, null=True, max_length=100)
permit_type = models.CharField(blank=True, null=True, max_length=100)
represents = models.ManyToManyField('lobbyists.LobbyistRepresent')
objects = LobbyistDataManager()
def __unicode__(self):
return '%s %s'%(self.first_name, self.family_name)
class LobbyistCorporationManager(models.Manager):
def current_corporations(self):
return LobbyistHistory.objects.latest().corporations
class LobbyistCorporation(models.Model):
"""
This represents a lobbyist corporation
the source_id is the corporation's het-pey
    each lobbyist corporation has a group of lobbyists - this can change over time, so it is represented in the LobbyistCorporationData model
to get the latest data use lobbyist_corporation.latest_data
"""
name = models.CharField(blank=True, null=True, max_length=100)
source_id = models.CharField(blank=True, null=True, max_length=20)
objects = LobbyistCorporationManager()
@cached_property
def latest_data(self):
return self.data.filter(scrape_time__isnull=False).latest('scrape_time')
@property
def lobbyists_count(self):
return self.latest_data.lobbyists.count()
@property
def combined_lobbyists_count(self):
lobbyists_count = self.lobbyists_count
for ca in LobbyistCorporationAlias.objects.filter(main_corporation__id=self.id):
lobbyists_count = lobbyists_count + ca.alias_corporation.combined_lobbyists_count
return lobbyists_count
@property
def combined_lobbyist_ids(self):
lobbyist_ids = [l.id for l in self.latest_data.lobbyists.all()]
for ca in LobbyistCorporationAlias.objects.filter(main_corporation__id=self.id):
for l in ca.alias_corporation.combined_lobbyist_ids:
if l not in lobbyist_ids:
lobbyist_ids.append(l)
return lobbyist_ids
@cached_property
def alias_corporations(self):
alias_corporation_ids = [ac.alias_corporation.id for ac in LobbyistCorporationAlias.objects.filter(main_corporation=self)]
return LobbyistCorporation.objects.filter(id__in = alias_corporation_ids)
@cached_property
def cached_data(self):
data = cache.get('LobbyistCorporation_cached_data_%s' % self.id)
if not data:
data = {
'id': self.id,
'name': self.name,
'source_id': self.latest_data.source_id,
'combined_lobbyists_count': self.combined_lobbyists_count,
'combined_lobbyist_ids': self.combined_lobbyist_ids,
}
cache.set('LobbyistCorporation_cached_data_%s' % self.id, data, 86400)
return data
def clear_cache(self):
cache.delete('LobbyistCorporation_cached_data_%s' % self.id)
def __unicode__(self):
return self.name
class LobbyistCorporationAliasManager(models.Manager):
def create(self, *args, **kwargs):
if len(args) > 1:
for id in args[1:]:
kwargs = {'main_corporation_id': args[0], 'alias_corporation_id': id}
super(LobbyistCorporationAliasManager, self).create(**kwargs)
else:
return super(LobbyistCorporationAliasManager, self).create(**kwargs)
class LobbyistCorporationAlias(models.Model):
"""
    In the source data there are sometimes different corporations
    which are actually the same one.
    For example - there are sometimes typos in the corporation id which cause
    our scraper to think they are different corporations.
    This model lets us link these corporations so we can treat them as the same corporation.
"""
main_corporation = models.ForeignKey('lobbyists.LobbyistCorporation', related_name='lobbyistcorporationalias_main')
alias_corporation = models.ForeignKey('lobbyists.LobbyistCorporation', related_name='lobbyistcorporationalias_alias', unique=True)
objects = LobbyistCorporationAliasManager()
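    # Usage sketch (hypothetical primary keys): mark corporation 1 as the main record
    # and corporations 2 and 3 as its aliases, via the manager's varargs create():
    # LobbyistCorporationAlias.objects.create(1, 2, 3)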
class LobbyistCorporationData(models.Model):
"""
This represents data about a corporation which might change over time
currently the only relevant data is the lobbyists which are members of the corporation
"""
corporation = models.ForeignKey('lobbyists.LobbyistCorporation', blank=True, null=True, related_name='data')
scrape_time = models.DateTimeField(blank=True, null=True)
name = models.CharField(blank=True, null=True, max_length=100)
source_id = models.CharField(blank=True, null=True, max_length=20)
lobbyists = models.ManyToManyField('lobbyists.Lobbyist')
def __unicode__(self):
return self.name
class LobbyistRepresent(models.Model):
"""
this model represents a single represent record and is connected to the LobbyistData represents field
each lobbyist data has a set of representations, this model is a single representation
    the source_id identifies this representation and lets us track how it changes over time
the actual data is in the LobbyistRepresentData model and related here in the data field
if you want just the current representation data, get the latest record according to scrape_end_time
"""
source_id = models.CharField(blank=True, null=True, max_length=20)
name = models.CharField(blank=True, null=True, max_length=100)
@property
def latest_data(self):
return self.data.filter(scrape_time__isnull=False).latest('scrape_time')
def __unicode__(self):
return self.latest_data.name
class LobbyistRepresentData(models.Model):
"""
the lobbyist represents data, related to LobbyistRepresent model
allows to see changes of lobbyist representation details over time
"""
lobbyist_represent = models.ForeignKey('lobbyists.LobbyistRepresent', blank=True, null=True, related_name='data')
scrape_time = models.DateTimeField(blank=True, null=True)
source_id = models.CharField(blank=True, null=True, max_length=20)
name = models.CharField(blank=True, null=True, max_length=100)
domain = models.CharField(blank=True, null=True, max_length=100)
type = models.CharField(blank=True, null=True, max_length=100)
|
[
"ori@uumpa.com"
] |
ori@uumpa.com
|
f85b0044003de1bf8d34b777e0382b5af6b38743
|
bb7d79d220f51016523fe04dcf83486fba874c4f
|
/ConfFile_cfg_kLong_directional_ArabellaSample.py
|
37784c4b4f755f083854101bf68df21fbb80a460
|
[] |
no_license
|
Saptaparna/FastTiming
|
019f77abe1ff8b2f0aaa533ac97640e48970ce93
|
bafa435469ae2eb8a77f461f7a04547101d6fce0
|
refs/heads/master
| 2021-12-23T09:50:25.382581
| 2021-12-03T19:44:05
| 2021-12-03T19:44:05
| 53,109,671
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,019
|
py
|
import FWCore.ParameterSet.Config as cms
from RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi import dEdX_weights, HGCalRecHit
from RecoLocalCalo.HGCalRecProducers.HGCalUncalibRecHit_cfi import HGCalUncalibRecHit
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import hgceeDigitizer, hgchefrontDigitizer, hgchebackDigitizer
process = cms.Process("HGCTimingWithTOA")
process.load("CondCore.CondDB.CondDB_cfi")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.Geometry.GeometryExtended2023D13Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2023D13_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
# get uncalibrechits with weights method
process.load("RecoLocalCalo.HGCalRecProducers.HGCalUncalibRecHit_cfi")
# get rechits e.g. from the weights
process.load("RecoLocalCalo.HGCalRecProducers.HGCalRecHit_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1))
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/GammaTime_step3/step3_Gamma_Pt10_n1000_part1_directional.root',
'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/GammaTime_step3/step3_Gamma_Pt10_n1000_part2_directional.root',
'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/GammaTime_step3/step3_Gamma_Pt10_n1000_part3_directional.root',
'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/GammaTime_step3/step3_Gamma_Pt10_n1000_part4_directional.root'
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part1_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part2_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part3_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part4_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part5_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part6_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part7_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part8_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part9_directional.root',
#'file:/uscms_data/d1/sapta/work/HighGranularityCalorimeter/TimingStudies_9X/CMSSW_9_1_0_pre3/src/kLongTime_step3/step3_kLong_Pt10_n1000_part10_directional.root'
)
)
process.TFileService = cms.Service("TFileService", fileName = cms.string('HGCTiming_Gamma_Pt10_SoverN1000ps_Floor20ps_EE_FH_Test.root'))
#process.TFileService = cms.Service("TFileService", fileName = cms.string('HGCTiming_kLong_Pt10_SoverN1000ps_Floor20ps_EE_FH_Test.root'))
process.content = cms.EDAnalyzer("EventContentAnalyzer")
process.hgctiming = cms.EDAnalyzer('HGCTimingAnalyzerWithTOA',
HGCEE_keV2fC = hgceeDigitizer.digiCfg.keV2fC,
HGCHEF_keV2fC = hgchefrontDigitizer.digiCfg.keV2fC,
HGCHB_keV2MIP = hgchebackDigitizer.digiCfg.keV2MIP,
dEdXweights = cms.vdouble(dEdX_weights),
thicknessCorrection = cms.vdouble(HGCalRecHit.thicknessCorrection),
HGCEE_fCPerMIP = cms.vdouble(HGCalUncalibRecHit.HGCEEConfig.fCPerMIP),
HGCEE_noisefC = cms.vdouble(hgceeDigitizer.digiCfg.noise_fC),
HGCEF_noisefC = cms.vdouble(hgchefrontDigitizer.digiCfg.noise_fC),
HGCBH_noiseMIP = hgchebackDigitizer.digiCfg.noise_MIP,
srcGenParticles = cms.InputTag('genParticles'),
srcSimTracks = cms.InputTag('g4SimHits'),
srcSimVertices = cms.InputTag('g4SimHits'),
srcPFRecHit = cms.InputTag('particleFlowRecHitHGC', 'Cleaned'),
srcPFCluster = cms.InputTag('particleFlowClusterHGCal'),
srcRecHitEE = cms.InputTag('HGCalRecHit', 'HGCEERecHits'),
srcRecHitHEF = cms.InputTag('HGCalRecHit', 'HGCHEFRecHits'),
srcRecHitBH = cms.InputTag('HGCalRecHit', 'HGCHEBRecHits'),
srcCaloParticle = cms.InputTag('mix', 'MergedCaloTruth'),
srcPartHandle = cms.InputTag('mix','MergedTrackTruth'),
TriggerGeometry = cms.PSet(
TriggerGeometryName = cms.string('HGCalTriggerGeometryImp1'),
L1TCellsMapping = cms.FileInPath("L1Trigger/L1THGCal/data/cellsToTriggerCellsMap.txt"),
),
)
process.p = cms.Path(process.hgctiming)
|
[
"saptaparna@gmail.com"
] |
saptaparna@gmail.com
|
99a56220bba3c76fdaed603a25bf16557645d756
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_BoldBB_2.py
|
9698bb482d48752cc9d6fae3bd21070072767ccf
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 628
|
py
|
def flip(string, pos):
tran = {"+":"-","-":"+"}
return "".join(tran[i] for i in reversed(string[:pos])) + string[pos:]
def count(l, check = "+"):
i = 0
while "-" in l and check in l:
i += 1
for k, c in enumerate(l):
if c == check:
l = flip(l, k)
break
check = tran[check]
if check == "+" and "-" in l:
l = flip(l, len(l))
i += 1
return i
with open("B-small-attempt0.in", "r") as f:
i = 1
tran = {"+":"-","-":"+"}
i = 0
for l in f.read().split("\n")[1:]:
ori = l
if len(l) > 0:
i += 1
print "Case #{}: {}".format(str(i), str(min(count(l, "+"), count(l, "-"))))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
225768ed7fa42a7d5703faabb806f47dd3a93b87
|
e231faae5d375db058cc563fb385d9cd486148dd
|
/core/tests/__init__.py
|
b5b26d3e443678de38563f0f387ff522cfd5710e
|
[] |
no_license
|
Smarsh/norc
|
0cea9cf2ed9860d4ef2d587a6fb173ca4f02c591
|
c3608c97f6d6c292689e3d693c2a3e211df24138
|
refs/heads/master
| 2020-12-24T22:06:09.765846
| 2013-04-05T18:12:00
| 2013-04-05T18:12:00
| 9,247,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
from task_test import *
from job_test import *
from schedule_test import *
from scheduler_test import *
from executor_test import *
from queue_test import *
from norc import settings
settings.BACKUP_SYSTEM = None
|
[
"elnaril@gmail.com"
] |
elnaril@gmail.com
|
007be06c8d58b684c8d500fa0c16caacf908a6fd
|
4f0fdcd1e71ae7b5cb764f3d7a11fd27446ccf2e
|
/python_scripts/UI_scripts/display_counterpy.py
|
77937021e6e3ea88cd47cca27834fe2de52365a8
|
[] |
no_license
|
kedartatwawadi/socket_programming_test
|
0c7313179850ed9b8932132aaf9990901e2e4c3b
|
392741c603b3114ff6dc11125befb02e78e55c51
|
refs/heads/main
| 2023-03-03T04:38:51.655380
| 2021-02-12T03:48:49
| 2021-02-12T03:48:49
| 334,535,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,784
|
py
|
#!/usr/bin/env python3
# Filename: pycalc.py
"""PyCalc is a simple calculator built using Python and PyQt5."""
import sys
# Import QApplication and the required widgets from PyQt5.QtWidgets
from PyQt5.QtWidgets import QApplication, QToolBar, QAction
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QWidget, QLabel
from PyQt5.QtCore import Qt, QProcess
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from functools import partial
class BoxWidget:
def __init__(self, id_text, width=90, height=70):
self.widget = QWidget()
self.layout = QVBoxLayout()
# Set some display's properties
self.display = QLineEdit()
self.display.setFixedHeight(height)
self.display.setAlignment(Qt.AlignCenter)
self.display.setReadOnly(True)
# create label
self.label = QLabel(f"ID: {id_text}")
self.label.setAlignment(Qt.AlignCenter)
# add label and display
self.layout.addWidget(self.display)
self.layout.addWidget(self.label)
self.widget.setLayout(self.layout)
self.widget.setFixedHeight(150)
def setDisplayText(self, text):
"""Set display's text."""
self.display.setText(text)
self.display.setFocus() # ?
def displayText(self):
"""Get display's text."""
return self.display.text()
def clearDisplay(self):
"""Clear the display."""
self.setDisplayText("")
# Create a subclass of QMainWindow to setup the calculator's GUI
class TemperatureUI(QMainWindow):
"""PyCalc's View (GUI)."""
def __init__(self):
"""View initializer."""
super().__init__()
# Set some main window's properties
self.setWindowTitle("DataLogger")
self.setFixedSize(800, 400)
# Set the central widget and the general layout
self.generalLayout = QVBoxLayout()
self._centralWidget = QWidget(self)
self.setCentralWidget(self._centralWidget)
self._centralWidget.setLayout(self.generalLayout)
        # Create the box displays and the toolbars
        self._createBoxes()
self._createActions()
self._createToolBars()
self.p = QProcess() # Keep a reference to the QProcess (e.g. on self) while it's running.
self.p.start("python3", ["../server_excel.py"])
self.p.readyReadStandardOutput.connect(self.handle_server_logs)
def _createBoxes(self):
"""Create the buttons."""
self.boxes = {}
boxesLayout = QGridLayout()
# Button text | position on the QGridLayout
_boxes = {
"001": (0, 0),
"002": (0, 1),
"003": (0, 2),
"004": (0, 3),
"005": (1, 0),
"006": (1, 1),
"007": (1, 2),
"008": (1, 3),
}
# Create the buttons and add them to the grid layout
for _id, pos in _boxes.items():
self.boxes[_id] = BoxWidget(_id)
boxesLayout.addWidget(self.boxes[_id].widget, pos[0], pos[1])
# Add buttonsLayout to the general layout
self.generalLayout.addLayout(boxesLayout)
def setBoxText(self, box_id, text):
"""Set display's text."""
self.boxes[box_id].display.setText(text)
    def clearBoxText(self, box_id):
        """Clear the display of the given box."""
        self.setBoxText(box_id, "")
def _createActions(self):
# File actions
self.settingsAction = QAction("Settings", self)
self.exitAction = QAction("Exit", self)
def _createToolBars(self):
# File toolbar
fileToolBar = self.addToolBar("File")
fileToolBar.addAction(self.settingsAction)
fileToolBar.addAction(self.exitAction)
fileToolBar.setMovable(False)
def handle_server_logs(self):
data = self.p.readAllStandardOutput()
stdout = bytes(data).decode("utf-8")
print(stdout)
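        # Assumed log line format emitted by server_excel.py: "LOGGER::,<box_id>,<text>",
        # e.g. "LOGGER::,001,23.5" updates box "001" with the value "23.5".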
for line in stdout.splitlines():
if line.startswith("LOGGER::"):
msgs = line.split(",")
box_id = msgs[1]
text = msgs[2]
self.setBoxText(box_id, text)
def closeEvent(self, event):
self.p.terminate()
# Client code
def main():
"""Main function."""
# Create an instance of QApplication
pycalc = QApplication(sys.argv)
# Show the calculator's GUI
view = TemperatureUI()
view.show()
# Create instances of the model and the controller
# model = evaluateExpression
# PyCalcCtrl(model=model, view=view)
# Execute the calculator's main loop
sys.exit(pycalc.exec())
if __name__ == "__main__":
main()
|
[
"kedar@deckard.attlocal.net"
] |
kedar@deckard.attlocal.net
|
600d4d27dd1fe477a93cb8d078bc980329fcd54d
|
c968bf29ab28c8650091fefaeb7b8839924c765b
|
/Homework/2/Week2_quiz.py
|
32943ae4c3b137070dca9b6df60ed00d25514ee4
|
[] |
no_license
|
rohit-rk/Mongodb
|
e3b81a2a96903d6dbd7e5c5bcbe47e3ba99c4c04
|
b22133b747f8dd5e4ed4e7e5236f59b7ed236116
|
refs/heads/master
| 2020-04-06T04:01:52.577405
| 2017-03-08T15:40:32
| 2017-03-08T15:40:32
| 83,071,204
| 0
| 0
| null | 2017-03-02T13:23:55
| 2017-02-24T18:32:31
|
Python
|
UTF-8
|
Python
| false
| false
| 9,627
|
py
|
# -*- coding: utf-8 -*-
# What does the following fragment of JavaScript output?
x = { "a" : 1 };
y = "a";
x[y]++;
print(x.a);
# 2
# Which of the following are types available in BSON?
# Strings
# Floating-point numbers
# Arrays
# Objects (Subdocuments)
# Timestamps
# insert a document into the fruit collection with the attributes of "name" being
# "apple", "color" being "red", and "shape" being "round". use the "insert" method.
db.fruit.insert({name:"apple",color:"red",shape:"round"});
#Use findOne on the collection users to find one document where the key username is
#"dwight", and retrieve only the key named email.
db.users.findOne({username:"dwight"},{email:true,_id:false});
# Supposing a scores collection similar to the one presented, how would you find all
# documents with type: essay and score: 50 and only retrieve the student field?
db.scores.find({type:"essay",score:50},{student:true,_id:false});
# Which of these finds documents with a score between 50 and 60, inclusive?
db.scores.find({ score : { $gte : 50 , $lte : 60 } } );
# Which of the following will find all users with name between "F" and "Q" (Inclusive)?
db.users.find( { name : { $gte : "F" , $lte : "Q" } } );
db.users.find( { name : { $lte : "Q" , $gte : "F" } } );
# Write a query that retrieves documents from a users collection where the name has a "q"
# in it, and the document has an email field.
db.users.find({name:{$regex:"q"},email:{$exists:true}});
# Which of the following documents would be returned by this query?
{ _id : 42 , name : "Whizzy Wiz-o-matic", tags : [ "awesome", "shiny" , "green" ] }
{ _id : 1040 , name : "Snappy Snap-o-lux", tags : "shiny" }
# How would you find all documents in the scores collection where the score is less than 50
# or greater than 90?
db.scores.find({$or:[{score:{$lt:50}},{score:{$gt:90}}]});
# What will the following query do?
db.scores.find( { score : { $gt : 50 }, score : { $lt : 60 } } );
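# Answer: because "score" appears twice in the same query document, the second condition
# overwrites the first, so this finds all documents with score less than 60.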
# Which of the following documents matches this query?
# db.users.find( { friends : { $all : [ "Joe" , "Bob" ] }, favorites : { $in : [ "running" , "pickles" ] } } )
{ name : "Cliff" , friends : [ "Pete" , "Joe" , "Tom" , "Bob" ] , favorites : [ "pickles", "cycling" ] }
# Suppose a simple e-commerce product catalog called catalog with documents that look like this:
{ product : "Super Duper-o-phonic",
price : 100000000000,
reviews : [ { user : "fred", comment : "Great!" , rating : 5 },
{ user : "tom" , comment : "I agree with Fred, somewhat!" , rating : 4 } ],
... }
# Write a query that finds all products that cost more than 10,000 and that have a rating of 5 or better.
db.catalog.find({price:{$gt:10000},"reviews.rating":{$gte:5}})
# Recall the documents in the scores collection:
{
"_id" : ObjectId("50844162cb4cf4564b4694f8"),
"student" : 0,
"type" : "exam",
"score" : 75
}
# Write a query that retrieves exam documents, sorted by score in descending order, skipping the first 50
# and showing only the next 20.
db.scores.find({type:"exam"}).sort({score:-1}).skip(50).limit(20);
# How would you count the documents in the scores collection where the type was "essay" and the score was
# greater than 90?
db.scores.count({type:"essay", score:{$gt:90}});
# Let's say you had a collection with the following document in it:
{ "_id" : "Texas", "population" : 2500000, "land_locked" : 1 }
# and you issued the query:
db.foo.update({_id:"Texas"},{population:30000000})
# What would be the state of the collection after the update?
{ "_id" : "Texas", "population" : 30000000 }
# For the users collection, the documents are of the form
{
"_id" : "myrnarackham",
"phone" : "301-512-7434",
"country" : "US"
}
# Please set myrnarackham's country code to "RU" but leave the rest of the document (and the rest of the
# collection) unchanged.
db.users.update({_id:"myrnarackham"},{$set:{country:"RU"}});
# Write an update query that will remove the "interests" field in the following document in the users collection.
{
"_id" : "jimmy" ,
"favorite_color" : "blue" ,
"interests" : [ "debating" , "politics" ]
}
# Do not simply empty the array. Remove the key : value pair from the document.
db.users.update({_id:"jimmy"},{$unset:{interests:1}});
# Suppose you have the following document in your friends collection:
{ _id : "Mike", interests : [ "chess", "botany" ] }
# What will the result of the following updates be?
db.friends.update( { _id : "Mike" }, { $push : { interests : "skydiving" } } );
db.friends.update( { _id : "Mike" }, { $pop : { interests : -1 } } );
db.friends.update( { _id : "Mike" }, { $addToSet : { interests : "skydiving" } } );
db.friends.update( { _id : "Mike" }, { $pushAll: { interests : [ "skydiving" , "skiing" ] } } );
{ 1_id : "Mike", interests : ["botany","skydiving","skydiving" , "skiing" ] }
# After performing the following update on an empty collection
db.foo.update( { username : 'bar' }, { '$set' : { 'interests': [ 'cat' , 'dog' ] } } , { upsert : true } );
# What could be a document in the collection?
{ "_id" : ObjectId("507b78232e8dfde94c149949"), "interests" : [ "cat", "dog" ], "username" : "bar" }
# Recall the schema of the scores collection:
{
"_id" : ObjectId("50844162cb4cf4564b4694f8"),
"student" : 0,
"type" : "exam",
"score" : 75
}
# Give every document with a score less than 70 an extra 20 points.
db.scores.update({score:{$lt:70}}, {$inc:{score:20}}, {multi:true})
# Recall the schema of the scores collection:
{
"_id" : ObjectId("50844162cb4cf4564b4694f8"),
"student" : 0,
"type" : "exam",
"score" : 75
}
# Delete every document with a score of less than 60.
db.scores.remove({score:{$lt:60}});
# In the following code snippet:
import pymongo
import sys
# establish a connection to the database
# note this uses the now deprecated Connection class, as we did in the lecture.
# MongoClient is the preferred way of connecting.
connection = pymongo.Connection("mongodb://localhost", safe=True)
# get a handle to the school database
db=connection.school
scores = db.scores
try:
xxxx
except:
print "Unexpected error:", sys.exc_info()[0]
print doc
# please enter the one line of python code that would be needed in in place of xxxx to find one document
# in the collection.
doc = scores.find_one()
# Which of the following could work using Pymongo, depending on variable names, to select out just the
# student_id from the scores collection using a find command.
cursor = scores.find({},{'student_id':1,'_id':0})
# In the following code, what is the correct line of code, marked by xxxx, to search for all quiz scores
# that are greater than 20 and less than 90.
import pymongo
import sys
# establish a connection to the database
connection = pymongo.Connection("mongodb://localhost", safe=True)
# get a handle to the school database
db=connection.school
scores = db.scores
def find():
print "find, reporting for duty"
query = xxxx
try:
iter = scores.find(query)
except:
print "Unexpected error:", sys.exc_info()[0]
return iter
find()
query = {'type':'quiz', 'score':{'$gt':20,'$lt':90}}
# In the following code, what do you think will happen if a document that matches the query doesn't
# have a key called media.oembed.url?
import pymongo
import sys
# establish a connection to the database
connection = pymongo.Connection("mongodb://localhost", safe=True)
# get a handle to the reddit database
db=connection.reddit
stories = db.stories
def find():
print "find, reporting for duty"
query = {'media.oembed.type':'video'}
projection = {'media.oembed.url':1, '_id':0}
try:
iter = stories.find(query, projection)
except:
print "Unexpected error:", sys.exc_info()[0]
sanity = 0
for doc in iter:
print doc
sanity += 1
if (sanity > 10):
break
find()
# Pymongo will return a document with the following structure {media:{oembed:{}}}
# Supposed you had the following documents in a collection named things.
{ "_id" : 0, "value" : 10 }
{ "_id" : 2, "value" : 5 }
{ "_id" : 3, "value" : 7 }
{ "_id" : 4, "value" : 20 }
# If you performed the following query in pymongo:
# cursor = things.find().skip(3).limit(1).sort('value',pymongo.DESCENDING)
# which document would be returned?
# The document with _id=2
# Do you expect the second insert below to succeed?
# get a handle to the school database
db=connection.school
people = db.people
doc = {"name":"Andrew Erlichson", "company":"10gen",
"interests":['running', 'cycling', 'photography']}
try:
people.insert(doc) # first insert
del(doc['_id'])
people.insert(doc) # second insert
except:
print "Unexpected error:", sys.exc_info()[0]
# Yes, because the del call will remove the _id key added by the pymongo driver in the first insert.
# In the following code fragment, what is the python expression in place of xxxx to set a new key
# "examiner" to be "Jones" Please use the $set operator
def using_set():
print "updating record using set"
# get a handle to the school database
db=connection.school
scores = db.scores
try:
# get the doc
score = scores.find_one({'student_id':1, 'type':'homework'})
print "before: ", score
# update using set
scores.update({'student_id':1, 'type':'homework'},
xxxx)
score = scores.find_one({'student_id':1, 'type':'homework'})
print "after: ", score
except:
print "Unexpected error:", sys.exc_info()[0]
raise
xxxx = {'$set':{'examiner':'Jones'}}
|
[
"rohitkumar.a255@gmail.com"
] |
rohitkumar.a255@gmail.com
|
06918e9676f2e999c618f0b098f8a258b6e9c580
|
b796121152a74b1d14feea77a5e038b133cc841d
|
/tests/HandTest.py
|
a6a23ed57c64359e13c66646bac342147eb0df2a
|
[] |
no_license
|
jgreenwd/poker_cards
|
552a7eea5e87ed4099cb505bf153726ef7376985
|
095e7668017acf3ac1c4df65c25aedc916726adb
|
refs/heads/main
| 2023-02-12T23:07:56.562042
| 2021-01-01T22:01:13
| 2021-01-01T22:01:13
| 323,415,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,518
|
py
|
import unittest
from Hand import Hand
from Card import Card
from Deck import Deck
from Rank import Rank
deck = Deck()
# line 0: hand in order low to high
# line 1: hand with same ranks reversed (different suits)
# line 2: hand with same rank, but line2 > line1, ie. line2 wins in a tie
test_hands = [
# 5 card hands
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(7, 'H')), # 7 high [0]
Hand(Card(7, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(7, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(8, 'H')), # 8 high [3]
Hand(Card(8, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(8, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(9, 'H')), # 9 high [6]
Hand(Card(9, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(9, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(10, 'H')), # 10 high [9]
Hand(Card(10, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(10, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(11, 'H')), # Jack high [12]
Hand(Card(11, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(11, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(12, 'H')), # Queen high [15]
Hand(Card(12, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(12, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(5, 'H'), Card(13, 'H')), # King high [18]
Hand(Card(13, 'D'), Card(5, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(13, 'H'), Card(6, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(3, 'S'), Card(4, 'C'), Card(6, 'H'), Card(14, 'H')), # Ace high [21]
Hand(Card(14, 'D'), Card(6, 'D'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S')),
Hand(Card(14, 'H'), Card(7, 'H'), Card(4, 'C'), Card(3, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(2, 'S'), Card(3, 'C'), Card(4, 'H'), Card(5, 'H')), # one pair [24]
Hand(Card(5, 'D'), Card(4, 'D'), Card(3, 'S'), Card(2, 'C'), Card(2, 'H')),
Hand(Card(6, 'H'), Card(4, 'H'), Card(3, 'C'), Card(2, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(2, 'S'), Card(3, 'C'), Card(3, 'H'), Card(4, 'H')), # two pair [27]
Hand(Card(4, 'D'), Card(3, 'D'), Card(3, 'S'), Card(2, 'C'), Card(2, 'H')),
Hand(Card(5, 'H'), Card(3, 'H'), Card(3, 'C'), Card(2, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(2, 'S'), Card(2, 'C'), Card(3, 'H'), Card(4, 'H')), # three of a kind [30]
Hand(Card(4, 'D'), Card(3, 'D'), Card(2, 'S'), Card(2, 'C'), Card(2, 'H')),
Hand(Card(5, 'H'), Card(3, 'H'), Card(2, 'C'), Card(2, 'S'), Card(2, 'D')),
Hand(Card(3, 'H'), Card(4, 'C'), Card(5, 'D'), Card(6, 'C'), Card(7, 'S')), # straight [33] ** Ace-High **
Hand(Card(7, 'H'), Card(6, 'D'), Card(5, 'C'), Card(4, 'D'), Card(3, 'H')),
Hand(Card(10, 'H'), Card(11, 'C'), Card(12, 'D'), Card(13, 'C'), Card(14, 'S')),
Hand(Card(2, 'S'), Card(3, 'S'), Card(4, 'S'), Card(5, 'S'), Card(7, 'S')), # flush [36]
Hand(Card(7, 'C'), Card(5, 'C'), Card(4, 'C'), Card(3, 'C'), Card(2, 'C')),
Hand(Card(7, 'S'), Card(6, 'S'), Card(4, 'S'), Card(3, 'S'), Card(2, 'S')),
Hand(Card(2, 'D'), Card(2, 'S'), Card(2, 'C'), Card(3, 'H'), Card(3, 'S')), # full house [39]
Hand(Card(3, 'C'), Card(3, 'D'), Card(2, 'H'), Card(2, 'S'), Card(2, 'D')),
    Hand(Card(4, 'H'), Card(4, 'S'), Card(2, 'C'), Card(2, 'S'), Card(2, 'D')),
Hand(Card(2, 'D'), Card(2, 'S'), Card(2, 'C'), Card(2, 'H'), Card(3, 'H')), # four of a kind [42]
Hand(Card(3, 'D'), Card(2, 'H'), Card(2, 'C'), Card(2, 'S'), Card(2, 'D')),
Hand(Card(4, 'H'), Card(2, 'H'), Card(2, 'C'), Card(2, 'S'), Card(2, 'D')),
Hand(Card(2, 'S'), Card(3, 'S'), Card(4, 'S'), Card(5, 'S'), Card(6, 'S')), # straight flush [45]
Hand(Card(6, 'H'), Card(5, 'H'), Card(4, 'H'), Card(3, 'H'), Card(2, 'H')),
Hand(Card(7, 'S'), Card(6, 'S'), Card(5, 'S'), Card(4, 'S'), Card(3, 'S')),
# 6 card hands
Hand(Card(7, 'H'), Card(6, 'S'), Card(5, 'H'), Card(4, 'S'), Card(3, 'H'), Card(2, 'S')), # str8 [48]
Hand(Card(2, 'H'), Card(7, 'S'), Card(3, 'S'), Card(4, 'H'), Card(6, 'H'), Card(5, 'S')),
Hand(Card(7, 'H'), Card(6, 'S'), Card(5, 'H'), Card(4, 'S'), Card(3, 'H'), Card(8, 'H')),
Hand(Card(7, 'H'), Card(5, 'S'), Card(4, 'S'), Card(3, 'S'), Card(2, 'S'), Card(10, 'S')), # flush [51]
Hand(Card(7, 'S'), Card(5, 'H'), Card(4, 'H'), Card(3, 'H'), Card(2, 'H'), Card(10, 'H')),
Hand(Card(7, 'H'), Card(9, 'S'), Card(4, 'S'), Card(3, 'S'), Card(2, 'S'), Card(10, 'S')),
Hand(Card(7, 'S'), Card(8, 'S'), Card(9, 'S'), Card(5, 'S'), Card(6, 'S'), Card(10, 'S')), # str8-fl [54]
Hand(Card(7, 'H'), Card(8, 'H'), Card(9, 'H'), Card(5, 'H'), Card(6, 'H'), Card(10, 'H')),
Hand(Card(7, 'S'), Card(8, 'S'), Card(9, 'S'), Card(11, 'S'), Card(5, 'S'), Card(10, 'S')),
# 7 card hands
Hand(Card(7, 'H'), Card(6, 'S'), Card(5, 'H'), Card(4, 'S'), Card(3, 'H'), Card(2, 'S'), Card(14, 'H')), # str8
Hand(Card(14, 'S'), Card(2, 'H'), Card(7, 'H'), Card(3, 'S'), Card(4, 'S'), Card(6, 'S'), Card(5, 'H')),
Hand(Card(7, 'H'), Card(6, 'S'), Card(5, 'H'), Card(4, 'S'), Card(3, 'H'), Card(2, 'S'), Card(8, 'H')),
Hand(Card(7, 'H'), Card(8, 'H'), Card(5, 'S'), Card(4, 'S'), Card(3, 'S'), Card(2, 'S'), Card(10, 'S')), # flush
Hand(Card(7, 'S'), Card(8, 'S'), Card(5, 'H'), Card(4, 'H'), Card(3, 'H'), Card(2, 'H'), Card(10, 'H')),
Hand(Card(7, 'H'), Card(8, 'H'), Card(9, 'S'), Card(4, 'S'), Card(3, 'S'), Card(2, 'S'), Card(10, 'S')),
Hand(Card(7, 'S'), Card(8, 'S'), Card(9, 'S'), Card(4, 'H'), Card(3, 'H'), Card(6, 'S'), Card(10, 'S')), # str8-fl
Hand(Card(7, 'H'), Card(8, 'H'), Card(9, 'H'), Card(4, 'S'), Card(3, 'S'), Card(6, 'H'), Card(10, 'H')),
Hand(Card(7, 'S'), Card(8, 'S'), Card(9, 'S'), Card(11, 'S'), Card(3, 'H'), Card(5, 'H'), Card(10, 'S')),
# edge-case: Ace-Low straight
Hand(Card(14, 'H'), Card(2, 'C'), Card(3, 'D'), Card(4, 'C'), Card(5, 'S')), # [66]
Hand(Card(2, 'H'), Card(3, 'C'), Card(4, 'D'), Card(5, 'C'), Card(14, 'S')),
Hand(Card(6, 'H'), Card(2, 'C'), Card(3, 'D'), Card(4, 'C'), Card(5, 'S')),
]
class HandTest(unittest.TestCase):
def test_constructor(self):
hand = Hand()
self.assertIsInstance(hand, Hand)
def test_draw(self):
hand = Hand()
hand.draw(deck.deal())
self.assertTrue(len(hand) == 1)
def test_discard(self):
hand = Hand()
self.assertRaises(IndexError, hand.discard)
hand.draw(deck.deal())
card = hand.discard()
self.assertTrue(len(hand) == 0)
self.assertIsInstance(card, Card)
def test_len(self):
hand = Hand()
self.assertTrue(len(hand) == 0)
for i in range(1, 6):
hand.draw(deck.deal())
self.assertTrue(len(hand) == i)
for i in range(4, -1, -1):
hand.discard()
self.assertTrue(len(hand) == i)
self.assertTrue(len(hand) == 0)
def test_value(self):
# partial hands
hand = Hand(Card(10, 'H'))
self.assertEqual(hand.value, Rank.TEN)
hand = Hand(Card(10, 'H'), Card(9, 'D'))
self.assertEqual(hand.value, Rank.TEN)
hand = Hand(Card(10, 'H'), Card(10, 'D'))
self.assertEqual(hand.value, Rank.ONE_PAIR)
hand = Hand(Card(5, 'H'), Card(5, 'D'), Card(2, 'C'))
self.assertEqual(hand.value, Rank.ONE_PAIR)
hand = Hand(Card(2, 'D'), Card(2, 'C'), Card(3, 'D'), Card(3, 'C'))
self.assertEqual(hand.value, Rank.TWO_PAIR)
hand = Hand(Card(5, 'H'), Card(5, 'D'), Card(5, 'C'))
self.assertEqual(hand.value, Rank.THREE_OF_A_KIND)
hand = Hand(Card(5, 'H'), Card(5, 'D'), Card(5, 'C'), Card(2, 'C'))
self.assertEqual(hand.value, Rank.THREE_OF_A_KIND)
hand = Hand(Card(7, 'H'), Card(7, 'D'), Card(7, 'C'), Card(7, 'S'))
self.assertEqual(hand.value, Rank.FOUR_OF_A_KIND)
# 5-card hands
for i, j in enumerate(range(0, 46, 3)):
self.assertEqual(test_hands[j].value, Rank(i + 7))
self.assertEqual(test_hands[j + 1].value, Rank(i + 7))
self.assertEqual(test_hands[j + 2].value, Rank(i + 7))
# 6-card hands
for i, j in enumerate(range(48, 51, 1)):
self.assertEqual(test_hands[j].value, Rank.STRAIGHT)
self.assertEqual(test_hands[j+3].value, Rank.FLUSH)
self.assertEqual(test_hands[j+6].value, Rank.STRAIGHT_FLUSH)
# 7-card hands
for i, j in enumerate(range(57, 60, 1)):
self.assertEqual(test_hands[j].value, Rank.STRAIGHT)
self.assertEqual(test_hands[j+3].value, Rank.FLUSH)
self.assertEqual(test_hands[j+6].value, Rank.STRAIGHT_FLUSH)
# edge-case: Ace-Low Straight => A,2,3,4,5
for i in range(66, 69):
self.assertEqual(test_hands[i].value, Rank.STRAIGHT)
def test_equal(self):
for i in range(0, 67, 3):
self.assertEqual(test_hands[i], test_hands[i + 1])
self.assertNotEqual(test_hands[i], test_hands[i + 2])
def test_greater(self):
# 5 card hands
for i in range(0, 45):
self.assertLess(test_hands[i], test_hands[i + 3])
self.assertGreater(test_hands[i + 3], test_hands[i])
# 6 card hands
for i in range(48, 54):
self.assertLess(test_hands[i], test_hands[i + 3])
self.assertGreater(test_hands[i + 3], test_hands[i])
# 7 card hands
for i in range(57, 63):
self.assertLess(test_hands[i], test_hands[i + 3])
self.assertGreater(test_hands[i + 3], test_hands[i])
# edge case
for i in range(66, 67):
self.assertLess(test_hands[i], test_hands[i + 2])
self.assertGreater(test_hands[i + 2], test_hands[i])
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
jgreenwd.noreply@github.com
|
3e88b814ba7e591abcb2a5d756d5401245c21791
|
79e978f6077bbbd9bf2efd531c39340904c5ea22
|
/web/celeree.py
|
8402ae5a3de8ba6d3da22bc5bf35d93631cc380c
|
[] |
no_license
|
zlhtech/offline-tube
|
f370f00d0449bdd83e02f7cabd84892a9d65e05b
|
c44b5f2a52c3e49eebed4f4b6f06688afa77f274
|
refs/heads/master
| 2023-08-04T08:11:30.920390
| 2018-05-12T13:49:07
| 2018-05-12T13:49:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
from celery import Celery
celery = Celery('offline_tube')
celery.config_from_object('celeryconfig')
if __name__ == '__main__':
celery.start()
|
[
"oluwafemisule@outlook.com"
] |
oluwafemisule@outlook.com
|
b6cc49ba33f7daf353e35e8e9ac6327d62ab1264
|
6607f5738aaaa3c5fbe093ccf14e5e471582a13e
|
/login.py
|
a9ceb8aff1650f7a5ff518729b4cf693e3eba1ec
|
[] |
no_license
|
JuanROrellana/python-basics-flask
|
873bfdba969de37fc63fe0eabf4fd3886bd66249
|
bfa85bb68b5bd3dabcb254e88f2068cebb2ba599
|
refs/heads/master
| 2021-01-03T01:39:42.786613
| 2020-02-11T20:51:56
| 2020-02-11T20:51:56
| 239,861,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
def login_user(self):
return 'Login User'
def serve_login_page(self):
return 'Server Login'
|
[
"ramirez.orellana.juanjose@gmail.com"
] |
ramirez.orellana.juanjose@gmail.com
|
c89af64ec49633b3adea6285e6cacd01c9d05bef
|
84ca24bebd690b2e1216263223fec1551efd5769
|
/pythonProject/Fortran_Wrapper/testing.py
|
58ff6dd54bd453d38c5149847288f6795aaffa02
|
[] |
no_license
|
syj430/OOP_FEM
|
68f66cde90ae6354fe420e30cfd5212706d2dfee
|
7703aa6278a939f62a7d20081c4975f8e602c831
|
refs/heads/master
| 2023-08-09T20:09:56.354958
| 2021-09-10T17:22:33
| 2021-09-10T17:22:33
| 404,578,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
import numpy as np
import time
import matmul1
M1 = 80
N1M2 = 300
N2 = 80
a = np.empty((M1,N1M2), dtype=np.float64)
b = np.empty((N1M2,N1M2), dtype=np.float64)
c = np.empty((M1,M1), dtype=np.float64)
print(a.shape)
print(b.shape)
a[:] = np.random.rand(M1, N1M2)
b[:] = np.random.rand(N1M2, N1M2)
# Numpy
start = time.time()
# c = np.dot(a,b)
c = a @ b @ np.transpose(a)
stop = time.time()
print(c)
print('Numpy: ', (stop - start)*1000, 'msec')
# # Fortran call
start = time.time()
c = matmul1.matmul1(a,b,M1,N1M2)
stop = time.time()
print(c)
print('Fortran: ', (stop - start)*1000, 'msec')
# import numpy as np
# import time
#
# #import os
# #os.system('f2py -c matmul1 -m operator.f90')
# import operator
#
#
# NI = 8
# NJ = 3
#
# a = np.empty((NI, NJ), dtype=np.float64) # 8x3
# b = np.empty((NJ, NJ), dtype=np.float64) # 3x3
# c = np.empty((NI, NI), dtype=np.float64) # 8x8
# print(a.shape)
# print(b.shape)
# a[:] = np.random.rand(NI, NJ)
# b[:] = np.random.rand(NJ, NJ)
# # print(a)
# # print(a[:])
# #
# # NI = 8
# # NJ = 3
# # Fortran call
# start = time.time()
# c = operator.matmul(a, b, NI, NJ)
# stop = time.time()
# # print(c)
# print('Fortran took ', (stop - start), 'sec')
#
#
# # Numpy
# start = time.time()
# c = a @ b @ np.transpose(a)
# stop = time.time()
# # print(c)
# print('Numpy took ', (stop - start), 'sec')
|
[
"syjoun@afdex.com"
] |
syjoun@afdex.com
|
ca7a638e07652cf131b022c70cbc1ae8da0b6c51
|
a17f294c7354f205cc046fc4006d253661d36f79
|
/main_cnn.py
|
e4f7c31b2f995a28a3f1f921f53d2a8f94b8933e
|
[] |
no_license
|
dipaksingh3343/DeepLearningWithDevops
|
01715cead7a4a651e66998828f63bee03de0158f
|
771e262b807cdf792c86f53ec58c9317ea873d70
|
refs/heads/master
| 2022-11-29T09:30:30.146551
| 2020-07-30T11:18:17
| 2020-07-30T11:18:17
| 283,255,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from keras.datasets import mnist
# In[2]:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# In[3]:
img=x_train[0]
# In[4]:
img.shape
# In[5]:
img1D=img.reshape(28*28)
# In[6]:
img1D.shape
# In[7]:
import matplotlib.pyplot as plt
# In[8]:
plt.imshow(img)
# In[9]:
x_train1D=x_train.reshape(-1,28*28)
# In[10]:
x_train1D.shape
# In[11]:
x_train=x_train1D.astype('float32')
# In[12]:
from keras.utils.np_utils import to_categorical
# In[13]:
y_train_cat = to_categorical(y_train)
# In[14]:
from keras.models import Sequential
# In[15]:
from keras.layers import Dense
# In[16]:
model = Sequential()
# In[17]:
model.add(Dense(units=512, input_dim=28*28, activation='relu'))
# In[18]:
model.summary()
# In[19]:
model.add(Dense(units=256, activation='relu'))
# In[20]:
model.add(Dense(units=128, activation='relu'))
# In[21]:
model.add(Dense(units=32, activation='relu'))
# In[22]:
model.summary()
# In[23]:
model.add(Dense(units=10, activation='softmax'))
# In[24]:
model.summary()
# In[25]:
from keras.optimizers import RMSprop
# In[26]:
model.compile(optimizer=RMSprop(), loss='categorical_crossentropy',
metrics=['accuracy']
)
# In[27]:
h = model.fit(x_train, y_train_cat, epochs=2)
# In[29]:
X_test_1d=x_test.reshape(-1, 28*28)
# In[35]:
X_test = X_test_1d.astype('float32')
# In[36]:
y_test_cat=to_categorical(y_test)
# In[37]:
model.predict(X_test)
# In[38]:
y_test_cat
# In[61]:
model.save('main_model.h1')
# In[ ]:
accuracy = h.history['accuracy']
a = accuracy[-1]
print("accuracy is =", a)
with open('/root/task3mlops/accuracy.txt', 'w+') as output_file:
output_file.write(str(a))
|
[
"noreply@github.com"
] |
dipaksingh3343.noreply@github.com
|
c34178a25574997a13066c4beef46711e69ffd83
|
2426df18047818d1dc6017b3d14ebd0e9971c690
|
/data/reddit_scrape_public.py
|
f69cc40ecbd1ff77162e5ae4753b10d0f8d875bf
|
[] |
no_license
|
tiffany-chang/reddit-relationships
|
966f7bea2fb7ee64f273c5220e5cdcb6587c9b4b
|
ccf4af4a66515f74d3645f248417bd1e12cea5f3
|
refs/heads/master
| 2021-05-14T13:39:58.432293
| 2018-01-24T16:34:01
| 2018-01-24T16:34:01
| 116,445,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
import urllib2
import json
import time
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
# Set a descriptive User-Agent; replace <PutYourUserNameHere> with your reddit username
hdr = {'User-Agent': 'osx:r/relationships.multiple.results:v1.0 (by /u/<PutYourUserNameHere>)'}
url = 'https://www.reddit.com/r/relationships/top/.json?sort=top&t=all&limit=100'
req = urllib2.Request(url, headers=hdr)
text_data = urllib2.urlopen(req).read()
data = json.loads(text_data)
data_all = data.values()[1]['children']
print len(data_all)
while (len(data_all) <= 900):
time.sleep(2)
last = data_all[-1]['data']['name']
print last
url = 'https://www.reddit.com/r/relationships/top/.json?sort=top&t=all&limit=100&after=%s' % last
req = urllib2.Request(url, headers=hdr)
text_data = urllib2.urlopen(req).read()
data = json.loads(text_data)
data_all += data.values()[1]['children']
print len(data_all)
print len(data_all)
article_title = []
article_flairs = []
article_date = []
article_comments = []
article_score = []
for i in range(0, len(data_all)):
article_title.append(data_all[i]['data']['title'])
article_flairs.append(data_all[i]['data']['link_flair_text'])
article_date.append(data_all[i]['data']['created_utc'])
article_comments.append(data_all[i]['data']['num_comments'])
article_score.append(data_all[i]['data']['score'])
rel_df = DataFrame({'Date': article_date,
'Title': article_title,
'Flair': article_flairs,
'Comments': article_comments,
'Score': article_score})
rel_df = rel_df[['Date', 'Title', 'Flair', 'Comments', 'Score']]
print rel_df[:5]
rel_df.to_csv('out.csv', encoding='utf-8')
|
[
"9517803+tiffany-chang@users.noreply.github.com"
] |
9517803+tiffany-chang@users.noreply.github.com
|
0cd47160e06721dee64ebb63c8f9b77ea59d624d
|
d8a6d599528a0e2832a57c223042b88f65f1600a
|
/Project_Codebase/amz_lib/category/views.py
|
43f47bfd13c00d667251bfa3a55693376ba9b685
|
[] |
no_license
|
BitEater00/NoSQL_Databases
|
5e3331f9d52c718ba7d91d14d0370eeb326b50bf
|
516fe34405e96dabe07501fb4a2f1c05e0960097
|
refs/heads/master
| 2023-05-31T04:29:21.532338
| 2021-06-29T21:23:16
| 2021-06-29T21:23:16
| 381,480,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from django.shortcuts import render
from django.http.response import HttpResponse
from category.models import AllCategories
from books import datahandler as data
from django.core.paginator import Paginator
# Create your views here.
def allCategories(request):
allCategory = AllCategories.objects.all()
return render(request, "allcategories.html", {'allcategory': allCategory})
def categories(request, id):
bookByCategory = data.getbookforcategory(id)
paginator = Paginator(bookByCategory, 48)
page_number = request.GET.get('page')
page_object = paginator.get_page(page_number)
return render(request, 'category.html', {'page_object': page_object})
|
[
"44523071+BitEater00@users.noreply.github.com"
] |
44523071+BitEater00@users.noreply.github.com
|
bb1ac140b97e1c283eb3d9bb3ff321b3583a8eb7
|
57f96c73ce43d1b9992ebc56a0f23111f1182daa
|
/seznam_kontaktu/migrations/0002_auto_20200405_2233.py
|
40c00a36e2c3d1658a15f747d9b7b62fae9accbd
|
[] |
no_license
|
tolstoj48/family_calendar_webapp
|
d633e41ec022ebbe13fcbf082c52c21b2656a610
|
6ac40460acf54f1c5bff874d20affee2803b26d0
|
refs/heads/master
| 2021-09-24T06:13:51.427249
| 2020-04-06T07:26:55
| 2020-04-06T07:26:55
| 253,425,489
| 0
| 0
| null | 2021-09-22T18:51:01
| 2020-04-06T07:27:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Generated by Django 3.0.3 on 2020-04-05 22:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('seznam_kontaktu', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'permissions': [('can_see_contacts', 'Can see all contacts')]},
),
]
|
[
"petr0musil@gmail.com"
] |
petr0musil@gmail.com
|
516c0959149b6fffb62770111c50b6c72f046797
|
44b53c87ead159356817d866dfd94455854335fd
|
/v2.0/framework/Matmul/place_in_local.py
|
15721aed797c456ce04763751cddbfed7147ac44
|
[
"MIT"
] |
permissive
|
dikujepsen/OpenTran
|
c585a66771e90f2f4c9aa64318250cf15204a20f
|
af9654fcf55e394e7bece38e59bbdc3dd343f092
|
refs/heads/master
| 2016-09-15T11:36:27.184235
| 2016-05-08T09:20:07
| 2016-05-08T09:20:07
| 13,697,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,262
|
py
|
import lan
import copy
import ast_buildingblock as ast_bb
import exchange
import collect_gen as cg
import collect_id as ci
import collect_loop as cl
import collect_array as ca
import collect_device as cd
class PlaceInLocal(object):
def __init__(self, ast):
self.ast = ast
self.PlaceInLocalArgs = list()
self.PlaceInLocalCond = None
def place_in_local(self):
""" Find all array references that can be optimized
through the use of shared memory.
Then rewrite the code in this fashion.
"""
args = dict()
loopindex = set()
inner_loop_indices = cl.get_inner_loops_indices(self.ast)
subscript_no_id = ca.get_subscript_no_id(self.ast)
for k, sub_list in subscript_no_id.items():
for i, sub in enumerate(sub_list):
if self.__can_be_put_in_local(sub, inner_loop_indices):
args[k] = i
loopindex = loopindex.union(set(sub).intersection(set(inner_loop_indices)))
loopindex = list(loopindex)
if len(loopindex) > 1:
raise Exception("""place_in_reg: loopindex length above 1""")
if args:
self.PlaceInLocalArgs.append(args)
self.__set_condition(loopindex)
def __set_condition(self, loopindex):
(lower_limit, upper_limit) = cl.get_loop_limits(self.ast)
local = cl.get_local(self.ast)
for m in loopindex:
cond = lan.BinOp(lan.BinOp(lan.BinOp(lan.Id(upper_limit[m]), '-',
lan.Id(lower_limit[m])), '%',
lan.Constant(local['size'][0])), '==', lan.Constant(0))
self.PlaceInLocalCond = cond
def __can_be_put_in_local(self, sub, inner_loop_indices):
"""
The subscript must be two dimensional. One index must be a grid index, the other an inner loop index.
:param sub:
:param inner_loop_indices:
:return:
"""
grid_indices = cl.get_grid_indices(self.ast)
par_dim = cl.get_par_dim(self.ast)
return set(sub).intersection(set(grid_indices)) and \
set(sub).intersection(set(inner_loop_indices)) \
and par_dim == 2
def local_memory3(self, arr_dict):
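        # Stage the selected array references in __local memory: allocate the local buffers,
        # split the blocked loops into tile-sized chunks, and emit the cooperative loads and barriers.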
initstats = []
init_comp = lan.GroupCompound(initstats)
kernel = cd.get_kernel(self.ast)
kernel.statements.insert(0, init_comp)
loop_dict = self.__find_array_ref_to_inner_loop_idx_mapping(arr_dict)
self.__loop_dict_is_not_safe(arr_dict, loop_dict)
# Find which loops must be extended
loops_to_be_extended = set()
for n in arr_dict:
i = arr_dict[n]
loops_to_be_extended.add(loop_dict[(n, i)][0])
outerstats = self.__extend_loops(loops_to_be_extended)
self.__allocate_local_arrays(initstats, arr_dict)
loadings = []
loop_arrays = ca.get_loop_arrays(self.ast)
local = cl.get_local(self.ast)
for n in arr_dict:
loc_name = n + '_local'
i = arr_dict[n]
glob_subs = copy.deepcopy(loop_arrays[n][i])
# Change loop idx to local idx
loopname = loop_dict[(n, i)][0]
loc_subs_2 = copy.deepcopy(glob_subs).subscript
my_new_glob_sub_2 = self.__create_glob_load_subscript(glob_subs, loc_subs_2, loopname, n)
self.__set_local_sub(loc_subs_2)
loc_ref = lan.ArrayRef(lan.Id(loc_name), loc_subs_2)
loadings.append(lan.Assignment(loc_ref, my_new_glob_sub_2))
inner_loc = loop_arrays[n][i]
self.__exchange_load_local_loop_idx(loopname, loc_name, inner_loc)
self.__exchange_load_local_idx(inner_loc)
self.ast.ext.append(lan.Block(lan.Id(loc_name), local['size']))
# Must also create the barrier
mem_fence_func = self.__create_local_mem_fence()
loadings.append(mem_fence_func)
outerstats.insert(0, lan.GroupCompound(loadings))
outerstats.append(mem_fence_func)
def __create_local_mem_fence(self):
arglist = lan.ArgList([lan.Id('CLK_LOCAL_MEM_FENCE')])
func = ast_bb.EmptyFuncDecl('barrier', type=[])
func.arglist = arglist
return func
def __find_array_ref_to_inner_loop_idx_mapping(self, arr_dict):
subscript_no_id = ca.get_subscript_no_id(self.ast)
grid_indices = cl.get_grid_indices(self.ast)
loop_dict = dict()
# So we create it
for n in arr_dict:
i = arr_dict[n]
loop_dict[(n, i)] = []
for n in arr_dict:
i = arr_dict[n]
subscript = subscript_no_id[n][i]
inner_loop_idx = []
for m in subscript:
try:
_ = int(m)
except ValueError:
if m not in grid_indices:
inner_loop_idx.append(m)
loop_dict[(n, i)] = inner_loop_idx
return loop_dict
def __loop_dict_is_not_safe(self, arr_dict, loop_dict):
# Check that all ArrayRefs are blocked using only one loop
# otherwise we do not know what to do
retval = False
for n in arr_dict:
i = arr_dict[n]
if len(loop_dict[(n, i)]) > 1:
print "Array %r is being blocked by %r. Returning..." \
% (n, loop_dict[(n, i)])
retval = True
return retval
def __extend_loops(self, loops_to_be_extended):
outerstats = []
loops = cl.get_inner_loops(self.ast)
local = cl.get_local(self.ast)
for n in loops_to_be_extended:
outerloop = loops[n]
outeridx = n
compound = outerloop.compound
outerloop.compound = lan.Compound([])
innerloop = copy.deepcopy(outerloop)
innerloop.compound = compound
outerstats = outerloop.compound.statements
outerstats.insert(0, innerloop)
loadstats = []
load_comp = lan.GroupCompound(loadstats)
outerstats.insert(0, load_comp)
# change increment of outer loop
outerloop.inc = lan.Increment(lan.Id(outeridx), '+=' + local['size'][0])
inneridx = outeridx * 2
# new inner loop
innerloop.cond = lan.BinOp(lan.Id(inneridx), '<', lan.Constant(local['size'][0]))
innerloop.inc = lan.Increment(lan.Id(inneridx), '++')
innerloop.init = ast_bb.ConstantAssignment(inneridx)
return outerstats
def __allocate_local_arrays(self, initstats, arr_dict):
types = ci.get_types(self.ast)
local = cl.get_local(self.ast)
num_array_dims = ca.get_num_array_dims(self.ast)
for n in arr_dict:
# Add array allocations
local_array_name = n + '_local'
arrayinit = lan.Constant(local['size'][0])
if num_array_dims[n] == 2:
arrayinit = lan.BinOp(arrayinit, '*', lan.Constant(local['size'][1]))
local_array_id = lan.Id(local_array_name)
local_type_id = lan.ArrayTypeId(['__local', types[n][0]], local_array_id, [arrayinit])
initstats.append(local_type_id)
def __exchange_load_local_loop_idx(self, loopname, loc_name, inner_loc):
inner_loc.name.name = loc_name
exchange_id2 = exchange.ExchangeId({loopname: loopname * 2})
exchange_id2.visit(inner_loc)
def __exchange_load_local_idx(self, inner_loc):
reverse_idx = cg.get_reverse_idx(self.ast)
grid_indices = cl.get_grid_indices(self.ast)
for k, m in enumerate(inner_loc.subscript):
if isinstance(m, lan.Id) and \
m.name in grid_indices:
tid = str(reverse_idx[k])
inner_loc.subscript[k] = ast_bb.FuncCall('get_local_id', [lan.Constant(tid)])
def __create_glob_load_subscript(self, glob_subs, loc_subs_2, loopname, n):
loc_subs = copy.deepcopy(glob_subs).subscript
my_new_glob_sub = copy.deepcopy(glob_subs).subscript
my_new_glob_sub_2 = copy.deepcopy(glob_subs)
reverse_idx = cg.get_reverse_idx(self.ast)
grid_indices = cl.get_grid_indices(self.ast)
for k, m in enumerate(loc_subs):
if isinstance(m, lan.Id) and \
m.name not in grid_indices:
tid = str(reverse_idx[k])
tidstr = ast_bb.FuncCall('get_local_id', [lan.Constant(tid)])
loc_subs_2[k] = tidstr
my_new_glob_sub[k] = lan.BinOp(lan.Id(loopname), '+', tidstr)
my_new_glob_sub_2 = lan.ArrayRef(lan.Id(n), my_new_glob_sub)
return my_new_glob_sub_2
def __set_local_sub(self, loc_subs_2):
reverse_idx = cg.get_reverse_idx(self.ast)
grid_indices = cl.get_grid_indices(self.ast)
for k, m in enumerate(loc_subs_2):
if isinstance(m, lan.Id) and \
m.name in grid_indices:
tid = str(reverse_idx[k])
loc_subs_2[k] = ast_bb.FuncCall('get_local_id', [lan.Constant(tid)])
|
[
"jepsen@diku.dk"
] |
jepsen@diku.dk
|
76cb43354a47eb76d21082a004a7c5f2a70c93b3
|
cb8781a10172d4b4b2a7216afeefd0856c3b2656
|
/cbvemployee/cbvemployee/settings.py
|
76395971d64a8b445f02d81a42c480f062919907
|
[] |
no_license
|
Padmaraj1983/TZ
|
de92c853ed66dced8813eaeb836de5b51c81defa
|
833f3a9daab3d00096750fe36f680d2db3c94bd1
|
refs/heads/master
| 2020-04-09T01:54:30.187305
| 2018-11-28T07:05:18
| 2018-11-28T07:05:18
| 159,921,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
"""
Django settings for cbvemployee project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR=os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ctlz#fst)zkbv-xsb*%r60!aaz(f-mhcmc2kr!s)jp)z&q5&3l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cbvemployee.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cbvemployee.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"suneetharoyp@gmail.com"
] |
suneetharoyp@gmail.com
|
de4898756eb7f7ba7d671bacc8f63587b536931c
|
e7f2bcb55bd355806dc07920a7b439fa65889f39
|
/lib/livealive/const.py
|
72b288e0c853456e96c9604fa0b87f66d88c4204
|
[
"MIT"
] |
permissive
|
kotas/reflec
|
128a437c3612258217ff8406ca2b5ac9d85b7d19
|
61aaa003365009c126c537ddb88ce7a4216b23db
|
refs/heads/master
| 2020-05-19T09:14:18.038921
| 2012-07-06T06:30:59
| 2012-07-06T06:30:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
# -*- coding: utf_8 -*-
u"""
LiveAlive Constants
Licensed under the MIT License.
Copyright (c) 2007-2012 Kota Saito
"""
# Application title
APP_NAME = "LiveAlive2"
# Application version
APP_VERSION = "2.00"
# Application description
APP_DESCRIPTION = "Live Monitoring Tool."
# Application copyright notice
APP_COPYRIGHT = "Licensed under the MIT License.\n" \
"Copyright (c) 2007-2012 Kota Saito"
# Application version text (shown by --version)
APP_VERSION_TEXT = "%s %s - %s\n\n%s" % \
(APP_NAME, APP_VERSION, APP_DESCRIPTION, APP_COPYRIGHT)
# Application directory (set externally)
APP_DIR = ""
# Names of the configuration files
CONFIG_FILE = ("conf/global.ini", "conf/livealive2.ini")
# Name of the plugin directory
PLUGIN_DIR = "livealive-plugins"
__all__ = ["APP_NAME", "APP_VERSION", "APP_COPYRIGHT", "APP_DESCRIPTION",
"APP_VERSION_TEXT", "APP_DIR", "CONFIG_FILE", "PLUGIN_DIR"]
|
[
"kotas.nico@gmail.com"
] |
kotas.nico@gmail.com
|
6efb2716aa595643de913d1fdcd461425c5293d9
|
e164fd9dce5fef093f85ca009f78570ec2b1c492
|
/557. Reverse Words in a String III.py
|
259f35c9080599d36d74ed6d542a91e1be031dbf
|
[] |
no_license
|
havenshi/leetcode
|
58fde93a1f1cbdd3c2faa9566c00383e5812f3a7
|
bcb79f329bcb133e6421db8fc1f4780a4eedec39
|
refs/heads/master
| 2021-01-22T04:15:23.748793
| 2019-11-30T04:25:54
| 2019-11-30T04:25:54
| 92,447,327
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
# Given a string, you need to reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
return ' '.join(w[::-1] for w in s.split())
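# In-place variant: reverse each space-delimited word of the character list with two pointers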
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
s = list(s)
start = 0
i = 0
while i <= len(s):
if i == len(s) or s[i] == " ":
self.help(s, start, i - 1)
start = i + 1
i += 1
return "".join(s)
def help(self, s, start, end):
for i in range((end - start) / 2 + 1):
s[start + i], s[end - i] = s[end - i], s[start + i]
|
[
"haiwen.shi01@gmail.com"
] |
haiwen.shi01@gmail.com
|
33080bf8f45a358862c327c90ede0fc3f061c88a
|
cd36dbcdb8622e5e96fc31623c6b21701259065b
|
/HW3/Lqr-moritz2.py
|
d4b7c8e3eaee3cf091f87d4626e39dab2168ca78
|
[] |
no_license
|
Gisco93/RL
|
d0122a2e820e54989319ef3a008043401363df19
|
e605739f0a4166627be1fba25c6f5d92f15e93cb
|
refs/heads/master
| 2021-01-11T09:22:58.820424
| 2017-02-14T12:55:21
| 2017-02-14T12:55:21
| 77,140,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def lqr(startstate):
#LQR parameter
A_t = np.array([[1, 0.1],[0, 1]])
B_t = np.array([[0],[0.1]])
b_t = np.array([[5],[0]])
Sig_t = 0.01
K_t = np.array([5, 0.3])
k_t =0.3
H_t = 1
T = 50
states = np.zeros((2, T+1))
states[:, 0] = startstate
actions = np.zeros(T)
rewards = np.zeros(T+1)
for i in range(1, T+1):
w_t = np.random.normal(b_t,Sig_t)
actions[i-1] = -1.0 * np.dot(K_t,states[:,i-1]) + k_t
rewards[i] = compute_rt(states[:,i-1],actions[i-1],H_t,i-1,T)
states[:,i] = np.reshape(np.reshape(np.dot(A_t, states[:, i-1]), (2, 1)) + B_t * actions[i-1] + w_t, 2)
return actions, states , rewards
def compute_rt(s_t,a_t,H_t,t,T):
r_t = getr_t(t)
R_t = getR_t(t)
diff = np.reshape(s_t,(2,1)) - r_t
rslt = -1.0 *np.dot(np.dot(np.transpose(diff),R_t),diff)
if (t == T):
return rslt
else:
return rslt - np.dot(np.dot(np.transpose(a_t),H_t),a_t)
def getR_t(t):
    # State-cost weights: heavily penalise position error at the via-point times t = 14 and t = 40
    if t == 14 or t == 40:
        return np.array([[100000, 0], [0, 0.1]])
    else:
        return np.array([[0.01, 0], [0, 0.1]])
def getr_t(t):
if t < 15 :
return np.array([[10],[0]])
else :
return np.array([[20],[0]])
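# Roll out 20 episodes from random start states and collect actions, states and rewards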
Actions = np.zeros((20,50))
States = np.zeros((20, 51, 2))
dev = np.zeros((51, 2))
Rewards = np.zeros((20,51))
for i in range(20):
s = np.random.normal([0,0],1)
(a,st,r) = lqr(s)
Actions[i] = a
States[i] = st.transpose()
Rewards[i] = r
mean = np.mean(States, axis=0)
for i in range(2):
for j in range(51):
for k in range(20):
dev[j,i] = dev[j,i] + (States[k,j,i] - mean[j,i]) * (States[k,j,i] - mean[j,i])
dev[j, i] = dev[j, i] / 20
the_mean = mean.transpose()
plt.plot(the_mean[0] + 2*dev[:, 0], the_mean[1] + 2*dev[:, 1], 'y')
plt.plot(the_mean[0] - 2*dev[:, 0], the_mean[1] - 2*dev[:, 1], 'y')
plt.plot(the_mean[0], the_mean[1], 'r')
plt.fill_between(the_mean[0] + 2*dev[:, 0], the_mean[1] - 2*dev[:, 1], the_mean[1] + 2*dev[:, 1], alpha=0.5, edgecolor='#1B2ACC', facecolor='#089FFF')
plt.fill_between(the_mean[0] - 2*dev[:, 0], the_mean[1] - 2*dev[:, 1], the_mean[1] + 2*dev[:, 1], alpha=0.5, edgecolor='#1B2ACC', facecolor='#089FFF')
plt.show()
|
[
"moritz.fuchs@gmx.net"
] |
moritz.fuchs@gmx.net
|
a1e787ccb827e52f2efab530901c227d0fa26e7e
|
bf82c9d79700f96e06f2c8362d0d3b1ba5f0b761
|
/config/imagenet_wDAE/miniimagenet_ResNet10CosineClassifier_wDAE_GNN.py
|
5ee68f73f28f81a825de1c8e0a853f0cd934c24f
|
[
"MIT"
] |
permissive
|
legitqx/wDAE_GNN_FewShot
|
5337bdc2ae8fe40b29364f3c7de980145d0f5fbb
|
987fc4e531846f5376ea3c9d6704556b12a87954
|
refs/heads/master
| 2021-03-11T04:40:25.973989
| 2020-03-12T14:06:54
| 2020-03-12T14:06:54
| 246,510,370
| 0
| 0
| null | 2020-03-11T08:05:08
| 2020-03-11T08:05:08
| null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
config = {}
# set the parameters related to the training and testing set
nKbase = 64
nKnovel = 16
nExemplars = 5
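# episode layout: 64 base classes, 16 novel classes, 5 support examples per novel class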
data_train_opt = {}
data_train_opt['nKnovel'] = nKnovel
data_train_opt['nKbase'] = nKbase
data_train_opt['nExemplars'] = nExemplars
data_train_opt['nTestNovel'] = nKnovel
data_train_opt['nTestBase'] = nKbase
data_train_opt['batch_size'] = 4
data_train_opt['epoch_size'] = 4000
data_train_opt['data_dir'] = './datasets/feature_datasets/miniimagenet_ResNet10CosineClassifier'
config['data_train_opt'] = data_train_opt
config['max_num_epochs'] = 15
num_features = 512
networks = {}
networks['feature_extractor'] = {
'def_file': 'feature_extractors.dumb_feat', 'pretrained': None,
'opt': {'dropout': 0}, 'optim_params': None }
net_optim_paramsC = {
'optim_type': 'sgd', 'lr': 0.1, 'momentum':0.9, 'weight_decay': 5e-4,
'nesterov': True, 'LUT_lr':[(10, 0.01), (15, 0.001)]}
pretrainedC = './experiments/miniimagenet_ResNet10CosineClassifier/classifier_net_epoch100'
net_optionsC = {
'num_features': num_features,
'num_classes': 1000,
'global_pooling': False,
'scale_cls': 10.0,
'learn_scale': True,
'dae_config': {
'gaussian_noise': 0.08,
'comp_reconstruction_loss': True,
'targets_as_input': False,
'dae_type': 'RelationNetBasedGNN',
'num_layers': 2,
'num_features_input': num_features,
'num_features_output': 2 * num_features,
'num_features_hidden': 3 * num_features,
'update_dropout': 0.7,
'nun_features_msg': 3 * num_features,
'aggregation_dropout': 0.7,
'topK_neighbors': 10,
'temperature': 5.0,
'learn_temperature': False,
},
}
networks['classifier'] = {
'def_file': 'classifiers.cosine_classifier_with_DAE_weight_generator',
'pretrained': pretrainedC, 'opt': net_optionsC,
'optim_params': net_optim_paramsC}
config['networks'] = networks
config['criterions'] = {}
config['reconstruction_loss_coef'] = 1.0
config['classification_loss_coef'] = 1.0
|
[
"qixun.yeo.2016@sis.smu.edu.sg"
] |
qixun.yeo.2016@sis.smu.edu.sg
|
f01bb2e680e95f8e666cc08cbcb3a55cce4a97f3
|
647781f4023c218e613299f4f0d2faa77c0acc80
|
/to_do_list_app.py
|
97a28083627d7def8dafd29892a12981f2d6330e
|
[
"MIT"
] |
permissive
|
kumar-kislay/python-todo-list
|
0370740596107c5b2391f9f875345681f0f5f53d
|
2c1bf01edcbf58860f7c85fbf3a9d2a7b2ae73a2
|
refs/heads/main
| 2023-04-21T08:54:34.580513
| 2021-04-26T11:26:21
| 2021-04-26T11:26:21
| 361,719,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,948
|
py
|
import sys
import json
#Function to display the options during the interactive session
def showMenu():
print("Menu:")
print("1. Add ToDo item(s): ")
print("2. Mark item(s) Complete: ")
print("3. List ToDo item(s): ")
print("4. Update Item Description: ")
print("5. Delete item(s): ")
print("6. Delete all items: ")
print("7. Exit: ")
#Main Function with all the business logic
def main():
#variable assignment with default values
user_input = '99'
item_list = list()
create_item_list = list()
delete_item_list = list()
complete_item_list = list()
not_complete_item_list = list()
#dictionary variable to hold all the todo items
todo_dict = {}
#read the items from data.json file at the start and load in todo dictionary
with open('data.json') as json_file:
todo_dict=json.load(json_file)
#read the command line arguments
item_list = sys.argv
action = item_list[1]
#create action to add one or more todo item
if action == "create":
create_item_list = item_list
create_item_list.pop(0)
create_item_list.pop(0)
for item in create_item_list:
todo_dict[item]="ToDo"
print("ToDo items created successfully")
#List action to list items in the todo list
elif action == "list-all":
#list all items
if len(item_list) == 2:
for items in todo_dict:
print("[Item]: " + items + " [Status]: " + todo_dict.get(items))
else:
#list items containing a keyword / substring
if item_list[2] == "--substring":
for item in todo_dict:
if item.find(item_list[3]) >= 0:
print("[Item]: " + item + " [Status]: " + todo_dict.get(item))
#print("Print all with substring " + item_list[3])
#list all items which are complete
elif item_list[2] == "--complete":
for item in todo_dict:
if todo_dict[item] == "Complete":
print("[Item]: " + item + " [Status]: " + todo_dict.get(item))
#list all items which are not complete
elif item_list[2] == "--no-complete":
for item in todo_dict:
if todo_dict[item] == "ToDo":
print("[Item]: " + item + " [Status]: " + todo_dict.get(item))
else:
print("Incorrect list argument - Please check")
#Update item description with a new description
elif action == "toggle":
new_key = item_list[3]
old_key = item_list[2]
todo_dict[new_key] = todo_dict.pop(old_key)
print("ToDo item description successfully updated")
#Mark one or more item Complete
elif action == "update":
complete_item_list=item_list
complete_item_list.pop(0)
complete_item_list.pop(0)
for item in complete_item_list:
if todo_dict.get(item,"ItemNotPresent") == "ToDo":
todo_dict[item]="Complete"
print("Item " + item + " marked complete successfully")
elif todo_dict.get(item,"ItemNotPresent") == "Complete":
print("Item "+ item + " already marked complete ")
else:
print("Item " + item + " does not exist in the list ")
#Delete one or more item
elif action == "delete":
delete_item_list = item_list
delete_item_list.pop(0)
delete_item_list.pop(0)
for item in delete_item_list:
element = todo_dict.pop(item, "defaultvalue")
if element == "defaultvalue":
print("Item " + item + " is not present in the list")
else:
print("Item " + item + " deleted successfully")
print("Item deletion complete successfully")
#Delete all the items from the list
elif action == "delete-all":
todo_dict.clear()
print("All items deleted successfully")
#Interactive session - ToDo List
elif action == "interactive":
print("Welcome to the Interactive Mode")
#Continue with the interactive session until the user keys in option 7
while user_input != '7':
#continuously show the interactive session menu
showMenu()
user_input = input("Enter Your Choice: ")
#User to select option 1 to add one or more to items
if user_input == '1':
item_list = input("Enter the ToDo items ").split()
for item in item_list:
todo_dict[item]="ToDo"
print("Added item: ", item)
print("Added item(s) successfully ")
#User to select option 2 to mark one or more items complete
elif user_input == '2':
item_list = input("Enter the item(s) to be marked complete ").split()
for item in item_list:
if todo_dict.get(item,"ItemNotPresent") == "ToDo":
todo_dict[item]="Complete"
print("Item "+ item + " successfully marked complete ")
elif todo_dict.get(item,"ItemNotPresent") == "Complete":
print("Item " + item + " already marked complete ")
else:
print("Item " + item + " does not exist in the list ")
#Option 3 to list all the items in the list
elif user_input == '3':
print("List of TO-DO Items: ")
for items in todo_dict:
print("[Item]: " + items + " [Status]: " + todo_dict.get(items))
#Option 4 to Update an item's description
elif user_input == '4':
old_item = input("Enter the item to update: ")
new_item = input("Enter the new item description: ")
todo_dict[new_item] = todo_dict.pop(old_item)
print("ToDo item description successfully updated")
#Option 5 to delete one or more items from the list
elif user_input == '5':
item_list = input("Enter the items to delete: ").split()
for item in item_list:
element = todo_dict.pop(item, "defaultvalue")
if element == "defaultvalue":
print("Item " + item + " is not present in the list")
else:
print("Item " + item + " deleted successfully")
print("Item deletion complete successfully")
#Option 6 to delete all the items from the list
elif user_input == '6':
todo_dict.clear()
print("All items deleted successfully")
#Option 7 to jump out of the interactive session and end the program
elif user_input == '7':
#y = json.dumps(todo_dict)
#print(y)
#with open('data.json', 'w') as outfile:
#json.dump(todo_dict, outfile, indent=4)
print("Good Bye ")
else:
print("Incorrect action argument - Please check and try again")
#save the current state of the todo dictionary in the file data.json
with open('data.json', 'w') as outfile:
json.dump(todo_dict, outfile, indent=4)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
kumar-kislay.noreply@github.com
|
19cd73807728b415b30310859435f60ba23e3379
|
a9fa615aaf7b6691f6e9ddaf24e30f5445e478ca
|
/Compendio/seed/seed/wsgi.py
|
9526e523d12c47c8f15ff28688e904282b92befe
|
[] |
no_license
|
JoelTorresAr/seed
|
e18335c8734a2559e2ce2e79791e1e3b3c25c918
|
7546036728ba6d707a85f9dd66cd14a47cd2e461
|
refs/heads/master
| 2022-12-24T05:39:12.265212
| 2020-09-15T00:23:49
| 2020-09-15T00:23:49
| 295,567,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
WSGI config for seed project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seed.settings')
application = get_wsgi_application()
|
[
"djoe_torres@hotmail.com"
] |
djoe_torres@hotmail.com
|
d58976cabe86561eef1a9c28fa15b7b082c6938c
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/SOLOv1/mmdet/models/bbox_heads/convfc_bbox_head.py
|
5cca3932a88d5e380c600fa23bd6f6fea524d4e0
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,495
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
from ..registry import HEADS
from ..utils import ConvModule
from .bbox_head import BBoxHead
@HEADS.register_module
class ConvFCBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=None,
*args,
**kwargs):
super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead, self).init_weights()
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
@HEADS.register_module
class SharedFCBBoxHead(ConvFCBBoxHead):
def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
assert num_fcs >= 1
super(SharedFCBBoxHead, self).__init__(
num_shared_convs=0,
num_shared_fcs=num_fcs,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
b568adb25aef060b2d81efb9ed6fdca96d46a9df
|
76e2475380e69023572fbd146c0f550142070ee3
|
/pickle/pickle_1.py
|
67fac612fdee04b61aec05e4aee3a7e0b8ec8e5f
|
[] |
no_license
|
jangwoni79/python_study
|
0d532c79debdba8ae45022f8b55df53b0a03b2eb
|
0272a876e94dad1f8c042f26b5c031b1183bc88c
|
refs/heads/master
| 2023-06-29T05:41:46.343868
| 2021-08-09T01:27:36
| 2021-08-09T01:27:36
| 390,630,368
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
# Example 1
import pickle
test = ['A', 'B', 'C']
# Pickling: serialize the list to a file
with open ("data.p","wb") as f1:
pickle.dump(test,f1)
# Unpickling: load the list back from the file
with open ("data.p","rb") as f2:
data = pickle.load(f2)
print(data)
|
[
"rejwe79@gmail.com"
] |
rejwe79@gmail.com
|
a66ee6cfb2ca2e0518cb6e7494603ce0df6d2803
|
8fc754ab703329de87ed91342901b6850424e9c0
|
/tdnn-withRemap-noisy/noise-test.py
|
1bbbb9306ae0b823261b121e968c531f91ce6084
|
[] |
no_license
|
dnth/short-behavior
|
6c96756ace147f7072166d2e059fca0563878a6b
|
2e5da55038cde15364a02cc8ad6c08c597b825ef
|
refs/heads/master
| 2016-08-05T06:35:46.933028
| 2015-07-14T05:54:42
| 2015-07-14T05:54:42
| 39,056,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,848
|
py
|
import sys
import numpy as np
from pybrain.datasets import SequenceClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer, RPropMinusTrainer
from pybrain import LinearLayer, FullConnection, LSTMLayer, BiasUnit, MDLSTMLayer, IdentityConnection, TanhLayer, SoftmaxLayer
from pybrain.utilities import percentError
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from naoqi import ALProxy
import Image
import time
import theanets
import vision_definitions
from numpy.random.mtrand import randint
from numpy import argmax
from random import randint
from scipy.interpolate import interp1d
BallLiftJoint = np.loadtxt('../../20fpsFullBehaviorSampling/BallLift/JointData.txt').astype(np.float32)
BallRollJoint = np.loadtxt('../../20fpsFullBehaviorSampling/BallRoll/JointData.txt').astype(np.float32)
BellRingLJoint = np.loadtxt('../../20fpsFullBehaviorSampling/BellRingL/JointData.txt').astype(np.float32)
BellRingRJoint = np.loadtxt('../../20fpsFullBehaviorSampling/BellRingR/JointData.txt').astype(np.float32)
BallRollPlateJoint = np.loadtxt('../../20fpsFullBehaviorSampling/BallRollPlate/JointData.txt').astype(np.float32)
RopewayJoint = np.loadtxt('../../20fpsFullBehaviorSampling/Ropeway/JointData.txt').astype(np.float32)
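# Rescale raw joint angles from [-2.2, 2.2] rad into [-1, 1] before they are fed to the network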
jointRemap = interp1d([-2.2,2.2],[-1,1])
BallLiftJoint = jointRemap(BallLiftJoint)
BallRollJoint = jointRemap(BallRollJoint)
BellRingLJoint = jointRemap(BellRingLJoint)
BellRingRJoint = jointRemap(BellRingRJoint)
BallRollPlateJoint = jointRemap(BallRollPlateJoint)
RopewayJoint = jointRemap(RopewayJoint)
tdnnclassifier = NetworkReader.readFrom('25sigmoid/TrainUntilConv.xml')
print 'Loaded 25 sigmoid TDNN Trained Network!'
twentylstmaccdata = []
twentylstmstddata = []
twentylstmstderror = []
predictedBLLabels = []
predictedBRLabels = []
predictedBRLLabels = []
predictedBRRLabels = []
predictedBRPLabels = []
predictedRWLabels = []
print "1st Iteration, noiseless test data"
offset = 100
accuracyOverall = []
for testnumber in range(30):
start = randint(8000,9980)
x = tdnnclassifier.activate(BallLiftJoint[start:start+10].flatten())
predictedBLLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BallRollJoint[start:start+10].flatten())
predictedBRLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BellRingLJoint[start:start+10].flatten())
predictedBRLLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BellRingRJoint[start:start+10].flatten())
predictedBRRLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BallRollPlateJoint[start:start+10].flatten())
predictedBRPLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(RopewayJoint[start:start+10].flatten())
predictedRWLabels.append(argmax(x))
testnumAcc = []
behaviorAccuracyfortestnumber = []
for testnumber in range(30):
BLAcc = 100-percentError(predictedBLLabels[testnumber], [0])
BRAcc = 100-percentError(predictedBRLabels[testnumber], [1])
BRLAcc = 100-percentError(predictedBRLLabels[testnumber], [2])
BRRAcc = 100-percentError(predictedBRRLabels[testnumber], [3])
BRPAcc = 100-percentError(predictedBRPLabels[testnumber], [4])
RWAcc = 100-percentError(predictedRWLabels[testnumber], [5])
behaviorAccuracyfortestnumber.append((BLAcc + BRAcc + BRLAcc + BRRAcc + BRPAcc + RWAcc) / 6)
print behaviorAccuracyfortestnumber
print "Mean Accuracy for 30 trials:", np.mean(np.array(behaviorAccuracyfortestnumber))
print "Std Deviation for 30 trials:", np.std(np.array(behaviorAccuracyfortestnumber))
twentylstmaccdata.append(np.mean(np.array(behaviorAccuracyfortestnumber)))
twentylstmstddata.append(np.std(np.array(behaviorAccuracyfortestnumber)))
print "Length of data (iteration number):",len(twentylstmaccdata)
# ######## with noise ######
std_deviation = 0
mean = 0
while (std_deviation<=2.0):
std_deviation += 0.1
print "Gaussian Noise std deviation:",std_deviation
predictedBLLabels = []
predictedBRLabels = []
predictedBRLLabels = []
predictedBRRLabels = []
predictedBRPLabels = []
predictedRWLabels = []
offset = 100
accuracyOverall = []
for testnumber in range(30): # test for 30 times
BallLiftJoint = BallLiftJoint + np.random.normal(mean,std_deviation,(10000,10))
BallRollJoint = BallRollJoint + np.random.normal(mean,std_deviation,(10000,10))
BellRingLJoint = BellRingLJoint + np.random.normal(mean,std_deviation,(10000,10))
BellRingRJoint = BellRingRJoint + np.random.normal(mean,std_deviation,(10000,10))
BallRollPlateJoint = BallRollPlateJoint + np.random.normal(mean,std_deviation,(10000,10))
RopewayJoint = RopewayJoint + np.random.normal(mean,std_deviation,(10000,10))
start = randint(8000,9980) # randomly select any data in this range
x = tdnnclassifier.activate(BallLiftJoint[start:start+10].flatten())
predictedBLLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BallRollJoint[start:start+10].flatten())
predictedBRLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BellRingLJoint[start:start+10].flatten())
predictedBRLLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BellRingRJoint[start:start+10].flatten())
predictedBRRLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(BallRollPlateJoint[start:start+10].flatten())
predictedBRPLabels.append(argmax(x))
start = randint(8000,9980)
x = tdnnclassifier.activate(RopewayJoint[start:start+10].flatten())
predictedRWLabels.append(argmax(x))
testnumAcc = []
behaviorAccuracyfortestnumber = []
for testnumber in range(30):
BLAcc = 100-percentError(predictedBLLabels[testnumber], [0])
BRAcc = 100-percentError(predictedBRLabels[testnumber], [1])
BRLAcc = 100-percentError(predictedBRLLabels[testnumber], [2])
BRRAcc = 100-percentError(predictedBRRLabels[testnumber], [3])
BRPAcc = 100-percentError(predictedBRPLabels[testnumber], [4])
RWAcc = 100-percentError(predictedRWLabels[testnumber], [5])
behaviorAccuracyfortestnumber.append((BLAcc + BRAcc + BRLAcc + BRRAcc + BRPAcc + RWAcc) / 6)
# print behaviorAccuracyfortestnumber
print "Mean Accuracy for 30 trials:", np.mean(np.array(behaviorAccuracyfortestnumber))
print "Std Deviation for 30 trials:", np.std(np.array(behaviorAccuracyfortestnumber))
twentylstmaccdata.append(np.mean(np.array(behaviorAccuracyfortestnumber)))
twentylstmstddata.append(np.std(np.array(behaviorAccuracyfortestnumber)))
print "Length of data (iteration number)",len(twentylstmaccdata)
print twentylstmaccdata
print twentylstmstddata
for i in range(21):
twentylstmstderror.append(twentylstmstddata[i]/np.sqrt(30))
print twentylstmstderror
np.savetxt("AccuracyData.txt",twentylstmaccdata )
np.savetxt("SigmaData.txt",twentylstmstddata )
np.savetxt("ErrorBarData.txt",twentylstmstderror )
plt.errorbar(y=twentylstmaccdata, x=np.arange(0.0,2.1,0.1), yerr=twentylstmstderror, label="25 Sigmoid TDNN", linewidth=2)
plt.xlim([0.0,2.1])
plt.xlabel(r"$\sigma$")
plt.ylabel("Classification Accuracy (%)")
plt.grid()
plt.legend()
plt.show()
|
[
"dickson.neoh@gmail.com"
] |
dickson.neoh@gmail.com
|
8c7bf5bf73e79d46828cbbe6c588feb9896d8e94
|
6a8ec974e602804dadf460106ca5c5f31619a542
|
/2/aula2.py
|
b19d4eb7e6fd34ac207ded54f27128f6e89fa3c3
|
[] |
no_license
|
dudu9999/visualizacao-de-dados-com-python
|
66f793fab31fa78eae7e59d36b1d1442cea20ba7
|
f59dad2d6305d28cef86474e5934bdc317208742
|
refs/heads/master
| 2022-04-19T16:12:08.838576
| 2020-04-16T04:13:27
| 2020-04-16T04:13:27
| 256,106,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5]
y = [2, 3, 7, 1, 0]
titulo = 'Grafico de barras'
eixox = 'Eixo X'
eixoY = 'Eixo Y'
plt.title(titulo)
plt.xlabel(eixox)
plt.ylabel(eixoY)
plt.bar(x, y)
plt.show()
|
[
"ecaetanocorrea@gmail.com"
] |
ecaetanocorrea@gmail.com
|
189ffb954c3066643e48fd7004ec72b887ddc157
|
7d27411398df721a045bcd5efc237cd0ace804fc
|
/blog/migrations/0001_initial.py
|
952b81fae11ca0fc4e1bc46bf1bac130ed254c14
|
[] |
no_license
|
mrKondor/my-first-blog
|
06d680ba1092f8a3feca4af3b65ae31310b20fa9
|
bb47d2886795c5a14eb0a53e6a928976fc52694f
|
refs/heads/master
| 2020-04-02T03:27:28.790479
| 2018-11-01T18:28:42
| 2018-11-01T18:28:42
| 153,967,196
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# Generated by Django 2.0.9 on 2018-10-21 00:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"acm.96@hotmail.com"
] |
acm.96@hotmail.com
|
e93a66077853a8f7718493f7385071ed80a5b2c3
|
0df1450fbd2e1f71280df74fd1c1204a2bf0a528
|
/app.py
|
3759dcbdf8fe45ffce11b56b396e7fad96d5fc3e
|
[] |
no_license
|
sstone63/Web-Scraping-Project
|
b0c3ab9d70b50151a1e1386c0b01ae1f71c642c0
|
871b846dfd3ff7d057b0e00d3ee6f03e3708e08f
|
refs/heads/master
| 2020-03-17T18:33:09.252635
| 2018-06-05T00:04:55
| 2018-06-05T00:04:55
| 133,826,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from flask import Flask, render_template, redirect
import mars_scrape
import pymongo
from pymongo import MongoClient
app = Flask(__name__)
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.mars_database
collection = db.mars_database
@app.route("/")
def home():
scrape_dict = collection.find_one()
return render_template("index.html", dict=scrape_dict)
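# Re-run the scraper, upsert the fresh data into Mongo, then redirect back to the home page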
@app.route("/scrape")
def scrape_mars():
mars_dict = mars_scrape.scrape()
collection.update({}, {"$set": mars_dict}, upsert=True)
return redirect("http://localhost:5000/", code=302)
if __name__ == "__main__":
app.run(debug=True)
|
[
"noreply@github.com"
] |
sstone63.noreply@github.com
|
86fc8fe386aafcf37802828eabd517f028b8ae5b
|
6956bcf1822d88ada307ed5d87c6ea75c1328806
|
/src/ur5_joint_publisher/scripts/joint_publisher
|
8eea88ba4057eb2b6a3812218cc09cf700a03288
|
[] |
no_license
|
ARB92/src
|
5661188779c150a66684814258810fe8a7800ea3
|
3f7b21391c8578583e2ffbbf31888c2606c958bb
|
refs/heads/master
| 2023-09-02T12:42:42.829081
| 2021-10-30T12:46:26
| 2021-10-30T12:46:26
| 420,775,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
#! /usr/bin/env python
from ur5_joint_publisher.joint_state_publisher import main
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt):
exit(1)
|
[
"amarnath.bj@gmail.com"
] |
amarnath.bj@gmail.com
|
|
a582bef6f403387d9252b56d50ad2d8bc16c825f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/163/31391/submittedfiles/swamee.py
|
d8325375f8ac7305f31f3b0a9a2e4f997c781eba
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# -*- coding: utf-8 -*-
import math
# START YOUR CODE HERE
f=float(input('Digite f:'))
L=float(input('Digite L:'))
Q=float(input('Digite Q:'))
DeltaH=float(input('Digite DeltaH:'))
v=float(input('Digite v:'))
g=9.81
e=0.000002
D=((8*f*L*Q**2)/(math.pi**2*g*DeltaH))**0.2
Rey=(4*Q)/(math.pi*D*v)
k=0.25/(math.log10((e/(3.7*D))+(5.74)/(Rey**0.9)))**2
print('D: %4f'%D)
print('Rey:%.4f'%Rey)
print('k:%.4f' %k)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5dda73c1dd335e8521a51e09fb8e9ef5800ac557
|
48eee46f6e852ff15bcb87c9e54991cf64f93ad6
|
/src/main.py
|
bfdae7067145cdde2adabe0b8033502afb1ad78c
|
[] |
no_license
|
lxjlu/learning-in-games
|
9e5645595f6b7ff2731e21d6f5c0f95cf8b84e7e
|
3c0f1472d5e41ab2cda7e9e0f5c5afc4f7e3ce4b
|
refs/heads/master
| 2023-09-01T06:51:18.875732
| 2021-01-09T06:20:55
| 2021-01-09T06:20:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,831
|
py
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import torchvision
from loaders import load
import time
import pandas
import sys
from ast import literal_eval
import numpy as np
from torch import autograd
import os
from tensorboardX import SummaryWriter
from uuid import uuid4
from tqdm.auto import tqdm
from helpers import add_argument, fstr, save_dict
import defaults
class Net(nn.Module):
""" default pytorch example """
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
class Perturb(nn.Module):
""" constant perturbation """
def __init__(self, shape):
super(Perturb, self).__init__()
self.delta = nn.Parameter(torch.zeros(shape))
def forward(self):
return self.delta
def config(**kwargs):
# Training settings
parser = argparse.ArgumentParser(description='learning in games')
train = parser.add_argument_group('train')
test = parser.add_argument_group('test')
run = parser.add_argument_group('run')
add_argument(train, 'seed', 1, 'random seed', 'S')
add_argument(train, 'dataset', 'mnist', 'dataset', choices=['mnist'])
add_argument(train, 'batch_size', 200, 'input batch size for training', 'N')
add_argument(train, 'test_batch_size', 1000, 'input batch size for testing', 'N')
add_argument(train, 'epochs', 20, 'number of epochs to train', 'N')
add_argument(train, 'lr1', 0.2, 'learning rate for classifier', 'LR')
add_argument(train, 'lr2', 1.0, 'learning rate for adversary', 'LR')
add_argument(train, 'lr_rate1', 1e-5, 'learning rate decay const for classifier', 'M')
add_argument(train, 'lr_class1', '1/t', 'learning rate decay function', 'FN', choices=['1/t','1/tlogt'] )
add_argument(train, 'lr_rate2', 3e-6, 'learning rate decay const for adversary', 'M')
add_argument(train, 'lr_class2', '1/tlogt', 'learning rate decay function', 'FN', choices=['1/t','1/tlogt'] )
add_argument(train, 'perturb_reg', 0.000001, 'regularization on adversarial perturbation', 'REG')
add_argument(run, 'no_cuda', False, 'disables CUDA training')
add_argument(run, 'save_model', True, 'For Saving the current Model')
add_argument(run, 'log_interval', 10, 'how many batches to wait before logging training status', 'N')
add_argument(run, 'datadir', 'data', 'directory of dataset')
add_argument(run, 'storedir', defaults.STORE_DIR, 'directory to store checkpoints')
add_argument(run, 'epoch', 1, 'Epoch to resume running at')
add_argument(run, 'log_smooth', 0.5, 'logging smoothness parameter')
add_argument(run, 'last_iter', -1, 'Last iteration')
add_argument(test, 'adv_epsilon', 1., 'magnitude of adversarial perturbation')
add_argument(test, 'adv_norm', 'inf', 'norm of adversarial perturbation', \
choices=['abs', 'l2','inf'])
parser.set_defaults(**kwargs)
try:
# hack for detecting a jupyter lab notebook
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
args = parser.parse_args('')
except:
args = parser.parse_args()
return args
def init(args, device, shape, last_iter=-1):
model = Net().to(device)
perturb = Perturb(shape).to(device)
def lr(t, mode='1/t'):
if mode == '1/t': return t
elif mode == '1/tlogt': return t*np.log(t+1)
        else: raise NotImplementedError
opt1 = optim.SGD(model.parameters(), lr=args.lr1 )
lr1 = lambda t: args.lr1/(args.lr_rate1*lr(t, mode=args.lr_class1) + 1)
sch1 = optim.lr_scheduler.LambdaLR(opt1, lr1, last_epoch=-1)#args.last_iter)
opt2 = optim.SGD(perturb.parameters(), lr=args.lr2)
lr2 = lambda t: args.lr2/(args.lr_rate2*lr(t, mode=args.lr_class2) + 1)
sch2 = optim.lr_scheduler.LambdaLR(opt2, lr2, last_epoch=-1)#args.last_iter)
return (model, perturb), (opt1, opt2), (sch1, sch2)
def loss(model, delta, batch):
(data, target) = batch
output = model(data + delta)
return F.nll_loss(output, target)
def train(state, args, models, device, loader, optimizers, schedulers, logger):
model, perturb = models
model.train()
iterator = tqdm(enumerate(loader), total=len(loader))
f1_smooth = 0
f2_smooth = 0
for batch_idx, (data, target) in iterator:
batch = data.to(device), target.to(device)
data, target = batch
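        # Player 1 (classifier): gradient step that decreases the perturbed classification loss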
optimizers[0].zero_grad()
delta = perturb()
f1 = loss(model, delta, batch)
f1.backward()
optimizers[0].step()
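        # Player 2 (adversary): gradient step that increases the loss, regularised by the squared norm of the perturbation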
optimizers[1].zero_grad()
delta = perturb()
perturb_norm = torch.sum(delta*delta)/2
f2 = -loss(model, delta, batch) + args.perturb_reg*perturb_norm
f2.backward()
optimizers[1].step()
f1_smooth = (1-args.log_smooth)*f1_smooth + args.log_smooth*f1
f2_smooth = (1-args.log_smooth)*f2_smooth + args.log_smooth*f2
out = {'loss0':f1, 'loss1':f2, 'loss_sum':f1+f2, 'norm_delta':perturb_norm}
if batch_idx % args.log_interval == 0:
logger.append(state['iter'], out)
desc = (f'{args.loop_msg} | Loss: {f1_smooth:6.3f},{f2_smooth:6.3f} | norm(delta):{perturb_norm:8.5f} ||')
iterator.set_description(desc)
iterator.refresh()
schedulers[0].step()
schedulers[1].step()
state['iter'] += 1
def test(state, args, model, device, test_loader, logger):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
iterator = tqdm(enumerate(test_loader), total=len(test_loader))
for idx, (data, target) in iterator:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
desc = (f'Test | Loss: {test_loss:10.3f}, {correct}/{len(test_loader.dataset)}({correct/((idx+1)/test_loader.batch_size*len(test_loader.dataset))}%)')
iterator.set_description(desc)
iterator.refresh()
test_loss /= len(test_loader.dataset)
out = {'test_accuracy': correct/len(test_loader.dataset)}
logger.append(state['iter'], out)
return out
def test_adv(state, args, model, device, test_loader, logger=None, loop_msg='Adversarial Test'):
model.eval()
iterator = tqdm(enumerate(test_loader), total=len(test_loader))
correct = 0
adv_correct = 0
for idx, (data, target) in iterator:
data, target = data.to(device), target.to(device)
perturb = torch.tensor(torch.zeros(*test_loader.shape), requires_grad=True, device=device)
output = model(data + perturb)
loss = F.nll_loss(output, target)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
Dperturb_loss = autograd.grad(loss, perturb)[0]
        if args.adv_norm == 'inf':
Dperturb_loss = torch.sign(Dperturb_loss)
elif args.adv_norm == 'l2':
Dperturb_loss /= torch.norm(Dperturb_loss)
Dperturb_loss *= torch.norm(torch.ones(*Dperturb_loss.shape))
elif args.adv_norm == 'noise':
Dperturb_loss = torch.rand_like(Dperturb_loss)
else:
            raise NotImplementedError()
adv_data = data + args.adv_epsilon*Dperturb_loss
adv_output = model(adv_data)
adv_pred = adv_output.argmax(dim=1, keepdim=True)
adv_correct += adv_pred.eq(target.view_as(adv_pred)).sum().item()
desc = (f'Test ({args.adv_epsilon:.2f}-{args.adv_norm}) | Loss: {loss:8.3f}, {adv_correct}/{len(test_loader.dataset)}')
iterator.set_description(desc)
accuracy = correct/len(test_loader.dataset)
adv_accuracy = adv_correct/len(test_loader.dataset)
out = {'test_accuracy': accuracy, 'adv_accuracy': adv_accuracy, 'adv_data': adv_data}
if logger: logger.append(state['iter'], out)
return out
class Logger():
def __init__(self, writer=None):
self.df = pandas.DataFrame()
self.writer = writer
def append(self, iter, other):
self.df = self.df.append(other, ignore_index=True)
if self.writer:
for arg,val in other.items():
self.writer.add_scalar(arg, val, iter)
def to_pickle(self, path):
self.df.to_pickle(path)
def eval(exp_dir, epoch):
with open(os.path.join(exp_dir, 'args.txt'), 'r') as f:
kwargs = literal_eval(f.readline())
args = config(**kwargs)
logger = Logger()
print(f"lr1={args.lr1} lr2={args.lr2}")
(train_loader, test_loader), device = load(args)
    models, optimizers, schedulers = init(args, device, shape=train_loader.shape)
state = {"iter": np.nan}
save_model = os.path.join(exp_dir, f'save{epoch:03d}.pt')
save_perturb = os.path.join(exp_dir, f'save_perturb{epoch:03d}.pt')
out = {}
try:
models[0].load_state_dict(torch.load(save_model))
models[1].load_state_dict(torch.load(save_perturb))
out = test(state, args, models[0], device, test_loader, logger)
delta = [_ for _ in models[1].parameters()][0]
img = torchvision.utils.make_grid(delta, normalize=True)
torchvision.utils.save_image(img, os.path.join(exp_dir, f'perturb{epoch:03d}.png'))
except:
print("model not found")
return dict(lr1=args.lr1, lr2=args.lr2, **out)
def main(exp_id=str(uuid4())):
state = dict(iter=0, start_time=time.time())
args = config(exp_id=exp_id)
# try to make the store dir (if it doesn't exist)
exp_dir = os.path.join(args.storedir, exp_id)
try:
os.makedirs(exp_dir)
except OSError as e:
        print(f"Directory exists ({e})")
writer = SummaryWriter(exp_dir)
logger = Logger(writer)
(train_loader, test_loader), device = load(args)
models, optimizers, schedulers = init(args, device, shape=train_loader.shape, last_iter=args.last_iter)
for epoch in range(1, args.epochs + 1):
args.epoch = epoch
args.last_iter = epoch*len(train_loader)
args.loop_msg = fstr(defaults.LOOP_MSG, args=args)
train(state, args, models, device, train_loader, optimizers, schedulers, logger)
test(state, args, models[0], device, test_loader, logger)
logger.to_pickle(os.path.join(args.storedir, exp_id, 'store.pkl'))
if args.save_model:
for model, savefile in zip(models, defaults.SAVE_FILES):
torch.save(model.state_dict(), os.path.join(exp_dir, fstr(savefile, args=args)))
save_dict(vars(args), os.path.join(exp_dir, defaults.ARG_FILE))
if __name__ == '__main__':
main()
|
[
"bchasnov@uw.edu"
] |
bchasnov@uw.edu
|
c44da2b7fa7cc87fdbd0202f28766b04a22d2ee1
|
3580970221ec8fe048c2af4bdfdab8244396eed8
|
/b45.py
|
b60ec7c50721e96ab2e715e84c8d7a207e5ed3b1
|
[] |
no_license
|
Tinasamayasundaram/pythonguvi
|
ee095083f30d559feeb8ecff020f4b72b9b71094
|
ad6fa4e97c248f7feb5c84fbd3226afe2d1e5a46
|
refs/heads/master
| 2020-05-23T02:02:06.940006
| 2019-06-10T10:46:03
| 2019-06-10T10:46:03
| 186,595,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
nt=input()
print(len(nt))
|
[
"noreply@github.com"
] |
Tinasamayasundaram.noreply@github.com
|
684fb374093b002cc88140d8235ce65434f3a5b0
|
afc8923d43a91c515aa748300cca723eb1553435
|
/train.py
|
1c042688310f82413d0f4cf36d2ae551862343e3
|
[
"Apache-2.0"
] |
permissive
|
ruivieira/ccfd-seldon-model
|
a26c3efdf69049c962061a07a0ab515c562b513e
|
b92092fcbf9d799070a5d0088ae9b94e7dc694cb
|
refs/heads/master
| 2022-07-08T16:18:05.388348
| 2020-02-21T16:48:28
| 2020-02-21T16:48:28
| 241,993,279
| 0
| 1
|
Apache-2.0
| 2022-06-22T01:12:20
| 2020-02-20T21:25:49
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
import pandas as pd
import joblib
from sklearn.ensemble import RandomForestClassifier
df = pd.read_csv('data/creditcard-sample10k.csv')
features_train = df.sample(frac=0.75, random_state=100)
features_test = df[~df.index.isin(features_train.index)]
drop_time_class = ['Time','Class','V1','V2','V5','V6','V7','V8','V9','V13','V15','V16','V18','V19','V20','V21','V22','V23','V24','V25','V26','V27','V28']
drop_class=['Class']
features_train = features_train.loc[:, ~features_train.columns.str.contains('^Unnamed')]
features_test = features_test.loc[:, ~features_test.columns.str.contains('^Unnamed')]
target_train = features_train['Class']
target_test = features_test['Class']
features_train = features_train.drop(drop_time_class, axis=1)
features_test = features_test.drop(drop_time_class, axis=1)
model = RandomForestClassifier(n_estimators=200, max_depth=6, n_jobs=10, class_weight='balanced')
model.fit(features_train, target_train.values.ravel())
pred_train = model.predict(features_train)
pred_test = model.predict(features_test)
pred_train_prob = model.predict_proba(features_train)
pred_test_prob = model.predict_proba(features_test)
print("Number of features")
print(len(model.feature_importances_))
#save mode in filesystem
joblib.dump(model, 'model.pkl')
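# Minimal reload sketch (not part of the original script): it assumes the 'model.pkl'
# written above and a frame with the same feature columns as features_test.
reloaded = joblib.load('model.pkl')
print("Reloaded model test accuracy:", reloaded.score(features_test, target_test))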
|
[
"ruidevieira@googlemail.com"
] |
ruidevieira@googlemail.com
|
754595069b421f37f008de11edd6b533b8242c3a
|
26aa9caa09bc98fd696bcc43a1ea5db39a46b5f6
|
/Techcators.py
|
93122e4982a73fdce822528daa75afc3c19d4840
|
[] |
no_license
|
JunXHuang/ForexTrading
|
b52e5cbca5ac3744dd4249c3ea43b915c8a5d415
|
26c2392fa953c1fdc48fdcb63de75b3eea0728a1
|
refs/heads/master
| 2021-01-25T08:01:32.737963
| 2017-06-08T03:13:11
| 2017-06-08T03:13:11
| 93,702,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
def MovingAverage(data,start,end):
sum = 0
for i in range(start,end):
sum += float(data['candles'][i]['mid']['c'])
return sum/(end-start)
# need to fix calculations
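# For reference, Wilder's RSI is RSI = 100 - 100/(1 + RS), where RS is the smoothed
# average gain divided by the smoothed average loss over the look-back period.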
def RSI(data,period):
gain = 0
loss = 0
RS = 0
for i in range(1,period-1):
temp = float(data['candles'][i]['mid']['c'])-float(data['candles'][i-1]['mid']['c'])
if temp > 0:
gain += temp
else:
loss += abs(temp)
gain /= (period-2)
loss /= (period-2)
currentCandle = float(data['candles'][period-1]['mid']['c'])-float(data['candles'][period-2]['mid']['c'])
if currentCandle > 0:
gain = (gain * (period-3) + currentCandle) / (period-2)
loss = (loss * (period-3)) / (period-2)
else:
if loss == 0 and currentCandle == 0:
return 100.0
gain = (gain * (period-3)) / (period-2)
loss = (loss * (period-3) - currentCandle) / (period-2)
RS = gain/loss
return 100 - (100 / ( 1 + RS ))
# Example calls (Python 3 syntax; getCandles is assumed to be supplied by the calling script):
print(str(MovingAverage(getCandles(50, 'H1'), 50)))
print(str(RSI(getCandles(16, 'H1'), 16)))
|
[
"junxiong.huang@stonybrook.edu"
] |
junxiong.huang@stonybrook.edu
|
9e36dc60c6d0a9bf0a5dcf4a23ded56b1cbeb51c
|
2dd2a9e695a81c1a880443997a781b779ab8fc7b
|
/login-system/project/settings.py
|
5f87ebd7da2adcec22ba20de773425e342b6cc07
|
[
"MIT"
] |
permissive
|
joao-lanzarini/django-login-system
|
ef3323d728ed17ed5721341183923a13286ce270
|
dc29006e90381e3eac351394b85f9dc27ccd0853
|
refs/heads/main
| 2023-07-05T06:26:29.561700
| 2021-08-24T05:17:03
| 2021-08-24T05:17:03
| 398,180,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-+h%i+l%0bj7xdsl1c@o06kx@9&c$n1g*(g4od@*v3#0_7+s&*1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"joaovitorlsilva02@gmail.com"
] |
joaovitorlsilva02@gmail.com
|
95f4891c127e641c0a2fa1cf88e47aea8736a7b1
|
2c19d8869972853a76c4d490e35bd819f569c997
|
/Keras/cnn/test.py
|
ea913d0cd81ea290c4292954436fa584af7cfd96
|
[] |
no_license
|
ngamc/Python
|
e08eb826d8b306399f5df9a2c06e15ab31f44f29
|
748134d8782eec5f5eebd38302466bfb02759527
|
refs/heads/master
| 2020-03-16T06:17:29.523789
| 2019-10-31T09:03:52
| 2019-10-31T09:03:52
| 132,551,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 13:05:38 2018
@author: user
"""
import numpy as np
#y = [0,2]
#y = np.asarray(y)
#
#x = [[0,0],
# [1,1],
# [2,2],
# [3,3],
# [4,4]]
#x = np.asarray(x)
#
#print(x.shape)
#
#z = x[[1,2],:]
#print(z)
a = np.array(((1,2),(2,3)))
print(a.shape)
print(a)
a=a.reshape(-1, len(a))
print(a.shape)
print(a)
|
[
"ngamc@yahoo.com"
] |
ngamc@yahoo.com
|
9caa1a3570255081263c2f3ab4241e0d65cdc52c
|
4238ba3d850e63bcfa8f1cc812d4e9f5980667d4
|
/75_validate_ipaddress.py
|
602b00a07f6777afb4475e53153b26b64b0e72b6
|
[] |
no_license
|
gautamgitspace/leetcode_30-day_challenge
|
ad8d9e6ae7ba9c3995a614dcfae762eb9ffd460b
|
4ec4f9fbb0ef07ea13207654a619cfdb709cc78c
|
refs/heads/master
| 2021-05-21T01:14:46.302340
| 2020-08-06T23:03:34
| 2020-08-06T23:03:34
| 252,481,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
class Solution(object):
def validIPAddress(self, IP):
def isIPv4(s):
try: return str(int(s)) == s and 0 <= int(s) <= 255
except: return False
def isIPv6(s):
if len(s) > 4: return False
try: return int(s, 16) >= 0 and s[0] != '-'
except: return False
if IP.count(".") == 3 and all(isIPv4(i) for i in IP.split(".")):
return "IPv4"
if IP.count(":") == 7 and all(isIPv6(i) for i in IP.split(":")):
return "IPv6"
return "Neither"
|
[
"agautam2@buffalo.edu"
] |
agautam2@buffalo.edu
|
c8bb075383c20f6ff780f51231f37fbf42348752
|
c4dce67928657703f7d0a45b2e67ae4f96106ad8
|
/Python-Crash-Course/第一部分 基础知识/第04章 操作列表/4-07 3的倍数.py
|
2562203c8ab934d37af12a21740f010f2c7c17e1
|
[] |
no_license
|
YilK/Notes
|
fafd30ff9ddf1ebfd22c0c521a5fad5df9f31bb0
|
c14cefaf8e37c66afeaf0a77ac13cdce59e06fbe
|
refs/heads/master
| 2023-01-29T11:24:56.781941
| 2020-12-08T13:29:28
| 2020-12-08T13:29:28
| 155,809,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
'''
Create a list containing the numbers from 3 through 30 that are divisible by 3;
then use a for loop to print every number in the list.
'''
numbers=list(range(3,30+1,3))
for number in numbers:
print(number)
|
[
"huangjk0311@126.com"
] |
huangjk0311@126.com
|
4d25102b654df31a85fd539fb3276b8bf8ff225b
|
1a3e29f8c9c570ece2a2514da5c4e7eda5edb581
|
/config.py
|
aa6a5e4f369d4a3813a8bb2492e3f3dac24eec4e
|
[] |
no_license
|
kdougan/sheep-pygame
|
76eb85e376c17b58c2d14488c8a341421c83e0ee
|
c6e56d79b1866114d47c014d7c81c3518b190737
|
refs/heads/main
| 2023-07-14T00:54:37.663029
| 2021-08-30T07:24:30
| 2021-08-30T07:24:30
| 401,252,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
gravity = 600
window_width = 1280
window_height = 720
window_size = (window_width, window_height)
display_padding = 16
ground_height = 16
|
[
"kdougan@apple.com"
] |
kdougan@apple.com
|
b8d8f163ef156d4a23922276d2c41b6e5971d539
|
b7aadb2e91566630c563c7a626f190767918dd60
|
/2020/day03.py
|
bfeffc9c3b875e85d5159a3a45608850aabc231c
|
[] |
no_license
|
kratsg/advent-of-code
|
54a66e3959bca340663c4bb8c8e5c0f1aaa088f9
|
672dfa34e811030c8d67e5466d1be218395e1ff3
|
refs/heads/master
| 2023-08-09T10:48:52.046121
| 2023-07-26T02:04:22
| 2023-07-26T02:04:22
| 225,422,271
| 1
| 0
| null | 2023-07-26T02:04:23
| 2019-12-02T16:40:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,868
|
py
|
import numpy as np
from io import StringIO
def process_input(data):
myfile = StringIO(data.replace(".", "0").replace("#", "1"))
return np.genfromtxt(myfile, delimiter=1, dtype=int)
def generate_steps(mapdata, start=None, step=None):
indices = []
start = start if start is not None else np.array([0, 0])
step = step if step is not None else np.array([1, 3]) # right 3, down 1
position = start
# while less than total height of map
while position[0] < mapdata.shape[0]:
position[1] = position[1] % mapdata.shape[1]
indices.append(tuple(position))
position += step
# skip first step
return indices[1:]
def get_encounters(mapdata, start=None, step=None):
steps = generate_steps(mapdata, start=start, step=step)
indices = tuple(zip(*steps))
return mapdata[indices]
if __name__ == "__main__":
from aocd.models import Puzzle
test_vals = process_input(
"""..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
)
encounters = get_encounters(test_vals, step=np.array([1, 3]))
assert len(encounters) == 10
assert encounters.tolist() == [0, 1, 0, 1, 1, 0, 1, 1, 1, 1]
assert np.sum(encounters) == 7
puz = Puzzle(2020, 3)
data = process_input(puz.input_data)
encounters = get_encounters(data, step=np.array([1, 3]))
puz.answer_a = np.sum(encounters)
print(f"Part 1: {puz.answer_a}")
test_encounters_1_1 = get_encounters(test_vals, step=np.array([1, 1]))
test_encounters_3_1 = get_encounters(test_vals, step=np.array([1, 3]))
test_encounters_5_1 = get_encounters(test_vals, step=np.array([1, 5]))
test_encounters_7_1 = get_encounters(test_vals, step=np.array([1, 7]))
test_encounters_1_2 = get_encounters(test_vals, step=np.array([2, 1]))
assert np.sum(test_encounters_1_1) == 2
assert np.sum(test_encounters_3_1) == 7
assert np.sum(test_encounters_5_1) == 3
assert np.sum(test_encounters_7_1) == 4
assert np.sum(test_encounters_1_2) == 2
assert (
np.sum(test_encounters_1_1)
* np.sum(test_encounters_3_1)
* np.sum(test_encounters_5_1)
* np.sum(test_encounters_7_1)
* np.sum(test_encounters_1_2)
== 336
)
encounters_1_1 = get_encounters(data, step=np.array([1, 1]))
encounters_3_1 = get_encounters(data, step=np.array([1, 3]))
encounters_5_1 = get_encounters(data, step=np.array([1, 5]))
encounters_7_1 = get_encounters(data, step=np.array([1, 7]))
encounters_1_2 = get_encounters(data, step=np.array([2, 1]))
puz.answer_b = (
np.sum(encounters_1_1)
* np.sum(encounters_3_1)
* np.sum(encounters_5_1)
* np.sum(encounters_7_1)
* np.sum(encounters_1_2)
)
print(f"Part 2: {puz.answer_b}")
|
[
"kratsg@gmail.com"
] |
kratsg@gmail.com
|
b2c6fa6db2c42c325e241c0a6d603b7d0553ef5b
|
9fba4dbe5d932afc5cd3df017bd43d75d199131f
|
/PDE_Models/applications/nonlinear/scalability_data/pWGF_31x31.py
|
397c9a597b9013b5abd5eeba0a5d146bf2d2b320
|
[] |
no_license
|
daveb-dev/pWGD
|
2ec413fb6fe2b40b802b1a7d2633e491923442c2
|
fea18d68f69f13014c66f7f9842279085eac2a75
|
refs/heads/master
| 2023-03-14T00:09:25.799270
| 2021-02-12T00:40:12
| 2021-02-12T00:40:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
from model_lognormal_31x31 import *
import time
# check the stein/options to see all possible choices
options["type_optimization"] = "gradientDescent"
options["is_projection"] = True
options["tol_projection"] = 1.e-2
options["type_projection"] = "fisher"
options["is_precondition"] = False
options["type_approximation"] = "fisher"
options["coefficient_dimension"] = 256
options["add_dimension"] = 0
options["number_particles"] = 64
options["number_particles_add"] = 0
options["add_number"] = 0
options["add_step"] = 5
options["add_rule"] = 1
options["type_scaling"] = 1
options["type_metric"] = "posterior_average" # posterior_average
options['WGF'] = True
options["type_Hessian"] = "lumped"
options["low_rank_Hessian"] = False
options["rank_Hessian"] = 256
options["rank_Hessian_tol"] = 1.e-2
options["low_rank_Hessian_average"] = False
options["rank_Hessian_average"] = 256
options["rank_Hessian_average_tol"] = 1.e-2
options["gauss_newton_approx"] = True # if error of unable to solve linear system occurs, use True
options["max_iter"] = 200
options["step_tolerance"] = 1e-7
options["step_projection_tolerance"] = 1e-3
options["line_search"] = True
options["search_size"] = 1e-1
options["max_backtracking_iter"] = 10
options["cg_coarse_tolerance"] = 0.5e-2
options["print_level"] = -1
options["save_number"] = 20
options["plot"] = True
# generate particles
particle = Particle(model, options, comm)
# evaluate the variation (gradient, Hessian) of the negative log likelihood function at given particles
variation = Variation(model, particle, options, comm)
# evaluate the kernel and its gradient at given particles
kernel = Kernel(model, particle, variation, options, comm)
t0 = time.time()
solver = GradientDescent(model, particle, variation, kernel, options, comm)
solver.solve()
print("GradientDecent solving time = ", time.time() - t0)
|
[
"peng@ices.utexas.edu"
] |
peng@ices.utexas.edu
|
26c99f496eb2c2b0d4c205d7b100328d1b9ace92
|
f21588fde88847e303324c6ecc298a14f8b6d463
|
/evaluate/evaluate.py
|
644c50e087bc8a71f5abe89659ffed0674606aeb
|
[] |
no_license
|
sdwldchl/ccks2021FEE
|
8638b0fba5c5281ee8270fab6c62951e8542159c
|
5ac02f1e4b0774e0751fd8af26ed13e9b752c5d5
|
refs/heads/master
| 2023-06-20T15:19:32.226145
| 2021-07-21T12:19:53
| 2021-07-21T12:19:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,997
|
py
|
import json
result_type = ["reason_region", "reason_product", "reason_industry", "result_region",
"result_product", "result_industry"]
ans_path = '../data/dev.json'
dev_path = '../argument/2021-07-17_18-36-15/finnalAns.json'
pred = 0
right = 0
total = 0
id_idx = {}
f = open(ans_path, encoding="utf-8")
ans = [json.loads(line.strip()) for line in f]
for i in range(len(ans)):
id_idx[ans[i]['text_id']] = i
g = open(dev_path, encoding="utf-8")
dev = [json.loads(line.strip()) for line in g]
for dev_line in dev:
id = dev_line['text_id']
if id in id_idx.keys():
ans_line = ans[id_idx[id]]
d = {}
a = {}
for r in dev_line['result']:
rt = r['result_type'] + '#' + r['reason_type']
if rt not in d.keys():
d[rt] = {}
for t in result_type:
d[rt][t] = set()
for t in result_type:
for it in r[t].split(','):
d[rt][t].add(it)
for r in ans_line['result']:
rt = r['result_type'] + '#' + r['reason_type']
if rt not in a.keys():
a[rt] = {}
for t in result_type:
a[rt][t] = set()
for t in result_type:
for it in r[t].split(','):
a[rt][t].add(it)
for rt in d.keys():
if rt in a.keys():
for t in result_type:
tmp = a[rt][t] & d[rt][t]
right += len(tmp)
total += len(a[rt][t])
pred += len(d[rt][t])
else:
for t in result_type:
pred += len(d[rt][t])
else:
for r in dev_line['result']:
for t in result_type:
pred += len(r[t].split(','))
print(right, pred, total)
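# Micro-averaged scores over all argument items: precision = right / pred, recall = right / total,
# and F1 is their harmonic mean.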
p1 = right / pred
r1 = right / total
f1 = 2.0 * p1 * r1 / (p1 + r1)
log = f'p: {p1:.6f}, r: {r1:.6f}, f1: {f1:.6f}'
print(log)
|
[
"2667002321@qq.com"
] |
2667002321@qq.com
|
e2802817ffdd2a2e11cae9b2e5011ab177b160d6
|
9e78da3e2fd3dd47405ec5c4bb12fd4e9937d711
|
/lcd.py
|
437e3f54df1aa8433f9d801224ae19bd1ef14f3f
|
[] |
no_license
|
cmdoffing/LCD
|
23959eaac77332c86ae408a17a74a2d8599ddca5
|
1be464e42f4460d78f1e13c5527f111c075bacaf
|
refs/heads/master
| 2021-12-14T03:07:18.783580
| 2021-11-25T23:13:00
| 2021-11-25T23:13:00
| 194,544,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
# This program displays LCD digits, given a command line size
# parameter (s = param 1) and a number to display (param 2).
# Let s be the number of horizontal segments, then each LCD digit will
# occupy s + 2 positions and 2s + 3 vertical rows.
# There must be one column of blanks between two digits.
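# For example, with s = 2 each digit is s + 2 = 4 columns wide and 2s + 3 = 7 rows tall,
# so the five digits of "12345" plus the four separating blank columns span 24 columns.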
# This file handles the command line interface.
# Sample input line: lcd.py 2 12345
from lcdNumber import LcdNumber
import sys
lcdNum = LcdNumber( int( sys.argv[1]), sys.argv[2] )
displayDigits = lcdNum.lcdDisplay()
print( ''.join( displayDigits ))
|
[
"noreply@github.com"
] |
cmdoffing.noreply@github.com
|
0c1887a63b6181781a9c9f8719bae7184afab5a4
|
907c9554cf735beaad8f48dc68feaddcb9dddfa5
|
/core/arg_parser.py
|
9618853ccdb08af98f9aa194599b970fab4dd4e6
|
[] |
no_license
|
gmum/3d-point-clouds-autocomplete
|
9c6f8106304cad5eff43f93eec63887e54acf632
|
13ec26e10da7b0ad2f71d4c97016fbb8499b0cff
|
refs/heads/master
| 2023-06-25T07:37:08.472315
| 2021-07-23T08:04:10
| 2021-07-23T08:04:10
| 309,166,663
| 26
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
import argparse
import json
def parse_config():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', default=None, type=str,
help='config file path')
args = parser.parse_args()
config = None
if args.config is not None and args.config.endswith('.json'):
with open(args.config) as f:
config = json.load(f)
assert config is not None
return config
|
[
"art.kasymov@gmail.com"
] |
art.kasymov@gmail.com
|
bfaa5ea42194624e380a7146663e0fbeaab063c2
|
942cf424db48f26cd90c4151054a8a3f5dec50d6
|
/t_ls_np2.py
|
04f5dd6f929fdc766006eaaea3dbf3a9fe608c8a
|
[] |
no_license
|
Ericgig/Remez
|
fea6059553bf36fea755c7943865047e4991a994
|
e82ed0edd88dc212d016c72f6ad84094e437179f
|
refs/heads/master
| 2020-12-31T04:55:48.055135
| 2016-05-20T19:35:29
| 2016-05-20T19:35:29
| 59,122,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,334
|
py
|
#Remez make table based on least square
#
# For x^p, -1<p<1, x_min<=x<=x_max find
# a0,a[i],b[i] (i=1:k) so that a0+sum(a[i]/(x+b[i])) ~ x^p
# within err
#
# Input
# p : power ,-1 < p < 1
# x_min : inferior limit of the of the approximation , > 0
# x_max : superior limit of the of the approximation , > x_min
# k : number of term of the approximation , >= 0
#
# Output
# a0, a[i], b[i] : terms of the approximation
# err : maximum error of the best fit
#
# 1) scipy least-square
# => Can't get double precision on fit, is sattisfied with the convergence too easily.
# Want a maximum absolute error of at most 10e-15 on any point in the range
#
# try 2) less points to fit but stricter condition
# => Not enough control with least_square, converge before condition
# 3) Change the coefficient : d = a/b
#
#
#
#
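# Note: as implemented in f_approx below, the fitted form uses the rescaled coefficients
# d[i] = a[i]/b[i], i.e. y(x) = coef[0] + sum_i coef[i]/(1 + x/coef[k+i]),
# which is the same family as a0 + sum_i a[i]/(x + b[i]) since a/(x+b) = (a/b)/(1 + x/b).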
import numpy as np
import scipy as sp
from scipy.optimize import leastsq
type = np.float64
#make the function
def make_fx_lin(x_min,x_max,p,N):
#X = x_min+np.arange(N,dtype=type)*((x_max-x_min)/(N-1))
X = np.linspace(x_min, x_max, N, endpoint=True, dtype=type)
Y = X**p
return X,Y
def make_fx_log(x_min,x_max,p,N):
#step = np.exp(np.log(x_max/x_min)/(N-1))
#X = x_min*step**np.arange(N,dtype=type)
X = np.geomspace(x_min, x_max, int(N), endpoint=True, dtype=type)  # geometric spacing between x_min and x_max (logspace would treat the bounds as exponents)
Y = X**p
return X,Y
def make_fx(x_min,x_max,p,N):
x1,y1 = make_fx_lin(x_min,x_max,p,N//2)  # mix linearly spaced sample points...
x2,y2 = make_fx_log(x_min,x_max,p,N//2)  # ...with log-spaced ones
X = np.concatenate((x1,x2))
Y = np.concatenate((y1,y2))
return X,Y
def f_approx(x, coef): #x vector no scalar
k = (len(coef)-1)//2  # integer so it can be used for slicing below
y = x*0+coef[0]
for i in range(len(x)):
y[i] += np.sum(coef[1:k+1]/(1+x[i]/coef[k+1:]))
return y
def err_func(coef, x, y):
return (f_approx(x, coef)-y)
def D_f_app(coef,x,y):
k = int((len(coef)-1)/2)
derr = []
for p in x:
d = np.zeros(2*k+1, dtype=type)
d[0] = 1
d[1:k+1] = 1/(1+p/coef[k+1:])
d[k+1:] = p*coef[1:k+1]/(p+coef[k+1:])**2
derr.append(d)
return derr
def sort_coef(coef):
k = int((len(coef)-1)/2)
a = coef[1:k+1]
b = coef[k+1:]
ind = np.argsort(b)
coef[1:k+1] = a[ind]
coef[ k+1:] = b[ind]
def run_min(x_min,x_max,p,N,coef):
X,Y = make_fx(x_min,x_max,p,N)
coef_s,success=leastsq(err_func,coef,args=(X,Y), Dfun = D_f_app,ftol=1.49012e-15, xtol=1.49012e-15, gtol=0.0)
sort_coef(coef_s)
coef = coef_s
X,Y = make_fx(x_min,x_max,p,1000)
error = max([np.max(err_func(coef,X,Y)),-np.min(err_func(coef,X,Y))])
return success, error, coef
def main(p, k, x_min, x_max):
#set initial guess
coef = np.zeros(2*k+1,dtype=type)
coef[0:k+1] = np.arange(k+1, dtype=type)+.5
coef[k+1:] = (np.arange(k, dtype=type)+1)/k
f = 3*k
tries = 0
error = 1000
while ((error >= 1e-15) and (tries < 20)):
tries += 1
success, error, coef = run_min(x_min,x_max,p,int(f),coef)
f += 2
if(success < 5):f *= 1.2
print(error)
print(coef)
main(0.125, 20, 0.001, 10)
|
[
"eric.giguere@calculquebec.ca"
] |
eric.giguere@calculquebec.ca
|
27aafa81021b3f5e4fabb032465d5e3b732b43d0
|
65dd042bb78eb771d0d78a30c0d10a503d7ab99b
|
/disentangled/metric/mig_batch.py
|
65bdc764bc1dde2a80625f08dc012b162bdce734
|
[] |
no_license
|
eageby/disentangled-representations
|
35d3d778ba90680939dca92cee759556449f96eb
|
60e3d2334d85f35828d8765f934891d995f8bb9f
|
refs/heads/master
| 2023-08-31T16:16:50.742792
| 2021-10-19T12:06:07
| 2021-10-19T12:06:07
| 237,398,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,822
|
py
|
import disentangled.dataset as dataset
import disentangled.model.distributions as dist
import disentangled.model.networks as networks
import disentangled.model.utils
import disentangled.utils as utils
import gin
import numpy as np
import tensorflow as tf
# TODO GIN CONFIGURABLE
def occurences(data):
"""Counts occurences of values in data
Args:
data: ∊ ℝ (N , K) = (batch_size, factors)
Returns:
(tf.RaggedTensor) ∊ ℝ (K, (A))
"""
data = tf.cast(data, tf.int32)
_, K = data.shape
occurences = tf.ragged.stack(
[tf.math.bincount(data[:, factor], dtype=tf.float32)
for factor in range(K)],
axis=0,
)
return occurences
# @tf.function
def estimate_marginal_entropy(samples, encoding_dist, *encoding_parameters):
"""Estimates marginal entropy
H(z) = sum_z( 1/N sum_x (q(z|x)) log ( 1/N sum_x(q(z|x)) ) ) ∊ ℝ [D]
Args:
samples: z ∼ q(z|x) ∊ ℝ (N, D)
encoding_dist: q(z|x)
*encoding_parameters: list of parameters ∊ ℝ [N]
Returns:
(tf.Tensor) ∊ ℝ [D]
"""
N, D = tf.unstack(tf.cast(samples.shape, tf.float32)) # Number of latent dims
samples = tf.transpose(samples) # ∊ ℝ [D, N]
encoding_parameters = tf.stack(encoding_parameters, axis=2)
n_params = encoding_parameters.shape[2]
# log q(z_j|x_n) ∊ ℝ [N, N, D]
log_qzx_matrix = tf.reshape(
encoding_dist.log_likelihood(
tf.broadcast_to(tf.reshape(samples, (1, D, N)), (N, D, N)),
*tf.unstack(
tf.broadcast_to(
tf.reshape(encoding_parameters, (N, D, 1, n_params)),
(N, D, N, n_params),
),
axis=3,
)
),
(N, N, D),
)
# H(z) = sum_z( 1/N sum_x (q(z|x)) log ( 1/N sum_x(q(z|x)) ) ) ∊ ℝ [D]
log_qz = tf.reduce_logsumexp(log_qzx_matrix - tf.math.log(N), axis=0)
return -tf.reduce_mean(log_qz, axis=0)
# return -tf.reduce_sum(tf.math.exp(log_qz) * log_qz, axis=0)  # alternative estimator (unused)
def ragged_logsumexp(values, **kwargs):
"""logsumexp with support for tf.RaggedTensor"""
return tf.math.log(tf.reduce_sum(tf.math.exp(values), **kwargs))
# @tf.function
def estimate_conditional_entropy(
samples, log_pvk, log_pxvk, encoding_dist, *encoding_parameters):
"""Calculates conditional entropy
H(z| v) = - sum(p(z,v) log p(z|v)) ∊ ℝ [D, K]
Args:
samples: z ∼ q(z|x) ∊ ℝ [N, D]
log_pv: log p(v) ∊ ℝ [K, (A) ]
log_pxv: log p(x|v) ∊ ℝ [K, (A)]
encoding_dist: q(z|x)
*encoding_parameters: list of parameters ∊ ℝ [N]
Returns:
(tf.Tensor) H(z| v) ∊ ℝ [D, K]
"""
N, D = tf.unstack(tf.cast(samples.shape, tf.float32)) # Batch size, latent_dims
K = tf.cast(log_pvk.shape[0], tf.float32) # Factors
# q(z|x) ∊ ℝ [N, D]
log_qzx = tf.reshape(
encoding_dist.log_likelihood(samples, *encoding_parameters),
(N, D, 1, 1)
)
# log_pzv = log_pxvk + log_pvk + tf.reduce_logsumexp(log_qzx, axis=0)
# p(z|v) = sum_x(q(z|x)p(x|v)) ∊ ℝ [D, K, (A)]
log_pzv_cond = ragged_logsumexp(log_pxvk + log_qzx, axis=0)
log_pzv = tf.expand_dims(log_pvk, 0) + log_pzv_cond
# return -tf.reduce_mean(log_pzv_cond, axis=2)
pzv = tf.math.exp(log_pzv) / tf.reduce_sum(tf.math.exp(log_pzv))
# H(z|v) = - sum(p(z,v) log p(z|v)) ∊ ℝ [D, K]
# conditional_entropy = -tf.reduce_mean(log_pzv_cond, axis=2).to_tensor()
conditional_entropy = -tf.reduce_sum(pzv * log_pzv_cond, axis=2).to_tensor()
return conditional_entropy
# @tf.function
def estimate_factor_statistics(labels):
"""Estimates entropy and prior and conditioned prob
Args:
labels: Factor values for samples ∊ ℝ [N, D]
Returns:
(tf.RaggedTensor) p(v) ∊ ℝ [K, (A) ]
(tf.RaggedTensor) p(x|v) ∊ ℝ [K, (A)]
(tf.Tensor) H(v_k) ∊ ℝ [K]
"""
N, K = tf.unstack(tf.cast(labels.shape, tf.float32)) # Batch size, Factors
# #{v_k = a}
factor_occurences = occurences(labels)
# p(v_k = a) = #{v_k = a} / N
# p(v) ∊ ℝ (K, (A) )
log_pvk = tf.math.log(factor_occurences) - tf.math.log(N)
# p(v_k=a|x=b) = 1
# p(x|v=a) = p(v|x) / #{v=a} ∊ ℝ [K, 1]
log_pxvk = -tf.math.log(factor_occurences)
# factor_possibilites = tf.cast(factor_occurences.row_lengths(axis=1), tf.float32)
# log_pxvk = -tf.math.log(factor_occurences) - tf.expand_dims(tf.math.log(factor_possibilites), 1)
# H(v_k) = - sum_a(p(v_k=a) log(p(v_k=a)) ∊ ℝ [K]
entropy = -tf.reduce_sum(tf.math.exp(log_pvk) * log_pvk, axis=1)
return entropy, log_pvk, log_pxvk
# @tf.function
def normalized_mutual_information(labels, samples, encoding_dist, *encoding_parameters):
"""Calculates normalized mutual information
I_n(z_j; v_k) = H(z_j) - H(z_j | v_k) / H(v_k) ∊ ℝ [D, K]
Args:
labels: Factor values for samples ∊ ℝ (N, D)
samples: z ∼ q(z|x) ∊ ℝ (N, D)
encoding_dist: q(z|x)
*encoding_parameters: list of parameters ∊ ℝ [N]
Returns:
(tf.Tensor) I_n(z_j; v_k)∊ ℝ [D, K]
"""
N, K = tf.unstack(tf.cast(labels.shape, tf.float32)) # Batch size, number factors
factor_entropy, log_pvk, log_pxvk = estimate_factor_statistics(labels)
conditional_entropy = estimate_conditional_entropy(
samples, log_pvk, log_pxvk, encoding_dist, *encoding_parameters
)
# H(z_j) = ∊ ℝ [D, ]
marginal_entropy = estimate_marginal_entropy(
samples, encoding_dist, *encoding_parameters
)
# I(z_j; v_k) = H(z_j) - H(z_j | v_k) ∊ ℝ [D, K]
mutual_information = tf.expand_dims(
marginal_entropy, 1) - conditional_entropy
nmi = mutual_information / factor_entropy
return nmi
# @tf.function
def mutual_information_gap_batch_estimate(labels, samples, encoding_dist, *encoding_parameters):
"""Estimates mutual information gap (MIG)
1/K sum_{k=1}^K 1/H(v_k) (I(z_j[k]; v_k) - max_{j !=j[k]} I(z_j;v_k))
Args:
labels: Factor values for samples ∊ ℝ (N, D)
samples: z ∼ q(z|x) ∊ ℝ (N, D)
encoding_dist: q(z|x)
*encoding_parameters: list of parameters ∊ ℝ [N]
Returns:
(tf.Tensor) ∊ ℝ []
"""
# I_n(z_j; v_k) = (H(z_j) - H(z_j | v_k)) / H(v_k) ∊ ℝ [D, K]
nmi = normalized_mutual_information(
labels, samples, encoding_dist, *encoding_parameters
)
nmi = tf.sort(nmi, axis=0, direction="DESCENDING")
# ∊ ℝ [K]
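# MIG per factor: the gap between the largest and second-largest normalized mutual
# information across latent dimensions; the mean over factors is returned below.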
mig = nmi[0, :] - nmi[1, :]
return tf.reduce_mean(mig)
@gin.configurable("mutual_information_gap_batch", module="disentangled.metric")
def mutual_information_gap_batch(
model,
dataset,
points,
batch_size,
encoding_dist,
progress_bar=True,
):
print("MIG BATCH")
dataset = dataset.take(points).batch(batch_size, drop_remainder=True)
n_batches = points // batch_size
if progress_bar:
progress = disentangled.utils.TrainingProgress(
dataset, total=n_batches)
progress.write("Calculating MIG")
else:
progress = dataset
mig = tf.TensorArray(dtype=tf.float32, size=n_batches)
for i, batch in enumerate(progress):
labels = tf.cast(batch["label"], tf.int32)
# z ∼ q(z|x) ∊ ℝ (N, D)
encoding_parameters = model.encode(batch["image"])
samples = model.sample(*encoding_parameters, training=True)
mig_batch = mutual_information_gap_batch_estimate(
labels, samples, encoding_dist, *encoding_parameters
)
mig = mig.write(i, mig_batch)
return tf.reduce_mean(mig.stack())
|
[
"eageby@kth.se"
] |
eageby@kth.se
|
27d665edb1a3f42684872e268feff701e5fbf1e7
|
ff2b5481cd0eb1024e0abaf02a2e30bdfd0f5422
|
/models/__init__.py
|
15adcc73ef7a49640de59ec6d3b51ac0542b7ab6
|
[
"MIT"
] |
permissive
|
dvrpc/tmc-uploader
|
a9da59030535e86dcf6a49546a83be76c889954a
|
e7aea6c6b2a66031daf7aecf4f264aecf298f347
|
refs/heads/master
| 2022-12-24T15:30:48.664611
| 2020-10-15T15:00:54
| 2020-10-15T15:00:54
| 281,735,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,977
|
py
|
from datetime import datetime
from pytz import timezone
from os import environ
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from dotenv import load_dotenv, find_dotenv
# from pathlib import Path
# from sqlalchemy import create_engine
# import pandas as pd
from db import db
from common.random_rainbow import make_random_gradient
load_dotenv(find_dotenv())
SQLALCHEMY_DATABASE_URI = environ.get("SQLALCHEMY_DATABASE_URI")
project_files = db.Table(
'project_files',
db.Column(
'project_id',
db.Integer,
db.ForeignKey('projects.uid'),
primary_key=True
),
db.Column(
'file_id',
db.Integer,
db.ForeignKey('filedata.uid'),
primary_key=True
)
)
class User(UserMixin, db.Model):
"""User account model."""
__tablename__ = 'app_users'
id = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(100),
nullable=False,
unique=True
)
email = db.Column(
db.String(40),
unique=True,
nullable=False
)
password = db.Column(
db.String(200),
primary_key=False,
unique=False,
nullable=False
)
website = db.Column(
db.String(60),
index=False,
unique=False,
nullable=True
)
created_on = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
default=lambda: datetime.now(timezone("US/Eastern")),  # callable, so the timestamp is taken per row rather than at import time
)
last_login = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True
)
background = db.Column(
db.Text,
nullable=False,
unique=False,
default=make_random_gradient  # pass the callable so each row gets its own gradient
)
def set_password(self, password):
"""Create hashed password."""
self.password = generate_password_hash(
password,
method='sha256'
)
def check_password(self, password):
"""Check hashed password."""
return check_password_hash(self.password, password)
def track_login(self):
"""Set the last_login value to now """
self.last_login = datetime.now(timezone("US/Eastern"))
def __repr__(self):
return '<User {}>'.format(self.name)
class Project(db.Model):
__tablename__ = 'projects'
uid = db.Column(
db.Integer,
primary_key=True
)
name = db.Column(
db.String(50),
nullable=False,
unique=True
)
description = db.Column(
db.Text,
nullable=False,
unique=False
)
created_by = db.Column(
db.Integer,
db.ForeignKey("app_users.id"),
nullable=False
)
background = db.Column(
db.Text,
nullable=False,
unique=False,
default=make_random_gradient  # pass the callable so each row gets its own gradient
)
tmc_files = db.relationship(
'TMCFile',
secondary=project_files,
lazy='subquery',
backref=db.backref(__tablename__, lazy=True)
)
def num_files(self):
return len(self.tmc_files)
def created_by_user(self):
return User.query.filter_by(id=self.created_by).first()
class TMCFile(db.Model):
__tablename__ = 'filedata'
uid = db.Column(
db.Integer,
primary_key=True
)
filename = db.Column(
db.Text,
nullable=False,
unique=False
)
title = db.Column(
db.Text,
nullable=True,
unique=False
)
# project_id = db.Column(
# db.Integer,
# db.ForeignKey("projects.uid"),
# nullable=False
# )
model_id = db.Column(
db.Integer,
nullable=True
)
uploaded_by = db.Column(
db.Integer,
db.ForeignKey("app_users.id"),
nullable=False
)
lat = db.Column(
db.Text,
nullable=True,
unique=False,
default=39.852413
)
lng = db.Column(
db.Text,
nullable=True,
unique=False,
default=-75.264969
)
data_date = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True
)
modes = db.Column(
db.Text,
nullable=True,
unique=False
)
legs = db.Column(
db.Text,
nullable=True,
unique=False
)
leg_names = db.Column(
db.Text,
nullable=True,
unique=False
)
movements = db.Column(
db.Text,
nullable=True,
unique=False
)
am_peak_start = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True
)
pm_peak_start = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True
)
am_volume = db.Column(
db.Float,
index=False,
unique=False,
nullable=True
)
pm_volume = db.Column(
db.Float,
index=False,
unique=False,
nullable=True
)
count_start_time = db.Column(
db.DateTime,
nullable=True,
unique=False
)
count_end_time = db.Column(
db.DateTime,
nullable=True,
unique=False
)
project_ids = db.relationship(
'Project',
secondary=project_files,
lazy='subquery',
backref=db.backref(__tablename__, lazy=True)
)
def pid_list(self):
return [p.uid for p in self.project_ids]
def num_projects(self):
num = len(self.project_ids)
return num
# if num == 1:
# txt = "project"
# else:
# txt = "projects"
# return f"{num} {txt}"
def name(self):
if self.title:
return self.title
else:
return self.filename
def upload_user(self):
return User.query.filter_by(id=self.uploaded_by).first()
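# Minimal usage sketch for the password helpers (illustrative; assumes an app context
# and an active database session):
#   user = User(name="jane", email="jane@example.com")
#   user.set_password("s3cret")        # stores a sha256 hash, never the plain text
#   user.check_password("s3cret")      # -> True
#   user.check_password("wrong")       # -> False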
|
[
"38364429+aaronfraint@users.noreply.github.com"
] |
38364429+aaronfraint@users.noreply.github.com
|
03ee43a71159afb7b987b9b55528312e47c3ff18
|
c512256b07ab15c9edd61b5bfd5e7bf15639b864
|
/untitled/杂项/inherit.py
|
c947c27a42f29367391dcf3d0c084799cc6981bc
|
[] |
no_license
|
pyc-ycy/PycharmProjects
|
866483337f1df614bab06af7d429d7e5b27fe990
|
62aee5f458c2e45a862a09f0fa59a0dde5844c68
|
refs/heads/master
| 2020-05-21T15:46:32.879909
| 2019-06-17T13:21:24
| 2019-06-17T13:21:24
| 186,099,310
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
#!/usr/bin/python3.7
# -*- coding: UTF-8 -*-
class Electrical(object):
def function(self):
print("Electrical can perform specific functions after power supply")
class Location(object):
def weizhi(self):
print("Every electrical have its own location")
class Television(Electrical, Location):
def function(self):
print("Television can play TV shows and see news")
def weizhi(self):
print("stand at home.")
class SoundBox(Electrical, Location):
def function(self):
print("SoundBox can play musics")
def weizhi(self):
print("stand at home.")
class Computer(Electrical, Location):
def function(self):
print("Computer can search information online and do office works")
def weizhi(self):
print("can bring to everywhere")
def using(electrical):
electrical.function()
electrical.weizhi()
using(Computer())
using(SoundBox())
|
[
"2923616405@qq.com"
] |
2923616405@qq.com
|
02dbc32c8551b4433053ccb04ee41ef7de45ac8f
|
8eb19885788618e42499d6427049fd389e9eb327
|
/mainapp/migrations/0021_auto_20170322_0447.py
|
cbc626cde8abf5170ac50a1bc5507555cbce4b2e
|
[] |
no_license
|
syehbiherbian/cruddjango
|
677b62afd83b1f4e1dc98b7b9abe68421b2317c9
|
702d5587474c6efbec4354632e7850ccdce5a6f3
|
refs/heads/master
| 2021-01-23T05:56:29.175364
| 2017-03-27T10:57:13
| 2017-03-27T10:57:13
| 86,323,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-22 04:47
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0020_auto_20170322_0445'),
]
operations = [
migrations.AlterField(
model_name='book',
name='created_date',
field=models.DateField(default=datetime.datetime(2017, 3, 22, 4, 47, 27, 561821)),
),
migrations.AlterField(
model_name='rental',
name='end_date',
field=models.DateTimeField(default=datetime.datetime(2017, 3, 22, 4, 47, 27, 562629, tzinfo=utc)),
),
migrations.AlterField(
model_name='rental',
name='start_date',
field=models.DateTimeField(default=datetime.datetime(2017, 3, 22, 4, 47, 27, 562329, tzinfo=utc)),
),
]
|
[
"oprentimac4@Oprents-iMac-2.local"
] |
oprentimac4@Oprents-iMac-2.local
|
5be37c92efa58fe731ab3cdad297352dccbfa173
|
ae29d1304b7ae66a283d6dfef44911679fd79f87
|
/evgenApp/schema.py
|
83a5b640f689d50f99528d436f25351fc3ec3e98
|
[] |
no_license
|
Iliavas/evg-app-back
|
dd2b5e917a2b0306aebb8e79aa139bea391bf782
|
ed1c0d6e6a5f9b667c029284c4af9f38108299d5
|
refs/heads/master
| 2023-05-13T01:04:03.647550
| 2021-03-28T18:34:17
| 2021-03-28T18:34:17
| 328,385,545
| 0
| 2
| null | 2021-03-29T18:28:01
| 2021-01-10T13:09:00
|
Python
|
UTF-8
|
Python
| false
| false
| 488
|
py
|
import graphene
import users.schema
import organisations.schema
import lessons.schema
import hyperlinks.schema
class Query(users.schema.Query, organisations.schema.Query,
lessons.schema.Query, hyperlinks.schema.Query, graphene.ObjectType):
pass
class Mutation(users.schema.Mutation, organisations.schema.Mutation,
lessons.schema.Mutation, hyperlinks.schema.Mutation, graphene.ObjectType):
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
|
[
"il.vsl0110@gmail.com"
] |
il.vsl0110@gmail.com
|
3226c7fceb4f6cfc56757178edf8e147ae74ad92
|
ace860f60e380d47ad40ad9e21192cb069853bd8
|
/DjangoWebProject3/market/migrations/0009_auto_20171222_1205.py
|
894ad229dc665d0b5c76850706af1b4f51926655
|
[] |
no_license
|
wilsonmwiti/djangoXumpesa
|
9b28b3063490dd536867e517ee546436cc72b0f5
|
1e74fb6c7e79504831a1ce8a568375b1ab5d0a56
|
refs/heads/master
| 2023-03-15T15:13:46.567278
| 2020-09-08T11:44:03
| 2020-09-08T11:44:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-22 09:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('market', '0008_paidproducts_adspackage'),
]
operations = [
migrations.AlterField(
model_name='paidproducts',
name='Price',
field=models.PositiveIntegerField(default='', max_length=12),
),
]
|
[
"lewicpro@gmail.com"
] |
lewicpro@gmail.com
|
897249938f93f608a86efc83f03031609ce97109
|
fccba098f9cb31cbe052893f37449349ad09b26c
|
/tests/service/private/users/test_auth_user.py
|
4d9d938ac7d7ef634bfc396fbfd6bb93a6d8ffd9
|
[] |
no_license
|
findfeatures/gateway-service
|
18c317678cb97d2587059f8dbdff8a0c655948be
|
83920d0bd7f4c20b7b4981d52f6bd6f2438f9f64
|
refs/heads/master
| 2020-11-28T08:02:17.866975
| 2020-01-13T17:03:45
| 2020-01-13T17:03:45
| 229,750,937
| 0
| 0
| null | 2019-12-30T13:28:05
| 2019-12-23T12:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
import json
from gateway.exceptions.users import UserNotAuthorised
from gateway.service import GatewayService
from mock import ANY, call
from nameko.containers import ServiceContainer
from nameko.testing.services import replace_dependencies
def test_auth_user(config, web_session):
container = ServiceContainer(GatewayService)
accounts = replace_dependencies(container, "accounts_rpc")
container.start()
accounts.auth_user.return_value = {"JWT": "test"}
email = "test@google.com"
password = "password"
response = web_session.post(
"/v1/user/auth", data=json.dumps({"email": email, "password": password})
)
assert accounts.auth_user.call_args == call(email, password)
assert response.status_code == 200
assert response.json() == {"JWT": "test"}
def test_auth_user_not_authorised(config, web_session):
container = ServiceContainer(GatewayService)
accounts = replace_dependencies(container, "accounts_rpc")
container.start()
accounts.auth_user.side_effect = UserNotAuthorised()
response = web_session.post(
"/v1/user/auth", data=json.dumps({"email": "email", "password": "password"})
)
assert response.status_code == 401
assert response.json() == {"error": "USER_NOT_AUTHORISED", "message": ""}
def test_auth_user_incorrect_schema(config, web_session):
container = ServiceContainer(GatewayService)
accounts = replace_dependencies(container, "accounts_rpc")
container.start()
accounts.auth_user.side_effect = UserNotAuthorised()
response = web_session.post("/v1/user/auth", data=json.dumps({}))
assert response.status_code == 400
assert response.json() == {"error": "VALIDATION_ERROR", "message": ANY}
|
[
"calum@paceup.com"
] |
calum@paceup.com
|
37eecc62d71f1b47a9ecf619b988855fdd408986
|
94ba2500fc857f4f2294d01ad97feb9a1847a85f
|
/dingtalk/customers.py
|
57182ae6900d2f650e21a2e87b1d1c586b7e93f2
|
[
"Apache-2.0"
] |
permissive
|
hiandy168/dingtalk-python
|
6371e7dc7a94142ed41c9851f16941e19ea8642b
|
c42437ca8063d5b748ae48f911d0b84cd7dc2696
|
refs/heads/master
| 2021-08-23T19:27:17.460908
| 2017-12-06T06:51:54
| 2017-12-06T06:51:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/11/30 2:50 PM
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : customers.py
# @Software: PyCharm
import json
import requests
from .foundation import *
from .exceptions import DingTalkExceptions
__author__ = 'blackmatrix'
def get_label_groups(access_token, size=20, offset=0):
"""
Get the list of label groups
:param access_token:
:param size:
:param offset:
:return:
"""
url = get_request_url('dingtalk.corp.ext.listlabelgroups', access_token)
payload = {'size': size, 'offset': offset}
resp = requests.get(url, params=payload)
if resp.status_code == 200:
return resp.json()
else:
raise DingTalkExceptions.get_ext_list_err
def get_corp_ext_list(access_token, size=20, offset=0):
"""
Get the list of external contacts
:return:
"""
url = get_request_url('dingtalk.corp.ext.list', access_token)
payload = {'size': size, 'offset': offset}
resp = requests.get(url, params=payload)
if resp.status_code == 200:
return resp.json()
else:
raise DingTalkExceptions.get_ext_list_err
def add_corp_ext(access_token, contact: dict):
"""
Add an external contact
:return:
"""
url = get_request_url('dingtalk.corp.ext.add', access_token)
contact = json.dumps(contact)
resp = requests.post(url, data={'contact': contact.encode('utf-8')})
if resp.status_code == 200:
return resp.json()
else:
raise DingTalkExceptions.get_ext_list_err
if __name__ == '__main__':
pass
|
[
"codecolor@outlook.com"
] |
codecolor@outlook.com
|
b47afe389731472f6a8dc09d34ad583eb26df1b4
|
d6155fdf26f085fc60048e4412722f8f07c79cff
|
/entidades/personalmov.py
|
6df71110fc818b6d48d76e028c00c67de66b148f
|
[] |
no_license
|
matiasGuidone/mdt-marcado
|
0a8e7f5e4687ddcd3062fb494d409828eb0cc8c8
|
9da69674ffa3f60d05810f2100a44dbcd4e089c8
|
refs/heads/master
| 2023-04-13T10:02:51.711365
| 2021-04-23T18:24:51
| 2021-04-23T18:24:51
| 360,972,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
class personalmov:
id = None
nombre = None
apellido = None
nrosocio = None
codigomarcado = None
horaentradamat = None
horasalidamat = None
horaentradaves = None
horasalidaves = None
horaentradasab = None
horasalidasab = None
huella = None
legajo = None
idmov = None
fechahora = None
tipo = None
observaciones = None
# `per_nombre`,
# `per_apellido`,
# `per_nrosocio`,
# `per_codigomarcado`,
# `per_horaentradamatutino`,
# `per_horasalidamatutino`,
# `per_horaentradavespertino`,
# `per_horasalidavespertino`,
# `per_horaentradasabado`,
# `per_horasalidasabado`,
# `per_huellaid`,
# `id`,
# `per_legajo`
def __init__(self, p= None):
if p != None:
self.id=p[4]
self.nombre=p[5]
self.apellido=p[6]
self.nrosocio=p[7]
self.codigomarcado = p[8]
self.horaentradamat = p[9]
self.horasalidamat = p[10]
self.horaentradaves = p[11]
self.horasalidaves = p[12]
self.horaentradasab = p[13]
self.horasalidasab = p[14]
self.huella = p[15]
self.legajo = p[17]
self.idmov = p[0]
self.fechahora = p[1]
self.tipo = p[2]
self.observaciones = p[3]
def startCall(self):
pass
def endCall(self):
pass
|
[
"matias.guidone@gmail.com"
] |
matias.guidone@gmail.com
|
3f87241222b0d4e0e456a6c7d69d0c34e6ea0a0d
|
e128b0ea31db0a1f6082bcc42a981b6db16e4ac0
|
/test_site/urls.py
|
87bce30086f54570d82b7570539d2c9792eec2df
|
[] |
no_license
|
FAenX/test_site
|
9e1573acb100ed85c8c52b482ede1bae5e683993
|
0a1d850a20efef6989910e1ddf80dd82ddc1e5a7
|
refs/heads/master
| 2020-08-05T00:11:37.082965
| 2019-10-02T11:49:12
| 2019-10-02T11:49:12
| 212,326,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
"""test_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('', TemplateView.as_view(template_name='index.html')),
]
|
[
"kipronofb@gmail.com"
] |
kipronofb@gmail.com
|
025683f5c5830f5371dd464f1bda519dd08605c5
|
a0bc35cef7b7d55aae2e6f92469126b1bd09bd4a
|
/arp/calfiles/blo853.py
|
61ac4c3bd58892114a9c1b5bbc314eca6ba7d0e9
|
[] |
no_license
|
zakiali/cabo
|
6d1bc9dd8f84c1db14654fa2da9e8b46f803ac30
|
bad9a64ddb637a95795885a3679532c63cff8085
|
refs/heads/master
| 2020-05-20T01:17:09.045852
| 2012-06-26T02:43:58
| 2012-06-26T02:43:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
import aipy as a
class AntennaArray(a.fit.AntennaArray):
def sim_cache(self, *args, **kwds):
return a.fit.AntennaArray.sim_cache(self, *args, **kwds)
def sim(self, i, j, pol):
ans = a.fit.AntennaArray.sim(self, i, j, pol)
return ans
prms = {
'loc': ('+37:55.1','-122:09.4'),
'antpos': {
0: [0., 0., 0.] ,
1: [0., 37., 0.] ,
}
}
def get_aa(freqs):
location = prms['loc']
antennas = []
beam = a.fit.Beam(freqs)
for i in range (len(prms['antpos'])):
pos = prms['antpos'][i]
antennas.append(a.fit.Antenna(pos[0], pos[1], pos[2], beam, amp=.05))
aa = AntennaArray(prms['loc'], antennas)
return aa
src_prms = {
'Sun': {'jys':1e5},
}
def get_catalog(srcs=None, cutoff=None, catalogs=None):
cat = a.src.get_catalog(srcs=srcs, cutoff=cutoff, catalogs=catalogs)
cat.set_params(src_prms)
return cat
|
[
"aparsons@astron.berkeley.edu"
] |
aparsons@astron.berkeley.edu
|
07fe463e1bc8d42676a62bcbd147a075d25346a6
|
f510ea23f826b1a4e79e413bada97c45a915cff0
|
/mysite/settings.py
|
4cafd9bfae3d1297f02fa69d94e67031b9be6f7d
|
[] |
no_license
|
a5816010/my-first-blog
|
dae45a688a936394459be9316116fb23cc44f8e4
|
77a0babeaf0584742dd47ddabe3f239300733010
|
refs/heads/master
| 2020-03-20T21:11:46.319488
| 2018-06-23T16:58:54
| 2018-06-23T16:58:54
| 137,728,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,109
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ezcgfc_x#e&z#9e2i^pkn3@-&%ss5dapjr6zb4d)g%4kx@uce5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"you@example.com"
] |
you@example.com
|
255796bd20e0ee31da9ccf21999e6678d33a8855
|
c69fab7a64aa17ccfd15db74bad77e4bac766cb0
|
/col_detection.py
|
d1cd92d16b3f9cc7e2a3c46eb0d9cde03fc49497
|
[] |
no_license
|
akashdeeps19/red-color-detection
|
e8b3a1b3a96ec96e07e106c682736b80ccae6250
|
e8a302c98f71d4cc440c2071dcc227f8e38fc2f4
|
refs/heads/master
| 2022-03-25T06:54:03.149844
| 2019-11-05T15:27:57
| 2019-11-05T15:27:57
| 219,778,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
import cv2
import numpy as np
# quality = 400
# im = cv2.imread("F1.large.jpg")
video_path = input("Enter video path or 'live' for live capture : ")
quality = input("Enter quality (300,400,500) : ")
quality = int(quality)
if video_path == "live":
cap = cv2.VideoCapture(0)
else:
cap = cv2.VideoCapture(video_path)
def preprocessing(img_org):
img = cv2.resize(img_org,(quality,quality),cv2.INTER_AREA)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
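# Red wraps around the ends of OpenCV's hue range (0-180), so two masks are built
# below (one near hue 0, one near hue 180) and summed to cover both red bands.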
lower_blue = np.array([0,50,50])        # unused
upper_blue = np.array([10,255,255])     # unused
mask1 = cv2.inRange(hsv, np.array([0, 115, 105]), np.array([10, 255, 255]))
mask2 = cv2.inRange(hsv,np.array([170, 115, 105]), np.array([180, 255, 255]))
mask = mask1+mask2
res = cv2.bitwise_and(img,img, mask= mask)
# cv2.imshow(None,res)
# cv2.waitKey()
im_gray = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
im_gray = cv2.cvtColor(im_gray,cv2.COLOR_BGR2GRAY)
(thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
im_bw = cv2.medianBlur(im_bw,3)
ret, labels = cv2.connectedComponents(im_bw)
regions_x = [[] for i in range(ret)]
regions_y = [[] for i in range(ret)]
img_res = img
for i in range(quality):
for j in range(quality):
if(labels[i,j] > 0):
regions_x[labels[i,j]].append(i)
regions_y[labels[i,j]].append(j)
for i in range(1,ret):
s_point = (min(regions_y[i]),min(regions_x[i]))
e_point = (max(regions_y[i]),max(regions_x[i]))
img_res = cv2.rectangle(img,s_point,e_point,(255,0,0),1)
return img_res
#preprocessing(im)
if (cap.isOpened()== False):
print("Error opening video stream or file")
while(cap.isOpened()):
ret, frame = cap.read()
if ret == True:
frame = preprocessing(frame)
frame = cv2.resize(frame,(1000,600),cv2.INTER_AREA)
cv2.imshow('Frame',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
|
[
"akasdeeps19@gmail.com"
] |
akasdeeps19@gmail.com
|
385ce854193cf55589ceaa2d228c3f542ec7aff7
|
5d412b4ee657143ea6cfc820052cd13567e8b149
|
/bodajusticeapp/migrations/0004_auto_20190224_0242.py
|
f75ae0561859f6714f505da3df106c0da8dcb4a5
|
[] |
no_license
|
boda-justice/boda-justice-new-backend
|
e6fb4e12bfb4606048273cbc4e58df3886e21ec2
|
75cf16aed208a966915b6afb1906b29e1b829d03
|
refs/heads/master
| 2020-04-24T19:20:18.725304
| 2019-02-24T13:38:29
| 2019-02-24T13:38:29
| 172,208,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
# Generated by Django 2.1.7 on 2019-02-24 02:42
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('bodajusticeapp', '0003_auto_20190224_0230'),
]
operations = [
migrations.RemoveField(
model_name='offence',
name='offense_type',
),
migrations.AddField(
model_name='offence',
name='modification_date',
field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='date_modified'),
),
migrations.AlterField(
model_name='offence',
name='creation_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date_created'),
),
migrations.AlterField(
model_name='offence',
name='description',
field=models.TextField(verbose_name='offence_description'),
),
migrations.AlterField(
model_name='offence',
name='fine',
field=models.DecimalField(decimal_places=0, max_digits=6),
),
]
|
[
"29925144+WinstonKamau@users.noreply.github.com"
] |
29925144+WinstonKamau@users.noreply.github.com
|
4f908d998631a669d61a1659ebbce23ec367048f
|
a4ab889cbcd077dae29eb2ca62c9078aa6c0d555
|
/benchmark/torch/sac/train.py
|
eb40e1b80d140cf8a3130a40dac598b307832f3d
|
[
"Apache-2.0"
] |
permissive
|
rical730/PARL
|
909721c347ce05d0c43ad8b3d4b38edd63f47a44
|
88e43d309278c34ca857939fe251813d4cad4b03
|
refs/heads/develop
| 2022-04-19T19:29:15.269466
| 2022-04-01T05:51:46
| 2022-04-01T05:51:46
| 482,826,065
| 0
| 0
|
Apache-2.0
| 2022-04-18T11:44:11
| 2022-04-18T11:44:11
| null |
UTF-8
|
Python
| false
| false
| 5,213
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import gym
import argparse
from parl.utils import logger, tensorboard, ReplayMemory
from parl.env.continuous_wrappers import ActionMappingWrapper
from mujoco_model import MujocoModel
from mujoco_agent import MujocoAgent
from parl.algorithms import SAC
WARMUP_STEPS = 1e4
EVAL_EPISODES = 5
MEMORY_SIZE = int(1e6)
BATCH_SIZE = 256
GAMMA = 0.99
TAU = 0.005
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
# Run episode for training
def run_train_episode(agent, env, rpm):
action_dim = env.action_space.shape[0]
obs = env.reset()
done = False
episode_reward, episode_steps = 0, 0
while not done:
episode_steps += 1
# Select action randomly or according to policy
if rpm.size() < WARMUP_STEPS:
action = np.random.uniform(-1, 1, size=action_dim)
else:
action = agent.sample(obs)
# Perform action
next_obs, reward, done, _ = env.step(action)
terminal = float(done) if episode_steps < env._max_episode_steps else 0
# Store data in replay memory
rpm.append(obs, action, reward, next_obs, terminal)
obs = next_obs
episode_reward += reward
# Train agent after collecting sufficient data
if rpm.size() >= WARMUP_STEPS:
batch_obs, batch_action, batch_reward, batch_next_obs, batch_terminal = rpm.sample_batch(
BATCH_SIZE)
agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs,
batch_terminal)
return episode_reward, episode_steps
# Runs policy for 5 episodes by default and returns average reward
# A fixed seed is used for the eval environment
def run_evaluate_episodes(agent, env, eval_episodes):
avg_reward = 0.
for _ in range(eval_episodes):
obs = env.reset()
done = False
while not done:
action = agent.predict(obs)
obs, reward, done, _ = env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
return avg_reward
def main():
logger.info("------------------- SAC ---------------------")
logger.info('Env: {}, Seed: {}'.format(args.env, args.seed))
logger.info("---------------------------------------------")
env = gym.make(args.env)
env.seed(args.seed)
env = ActionMappingWrapper(env)
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
# Initialize model, algorithm, agent, replay_memory
model = MujocoModel(obs_dim, action_dim)
algorithm = SAC(
model,
gamma=GAMMA,
tau=TAU,
alpha=args.alpha,
actor_lr=ACTOR_LR,
critic_lr=CRITIC_LR)
agent = MujocoAgent(algorithm)
rpm = ReplayMemory(
max_size=MEMORY_SIZE, obs_dim=obs_dim, act_dim=action_dim)
total_steps = 0
test_flag = 0
while total_steps < args.train_total_steps:
# Train episode
episode_reward, episode_steps = run_train_episode(agent, env, rpm)
total_steps += episode_steps
tensorboard.add_scalar('train/episode_reward', episode_reward,
total_steps)
logger.info('Total Steps: {} Reward: {}'.format(
total_steps, episode_reward))
# Evaluate episode
if (total_steps + 1) // args.test_every_steps >= test_flag:
while (total_steps + 1) // args.test_every_steps >= test_flag:
test_flag += 1
avg_reward = run_evaluate_episodes(agent, env, EVAL_EPISODES)
tensorboard.add_scalar('eval/episode_reward', avg_reward,
total_steps)
logger.info('Evaluation over: {} episodes, Reward: {}'.format(
EVAL_EPISODES, avg_reward))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--env", default="HalfCheetah-v1", help='Mujoco gym environment name')
parser.add_argument(
"--seed",
default=0,
type=int,
help='Sets Gym, PyTorch and Numpy seeds')
parser.add_argument(
"--train_total_steps",
        default=int(5e6),
type=int,
help='Max time steps to run environment')
parser.add_argument(
'--test_every_steps',
type=int,
default=int(5e3),
help='The step interval between two consecutive evaluations')
parser.add_argument(
"--alpha",
default=0.2,
type=float,
help=
'Determines the relative importance of entropy term against the reward'
)
args = parser.parse_args()
main()
|
[
"noreply@github.com"
] |
rical730.noreply@github.com
|
b8d704ce25131c1966e2ce4da8b3200b98e66413
|
18aee5d93a63eab684fe69e3aa0abd1372dd5d08
|
/python/paddle/jit/api.py
|
827bb9806a110b61453a887d8bccdeb5553878e0
|
[
"Apache-2.0"
] |
permissive
|
Shixiaowei02/Paddle
|
8d049f4f29e281de2fb1ffcd143997c88078eadb
|
3d4d995f26c48f7792b325806ec3d110fc59f6fc
|
refs/heads/develop
| 2023-06-26T06:25:48.074273
| 2023-06-14T06:40:21
| 2023-06-14T06:40:21
| 174,320,213
| 2
| 1
|
Apache-2.0
| 2022-12-28T05:14:30
| 2019-03-07T10:09:34
|
C++
|
UTF-8
|
Python
| false
| false
| 68,685
|
py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Temporary disable isort to avoid circular import
# This can be removed after the circular import is resolved
# isort: skip_file
from __future__ import annotations
import os
import pickle
import warnings
from collections import OrderedDict
import inspect
import threading
from typing import Any
import paddle
from paddle.fluid import core, dygraph
from paddle.fluid.compiler import (
BuildStrategy,
CompiledProgram,
ExecutionStrategy,
)
from paddle.fluid.data_feeder import check_type
from paddle.fluid.dygraph.base import (
program_desc_tracing_guard,
switch_to_static_graph,
)
from .dy2static import logging_utils
from .dy2static.convert_call_func import (
ConversionOptions,
add_ignore_module,
)
from .dy2static.program_translator import (
ProgramTranslator,
StaticFunction,
unwrap_decorators,
)
from paddle.jit.translated_layer import (
TranslatedLayer,
INFER_MODEL_SUFFIX,
INFER_PARAMS_SUFFIX,
INFER_PARAMS_INFO_SUFFIX,
INFER_PROPERTY_SUFFIX,
)
from paddle.nn import Layer
from paddle.fluid.executor import Executor, scope_guard
from paddle.fluid.framework import (
Block,
Program,
Variable,
Parameter,
EagerParamBase,
)
from paddle.fluid.framework import (
_current_expected_place,
_dygraph_guard,
_dygraph_tracer,
)
from paddle.fluid.framework import dygraph_only
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.fluid.io import save_inference_model
from paddle.framework import in_dynamic_mode
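# Helper: rebuild a Python Program object from a raw ProgramDesc and sync it with the C++ side.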
def create_program_from_desc(program_desc):
program = Program()
program.desc = program_desc
program.blocks = [Block(program, 0)]
program._sync_with_cpp()
return program
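# Recursively collect Variable objects from (possibly nested) lists/tuples of inputs.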
def _extract_vars(inputs, result_list, err_tag='inputs'):
if isinstance(inputs, Variable):
result_list.append(inputs)
elif isinstance(inputs, (list, tuple)):
for var in inputs:
_extract_vars(var, result_list, err_tag)
else:
raise TypeError(
"The type of 'each element of {}' in paddle.jit.TracedLayer.trace must be fluid.Variable, but received {}.".format(
err_tag, type(inputs)
)
)
def extract_vars(inputs, err_tag='inputs'):
result_list = []
_extract_vars(inputs, result_list, err_tag)
return result_list
def _dygraph_to_static_func_(dygraph_func):
"""
Converts imperative dygraph APIs into declarative function APIs. Decorator
@dygraph_to_static_func only converts imperative dygraph APIs into
    declarative net-building APIs, which means it doesn't return an immediate
    numerical result as imperative mode does. Users should handle Program and Executor
by themselves.
Note:
This decorator is NOT our recommended way to transform imperative function
to declarative function. We will remove this decorator after we finalize
cleaning up code.
Args:
dygraph_func (callable): callable imperative function.
Returns:
Callable: converting imperative dygraph APIs into declarative
net-building APIs.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
from paddle.jit.api import dygraph_to_static_func
@dygraph_to_static_func
def func(x):
if paddle.mean(x) < 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
x = paddle.full(shape=[3, 3], fill_value=0, dtype='float64')
x_v = func(x)
exe = fluid.Executor(fluid.CPUPlace())
out = exe.run(fetch_list=[x_v])
print(out[0])
# [[1. 1. 1.]
# [1. 1. 1.]
# [1. 1. 1.]]
"""
# TODO: remove this decorator after we finalize training API
def __impl__(*args, **kwargs):
program_translator = ProgramTranslator()
if in_dynamic_mode() or not program_translator.enable_to_static:
logging_utils.warn(
"The decorator 'dygraph_to_static_func' doesn't work in "
"dygraph mode or set 'paddle.jit.enable_to_static' to False. "
"We will just return dygraph output."
)
return dygraph_func(*args, **kwargs)
static_func = program_translator.get_func(dygraph_func)
return static_func(*args, **kwargs)
return __impl__
dygraph_to_static_func = wrap_decorator(_dygraph_to_static_func_)
def copy_decorator_attrs(original_func, decorated_obj):
"""
Copies some necessary attributes from original function into decorated function.
Args:
original_func(callable): the original decorated function.
decorated_obj(StaticFunction): the target decorated StaticFunction object.
"""
decorator_name = "to_static"
decorated_obj.__name__ = original_func.__name__
decorated_obj._decorator_name = decorator_name
decorated_obj.__wrapped__ = original_func
decorated_obj.__doc__ = original_func.__doc__
if hasattr(original_func, "__module__"):
decorated_obj.__module__ = original_func.__module__
return decorated_obj
def ignore_module(modules: list[Any]):
"""
    Adds modules that are ignored during dynamic-to-static transcription.
Builtin modules that have been ignored are collections, pdb, copy, inspect, re, numpy, logging, six
Args:
modules (List[Any]): Ignored modules that you want to add
Examples:
.. code-block:: python
import scipy
import astor
import paddle
from paddle.jit import ignore_module
modules = [
scipy,
astor
]
ignore_module(modules)
"""
add_ignore_module(modules)
def _check_and_set_backend(backend, build_strategy):
if backend not in ['CINN', None]:
raise ValueError(
"The backend of to_static should be 'CINN' or None, but received {}.".format(
backend
)
)
if backend == 'CINN':
build_strategy.build_cinn_pass = True
def to_static(
function=None,
input_spec=None,
build_strategy=None,
backend=None,
**kwargs,
):
"""
Converts imperative dygraph APIs into declarative function APIs. Decorator
@to_static handles the Program and Executor of static graph mode and returns
the result as dygraph Tensor(s). Users could use the returned dygraph
Tensor(s) to do imperative training, inference, or other operations. If the
decorated function calls other imperative function, the called one will be
converted into declarative function as well.
Args:
function (callable): callable imperative function.
        input_spec(list[InputSpec]|tuple[InputSpec]): list/tuple of InputSpec to specify the shape/dtype/name
information of each input Tensor.
build_strategy(BuildStrategy|None): This argument is used to compile the
converted program with the specified options, such as operators' fusion
in the computational graph and memory optimization during the execution
of the computational graph. For more information about build_strategy,
please refer to :code:`paddle.static.BuildStrategy`. The default is None.
backend(str, Optional): Specifies compilation backend, which can be `CINN` or None. When backend is `CINN`, CINN compiler will be used to speed up training and inference.
        kwargs: Supported keys include `property`; set `property` to True if the function is a Python property.
Returns:
Tensor(s): containing the numerical result.
Examples:
.. code-block:: python
import paddle
from paddle.jit import to_static
@to_static
def func(x):
if paddle.mean(x) < 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
x = paddle.ones([1, 2], dtype='float32')
x_v = func(x)
print(x_v) # [[2. 2.]]
"""
property = kwargs.get("property", False)
def decorated(python_func):
"""
Decorates a python function into a StaticFunction object.
"""
# Step 1. unwrap the function if it is already decorated.
_, python_func = unwrap_decorators(python_func)
# Step 2. copy some attributes from original python function.
static_layer = copy_decorator_attrs(
original_func=python_func,
decorated_obj=StaticFunction(
function=python_func,
input_spec=input_spec,
build_strategy=build_strategy,
property=property,
backend=backend,
),
)
return static_layer
build_strategy = build_strategy or BuildStrategy()
if not isinstance(build_strategy, BuildStrategy):
raise TypeError(
"Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}".format(
type(build_strategy).__name__
)
)
_check_and_set_backend(backend, build_strategy)
# for usage: `to_static(foo, ...)`
if function is not None:
if isinstance(function, Layer):
if isinstance(function.forward, StaticFunction):
class_name = function.__class__.__name__
logging_utils.warn(
"`{}.forward` has already been decorated somewhere. It will be redecorated to replace previous one.".format(
class_name
)
)
function.forward = decorated(function.forward)
return function
else:
return decorated(function)
# for usage: `@to_static`
return decorated
def not_to_static(func=None):
"""
    A decorator to suppress the conversion of a function.
Args:
func(callable): The function to decorate.
Returns:
callable: A function which won't be converted in Dynamic-to-Static.
Examples:
.. code-block:: python
import paddle
@paddle.jit.not_to_static
def func_not_to_static(x):
res = x - 1
return res
@paddle.jit.to_static
def func(x):
if paddle.mean(x) < 0:
out = func_not_to_static(x)
else:
out = x + 1
return out
x = paddle.ones([1, 2], dtype='float32')
out = func(x)
print(out) # [[2. 2.]]
"""
if func is None:
return not_to_static
options = ConversionOptions(not_convert=True)
options.attach(func)
return func
class _SaveLoadConfig:
def __init__(self):
self._output_spec = None
self._model_filename = None
self._params_filename = None
self._separate_params = False
# used for `paddle.load`
self._keep_name_table = False
# NOTE: Users rarely use following configs, so these configs are not open to users,
# reducing user learning costs, but we retain the configuration capabilities
# If True, programs are modified to only support direct inference deployment.
        # Otherwise, more information will be stored for flexible optimization and re-training.
# Currently, only True is supported
self._export_for_deployment = True
        # If True, it will save only the inference program and will not save the params of the Program
self._program_only = False
self.with_hook = False
# if True, multi `StaticFunction` will share params in one file.
self.combine_params = False
@property
def output_spec(self):
return self._output_spec
@output_spec.setter
def output_spec(self, spec):
if spec is None:
return
if not isinstance(spec, list):
raise TypeError(
"The config `output_spec` should be 'list', but received input type is %s."
% type(input)
)
for var in spec:
if not isinstance(var, core.eager.Tensor):
raise TypeError(
"The element in config `output_spec` list should be 'Variable', but received element's type is %s."
% type(var)
)
self._output_spec = spec
@property
def model_filename(self):
return self._model_filename
@model_filename.setter
def model_filename(self, filename):
if filename is None:
return
if not isinstance(filename, str):
raise TypeError(
"The config `model_filename` should be str, but received input's type is %s."
% type(filename)
)
if len(filename) == 0:
raise ValueError("The config `model_filename` is empty string.")
self._model_filename = filename
@property
def params_filename(self):
return self._params_filename
@params_filename.setter
def params_filename(self, filename):
if filename is None:
return
if not isinstance(filename, str):
raise TypeError(
"The config `params_filename` should be str, but received input's type is %s."
% type(filename)
)
if len(filename) == 0:
raise ValueError("The config `params_filename` is empty string.")
self._params_filename = filename
@property
def keep_name_table(self):
return self._keep_name_table
@keep_name_table.setter
def keep_name_table(self, value):
if value is None:
return
if not isinstance(value, bool):
raise TypeError(
"The config `keep_name_table` should be bool value, but received input's type is %s."
% type(value)
)
self._keep_name_table = value
def _parse_save_configs(configs):
supported_configs = [
'output_spec',
"with_hook",
"combine_params",
"clip_extra",
"skip_forward",
]
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.jit.save` is not supported."
% (key)
)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.output_spec = configs.get('output_spec', None)
inner_config.with_hook = configs.get('with_hook', False)
inner_config.combine_params = configs.get("combine_params", False)
inner_config.clip_extra = configs.get("clip_extra", True)
inner_config.skip_forward = configs.get("skip_forward", False)
return inner_config
def _parse_load_config(configs):
supported_configs = ['model_filename', 'params_filename']
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.jit.load` is not supported."
% (key)
)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.model_filename = configs.get('model_filename', None)
inner_config.params_filename = configs.get('params_filename', None)
return inner_config
def _get_input_var_names(inputs, input_spec):
name_none_error = (
"The %s's name is None. "
"When using jit.save, please set InputSepc's name in "
"to_static(input_spec=[]) and jit.save(input_spec=[]) "
"and make sure they are consistent."
)
name_no_exists_error = (
"The tensor `%s` does not exists. "
"Please make sure the name of InputSpec or example Tensor "
"in input_spec is the same as the name of InputSpec in "
"`to_static` decorated on the Layer.forward method."
)
result_list = []
input_var_names = [
var.name
for var in paddle.utils.flatten(inputs)
if isinstance(var, Variable)
]
if input_spec is None:
# no prune
return input_var_names
else:
        # filter out non-tensor type spec infos.
input_spec = [
spec
for spec in input_spec
if isinstance(spec, paddle.static.InputSpec)
]
if len(input_spec) == len(input_var_names):
# no prune
result_list = input_var_names
# if input spec name not in input_var_names, only raise warning
for spec in input_spec:
if spec.name is None:
warnings.warn(name_none_error % spec)
elif spec.name not in input_var_names:
warnings.warn(name_no_exists_error % spec.name)
else:
# do nothing
pass
else:
# prune
for spec in input_spec:
if spec.name is None:
# name is None, the input_spec only can be InputSpec
raise ValueError(name_none_error % spec)
elif spec.name not in input_var_names:
# the input_spec can be `InputSpec` or `Tensor`
raise ValueError(name_no_exists_error % spec.name)
else:
result_list.append(spec.name)
return result_list
def _get_output_vars(outputs, output_spec, with_hook=False):
name_no_exists_error = (
"The tensor `%s` does not exists. "
"Please make sure the name of example Tensor "
"in configs.output_spec is the output tensor of "
"Layer.forward method."
)
if output_spec and with_hook:
raise RuntimeError(
"Currently not support specify output_spec while founding pre/post hooks in your outermost layer."
)
result_list = []
output_vars_dict = OrderedDict()
for var in paddle.utils.flatten(outputs):
if isinstance(var, Variable):
output_vars_dict[var.name] = var
if output_spec is None:
result_list = list(output_vars_dict.values())
elif output_spec is not None and len(output_spec) == len(output_vars_dict):
result_list = list(output_vars_dict.values())
for var in output_spec:
if var.name not in output_vars_dict:
warnings.warn(name_no_exists_error % var.name)
else:
for var in output_spec:
if var.name not in output_vars_dict:
raise ValueError(name_no_exists_error % var.name)
else:
result_list.append(output_vars_dict[var.name])
return result_list
# NOTE(chenweihang): [ Handling of use cases of API paddle.jit.load ]
# `paddle.jit.load` may be used to load saved results of:
# 1. Expected cases:
# - paddle.jit.save
# - paddle.static.save_inference_model
# - paddle.fluid.io.save_inference_model
# 2. Error cases:
# - paddle.save: no .pdmodel for prefix
# - paddle.static.save: no .pdiparams but .pdparams exists
# - paddle.fluid.io.save_params/save_persistables: no __model__
# TODO(chenweihang): polish error message in above error cases
def _build_load_path_and_config(path, config):
# NOTE(chenweihang): If both [prefix save format] and [directory save format] exist,
# raise error, avoid confusing behavior
prefix_format_path = path + INFER_MODEL_SUFFIX
prefix_format_exist = os.path.exists(prefix_format_path)
directory_format_exist = os.path.isdir(path)
if prefix_format_exist and directory_format_exist:
raise ValueError(
"The {}.pdmodel and {} directory exist at the same time, "
"don't know which one to load, please make sure that the specified target "
"of ``path`` is unique.".format(path, path)
)
elif not prefix_format_exist and not directory_format_exist:
raise ValueError(
"The ``path`` (%s) to load model not exists. "
"Please make sure that *.pdmodel exists or "
"don't using ``skip_forward=True`` to jit.save." % path
)
else:
if prefix_format_exist:
file_prefix = os.path.basename(path)
model_path = os.path.dirname(path)
if config.model_filename is not None:
warnings.warn(
"When loading the result saved with the "
"specified file prefix, the ``model_filename`` config does "
"not take effect."
)
config.model_filename = file_prefix + INFER_MODEL_SUFFIX
if config.params_filename is not None:
warnings.warn(
"When loading the result saved with the "
"specified file prefix, the ``params_filename`` config does "
"not take effect."
)
config.params_filename = file_prefix + INFER_PARAMS_SUFFIX
else:
# Compatible with the old save_inference_model format
model_path = path
return model_path, config
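# Registry of pre-save hooks executed before paddle.jit.save runs, guarded by a module-level lock.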
_save_pre_hooks_lock = threading.Lock()
_save_pre_hooks = []
class HookRemoveHelper:
"""A HookRemoveHelper that can be used to remove hook."""
def __init__(self, hook):
self._hook = hook
def remove(self):
_remove_save_pre_hook(self._hook)
def _register_save_pre_hook(hook):
"""
Register a save pre-hook for `paddle.jit.save`.
This hook will be executed before `save` function has been invoked.
hook(layer, input_spec, configs) -> None
- layer (Layer|function): This argument is corresponding to `layer` in `paddle.jit.save`.
- input_spec (list or tuple[InputSpec|Tensor|Python built-in variable]): This argument is corresponding to `input_spec` in `paddle.jit.save`.
- configs (dict): This argument is corresponding to `configs` in `paddle.jit.save`.
Args:
hook(function): a function registered as a save pre-hook
Returns:
HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()`.
Examples:
.. code-block:: python
import numpy as np
import paddle
IMAGE_SIZE = 256
CLASS_NUM = 10
class LinearNet(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._linear = paddle.nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, x):
return self._linear(x)
saving_count = 0
def save_pre_hook(layer, input_spec, configs):
global saving_count
saving_count += 1
remove_handler = paddle.jit.register_save_pre_hook(save_pre_hook)
layer = LinearNet()
paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
# saving_count == 1
remove_handler.remove()
paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
# saving_count == 1
"""
global _save_pre_hooks_lock
global _save_pre_hooks
_save_pre_hooks_lock.acquire()
if hook not in _save_pre_hooks:
_save_pre_hooks.append(hook)
_save_pre_hooks_lock.release()
return HookRemoveHelper(hook)
def _clear_save_pre_hooks():
global _save_pre_hooks_lock
global _save_pre_hooks
_save_pre_hooks_lock.acquire()
_save_pre_hooks.clear()
_save_pre_hooks_lock.release()
def _remove_save_pre_hook(hook):
global _save_pre_hooks_lock
global _save_pre_hooks
_save_pre_hooks_lock.acquire()
if hook in _save_pre_hooks:
_save_pre_hooks.remove(hook)
_save_pre_hooks_lock.release()
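# Decorator that invokes every registered save pre-hook before delegating to the wrapped save function.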
@wrap_decorator
def _run_save_pre_hooks(func):
def wrapper(layer, path, input_spec=None, **configs):
global _save_pre_hooks
for hook in _save_pre_hooks:
hook(layer, input_spec, configs)
func(layer, path, input_spec, **configs)
return wrapper
def _save_property(filename: str, property_vals: list[tuple[Any, str]]):
"""class property serialization.
Args:
filename (str): *.meta
property_vals (list[tuple[Any, str]]): class property.
"""
def set_property(meta, key, val):
if isinstance(val, float):
meta.set_float(key, val)
elif isinstance(val, int):
meta.set_int(key, val)
elif isinstance(val, str):
meta.set_string(key, val)
elif isinstance(val, (tuple, list)):
if isinstance(val[0], float):
meta.set_floats(key, val)
elif isinstance(val[0], int):
meta.set_ints(key, val)
elif isinstance(val[0], str):
meta.set_strings(key, val)
else:
raise ValueError(f"Note support val type: {type(val)}")
return
with open(filename, 'wb') as f:
meta = paddle.framework.core.Property()
for item in property_vals:
val, key = item[0], item[1]
set_property(meta, key, val)
f.write(meta.serialize_to_string())
@_run_save_pre_hooks
@switch_to_static_graph
def save(layer, path, input_spec=None, **configs):
"""
Saves input Layer or function as ``paddle.jit.TranslatedLayer``
format model, which can be used for inference or fine-tuning after loading.
It will save the translated program and all related persistable
variables of input Layer to given ``path`` .
``path`` is the prefix of saved objects, and the saved translated program file
suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` ,
    and some additional variable description information is also saved to a file whose
    suffix is ``.pdiparams.info``; this additional information is used in fine-tuning.
The saved model can be loaded by follow APIs:
- ``paddle.jit.load``
- ``paddle.static.load_inference_model``
- Other C++ inference APIs
.. note::
When using ``paddle.jit.save`` to save a function, parameters will not be saved. If you have to
save the parameter, please pass the Layer containing function and parameter to ``paddle.jit.save``.
Args:
layer (Layer|function): The Layer or function to be saved.
path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
input_spec (list or tuple[InputSpec|Tensor|Python built-in variable], optional): Describes the input of the saved model's forward
            method, which can be described by InputSpec or example Tensor. Moreover, we support specifying non-tensor type arguments,
            such as int, float, string, or list/dict of them. If None, all input variables of
the original Layer's forward method would be the inputs of the saved model. Default None.
**configs (dict, optional): Other save configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of the saved model.
By default, all return variables of original Layer's forward method are kept as the
output of the saved model. If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given ``output_spec`` list.
Returns:
None
Examples:
.. code-block:: python
# example 1: save layer
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super().__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# 1. train & save model.
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
path = "example_model/linear"
paddle.jit.save(layer, path)
# example 2: save function
import paddle
from paddle.static import InputSpec
def save_function():
@paddle.jit.to_static
def fun(inputs):
return paddle.tanh(inputs)
path = 'test_jit_save_load_function_1/func'
inps = paddle.rand([3, 6])
origin = fun(inps)
paddle.jit.save(fun, path)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
print((load_result - origin).abs().max() < 1e-10)
save_function()
"""
# 1. input build & check
prog_translator = ProgramTranslator()
is_prim_infer = core._is_fwd_prim_enabled() and core._is_bwd_prim_enabled()
if not prog_translator.enable_to_static:
raise RuntimeError(
"The paddle.jit.save doesn't work when setting 'paddle.jit.enable_to_static' to False."
)
if not (
isinstance(layer, (Layer, StaticFunction)) or inspect.isfunction(layer)
):
raise TypeError(
"The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
% type(layer)
)
elif inspect.isfunction(layer) or isinstance(layer, StaticFunction):
warnings.warn(
'What you save is a function, and `jit.save` will generate the name of the model file according to `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.'
)
    # NOTE(chenweihang): If the input layer is wrapped by DataParallel,
    # the args and kwargs of the forward method can't be parsed by
    # function_spec, so here we save DataParallel._layers instead of
    # DataParallel itself
# NOTE(chenweihang): using inner_layer, do not change input layer
if isinstance(layer, paddle.DataParallel):
inner_layer = layer._layers
else:
inner_layer = layer
# path check
file_prefix = os.path.basename(path)
if file_prefix == "":
raise ValueError(
"The input path MUST be format of dirname/file_prefix "
"[dirname\\file_prefix in Windows system], but received "
"file_prefix is empty string."
)
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
# avoid change user given input_spec
inner_input_spec = None
if input_spec is not None:
if isinstance(layer, Layer):
for attr_func in dir(inner_layer):
static_func = getattr(inner_layer, attr_func, None)
if (
isinstance(static_func, StaticFunction)
and 'forward' != attr_func
):
raise ValueError(
"If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
% type(input_spec)
)
if not isinstance(input_spec, (list, tuple)):
raise TypeError(
"The input input_spec should be 'list', but received input_spec's type is %s."
% type(input_spec)
)
inner_input_spec = []
for var in paddle.utils.flatten(input_spec):
if isinstance(var, paddle.static.InputSpec):
inner_input_spec.append(var)
elif isinstance(var, (core.eager.Tensor, Variable)):
inner_input_spec.append(
paddle.static.InputSpec.from_tensor(var)
)
else:
# NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
inner_input_spec.append(var)
# parse configs
configs = _parse_save_configs(configs)
# whether outermost layer has pre/post hook, if does, we need also save
# these operators in program.
with_hook = configs.with_hook
combine_params = configs.combine_params
if combine_params:
configs._program_only = True
scope = core.Scope()
extra_var_info = {}
if isinstance(layer, Layer):
functions = dir(inner_layer)
if inner_layer._forward_pre_hooks or inner_layer._forward_post_hooks:
with_hook = True
else:
# layer is function
functions = [
layer,
]
combine_vars = {}
property_vals = [] # (value, key)
concrete_program = None
for attr_func in functions:
if isinstance(layer, Layer):
static_func = getattr(inner_layer, attr_func, None)
if isinstance(static_func, StaticFunction):
if static_func.is_property:
# property method to be exported
immediate_val = static_func()
property_vals.append(
(
immediate_val,
layer.__class__.__name__ + '.' + attr_func,
)
)
continue
concrete_program = (
static_func.concrete_program_specify_input_spec(
inner_input_spec,
with_hook=with_hook,
is_prim_infer=is_prim_infer,
)
)
elif 'forward' == attr_func:
if configs.skip_forward:
# do not jit.save forward function
continue
# transform in jit.save, if input_spec is incomplete, declarative will throw error
# inner_input_spec is list[InputSpec], it should be packed with same structure
# as original input_spec here.
if inner_input_spec:
inner_input_spec = paddle.utils.pack_sequence_as(
input_spec, inner_input_spec
)
static_forward = to_static(
inner_layer.forward, input_spec=inner_input_spec
)
concrete_program = (
static_forward.concrete_program_specify_input_spec(
with_hook=with_hook, is_prim_infer=is_prim_infer
)
)
# the input_spec has been used in declarative, which is equal to
# @to_static with input_spec and jit.save without input_spec,
# avoid needless warning
inner_input_spec = None
else:
continue
else:
# When layer is a function
if isinstance(attr_func, StaticFunction):
if attr_func.is_property:
# property method to be exported
immediate_val = attr_func()
property_vals.append((immediate_val, attr_func))
continue
concrete_program = (
attr_func.concrete_program_specify_input_spec(
inner_input_spec, is_prim_infer=is_prim_infer
)
)
else:
if inner_input_spec:
inner_input_spec = paddle.utils.pack_sequence_as(
input_spec, inner_input_spec
)
static_function = to_static(
attr_func, input_spec=inner_input_spec
)
concrete_program = static_function.concrete_program
if static_function._class_instance is None:
warnings.warn(
'`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.format(
layer
)
)
# when save multi `StaticFunction`, all `StaticFunction` share params.
dygraph_state_dict = None
if isinstance(inner_layer, Layer):
dygraph_state_dict = inner_layer.to_static_state_dict()
elif isinstance(attr_func, StaticFunction):
if attr_func._class_instance:
dygraph_state_dict = (
attr_func._class_instance.to_static_state_dict()
)
if dygraph_state_dict:
# NOTE(chenweihang): we maintain the mapping of variable name to
# structured name, the buffer variable (non-persistable)
# saved to inference program may not need by dygraph Layer,
# we only record the state_dict variable's structured name
state_names_dict = {}
state_var_dict = {}
for structured_name, var in dygraph_state_dict.items():
state_names_dict[var.name] = structured_name
state_var_dict[var.name] = var
# 3. share parameters from Layer to scope & record var info
with dygraph.guard():
for param_or_buffer in concrete_program.parameters:
# share to scope
if param_or_buffer.type == core.VarDesc.VarType.VOCAB:
scr_tensor = param_or_buffer.value().get_map_tensor()
tgt_var = scope.var(param_or_buffer.name)
tgt_var.set_vocab(scr_tensor)
else:
param_or_buffer_tensor = scope.var(
param_or_buffer.name
).get_tensor()
# src_tensor = param_or_buffer.value().get_tensor()
src_tensor = (
state_var_dict[param_or_buffer.name]
.value()
.get_tensor()
)
param_or_buffer_tensor._share_data_with(src_tensor)
# record var info
if param_or_buffer.name not in extra_var_info:
extra_info_dict = {}
if param_or_buffer.name in state_names_dict:
extra_info_dict['structured_name'] = state_names_dict[
param_or_buffer.name
]
extra_info_dict[
'stop_gradient'
] = param_or_buffer.stop_gradient
if isinstance(param_or_buffer, EagerParamBase):
extra_info_dict['trainable'] = param_or_buffer.trainable
extra_var_info[param_or_buffer.name] = extra_info_dict
    # 4. build input & output of save_inference_model
# NOTE(chenweihang): [ Get input variables name ]
# There are two cases, whether to prune the inputs or not
# - not prune inputs (recommend):
# - the len(input_spec) == len((concrete_program.inputs) - 1
# - here can use concrete_program.inputs directly
# - prune inputs:
# - the input_spec length < len((concrete_program.inputs) - 1
# - the input_spec's name should be in concrete_program.inputs
input_var_names = _get_input_var_names(
concrete_program.inputs, inner_input_spec
)
# NOTE(chenweihang): [ Get output variables ]
# the rule is like [ Get input variables name ]. For output var,
# we only support Tensor spec, and actually, we only need the
# var name of output, and we don't recommended to use output_spec
# print(concrete_program.main_program)
# print(concrete_program.outputs, configs.output_spec)
output_vars = _get_output_vars(
concrete_program.outputs, configs.output_spec, with_hook
)
# 5. save inference model
# construct new save_inference_model arguments
model_path = dirname
# NOTE(chenweihang): because prefix contains model and params filename,
# so we don't support set model_filename & params_filename
if 'forward' == attr_func or not isinstance(layer, Layer):
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
else:
model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX
params_filename = (
file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX
)
with scope_guard(scope):
save_inference_model(
dirname=model_path,
feeded_var_names=input_var_names,
target_vars=output_vars,
executor=Executor(_current_expected_place()),
main_program=concrete_program.main_program.clone(),
model_filename=model_filename,
params_filename=params_filename,
export_for_deployment=configs._export_for_deployment,
program_only=configs._program_only,
clip_extra=configs.clip_extra,
)
if combine_params:
clone_main_program = concrete_program.main_program.clone()
clone_main_program = clone_main_program._prune_with_input(
input_var_names, output_vars
)
for block in clone_main_program.blocks:
combine_vars.update(block.vars)
# save shared params
if combine_params:
# sort vars by name
combine_vars = sorted(combine_vars.items(), key=lambda item: item[0])
ordered_vars = []
for name, var in combine_vars:
ordered_vars.append(var)
params_filename = file_prefix + INFER_PARAMS_SUFFIX
with scope_guard(scope):
paddle.static.save_vars(
Executor(_current_expected_place()),
dirname=model_path,
vars=list(
filter(
paddle.framework.io_utils.is_persistable, ordered_vars
)
),
filename=params_filename,
)
# save property
property_save_path = os.path.join(
os.path.normpath(model_path), file_prefix + INFER_PROPERTY_SUFFIX
)
_save_property(property_save_path, property_vals)
# NOTE(chenweihang): [ Save extra variable info ]
# save_inference_model will lose some important variable information, including:
# - Variable name and correspondence (when saved variables as one file)
# - Variable.stop_gradient information
# - Which persistent variable are parameter and which are not
# - Parameter.trainable information
#
# The lost information cannot be recovered when it is loaded again,
# so if we want to perform fine-tune after loading, we may need to
# configure redundant information to proceed.
#
# Due to compatibility issues, we cannot change the original storage structure,
# but we can save these information in `jit.save` without changing the original
# storage to improve user experience. So we save extra information into
# file `***.pdiparams.info`
# "layer" can only be Layer or function or StaticFunction.
contain_parameter = False
if concrete_program is not None:
for var in concrete_program.main_program.list_vars():
contain_parameter |= isinstance(var, Parameter)
if (isinstance(layer, Layer) or contain_parameter) and extra_var_info:
with scope_guard(scope):
extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX
with open(extra_var_info_path, 'wb') as f:
pickle.dump(extra_var_info, f, protocol=2)
@dygraph_only
def load(path, **configs):
"""
:api_attr: imperative
Load model saved by ``paddle.jit.save`` or ``paddle.static.save_inference_model`` or
paddle 1.x API ``paddle.fluid.io.save_inference_model`` as ``paddle.jit.TranslatedLayer``,
then performing inference or fine-tune training.
.. note::
If you load model saved by ``paddle.static.save_inference_model`` ,
there will be the following limitations when using it in fine-tuning:
        1. Imperative mode does not support LoDTensor. All of the original model's feed targets or parameters that depend on LoD are temporarily unavailable.
2. All saved model's feed targets need to be passed into TranslatedLayer's forward function.
3. The variable's ``stop_gradient`` information is lost and can not be recovered.
4. The parameter's ``trainable`` information is lost and can not be recovered.
Args:
path (str): The path prefix to load model. The format is ``dirname/file_prefix`` or ``file_prefix`` .
**configs (dict, optional): Other load configuration options for compatibility. We do not
recommend using these configurations, they may be removed in the future. If not necessary,
DO NOT use them. Default None.
The following options are currently supported:
(1) model_filename (str): The inference model file name of the paddle 1.x
``save_inference_model`` save format. Default file name is :code:`__model__` .
(2) params_filename (str): The persistable variables file name of the paddle 1.x
``save_inference_model`` save format. No default file name, save variables separately
by default.
Returns:
TranslatedLayer: A Layer object can run saved translated model.
Examples:
1. Load model saved by ``paddle.jit.save`` then performing inference and fine-tune training.
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
class LinearNet(nn.Layer):
def __init__(self):
super().__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
def train(layer, loader, loss_fn, opt):
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
opt.step()
opt.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
# 1. train & save model.
# create network
layer = LinearNet()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
# train
train(layer, loader, loss_fn, adam)
# save
path = "example_model/linear"
paddle.jit.save(layer, path)
# 2. load model
# load
loaded_layer = paddle.jit.load(path)
# inference
loaded_layer.eval()
x = paddle.randn([1, IMAGE_SIZE], 'float32')
pred = loaded_layer(x)
# fine-tune
loaded_layer.train()
adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
train(loaded_layer, loader, loss_fn, adam)
2. Load model saved by ``paddle.fluid.io.save_inference_model`` then performing and fine-tune training.
.. code-block:: python
import numpy as np
import paddle
import paddle.static as static
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.nn.functional as F
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(paddle.io.Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
paddle.enable_static()
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
pred = static.nn.fc(x=image, size=10, activation='softmax')
loss = F.cross_entropy(input=pred, label=label)
avg_loss = paddle.mean(loss)
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)
place = paddle.CPUPlace()
exe = static.Executor(place)
exe.run(static.default_startup_program())
# create data loader
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = paddle.io.DataLoader(dataset,
feed_list=[image, label],
places=place,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
return_list=False,
num_workers=2)
# 1. train and save inference model
for data in loader():
exe.run(
static.default_main_program(),
feed=data,
fetch_list=[avg_loss])
model_path = "fc.example.model"
paddle.fluid.io.save_inference_model(
model_path, ["image"], [pred], exe)
# 2. load model
# enable dygraph mode
paddle.disable_static(place)
# load
fc = paddle.jit.load(model_path)
# inference
fc.eval()
x = paddle.randn([1, IMAGE_SIZE], 'float32')
pred = fc(x)
# fine-tune
fc.train()
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
loader = paddle.io.DataLoader(dataset,
places=place,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = fc(image)
loss = loss_fn(out, label)
loss.backward()
adam.step()
adam.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
"""
# 1. construct correct config
config = _parse_load_config(configs)
model_path, config = _build_load_path_and_config(path, config)
return TranslatedLayer._construct(model_path, config)
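# Trace a dygraph Layer once with the given inputs and record the executed ops into a static ProgramDesc.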
@dygraph_only
def _trace(
layer, inputs, feed_prefix='feed_', fetch_prefix='fetch_', tmp_prefix='t_'
):
assert isinstance(layer, Layer)
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
tracer = _dygraph_tracer()._get_program_desc_tracer()
var_list = extract_vars(inputs)
with program_desc_tracing_guard(True):
original_outputs = layer(*inputs)
if not isinstance(original_outputs, (list, tuple)):
outputs = [original_outputs]
else:
outputs = original_outputs
out_vars = extract_vars(outputs, err_tag='outputs')
(
program_desc,
feed_names,
fetch_names,
parameters,
) = tracer.create_program_desc(
var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix
)
tracer.reset()
with _dygraph_guard(None):
program = create_program_from_desc(program_desc)
return original_outputs, program, feed_names, fetch_names, parameters
class TracedLayer:
"""
:api_attr: imperative
TracedLayer is used to convert a forward dygraph model to a static
graph model. This is mainly used to save the dygraph model for online
inference using C++. Besides, users can also do inference in Python
using the converted static graph model, which usually has better
performance than the original dygraph model.
TracedLayer would run the static graph model using :code:`Executor`
and :code:`CompiledProgram` . The static graph model would share
parameters with the dygraph model.
All TracedLayer objects should not be created by constructor and should
be created by static method :code:`TracedLayer.trace(layer, inputs)` .
The TracedLayer can only be used to convert the data-independent dygraph
model into the static graph model, which means the dygraph model should
    be independent of the tensor data and shape.
"""
def __init__(self, program, parameters, feed_names, fetch_names):
self._program = program
self._feed_names = feed_names
self._fetch_names = fetch_names
self._params = parameters
self._place = _current_expected_place()
self._scope = core.Scope()
for p in parameters:
src_tensor = p.value().get_tensor()
dst_tensor = self._scope.var(p.name).get_tensor()
dst_tensor._share_data_with(src_tensor)
self._exe = Executor(self._place)
self._compiled_program = None
self._build_strategy = None
self._exec_strategy = None
@property
def program(self):
return self._program
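    # Set the `is_test` attribute on every op that has it, switching the traced program between training and inference behavior.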
def _switch(self, is_test=True):
for block_id in range(self._program.num_blocks):
block = self._program.block(block_id)
for op in block.ops:
if op.has_attr("is_test"):
op._set_attr("is_test", is_test)
@staticmethod
@dygraph_only
def trace(layer, inputs):
"""
This method is the only allowed method to create TracedLayer object.
It would call the :code:`layer(*inputs)` method to run the dygraph
model and convert it into a static graph model.
Args:
layer (paddle.nn.Layer): the layer object to be traced.
inputs (list(Tensor)|tuple(Tensor)|Tensor): the input tensors of
the layer object.
Returns:
tuple: A tuple of 2 items, whose the first item is the output of
:code:`layer(*inputs)` , and the second item is the created
TracedLayer object.
Examples:
.. code-block:: python:
import paddle
class ExampleLayer(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._fc = paddle.nn.Linear(3, 10)
def forward(self, input):
return self._fc(input)
layer = ExampleLayer()
in_var = paddle.uniform(shape=[2, 3], dtype='float32')
out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
# run the static graph model using Executor inside
out_static_graph = static_layer([in_var])
print(len(out_static_graph)) # 1
print(out_static_graph[0].shape) # (2, 10)
# save the static graph model for inference
static_layer.save_inference_model('./saved_infer_model')
"""
assert isinstance(
layer, Layer
), "The type of 'layer' in paddle.jit.TracedLayer.trace must be paddle.nn.Layer, but received {}.".format(
type(layer)
)
outs, prog, feed, fetch, parameters = _trace(layer, inputs)
traced = TracedLayer(prog, parameters, feed, fetch)
return outs, traced
def set_strategy(self, build_strategy=None, exec_strategy=None):
"""
Set the strategies when running static graph model.
Args:
build_strategy (BuildStrategy, optional): build strategy of
:code:`CompiledProgram` inside TracedLayer. Default None.
exec_strategy (ExecutionStrategy, optional): execution strategy of
:code:`CompiledProgram` inside TracedLayer. Default None.
Returns:
None
Examples:
.. code-block:: python:
import paddle
class ExampleLayer(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._fc = paddle.nn.Linear(3, 10)
def forward(self, input):
return self._fc(input)
layer = ExampleLayer()
in_var = paddle.uniform(shape=[2, 3], dtype='float32')
out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
build_strategy = paddle.static.BuildStrategy()
build_strategy.enable_inplace = True
exec_strategy = paddle.static.ExecutionStrategy()
exec_strategy.num_threads = 2
static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy)
out_static_graph = static_layer([in_var])
"""
assert self._compiled_program is None, "Cannot set strategy after run"
assert isinstance(
build_strategy, (type(None), BuildStrategy)
), "The type of 'build_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format(
type(build_strategy)
)
assert isinstance(
exec_strategy, (type(None), ExecutionStrategy)
), "The type of 'exec_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format(
type(exec_strategy)
)
self._build_strategy = build_strategy
self._exec_strategy = exec_strategy
@switch_to_static_graph
def _compile(self):
self._compiled_program = CompiledProgram(
self._program,
build_strategy=self._build_strategy,
)
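    # Map the positional input tensors onto the feed names recorded during tracing.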
def _build_feed(self, inputs):
assert isinstance(
inputs, (list, tuple)
), "Inputs should be a list or tuple of variables"
assert len(inputs) == len(self._feed_names)
feed_dict = {}
if in_dynamic_mode():
for x, name in zip(inputs, self._feed_names):
feed_dict[name] = x.value().get_tensor()
else:
for x, name in zip(inputs, self._feed_names):
feed_dict[name] = x
return feed_dict
@switch_to_static_graph
def _run(self, feed):
return self._exe.run(
self._compiled_program, feed=feed, fetch_list=self._fetch_names
)
def __call__(self, inputs):
with scope_guard(self._scope):
if self._compiled_program is None:
self._compile()
return self._run(self._build_feed(inputs))
@switch_to_static_graph
def save_inference_model(self, path, feed=None, fetch=None, **kwargs):
"""
Save the TracedLayer to a model for inference. The saved
inference model can be loaded by C++ inference APIs.
``path`` is the prefix of saved objects, and the saved translated program file
suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` .
Args:
path(str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
feed (list[int], optional): the input variable indices of the saved
inference model. If None, all input variables of the
TracedLayer object would be the inputs of the saved inference
model. Default None.
fetch (list[int], optional): the output variable indices of the
saved inference model. If None, all output variables of the
TracedLayer object would be the outputs of the saved inference
model. Default None.
kwargs: Supported keys including
- clip_extra(bool): whether to clip extra information for every operator. Defaults to True.
- legacy_format(bool): whether to save program in legacy format. Default to False.
Returns:
None
Examples:
.. code-block:: python:
import numpy as np
import paddle
class ExampleLayer(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._fc = paddle.nn.Linear(3, 10)
def forward(self, input):
return self._fc(input)
save_dirname = './saved_infer_model'
in_np = np.random.random([2, 3]).astype('float32')
in_var = paddle.to_tensor(in_np)
layer = ExampleLayer()
out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])
paddle.enable_static()
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
program, feed_vars, fetch_vars = paddle.static.load_inference_model(save_dirname,
exe)
fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
print(fetch.shape) # (2, 10)
"""
check_type(
path,
"path",
str,
"paddle.jit.TracedLayer.save_inference_model",
)
check_type(
feed,
"feed",
(type(None), list),
"paddle.jit.TracedLayer.save_inference_model",
)
if isinstance(feed, list):
for f in feed:
check_type(
f,
"each element of feed",
int,
"paddle.jit.TracedLayer.save_inference_model",
)
check_type(
fetch,
"fetch",
(type(None), list),
"paddle.jit.TracedLayer.save_inference_model",
)
if isinstance(fetch, list):
for f in fetch:
check_type(
f,
"each element of fetch",
int,
"paddle.jit.TracedLayer.save_inference_model",
)
clip_extra = kwargs.get('clip_extra', True)
# path check
file_prefix = os.path.basename(path)
if file_prefix == "":
raise ValueError(
"The input path MUST be format of dirname/file_prefix "
"[dirname\\file_prefix in Windows system], but received "
"file_prefix is empty string."
)
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
def get_feed_fetch(all_vars, partial_vars):
if partial_vars is None:
return all_vars
return [all_vars[idx] for idx in partial_vars]
with scope_guard(self._scope):
feeded_var_names = get_feed_fetch(self._feed_names, feed)
target_var_names = get_feed_fetch(self._fetch_names, fetch)
target_vars = []
for name in target_var_names:
target_var = self._program.global_block().vars.get(name, None)
assert target_var is not None, f"{name} cannot be found"
target_vars.append(target_var)
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
legacy_format = kwargs.get('legacy_format', False)
save_inference_model(
dirname=dirname,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
executor=self._exe,
main_program=self._program.clone(),
model_filename=model_filename,
params_filename=params_filename,
clip_extra=clip_extra,
legacy_format=legacy_format,
)
|
[
"noreply@github.com"
] |
Shixiaowei02.noreply@github.com
|
69c495d55d1cfb23bfd19375a1386a947e79ae42
|
f889bc01147869459c0a516382e7b95221295a7b
|
/swagger_client/models/sales_data_shipping_assignment_interface.py
|
5daf5091d5f3faf63957de7c1d25ad80c4a584a7
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,068
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SalesDataShippingAssignmentInterface(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'shipping': 'SalesDataShippingInterface',
'items': 'list[SalesDataOrderItemInterface]',
'stock_id': 'int',
'extension_attributes': 'SalesDataShippingAssignmentExtensionInterface'
}
attribute_map = {
'shipping': 'shipping',
'items': 'items',
'stock_id': 'stock_id',
'extension_attributes': 'extension_attributes'
}
def __init__(self, shipping=None, items=None, stock_id=None, extension_attributes=None):
"""
SalesDataShippingAssignmentInterface - a model defined in Swagger
"""
self._shipping = None
self._items = None
self._stock_id = None
self._extension_attributes = None
self.shipping = shipping
self.items = items
if stock_id is not None:
self.stock_id = stock_id
if extension_attributes is not None:
self.extension_attributes = extension_attributes
@property
def shipping(self):
"""
Gets the shipping of this SalesDataShippingAssignmentInterface.
:return: The shipping of this SalesDataShippingAssignmentInterface.
:rtype: SalesDataShippingInterface
"""
return self._shipping
@shipping.setter
def shipping(self, shipping):
"""
Sets the shipping of this SalesDataShippingAssignmentInterface.
:param shipping: The shipping of this SalesDataShippingAssignmentInterface.
:type: SalesDataShippingInterface
"""
if shipping is None:
raise ValueError("Invalid value for `shipping`, must not be `None`")
self._shipping = shipping
@property
def items(self):
"""
Gets the items of this SalesDataShippingAssignmentInterface.
Order items of shipping assignment
:return: The items of this SalesDataShippingAssignmentInterface.
:rtype: list[SalesDataOrderItemInterface]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this SalesDataShippingAssignmentInterface.
Order items of shipping assignment
:param items: The items of this SalesDataShippingAssignmentInterface.
:type: list[SalesDataOrderItemInterface]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def stock_id(self):
"""
Gets the stock_id of this SalesDataShippingAssignmentInterface.
Stock id
:return: The stock_id of this SalesDataShippingAssignmentInterface.
:rtype: int
"""
return self._stock_id
@stock_id.setter
def stock_id(self, stock_id):
"""
Sets the stock_id of this SalesDataShippingAssignmentInterface.
Stock id
:param stock_id: The stock_id of this SalesDataShippingAssignmentInterface.
:type: int
"""
self._stock_id = stock_id
@property
def extension_attributes(self):
"""
Gets the extension_attributes of this SalesDataShippingAssignmentInterface.
:return: The extension_attributes of this SalesDataShippingAssignmentInterface.
:rtype: SalesDataShippingAssignmentExtensionInterface
"""
return self._extension_attributes
@extension_attributes.setter
def extension_attributes(self, extension_attributes):
"""
Sets the extension_attributes of this SalesDataShippingAssignmentInterface.
:param extension_attributes: The extension_attributes of this SalesDataShippingAssignmentInterface.
:type: SalesDataShippingAssignmentExtensionInterface
"""
self._extension_attributes = extension_attributes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SalesDataShippingAssignmentInterface):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
5070de3b4359c2c25dc9e9e0abd3ef0f9eacc054
|
9bcef84f5eb91146a518915836f8776196ba3b90
|
/Python/add.py
|
fcd7fd1516400913ae974fc7d7ffeee0e547a5bf
|
[] |
no_license
|
baltah666/Netbox
|
4d6dfa3c257abff3798d7949e5f891c4f57dac26
|
05a89fe28ed81cc2f83b1efcbe9e58bf0370d7d3
|
refs/heads/master
| 2023-07-30T00:03:14.655859
| 2021-09-15T07:52:41
| 2021-09-15T07:52:41
| 404,419,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import pynetbox
import time
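# Bulk-create devices in NetBox via pynetbox: one device per line of hosts.txt,
# using the hard-coded API token and numeric device_type/device_role/site IDs below.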
def adddev(dev):
nb = pynetbox.api(url='http://192.168.174.149:8000/', token='0123456789abcdef0123456789abcdef01234567')
result = nb.dcim.devices.create(
name=dev,
device_type=5,
device_role=2,
site=3,
)
print(result)
file1 = open ('/home/gns3/Netbox/Python/hosts.txt', 'r')
Lines = file1.readlines()
count = 0
for line in Lines:
count += 1
time.sleep(0.5)
    dev = line.strip()  # strip the trailing newline so the device name is clean
adddev(dev)
|
[
"baltah666@gmail.com"
] |
baltah666@gmail.com
|
aa894261b14b1133e1571c8269bbbbc56f3b8faf
|
7f26f01e3f47c40e6513fd3143bae2f0b4ec68bc
|
/kaggle-outbrain/readRDS.py
|
203726f090e924048ed7e9301d418408abd6bcdd
|
[] |
no_license
|
maidousj/exptmp
|
c457b4e8c4b429067b66b04f48e72ddddebc325f
|
14d677a06ed441a6204ef98b6e260ecc939a7433
|
refs/heads/master
| 2021-01-01T04:46:41.568895
| 2017-12-06T03:08:47
| 2017-12-06T03:08:47
| 97,240,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
import ipdb
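# Read an R .rds file via rpy2, convert it to a pandas object, and drop into
# ipdb to inspect a few of its fields interactively.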
fileName = '/data/rds/baseline_1/test/000.rds'
#fileName = '/data/rds/clicks.rds'
pandas2ri.activate()
readRDS = robjects.r['readRDS']
df = readRDS(fileName)
df = pandas2ri.ri2py(df)
ipdb.set_trace()
print df[1]
print df[2][0]
print df[2][5]
print df[2][6]
#for x in df:
# ipdb.set_trace(context=5)
# print x
# do something with the dataframe
|
[
"maidousj@163.com"
] |
maidousj@163.com
|
b9c29748d3516407cd79dbf12214d8ebc0ef51ab
|
450618586b23c6e5da154a4e3c123fb1b73ecf84
|
/slp_main.py
|
ab158919aeaedc60aa75d8bd3f70f715a09c602b
|
[
"MIT"
] |
permissive
|
alivcor/neuralnets
|
41e7f8c2b367ddf234e82d22cf87630cfb747df7
|
a9665301eb66977c18b65f06ecc8e8507150a44b
|
refs/heads/master
| 2021-01-12T02:21:28.157853
| 2017-01-10T06:44:42
| 2017-01-10T06:44:42
| 78,505,663
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
import neurolab as nl
import numpy as np
import pylab as pl
import csv
import math
from sklearn.metrics import classification_report
import sys
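# Train a single-layer perceptron (neurolab) on dataset_full.csv to separate
# defaulters from non-defaulters: split each class 75/25 into train/test sets,
# train on six selected feature columns, save the training-error plot to
# slp.png, and append a classification report to Classification_Report_slp.txt.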
adata = []
with open('dataset_full.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
adata.append(row)
xdata_zeros = []
ydata_zeros = []
xdata_ones = []
ydata_ones = []
for i in range(0, len(adata)):
temp = []
yval = int(adata[i][len(adata[i]) - 1])
if yval ==1:
for j in range(0, len(adata[i]) - 1):
temp.append(int(adata[i][j]))
xdata_ones.append(temp)
ydata_ones.append([yval])
else:
for j in range(0, len(adata[i]) - 1):
temp.append(int(adata[i][j]))
xdata_zeros.append(temp)
ydata_zeros.append([yval])
nzeros = len(xdata_zeros)
nones = len(xdata_ones)
# print (nzeros)
# print (nones)
zeros_div = int(math.floor(nzeros * 0.75 - 1))
ones_div = int(math.floor(nones * 0.75 - 1))
train_inputs_m = np.array(xdata_zeros[0:zeros_div] + xdata_ones[0:ones_div])
train_outputs = np.array(ydata_zeros[0:zeros_div] + ydata_ones[0:ones_div])
test_inputs_m = np.array(xdata_zeros[zeros_div:nzeros]+xdata_ones[ones_div:nones])
test_outputs = ydata_zeros[zeros_div:nzeros]+ydata_ones[ones_div:nones]
train_inputs = train_inputs_m[:, [5, 6, 7, 8, 9, 10]]
test_inputs = test_inputs_m[:, [5, 6, 7, 8, 9, 10]]
np.random.seed(0)
indices = np.arange(train_inputs.shape[0])
np.random.shuffle(indices)
print train_inputs.shape
net = nl.net.newp([[-7, 7]]*6, 1)
print train_outputs.shape
print net.co
print train_outputs
# train with delta rule
# see net.trainf
error = net.train(train_inputs[indices], train_outputs[indices], epochs=100, show=10, lr=0.1)
pl.plot(error)
pl.xlabel('Epoch number')
pl.ylabel('Train error')
pl.grid()
# pl.show()
pl.savefig("slp.png")
out = net.sim(test_inputs)
# print out
predicted_outputs = []
for i in out:
predicted_outputs.append(int(i[0]))
#
print "Training Complete, Test Results Generated for Single Layer Perceptron"
# print predicted_outputs
#
atest_outputs = []
#
for i in test_outputs:
atest_outputs.append(i[0])
target_names = ['0 - Non Defaulter','1 - Defaulter']
print "Classification Report for Single Layer Perceptron"
cfreport = classification_report(atest_outputs, predicted_outputs, target_names=target_names)
report_file = open("Classification_Report_slp.txt", 'a')
report_file.write("Classification Report For Single Layer Perceptron\n")
report_file.write(cfreport)
report_file.write("\n\n\n")
report_file.close()
|
[
"noreply@github.com"
] |
alivcor.noreply@github.com
|
38b04e78816a720b08b4988d782425587782a13a
|
a59d55ecf9054d0750168d3ca9cc62a0f2b28b95
|
/platform/gsutil/third_party/boto/tests/unit/ec2/test_connection.py
|
deeb673d1a434359dce92f431f9f7a4aae166743
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bopopescu/google-cloud-sdk
|
bb2746ff020c87271398196f21a646d9d8689348
|
b34e6a18f1e89673508166acce816111c3421e4b
|
refs/heads/master
| 2022-11-26T07:33:32.877033
| 2014-06-29T20:43:23
| 2014-06-29T20:43:23
| 282,306,367
| 0
| 0
|
NOASSERTION
| 2020-07-24T20:04:47
| 2020-07-24T20:04:46
| null |
UTF-8
|
Python
| false
| false
| 68,764
|
py
|
#!/usr/bin/env python
import httplib
from datetime import datetime, timedelta
from mock import MagicMock, Mock
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
import boto.ec2
from boto.regioninfo import RegionInfo
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.ec2.connection import EC2Connection
from boto.ec2.snapshot import Snapshot
from boto.ec2.reservedinstance import ReservedInstancesConfiguration
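# Unit tests for boto's EC2Connection: each case feeds a canned XML body through
# AWSMockServiceTestCase and checks the serialized request parameters and/or the
# parsed response objects.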
class TestEC2ConnectionBase(AWSMockServiceTestCase):
connection_class = EC2Connection
def setUp(self):
super(TestEC2ConnectionBase, self).setUp()
self.ec2 = self.service_connection
class TestReservedInstanceOfferings(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeReservedInstancesOfferingsResponse>
<requestId>d3253568-edcf-4897-9a3d-fb28e0b3fa38</requestId>
<reservedInstancesOfferingsSet>
<item>
<reservedInstancesOfferingId>2964d1bf71d8</reservedInstancesOfferingId>
<instanceType>c1.medium</instanceType>
<availabilityZone>us-east-1c</availabilityZone>
<duration>94608000</duration>
<fixedPrice>775.0</fixedPrice>
<usagePrice>0.0</usagePrice>
<productDescription>product description</productDescription>
<instanceTenancy>default</instanceTenancy>
<currencyCode>USD</currencyCode>
<offeringType>Heavy Utilization</offeringType>
<recurringCharges>
<item>
<frequency>Hourly</frequency>
<amount>0.095</amount>
</item>
</recurringCharges>
<marketplace>false</marketplace>
<pricingDetailsSet>
<item>
<price>0.045</price>
<count>1</count>
</item>
</pricingDetailsSet>
</item>
<item>
<reservedInstancesOfferingId>2dce26e46889</reservedInstancesOfferingId>
<instanceType>c1.medium</instanceType>
<availabilityZone>us-east-1c</availabilityZone>
<duration>94608000</duration>
<fixedPrice>775.0</fixedPrice>
<usagePrice>0.0</usagePrice>
<productDescription>Linux/UNIX</productDescription>
<instanceTenancy>default</instanceTenancy>
<currencyCode>USD</currencyCode>
<offeringType>Heavy Utilization</offeringType>
<recurringCharges>
<item>
<frequency>Hourly</frequency>
<amount>0.035</amount>
</item>
</recurringCharges>
<marketplace>false</marketplace>
<pricingDetailsSet/>
</item>
</reservedInstancesOfferingsSet>
<nextToken>next_token</nextToken>
</DescribeReservedInstancesOfferingsResponse>
"""
def test_get_reserved_instance_offerings(self):
self.set_http_response(status_code=200)
response = self.ec2.get_all_reserved_instances_offerings()
self.assertEqual(len(response), 2)
instance = response[0]
self.assertEqual(instance.id, '2964d1bf71d8')
self.assertEqual(instance.instance_type, 'c1.medium')
self.assertEqual(instance.availability_zone, 'us-east-1c')
self.assertEqual(instance.duration, 94608000)
self.assertEqual(instance.fixed_price, '775.0')
self.assertEqual(instance.usage_price, '0.0')
self.assertEqual(instance.description, 'product description')
self.assertEqual(instance.instance_tenancy, 'default')
self.assertEqual(instance.currency_code, 'USD')
self.assertEqual(instance.offering_type, 'Heavy Utilization')
self.assertEqual(len(instance.recurring_charges), 1)
self.assertEqual(instance.recurring_charges[0].frequency, 'Hourly')
self.assertEqual(instance.recurring_charges[0].amount, '0.095')
self.assertEqual(len(instance.pricing_details), 1)
self.assertEqual(instance.pricing_details[0].price, '0.045')
self.assertEqual(instance.pricing_details[0].count, '1')
def test_get_reserved_instance_offerings_params(self):
self.set_http_response(status_code=200)
self.ec2.get_all_reserved_instances_offerings(
reserved_instances_offering_ids=['id1','id2'],
instance_type='t1.micro',
availability_zone='us-east-1',
product_description='description',
instance_tenancy='dedicated',
offering_type='offering_type',
include_marketplace=False,
min_duration=100,
max_duration=1000,
max_instance_count=1,
next_token='next_token',
max_results=10
)
self.assert_request_parameters({
'Action': 'DescribeReservedInstancesOfferings',
'ReservedInstancesOfferingId.1': 'id1',
'ReservedInstancesOfferingId.2': 'id2',
'InstanceType': 't1.micro',
'AvailabilityZone': 'us-east-1',
'ProductDescription': 'description',
'InstanceTenancy': 'dedicated',
'OfferingType': 'offering_type',
'IncludeMarketplace': 'false',
'MinDuration': '100',
'MaxDuration': '1000',
'MaxInstanceCount': '1',
'NextToken': 'next_token',
'MaxResults': '10',},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestPurchaseReservedInstanceOffering(TestEC2ConnectionBase):
def default_body(self):
return """<PurchaseReservedInstancesOffering />"""
def test_serialized_api_args(self):
self.set_http_response(status_code=200)
response = self.ec2.purchase_reserved_instance_offering(
'offering_id', 1, (100.0, 'USD'))
self.assert_request_parameters({
'Action': 'PurchaseReservedInstancesOffering',
'InstanceCount': 1,
'ReservedInstancesOfferingId': 'offering_id',
'LimitPrice.Amount': '100.0',
'LimitPrice.CurrencyCode': 'USD',},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
class TestCreateImage(TestEC2ConnectionBase):
def default_body(self):
return """<CreateImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<imageId>ami-4fa54026</imageId>
</CreateImageResponse>"""
def test_minimal(self):
self.set_http_response(status_code=200)
response = self.ec2.create_image(
'instance_id', 'name')
self.assert_request_parameters({
'Action': 'CreateImage',
'InstanceId': 'instance_id',
'Name': 'name'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_block_device_mapping(self):
self.set_http_response(status_code=200)
bdm = BlockDeviceMapping()
bdm['test'] = BlockDeviceType()
response = self.ec2.create_image(
'instance_id', 'name', block_device_mapping=bdm)
self.assert_request_parameters({
'Action': 'CreateImage',
'InstanceId': 'instance_id',
'Name': 'name',
'BlockDeviceMapping.1.DeviceName': 'test',
'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'false'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
class TestCancelReservedInstancesListing(TestEC2ConnectionBase):
def default_body(self):
return """
<CancelReservedInstancesListingResponse>
<requestId>request_id</requestId>
<reservedInstancesListingsSet>
<item>
<reservedInstancesListingId>listing_id</reservedInstancesListingId>
<reservedInstancesId>instance_id</reservedInstancesId>
<createDate>2012-07-12T16:55:28.000Z</createDate>
<updateDate>2012-07-12T16:55:28.000Z</updateDate>
<status>cancelled</status>
<statusMessage>CANCELLED</statusMessage>
<instanceCounts>
<item>
<state>Available</state>
<instanceCount>0</instanceCount>
</item>
<item>
<state>Sold</state>
<instanceCount>0</instanceCount>
</item>
<item>
<state>Cancelled</state>
<instanceCount>1</instanceCount>
</item>
<item>
<state>Pending</state>
<instanceCount>0</instanceCount>
</item>
</instanceCounts>
<priceSchedules>
<item>
<term>5</term>
<price>166.64</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>4</term>
<price>133.32</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>3</term>
<price>99.99</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>2</term>
<price>66.66</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>1</term>
<price>33.33</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
</priceSchedules>
<tagSet/>
<clientToken>XqJIt1342112125076</clientToken>
</item>
</reservedInstancesListingsSet>
</CancelReservedInstancesListingResponse>
"""
def test_reserved_instances_listing(self):
self.set_http_response(status_code=200)
response = self.ec2.cancel_reserved_instances_listing()
self.assertEqual(len(response), 1)
cancellation = response[0]
self.assertEqual(cancellation.status, 'cancelled')
self.assertEqual(cancellation.status_message, 'CANCELLED')
self.assertEqual(len(cancellation.instance_counts), 4)
first = cancellation.instance_counts[0]
self.assertEqual(first.state, 'Available')
self.assertEqual(first.instance_count, 0)
self.assertEqual(len(cancellation.price_schedules), 5)
schedule = cancellation.price_schedules[0]
self.assertEqual(schedule.term, 5)
self.assertEqual(schedule.price, '166.64')
self.assertEqual(schedule.currency_code, 'USD')
self.assertEqual(schedule.active, False)
class TestCreateReservedInstancesListing(TestEC2ConnectionBase):
def default_body(self):
return """
<CreateReservedInstancesListingResponse>
<requestId>request_id</requestId>
<reservedInstancesListingsSet>
<item>
<reservedInstancesListingId>listing_id</reservedInstancesListingId>
<reservedInstancesId>instance_id</reservedInstancesId>
<createDate>2012-07-17T17:11:09.449Z</createDate>
<updateDate>2012-07-17T17:11:09.468Z</updateDate>
<status>active</status>
<statusMessage>ACTIVE</statusMessage>
<instanceCounts>
<item>
<state>Available</state>
<instanceCount>1</instanceCount>
</item>
<item>
<state>Sold</state>
<instanceCount>0</instanceCount>
</item>
<item>
<state>Cancelled</state>
<instanceCount>0</instanceCount>
</item>
<item>
<state>Pending</state>
<instanceCount>0</instanceCount>
</item>
</instanceCounts>
<priceSchedules>
<item>
<term>11</term>
<price>2.5</price>
<currencyCode>USD</currencyCode>
<active>true</active>
</item>
<item>
<term>10</term>
<price>2.5</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>9</term>
<price>2.5</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>8</term>
<price>2.0</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>7</term>
<price>2.0</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>6</term>
<price>2.0</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>5</term>
<price>1.5</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>4</term>
<price>1.5</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>3</term>
<price>0.7</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>2</term>
<price>0.7</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
<item>
<term>1</term>
<price>0.1</price>
<currencyCode>USD</currencyCode>
<active>false</active>
</item>
</priceSchedules>
<tagSet/>
<clientToken>myIdempToken1</clientToken>
</item>
</reservedInstancesListingsSet>
</CreateReservedInstancesListingResponse>
"""
def test_create_reserved_instances_listing(self):
self.set_http_response(status_code=200)
response = self.ec2.create_reserved_instances_listing(
'instance_id', 1, [('2.5', 11), ('2.0', 8)], 'client_token')
self.assertEqual(len(response), 1)
cancellation = response[0]
self.assertEqual(cancellation.status, 'active')
self.assertEqual(cancellation.status_message, 'ACTIVE')
self.assertEqual(len(cancellation.instance_counts), 4)
first = cancellation.instance_counts[0]
self.assertEqual(first.state, 'Available')
self.assertEqual(first.instance_count, 1)
self.assertEqual(len(cancellation.price_schedules), 11)
schedule = cancellation.price_schedules[0]
self.assertEqual(schedule.term, 11)
self.assertEqual(schedule.price, '2.5')
self.assertEqual(schedule.currency_code, 'USD')
self.assertEqual(schedule.active, True)
self.assert_request_parameters({
'Action': 'CreateReservedInstancesListing',
'ReservedInstancesId': 'instance_id',
'InstanceCount': '1',
'ClientToken': 'client_token',
'PriceSchedules.0.Price': '2.5',
'PriceSchedules.0.Term': '11',
'PriceSchedules.1.Price': '2.0',
'PriceSchedules.1.Term': '8',},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
class TestDescribeSpotInstanceRequests(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeSpotInstanceRequestsResponse>
<requestId>requestid</requestId>
<spotInstanceRequestSet>
<item>
<spotInstanceRequestId>sir-id</spotInstanceRequestId>
<spotPrice>0.003000</spotPrice>
<type>one-time</type>
<state>active</state>
<status>
<code>fulfilled</code>
<updateTime>2012-10-19T18:09:26.000Z</updateTime>
<message>Your Spot request is fulfilled.</message>
</status>
<launchGroup>mylaunchgroup</launchGroup>
<launchSpecification>
<imageId>ami-id</imageId>
<keyName>mykeypair</keyName>
<groupSet>
<item>
<groupId>sg-id</groupId>
<groupName>groupname</groupName>
</item>
</groupSet>
<instanceType>t1.micro</instanceType>
<monitoring>
<enabled>false</enabled>
</monitoring>
</launchSpecification>
<instanceId>i-id</instanceId>
<createTime>2012-10-19T18:07:05.000Z</createTime>
<productDescription>Linux/UNIX</productDescription>
<launchedAvailabilityZone>us-east-1d</launchedAvailabilityZone>
</item>
</spotInstanceRequestSet>
</DescribeSpotInstanceRequestsResponse>
"""
    def test_describe_spot_instance_requests(self):
self.set_http_response(status_code=200)
response = self.ec2.get_all_spot_instance_requests()
self.assertEqual(len(response), 1)
spotrequest = response[0]
self.assertEqual(spotrequest.id, 'sir-id')
self.assertEqual(spotrequest.price, 0.003)
self.assertEqual(spotrequest.type, 'one-time')
self.assertEqual(spotrequest.state, 'active')
self.assertEqual(spotrequest.fault, None)
self.assertEqual(spotrequest.valid_from, None)
self.assertEqual(spotrequest.valid_until, None)
self.assertEqual(spotrequest.launch_group, 'mylaunchgroup')
self.assertEqual(spotrequest.launched_availability_zone, 'us-east-1d')
self.assertEqual(spotrequest.product_description, 'Linux/UNIX')
self.assertEqual(spotrequest.availability_zone_group, None)
self.assertEqual(spotrequest.create_time,
'2012-10-19T18:07:05.000Z')
self.assertEqual(spotrequest.instance_id, 'i-id')
launch_spec = spotrequest.launch_specification
self.assertEqual(launch_spec.key_name, 'mykeypair')
self.assertEqual(launch_spec.instance_type, 't1.micro')
self.assertEqual(launch_spec.image_id, 'ami-id')
self.assertEqual(launch_spec.placement, None)
self.assertEqual(launch_spec.kernel, None)
self.assertEqual(launch_spec.ramdisk, None)
self.assertEqual(launch_spec.monitored, False)
self.assertEqual(launch_spec.subnet_id, None)
self.assertEqual(launch_spec.block_device_mapping, None)
self.assertEqual(launch_spec.instance_profile, None)
self.assertEqual(launch_spec.ebs_optimized, False)
status = spotrequest.status
self.assertEqual(status.code, 'fulfilled')
self.assertEqual(status.update_time, '2012-10-19T18:09:26.000Z')
self.assertEqual(status.message, 'Your Spot request is fulfilled.')
class TestCopySnapshot(TestEC2ConnectionBase):
def default_body(self):
return """
<CopySnapshotResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>request_id</requestId>
<snapshotId>snap-copied-id</snapshotId>
</CopySnapshotResponse>
"""
def test_copy_snapshot(self):
self.set_http_response(status_code=200)
snapshot_id = self.ec2.copy_snapshot('us-west-2', 'snap-id',
'description')
self.assertEqual(snapshot_id, 'snap-copied-id')
self.assert_request_parameters({
'Action': 'CopySnapshot',
'Description': 'description',
'SourceRegion': 'us-west-2',
'SourceSnapshotId': 'snap-id'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
class TestCopyImage(TestEC2ConnectionBase):
def default_body(self):
return """
<CopyImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>request_id</requestId>
<imageId>ami-copied-id</imageId>
</CopyImageResponse>
"""
def test_copy_image(self):
self.set_http_response(status_code=200)
copied_ami = self.ec2.copy_image('us-west-2', 'ami-id',
'name', 'description', 'client-token')
self.assertEqual(copied_ami.image_id, 'ami-copied-id')
self.assert_request_parameters({
'Action': 'CopyImage',
'Description': 'description',
'Name': 'name',
'SourceRegion': 'us-west-2',
'SourceImageId': 'ami-id',
'ClientToken': 'client-token'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_copy_image_without_name(self):
self.set_http_response(status_code=200)
copied_ami = self.ec2.copy_image('us-west-2', 'ami-id',
description='description',
client_token='client-token')
self.assertEqual(copied_ami.image_id, 'ami-copied-id')
self.assert_request_parameters({
'Action': 'CopyImage',
'Description': 'description',
'SourceRegion': 'us-west-2',
'SourceImageId': 'ami-id',
'ClientToken': 'client-token'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
class TestAccountAttributes(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeAccountAttributesResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>6d042e8a-4bc3-43e8-8265-3cbc54753f14</requestId>
<accountAttributeSet>
<item>
<attributeName>vpc-max-security-groups-per-interface</attributeName>
<attributeValueSet>
<item>
<attributeValue>5</attributeValue>
</item>
</attributeValueSet>
</item>
<item>
<attributeName>max-instances</attributeName>
<attributeValueSet>
<item>
<attributeValue>50</attributeValue>
</item>
</attributeValueSet>
</item>
<item>
<attributeName>supported-platforms</attributeName>
<attributeValueSet>
<item>
<attributeValue>EC2</attributeValue>
</item>
<item>
<attributeValue>VPC</attributeValue>
</item>
</attributeValueSet>
</item>
<item>
<attributeName>default-vpc</attributeName>
<attributeValueSet>
<item>
<attributeValue>none</attributeValue>
</item>
</attributeValueSet>
</item>
</accountAttributeSet>
</DescribeAccountAttributesResponse>
"""
def test_describe_account_attributes(self):
self.set_http_response(status_code=200)
parsed = self.ec2.describe_account_attributes()
self.assertEqual(len(parsed), 4)
self.assertEqual(parsed[0].attribute_name,
'vpc-max-security-groups-per-interface')
self.assertEqual(parsed[0].attribute_values,
['5'])
self.assertEqual(parsed[-1].attribute_name,
'default-vpc')
self.assertEqual(parsed[-1].attribute_values,
['none'])
class TestDescribeVPCAttribute(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>request_id</requestId>
<vpcId>vpc-id</vpcId>
<enableDnsHostnames>
<value>false</value>
</enableDnsHostnames>
</DescribeVpcAttributeResponse>
"""
def test_describe_vpc_attribute(self):
self.set_http_response(status_code=200)
parsed = self.ec2.describe_vpc_attribute('vpc-id',
'enableDnsHostnames')
self.assertEqual(parsed.vpc_id, 'vpc-id')
self.assertFalse(parsed.enable_dns_hostnames)
self.assert_request_parameters({
'Action': 'DescribeVpcAttribute',
'VpcId': 'vpc-id',
'Attribute': 'enableDnsHostnames',},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
class TestGetAllNetworkInterfaces(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeNetworkInterfacesResponse xmlns="http://ec2.amazonaws.com/\
doc/2013-06-15/">
<requestId>fc45294c-006b-457b-bab9-012f5b3b0e40</requestId>
<networkInterfaceSet>
<item>
<networkInterfaceId>eni-0f62d866</networkInterfaceId>
<subnetId>subnet-c53c87ac</subnetId>
<vpcId>vpc-cc3c87a5</vpcId>
<availabilityZone>ap-southeast-1b</availabilityZone>
<description/>
<ownerId>053230519467</ownerId>
<requesterManaged>false</requesterManaged>
<status>in-use</status>
<macAddress>02:81:60:cb:27:37</macAddress>
<privateIpAddress>10.0.0.146</privateIpAddress>
<sourceDestCheck>true</sourceDestCheck>
<groupSet>
<item>
<groupId>sg-3f4b5653</groupId>
<groupName>default</groupName>
</item>
</groupSet>
<attachment>
<attachmentId>eni-attach-6537fc0c</attachmentId>
<instanceId>i-22197876</instanceId>
<instanceOwnerId>053230519467</instanceOwnerId>
<deviceIndex>5</deviceIndex>
<status>attached</status>
<attachTime>2012-07-01T21:45:27.000Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
<tagSet/>
<privateIpAddressesSet>
<item>
<privateIpAddress>10.0.0.146</privateIpAddress>
<primary>true</primary>
</item>
<item>
<privateIpAddress>10.0.0.148</privateIpAddress>
<primary>false</primary>
</item>
<item>
<privateIpAddress>10.0.0.150</privateIpAddress>
<primary>false</primary>
</item>
</privateIpAddressesSet>
</item>
</networkInterfaceSet>
</DescribeNetworkInterfacesResponse>"""
def test_attachment_has_device_index(self):
self.set_http_response(status_code=200)
parsed = self.ec2.get_all_network_interfaces()
self.assertEqual(5, parsed[0].attachment.device_index)
class TestGetAllImages(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeImagesResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>e32375e8-4ac3-4099-a8bf-3ec902b9023e</requestId>
<imagesSet>
<item>
<imageId>ami-abcd1234</imageId>
<imageLocation>111111111111/windows2008r2-hvm-i386-20130702</imageLocation>
<imageState>available</imageState>
<imageOwnerId>111111111111</imageOwnerId>
<isPublic>false</isPublic>
<architecture>i386</architecture>
<imageType>machine</imageType>
<platform>windows</platform>
<viridianEnabled>true</viridianEnabled>
<name>Windows Test</name>
<description>Windows Test Description</description>
<billingProducts>
<item>
<billingProduct>bp-6ba54002</billingProduct>
</item>
</billingProducts>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
<item>
<deviceName>/dev/sda1</deviceName>
<ebs>
<snapshotId>snap-abcd1234</snapshotId>
<volumeSize>30</volumeSize>
<deleteOnTermination>true</deleteOnTermination>
<volumeType>standard</volumeType>
</ebs>
</item>
<item>
<deviceName>xvdb</deviceName>
<virtualName>ephemeral0</virtualName>
</item>
<item>
<deviceName>xvdc</deviceName>
<virtualName>ephemeral1</virtualName>
</item>
<item>
<deviceName>xvdd</deviceName>
<virtualName>ephemeral2</virtualName>
</item>
<item>
<deviceName>xvde</deviceName>
<virtualName>ephemeral3</virtualName>
</item>
</blockDeviceMapping>
<virtualizationType>hvm</virtualizationType>
<hypervisor>xen</hypervisor>
</item>
</imagesSet>
</DescribeImagesResponse>"""
def test_get_all_images(self):
self.set_http_response(status_code=200)
parsed = self.ec2.get_all_images()
self.assertEquals(1, len(parsed))
self.assertEquals("ami-abcd1234", parsed[0].id)
self.assertEquals("111111111111/windows2008r2-hvm-i386-20130702", parsed[0].location)
self.assertEquals("available", parsed[0].state)
self.assertEquals("111111111111", parsed[0].ownerId)
self.assertEquals("111111111111", parsed[0].owner_id)
self.assertEquals(False, parsed[0].is_public)
self.assertEquals("i386", parsed[0].architecture)
self.assertEquals("machine", parsed[0].type)
self.assertEquals(None, parsed[0].kernel_id)
self.assertEquals(None, parsed[0].ramdisk_id)
self.assertEquals(None, parsed[0].owner_alias)
self.assertEquals("windows", parsed[0].platform)
self.assertEquals("Windows Test", parsed[0].name)
self.assertEquals("Windows Test Description", parsed[0].description)
self.assertEquals("ebs", parsed[0].root_device_type)
self.assertEquals("/dev/sda1", parsed[0].root_device_name)
self.assertEquals("hvm", parsed[0].virtualization_type)
self.assertEquals("xen", parsed[0].hypervisor)
self.assertEquals(None, parsed[0].instance_lifecycle)
# 1 billing product parsed into a list
self.assertEquals(1, len(parsed[0].billing_products))
self.assertEquals("bp-6ba54002", parsed[0].billing_products[0])
# Just verify length, there is already a block_device_mapping test
self.assertEquals(5, len(parsed[0].block_device_mapping))
# TODO: No tests for product codes?
class TestModifyInterfaceAttribute(TestEC2ConnectionBase):
def default_body(self):
return """
<ModifyNetworkInterfaceAttributeResponse \
xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
<requestId>657a4623-5620-4232-b03b-427e852d71cf</requestId>
<return>true</return>
</ModifyNetworkInterfaceAttributeResponse>
"""
def test_modify_description(self):
self.set_http_response(status_code=200)
self.ec2.modify_network_interface_attribute('id', 'description', 'foo')
self.assert_request_parameters({
'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': 'id',
'Description.Value': 'foo'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_modify_source_dest_check_bool(self):
self.set_http_response(status_code=200)
self.ec2.modify_network_interface_attribute('id', 'sourceDestCheck',
True)
self.assert_request_parameters({
'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': 'id',
'SourceDestCheck.Value': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_modify_source_dest_check_str(self):
self.set_http_response(status_code=200)
self.ec2.modify_network_interface_attribute('id', 'sourceDestCheck',
'true')
self.assert_request_parameters({
'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': 'id',
'SourceDestCheck.Value': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_modify_source_dest_check_invalid(self):
self.set_http_response(status_code=200)
with self.assertRaises(ValueError):
self.ec2.modify_network_interface_attribute('id',
'sourceDestCheck',
123)
def test_modify_delete_on_termination_str(self):
self.set_http_response(status_code=200)
self.ec2.modify_network_interface_attribute('id',
'deleteOnTermination',
True, attachment_id='bar')
self.assert_request_parameters({
'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': 'id',
'Attachment.AttachmentId': 'bar',
'Attachment.DeleteOnTermination': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_modify_delete_on_termination_bool(self):
self.set_http_response(status_code=200)
self.ec2.modify_network_interface_attribute('id',
'deleteOnTermination',
'false',
attachment_id='bar')
self.assert_request_parameters({
'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': 'id',
'Attachment.AttachmentId': 'bar',
'Attachment.DeleteOnTermination': 'false'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_modify_delete_on_termination_invalid(self):
self.set_http_response(status_code=200)
with self.assertRaises(ValueError):
self.ec2.modify_network_interface_attribute('id',
'deleteOnTermination',
123,
attachment_id='bar')
def test_modify_group_set_list(self):
self.set_http_response(status_code=200)
self.ec2.modify_network_interface_attribute('id', 'groupSet',
['sg-1', 'sg-2'])
self.assert_request_parameters({
'Action': 'ModifyNetworkInterfaceAttribute',
'NetworkInterfaceId': 'id',
'SecurityGroupId.1': 'sg-1',
'SecurityGroupId.2': 'sg-2'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
def test_modify_group_set_invalid(self):
self.set_http_response(status_code=200)
with self.assertRaisesRegexp(TypeError, 'iterable'):
self.ec2.modify_network_interface_attribute('id', 'groupSet',
False)
def test_modify_attr_invalid(self):
self.set_http_response(status_code=200)
with self.assertRaisesRegexp(ValueError, 'Unknown attribute'):
self.ec2.modify_network_interface_attribute('id', 'invalid', 0)
class TestConnectToRegion(unittest.TestCase):
def setUp(self):
self.https_connection = Mock(spec=httplib.HTTPSConnection)
self.https_connection_factory = (
Mock(return_value=self.https_connection), ())
def test_aws_region(self):
region = boto.ec2.RegionData.keys()[0]
self.ec2 = boto.ec2.connect_to_region(region,
https_connection_factory=self.https_connection_factory,
aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key'
)
self.assertEqual(boto.ec2.RegionData[region], self.ec2.host)
def test_non_aws_region(self):
self.ec2 = boto.ec2.connect_to_region('foo',
https_connection_factory=self.https_connection_factory,
aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key',
region = RegionInfo(name='foo', endpoint='https://foo.com/bar')
)
self.assertEqual('https://foo.com/bar', self.ec2.host)
def test_missing_region(self):
self.ec2 = boto.ec2.connect_to_region('foo',
https_connection_factory=self.https_connection_factory,
aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key'
)
self.assertEqual(None, self.ec2)
class TestTrimSnapshots(TestEC2ConnectionBase):
"""
Test snapshot trimming functionality by ensuring that expected calls
are made when given a known set of volume snapshots.
"""
def _get_snapshots(self):
"""
Generate a list of fake snapshots with names and dates.
"""
snaps = []
# Generate some dates offset by days, weeks, months.
# This is to validate the various types of snapshot logic handled by
# ``trim_snapshots``.
now = datetime.now()
dates = [
now,
now - timedelta(days=1),
now - timedelta(days=2),
now - timedelta(days=7),
now - timedelta(days=14),
# We want to simulate 30/60/90-day snapshots, but February is
# short (only 28 days), so we decrease the delta by 2 days apiece.
# This prevents the ``delete_snapshot`` code below from being
# called, since they don't fall outside the allowed timeframes
# for the snapshots.
datetime(now.year, now.month, 1) - timedelta(days=28),
datetime(now.year, now.month, 1) - timedelta(days=58),
datetime(now.year, now.month, 1) - timedelta(days=88)
]
for date in dates:
# Create a fake snapshot for each date
snap = Snapshot(self.ec2)
snap.tags['Name'] = 'foo'
# Times are expected to be ISO8601 strings
snap.start_time = date.strftime('%Y-%m-%dT%H:%M:%S.000Z')
snaps.append(snap)
return snaps
def test_trim_defaults(self):
"""
Test trimming snapshots with the default arguments, which should
keep all monthly backups forever. The result of this test should
be that nothing is deleted.
"""
# Setup mocks
orig = {
'get_all_snapshots': self.ec2.get_all_snapshots,
'delete_snapshot': self.ec2.delete_snapshot
}
snaps = self._get_snapshots()
self.ec2.get_all_snapshots = MagicMock(return_value=snaps)
self.ec2.delete_snapshot = MagicMock()
# Call the tested method
self.ec2.trim_snapshots()
# Assertions
self.assertEqual(True, self.ec2.get_all_snapshots.called)
self.assertEqual(False, self.ec2.delete_snapshot.called)
# Restore
self.ec2.get_all_snapshots = orig['get_all_snapshots']
self.ec2.delete_snapshot = orig['delete_snapshot']
def test_trim_months(self):
"""
Test trimming monthly snapshots and ensure that older months
get deleted properly. The result of this test should be that
the two oldest snapshots get deleted.
"""
# Setup mocks
orig = {
'get_all_snapshots': self.ec2.get_all_snapshots,
'delete_snapshot': self.ec2.delete_snapshot
}
snaps = self._get_snapshots()
self.ec2.get_all_snapshots = MagicMock(return_value=snaps)
self.ec2.delete_snapshot = MagicMock()
# Call the tested method
self.ec2.trim_snapshots(monthly_backups=1)
# Assertions
self.assertEqual(True, self.ec2.get_all_snapshots.called)
self.assertEqual(2, self.ec2.delete_snapshot.call_count)
# Restore
self.ec2.get_all_snapshots = orig['get_all_snapshots']
self.ec2.delete_snapshot = orig['delete_snapshot']
class TestModifyReservedInstances(TestEC2ConnectionBase):
def default_body(self):
return """<ModifyReservedInstancesResponse xmlns='http://ec2.amazonaws.com/doc/2013-08-15/'>
<requestId>bef729b6-0731-4489-8881-2258746ae163</requestId>
<reservedInstancesModificationId>rimod-3aae219d-3d63-47a9-a7e9-e764example</reservedInstancesModificationId>
</ModifyReservedInstancesResponse>"""
def test_serialized_api_args(self):
self.set_http_response(status_code=200)
response = self.ec2.modify_reserved_instances(
'a-token-goes-here',
reserved_instance_ids=[
'2567o137-8a55-48d6-82fb-7258506bb497',
],
target_configurations=[
ReservedInstancesConfiguration(
availability_zone='us-west-2c',
platform='EC2-VPC',
instance_count=3
),
]
)
self.assert_request_parameters({
'Action': 'ModifyReservedInstances',
'ClientToken': 'a-token-goes-here',
'ReservedInstancesConfigurationSetItemType.0.AvailabilityZone': 'us-west-2c',
'ReservedInstancesConfigurationSetItemType.0.InstanceCount': 3,
'ReservedInstancesConfigurationSetItemType.0.Platform': 'EC2-VPC',
'ReservedInstancesId.1': '2567o137-8a55-48d6-82fb-7258506bb497'
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
self.assertEqual(response, 'rimod-3aae219d-3d63-47a9-a7e9-e764example')
class TestDescribeReservedInstancesModifications(TestEC2ConnectionBase):
def default_body(self):
return """<DescribeReservedInstancesModificationsResponse xmlns='http://ec2.amazonaws.com/doc/2013-08-15/'>
<requestId>eb4a6e3c-3689-445c-b536-19e38df35898</requestId>
<reservedInstancesModificationsSet>
<item>
<reservedInstancesModificationId>rimod-49b9433e-fdc7-464a-a6e5-9dabcexample</reservedInstancesModificationId>
<reservedInstancesSet>
<item>
<reservedInstancesId>2567o137-8a55-48d6-82fb-7258506bb497</reservedInstancesId>
</item>
</reservedInstancesSet>
<modificationResultSet>
<item>
<reservedInstancesId>9d5cb137-5d65-4479-b4ac-8c337example</reservedInstancesId>
<targetConfiguration>
<availabilityZone>us-east-1b</availabilityZone>
<platform>EC2-VPC</platform>
<instanceCount>1</instanceCount>
</targetConfiguration>
</item>
</modificationResultSet>
<createDate>2013-09-02T21:20:19.637Z</createDate>
<updateDate>2013-09-02T21:38:24.143Z</updateDate>
<effectiveDate>2013-09-02T21:00:00.000Z</effectiveDate>
<status>fulfilled</status>
<clientToken>token-f5b56c05-09b0-4d17-8d8c-c75d8a67b806</clientToken>
</item>
</reservedInstancesModificationsSet>
</DescribeReservedInstancesModificationsResponse>"""
def test_serialized_api_args(self):
self.set_http_response(status_code=200)
response = self.ec2.describe_reserved_instances_modifications(
reserved_instances_modification_ids=[
'2567o137-8a55-48d6-82fb-7258506bb497'
],
filters={
'status': 'processing',
}
)
self.assert_request_parameters({
'Action': 'DescribeReservedInstancesModifications',
'Filter.1.Name': 'status',
'Filter.1.Value.1': 'processing',
'ReservedInstancesModificationId.1': '2567o137-8a55-48d6-82fb-7258506bb497'
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
# Make sure the response was parsed correctly.
self.assertEqual(
response[0].modification_id,
'rimod-49b9433e-fdc7-464a-a6e5-9dabcexample'
)
self.assertEqual(
response[0].create_date,
datetime(2013, 9, 2, 21, 20, 19, 637000)
)
self.assertEqual(
response[0].update_date,
datetime(2013, 9, 2, 21, 38, 24, 143000)
)
self.assertEqual(
response[0].effective_date,
datetime(2013, 9, 2, 21, 0, 0, 0)
)
self.assertEqual(
response[0].status,
'fulfilled'
)
self.assertEqual(
response[0].status_message,
None
)
self.assertEqual(
response[0].client_token,
'token-f5b56c05-09b0-4d17-8d8c-c75d8a67b806'
)
self.assertEqual(
response[0].reserved_instances[0].id,
'2567o137-8a55-48d6-82fb-7258506bb497'
)
self.assertEqual(
response[0].modification_results[0].availability_zone,
'us-east-1b'
)
self.assertEqual(
response[0].modification_results[0].platform,
'EC2-VPC'
)
self.assertEqual(
response[0].modification_results[0].instance_count,
1
)
self.assertEqual(len(response), 1)
class TestRegisterImage(TestEC2ConnectionBase):
def default_body(self):
return """
<RegisterImageResponse xmlns="http://ec2.amazonaws.com/doc/2013-08-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<imageId>ami-1a2b3c4d</imageId>
</RegisterImageResponse>
"""
def test_vm_type_default(self):
self.set_http_response(status_code=200)
self.ec2.register_image('name', 'description',
image_location='s3://foo')
self.assert_request_parameters({
'Action': 'RegisterImage',
'ImageLocation': 's3://foo',
'Name': 'name',
'Description': 'description',
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
def test_vm_type_hvm(self):
self.set_http_response(status_code=200)
self.ec2.register_image('name', 'description',
image_location='s3://foo',
virtualization_type='hvm')
self.assert_request_parameters({
'Action': 'RegisterImage',
'ImageLocation': 's3://foo',
'Name': 'name',
'Description': 'description',
'VirtualizationType': 'hvm'
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
def test_sriov_net_support_simple(self):
self.set_http_response(status_code=200)
self.ec2.register_image('name', 'description',
image_location='s3://foo',
sriov_net_support='simple')
self.assert_request_parameters({
'Action': 'RegisterImage',
'ImageLocation': 's3://foo',
'Name': 'name',
'Description': 'description',
'SriovNetSupport': 'simple'
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
def test_volume_delete_on_termination_on(self):
self.set_http_response(status_code=200)
self.ec2.register_image('name', 'description',
snapshot_id='snap-12345678',
delete_root_volume_on_termination=True)
self.assert_request_parameters({
'Action': 'RegisterImage',
'Name': 'name',
'Description': 'description',
'BlockDeviceMapping.1.DeviceName': None,
'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'true',
'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678',
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
def test_volume_delete_on_termination_default(self):
self.set_http_response(status_code=200)
self.ec2.register_image('name', 'description',
snapshot_id='snap-12345678')
self.assert_request_parameters({
'Action': 'RegisterImage',
'Name': 'name',
'Description': 'description',
'BlockDeviceMapping.1.DeviceName': None,
'BlockDeviceMapping.1.Ebs.DeleteOnTermination' : 'false',
'BlockDeviceMapping.1.Ebs.SnapshotId': 'snap-12345678',
}, ignore_params_values=[
'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'
])
class TestTerminateInstances(TestEC2ConnectionBase):
def default_body(self):
return """<?xml version="1.0" ?>
<TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>req-59a9ad52-0434-470c-ad48-4f89ded3a03e</requestId>
<instancesSet>
<item>
<instanceId>i-000043a2</instanceId>
<shutdownState>
<code>16</code>
<name>running</name>
</shutdownState>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
</item>
</instancesSet>
</TerminateInstancesResponse>
"""
def test_terminate_bad_response(self):
self.set_http_response(status_code=200)
self.ec2.terminate_instances('foo')
class TestDescribeInstances(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeInstancesResponse>
</DescribeInstancesResponse>
"""
def test_default_behavior(self):
self.set_http_response(status_code=200)
self.ec2.get_all_instances()
self.assert_request_parameters({
'Action': 'DescribeInstances'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
self.ec2.get_all_reservations()
self.assert_request_parameters({
'Action': 'DescribeInstances'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
self.ec2.get_only_instances()
self.assert_request_parameters({
'Action': 'DescribeInstances'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
def test_max_results(self):
self.set_http_response(status_code=200)
self.ec2.get_all_instances(
max_results=10
)
self.assert_request_parameters({
'Action': 'DescribeInstances',
'MaxResults': 10},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
def test_next_token(self):
self.set_http_response(status_code=200)
self.ec2.get_all_reservations(
next_token='abcdefgh',
)
self.assert_request_parameters({
'Action': 'DescribeInstances',
'NextToken': 'abcdefgh'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestDescribeTags(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeTagsResponse>
</DescribeTagsResponse>
"""
def test_default_behavior(self):
self.set_http_response(status_code=200)
self.ec2.get_all_tags()
self.assert_request_parameters({
'Action': 'DescribeTags'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
def test_max_results(self):
self.set_http_response(status_code=200)
self.ec2.get_all_tags(
max_results=10
)
self.assert_request_parameters({
'Action': 'DescribeTags',
'MaxResults': 10},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestSignatureAlteration(TestEC2ConnectionBase):
def test_unchanged(self):
self.assertEqual(
self.service_connection._required_auth_capability(),
['ec2']
)
def test_switched(self):
region = RegionInfo(
name='cn-north-1',
endpoint='ec2.cn-north-1.amazonaws.com.cn',
connection_cls=EC2Connection
)
conn = self.connection_class(
aws_access_key_id='less',
aws_secret_access_key='more',
region=region
)
self.assertEqual(
conn._required_auth_capability(),
['hmac-v4']
)
class TestAssociateAddress(TestEC2ConnectionBase):
def default_body(self):
return """
<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
<associationId>eipassoc-fc5ca095</associationId>
</AssociateAddressResponse>
"""
def test_associate_address(self):
self.set_http_response(status_code=200)
result = self.ec2.associate_address(instance_id='i-1234',
public_ip='192.0.2.1')
self.assertEqual(True, result)
def test_associate_address_object(self):
self.set_http_response(status_code=200)
result = self.ec2.associate_address_object(instance_id='i-1234',
public_ip='192.0.2.1')
self.assertEqual('eipassoc-fc5ca095', result.association_id)
class TestAssociateAddressFail(TestEC2ConnectionBase):
def default_body(self):
return """
<Response>
<Errors>
<Error>
<Code>InvalidInstanceID.NotFound</Code>
<Message>The instance ID 'i-4cbc822a' does not exist</Message>
</Error>
</Errors>
<RequestID>ea966190-f9aa-478e-9ede-cb5432daacc0</RequestID>
<StatusCode>Failure</StatusCode>
</Response>
"""
def test_associate_address(self):
self.set_http_response(status_code=200)
result = self.ec2.associate_address(instance_id='i-1234',
public_ip='192.0.2.1')
self.assertEqual(False, result)
class TestDescribeVolumes(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeVolumesResponse xmlns="http://ec2.amazonaws.com/doc/2014-02-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeSet>
<item>
<volumeId>vol-1a2b3c4d</volumeId>
<size>80</size>
<snapshotId/>
<availabilityZone>us-east-1a</availabilityZone>
<status>in-use</status>
<createTime>YYYY-MM-DDTHH:MM:SS.SSSZ</createTime>
<attachmentSet>
<item>
<volumeId>vol-1a2b3c4d</volumeId>
<instanceId>i-1a2b3c4d</instanceId>
<device>/dev/sdh</device>
<status>attached</status>
<attachTime>YYYY-MM-DDTHH:MM:SS.SSSZ</attachTime>
<deleteOnTermination>false</deleteOnTermination>
</item>
</attachmentSet>
<volumeType>standard</volumeType>
<encrypted>true</encrypted>
</item>
<item>
<volumeId>vol-5e6f7a8b</volumeId>
<size>80</size>
<snapshotId/>
<availabilityZone>us-east-1a</availabilityZone>
<status>in-use</status>
<createTime>YYYY-MM-DDTHH:MM:SS.SSSZ</createTime>
<attachmentSet>
<item>
<volumeId>vol-5e6f7a8b</volumeId>
<instanceId>i-5e6f7a8b</instanceId>
<device>/dev/sdz</device>
<status>attached</status>
<attachTime>YYYY-MM-DDTHH:MM:SS.SSSZ</attachTime>
<deleteOnTermination>false</deleteOnTermination>
</item>
</attachmentSet>
<volumeType>standard</volumeType>
<encrypted>false</encrypted>
</item>
</volumeSet>
</DescribeVolumesResponse>
"""
def test_get_all_volumes(self):
self.set_http_response(status_code=200)
result = self.ec2.get_all_volumes(volume_ids=['vol-1a2b3c4d', 'vol-5e6f7a8b'])
self.assert_request_parameters({
'Action': 'DescribeVolumes',
'VolumeId.1': 'vol-1a2b3c4d',
'VolumeId.2': 'vol-5e6f7a8b'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'vol-1a2b3c4d')
self.assertTrue(result[0].encrypted)
self.assertEqual(result[1].id, 'vol-5e6f7a8b')
self.assertFalse(result[1].encrypted)
class TestDescribeSnapshots(TestEC2ConnectionBase):
def default_body(self):
return """
<DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2014-02-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<snapshotSet>
<item>
<snapshotId>snap-1a2b3c4d</snapshotId>
<volumeId>vol-1a2b3c4d</volumeId>
<status>pending</status>
<startTime>YYYY-MM-DDTHH:MM:SS.SSSZ</startTime>
<progress>80%</progress>
<ownerId>111122223333</ownerId>
<volumeSize>15</volumeSize>
<description>Daily Backup</description>
<tagSet/>
<encrypted>true</encrypted>
</item>
</snapshotSet>
<snapshotSet>
<item>
<snapshotId>snap-5e6f7a8b</snapshotId>
<volumeId>vol-5e6f7a8b</volumeId>
<status>completed</status>
<startTime>YYYY-MM-DDTHH:MM:SS.SSSZ</startTime>
<progress>100%</progress>
<ownerId>111122223333</ownerId>
<volumeSize>15</volumeSize>
<description>Daily Backup</description>
<tagSet/>
<encrypted>false</encrypted>
</item>
</snapshotSet>
</DescribeSnapshotsResponse>
"""
def test_get_all_snapshots(self):
self.set_http_response(status_code=200)
result = self.ec2.get_all_snapshots(snapshot_ids=['snap-1a2b3c4d', 'snap-5e6f7a8b'])
self.assert_request_parameters({
'Action': 'DescribeSnapshots',
'SnapshotId.1': 'snap-1a2b3c4d',
'SnapshotId.2': 'snap-5e6f7a8b'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'snap-1a2b3c4d')
self.assertTrue(result[0].encrypted)
self.assertEqual(result[1].id, 'snap-5e6f7a8b')
self.assertFalse(result[1].encrypted)
class TestCreateVolume(TestEC2ConnectionBase):
def default_body(self):
return """
<CreateVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2014-05-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<volumeId>vol-1a2b3c4d</volumeId>
<size>80</size>
<snapshotId/>
<availabilityZone>us-east-1a</availabilityZone>
<status>creating</status>
<createTime>YYYY-MM-DDTHH:MM:SS.000Z</createTime>
<volumeType>standard</volumeType>
<encrypted>true</encrypted>
</CreateVolumeResponse>
"""
def test_create_volume(self):
self.set_http_response(status_code=200)
result = self.ec2.create_volume(80, 'us-east-1e', snapshot='snap-1a2b3c4d',
encrypted=True)
self.assert_request_parameters({
'Action': 'CreateVolume',
'AvailabilityZone': 'us-east-1e',
'Size': 80,
'SnapshotId': 'snap-1a2b3c4d',
'Encrypted': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(result.id, 'vol-1a2b3c4d')
self.assertTrue(result.encrypted)
if __name__ == '__main__':
unittest.main()
|
[
"alfred.wechselberger@technologyhatchery.com"
] |
alfred.wechselberger@technologyhatchery.com
|
fc02d92d5e205a887765802d60906fd5dcc62213
|
4a3fcb3e93ba88ee09d34b190450ad18a3125d67
|
/users/api/admin.py
|
fa57deaea2bcc2f5ba3dd6792ef05b61754aa057
|
[] |
no_license
|
hllustosa/online-judge
|
8c14f3348d7eba56126824f1aca6d9ee907e688d
|
4340eefc760ee3122e805214af0aa5f1a4f4fd96
|
refs/heads/master
| 2023-06-20T22:27:17.359455
| 2021-08-09T03:27:55
| 2021-08-09T03:27:55
| 392,495,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
from django.contrib import admin
from api.models import Profile
# Register your models here.
admin.site.register(Profile)
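# Hedged alternative (kept commented out so Profile is not registered twice):
# the decorator form registers a ModelAdmin subclass and leaves room for
# list_display / search_fields customisation. The field below is illustrative
# only, not taken from this repo.
#
# @admin.register(Profile)
# class ProfileAdmin(admin.ModelAdmin):
#     list_display = ('id',)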
|
[
"hllustosa@gmail.com"
] |
hllustosa@gmail.com
|
0b486db72f271c9f67c5078e888030e88741d166
|
80e82658539215cf14153e7dfe64fd810103d1b1
|
/ThirdPartySoftware/pbrain-master/eegview/shared.py
|
496aff516e1bf4672f100bcb5762d4a8cae05c8a
|
[] |
no_license
|
mocalab/BCIProject
|
7f2302f5027f98118ff0d84895dd2a89f5e9c3fb
|
35d807401cf4939d09597addd4d79ca1df47073c
|
refs/heads/master
| 2020-04-17T00:57:40.333837
| 2013-06-29T07:39:25
| 2013-06-29T07:39:25
| 8,465,907
| 0
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,207
|
py
|
import os, sys
from pbrainlib.gtkutils import FileManager
import distutils.sysconfig
class RC:
"""
CLASS: RC
DESCR: handles .eegviewrc file in home directory. Currently only the 'lastdir'
line is really used - as well as the streamlining data in the last 5 columns. eegview.rc gets saved after
every program execution.
"""
if os.environ.has_key('HOME'):
path = os.environ['HOME']
elif sys.platform=='win32':
path = os.path.join(distutils.sysconfig.PREFIX, 'share', 'pbrain')
elif sys.platform=='linux':
path = '/tmp/'
else:
path = None
def join_ints(seq):
return ' '.join(['%d'%val for val in seq])
def split_ints(ints):
return [int(val) for val in ints.split()]
convertToFile = {'figsize':join_ints,}
convertFromFile = {'figsize':split_ints,'sqlport':int}
attrs = (
'lastdir',
'lastdir1',
'lastdir2',
'lastdir3',
'lastdir4',
'lastdir5',
'lastdir6',
'lastdir7',
'lastdir8',
'lastdir9',
'figsize',
'httpuser',
'httppasswd',
'httpurl',
'httpcachedir',
'sqluser',
'sqlpasswd',
'sqlhost',
'sqlport',
'sqldatabase',
'horizcursor',
'vertcursor',
'bni',
'csv',
'amp',
'dat',
'col',
)
def __init__(self):
self.load_defaults()
if self.path is not None:
self.filename = os.path.join(self.path, '.eegviewrc')
try: self.loadrc()
except IOError: pass
for attr in self.attrs:
if not hasattr(self, attr):
raise AttributeError('Unknown property: %s'%attr)
def load_defaults(self):
if sys.platform=='win32':
self.lastdir = 'C:\\'
else:
self.lastdir = os.getcwd()
print "setting lastdirs.."
self.lastdir1 = ''
self.lastdir2 = ''
self.lastdir3 = ''
self.lastdir4 = ''
self.lastdir5 = ''
self.lastdir6 = ''
self.lastdir7 = ''
self.lastdir8 = ''
self.lastdir9 = ''
self.figsize = 8, 6
self.httpuser = 'username'
self.httppasswd = 'passwd'
self.httpurl = 'localhost'
self.httpcachedir = 'tempdir'
self.sqluser = 'username'
self.sqlpasswd = 'passwd'
self.sqlhost = 'localhost'
self.sqldatabase = 'seizure'
self.sqlport = 3306
self.horizcursor = True
self.vertcursor = True
self.bni = ""
self.csv = ""
self.amp = ""
self.dat = ""
self.col = ""
def loadrc(self):
for line in file(self.filename):
key, val = line.split(':', 1)
key = key.strip()
val = val.strip()
func = self.convertFromFile.get(key, str)
self.__dict__[key] = func(val)
def save(self):
try:
fh = file(self.filename, 'w')
for attr in self.attrs:
func = self.convertToFile.get(attr, str)
val = func(self.__dict__[attr])
fh.write('%s : %s\n' % (attr, val))
print 'Updated RC file', self.filename
except IOError:
print >>sys.stderr, 'Failed to write to', self.filename
def __del__(self):
self.save()
eegviewrc = RC()
fmanager = FileManager()
fmanager.bni = eegviewrc.bni
fmanager.csv = eegviewrc.csv
fmanager.amp = eegviewrc.amp
fmanager.dat = eegviewrc.dat
fmanager.col = eegviewrc.col
fmanager.set_lastdir(eegviewrc.lastdir)
fmanager.set_lastdirs([eegviewrc.lastdir,
eegviewrc.lastdir1,
eegviewrc.lastdir2,
eegviewrc.lastdir3,
eegviewrc.lastdir4,
eegviewrc.lastdir5,
eegviewrc.lastdir6,
eegviewrc.lastdir7,
eegviewrc.lastdir8,
eegviewrc.lastdir9])
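# --- Illustrative sketch (not part of the original module) --------------------
# RC.loadrc() and RC.save() persist settings as "key : value" lines in
# ~/.eegviewrc; loadrc() splits each line on the first ':' and applies an
# optional converter from convertFromFile (e.g. int for 'sqlport', split_ints
# for 'figsize'). Toy inputs below, nothing is written to disk.
def _example_parse_eegviewrc_lines():
    sample = ["sqlport : 3306", "figsize : 8 6"]
    parsed = {}
    for line in sample:
        key, val = line.split(':', 1)
        parsed[key.strip()] = val.strip()
    return parsed   # {'sqlport': '3306', 'figsize': '8 6'} before conversion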
|
[
"yozturk@mocalab.com"
] |
yozturk@mocalab.com
|
13c9402c955d81bc62cbf883183a1497fbc71f74
|
d0987e868d3c55728ce451a1f778d254720821b0
|
/datamodules/default.py
|
9057c6f040fea7c5dfd6c6a15c72db02dc6a3496
|
[] |
no_license
|
sara-nl/2D-VQ-AE-2
|
c106cd0dd0c1060bb4f363ec38db6ba354363f85
|
6999a5c25d6e0a83bde52c770788375cbfe348c0
|
refs/heads/main
| 2023-05-23T08:20:57.334918
| 2022-06-10T15:15:50
| 2022-06-10T15:15:50
| 377,765,763
| 12
| 2
| null | 2022-03-01T13:53:51
| 2021-06-17T08:53:39
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
from dataclasses import dataclass
from typing import Optional, Callable
import pytorch_lightning as pl
from torch.utils.data.dataloader import DataLoader
@dataclass
class DefaultDataModule(pl.LightningDataModule):
train_dataloader_conf: Callable[[], DataLoader]
val_dataloader_conf: Callable[[], DataLoader]
test_dataloader_conf: Optional[Callable[[], DataLoader]] = None
def __post_init__(self):
super().__init__()
def train_dataloader(self) -> DataLoader:
return self.train_dataloader_conf()
def val_dataloader(self) -> DataLoader:
return self.val_dataloader_conf()
def test_dataloader(self) -> DataLoader:
return self.test_dataloader_conf()
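# --- Illustrative usage sketch (assumptions: a toy TensorDataset; in practice
# the dataloader callables are presumably supplied as config-instantiated
# partials, which is not shown here). Note that test_dataloader_conf is
# Optional, so calling test_dataloader() without providing it would fail.
def _example_default_datamodule():
    from functools import partial
    import torch
    from torch.utils.data import TensorDataset
    ds = TensorDataset(torch.randn(8, 3), torch.zeros(8))
    dm = DefaultDataModule(
        train_dataloader_conf=partial(DataLoader, ds, batch_size=4, shuffle=True),
        val_dataloader_conf=partial(DataLoader, ds, batch_size=4),
    )
    return next(iter(dm.train_dataloader()))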
|
[
"robertjan.schlimbach@gmail.com"
] |
robertjan.schlimbach@gmail.com
|
3226c5524af15df7ed6b8e45dff65ea39ded66c2
|
a7a968c270d193e8c5b35af5f1743be8c9eaa81d
|
/WISE/resultscompile.py
|
dfd4e3d7abbc420fca2771124d5065c5c412c072
|
[] |
no_license
|
rameez3333/catana
|
56479adfbac422a976f2c22ea45ccebc3884ea13
|
d786ddcc789c55e941a85b247433eeac672a40d4
|
refs/heads/master
| 2020-09-22T15:31:48.133456
| 2019-12-08T09:12:45
| 2019-12-08T09:12:45
| 67,131,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,599
|
py
|
import numpy as np
from glob import glob
import healpy as hp
import matplotlib.pyplot as plt
import sys
nside = int(sys.argv[1])
#glcut = sys.argv[2]
npix = hp.nside2npix(nside)
def process(glcut, jcut):
flist = sorted(glob('*'+str(glcut)+"glcut"+str(jcut)+"jlcut_result.txt"))
if not len(flist):
if jcut:
flist = sorted(glob('*'+str(glcut)+"glcut"+"jcut_result.txt"))
else:
flist = sorted(glob('*'+str(glcut)+"glcut"+"_result.txt"))
totsources=0
cutsources=0
totUH = np.zeros(npix)
totLH = np.zeros(npix)
for f in flist:
fline = open(f).readlines()
totsources = totsources + float(fline[1].split(",")[0])
cutsources = cutsources + float(fline[1].split(",")[-1])
i=0
mapUH = np.zeros(npix)
mapLH = np.zeros(npix)
for line in fline[2:]:
#if (90. - np.rad2deg(hp.pix2ang(16, i)[0]) - float(line.split(',')[0]) + float(line.split(',')[1]) - np.rad2deg(hp.pix2ang(16, i)[1])):
#print "wtf", i, (90. - np.rad2deg(hp.pix2ang(16, i)[0]) - float(line.split(',')[0]) + float(line.split(',')[1]) - np.rad2deg(hp.pix2ang(16, i)[1]))
mapUH[i] = float(line.split(',')[2])
mapLH[i] = float(line.split(',')[3])
i+=1
if (i - npix):
print "wtf now"
if ((mapUH+mapLH)[0:npix].sum()/npix - float(fline[1].split(",")[-1])):
print "wtf 3"
print f
print (mapUH + mapLH)[0:npix].sum()/(npix)
print float(fline[1].split(",")[-1])
print ((mapUH+mapLH)[0:npix/2].sum()*2./npix - float(fline[1].split(",")[-1]))
totUH = totUH+mapUH
totLH = totLH+mapLH
"""
for i in range(1, npix/2 +1):
totUH[npix-i] = totLH[i-1]
totLH[npix-i] = totUH[i-1]
"""
#print totUH
#print totLH
map = (totUH-totLH)/(totUH+totLH)
#print map
#print np.min(map), np.max(map)
#print "Total Sources: ", totsources
#print "After Cut: ", cutsources
#print "The minimum is at", np.argmin(map), "with a value of ", map[np.argmin(map)]
#print "The minimum is at ", (90. - np.rad2deg(hp.pix2ang(nside, np.argmin(map))[0])), np.rad2deg(hp.pix2ang(nside, np.argmin(map))[0])
#print "The maximum is at", np.argmax(map), "with a value of ", map[np.argmax(map)]
#print "The maximum is at ", (90. - np.rad2deg(hp.pix2ang(nside, np.argmax(map))[0])), np.rad2deg(hp.pix2ang(nside, np.argmax(map))[0])
coords = hp.pix2ang(nside,np.arange(npix))
angs = np.rad2deg(np.arccos(np.sin(coords[0])*np.sin(hp.pix2ang(nside, np.argmax(map))[0])*np.cos(coords[1] - hp.pix2ang(nside, np.argmax(map))[1]) + np.cos(coords[0])*np.cos(hp.pix2ang(nside, np.argmax(map))[0])))
#print np.size(angs)
#plt.scatter(angs, map)
#plt.xlabel("Angle")
#plt.ylabel("Hemispheric count difference")
hp.mollview(map, rot=[np.pi, np.pi/2.])
#plt.show()
plt.savefig('HCount'+str(glcut)+'glcut'+str(jcut)+'jcut.png')
return (90. - np.rad2deg(hp.pix2ang(nside, np.argmax(map))[0])), np.rad2deg(hp.pix2ang(nside, np.argmax(map))[1]), map[np.argmax(map)]
for jcut in (0, 1):
for glcut in np.arange(10, 21):
decmax,ramax, valmax = process(glcut, jcut)
print jcut, glcut, decmax, ramax, valmax
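# --- Illustrative sketch of the core statistic computed in process() ----------
# (toy Poisson counts, not the WISE catalogue): per-pixel normalised difference
# between the "upper" and "lower" hemisphere counts, then the direction of the
# maximum, mirroring the dec/RA extraction above.
def _example_hemisphere_asymmetry(toy_nside=4):
    toy_npix = hp.nside2npix(toy_nside)
    up = np.random.poisson(100.0, toy_npix).astype(float)   # stand-in for totUH
    lo = np.random.poisson(100.0, toy_npix).astype(float)   # stand-in for totLH
    asym = (up - lo) / (up + lo)
    imax = int(np.argmax(asym))
    dec = 90. - np.rad2deg(hp.pix2ang(toy_nside, imax)[0])
    ra = np.rad2deg(hp.pix2ang(toy_nside, imax)[1])
    return dec, ra, asym[imax]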
|
[
"mrameez@fend05.cluster"
] |
mrameez@fend05.cluster
|
f46eeb7d861443adbfefc28d6ad9e7b902164a00
|
400c5a88463f17b82dc6faaa477e9376052d929f
|
/boards/views.py
|
18656d4a4c954d4778dcc1c81ff7b79480617e8d
|
[] |
no_license
|
leahlang4d/django_project
|
bf823defddf298ab66873c78791af5c1a3dc2d0e
|
b5f76122c9d08aea8678348a0cf7423bdc32474c
|
refs/heads/master
| 2021-09-10T23:15:20.569210
| 2018-04-04T03:41:10
| 2018-04-04T03:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
from django.shortcuts import render
from .models import Board
# Create your views here.
from django.http import HttpResponse
def home(request):
boards = Board.objects.all()
return render(request, 'home.html', {'boards': boards})
#boards_names = list()
#for board in boards:
# boards_names.append(board.name)
#response_html = '<br>'.join(boards_names)
#return HttpResponse(response_html)
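# Hedged wiring sketch (illustrative module/names, not taken from this repo):
# the home view above would typically be mapped in a URLconf, e.g.
#
#   from django.urls import path
#   from boards import views
#
#   urlpatterns = [path('', views.home, name='home')]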
|
[
"leahlang4d@gmail.com"
] |
leahlang4d@gmail.com
|
9db6c013e16fe13fede6db3506499051fa7b71f8
|
d0de52952d3fb219baedc830bfae48ccd7b31604
|
/paperstuff/counter_0620_16.py
|
bd2ad1beb6a69d2caca3baaa604c746a8edc7288
|
[] |
no_license
|
patrickschu/ota
|
3a512cdecf9291266812fd7a664a5ca8d3e969ea
|
7ee6cf0afe2fe6416dc4c334fc2db4d8c925882a
|
refs/heads/master
| 2020-04-06T06:56:12.633307
| 2016-08-28T21:34:20
| 2016-08-28T21:34:20
| 37,992,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,002
|
py
|
import codecs
import re
import time
import os
import nltk
import string
from string import punctuation
now=time.time()
#setting up the output file
outputfile="output_gaddlist_0714"
print outputfile
#reading in the yeslist, nolist or whatever. these are the words to iterate over/search for
yeslist=[]
f=codecs.open("/Users/ps22344/Downloads/ota-master/paperstuff/gadds_yeslist_withzeros_0713.txt_pandas_0to1700.txt", "r", "utf-8")
for line in f:
yeslist.append(line.rstrip("\n").split("\t"))
f.close()
#WATCH THIS SETTING
yeslist_words=[i[1] for i in yeslist]
print yeslist_words, "\n"
print "we have {} words\n".format(len(yeslist_words))
#this is the list with the files/books we're using
goodfiles=[]
f=open("/Users/ps22344/Downloads/ota-master/paperstuff/goodfiles_0620_16.txt", "r")
for line in f:
goodfiles.append(line.rstrip("\n"))
f.close()
print "we have {} files\n".format(len(goodfiles))
# some helper funcs
def tagextractor(text, tag, fili):
regexstring="<"+tag+"=(.*?)>"
result=re.findall(regexstring, text, re.DOTALL)
if len(result) != 1:
print "alarm in tagextractor", fili, result
return result[0]
def adtextextractor(text, fili):
regexstring="<text>(.*?)</text>"
result=re.findall(regexstring, text, re.DOTALL)
if len(result) != 1:
print "alarm in adtextextractor", fili, result
return result[0]
#CAREFUL!! THIS VARIES DEP ON WHERE THE FILE WSS OUTPUT
#
dicti={i:0 for i in yeslist_words}
output0=codecs.open(outputfile, "a", "utf-8")
#output column names
cols=['uniq', 'filenumber', 'otanumber', 'pubdate', 'genre', 'title', 'wordcount']
output0.write("\t".join(cols)+"\t")
output0.write("\t".join(yeslist_words)+"\n")
output0.close()
#the actual reader
for item in goodfiles:
try:
#we open the corpus files & read them
#/Users/ps22344/Downloads
output1=codecs.open(outputfile, "a", "utf-8")
finput=codecs.open(os.path.join("/Users","ps22344","Downloads", "ota_0621",str(item)+".txt"), "r", "utf-8")
text=finput.read()
#we get the metadata
otanumber=tagextractor(text, "otanumber", item )
filenumber=tagextractor(text, "no", item )
pubdate=tagextractor(text, "pubdate", item )
genre=tagextractor(text, "genre1", item )
title=tagextractor(text, "otatitle", item )
content=adtextextractor(text,item)
contentsplit=nltk.word_tokenize(content)
print "Before removing punctuation, this text was {} words long".format(len(contentsplit))
text=[i.lower() for i in contentsplit if i not in string.punctuation]
print "After removing punctuation, this text was {} words long".format(len(text))
#print len(contentsplit)
#setting up the list for the findings for each text
results=[]
#a list of the words we'll be searching for, to be used in regex
#write the item from ll, write filenumber etc, add tab for separator
outputlist=[unicode(item), filenumber, otanumber, pubdate, genre, title, unicode(len(text))]
output1.write("\t".join(outputlist)+"\t")
#iterate over all metadata
output1.close()
for thing in yeslist_words:
#no suffix
#note: raw-string prefixes are needed throughout - in a plain string "\b" is a backspace character, not a word boundary
words=re.findall(r"\b("+thing+r"\'?)\b",content)
#yes suffix
#words=re.findall(r"\b((?:dis|mis|re|un)?"+thing+r"\'?)",content)
dicti[thing]=dicti[thing]+len(words)
results.append(words)
print "reading", item, filenumber
output3=codecs.open(outputfile, "a", "utf-8")
output3.write("\t".join([str(len(i)) for i in results])+"\n")
output3.close()
logout=codecs.open(outputfile+"_log.txt", "a", "utf-8")
logout.write(str(results)+"\n")
logout.close()
except IOError, err:
print "Error", err
dictiout=open(outputfile+"_dicti.txt", "w")
sortdict=sorted(dicti.items(), key=lambda x: x[1], reverse=True)
dictiout.write("\n".join([str(i) for i in sortdict]))
print sortdict
dictiout.close()
#for spreadsheet
for item in yeslist_words:
print item
later=time.time()
runtime=later-now
print "written to", outputfile
print 'time has passed', runtime/60
os.system('say "your program has finished"')
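# --- Illustrative sketch of the whole-word counting step used above -----------
# (toy text, not the OTA corpus). The raw-string prefixes matter: in a plain
# string "\b" is a backspace character rather than a regex word boundary.
def _example_wholeword_counts():
    sample = "the gadd and the gadd's were counted"
    terms = ["gadd", "counted"]
    return dict((t, len(re.findall(r"\b(" + t + r"\'?)\b", sample))) for t in terms)
    # -> {'gadd': 2, 'counted': 1}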
|
[
"noreply@github.com"
] |
patrickschu.noreply@github.com
|
b497e3a28325dc350157121796079d30b5f50061
|
e9fa26be4d9e7a725d3f889b6aa9cf5029f47c2d
|
/lib/sqlalchemy/orm/events.py
|
b9987c52794359826e9f137791670f66640665a8
|
[
"BSD-3-Clause"
] |
permissive
|
ahriman-ru/azure-test
|
2e0ee70a8b453534cf82e44860f04dddd3ab8095
|
3c377163192139d046b4ec20ce5b44a0492aecd8
|
refs/heads/master
| 2021-07-17T03:53:14.025065
| 2017-10-20T12:04:11
| 2017-10-20T12:04:11
| 100,267,395
| 0
| 0
| null | 2017-08-14T12:54:01
| 2017-08-14T12:54:01
| null |
UTF-8
|
Python
| false
| false
| 87,089
|
py
|
# orm/events.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
from .. import event, exc, util
from .base import _mapper_or_none
import inspect
import weakref
from . import interfaces
from . import mapperlib, instrumentation
from .session import Session, sessionmaker
from .scoping import scoped_session
from .attributes import QueryableAttribute
from .query import Query
from sqlalchemy.util.compat import inspect_getargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
.. versionchanged:: 0.8 - events here will emit based
on comparing the incoming class to the type of class
passed to :func:`.event.listen`. Previously, the
event would fire for any class unconditionally regardless
of what class was sent for listening, despite
documentation which stated the contrary.
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
def remove(ref):
key = event.registry._EventKey(
None, identifier, listen,
instrumentation._instrumentation_factory)
getattr(instrumentation._instrumentation_factory.dispatch,
identifier).remove(key)
target = weakref.ref(target.class_, remove)
event_key.\
with_dispatch_target(instrumentation._instrumentation_factory).\
with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print "on load!"
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 instance events can be associated with
unmapped superclasses of mapped classes.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if not raw:
def wrap(state, *arg, **kw):
return fn(state.obj(), *arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(
propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`.Query`,
such as during :meth:`.Session.merge`.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed within the persistence of the object.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and the values
here typically come from the process of handling an INSERT or
UPDATE, such as via the RETURNING clause or from Python-side default
values.
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
'keys' is a list of attribute names. If None, the entire
state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False, **kw)
def remove(self, event_key):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 mapper events can be associated with
unmapped superclasses of mapped classes.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if identifier in ("before_configured", "after_configured") and \
target is not mapperlib.Mapper:
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target.")
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = \
inspect_getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
r"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def mapper_configured(self, mapper, class_):
r"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`.orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
on a per-mapper basis as each one is configured to the extent possible.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print "before commit!"
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and \
(
not isinstance(target, type) or
not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class.")
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
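A minimal, illustrative sketch of a listener (the function name is a
placeholder)::

    @event.listens_for(Session, "after_rollback")
    def receive_after_rollback(session):
        # the current transaction is not valid here; avoid emitting SQL
        print("DBAPI-level rollback occurred for", session)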
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
.. versionadded:: 0.7.3
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
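For illustration, a sketch of a listener that inspects pending changes
(the function name is a placeholder)::

    @event.listens_for(Session, "before_flush")
    def receive_before_flush(session, flush_context, instances):
        # 'new', 'dirty' and 'deleted' still reflect pre-flush state
        for obj in session.new:
            print("will insert:", obj)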
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
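A hedged sketch of a listener (the function name is illustrative); note
that the collections still show pre-flush state as described above::

    @event.listens_for(Session, "after_flush")
    def receive_after_flush(session, flush_context):
        # SQL has been emitted, but commit has not yet occurred
        print("flushed %d new objects" % len(session.new))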
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.Connection` object
which will be used for SQL statements.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
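A minimal sketch of a listener (the function name is a placeholder)::

    @event.listens_for(Session, "after_begin")
    def receive_after_begin(session, transaction, connection):
        # 'connection' is the Connection that will carry this
        # transaction's SQL statements
        print("transaction begun on", connection)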
"""
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. versionadded:: 0.8. Note that :meth:`~.SessionEvents.after_attach`
now fires off after the item is part of the session.
:meth:`.before_attach` is provided for those cases where
the item should not yet be part of the session state.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result))
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` - the :class:`.Query` object that this update operation
was called upon.
* ``context`` - the :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` - the :class:`.ResultProxy` returned as a result of the
bulk UPDATE operation.
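For illustration, a listener might read a few of these attributes (the
function name is a placeholder)::

    @event.listens_for(Session, "after_bulk_update")
    def receive_after_bulk_update(update_context):
        # rowcount reflects the rows matched by the bulk UPDATE
        print("bulk UPDATE affected %d rows" %
              update_context.result.rowcount)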
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result))
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`.Query.delete` method.
:param delete_context: a "delete context" object which contains
details about the delete, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` - the :class:`.Query` object that this delete operation
was called upon.
* ``context`` - the :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` - the :class:`.ResultProxy` returned as a result of the
bulk DELETE operation.
"""
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
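A minimal sketch of a listener for this transition (the function name
is illustrative)::

    @event.listens_for(Session, "transient_to_pending")
    def receive_transient_to_pending(session, instance):
        # typically fires from within Session.add()
        print("object is now pending:", instance)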
"""
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
This less common transition occurs when a pending object that
has been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_persistent(self, session, instance):
"""Intercept the "pending to persistent"" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
check the ``deleted`` flag sent to the
:meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
is deleted from the database within a flush, however the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
every object that will be deleted until the flush has proceeded.
The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
methods are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
* Calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
to :func:`~.event.listen`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'\D', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(cls, event_key, active_history=False,
raw=False, retval=False,
propagate=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, value, *arg):
if not raw:
target = target.obj()
if not retval:
fn(target, value, *arg)
return value
else:
return fn(target, value, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(
mgr[target.key]).base_listen(propagate=True)
def append(self, target, value, initiator):
"""Receive a collection append event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
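For illustration, a sketch of an append listener registered with
``retval=True`` (the class, attribute and function names are
placeholders)::

    @event.listens_for(SomeClass.some_collection, "append", retval=True)
    def receive_append(target, value, initiator):
        # with retval=True, the returned value is the one appended
        return value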
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
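A hedged sketch of a "set" listener registered with
``active_history=True`` so that the prior value is always loaded (the
class, attribute and function names are placeholders)::

    @event.listens_for(
        SomeClass.some_attribute, "set", active_history=True)
    def receive_set(target, value, oldvalue, initiator):
        if oldvalue is not value:
            print("changing from", oldvalue, "to", value)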
"""
def init_scalar(self, target, value, dict_):
"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed. A value of ``None`` is typically returned
in this case; no changes are made to the object's state.
The event handler can alter this behavior in two ways.
One is that a value other than ``None`` may be returned. The other
is that the value may be established as part of the object's state,
which will also have the effect that it is persisted.
Typical use is to establish a specific default value of an attribute
upon access::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that the value is to be persisted to the database.
**The given value is only persisted to the database if we
explicitly associate it with the object**. The ``dict_`` given
is the ``__dict__`` element of the mapped object, assuming the
default attribute instrumentation system is in place.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
When we establish the value in the given dictionary, the value will
be used in the INSERT statement established by the unit of work.
Normally, the default returned value of ``None`` is not established as
part of the object, to avoid the issue of mutations occurring to the
object in response to a normally passive "get" operation, and also
sidesteps the issue of whether or not the :meth:`.AttributeEvents.set`
event should be awkwardly fired off during an attribute access
operation. This does not impact the INSERT operation since the
``None`` value matches the value of ``NULL`` that goes into the
database in any case; note that ``None`` is skipped during the INSERT
to ensure that column and SQL-level default functions can fire off.
The attribute set event :meth:`.AttributeEvents.set` as well as the
related validation feature provided by :obj:`.orm.validates` is
**not** invoked when we apply our value to the given ``dict_``. To
have these events invoke in response to our newly generated
value, apply the value to the given object as a normal attribute
set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
def _init_some_attribute(target, value, dict_):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
The :meth:`.AttributeEvents.init_scalar` event may be used to
extract values from the default values and/or callables established on
mapped :class:`.Column` objects. See the "active column defaults"
example in :ref:`examples_instrumentation` for an example of this.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value that would be returned before this event
listener is invoked. This value begins as the value ``None``,
however will be the return value of the previous event handler
function if multiple listeners are present.
:param dict_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`.RelationshipProperty.collection_class`, and will always
be empty.
:param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The mechanics of the event will typically include that the given
collection is empty, even if it stored objects while being replaced.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`.Query` object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`.Query`. Some very basic operations
are possible now, however the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`.Query` object before it is composed into a
core :class:`.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
The event should normally be listened with the ``retval=True``
parameter set, so that the modified query may be returned.
"""
@classmethod
def _listen(
cls, event_key, retval=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
|
[
"root@MININT-HAS5VVP.europe.corp.microsoft.com"
] |
root@MININT-HAS5VVP.europe.corp.microsoft.com
|
661d05369dec1c499492b557a6394b37f15e5b5a
|
f14ef312e5ce56b3064f9f1fba41d60b307ec980
|
/4.1_morphology/number/char-lm-ud-stationary-separate-bidir-with-spaces-probe-baseline-prediction-wiki-plurals-2-tests-words-distractors-wikisource.py
|
40623e1408e769d4292bb2de95aa3aca4e7e08ce
|
[] |
no_license
|
m-hahn/tabula-rasa-rnns
|
d02160a2c2e3c58d933833449cbec85fb15280ae
|
0ffd77164bf93c9e90708fac2f931fe6cc7bb51e
|
refs/heads/master
| 2020-05-31T12:27:12.573104
| 2019-06-27T23:19:43
| 2019-06-27T23:19:43
| 190,281,220
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,080
|
py
|
# python char-lm-ud-stationary-separate-bidir-with-spaces-probe-baseline-prediction-wiki-plurals-2-tests-words-distractors-wikisource.py --language german --batchSize 128 --char_embedding_size 200 --hidden_dim 1024 --layer_num 2 --weight_dropout_in 0.1 --weight_dropout_hidden 0.35 --char_dropout_prob 0.0 --char_noise_prob 0.01 --learning_rate 0.2 --load-from wiki-german-nospaces-bptt-words-966024846
from paths import WIKIPEDIA_HOME
from paths import MODELS_HOME
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--language", dest="language", type=str)
parser.add_argument("--load-from", dest="load_from", type=str)
#parser.add_argument("--load-from-baseline", dest="load_from_baseline", type=str)
#parser.add_argument("--save-to", dest="save_to", type=str)
import random
parser.add_argument("--batchSize", type=int, default=16)
parser.add_argument("--char_embedding_size", type=int, default=100)
parser.add_argument("--hidden_dim", type=int, default=1024)
parser.add_argument("--layer_num", type=int, default=1)
parser.add_argument("--weight_dropout_in", type=float, default=0.01)
parser.add_argument("--weight_dropout_hidden", type=float, default=0.1)
parser.add_argument("--char_dropout_prob", type=float, default=0.33)
parser.add_argument("--char_noise_prob", type = float, default= 0.01)
parser.add_argument("--learning_rate", type = float, default= 0.1)
parser.add_argument("--myID", type=int, default=random.randint(0,1000000000))
parser.add_argument("--sequence_length", type=int, default=50)
args=parser.parse_args()
print(args)
import corpusIteratorWikiWords
def plusL(its):
for it in its:
for x in it:
yield x
def plus(it1, it2):
for x in it1:
yield x
for x in it2:
yield x
char_vocab_path = {"german" : "vocabularies/german-wiki-word-vocab-50000.txt", "italian" : "vocabularies/italian-wiki-word-vocab-50000.txt"}[args.language]
with open(char_vocab_path, "r") as inFile:
itos = [x.split("\t")[0] for x in inFile.read().strip().split("\n")[:50000]]
stoi = dict([(itos[i],i) for i in range(len(itos))])
import random
import torch
print(torch.__version__)
from weight_drop import WeightDrop
rnn = torch.nn.LSTM(args.char_embedding_size, args.hidden_dim, args.layer_num).cuda()
rnn_parameter_names = [name for name, _ in rnn.named_parameters()]
print(rnn_parameter_names)
#quit()
rnn_drop = WeightDrop(rnn, [(name, args.weight_dropout_in) for name, _ in rnn.named_parameters() if name.startswith("weight_ih_")] + [ (name, args.weight_dropout_hidden) for name, _ in rnn.named_parameters() if name.startswith("weight_hh_")])
output = torch.nn.Linear(args.hidden_dim, len(itos)+3).cuda()
char_embeddings = torch.nn.Embedding(num_embeddings=len(itos)+3, embedding_dim=args.char_embedding_size).cuda()
logsoftmax = torch.nn.LogSoftmax(dim=2)
train_loss = torch.nn.NLLLoss(ignore_index=0)
print_loss = torch.nn.NLLLoss(size_average=False, reduce=False, ignore_index=0)
char_dropout = torch.nn.Dropout2d(p=args.char_dropout_prob)
modules = [rnn, output, char_embeddings]
def parameters():
for module in modules:
for param in module.parameters():
yield param
optim = torch.optim.SGD(parameters(), lr=args.learning_rate, momentum=0.0) # 0.02, 0.9
named_modules = {"rnn" : rnn, "output" : output, "char_embeddings" : char_embeddings} #, "optim" : optim}
print("Loading model")
if args.load_from is not None:
checkpoint = torch.load(MODELS_HOME+"/"+args.load_from+".pth.tar")
for name, module in named_modules.items():
print(checkpoint[name].keys())
module.load_state_dict(checkpoint[name])
#else:
# assert False
####################################
from torch.autograd import Variable
# ([0] + [stoi[training_data[x]]+1 for x in range(b, b+sequence_length) if x < len(training_data)])
#from embed_regularize import embedded_dropout
def encodeWord(word):
numeric = ((stoi[word]+3 if word in stoi else 2) if True else 2+random.randint(0, len(itos)))
return numeric
rnn_drop.train(False)
#rnn_forward_drop.train(False)
#rnn_backward_drop.train(False)
#baseline_rnn_encoder_drop.train(False)
lossModule = torch.nn.NLLLoss(size_average=False, reduce=False, ignore_index=0)
def choice(numeric1, numeric2):
assert len(numeric1) == 1
assert len(numeric2) == 1
numeric = [numeric1[0], numeric2[0]]
maxLength = max([len(x) for x in numeric])
for i in range(len(numeric)):
while len(numeric[i]) < maxLength:
numeric[i].append(0)
input_tensor_forward = Variable(torch.LongTensor([[0]+x for x in numeric]).transpose(0,1).cuda(), requires_grad=False)
target = input_tensor_forward[1:]
input_cut = input_tensor_forward[:-1]
embedded_forward = char_embeddings(input_cut)
out_forward, hidden_forward = rnn_drop(embedded_forward, None)
prediction = logsoftmax(output(out_forward)) #.data.cpu().view(-1, 3+len(itos)).numpy() #.view(1,1,-1))).view(3+len(itos)).data.cpu().numpy()
losses = lossModule(prediction.view(-1, len(itos)+3), target.view(-1)).view(maxLength, 2)
losses = losses.sum(0).data.cpu().numpy()
return losses
def encodeListOfWordsIn(words):
numeric = [encodeWord(word) for word in words]
input_tensor_forward = Variable(torch.LongTensor(numeric).cuda(), requires_grad=False)
embedded_forward = char_embeddings(input_tensor_forward)
return [embedded_forward[i].data.cpu().numpy() for i in range(len(words))]
def encodeListOfWords(words):
numeric = [encodeWord(word) for word in words]
input_tensor_forward = Variable(torch.LongTensor(numeric).cuda(), requires_grad=False)
embedded_forward = [output.weight[word] for word in numeric] #char_embeddings(input_tensor_forward)
return [embedded_forward[i].data.cpu().numpy() for i in range(len(words))]
def choiceList(numeric):
for x in numeric:
assert len(x) == 1
# assert len(numeric1) == 1
# assert len(numeric2) == 1
numeric = [x[0] for x in numeric] #, numeric2[0]]
maxLength = max([len(x) for x in numeric])
for i in range(len(numeric)):
while len(numeric[i]) < maxLength:
numeric[i].append(0)
input_tensor_forward = Variable(torch.LongTensor([[0]+x for x in numeric]).transpose(0,1).cuda(), requires_grad=False)
target = input_tensor_forward[1:]
input_cut = input_tensor_forward[:-1]
embedded_forward = char_embeddings(input_cut)
out_forward, hidden_forward = rnn_drop(embedded_forward, None)
prediction = logsoftmax(output(out_forward)) #.data.cpu().view(-1, 3+len(itos)).numpy() #.view(1,1,-1))).view(3+len(itos)).data.cpu().numpy()
losses = lossModule(prediction.view(-1, len(itos)+3), target.view(-1)).view(maxLength, len(numeric))
losses = losses.sum(0).data.cpu().numpy()
return losses
#
#
#def encodeSequenceBatchForward(numeric):
# input_tensor_forward = Variable(torch.LongTensor([[0]+x for x in numeric]).transpose(0,1).cuda(), requires_grad=False)
#
## target_tensor_forward = Variable(torch.LongTensor(numeric).transpose(0,1)[2:].cuda(), requires_grad=False).view(args.sequence_length+1, len(numeric), 1, 1)
# embedded_forward = char_embeddings(input_tensor_forward)
# out_forward, hidden_forward = rnn_drop(embedded_forward, None)
## out_forward = out_forward.view(args.sequence_length+1, len(numeric), -1)
# # logits_forward = output(out_forward)
# # log_probs_forward = logsoftmax(logits_forward)
# return (out_forward[-1], hidden_forward)
#
#
##
#def encodeSequenceBatchBackward(numeric):
## print([itos[x-3] for x in numeric[0]])
## print([[0]+(x[::-1]) for x in numeric])
# input_tensor_backward = Variable(torch.LongTensor([[0]+(x[::-1]) for x in numeric]).transpose(0,1).cuda(), requires_grad=False)
## target_tensor_backward = Variable(torch.LongTensor([x[::-1] for x in numeric]).transpose(0,1)[:-2].cuda(), requires_grad=False).view(args.sequence_length+1, len(numeric), 1, 1)
# embedded_backward = char_embeddings(input_tensor_backward)
# out_backward, hidden_backward = rnn_backward_drop(embedded_backward, None)
## out_backward = out_backward.view(args.sequence_length+1, len(numeric), -1)
## logits_backward = output(out_backward)
## log_probs_backward = logsoftmax(logits_backward)
#
# return (out_backward[-1], hidden_backward)
#
import numpy as np
def predictNext(encoded, preventBoundary=True):
out, hidden = encoded
prediction = logsoftmax(output(out.unsqueeze(0))).data.cpu().view(3+len(itos)).numpy() #.view(1,1,-1))).view(3+len(itos)).data.cpu().numpy()
predicted = np.argmax(prediction[:-1] if preventBoundary else prediction)
return itos[predicted-3] #, prediction
def keepGenerating(encoded, length=100, backwards=False):
out, hidden = encoded
output_string = ""
# rnn_forward_drop.train(True)
for _ in range(length):
prediction = logsoftmax(2*output(out.unsqueeze(0))).data.cpu().view(3+len(itos)).numpy() #.view(1,1,-1))).view(3+len(itos)).data.cpu().numpy()
# predicted = np.argmax(prediction).items()
predicted = np.random.choice(3+len(itos), p=np.exp(prediction))
output_string += itos[predicted-3]
input_tensor_forward = Variable(torch.LongTensor([[predicted]]).transpose(0,1).cuda(), requires_grad=False)
embedded_forward = char_embeddings(input_tensor_forward)
out, hidden = (rnn_drop if not backwards else rnn_backward_drop)(embedded_forward, hidden)
out = out[-1]
# rnn_forward_drop.train(False)
return output_string if not backwards else output_string[::-1]
plurals = set()
formations = {"e" : set(), "n" : set(), "s" : set(), "same" : set(), "r" : set()}
for group in formations:
with open(f"stimuli/german-plurals-{group}.txt", "r") as inFile:
formations[group] = [tuple(x.split(" ")) for x in inFile.read().strip().split("\n")]
formations[group] = [(x,y) for x,y in formations[group] if x in stoi and y in stoi]
print(len(formations[group]))
print(formations["e"])
print(formations["s"])
print(formations["n"])
print(formations["same"])
def doChoiceList(xs):
for x in xs:
print(x)
losses = choiceList([encodeWord(x) for x in xs]) #, encodeWord(y))
print(losses)
return np.argmin(losses)
def doChoice(x, y):
print(x)
print(y)
losses = choice(encodeWord(x), encodeWord(y))
print(losses)
return 0 if losses[0] < losses[1] else 1
# classify singulars vs plurals
print("trained on n, s, e")
forNSE = list(plusL([formations["n"], formations["s"], formations["e"]]))
lengthsS = [0 for _ in range(55)]
lengthsP = [0 for _ in range(55)]
for sing, plur in forNSE:
lengthsS[len(sing)] += 1
lengthsP[len(plur)] += 1
lengths = [min(x,y) for x,y in zip(lengthsS, lengthsP)]
sumLengthsS = sum(lengthsS)
lengthsS = [float(x)/sumLengthsS for x in lengthsS]
sumLengthsP = sum(lengthsP)
lengthsP = [float(x)/sumLengthsP for x in lengthsP]
sumLengths = sum(lengths)
lengths = [float(x)/sumLengths for x in lengths]
ratioP = max([x/y if y > 0 else 0.0 for (x,y) in zip(lengths, lengthsP)])
ratioS = max([x/y if y > 0 else 0.0 for (x,y) in zip(lengths, lengthsS)])
import random
wordsEndingIn = {"r" : set(), "s" : set(), "n" : set(), "e" : set(), "g" : set(), "t" : set()}
from corpusIterator import CorpusIterator
with open("germanNounDeclension.txt") as inFile:
data = inFile.read().strip().split("###")[1:]
for noun in data:
noun = noun.strip().split("\n")[1:]
noun = [x.split("\t") for x in noun]
noun = {x[0] : [y.lower() for y in x[1:]] for x in noun}
if "Nominativ Singular" in noun and "Nominativ Plural" in noun:
for x in noun["Nominativ Singular"]:
if x[-1] in wordsEndingIn:
if x not in noun["Nominativ Plural"]:
if x in stoi:
wordsEndingIn[x[-1]].add(x)
#training = CorpusIterator("German", partition="train", storeMorph=True, removePunctuation=True)
#
#for sentence in training.iterator():
# for line in sentence:
# if line["posUni"] == "NOUN":
# morph = line["morph"]
# if "Number=Plur" not in morph and "Case=Dat" not in morph:
# if line["word"][-1] in wordsEndingIn:
# if line["word"].lower() in stoi:
# wordsEndingIn[line["word"][-1]].add(line["word"].lower())
for x in wordsEndingIn:
print(x, len(wordsEndingIn[x]))
#quit()
predictorsR = encodeListOfWords([x for x in wordsEndingIn["r"]])
predictorsS = encodeListOfWords([x for x in wordsEndingIn["s"]])
predictorsN = encodeListOfWords([x for x in wordsEndingIn["n"]])
predictorsE = encodeListOfWords([x for x in wordsEndingIn["e"]])
predictorsG = encodeListOfWords([x for x in wordsEndingIn["g"]])
predictorsT = encodeListOfWords([x for x in wordsEndingIn["t"]])
# from each type, sample N singulars and N plurals
N = 15
evaluationPoints = []
formationsBackup = formations
random.seed(1)
for _ in range(20):
formations = {x : set(list(y)[:]) for x, y in formationsBackup.items()}
singulars = {}
plurals = {}
for typ in ["n", "s", "e"]:
singulars[typ] = []
plurals[typ] = []
formations[typ] = sorted(list(formations[typ]))
for _ in range(N):
while True:
index, sampledS = random.choice(list(zip(range(len(formations[typ])), formations[typ])))
sampledS = sampledS[0]
ratio = lengths[len(sampledS)] / (ratioS * lengthsS[len(sampledS)])
assert 0<= ratio
assert ratio <= 1
if random.random() < ratio:
del formations[typ][index]
singulars[typ].append(sampledS)
break
while True:
index, sampledP = random.choice(list(zip(range(len(formations[typ])), formations[typ])))
sampledP = sampledP[1]
ratio = lengths[len(sampledP)] / (ratioP * lengthsP[len(sampledP)])
assert 0<= ratio
assert ratio <= 1
if random.random() < ratio:
del formations[typ][index]
plurals[typ].append(sampledP)
break
stratify_types = ["n" for _ in plurals["n"]] + ["s" for _ in plurals["s"]] + ["e" for _ in plurals["e"]]
plurals = plurals["n"] + plurals["s"] + plurals["e"]
singulars = singulars["n"] + singulars["s"] + singulars["e"]
assert len(plurals) == len(singulars)
print(singulars)
print(plurals)
print(len(plurals))
print(sum([len(x) for x in plurals])/float(len(plurals)))
print(sum([len(x) for x in singulars])/float(len(singulars)))
encodedPlurals = encodeListOfWords([y for y in plurals])
encodedSingulars = encodeListOfWords([x for x in singulars])
#predictors = encodedSingulars + encodedPlurals
#dependent = [0 for _ in encodedSingulars] + [1 for _ in encodedPlurals]
from sklearn.model_selection import train_test_split
sx_train, sx_test, sy_train, sy_test, st_train, st_test = train_test_split(encodedSingulars, [0 for _ in encodedSingulars], stratify_types, test_size=0.5, shuffle=True, stratify = stratify_types, random_state=1) # random_state=random.randint(0,100),
px_train, px_test, py_train, py_test, pt_train, pt_test = train_test_split(encodedPlurals, [1 for _ in encodedPlurals], stratify_types, test_size=0.5, shuffle=True, stratify = stratify_types, random_state=1) # random_state=random.randint(0,100),
x_train = sx_train + px_train
x_test = sx_test + px_test
y_train = sy_train + py_train
y_test = sy_test + py_test
t_train = st_train + pt_train
t_test = st_test + pt_test
print(y_train)
print(y_test)
from sklearn.linear_model import LogisticRegression
print("regression")
logisticRegr = LogisticRegression()
logisticRegr.fit(x_train, y_train)
# now look at other words that end in n, s, e
dependent = [0 for _ in predictorsR]
score = logisticRegr.score(predictorsR, dependent)
print(["r", score])
evaluationPoints.append(("r_distract", score))
dependent = [0 for _ in predictorsS]
score = logisticRegr.score(predictorsS, dependent)
print(["s", score])
evaluationPoints.append(("s_distract", score))
dependent = [0 for _ in predictorsN]
score = logisticRegr.score(predictorsN, dependent)
print(["n", score])
evaluationPoints.append(("n_distract", score))
dependent = [0 for _ in predictorsE]
score = logisticRegr.score(predictorsE, dependent)
print(["e", score])
evaluationPoints.append(("e_distract", score))
dependent = [0 for _ in predictorsG]
score = logisticRegr.score(predictorsG, dependent)
print(["g", score])
evaluationPoints.append(("g_distract", score))
dependent = [0 for _ in predictorsT]
score = logisticRegr.score(predictorsT, dependent)
print(["t", score])
evaluationPoints.append(("t_distract", score))
# predictions = logisticRegr.predict(predictorsS)
# print(predictions)
# print([("-",y) for x, y in zip(predictions, wordsEndingIn["e"]) if x == 1])
# print([("+",y) for x, y in zip(predictions, wordsEndingIn["e"]) if x == 0])
# print("==============")
print("----------------")
import math
firstEntries = list(set([x[0] for x in evaluationPoints]))
for entry in firstEntries:
values = [x[1] for x in evaluationPoints if x[0] == entry]
accuracy = sum(values)/len(values)
sd = math.sqrt(sum([x**2 for x in values])/len(values) - accuracy**2)
values = sorted(values)
lower = values[int(0.05*len(values))]
upper = values[int(0.95*len(values))]
print(entry, accuracy, sd, lower, upper)
quit()
|
[
"mhahn29@gmail.com"
] |
mhahn29@gmail.com
|
4f7bc9d4365f3ba81157eb32684242f5b8e44deb
|
79e607a063a0989f5563efb6b65321e38336566d
|
/2-AdvancedStructures/Remove_Lista.py
|
30636d3939bdc3ce1c22e4551fd54e2fafc7d5b9
|
[] |
no_license
|
viniciuskurt/LetsCode-PracticalProjects
|
9e9c583e7ce72d06d26bc85fb6ff8ce7dfdfce75
|
c49a44f76c9fe1dcb6c7b5b45a25f296929f3608
|
refs/heads/main
| 2023-06-30T21:37:21.708093
| 2021-07-29T00:58:16
| 2021-07-29T00:58:16
| 382,719,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
'''
REMOVE
Deletes values from a list
- Raises an error if the element does not exist
'''
cidades = ['São Paulo', 'Brasilia', 'Curitiba', 'Avaré', 'Florianópolis']
print(cidades)
cidades.remove('Brasilia')
print(cidades)
# error: 'Portugal' is not in the list, so remove() raises ValueError
cidades.remove('Portugal')
print(cidades)
|
[
"viniciuskurt@hotmail.com"
] |
viniciuskurt@hotmail.com
|
19c14d9a37f6bafb207b27745fe34fdcb3d325be
|
541c9ccbd0b936c34198df0fcfb13a5d791a0338
|
/youtube_player.py
|
fdc702c58ba59da7a23dc2610ead4038e01b464e
|
[] |
no_license
|
ak3gj/WiCS-Hackathon
|
676ea13176907ac20ea6a998c61320393b59e9bb
|
78000e53f4f4a9190094bfa5496d68e4c0548c5b
|
refs/heads/master
| 2020-05-05T08:49:01.537433
| 2019-04-25T16:17:06
| 2019-04-25T16:17:06
| 179,879,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,875
|
py
|
# import pafy, pyglet
# import urllib.request
# from urllib.parse import *
# from bs4 import BeautifulSoup
#
#
# class Youtube_mp3():
# def __init__(self):
# self.lst = []
# self.dict = {}
# self.dict_names = {}
# self.playlist = []
#
# def url_search(self, search_string, max_search):
# textToSearch = search_string
# query = urllib.parse.quote(textToSearch)
# url = "https://www.youtube.com/results?search_query=" + query
# response = urllib.request.urlopen(url)
# html = response.read()
# soup = BeautifulSoup(html, 'lxml')
# i = 1
# for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):
# if len(self.dict) < max_search:
# self.dict[i] = 'https://www.youtube.com' + vid['href']
# i += 1
# else:
# break
#
#
# def get_search_items(self, max_search):
#
# if self.dict != {}:
# i = 1
# for url in self.dict.values():
# try:
# info = pafy.new(url)
# self.dict_names[i] = info.title
# print("{0}. {1}".format(i, info.title))
# i += 1
#
# except ValueError:
# pass
#
# def play_media(self, num):
# url = self.dict[int(num)]
# info = pafy.new(url)
# #audio = info.m4astreams[-1]
# audio = info.getbestaudio(preftype="m4a")
# audio.download('song.m4a', quiet=True)
# song = pyglet.media.load('song.m4a')
# player = pyglet.media.Player()
# player.queue(song)
# print("Playing: {0}.".format(self.dict_names[int(num)]))
# player.play()
# stop = ''
# while True:
# stop = input('Type "s" to stop; "p" to pause; "" to play; : ')
# if stop == 's':
# player.pause()
# break
# elif stop == 'p':
# player.pause()
# elif stop == '':
# player.play()
# elif stop == 'r':
# #player.queue(song)
# #player.play()
# print('Replaying: {0}'.format(self.dict_names[int(num)]))
#
#
#
#
#
# def download_media(self, num):
# url = self.dict[int(num)]
# info = pafy.new(url)
# audio = info.getbestaudio(preftype="m4a")
# song_name = self.dict_names[int(num)]
# print("Downloading: {0}.".format(self.dict_names[int(num)]))
# print(song_name)
# song_name = input("Filename (Enter if as it is): ")
# # file_name = song_name[:11] + '.m4a'
# file_name = song_name + '.m4a'
# if song_name == '':
# audio.download(remux_audio=True)
# else:
# audio.download(filepath=file_name, remux_audio=True)
#
#
# def bulk_download(self, url):
# info = pafy.new(url)
# audio = info.getbestaudio(preftype="m4a")
# song_name = self.dict_names[int(num)]
# print("Downloading: {0}.".format(self.dict_names[int(num)]))
# print(song_name)
# song_name = input("Filename (Enter if as it is): ")
# # file_name = song_name[:11] + '.m4a'
# file_name = song_name + '.m4a'
# if song_name == '':
# audio.download(remux_audio=True)
# else:
# audio.download(filepath=file_name, remux_audio=True)
#
# def add_playlist(self, search_query):
# url = self.url_search(search_query, max_search=1)
# self.playlist.append(url)
#
#
#
#
#
# if __name__ == '__main__':
# print('Welcome to the Youtube-Mp3 player.')
# x = Youtube_mp3()
# search = ''
# while search != 'q':
# search = input("Youtube Search: ")
# old_search = search
# max_search = 5
# # if search == '':
# # print('\nFetching for: {0} on youtube.'.format(old_search.title()))
# # x.url_search(search, max_search)
# # x.get_search_items(max_search)
# # song_number = input('Input song number: ')
# # x.play_media(song_number)
#
# x.dict = {}
# x.dict_names = {}
#
# if search == 'q':
# print("Ending Youtube-Mp3 player.")
# break
#
# download = input('1. Play Live Music\n2. Download Mp3 from Youtube.\n')
# if search != 'q' and (download == '1' or download == ''):
# print('\nFetching for: {0} on youtube.'.format(search.title()))
# x.url_search(search, max_search)
# x.get_search_items(max_search)
# song_number = input('Input song number: ')
# x.play_media(song_number)
# elif download == '2':
# print('\nDownloading {0} (conveniently) from youtube servers.'.format(search.title()))
# x.url_search(search, max_search)
# x.get_search_items(max_search)
# song_number = input('Input song number: ')
# x.download_media(song_number)
# #github commit
# import pafy
# import vlc
# url = "https://youtu.be/-3wlroM2gZ8"
# video = pafy.new(url)
# best = video.getbest()
# playurl = best.url
# movie = playurl
# instance = vlc.Instance()  # a VLC instance is needed before creating media objects
# media = instance.media_new(movie)
# media_list = instance.media_list_new([movie]) #A list of one movie
# player = instance.media_player_new()
# player.set_media(media)
#
# #Create a new MediaListPlayer instance and associate the player and playlist with it
#
# list_player = instance.media_list_player_new()
# list_player.set_media_player(player)
# list_player.set_media_list(media_list)
# list_player.play()
# from pygame import mixer # Load the required library
#
# mixer.init()
# mixer.music.load('Running ft Gabriel Garzón-Montano.mp3')
# mixer.music.play()
import webbrowser
webbrowser.open("hello.mp3")
|
[
"noreply@github.com"
] |
ak3gj.noreply@github.com
|
9e5ae85dbb171bc086c2ae554f5c1ab0d53b5331
|
e8de10ce2d194138432bc578f397f014adaa5025
|
/src/trainers/UserGru_predict.py
|
615805b5559a789a61f86a56dd8ba72399aa33d6
|
[] |
no_license
|
thanhtcptit/Neural-Session-Aware-Recommendation
|
ef456d13d152fe79f5de62843c5c6c5a8cf97cd1
|
9034d0700ecee001eb5d5fee63ae082f92771427
|
refs/heads/master
| 2022-08-28T21:59:19.726920
| 2019-07-17T01:47:44
| 2019-07-17T01:47:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,805
|
py
|
import sys
sys.path.append('../..')
import numpy as np
import tensorflow as tf
from time import time
from tqdm import tqdm
from src.utils.qpath import *
class UserGruPredict():
def __init__(self, sess, model, config):
self.config = config
self.model = model
self.sess = sess
self.saver = tf.train.Saver()
def load(self, path):
self.saver.restore(self.sess, path)
print('++ Load model from {} ++'.format(path))
@staticmethod
def calculate_ranks(_pr, y_true):
y_true = np.reshape(y_true, [-1])
rows_idx = [i for i in range(len(y_true)) if y_true[i] != 0]
mask_rows_idx = [[i] for i in range(len(y_true)) if y_true[i] != 0]
mask_cols_idx = [[j] for j in y_true if j != 0]
ranks = (_pr[rows_idx, :] >
_pr[mask_rows_idx, mask_cols_idx]).sum(axis=1) + 1
return ranks, len(rows_idx)
@staticmethod
def evaluate(ranks, top):
count_true = [0.] * len(top)
rr = [0.] * len(top)
for i, n in enumerate(top):
true_predict = ranks <= n
count_true[i] += true_predict.sum()
rr[i] += (1. / ranks[true_predict]).sum()
return count_true, rr
def run_predict(self, session, pos):
feed_dict = {
self.model.user: session[:, :-1, 0],
self.model.item: session[:, :-1, 1],
self.model.day_of_week: session[:, :-1, 2],
self.model.month_period: session[:, :-1, 3],
self.model.next_items: session[:, 1:, 1],
self.model.keep_pr: 1
}
pr, attention = self.sess.run([self.model.get_output(),
self.model.get_attention_weight()],
feed_dict=feed_dict)
assert len(pr) != 1
pr = pr[pos]
current_item = session[0][pos][1]
# print(session)
# print(attention)
print('===================')
if 'context' in self.config.input:
print('Item: ', attention[0][pos][0])
print('User: ', attention[0][pos][1])
print('Day of week: ', attention[0][pos][2])
print('Half month: ', attention[0][pos][3])
else:
print('Item attention: ', attention[0][0][pos][0])
print('User attention: ', attention[1][0][pos][0])
top_id = np.argpartition(pr, -12)[-12:]
top_id = top_id[np.argsort(pr[top_id])[::-1]]
top_id = list(top_id)
if 0 in top_id:
del top_id[top_id.index(0)]
if current_item in top_id:
del top_id[top_id.index(current_item)]
return top_id[:10]
def run_test(self):
pos = 0
session = [[]]
with open(PROCESSED_DATA_DIR + 'clean-dev') as f:
for line in tqdm(f):
if '-' in line:
session = [[]]
pos = 0
continue
u, i, *_ = line.strip().split(',')
session[0].append([u, i, 0, 0, 0])
if len(session[0]) == 1:
continue
tmp = [session[0][:]]
l = len(tmp[0])
for i in range(11 - l):
tmp[0].append([0, 0, 0, 0, 0])
tmp = np.array(tmp)
feed_dict = {
self.model.user: tmp[:, :-1, 0],
self.model.item: tmp[:, :-1, 1],
self.model.day_of_week: tmp[:, :-1, 2],
self.model.month_period: tmp[:, :-1, 3],
self.model.next_items: tmp[:, 1:, 1],
self.model.keep_pr: 1
}
pr = self.sess.run(
self.model.get_output(), feed_dict=feed_dict)
assert len(pr) != 1
pr = pr[pos]
pos += 1
top_id = np.argpartition(pr, -10)[-10:]
top_id = top_id.tolist()
if session[0][pos][1] in top_id:
print(session[0])
def eval_step(self):
batch_data = self.data_loader.next_batch()
feed_dict = {
self.model.user: batch_data[:, :-1, 0],
self.model.item: batch_data[:, :-1, 1],
self.model.day_of_week: batch_data[:, :-1, 2],
self.model.month_period: batch_data[:, :-1, 3],
self.model.next_items: batch_data[:, 1:, 1],
self.model.keep_pr: 1
}
pr = self.sess.run(self.model.get_output(), feed_dict=feed_dict)
assert len(pr) != 1
batch_ranks, num_events = \
self.calculate_ranks(pr, batch_data[:, 1:, 1])
batch_cp, batch_rr = self.evaluate(batch_ranks, [5, 20])
return batch_cp, batch_rr, num_events
|
[
"thanh.ptit.96@gmail.com"
] |
thanh.ptit.96@gmail.com
|
c7632512db65fbf5435138ef11c509970fc25ad5
|
a3d058c6a80d4068fa4d3185ddd2dec91abc82d7
|
/190129_배달.py
|
35d9925a426d2f4a556463ca5ec968d2f6fa91d6
|
[] |
no_license
|
guard1000/Everyday-coding
|
d6f496654b635738a4284270f6c5d285116a760e
|
7755f99cdb512d623392af82282bf17b47cb77f2
|
refs/heads/master
| 2021-08-18T22:26:04.322162
| 2021-07-21T14:53:28
| 2021-07-21T14:53:28
| 161,440,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
import sys
def dijkstra(K, V, graph):
INF = sys.maxsize
s = [False] * V
d = [INF] * V
d[K - 1] = 0
while True:
m = INF
N = -1
for j in range(V):
if not s[j] and m > d[j]:
m = d[j]
N = j
if m == INF:
break
s[N] = True
for j in range(V):
if s[j]: continue
via = d[N] + graph[N][j]
if d[j] > via:
d[j] = via
return d
def solution(N, road, K):
INF = sys.maxsize
answer=0
graph = [[INF for cols in range(N)] for rows in range(N)]
for r in road:
if graph[r[0] - 1][r[1] - 1] > r[2]:
graph[r[0] - 1][r[1] - 1] = r[2]
graph[r[1] - 1][r[0] - 1] = r[2]
for d in dijkstra(1, N, graph):
if d <= K:
answer +=1
return answer
'''
def search(visit, graph, s, N, nxt):
visit[s] = K-
for j in range(N):
if visit[j] != 0 and visit[j][1] < sinfo[1] - graph[sinfo[0]][j]:
visit[j] = 0
if graph[sinfo[0]][j] != 2001 and sinfo[1]-graph[sinfo[0]][j] >= 0 and visit[j] ==0:
nxt.append([j,sinfo[1]-graph[sinfo[0]][j]])
if len(nxt) == 0:
return N-visit.count(0)
return search(visit,graph,nxt.pop(0),N,nxt)
def solution(N, road, K):
graph = [[2001 for cols in range(N)] for rows in range(N)]
visit = [0 for i in range(N)]
for r in road:
if graph[r[0]-1][r[1]-1] > r[2]:
graph[r[0]-1][r[1]-1] = r[2]
graph[r[1]-1][r[0]-1] = r[2]
nxt=[]
return search(visit,graph,0,N,nxt)
'''
#print(solution(5,[[1,2,1],[2,3,3],[5,2,2],[1,4,2],[5,3,1],[5,4,2]],3))
print(solution(6,[[1,2,1],[1,3,2],[2,3,2],[3,4,3],[3,5,2],[3,5,3],[5,6,1]],4))
|
[
"cjsdnr885@naver.com"
] |
cjsdnr885@naver.com
|
de38dbcce2d8363da4320239d194aa376b3f5f4f
|
bbcd8166118279da0de4c3a04cc5bc97aa49b368
|
/intertainment_center.py
|
2eea65f6f9c6d12b3859c2664e8b34611bcbd008
|
[] |
no_license
|
AlyZahran/Movie_trailer
|
7e92c865880f2c2ea5106a7a07e9ecf8f3898122
|
67c321a01b1fbf08f422349b90b48978313c0d13
|
refs/heads/master
| 2021-07-15T20:55:50.594498
| 2017-10-05T12:59:27
| 2017-10-05T12:59:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,078
|
py
|
import fresh_tomatoes
import media
toy_story = media.Movie(
"toy story",
"a story of a boy and his toys that come to life !",
"http://a.dilcdn.com/bl/wp-content/uploads/sites/8/2013/02/toy_story_wallpaper_by_artifypics-d5gss19.jpg",
"https://www.youtube.com/watch?v=KYz2wyBy3kc"
)
THOR = media.Movie(
"THOR",
"a boy playing with his toy",
"http://www.tahrirnews.com/files/cached/images/b501ac993c0086373c408223b6aea6e6_920_420.jpg",
"https://www.youtube.com/watch?v=v7MGUNV8MxU&t=3s"
)
fast_furious_8 = media.Movie(
"fast & furious 8",
"is a film talking about car racing",
"https://media.premiumtimesng.com/wp-content/files/2017/04/fate-of-the-furious-poster-header-image.jpg",
"https://www.youtube.com/watch?v=uisBaTkQAEs&t=1s"
)
Hrob_Edtrary = media.Movie(
"Hrob Edtrary",
"is an action film",
"http://www.el-tareeq.net/images/NewsArticle/16680.jpg",
"https://www.youtube.com/watch?v=kRiQRPHC9O4"
)
JUSTICE_LEAGUE = media.Movie(
"JUSTICE LEAGUE",
"Justice League is an upcoming American superhero film based on the DC Comics superhero team of the same name, distributed by Warner Bros",
"http://www.konbini.com/us/files/2017/07/league.jpg",
"https://www.youtube.com/watch?v=3cxixDgHUYw"
)
music_maker = media.Movie(
"music player",
"a man who playing on piano",
"https://i.ytimg.com/vi/W2I9b5WZuYA/hqdefault.jpg",
"https://www.youtube.com/watch?v=1GCPDChh8m0"
)
school_of_rock = media.Movie(
"School of rock",
"School of Rock is a 2003 musical comedy film directed by Richard Linklater, produced by Scott Rudin, and written by Mike White",
"https://i.ytimg.com/vi/SfStJdDyeQo/hqdefault.jpg",
"https://www.youtube.com/watch?v=z5aLjGxdX_0"
)
movies = [toy_story, THOR, fast_furious_8, Hrob_Edtrary, JUSTICE_LEAGUE, music_maker, school_of_rock]
fresh_tomatoes.open_movies_page(movies)  # builds the Fresh Tomatoes HTML page and opens it in the browser
|
[
"noreply@github.com"
] |
AlyZahran.noreply@github.com
|