blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e881c76005641b8cc28159d379e1ebb69b36acda | 67a7c314fc99d9cd7a677fcb6bc2b6dfa20a9cff | /spambayes-1.0.4/windows/py2exe/setup_all.py | 60446035118e67633d6b59e7c128165b8689f6a7 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Xodarap/Eipi | 7ebbb9fd861fdb411c1e273ea5d2a088aa579930 | d30997a737912e38316c198531f7cb9c5693c313 | refs/heads/master | 2016-09-11T06:28:01.333832 | 2011-05-03T15:35:20 | 2011-05-03T15:35:20 | 1,367,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,674 | py | # setup_all.py
# A distutils setup script for SpamBayes binaries
import sys, os, glob
sb_top_dir = os.path.abspath(os.path.dirname(os.path.join(__file__, "../../../..")))
sys.path.append(sb_top_dir)
sys.path.append(os.path.join(sb_top_dir, "windows"))
sys.path.append(os.path.join(sb_top_dir, "scripts"))
sys.path.append(os.path.join(sb_top_dir, "Outlook2000"))
sys.path.append(os.path.join(sb_top_dir, "Outlook2000/sandbox"))
import spambayes.resources
# Generate the dialogs.py file.
import dialogs
dialogs.LoadDialogs()
# ModuleFinder can't handle runtime changes to __path__, but win32com uses them,
# particularly for people who build from sources. Hook this in.
try:
import modulefinder
import win32com
for p in win32com.__path__[1:]:
modulefinder.AddPackagePath("win32com", p)
for extra in ["win32com.shell","win32com.mapi"]:
__import__(extra)
m = sys.modules[extra]
for p in m.__path__[1:]:
modulefinder.AddPackagePath(extra, p)
except ImportError:
# no build path setup, no worries.
pass
from distutils.core import setup
import py2exe
py2exe_options = dict(
packages = "spambayes.resources,encodings",
excludes = "win32ui,pywin,pywin.debugger", # pywin is a package, and still seems to be included.
includes = "dialogs.resources.dialogs,weakref", # Outlook dynamic dialogs
dll_excludes = "dapi.dll,mapi32.dll",
typelibs = [
('{00062FFF-0000-0000-C000-000000000046}', 0, 9, 0),
('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1),
('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0),
]
)
# These must be the same IDs as in the dialogs. We really should just extract
# them from our rc scripts.
outlook_bmp_resources = [
( 125, os.path.join(sb_top_dir, r"Outlook2000\dialogs\resources\sbwizlogo.bmp")),
( 127, os.path.join(sb_top_dir, r"Outlook2000\dialogs\resources\folders.bmp")),
(1062, os.path.join(sb_top_dir, r"Outlook2000\dialogs\resources\sblogo.bmp")),
# and these are currently hard-coded in addin.py
(6000, os.path.join(sb_top_dir, r"Outlook2000\images\recover_ham.bmp")),
(6001, os.path.join(sb_top_dir, r"Outlook2000\images\delete_as_spam.bmp")),
]
# These are just objects passed to py2exe
outlook_addin = dict(
modules = ["addin"],
dest_base = "bin/outlook_addin",
bitmap_resources = outlook_bmp_resources,
create_exe = False,
)
#outlook_manager = Options(
# script = os.path.join(sb_top_dir, r"Outlook2000\manager.py"),
# bitmap_resources = outlook_bmp_resources,
#)
outlook_dump_props = dict(
script = os.path.join(sb_top_dir, r"Outlook2000\sandbox\dump_props.py"),
dest_base = "bin/outlook_dump_props",
icon_resources = [(100, os.path.join(sb_top_dir,
r"windows\resources\sbicon.ico")),
],
)
# A "register" utility for Outlook. This should not be necessary, as
# 'regsvr32 dllname' does exactly the same thing. However, Inno Setup
# version 4 appears to, upon uninstall, do something that prevents the
# files used by the unregister process to be deleted. Unregistering via
# this EXE solves the problem.
outlook_addin_register = dict(
script = os.path.join(sb_top_dir, r"Outlook2000\addin.py"),
dest_base = "bin/outlook_addin_register",
icon_resources = [(100, os.path.join(sb_top_dir,
r"windows\resources\sbicon.ico")),
],
)
service = dict(
dest_base = "bin/sb_service",
modules = ["pop3proxy_service"],
icon_resources = [(100, os.path.join(sb_top_dir,
r"windows\resources\sbicon.ico")),
],
)
sb_server = dict(
dest_base = "bin/sb_server",
script = os.path.join(sb_top_dir, "scripts", "sb_server.py")
)
sb_pop3dnd = dict(
dest_base = "bin/sb_pop3dnd",
script = os.path.join(sb_top_dir, "scripts", "sb_pop3dnd.py")
)
sb_upload = dict(
dest_base = "bin/sb_upload",
script = os.path.join(sb_top_dir, "scripts", "sb_upload.py")
)
pop3proxy_tray = dict(
dest_base = "bin/sb_tray",
script = os.path.join(sb_top_dir, "windows", "pop3proxy_tray.py"),
icon_resources = [(100, os.path.join(sb_top_dir, r"windows\resources\sbicon.ico")),
(1000, os.path.join(sb_top_dir, r"windows\resources\sb-started.ico")),
(1010, os.path.join(sb_top_dir, r"windows\resources\sb-stopped.ico"))],
)
autoconfigure = dict(
dest_base = "bin/setup_server",
script = os.path.join(sb_top_dir, "windows", "autoconfigure.py"),
)
outlook_data_files = [
["docs/outlook", [os.path.join(sb_top_dir, r"Outlook2000\about.html")]],
["docs/outlook/docs", glob.glob(os.path.join(sb_top_dir, r"Outlook2000\docs\*.html"))],
["docs/outlook/docs/images", glob.glob(os.path.join(sb_top_dir, r"Outlook2000\docs\images\*.jpg"))],
["bin", [os.path.join(sb_top_dir, r"Outlook2000\default_bayes_customize.ini")]],
]
proxy_data_files = [
["docs/sb_server", [os.path.join(sb_top_dir, r"windows\readme_proxy.html")]],
["docs/sb_server", [os.path.join(sb_top_dir, r"windows\docs\troubleshooting.html")]],
# note that this includes images that are already in the outlook/docs/images
# directory - we need to consolidate the documentation (in terms of
# sharing images, if nothing else)
["docs/sb_server/docs/images", glob.glob(os.path.join(sb_top_dir, r"windows\docs\images\*.jpg"))],
]
common_data_files = [
["", [os.path.join(sb_top_dir, r"windows\resources\sbicon.ico")]],
["", [os.path.join(sb_top_dir, r"LICENSE.txt")]],
]
# Default and only distutils command is "py2exe" - save adding it to the
# command line every single time.
if len(sys.argv)==1 or \
(len(sys.argv)==2 and sys.argv[1] in ['-q', '-n']):
sys.argv.append("py2exe")
setup(name="SpamBayes",
packages = ["spambayes.resources"],
package_dir = {"spambayes.resources" : spambayes.resources.__path__[0]},
# We implement a COM object.
com_server=[outlook_addin],
# A service
service=[service],
# console exes for debugging
console=[sb_server, sb_upload, outlook_dump_props, sb_pop3dnd],
# The taskbar
windows=[pop3proxy_tray, outlook_addin_register, autoconfigure],
# and the misc data files
data_files = outlook_data_files + proxy_data_files + common_data_files,
options = {"py2exe" : py2exe_options},
zipfile = "lib/spambayes.modules",
)
| [
"eipi@mybox.(none)"
] | eipi@mybox.(none) |
7ca149cf8cd4d832a7a8931e60ff4f1a8f7b271b | c95a683e284ff6cb3eac9bc42a6141a3e2d3b629 | /src/file_reader.py | 638fc1fba5eeb810a5599d8d826e4374204615f8 | [] | no_license | michaelmcmillan/michaelmcmillan.github.io | 06f2931d5210e961e6bc2caddd76ba3160eeda48 | fe37f851518bf638d03c1b124abc4f6450a8a386 | refs/heads/master | 2020-05-30T04:38:10.066296 | 2019-07-17T15:57:28 | 2019-07-17T15:57:28 | 19,186,322 | 0 | 0 | null | 2015-07-24T12:29:06 | 2014-04-26T19:25:11 | HTML | UTF-8 | Python | false | false | 81 | py | def get_file(path):
with open(path, 'r') as data:
return data.read()
| [
"email@michaelmcmillan.net"
] | email@michaelmcmillan.net |
5c713a2c568f78b3d8fbfcb025ede730942f035b | 1dcd3e78eca92356365faceb824addb53ff592e5 | /oracle_monitor.py | 789e9634d54f2a4b2765c646e2a8a3999331ad18 | [] | no_license | LingFangYuan/SendMailOrWX | b7201afce52d32c3d92fd087000aaa87c0f8006d | ca4beb1888823604a19283b6db5b07cd46948b6b | refs/heads/master | 2020-05-16T22:33:12.084808 | 2019-05-07T09:34:42 | 2019-05-07T09:34:42 | 183,339,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | import oracle_exec
from mysql_monitor import formattext
from get_sqltext import get_sql
def get_data(path):
    """
    Load a SQL statement from a file and run it against Oracle.

    :param path: path handed to ``get_sql`` to load the SQL text
    :return: result of ``oracle_exec.exec`` -- unpacked by the callers
             below as a ``(rows, cursor_description)`` pair
    """
    sql = get_sql(path)
    return oracle_exec.exec(sql)
def set_table(subject, path):
    """Build the HTML report table (bold title, headers, data rows) for
    the query stored at *path*."""
    rows, description = get_data(path)
    opening = ("<strong>" + subject + "</strong>"
               "<table border='1' cellpadding='5' cellspacing='0'>"
               '<caption><strong></strong></caption>')
    pieces = [opening, set_heads(description), set_rows(rows), "</table>"]
    return "".join(pieces)
def set_heads(de):
    """Render a header row from a cursor description, skipping the last
    column (it carries the row-highlight flag, not data)."""
    cells = ["<td>" + column[0] + "</td>" for column in de[:-1]]
    return "<tr>" + "".join(cells) + "</tr>"
def set_rows(re):
    """
    Render query result rows as HTML ``<tr>`` elements.

    The last element of each row tuple is a highlight flag: 1 means the
    row is styled red.  The flag column itself is not rendered as a cell.
    ``datetime`` cells are formatted as ``YYYY-MM-DD``; everything else
    goes through ``str``.

    :param re: sequence of row tuples (e.g. ``cursor.fetchall()`` output)
    :return: concatenated ``<tr>...</tr>`` markup; '' for an empty result
    """
    # Use the stdlib module directly instead of reaching through
    # oracle_exec.datetime.
    import datetime

    if not re:
        # Bug fix: len(re[0]) used to raise IndexError on an empty
        # result set.
        return ''
    width = len(re[0])
    parts = []
    for row in re:
        parts.append('<tr style="color:red">' if row[width - 1] == 1 else "<tr>")
        for j in range(width - 1):
            cell = row[j]
            if isinstance(cell, datetime.datetime):
                text = cell.strftime('%Y-%m-%d')
            else:
                text = str(cell)
            parts.append("<td>" + text + "</td>")
        parts.append("</tr>")
    return ''.join(parts)
def get_html(subject, path):
    """Return the full HTML report (title + table) for the query at *path*."""
    return set_table(subject, path)
def get_text(subject, path):
    """
    Build the plain-text message body (with a details link) for the
    query at *path*.

    :return: formatted message string, or None when the query returned
             no rows
    """
    re, de = get_data(path)
    text = None
    if re:
        text = formattext(re, de)
        text = subject + "\n" + text + '<a href="https://mail.qq.com/cgi-bin/loginpage">查看详情</a>'
    # Bug fix: the computed message was previously discarded -- the
    # function ended without a return statement, so callers always got
    # None.
    return text
| [
"786173189@qq.com"
] | 786173189@qq.com |
64ef22513cede77e09605582fc911425e63ca7ac | 233d852269c62cf5792adc74e50b78161f9d29d0 | /apps/question_record/migrations/0003_auto_20181128_2351.py | 0bb0cd31ca72b9cf2e2c1746715a8f8d55eb60a2 | [] | no_license | liao-shuai/chaflow | ce83aa52383c335c2e8ad4863c40ac3f53b3f472 | f26dd0b9c74e1fb8dbb5181fdb9d0ec6cad6f981 | refs/heads/master | 2021-02-04T16:24:21.852056 | 2019-09-13T13:36:58 | 2019-09-13T13:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-11-28 15:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: switch ask_question.ctime to
    auto_now_add so the submission timestamp is stamped automatically
    when a row is created."""
    dependencies = [
        ('question_record', '0002_auto_20181128_2328'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ask_question',
            name='ctime',
            field=models.DateTimeField(auto_now_add=True, verbose_name='提交时间'),
        ),
    ]
| [
"irainsun@live.cn"
] | irainsun@live.cn |
241932a8711c5e9ed45a20db44356998e7522fb1 | 162b86e030ccb6b2c3adb2e540f9892f25abccbf | /gradient_descent_new/experiments/2019-04-25/observables_vs_gamma-variations/scan_gamma.py | 94e51a71f241a80e8218a6d3e73da11d807a667a | [] | no_license | samueljmcameron/pfc_fibrils | a0d647a1a4490f25c4b7d42be2815c93967d18df | 564164b815229f3b5e863a95aa885dab4c52dfae | refs/heads/master | 2021-07-03T20:50:46.260599 | 2019-05-31T19:40:05 | 2019-05-31T19:40:05 | 144,604,089 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,736 | py | import numpy as np
import subprocess
import sys
import time
sys.path.append('../../scripts/')
from singlerun import SingleRun
from readparams import ReadParams
if __name__=="__main__":
start_time = time.time()
FAILED_E = 1e300
Lambdas = np.array([0.1,0.5,1.0,5.0,10.0,50.0,100.0,500.0],float)
k24,omega,Lindex = sys.argv[1],sys.argv[2],int(sys.argv[3])
gammas = np.linspace(0.01,0.4,num=101,endpoint=True)
scan = {}
scan['\\Lambda'] = str(Lambdas[Lindex])
scan['\\omega']= omega
scan['k_{24}'] = k24
loadsuf=["K_{33}","k_{24}","\\Lambda","\\omega","\\gamma_s"]
savesuf=["K_{33}","k_{24}","\\Lambda","\\omega"]
scan_dir = "scanforward"
i = 0
while (i <len(gammas)):
gamma = gammas[i]
scan['\\gamma_s'] = str(gamma)
# read in file name info
rp = ReadParams(scan=scan,loadsuf=loadsuf,savesuf=savesuf)
# create a class to do calculations with current parameters in scan.
run = SingleRun(rp,scan_dir=scan_dir)
# run C executable.
run.run_exe()
# move file written by C executable from temporary data path to true data path
run.mv_file('observables')
# load the final values of E, R, eta, delta, and surface twist.
Ei,Ri,etai,deltai,surftwisti = run.get_all_observables('observables',str2float=True)
if (Ei > 0.1*FAILED_E and gamma > 0.15):
# if the energy calculation fails, this will be true.
print('hi')
# remove current file with observables for the current gamma value that are higher than
# the delta = 0 energy.
print(Ei)
run.remove_file("observables")
for j,gamma in enumerate(gammas[i:]):
# write the remaining values of observables as those corresponding to the delta = 0
# case, as non-zero d-band produces a higher energy fibril.
scan['\\gamma_s']=str(gamma)
rp = ReadParams(scan=scan,loadsuf=loadsuf,savesuf=savesuf)
run = SingleRun(rp,scan_dir=scan_dir)
run.write_observables(E0,R0,eta0,delta0,surftwist0,"\\gamma_s")
break
if (np.isnan(Ri) or Ri <= 0) and gamma > 0.15:
# if Ri is infinite, then the calculation failed.
# Retry it with a different initial guess.
print("Ri is NAN, trying again with Rguess = 1.0")
# remove the current observables file, so that a new one can be written.
run.remove_file("observables")
if abs(float(scan['Rguess'])-1.0)>1e-10:
Ri = 1.0
else:
break
else:
# calculation ran smoothly.
run.concatenate_observables("\\gamma_s")
i+= 1
Rguess,etaguess,deltaguess = str(Ri),str(etai),str(deltai)
if not np.isnan(float(Rguess)):
scan['Rguess'] = Rguess
scan['Rupper'] = str(1.5*float(Rguess))
scan['Rlower'] = str(0.75*float(Rguess))
if not np.isnan(float(etaguess)):
scan['etaguess'] = etaguess
scan['etaupper'] = str(float(etaguess)+0.1)
scan['etalower'] = str(float(etaguess)-0.02)
if not (np.isnan(float(deltaguess))
or abs(float(deltaguess))<1e-5):
scan['deltaguess'] = deltaguess
scan['deltaupper'] = '0.818'
if float(deltaguess) < 0.81:
scan['deltalower'] = str(0.95*float(deltaguess))
else:
scan['deltalower'] = '0.81'
print(f"Took {(time.time()-start_time)/3600} hours to complete.")
| [
"samuel.j.m.cameron@gmail.com"
] | samuel.j.m.cameron@gmail.com |
7fcc04cd97bc7d308475cfdbc01f435f93e2b87d | 51dc3ab902fbbd335fde207e06967d636879c853 | /predict/urls.py | 083424ae89095eb35fe458b56e9abd529d360435 | [] | no_license | khanansha/uberprediction | 2c06cd9c8525b363e16a0ce58385b9c01344f8e2 | 8e8340f22b6b26aba0e164a295b344d34bfce19a | refs/heads/master | 2022-11-14T11:52:43.518135 | 2020-07-12T15:11:10 | 2020-07-12T15:11:10 | 279,088,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.urls import path
from . import views
urlpatterns = [
    # Landing page with the prediction input form.
    path('', views.index, name="index"),
    # Handles the submitted form data and returns the prediction.
    path('predict', views.predict, name="predict"),
    # Extra endpoint wired to views.check (purpose not visible here --
    # confirm against the view implementation).
    path('ran', views.check, name="check"),
]
| [
"anjumkhan88987@gmail.com"
] | anjumkhan88987@gmail.com |
b58b7a9b88a3b71f95a976dbe41b0191b110d78c | 20fd4e62f4272cd7de3f10dc5e33ec465d7b940e | /manage.py | eb775fafb1b1a0b1c9aecad954855c526867a7df | [] | no_license | EducationNinja/pair-example | dd96687629ed17ef87ca6dc08d9eec6dd7abd297 | 9d662451ed2bfe85be0779ad18326bee6b34e652 | refs/heads/master | 2022-12-13T14:05:35.457400 | 2020-09-06T07:43:24 | 2020-09-06T07:43:24 | 293,227,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pairs.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"hamzamakia@gmail.com"
] | hamzamakia@gmail.com |
1564892a3a1689c821d2bde692026a523dc875e1 | c3145ee041d4d3e0cf26ec260d9409da8e8b160a | /ask_a_librarian/migrations/0007_auto_20160602_1653.py | ebcfba0fbde59265834abb53742638e85bd4f3a5 | [] | no_license | jlundell-bot/library_website | 0b7cab541d3cf69dd97c7c8350e21315e9155798 | 59a5e48adf28ecbc43c7be145f9ec386b1066313 | refs/heads/master | 2021-06-21T14:16:57.542644 | 2017-08-18T15:51:44 | 2017-08-18T19:37:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,532 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-02 16:53
from __future__ import unicode_literals
import base.models
from django.db import migrations
import wagtail.contrib.table_block.blocks
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('ask_a_librarian', '0006_auto_20160328_1905'),
]
operations = [
migrations.AlterField(
model_name='askpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title', template='base/blocks/h2.html')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title', template='base/blocks/h3.html')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title', template='base/blocks/h4.html')), ('h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title', template='base/blocks/h5.html')), ('h6', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title', template='base/blocks/h6.html')), ('paragraph', wagtail.wagtailcore.blocks.StructBlock((('paragraph', wagtail.wagtailcore.blocks.RichTextBlock()),))), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('citation', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('caption', wagtail.wagtailcore.blocks.TextBlock(required=False)), ('alt_text', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('alignment', base.models.ImageFormatChoiceBlock()), ('source', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('lightbox', wagtail.wagtailcore.blocks.BooleanBlock(default=False, required=False))), label='Image')), ('blockquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock(required=False))))), ('button', wagtail.wagtailcore.blocks.StructBlock((('button_type', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('btn-primary', 'Primary'), ('btn-default', 'Secondary'), ('btn-reserve', 'Reservation')], default='btn-primary')), ('button_text', wagtail.wagtailcore.blocks.CharBlock(max_length=20)), ('link_external', wagtail.wagtailcore.blocks.URLBlock(required=False)), ('link_page', wagtail.wagtailcore.blocks.PageChooserBlock(required=False)), ('link_document', 
wagtail.wagtaildocs.blocks.DocumentChooserBlock(required=False))))), ('video', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), ('code', wagtail.wagtailcore.blocks.StructBlock((('language', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[('bash', 'Bash/Shell'), ('css', 'CSS'), ('html', 'HTML'), ('javascript', 'Javascript'), ('json', 'JSON'), ('ocaml', 'OCaml'), ('php5', 'PHP'), ('html+php', 'PHP/HTML'), ('python', 'Python'), ('scss', 'SCSS'), ('yaml', 'YAML')])), ('code', wagtail.wagtailcore.blocks.TextBlock())))), ('agenda_item', wagtail.wagtailcore.blocks.StructBlock((('start_time', wagtail.wagtailcore.blocks.TimeBlock(icon='time', required=False)), ('end_time', wagtail.wagtailcore.blocks.TimeBlock(icon='time', required=False)), ('session_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Title of the session. Can be used as title of the talk in some situations.', icon='title', required=False)), ('event', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Talk title, workshop title, etc.', required=False)), ('presenters', wagtail.wagtailcore.blocks.CharBlock(help_text='Comma separated list of presenters (if more than one)', required=False)), ('room_number', wagtail.wagtailcore.blocks.CharBlock(required=False)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(required=False)))), help_text='A talk or event with a title, presenter room number, and description', icon='edit', label=' '))), icon='date', template='base/blocks/agenda.html')), ('table', wagtail.contrib.table_block.blocks.TableBlock(table_options={'autoColumnSize': False, 'colHeaders': False, 'contextMenu': True, 'editor': 'text', 'height': 108, 'language': 'en', 'minSpareRows': 0, 'renderer': 'html', 'rowHeaders': False, 'startCols': 3, 'startRows': 3, 'stretchH': 'all'})))),
),
]
| [
"bbusenius@users.noreply.github.com"
] | bbusenius@users.noreply.github.com |
6588ac9881281d906936463f633ae4e4a3fa2047 | aa4024b6a846d2f6032a9b79a89d2e29b67d0e49 | /UMLRT2Kiltera_MM/graph_MT_pre__MetaModelElement_T.py | 4b496aa73b98fca4db94f79526f7d4b9f5446fc4 | [
"MIT"
] | permissive | levilucio/SyVOLT | 41311743d23fdb0b569300df464709c4954b8300 | 0f88827a653f2e9d3bb7b839a5253e74d48379dc | refs/heads/master | 2023-08-11T22:14:01.998341 | 2023-07-21T13:33:36 | 2023-07-21T13:33:36 | 36,246,850 | 3 | 2 | MIT | 2023-07-21T13:33:39 | 2015-05-25T18:15:26 | Python | UTF-8 | Python | false | false | 2,229 | py | """
__graph_MT_pre__MetaModelElement_T.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
________________________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_pre__MetaModelElement_T(graphEntity):
    """Auto-generated graphical appearance for MT_pre__MetaModelElement_T:
    a cyan rectangle with one connector point and a bold label bound to
    the semantic object's MT_label__ attribute."""

    def __init__(self, x, y, semObject = None):
        self.semanticObject = semObject
        self.sizeX, self.sizeY = 173, 91
        graphEntity.__init__(self, x, y)
        self.ChangesAtRunTime = 0
        self.constraintList = []
        # Kept for parity with the generated code path; value is unused.
        attribs = self.semanticObject.attributesToDraw() if self.semanticObject else None
        self.graphForms = []
        self.imageDict = self.getImageDict()

    def DrawObject(self, drawing, showGG = 0):
        self.dc = drawing
        if showGG and self.semanticObject:
            self.drawGGLabel(drawing)
        # Invisible oval used as the connector handle.
        handle = drawing.create_oval(self.translate([209.0, 88.0, 209.0, 88.0]),
                                     tags = (self.tag, 'connector'),
                                     outline = '', fill = '')
        self.connectors.append(handle)
        # Main body rectangle.
        handle = drawing.create_rectangle(self.translate([38.0, 38.0, 209.0, 127.0]),
                                          tags = self.tag, stipple = '', width = 1,
                                          outline = 'black', fill = 'cyan')
        self.gf5 = GraphicalForm(drawing, handle, "gf5")
        self.graphForms.append(self.gf5)
        # Bold label showing the MT_label__ attribute of the semantic object.
        helv12 = tkFont.Font(family="Helvetica", size=12, weight="bold")
        handle = drawing.create_text(self.translate([-3, -3]), font=helv12,
                                     tags = (self.tag, self.semanticObject.getClass()),
                                     fill = "black",
                                     text=self.semanticObject.MT_label__.toString())
        self.attr_display["MT_label__"] = handle
        self.gf_label = GraphicalForm(drawing, handle, 'gf_label', fontObject=helv12)
        self.graphForms.append(self.gf_label)

    def postCondition(self, actionID, *params):
        return None

    def preCondition(self, actionID, *params):
        return None

    def getImageDict(self):
        return dict()

new_class = graph_MT_pre__MetaModelElement_T
| [
"levi"
] | levi |
d24264f585c670c3ae7b5196462f19a5028873b6 | 3b243fdf3f0c667a497235cba5a6c683c65de528 | /owner/刘群/stocks_with_benchmark.py | fc7afb1d14bdac89087083d138ba896444ce074f | [] | no_license | dxcv/GsEnv | d78902c360669f86dfcb14a4ea197facc0de60ea | ca2cf55b4dbae09a3c85689c8dae104908060c86 | refs/heads/master | 2020-09-25T14:23:36.621085 | 2018-08-30T12:17:04 | 2018-08-30T12:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | # <codecell>
def stocks_with_benchmark():
    """Placeholder for the stocks-with-benchmark computation.

    Raises:
        NotImplementedError: always -- the body has not been written yet.
    """
    # NotImplementedError (a RuntimeError/Exception subclass) is the
    # idiomatic marker for an unwritten function; any existing
    # ``except Exception`` callers still catch it.
    raise NotImplementedError("To be implemented")
# <codecell>
| [
"shanzhen@graphstrategist.com"
] | shanzhen@graphstrategist.com |
b454d814b5714c7a47a34188e03b5bb70289c5f9 | c09817490b36beaea98abc8c955904528c5cd4fd | /tests/test_0058-detach-model-objects-from-files.py | 9229de78be7f626e95ef00c107e5a2b832e99960 | [
"BSD-3-Clause"
] | permissive | oshadura/uproot4 | 245b7e14a3341d87a9e655792c6ee912ad443586 | ee535f6632d371d82b5173a43d6445c854968315 | refs/heads/master | 2023-08-19T13:48:23.541016 | 2021-09-22T23:51:52 | 2021-09-22T23:51:52 | 287,539,468 | 0 | 0 | BSD-3-Clause | 2020-08-14T13:29:03 | 2020-08-14T13:29:02 | null | UTF-8 | Python | false | false | 2,524 | py | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import copy
import os
import pickle
import sys
import numpy
import pytest
import skhep_testdata
import uproot
def test_detachment():
with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
assert getattr(f["hpx"].file, "file_path", None) is not None
assert getattr(f["hpx"].file, "source", None) is None
assert getattr(f["ntuple"].file, "file_path", None) is not None
assert getattr(f["ntuple"].file, "source", None) is not None
with uproot.open(
skhep_testdata.data_path("uproot-small-evnt-tree-nosplit.root")
) as f:
array = f["tree/evt"].array(library="np", entry_stop=1)
assert getattr(array[0].file, "file_path", None) is not None
assert getattr(array[0].file, "source", None) is None
assert isinstance(
f.file.streamer_named("Event").file, uproot.reading.DetachedFile
)
assert (
str(f.file.streamer_named("Event").file_uuid)
== "9eebcae8-366b-11e7-ab9d-5e789e86beef"
)
def test_copy():
with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
original = f["hpx"]
original_file_path = original.file.file_path
reconstituted = copy.deepcopy(original)
reconstituted_file_path = reconstituted.file.file_path
assert original_file_path == reconstituted_file_path
def test_pickle():
with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
original = f["hpx"]
original_file_path = original.file.file_path
reconstituted = pickle.loads(pickle.dumps(original))
reconstituted_file_path = reconstituted.file.file_path
assert original_file_path == reconstituted_file_path
@pytest.mark.skipif(
sys.version_info < (3, 0),
reason="boost_histogram is wrapped with pybind11, which can't be pickled in Python 2.7.",
)
def test_pickle_boost():
boost_histogram = pytest.importorskip("boost_histogram")
with uproot.open(skhep_testdata.data_path("uproot-hepdata-example.root")) as f:
original = f["hpx"]
original_boost = original.to_boost()
reconstituted = pickle.loads(pickle.dumps(original))
reconstituted_boost = reconstituted.to_boost()
pickle.loads(pickle.dumps(original_boost))
pickle.loads(pickle.dumps(reconstituted_boost))
| [
"noreply@github.com"
] | oshadura.noreply@github.com |
18e2e4cc87d0809f28ab3d12ea626231b49c8132 | b183cb7aef5297d5e34df8f45fe14a253866164c | /src/kawaz/apps/stars/api/__init__.py | 3c0750bebfbdb9792a145160d1e16b02fe075bd9 | [] | no_license | mahesh-zot/Kawaz3rd | 2edf851d3a4bfd2067a031d776fa3b92ce9ca8ed | 7f69aff1ade0be38389c96c28723b2b491bb302c | refs/heads/master | 2020-04-14T01:10:17.304309 | 2015-05-01T14:43:03 | 2015-05-01T14:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23 | py | __author__ = 'giginet'
| [
"giginet.net@gmail.com"
] | giginet.net@gmail.com |
f6dab0896b1864866a10f0a2f3fe89e1a8b2b76d | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /histogrammode/docs/DeveloperGuide/txml2xml_html.py | f9b117894a239bb84a1432c1da25bbcd1186e193 | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 914 | py | # The following dictionary will be used to convert docbook
# xml templates (*.txml)
# to docbook xml source. All keys are map to values.
# There are now two files: web_absolutepath and web_relativepath
# web_absolutepath is for pdf format product. Because pdf cannot contain movies and some other medias, we have to used absolute web links
# web_relativepath is for html tar ball producet. It can contain everything that is documentation in one tar ball, so in most cases relative link is sufficient. Certainly there still are cases where you want to put up absolute links, for example those links to download binary installers, which should not be included in the documentation tar ball..
from webserver import webserver
urldict = {
'xxxWEBSITExxx': "../../..",
'xxxDOWNLOADSxxx': "%s/click_monitor" % webserver,
'xxxARCSBOOKxxx': "../../../ARCSBook/Inelastic_Book/latex/Inelastic_Book.pdf",
}
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
3894ab976831c651854a100add14f3fc3c94768b | a45b5742814cc51c706c707d3b86e4f0a97c864b | /lists/urls.py | 4474f773460fd2ba16f25eafd5e0007017886c98 | [] | no_license | stanislavBozhanov/superlists | 9c6f0628c0eb02f56e6d0eb1b232fac033edcbe9 | 3c4906004a878d00c2912dffed310e098c841043 | refs/heads/master | 2016-09-06T16:56:35.534154 | 2015-02-12T20:55:10 | 2015-02-12T20:55:10 | 29,201,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.conf.urls import patterns, url
from django.contrib import admin
# URL routes for the lists app (old-style string view references).
urlpatterns = patterns('',
    # Detail page for one list, captured by its numeric id.
    url(r'^(\d+)/$', 'lists.views.view_list', name='view_list'),
    # Endpoint that creates a new list (handled by lists.views.new_list).
    url(r'^new$', 'lists.views.new_list', name='new_list'),)
    # url(r'^admin/', include(admin.site.urls)),)
| [
"stanislav.bozhanov@gmail.com"
] | stanislav.bozhanov@gmail.com |
79ea5aa3a7c7fb2f51d6962419786b2159e9eff2 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Datasets/compress-the-string/Correct/093.py | 2787a29b807b15ae3f9657fc91f7ffa7d5c48381 | [] | no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | import itertools
# Run-length encode the input: for each run of equal characters, print a
# (count, value) tuple.  int(i) assumes the input consists of digits.
for i, j in itertools.groupby(input()):
    print(tuple((len(list(j)), int(i))), end = ' ')
"pratekshau@gmail.com"
] | pratekshau@gmail.com |
9c935b6339889fef28e5bb09557912ee3b39ac4f | c6bfa138886150d219b9086165a845a3542aca32 | /apps/home/serializers.py | cf7356ee02677f6bec7ff1346a70b393d2a87b84 | [] | no_license | pyeye/dub-back | 1192505eab64425d59af880703b9634b35384fd7 | 4079121cc7f825df6feedb4b8fbd842cfe4ec16d | refs/heads/master | 2023-02-28T15:40:04.784157 | 2021-02-11T12:08:59 | 2021-02-11T12:08:59 | 115,289,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from rest_framework import serializers
from .models import Banner, Advertisement
class BannerSerializer(serializers.ModelSerializer):
    """Serializer for Banner, exposing pk, title, image, url and extra."""
    class Meta:
        model = Banner
        fields = ('pk', 'title', 'image', 'url', 'extra')
class AdvertisementSerializer(serializers.ModelSerializer):
    """Serializer for Advertisement, exposing pk, title, image, url and extra."""
    class Meta:
        model = Advertisement
        fields = ('pk', 'title', 'image', 'url', 'extra')
| [
"pyeye.91@gmail.com"
] | pyeye.91@gmail.com |
955e892dde47cf831c34694c926ee24685eeb8e0 | 501615c82801733e69c7447ab9fd68d3883ed947 | /9527/.svn/pristine/95/955e892dde47cf831c34694c926ee24685eeb8e0.svn-base | 9329b5957bacc5793ce39718051eb56e10531980 | [] | no_license | az0ne/python | b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee | aec5d23bb412f7dfca374fb5c5b9988c1b817347 | refs/heads/master | 2021-07-18T02:08:46.314972 | 2017-10-27T06:23:36 | 2017-10-27T06:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,204 | # -*- coding: utf8 -*-
from db.api.apiutils import APIResult
from db.cores.mysqlconn import dec_make_conn_cursor
from utils import tool
from utils.tool import dec_timeit
from utils.logger import logger as log
@dec_make_conn_cursor
@dec_timeit
def get_equation_list(conn, cursor):
    """
    Fetch all formula records.
    :param conn: database connection (presumably injected by @dec_make_conn_cursor -- confirm)
    :param cursor: database cursor (presumably injected by @dec_make_conn_cursor -- confirm)
    :return: APIResult wrapping all (id, equation, description) rows
    """
    sql = """
    SELECT id, equation, description FROM mz_operation_equation
    """
    try:
        cursor.execute(sql)
        result = cursor.fetchall()
        log.info("query: %s" % cursor.statement)
    except Exception as e:
        # Log the failing statement, then re-raise for the caller to handle.
        log.warn(
            "execute exception: %s. "
            "statement:%s" % (e, cursor.statement))
        raise e
    return APIResult(result=result)
@dec_make_conn_cursor
@dec_timeit
def add_equation(conn, cursor, equation, description):
    """
    Insert a new formula.
    :param conn: database connection (presumably injected by @dec_make_conn_cursor -- confirm)
    :param cursor: database cursor (presumably injected by @dec_make_conn_cursor -- confirm)
    :param equation: formula text
    :param description: formula description
    :return: APIResult wrapping the id of the newly inserted row
    """
    sql = """
    INSERT INTO mz_operation_equation (
        equation, description
    )
    VALUES (%s, %s)
    """
    try:
        cursor.execute(sql, (equation, description))
        # Capture the auto-increment id before committing.
        e_id = cursor.lastrowid
        conn.commit()
        log.info("query: %s" % cursor.statement)
    except Exception as e:
        # Log the failing statement, then re-raise for the caller to handle.
        log.warn(
            "execute exception: %s. "
            "statement:%s" % (e, cursor.statement))
        raise e
    return APIResult(result=e_id)
@dec_make_conn_cursor
@dec_timeit
def del_equation(conn, cursor, e_id):
    """
    Delete a formula by id.
    :param conn: database connection (presumably injected by @dec_make_conn_cursor -- confirm)
    :param cursor: database cursor (presumably injected by @dec_make_conn_cursor -- confirm)
    :param e_id: id of the formula row to delete
    :return: APIResult wrapping True (exceptions are re-raised, never returned)
    """
    sql = """
    DELETE FROM mz_operation_equation WHERE id = %s
    """
    try:
        cursor.execute(sql, (e_id,))
        conn.commit()
        log.info("query: %s" % cursor.statement)
    except Exception as e:
        # Log the failing statement, then re-raise for the caller to handle.
        log.warn(
            "execute exception: %s. "
            "statement:%s" % (e, cursor.statement))
        raise e
    return APIResult(result=True)
| [
"1461847795@qq.com"
] | 1461847795@qq.com | |
3229389f779378c837f13abd5093dbfc188cf8fc | d9b53673b899a9b842a42060740b734bf0c63a31 | /leetcode/python/easy/p263_isUgly.py | 5fe4345ff3cf6f18d1402fe8bafc4a61b6c6929c | [
"Apache-2.0"
] | permissive | kefirzhang/algorithms | a8d656774b576295625dd663154d264cd6a6a802 | 549e68731d4c05002e35f0499d4f7744f5c63979 | refs/heads/master | 2021-06-13T13:05:40.851704 | 2021-04-02T07:37:59 | 2021-04-02T07:37:59 | 173,903,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | class Solution:
def __init__(self):
self.helper = {}
def isUgly(self, num):
if self.helper.__contains__(num):
return self.helper[num]
if num <= 0:
return False
if num == 1:
return True
base = [2, 3, 5]
if num % 2 == 0:
if num / 2 in base:
self.helper[num] = True
return True
case2 = self.isUgly(num / 2)
else:
case2 = False
if num % 3 == 0:
if num / 3 in base:
self.helper[num] = True
return True
case3 = self.isUgly(num / 3)
else:
case3 = False
if num % 5 == 0:
if num / 5 in base:
self.helper[num] = True
return True
case5 = self.isUgly(num / 5)
else:
case5 = False
if case2 or case3 or case5:
self.helper[num] = True
return True
else:
return False
# Classify a large sample value and print the verdict.
print(Solution().isUgly(2123366400))
| [
"8390671@qq.com"
] | 8390671@qq.com |
08d15901538579db4a4ac16f55acae810550d8ff | 1113c8d5689685106fd77363e5561006d8ecef0d | /confbusterplusplus/utils.py | 06c911d7c30c7936cd8cff9584f2f9012d42e6c6 | [
"MIT"
] | permissive | dsvatunek/ConfBusterPlusPlus | 238f73ab48e6d1d1491cbf4406acf828d76a56f9 | 2de751f409ffdb791d8b04fd4b3d08645beebaa6 | refs/heads/master | 2022-11-09T18:28:26.880541 | 2020-06-24T05:50:35 | 2020-06-24T05:50:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | """
MIT License
Copyright (c) 2019 e-dang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
github - https://github.com/e-dang
"""
import json
import os
from itertools import islice
from rdkit import Chem
def window(iterable, window_size):
    """Yield a sliding window of width *window_size* over *iterable*.

    s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...
    Yields nothing when the iterable holds fewer than window_size items.
    (Adapted from the classic itertools sliding-window recipe.)
    """
    iterator = iter(iterable)
    # Prime the first window; a short read means the iterable is too small.
    current = tuple(islice(iterator, window_size))
    if len(current) == window_size:
        yield current
    for item in iterator:
        # Slide by one: drop the oldest element, append the newest.
        current = current[1:] + (item,)
        yield current
def write_mol(mol, filepath, conf_id=None):
    """
    Writes a RDKit Mol to an sdf file. Can specify a specific conformer on the molecule or all conformers.

    Args:
        mol (RDKit Mol): The molecule to write to file.
        filepath (str): The filepath; expected to end in '.sdf'.
        conf_id (int, optional): The conformer id on the molecule to write to file. If None, then writes first
            conformer, if -1 then writes all conformers. Defaults to None.

    Returns:
        bool: True if successful.
    """

    # NOTE(review): a non-.sdf path is only reported here, not rejected --
    # the file is still written below; confirm this is intended.
    if filepath.split('.')[-1] != 'sdf':
        print('Error needs to be sdf file')

    writer = Chem.SDWriter(filepath)
    if conf_id is None:
        writer.write(mol)
    elif conf_id == -1:
        # Write every conformer currently stored on the molecule.
        for conf in mol.GetConformers():
            writer.write(mol, confId=conf.GetId())
    else:
        writer.write(mol, confId=conf_id)
    writer.close()
    return True
def file_rotator(filepath):
    """Return the first numbered variant of *filepath* that is not an existing file.

    Appends _0, _1, ... to the base file name (via attach_file_num) until the
    candidate path does not refer to an existing regular file.

    Args:
        filepath (str): The desired file name/path.

    Returns:
        str: The unique file name/path.
    """
    suffix = 0
    candidate = attach_file_num(filepath, suffix)
    # os.path.isfile() is False for missing paths and non-regular files alike,
    # which matches the original "exists and isfile" check.
    while os.path.isfile(candidate):
        suffix += 1
        candidate = attach_file_num(filepath, suffix)
    return candidate
def attach_file_num(filepath, file_num):
    """Append *file_num* to the base file name of *filepath*.

    Splits the absolute path on its final extension, appends '_<file_num>'
    to the stem, and reassembles the path.

    Args:
        filepath (str): The desired file path.
        file_num (int): The file number to attach to the path's base file name.

    Returns:
        str: The file path with the file number appended before the extension.
    """
    abs_path = os.path.abspath(filepath)
    # os.path.splitext handles base names with multiple dots (or none); the
    # previous basename.split('.') raised ValueError for anything other than
    # exactly one dot in the file name.
    root, ext = os.path.splitext(abs_path)
    return '{}_{}{}'.format(root, file_num, ext)
def list_embed_params(embed_params):
    """Collect the non-callable attributes of an embedding-parameters object.

    Args:
        embed_params (RDKit EmbedParameters): The embedding parameters.

    Returns:
        dict: attribute name -> value for every public, non-callable attribute.
    """
    params = {}
    for attr_name in dir(embed_params):
        if '__' in attr_name:
            continue  # skip python-internal attributes
        value = getattr(embed_params, attr_name)
        if callable(value):
            continue  # methods are not parameter values
        params[attr_name] = value
    return params
def is_json_serializable(value):
    """Return True if *value* can be serialized to JSON, False otherwise.

    json.dumps signals failure with TypeError for unsupported types,
    ValueError for circular references, and OverflowError for out-of-range
    numbers; all three are treated as "not serializable" here (the original
    caught only TypeError, letting circular structures raise).
    """
    try:
        json.dumps(value)
        return True
    except (TypeError, ValueError, OverflowError):
        return False
def terminate(message, code):
    """
    Helper function that terminates the process if command line argument validation fails.

    Args:
        message (str): The error message to print to the terminal.
        code (int): The error code to exit with.
    """
    print(message)
    # exit() raises SystemExit(code); a caller higher up could still catch it.
    exit(code)
| [
"edang830@gmail.com"
] | edang830@gmail.com |
b6230fc0f27c8b25a5a30e21ff1adf750f7f2d60 | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/ellecf/visualizing-titanic-data/visualizing-titanic-data.py | 2a423da53cf48d3cbf021e394da003ed61c61e90 | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[ ]:
#import data
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
# In[ ]:
#explore the data a little bit: column names, summary stats and a preview
print(train_data.columns.values)
print(train_data.describe())
train_data.head()
# In[ ]:
#count the missing (null) values in each column
print(train_data.isnull().sum())
# In[ ]:
#Look at the target, how many survivors?
train_data['Survived'].value_counts()
# In[ ]:
train_data['Survived'].astype(int).plot.hist();
# In[ ]:
#let's turn sex into a numerical feature instead of categorical
from sklearn.preprocessing import LabelEncoder
train_data['Sex'] = LabelEncoder().fit_transform(train_data['Sex'])
# In[ ]:
#handling missing values: fill missing Age entries with the column mean
#print(train_data.isnull().sum())
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
train_data['Age'] = imp.fit_transform(train_data['Age'].values.reshape(-1,1)).reshape(-1)
print(train_data.isnull().sum())
# In[ ]:
# Find correlations with the target and sort
correlations = train_data.corr()['Survived'].sort_values()
# Display correlations
print('Correlations: \n', correlations)
# In[ ]:
#let's look at how the variables correlate with each other
allcorr = train_data.corr()
allcorr  # bare expression: rendered as the cell's output in a notebook
# In[ ]:
# Heatmap of correlations
plt.figure(figsize = (8, 6))
sns.heatmap(allcorr, cmap = plt.cm.RdYlBu_r, vmin = -0.25, annot = True, vmax = 0.6)
plt.title('Correlation Heatmap');
# In[ ]:
plt.figure(figsize = (10, 8))
# KDE plot - smoothed histograms showing distribution of a variable for survived/died outcomes
sns.kdeplot(train_data.loc[train_data['Survived'] == 0, 'Age'], label = 'Survived == 0')
sns.kdeplot(train_data.loc[train_data['Survived'] == 1, 'Age'], label = 'Survived == 1')
# Labeling of plot
plt.xlabel('Age (years)'); plt.ylabel('Density'); plt.title('Distribution of Ages');
# In[ ]:
plt.figure(figsize = (10, 8))
# KDE plot - smoothed histograms showing distribution of a variable for survived/died outcomes
sns.kdeplot(train_data.loc[train_data['Survived'] == 0, 'Fare'], label = 'Survived == 0')
sns.kdeplot(train_data.loc[train_data['Survived'] == 1, 'Fare'], label = 'Survived == 1')
# Labeling of plot
plt.xlabel('Fare'); plt.ylabel('Density'); plt.title('Distribution of Fare');
# In[ ]:
plt.subplots(figsize = (15,10))
sns.barplot(x = "Pclass",
            y = "Survived",
            data=train_data,
            linewidth=2)
plt.title("Passenger Class Distribution - Survived vs Non-Survived", fontsize = 25)
plt.xlabel("Socio-Economic class", fontsize = 15);
plt.ylabel("% of Passenger Survived", fontsize = 15);
labels = ['Upper', 'Middle', 'Lower']
#val = sorted(train.Pclass.unique())
val = [0,1,2] ## this is just a temporary trick to get the label right.
plt.xticks(val, labels);
# In[ ]:
| [
"bitsorific@gmail.com"
] | bitsorific@gmail.com |
fdaf162ebeaaede570a86fafefae08a63d204cad | 32868580ddb697d3a9248952d34f2090e05325b5 | /team.py | c479ddca67df9303506f6d3eb165ffe25b57a928 | [] | no_license | DFettes/basketball-sim | e0e7b99c731654c5348e25c8d17dd49f0a3812ed | 6967fc39df7b1ce05705e32fd9d610e5874a7b5c | refs/heads/master | 2021-01-01T18:55:13.894081 | 2015-02-16T20:17:57 | 2015-02-16T20:17:57 | 30,477,659 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,536 | py | import player
class Team():
    """A basketball team: a named roster split into a starting five and bench.

    The class-level attributes document the per-instance state; each is
    rebound in __init__ or mutated as a simulated season progresses.
    """
    name = ''
    home = None
    away = None
    players = []
    on_floor = []
    on_bench = []
    points = 0
    possesions = 0
    wins = 0
    losses = 0
    season_points = 0

    # Rebounding weight per floor position, index 0 (lightest) to index 4
    # (heaviest). Shared by both rebounding methods so they stay consistent.
    _REBOUND_WEIGHTS = (0.13, 0.15, 0.19, 0.24, 0.29)

    def __init__(self, name, players):
        """Build a team from an ordered roster; the first five start.

        Args:
            name: Team name.
            players: Ordered roster of player objects exposing a
                `defense` dict with a 'rebounding' entry.
        """
        self.name = name
        self.players = players
        self.starters = players[:5]
        self.bench = players[5:]
        # Copies so in-game substitutions don't mutate the base lineups.
        self.on_floor = list(self.starters)
        self.on_bench = list(self.bench)

    def team_rebound_chance(self):
        """Return the team's weighted rebounding score for the five on floor.

        Bug fix: the original summed on_floor[1] twice (0.15 and 0.13
        terms) and never counted on_floor[0]; now every player contributes
        exactly once, matching the weights used in player_rebound_chance().
        """
        return sum(weight * player.defense['rebounding']
                   for weight, player in zip(self._REBOUND_WEIGHTS, self.on_floor))

    def player_rebound_chance(self, rand_reb):
        """Pick which on-floor player grabs a rebound.

        Args:
            rand_reb: Random draw in [0, 1); scaled against the cumulative
                weighted rebounding totals of the five players on the floor.

        Returns:
            The selected player object.
        """
        totals = []
        running_total = 0
        for player, weight in zip(self.on_floor, self._REBOUND_WEIGHTS):
            running_total += player.defense['rebounding'] * weight
            totals.append(running_total)
        # Scale the [0, 1) draw into the cumulative-weight space, then find
        # the first bucket it falls into.
        rand_reb *= running_total
        for i, total in enumerate(totals):
            if rand_reb < total:
                break
        return self.on_floor[i]
| [
"="
] | = |
f7192942ddd054fc9111259ee00647c89cc7ef96 | 7c22ef4ffcae7a870a395c2bf62b209ebf7c1c36 | /lib/nats_bench/api_topology.py | 5f669c77cbee2944de7b1b0c1c4e6aa2e1689b78 | [
"MIT"
] | permissive | gbzhuCherish/AutoDL-Projects | fb41207f2c428af7fd68b6318cc3503fe4c7f9b3 | bd9288f45d4160b81f66a48a3d794c3b445ccedf | refs/heads/master | 2022-12-08T09:19:07.870059 | 2020-09-05T10:40:29 | 2020-09-05T10:40:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,560 | py | #####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2020.08 #
##############################################################################
# NATS-Bench: Benchmarking NAS algorithms for Architecture Topology and Size #
#####################################################################################
# The history of benchmark files (the name is NATS-tss-[version]-[md5].pickle.pbz2) #
# [2020.08.31] #
#####################################################################################
import os, copy, random, numpy as np
from pathlib import Path
from typing import List, Text, Union, Dict, Optional
from collections import OrderedDict, defaultdict
import warnings
from .api_utils import time_string
from .api_utils import pickle_load
from .api_utils import ArchResults
from .api_utils import NASBenchMetaAPI
from .api_utils import remap_dataset_set_names
PICKLE_EXT = 'pickle.pbz2'
ALL_BASE_NAMES = ['NATS-tss-v1_0-xxxxx']
def print_information(information, extra_info=None, show=False):
  """Format (and optionally print) the per-dataset metrics of a result object.

  Args:
    information: object exposing arch_str, get_dataset_names(),
      get_compute_costs(dataset) and get_metrics(dataset, split).
    extra_info: free-form text echoed into the header line.
    show: when True, also print the joined lines to stdout.

  Returns:
    list of formatted strings: two header lines plus two lines per dataset
    (compute costs, then train/valid/test metrics).
  """
  dataset_names = information.get_dataset_names()
  strings = [information.arch_str, 'datasets : {:}, extra-info : {:}'.format(dataset_names, extra_info)]
  # Local formatter shared by every branch below.
  def metric2str(loss, acc):
    return 'loss = {:.3f}, top1 = {:.2f}%'.format(loss, acc)
  for ida, dataset in enumerate(dataset_names):
    metric = information.get_compute_costs(dataset)
    flop, param, latency = metric['flops'], metric['params'], metric['latency']
    str1 = '{:14s} FLOP={:6.2f} M, Params={:.3f} MB, latency={:} ms.'.format(dataset, flop, param, '{:.2f}'.format(latency*1000) if latency is not None and latency > 0 else None)
    train_info = information.get_metrics(dataset, 'train')
    # Each dataset names its evaluation splits differently.
    if dataset == 'cifar10-valid':
      valid_info = information.get_metrics(dataset, 'x-valid')
      str2 = '{:14s} train : [{:}], valid : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy']))
    elif dataset == 'cifar10':
      test__info = information.get_metrics(dataset, 'ori-test')
      str2 = '{:14s} train : [{:}], test : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy']))
    else:
      valid_info = information.get_metrics(dataset, 'x-valid')
      test__info = information.get_metrics(dataset, 'x-test')
      str2 = '{:14s} train : [{:}], valid : [{:}], test : [{:}]'.format(dataset, metric2str(train_info['loss'], train_info['accuracy']), metric2str(valid_info['loss'], valid_info['accuracy']), metric2str(test__info['loss'], test__info['accuracy']))
    strings += [str1, str2]
  if show: print('\n'.join(strings))
  return strings
"""
This is the class for the API of topology search space in NATS-Bench.
"""
class NATStopology(NASBenchMetaAPI):
""" The initialization function that takes the dataset file path (or a dict loaded from that path) as input. """
def __init__(self, file_path_or_dict: Optional[Union[Text, Dict]]=None, fast_mode: bool=False, verbose: bool=True):
self.filename = None
self._search_space_name = 'topology'
self._fast_mode = fast_mode
self._archive_dir = None
self.reset_time()
if file_path_or_dict is None:
file_path_or_dict = os.path.join(os.environ['TORCH_HOME'], ALL_BENCHMARK_FILES[-1])
print ('{:} Try to use the default NATS-Bench (topology) path from {:}.'.format(time_string(), file_path_or_dict))
if isinstance(file_path_or_dict, str) or isinstance(file_path_or_dict, Path):
file_path_or_dict = str(file_path_or_dict)
if verbose:
print('{:} Try to create the NATS-Bench (topology) api from {:}'.format(time_string(), file_path_or_dict))
if not os.path.isfile(file_path_or_dict) and not os.path.isdir(file_path_or_dict):
raise ValueError('{:} is neither a file or a dir.'.format(file_path_or_dict))
self.filename = Path(file_path_or_dict).name
if fast_mode:
if os.path.isfile(file_path_or_dict):
raise ValueError('fast_mode={:} must feed the path for directory : {:}'.format(fast_mode, file_path_or_dict))
else:
self._archive_dir = file_path_or_dict
else:
if os.path.isdir(file_path_or_dict):
raise ValueError('fast_mode={:} must feed the path for file : {:}'.format(fast_mode, file_path_or_dict))
else:
file_path_or_dict = pickle_load(file_path_or_dict)
elif isinstance(file_path_or_dict, dict):
file_path_or_dict = copy.deepcopy(file_path_or_dict)
self.verbose = verbose # [TODO] a flag indicating whether to print more logs
if isinstance(file_path_or_dict, dict):
keys = ('meta_archs', 'arch2infos', 'evaluated_indexes')
for key in keys: assert key in file_path_or_dict, 'Can not find key[{:}] in the dict'.format(key)
self.meta_archs = copy.deepcopy(file_path_or_dict['meta_archs'])
# This is a dict mapping each architecture to a dict, where the key is #epochs and the value is ArchResults
self.arch2infos_dict = OrderedDict()
self._avaliable_hps = set()
for xkey in sorted(list(file_path_or_dict['arch2infos'].keys())):
all_info = file_path_or_dict['arch2infos'][xkey]
hp2archres = OrderedDict()
for hp_key, results in all_infos.items():
hp2archres[hp_key] = ArchResults.create_from_state_dict(results)
self._avaliable_hps.add(hp_key) # save the avaliable hyper-parameter
self.arch2infos_dict[xkey] = hp2archres
self.evaluated_indexes = list(file_path_or_dict['evaluated_indexes'])
elif self.archive_dir is not None:
benchmark_meta = pickle_load('{:}/meta.{:}'.format(self.archive_dir, PICKLE_EXT))
self.meta_archs = copy.deepcopy(benchmark_meta['meta_archs'])
self.arch2infos_dict = OrderedDict()
self._avaliable_hps = set()
self.evaluated_indexes = set()
else:
raise ValueError('file_path_or_dict [{:}] must be a dict or archive_dir must be set'.format(type(file_path_or_dict)))
self.archstr2index = {}
for idx, arch in enumerate(self.meta_archs):
assert arch not in self.archstr2index, 'This [{:}]-th arch {:} already in the dict ({:}).'.format(idx, arch, self.archstr2index[arch])
self.archstr2index[arch] = idx
if self.verbose:
print('{:} Create NATS-Bench (topology) done with {:}/{:} architectures avaliable.'.format(
time_string(), len(self.evaluated_indexes), len(self.meta_archs)))
  def reload(self, archive_root: Text = None, index: int = None):
    """Overwrite all information of the 'index'-th architecture in the search space.
    It will load its data from 'archive_root'.

    When archive_root is None, tries the default $TORCH_HOME location and
    then falls back to self.archive_dir. When index is None, every
    architecture is reloaded.
    """
    if self.verbose:
      print('{:} Call clear_params with archive_root={:} and index={:}'.format(
            time_string(), archive_root, index))
    if archive_root is None:
      archive_root = os.path.join(os.environ['TORCH_HOME'], '{:}-full'.format(ALL_BASE_NAMES[-1]))
      if not os.path.isdir(archive_root):
        warnings.warn('The input archive_root is None and the default archive_root path ({:}) does not exist, try to use self.archive_dir.'.format(archive_root))
        archive_root = self.archive_dir
    if archive_root is None or not os.path.isdir(archive_root):
      raise ValueError('Invalid archive_root : {:}'.format(archive_root))
    if index is None:
      indexes = list(range(len(self)))
    else:
      indexes = [index]
    for idx in indexes:
      assert 0 <= idx < len(self.meta_archs), 'invalid index of {:}'.format(idx)
      # Data files are named either zero-padded ('000123.*') or plain ('123.*').
      xfile_path = os.path.join(archive_root, '{:06d}.{:}'.format(idx, PICKLE_EXT))
      if not os.path.isfile(xfile_path):
        xfile_path = os.path.join(archive_root, '{:d}.{:}'.format(idx, PICKLE_EXT))
      assert os.path.isfile(xfile_path), 'invalid data path : {:}'.format(xfile_path)
      xdata = pickle_load(xfile_path)
      assert isinstance(xdata, dict), 'invalid format of data in {:}'.format(xfile_path)
      # NOTE(review): evaluated_indexes is a list in non-fast mode but a set
      # in fast mode; .add() assumes the set form -- confirm.
      self.evaluated_indexes.add(idx)
      hp2archres = OrderedDict()
      for hp_key, results in xdata.items():
        hp2archres[hp_key] = ArchResults.create_from_state_dict(results)
        self._avaliable_hps.add(hp_key)
      self.arch2infos_dict[idx] = hp2archres
self.arch2infos_dict[idx] = hp2archres
  def query_info_str_by_arch(self, arch, hp: Text='12'):
    """ This function is used to query the information of a specific architecture.
    'arch' can be an architecture index or an architecture string.
    When hp=12, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/12E.config'
    When hp=200, the hyper-parameters used to train a model are in 'configs/nas-benchmark/hyper-opts/200E.config'
    The difference between these configurations is the number of training epochs.
    """
    if self.verbose:
      print('{:} Call query_info_str_by_arch with arch={:} and hp={:}'.format(time_string(), arch, hp))
    # Delegate to the base-class helper, using this module's formatter.
    return self._query_info_str_by_arch(arch, hp, print_information)
# obtain the metric for the `index`-th architecture
# `dataset` indicates the dataset:
# 'cifar10-valid' : using the proposed train set of CIFAR-10 as the training set
# 'cifar10' : using the proposed train+valid set of CIFAR-10 as the training set
# 'cifar100' : using the proposed train set of CIFAR-100 as the training set
# 'ImageNet16-120' : using the proposed train set of ImageNet-16-120 as the training set
# `iepoch` indicates the index of training epochs from 0 to 11/199.
# When iepoch=None, it will return the metric for the last training epoch
# When iepoch=11, it will return the metric for the 11-th training epoch (starting from 0)
# `use_12epochs_result` indicates different hyper-parameters for training
# When use_12epochs_result=True, it trains the network with 12 epochs and the LR decayed from 0.1 to 0 within 12 epochs
# When use_12epochs_result=False, it trains the network with 200 epochs and the LR decayed from 0.1 to 0 within 200 epochs
# `is_random`
# When is_random=True, the performance of a random architecture will be returned
# When is_random=False, the performanceo of all trials will be averaged.
  def get_more_info(self, index, dataset, iepoch=None, hp='12', is_random=True):
    """Return a flat dict of train/valid/test metrics for one architecture.

    See the comment block above for the meaning of `index`, `dataset`,
    `iepoch`, `hp` and `is_random`. Keys follow the pattern
    '<split>-loss/-accuracy/-per-time/-all-time'; splits missing for a
    dataset are simply absent from the dict.
    """
    if self.verbose:
      print('{:} Call the get_more_info function with index={:}, dataset={:}, iepoch={:}, hp={:}, and is_random={:}.'.format(
          time_string(), index, dataset, iepoch, hp, is_random))
    index = self.query_index_by_arch(index)  # To avoid the input is a string or an instance of a arch object
    self._prepare_info(index)
    if index not in self.arch2infos_dict:
      raise ValueError('Did not find {:} from arch2infos_dict.'.format(index))
    archresult = self.arch2infos_dict[index][str(hp)]
    # if randomly select one trial, select the seed at first
    if isinstance(is_random, bool) and is_random:
      seeds = archresult.get_dataset_seeds(dataset)
      is_random = random.choice(seeds)
    # collect the training information
    train_info = archresult.get_metrics(dataset, 'train', iepoch=iepoch, is_random=is_random)
    total = train_info['iepoch'] + 1
    xinfo = {'train-loss' : train_info['loss'],
             'train-accuracy': train_info['accuracy'],
             'train-per-time': train_info['all_time'] / total if train_info['all_time'] is not None else None,
             'train-all-time': train_info['all_time']}
    # collect the evaluation information
    # NOTE(review): the bare 'except:' clauses below also swallow
    # KeyboardInterrupt; 'except Exception:' would be safer.
    if dataset == 'cifar10-valid':
      valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random)
      try:
        test_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random)
      except:
        test_info = None
      valtest_info = None
    else:
      try: # collect results on the proposed test set
        if dataset == 'cifar10':
          test_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random)
        else:
          test_info = archresult.get_metrics(dataset, 'x-test', iepoch=iepoch, is_random=is_random)
      except:
        test_info = None
      try: # collect results on the proposed validation set
        valid_info = archresult.get_metrics(dataset, 'x-valid', iepoch=iepoch, is_random=is_random)
      except:
        valid_info = None
      try:
        if dataset != 'cifar10':
          valtest_info = archresult.get_metrics(dataset, 'ori-test', iepoch=iepoch, is_random=is_random)
        else:
          valtest_info = None
      except:
        valtest_info = None
    # Fold whichever splits were found into the flat result dict.
    if valid_info is not None:
      xinfo['valid-loss'] = valid_info['loss']
      xinfo['valid-accuracy'] = valid_info['accuracy']
      xinfo['valid-per-time'] = valid_info['all_time'] / total if valid_info['all_time'] is not None else None
      xinfo['valid-all-time'] = valid_info['all_time']
    if test_info is not None:
      xinfo['test-loss'] = test_info['loss']
      xinfo['test-accuracy'] = test_info['accuracy']
      xinfo['test-per-time'] = test_info['all_time'] / total if test_info['all_time'] is not None else None
      xinfo['test-all-time'] = test_info['all_time']
    if valtest_info is not None:
      xinfo['valtest-loss'] = valtest_info['loss']
      xinfo['valtest-accuracy'] = valtest_info['accuracy']
      xinfo['valtest-per-time'] = valtest_info['all_time'] / total if valtest_info['all_time'] is not None else None
      xinfo['valtest-all-time'] = valtest_info['all_time']
    return xinfo
  def show(self, index: int = -1) -> None:
    """This function will print the information of a specific (or all) architecture(s).

    Delegates to the base-class `_show` with this module's `print_information`
    as the formatter.
    """
    self._show(index, print_information)
@staticmethod
def str2lists(arch_str: Text) -> List[tuple]:
"""
This function shows how to read the string-based architecture encoding.
It is the same as the `str2structure` func in `AutoDL-Projects/lib/models/cell_searchs/genotypes.py`
:param
arch_str: the input is a string indicates the architecture topology, such as
|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|
:return: a list of tuple, contains multiple (op, input_node_index) pairs.
:usage
arch = api.str2lists( '|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|' )
print ('there are {:} nodes in this arch'.format(len(arch)+1)) # arch is a list
for i, node in enumerate(arch):
print('the {:}-th node is the sum of these {:} nodes with op: {:}'.format(i+1, len(node), node))
"""
node_strs = arch_str.split('+')
genotypes = []
for i, node_str in enumerate(node_strs):
inputs = list(filter(lambda x: x != '', node_str.split('|')))
for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)
inputs = ( xi.split('~') for xi in inputs )
input_infos = tuple( (op, int(IDX)) for (op, IDX) in inputs)
genotypes.append( input_infos )
return genotypes
@staticmethod
def str2matrix(arch_str: Text,
search_space: List[Text] = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']) -> np.ndarray:
"""
This func shows how to convert the string-based architecture encoding to the encoding strategy in NAS-Bench-101.
:param
arch_str: the input is a string indicates the architecture topology, such as
|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|
search_space: a list of operation string, the default list is the topology search space for NATS-BENCH.
the default value should be be consistent with this line https://github.com/D-X-Y/AutoDL-Projects/blob/master/lib/models/cell_operations.py#L24
:return
the numpy matrix (2-D np.ndarray) representing the DAG of this architecture topology
:usage
matrix = api.str2matrix( '|nor_conv_1x1~0|+|none~0|none~1|+|none~0|none~1|skip_connect~2|' )
This matrix is 4-by-4 matrix representing a cell with 4 nodes (only the lower left triangle is useful).
[ [0, 0, 0, 0], # the first line represents the input (0-th) node
[2, 0, 0, 0], # the second line represents the 1-st node, is calculated by 2-th-op( 0-th-node )
[0, 0, 0, 0], # the third line represents the 2-nd node, is calculated by 0-th-op( 0-th-node ) + 0-th-op( 1-th-node )
[0, 0, 1, 0] ] # the fourth line represents the 3-rd node, is calculated by 0-th-op( 0-th-node ) + 0-th-op( 1-th-node ) + 1-th-op( 2-th-node )
In the topology search space in NATS-BENCH, 0-th-op is 'none', 1-th-op is 'skip_connect',
2-th-op is 'nor_conv_1x1', 3-th-op is 'nor_conv_3x3', 4-th-op is 'avg_pool_3x3'.
:(NOTE)
If a node has two input-edges from the same node, this function does not work. One edge will be overlapped.
"""
node_strs = arch_str.split('+')
num_nodes = len(node_strs) + 1
matrix = np.zeros((num_nodes, num_nodes))
for i, node_str in enumerate(node_strs):
inputs = list(filter(lambda x: x != '', node_str.split('|')))
for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)
for xi in inputs:
op, idx = xi.split('~')
if op not in search_space: raise ValueError('this op ({:}) is not in {:}'.format(op, search_space))
op_idx, node_idx = search_space.index(op), int(idx)
matrix[i+1, node_idx] = op_idx
return matrix
| [
"280835372@qq.com"
] | 280835372@qq.com |
8481b5c0414c9fae0193163d79b056ffc12d6171 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/132/usersdata/158/41060/submittedfiles/al14.py | 40febf8cf6f9e392b4e3564460cc1cf93bff2615 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # -*- coding: utf-8 -*-
q=int(input('digite quantidade de pessoas:'))
soma=0
for i in range(1,q+1,1):
a=int(input('digite idade:'))
soma=soma+a
media=soma/q
print(media)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ea7a9906120df3389f9d770640964c92ab508a71 | 48e1ac111f48bf27b03625f81887a8eaef4d505d | /old/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/disks/delete.py | f25effc93aad64441570fff0441854a5c503fad6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | altock/dev | 74350528ea570925e8fbc584c64939cae86f6ea7 | 90d87b2adb1eab7f218b075886aa620d8d6eeedb | refs/heads/master | 2021-07-10T08:31:48.080736 | 2017-04-15T03:04:12 | 2017-04-15T03:04:12 | 23,088,790 | 0 | 1 | null | 2020-07-25T04:32:05 | 2014-08-18T22:33:25 | Python | UTF-8 | Python | false | false | 668 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for deleting disks."""
from googlecloudsdk.compute.lib import base_classes
class Delete(base_classes.ZonalDeleter):
  """Delete Google Compute Engine disks."""
  @property
  def service(self):
    # The compute API collection this deleter operates on.
    return self.context['compute'].disks
  @property
  def resource_type(self):
    # Name of the resource collection handled by this command.
    return 'disks'
# Help text consumed by the gcloud command framework.
Delete.detailed_help = {
    'brief': 'Delete Google Compute Engine persistent disks',
    'DESCRIPTION': """\
        *{command}* deletes one or more Google Compute Engine
        persistent disks. Disks can be deleted only if they are not
        being used by any virtual machine instances.
        """,
}
| [
"sjs382@cornell.edu"
] | sjs382@cornell.edu |
e5731ef0207cc76612873575e5242ec5f23089fb | f5b5a6e3f844d849a05ff56c497638e607f940e0 | /capitulo 10/capitulo 10/10.19 - A Classe Telefone.py | 296d8a4c18cbc78858609e9bcba6515cf9c7a413 | [] | no_license | alexrogeriodj/Caixa-Eletronico-em-Python | 9237fa2f7f8fab5f17b7dd008af215fb0aaed29f | 96b5238437c88e89aed7a7b9c34b303e1e7d61e5 | refs/heads/master | 2020-09-06T21:47:36.169855 | 2019-11-09T00:22:14 | 2019-11-09T00:22:14 | 220,563,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 10\10.19 - A Classe Telefone.py
##############################################################################
class Telefone:
def __init__(self, número, tipo=None):
self.número = número
self.tipo = tipo
def __str__(self):
if self.tipo!=None:
tipo = self.tipo
else:
tipo = ""
return "{0} {1}".format(self.número, tipo)
def __eq__(self, outro):
return self.número == outro.número and (
(self.tipo == outro.tipo) or (
self.tipo == None or outro.tipo == None))
@property
def número(self):
return self.__número
@número.setter
def número(self, valor):
if valor == None or not valor.strip():
raise ValueError("Número não pode ser None ou em branco")
self.__número = valor
| [
"noreply@github.com"
] | alexrogeriodj.noreply@github.com |
4702d54f37fff3cf72b64a1597ce0c7e5bd714cb | f3b233e5053e28fa95c549017bd75a30456eb50c | /p38a_input/L2H/2H-2J_MD_NVT_rerun/set.py | 5afa80ee2bc0262d78b8bf60e22dc32a6544ec71 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
dir = '/mnt/scratch/songlin3/run/p38a/L2H/MD/ti_one-step/2H_2J/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../2H-2J_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
011202b97c3d525aba76422bc4ac0353c4ff9d47 | 8cfdc0fb2e1e34b5963badacaf4be853134abf48 | /MySQL_CRUD/createtable_users.py | 67133445c8e0f309a62b017d87e9f12773cb86a6 | [] | no_license | turamant/ToolKit | b88e36ce986cc1a25628409c317930245cc260f5 | 343daa8238cc1fd247d7c06fad8e5c4c729dd0f9 | refs/heads/main | 2023-06-30T11:54:14.130454 | 2021-08-04T22:00:37 | 2021-08-04T22:00:37 | 385,747,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | import sys
import MySQLdb
conn = MySQLdb.connect(host="localhost",
user="user1",
passwd="password1",
db="shop")
cursor = conn.cursor()
try:
cursor.execute("""
create table users (id int primary key,
firstName varchar(20),
lastName varchar(30),
password varchar(12))
""")
except MySQLdb.OperationalError:
print("Table 'users' already exists")
sys.exit(1)
cursor.close()
conn.commit()
conn.close()
| [
"tur1amant@gmail.com"
] | tur1amant@gmail.com |
721beb1176947744136d45e3c6de8ce8515fe84c | 420d4cf595fc8f28be0415aec70a4087e157555c | /Fluent_Python/Day35/tuple_unpacking_is_faster.py | 009896d6908736fa11385ba45fdc76243da6281f | [] | no_license | davidbegin/python-in-the-morning | 8cf89e62e7e8a2df5b8f875aae3cc7815545ad61 | aa4a271d1df0ce0a82d776c0955c1f20deb50937 | refs/heads/master | 2020-09-28T09:55:50.723066 | 2020-08-06T01:17:24 | 2020-08-06T01:17:24 | 226,753,142 | 23 | 3 | null | 2020-03-04T06:36:30 | 2019-12-09T00:31:09 | Python | UTF-8 | Python | false | false | 413 | py | import timeit
import array
print('\033c')
print("\n\t\t\033[36;1;6;4mPerf Tests!\033[0m\n\n")
TIMES = 10000000
SETUP = """
two_elem = ("cool_thang", "no nobody cares")
"""
def clock(label, cmd):
res = timeit.repeat(cmd, setup=SETUP, number=TIMES)
print(label, *('{:.4f}'.format(x) for x in res))
clock("[0] : ", "meth_name = two_elem[0]")
clock("tuple_unpack: ", "meth_name, _ = two_elem")
| [
"davidmichaelbe@gmail.com"
] | davidmichaelbe@gmail.com |
677490ba39e51883cc0eb61a5fc89eeeb212e873 | f0c402d3858f0643561886797578b1e64655b1b3 | /utils/regression/tests/smoke_check/test_critical_section.py | 9e48489ec81279eac77e4e572fb958a6b32d4f41 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Leo-Wang-JL/force-riscv | 39ad2a72abd814df4b63879ce9825b6b06a9391a | deee6acaaee092eb90ac2538de122303334e5be3 | refs/heads/master | 2023-01-28T00:06:58.135651 | 2020-11-18T02:54:10 | 2020-11-18T02:54:10 | 271,873,013 | 0 | 0 | NOASSERTION | 2020-06-28T00:51:26 | 2020-06-12T19:15:26 | C++ | UTF-8 | Python | false | false | 1,105 | py | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# test_critical_section.py
from shared.path_utils import PathUtils
from shared.sys_utils import SysUtils
from shared.msg_utils import Msg
from unit_test import UnitTest
from shared.kernel_objs import HiCriticalSection
from shared.threads import HiThread
class UnitTest_HiCriticalSection( UnitTest ):
def run_test( self ):
Msg.info( "HiCriticalSection: Start Unit Test ..." )
def process_result( self ):
Msg.info( "HiCriticalSection: Process Test Result ..." )
| [
"jwang1@futurewei.com"
] | jwang1@futurewei.com |
f02aff8cc71531fdfa7921a01824d76da76292b0 | df30f97d316e899b07b223bc86cfe53345627f06 | /problems/test2/2.py | 62ecd3c40d29a1e559f5c4cedc42424ca0435ac4 | [] | no_license | GH-Lim/AlgorithmPractice | c6a3aa99fa639aa23d685ae14c1754e0605eaa98 | e7b8de2075348fb9fcc34c1d7f211fdea3a4deb0 | refs/heads/master | 2021-06-18T17:21:10.923380 | 2021-04-18T03:43:26 | 2021-04-18T03:43:26 | 199,591,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(S):
# write your code in Python 3.6
temp = S.split()
temp = ''.join(temp)
temp = temp.split('-')
temp = ''.join(temp)
ans = ''
len_temp = len(temp)
if len_temp % 3 == 0:
k = len_temp // 3
for i in range(k):
ans += temp[3 * i: 3 * i + 3]
if i != k - 1:
ans += '-'
elif len_temp % 3 == 2:
k = len_temp // 3
for i in range(k):
ans += temp[3 * i: 3 * i + 3]
ans += '-'
ans += temp[-2:]
else:
k = len_temp // 3 - 1
for i in range(k):
ans += temp[3 * i: 3 * i + 3]
ans += '-'
ans += temp[-4:-2]
ans += '-'
ans += temp[-2:]
return ans | [
"gunhyuck11@gmail.com"
] | gunhyuck11@gmail.com |
d16f78aa40e2a936776a27b16e5f84c4959b9d0d | d7d010a85125676b82df0fb5b010fdcc0d4c48f8 | /continuum_normalise.py | 0bd29339d263559631aefb1e92c1ad2e75689f6b | [] | no_license | conradtchan/slomp | 1b9b09bccf18cfccb62cac3b8a1880ff108051f1 | c8f6b2a424bfd3913418538c016cf05649222701 | refs/heads/master | 2022-03-03T01:09:41.123493 | 2018-04-11T13:38:47 | 2018-04-11T13:38:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py |
"""
An example script to pseudo-continuum-normalise a single LAMOST spectrum and
save the result to disk.
"""
import matplotlib.pyplot as plt
import pickle
import lamost
# When loading spectra, let's resample them onto a common wavelength scale.
# This makes it easier for any data-driven model or classifier.
with open("common_vac_wavelengths.pkl", "rb") as fp:
common_dispersion = pickle.load(fp)
# wget http://dr3.lamost.org/sas/fits/B5591606/spec-55916-B5591606_sp03-051.fits.gz
# gunzip spec-55916-B5591606_sp03-051.fits.gz
input_path = "spec-55916-B5591606_sp03-051.fits"
dispersion, flux, ivar, meta = lamost.read_dr3_spectrum(input_path,
common_dispersion=common_dispersion)
norm_flux, norm_ivar = lamost.continuum_normalize(dispersion, flux, ivar)
fig, ax = plt.subplots(2)
ax[0].plot(dispersion, flux, c="k")
ax[1].plot(dispersion, norm_flux, c="k")
output_path = "{}.pkl".format(input_path[:-5])
with open(output_path, "wb") as fp:
# We don't save the dispersion array because it is already stored in
# common_vac_wavelengths.pkl
pickle.dump((norm_flux, norm_ivar, meta), fp)
| [
"andycasey@gmail.com"
] | andycasey@gmail.com |
b8fe8a23762651dd6136f569c4efcf35503ee3f6 | ed269e9a4d9d6bfbb833381b7aef65a23f391fe2 | /数据结构 以及部分基础模板算法/树/树demo.py | 301e62346dba2c25ee64ffa1202ee2fc4952b4c3 | [] | no_license | Comyn-Echo/leeCode | fcff0d4c4c10209a47bd7c3204e3f64565674c91 | 67e9daecb7ffd8f7bcb2f120ad892498b1219327 | refs/heads/master | 2023-04-28T17:35:52.963069 | 2021-05-19T01:52:16 | 2021-05-19T01:52:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | class node():
def __init__(self):
self.parent =None
self.chrldren = {} #不重复的子节点, 而且索引效率高
self.val = None
def show(self, deep=0):
print(deep * '--' , end=' ')
print(self.val)
for i in self.chrldren:
child = self.chrldren[i]
child.show(deep +2)
# 前, 中, 后 序遍历树, 这里是前序
def travel(self):
print(self.val)
for i in self.chrldren:
child = self.chrldren[i]
child.travel()
#插入节点, 用层级关系, val作为纽带
def __insert(self, List, position=1):
#到了最后一个节点
if position == len(List):
return
now = List[position]
# 已经存在就继续递进
if now in self.chrldren:
self.chrldren[now].__insert(List, position +1)
# 不存在,先创建,再继续
elif now not in self.chrldren:
tmp = node()
tmp.val = now
tmp.parent = self
self.chrldren[now] = tmp
self.chrldren[now].__insert(List, position +1)
def insert(self, List):
#root 存在值
if self.val:
if self.val == List[0]:
self.__insert(List, position=1)
else:
print('根节点对不上')
else:
self.val = List[0]
self.insert(List)
def delete(self, List):
pass
if __name__ == '__main__':
tree = node()
import random
for i in range(20):
a = [random.randint(0,10) for i in range(5)]
tree.insert(a)
tree.show() | [
"2892211452aa@gmail.com"
] | 2892211452aa@gmail.com |
124e2989af1e5ededf8a9f2db1e26439c135c9e8 | 5a01497e7c29e2488b6a4cb0478405239375eb66 | /apetools/affectors/elexol/elexol.py | 47a5238818a131ea557b0397c81579fbe58ea635 | [
"Apache-2.0"
] | permissive | russell-n/oldape | 8b4d9e996181dc1c7175f72d75c6193443da591b | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | refs/heads/master | 2021-05-30T20:02:18.895922 | 2016-03-27T04:38:18 | 2016-03-27T04:38:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,509 | py |
# python standard library
import socket
class elexol24(object):
"""
A class for UDP socket communication with the Elexol24 Ethernet I/O board.
The class must be instantiated using the IP address as an argument, for example::
a = elexol.elexol24("192.168.20.68")
The UDP port is set at 2424 (the Elexol default). After connecting with the device all pins are set to output mode and cleared (set to zero).
The most useful methods are probably ``setpin24()``, ``setxpin24()``, and ``getpin24()``, as they don't require any messing with the ports on the Elexol board.
"""
UDP_PORT = 2424
HOST = ''
def __init__(self, IP, clear = True, retry=5, ports="ABC", pins=8):
"""
Constructor - Establishes communication with the Elexol24 and by default sets all pins to output mode.
:param:
- 'IP' : a string containing the IP address of the Elexol24. Connection is made on port 2424.
- `ports`: The Identifier for the serial port on the elexol
- `pins`: The number of pins per port
"""
self.closed = False
self.IP = IP
self.MAX_RETRY = retry
self.ports = ports
self.pins = pins
self.clear = clear
self._socket = None
return
@property
def socket(self):
"""
:return: UPD socket
"""
if self._socket is None:
try:
# set up socket
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# bind to port
self._socket.bind((self.HOST, self.UDP_PORT))
self._socket.settimeout(0.5)
# set port directions to output by default
for port in self.ports:
self.setportdirection(port, 0)
# set all output to zero
if self.clear:
self.clearall()
except socket.error:
print "elexol24: ", socket.error
return self._socket
def tryrecv(self, message):
try_num = 0
msg = None
while (try_num < self.MAX_RETRY):
self.trysend(message)
try_num = try_num + 1
try:
msg = self.socket.recv(2)
break
except socket.timeout:
print "elexol24.getport: socket timeout"
except socket.error:
print "elexol24.getport: ", socket.error
return msg
def trysend(self, message):
try_num = 0
while (try_num < self.MAX_RETRY):
try_num = try_num + 1
try:
self.socket.sendto(message, (self.IP, self.UDP_PORT))
break
except socket.timeout:
print "elexol24.trysend: socket timeout"
except socket.error:
print "elexol24.trysend: ", socket.error
# set port direction
def setportdirection(self, port, direction):
"""
Set the desired I/O pin direction for a particular Elexol24 port
:param:
- 'port' : A string containing either 'A', 'B', or 'C' to indicate the desired port.
- 'direction' : A byte containing binary pin direction settings for that particular port, where binary 0 is input and binary 1 is output. ex: setting direction to 255 will set all pins to '1' and thus set the entire port to input.
"""
self.trysend("!" + port + chr(direction))
# set entire port at once
def setport(self, port, value):
"""
Writes values to a single port
:param:
- 'port' : A string containing either 'A', 'B', or 'C' to indicate desired port.
- 'value' : A byte containing binary values to write to the port. ex: 255 will write all 1's to the port.
"""
self.trysend(port.upper()+chr(value))
# get port value
def getport(self, port):
"""
Gets current values on a particular port.
:param:
- 'port' : A string containing either 'A', 'B', or 'C'.
:rtype: Byte
:return: A byte containing the current status of the desired port.
"""
#self.trysend(port.lower())
msg = self.tryrecv(port.lower())
# return the value from the device
return ord(msg[1])
# clear all values of individual port
def clearport(self, port):
"""
Clears all values on given port
:param:
- 'port' : A string containing either 'A', 'B', or 'C'.
"""
self.setport(port, 0)
# clear entire device
def clearall(self):
"""
Clears all pins on the device.
"""
for port in self.ports:
self.clearport(port)
return
# set individual pin value, keeping existing values (logical OR)
def setpin(self, port, pin):
"""
Sets an individual pin value, but leaves all other pins untouched.
:param:
- 'port' : A string containing either 'A', 'B', or 'C'.
- 'pin' : A zero-indexed value specifying the desired pin. Valid range is 0-7.
:raise: AssertionError if pin is out of range.
"""
assert(pin < 8 and pin >= 0), "elexol24.setpin: pin out of range"
# get existing port status and OR the result
a = self.getport(port)
b = a | (1 << pin)
# write it back to device
self.trysend(port.upper()+chr(b))
# set individual pin value, with all else zero
def setxpin(self, port, pin):
"""
Sets an individual pin on a specified port, and clears the rest.
:param:
- 'port' : A string containing either 'A', 'B', or 'C'.
- 'pin' : A zero-indexed value specifying the desired pin. Valid range is 0-7.
"""
self.trysend(port.upper()+chr(1 << pin))
# get status of individual pin
def getpin(self, port, pin):
"""
Gets the status of an individual pin on a specified port.
:param:
- 'port' : A string containing either 'A', 'B', or 'C'.
- 'pin' : A zero-indexed value specifying the desired pin. Valid range is 0-7.
:rtype: Boolean
:return: The value of the specified pin.
"""
# get current port value and AND with desired bit
assert(pin < 8 and pin >= 0), "elexol24.getpin: pin out of range"
a = self.getport(port)
return bool(a & (1 << pin))
# clear one individual pin, keeping existing values
def clearpin(self, port, pin):
"""
Clears an individual pin on a specified port.
:param:
- 'port' : A string containing either 'A', 'B', or 'C'.
- 'pin' : A zero-indexed value specifying the desired pin. Valid range is 0-7.
"""
assert(pin < 8 and pin >= 0), "elexol24.clearpin: pin out of range"
# get port status, AND with inverted pin value
a = self.getport(port)
b = a & ~(1 << pin)
# write back to device
self.trysend(port.upper()+chr(b))
# set one pin in entire 24 pin bank
def setpin24(self, pin):
"""
Sets an individual pin across the entire device, and leaves all other untouched.
:param:
- 'pin' : A zero-indexed value specifying the desired pin. Valid range is 0-23
"""
assert (pin < 24 and pin >= 0), "elexol24.setpin24: Pin input outside of range."
# determine port
port, pin = self.ports[pin/self.pins], pin % self.pins
# perform operation
self.setpin(port, pin)
return
# set one pin in entire 24 pin bank, keeping all else zero
def setxpin24(self, pin):
"""
Sets an individual pin across the entire device. All other pins are cleared.
:param:
- `pin` : A zero-indexed value specifying the desired pin. Valid range is 0-23.
:raise: AssertionError if pin out of range.
"""
assert (pin < 24 and pin >= 0), "elexol24.setxpin24: Pin input outside of range."
# determine port
port, pin = self.ports[pin/self.pins], pin % self.pins
# perform operation
self.clearall()
self.setpin(port, pin)
return
def getpin24(self, pin):
"""
Gets the value of an individual pin across the entire device.
:param:
- 'pin' : A zero-indexed value specifying the desired pin. Valid range is 0-23.
:rtype: Boolean
:return: The value of the specified pin.
:raise: AssertionError if ping out of range
"""
assert (pin < 24 and pin >= 0), "elexol24.getpin24: Pin input outside of range."
# determine port
port, pin = self.ports[pin/self.pins], pin % self.pins
# perform operation
return(self.getpin(port, pin))
# clear one individual pin in entire 24 pin bank, keeping existing values
def clearpin24(self, pin):
"""
Clears an individual pin across the entire device, leaving all other untouched.
:param:
- `pin` : A zero-indexed value specifying the desired pin. Valid range is 0-23.
:raise: AssertionError if `pin` out of range.
"""
assert (pin < 24 and pin >= 0), "elexol24.clearpin24: Pin input outside of range."
# determine port
port, pin = self.ports[pin/self.pins], pin % self.pins
# perform operation
self.clearpin(port, pin)
return
# cleanup
def close(self):
"""
Closes open sockets. Automatically called by destructor.
"""
if not self.closed:
self.socket.close()
# destructor
def __del__(self):
"""
Calls self.close
"""
self.close()
# end elexol24
| [
"necromuralist@google.com"
] | necromuralist@google.com |
f8a69439eed2eab8cca7d5ee80baa35e53e002f6 | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2020/valid_mountain_array.py | e1546a90581fddd1841ef411d2951d5ec4afa217 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 506 | py | """
https://leetcode.com/problems/valid-mountain-array/
"""
from typing import List
class Solution:
def validMountainArray(self, arr: List[int]) -> bool:
i, n = 1, len(arr)
if n < 3:
return False
while i < n and arr[i] > arr[i - 1]:
i += 1
if i == 1 or i == n: # All dereasing or increasing.
return False
while i < n and arr[i] < arr[i - 1]:
i += 1
return i == n # Fall to the end from the peak.
| [
"“mengyu.jiang@gmail.com"
] | “mengyu.jiang@gmail.com |
30ea5fff596183df2745dfe231fffcae166b7a08 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1482494_0/Python/ciccino/round1a2.py | 713bdda24c7036d8fcc7b1102ab87344b94bbb0a | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | import re
def Ryan(n, stars):
times = 0
star = 0
while len(stars) > 0:
keys = stars.keys()
onestar = 0
find = 0
for i in range(len(keys)):
level = stars[keys[i]]
#we can't do a 2-star, try to remember possible 1-star
if level[1] > star:
if star >= level[0] and level[2] == 0:
if onestar == 0 or level[1] > stars[onestar][1]:
onestar = keys[i]
else: #do 2-star
times = times + 1
if level[2] == 0:
#print str(star) + "+ 2 @" + str(keys[i]) + "[2star]"
star = star + 2
else:
#print str(star) + "+ 1 @" + str(keys[i]) + "[1star]"
star = star + 1
del stars[keys[i]]
find = 1
#try 1-star
if find == 0:
if (onestar == 0):
return 0
level = stars[onestar]
#print str(star) + "+ 1 @" + str(onestar) + "[1star]"
star = star + 1
level[2] = 1
times = times + 1
return times
def round1a1(filepath):
f = open(filepath, 'r+')
infile = re.split('in', filepath)
outfile = infile[0] + "out"
print outfile
o = open(outfile, "w+")
#number of test cases
t = int(f.readline())
for i in range(t):
n = int(f.readline())
stars = {}
for j in range(n):
line = f.readline()
sl = re.split(" ", line)
stars[j + 1] = [int(sl[0]), int(sl[1]), 0]
print str(n) + str(stars)
result = Ryan(n, stars)
res = ""
if result == 0:
res = "Too Bad"
else:
res = str(result)
#result = recycledNumbers(sl[0], sl[1], len(sl[0]))
buf = "Case #" + str(i + 1) + ": " + res
i = i + 1
print buf
o.write(buf + '\n')
f.close()
o.close()
round1a1("./B-small-attempt1.in")
#round1a1("./B-test.in") | [
"eewestman@gmail.com"
] | eewestman@gmail.com |
a90d3e76a9136ee5850a8e7e50dc97be3fbca97d | 9ffa2c1d9472c0d686433a353764d03da2159205 | /tests/test_utils.py | d0c47ad37e9bdf82bc5c56e1ec57a453e56ac24c | [
"MIT"
] | permissive | zagaran/instant-census | 7c1a0ab0ff282ebc56dd3a35d18a3ab444da1bfb | 62dd5bbc62939f43776a10708ef663722ead98af | refs/heads/master | 2023-05-08T00:21:24.426828 | 2021-05-31T18:19:02 | 2021-05-31T18:19:02 | 372,590,104 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from datetime import datetime, timedelta
from tests.common import InstantCensusTestCase
from utils.time import days_in_question_period
class TestUtils(InstantCensusTestCase):
def test_days_in_question_period(self):
every_day = [0, 1, 2, 3, 4, 5, 6]
sunday = [0]
monday = [1]
for i in range(7):
# June 7, 2015 was a Sunday
start_time = datetime(2015, 6, 7) + timedelta(days=i)
days = days_in_question_period(start_time, every_day)
# isoweekday has Monday == 1 ... Sunday == 7
self.assertEqual(days, 1, "day of week %s gave %s against every_day"
% (start_time.isoweekday(), days))
for i in range(7):
# June 7, 2015 was a Sunday
start_time = datetime(2015, 6, 7) + timedelta(days=i)
days = days_in_question_period(start_time, sunday)
# isoweekday has Monday == 1 ... Sunday == 7
self.assertEqual(days, 7 - i, "day of week %s gave %s against sunday"
% (start_time.isoweekday(), days))
for i in range(7):
# June 8, 2015 was a Monday
start_time = datetime(2015, 6, 8) + timedelta(days=i)
days = days_in_question_period(start_time, monday)
# isoweekday has Monday == 1 ... Sunday == 7
self.assertEqual(days, 7 - i, "day of week %s gave %s against monday"
% (start_time.isoweekday(), days))
| [
"iamzags@gmail.com"
] | iamzags@gmail.com |
2dd25eba1dfe1ff5bb8e185add4283af8c09f5b7 | 371235a5c6636020fd9a103c732de8294c66c5de | /case sensitive game.py | 2f2af6c999d4f3090f8cecaeee0d9729e08304a7 | [] | no_license | Akmalhakimteo/10.009-The-Digital-World | 11bf13bc07c73ad36d260656b565cc0955a9217a | c037f6656a0eeb6e50d17c90164c590107a53087 | refs/heads/master | 2020-12-11T11:49:46.205201 | 2020-01-14T13:01:15 | 2020-01-14T13:01:15 | 233,841,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 13:20:49 2019
@author: akmal
"""
print("are there CAPS in your word?")
user_ans=input("type in your input here please\n")
list_caps=[]
for char in user_ans:
if char.isupper()==True:
list_caps.append(char)
print("{},{}".format(char.islower(),list_caps))
#print({},{}.format(user_input.islower(),list_caps)
#
#
#if user_ans.islower()==True:
# print("all is in lowercase")
#else:
# print("At least one letter is not in lowercase")
#
| [
"akmal_hakim_teo@hotmail.com"
] | akmal_hakim_teo@hotmail.com |
4995611ea905c0253bd4353bd4cb319c74ba4144 | 503313e19bfed3f842391f1c2854b7198bb5d09c | /PycharmProjects/puppy_interface/venv/bin/pip2.7 | 52a115ca027261c7cfb199c0ef1583df8eae45d3 | [] | no_license | alay3168/XGTestProjects | 264e84aab33f968a704f533577799617175c619b | 01bd4ed3015b28284043cccab54902bd58ce24f8 | refs/heads/master | 2022-11-02T11:11:04.625750 | 2020-10-12T05:04:49 | 2020-10-12T05:04:49 | 250,506,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | 7 | #!/Users/suzie/PycharmProjects/puppy_interface/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"xushaohua@puppyrobot.com"
] | xushaohua@puppyrobot.com |
0bc9a61e4524f7747997d1b0023fa09bf3c7d9f3 | 9848a719ddfdd21b5fe1fa2f55da290c0f6952dc | /unique-paths-2.py | f301c97817b27d3d04efaed880fc888e4e9a44e5 | [] | no_license | maomao905/algo | 725f7fe27bb13e08049693765e4814b98fb0065a | 84b35ec9a4e4319b29eb5f0f226543c9f3f47630 | refs/heads/master | 2023-03-13T12:38:55.401373 | 2021-03-25T01:55:48 | 2021-03-25T01:55:48 | 351,278,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | """
dynamic programming
current path = dp[i-1] + dp[j-1]
if there is a block in i-1 or j-1, i-1 or j-1 path would be zero
time: O(MN)
space: O(1)
"""
from typing import List
class Solution:
def uniquePathsWithObstacles(self, grid: List[List[int]]) -> int:
for row in range(len(grid)):
for col in range(len(grid[0])):
# block, thus it should stay zero
if grid[row][col] == 1:
grid[row][col] = 0
continue
if row == 0 and col == 0:
grid[row][col] = 1
continue
# add path from previous row
if row > 0:
grid[row][col] += grid[row-1][col]
# add path from previous column
if col > 0:
grid[row][col] += grid[row][col-1]
return grid[-1][-1]
s = Solution()
print(s.uniquePathsWithObstacles([[0,0,0],[0,1,0],[0,0,0]]))
print(s.uniquePathsWithObstacles([[0,1],[0,0]]))
print(s.uniquePathsWithObstacles([[0,0],[0,1]]))
| [
"maoya.sato@gmail.com"
] | maoya.sato@gmail.com |
ebc751ef7abdd04bdbca483941cc28084b496671 | cd9cb38fdc0be20d0b02c554537048f2c71333b6 | /fuzzy_search/__init__.py | 77b6484713f45e150ddf6c458df5f6681fd2bf7b | [
"MIT"
] | permissive | marijnkoolen/fuzzy-search | 15a09cc3bf9249175af2494903c1189b0f0f6608 | 1ac61e558f16b5a35918f55ac1f65857c740601e | refs/heads/master | 2023-08-03T08:37:15.752423 | 2023-07-20T13:23:01 | 2023-07-20T13:23:01 | 218,385,563 | 18 | 1 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | __version__ = '2.0.1a'
from fuzzy_search.search.config import default_config
from fuzzy_search.search.phrase_searcher import FuzzyPhraseSearcher
from fuzzy_search.search.token_searcher import FuzzyTokenSearcher
from fuzzy_search.match.phrase_match import PhraseMatch
from fuzzy_search.phrase.phrase_model import PhraseModel
def make_searcher(phrases: any, config):
phrase_model = PhraseModel(phrases, config)
searcher = FuzzyPhraseSearcher(phrase_model=phrase_model, config=config)
return searcher
| [
"marijn.koolen@gmail.com"
] | marijn.koolen@gmail.com |
e0b4c42b2802188a2cdb0e5fa651d16088cf5d88 | aa13423fa47e405a4b8884fe125b99ef7f0111dc | /backend/service/common/common_service.py | 3b44ad38e701e1cf654486a9d3c9e976ec464163 | [] | no_license | imtos/loonflow | b2f04d2fa5890c0a2a09d34b8c63af2bee38230b | 5f0953c497736b2376757978782b13fb0ca76305 | refs/heads/master | 2020-03-07T19:56:37.527777 | 2018-02-08T09:53:58 | 2018-02-08T09:53:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from rest_framework.views import exception_handler
from service.base_service import BaseService
class CommonService(BaseService):
def __init__(self):
pass
| [
"blackholll@163.com"
] | blackholll@163.com |
494db8147046d97eeae5ef128b6145f4abd174c5 | a9a07bd14e0568c8aa95cc43dc1961c2cceba9bf | /src/util/convert_fasta.py | e0cb667728ce13f9344c8721a40c9e7b5a3b5532 | [] | no_license | daishu-li/AMP_Benchmark | 4781e3b44f0fa16550661aeb9f8bcfc0e5f5aace | 14abf5bb715c1e0ecd8099beac2a1e92d6c72330 | refs/heads/master | 2023-01-12T10:38:10.227157 | 2020-11-15T23:57:19 | 2020-11-15T23:57:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | '''
AMP FAST Formater
Author: Yuya Jeremy Ong (yjo5006@psu.edu)
'''
from __future__ import print_function
# Application Parameters
DATA_DIR = '../../data/proc/'
INPUT_DIR = DATA_DIR + 'data3.csv'
OUTPUT_DIR = '../../data/fasta/data3_merges.fasta.txt'
def read_csv(dir, ignore_header=True):
st = 1 if ignore_header else 0
data = open(dir, 'r').read().split('\n')[st:-1]
return [[d.split(',')[0], d.split(',')[2]] for d in data]
if __name__ == '__main__':
# Read CSV File
data = read_csv(INPUT_DIR)
# FASTA File Generate Output
out = open(OUTPUT_DIR, 'w')
for d in data:
out.write('>' + d[0] + '\n')
out.write(d[1] + '\n')
out.close()
print('Output File: ' + OUTPUT_DIR)
| [
"yuyajeremyong@gmail.com"
] | yuyajeremyong@gmail.com |
72c3f2d8497a0d60d2c0f14e957489a4618e4be4 | 4851d160a423b4a65e81a75d5b4de5218de958ee | /Are You Playing Banjo.py | 433730f43ecfc4a458d8b013baa223ac9e5ed536 | [] | no_license | LarisaOvchinnikova/python_codewars | 519508e5626303dcead5ecb839c6d9b53cb3c764 | 5399f4be17e4972e61be74831703a82ce9badffd | refs/heads/master | 2023-05-05T14:52:02.100435 | 2021-05-25T18:36:51 | 2021-05-25T18:36:51 | 319,399,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # https://www.codewars.com/kata/53af2b8861023f1d88000832
def areYouPlayingBanjo(name):
if name[0] == "r" or name[0] == "R" :
return f"{name} plays banjo"
else:
return F"{name} does not play banjo" | [
"larisaplantation@gmail.com"
] | larisaplantation@gmail.com |
0e05766e147bc75fb98146c9f60ba7e258833825 | c34759a07cee20cdfe067247753f9951d6de77ff | /course/models.py | 3f43ba4570b105d629c6736e73cfec263824bb8c | [] | no_license | Shadyaobuya/PythonWeb | 6e52be7819489de7ae508c92aea4bea4917db828 | 451e193b4000627d3fccc0966be684307d75ca18 | refs/heads/master | 2023-08-02T12:04:02.661068 | 2021-09-30T08:31:39 | 2021-09-30T08:31:39 | 380,287,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from django.db import models
from django.db.models.deletion import CASCADE, SET_NULL
# Create your models here.
class Course(models.Model):
    """A training course: display name, short code, trainer and target class.

    All fields are nullable so partially filled records can be saved.
    """
    course_name=models.CharField(max_length=40,null=True)
    course_code=models.CharField(max_length=20,null=True)
    trainer=models.CharField(max_length=30,null=True)
    description=models.TextField(null=True)
    class_name=models.CharField(max_length=20,null=True)
    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.course_name
    def check_course_name(self):
        """Return the course's display name."""
        return self.course_name
    def check_trainer(self):
        """Return the assigned trainer's name."""
        return self.trainer
class CourseSyllabus(models.Model):
    """Syllabus topics attached one-to-one to a Course."""
    # SET_NULL keeps the syllabus row alive if its course is deleted.
    course=models.OneToOneField(Course,on_delete=SET_NULL,null=True)
    topic=models.TextField(null=True)
    def __str__(self):
        return self.topic
| [
"shadyaobuyagard@gmail.com"
] | shadyaobuyagard@gmail.com |
c10648c141dd42e67722581ebc455b6b420f711c | 93a43800b64c70ecf7069600b9d5fc83a726343e | /Examples/SecurityOperations/UpdateDocumentPassword.py | 7f56bb2b3983d043278cd546bcc5dca26fef5e7a | [
"MIT"
] | permissive | groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples | 2837afeac5cefb966cdafe4e5ec8c4ca8f8ea216 | dc4dffe01b98d68d469cbacab490894a024b79a7 | refs/heads/master | 2023-02-19T13:40:20.721383 | 2023-02-08T06:48:04 | 2023-02-08T06:48:04 | 225,944,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # Import modules
import groupdocs_merger_cloud
from Common import Common
# This example demonstrates how to update document password
class UpdateDocumentPassword:
    """Sample showing how to change a protected document's password via the
    GroupDocs.Merger Cloud API."""
    @classmethod
    def Run(cls):
        """Replace the password on a protected DOCX and print the result path."""
        securityApi = groupdocs_merger_cloud.SecurityApi.from_config(Common.GetConfig())
        options = groupdocs_merger_cloud.UpdatePasswordOptions()
        # FileInfo(path, version, storage, password): the current password is
        # needed to open the source document.
        options.file_info = groupdocs_merger_cloud.FileInfo("WordProcessing/password-protected.docx", None, None, "password")
        options.output_path = "Output/update-password.docx"
        options.new_password = "NewPassword"
        result = securityApi.update_password(groupdocs_merger_cloud.UpdatePasswordRequest(options))
        print("Output file path = " + result.path)
"sergei.terentev@aspose.com"
] | sergei.terentev@aspose.com |
42d80bfb12daf74531e901d7dc9273f0f5ef4652 | a9a8931d6877d6e0f4f11cbd7b50322819e0fe45 | /hpc/WERCS-GN_10.py | c1916730a56247df1ccc78130d0c00d7a980df02 | [] | no_license | jafetgado/tomerdesign | 8517f9f8266bcf1db64fdf00d12294f682cd412d | a0d0961a11d7d84be5343d374198ab0f5084c2b3 | refs/heads/master | 2022-05-31T18:15:55.045419 | 2020-04-25T05:49:32 | 2020-04-25T05:49:32 | 258,499,679 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,389 | py | """
Template script for hyperparameter tuning with HPC
Evaluates the performance of a strategy for a single
set of hyperparameter combinations)
"""
# Imports
#============#
import numpy as np
import pandas as pd
import joblib
import itertools
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import resreg
import warnings
warnings.filterwarnings("ignore")
# Get dataset and features
#==============================#
# Canonical 20-letter amino-acid alphabet, in fixed order.
aalist = list('ACDEFGHIKLMNPQRSTVWY')

def getAAC(seq):
    """Return the amino-acid composition of *seq* as a length-20 vector of
    per-residue fractions (one entry per letter of ``aalist``)."""
    length = len(seq)
    return np.fromiter((seq.count(aa) / length for aa in aalist),
                       dtype=float, count=len(aalist))
# Load the dataset: one row per enzyme with its sequence, 'ogt' (presumably
# optimal growth temperature of the host organism — confirm) and 'topt'
# (presumably the enzyme's optimal catalytic temperature — confirm).
data = pd.read_excel('sequence_ogt_topt.xlsx', index_col=0)
# Feature matrix: 20 amino-acid composition fractions plus OGT as column 21.
aac = np.array([getAAC(seq) for seq in data['sequence']])
ogt = data['ogt'].values.reshape((data.shape[0],1))
X = np.append(aac, ogt, axis=1)
# Standardize features to zero mean / unit variance before regression.
sc = StandardScaler()
X = sc.fit_transform(X)
# Regression target.
y = data['topt'].values
# Strategies and hyperparameters
#======================================#
# Hyperparameter range
# cl/ch: low/high centres of the sigmoid relevance function (None disables
# that side).
cl_vals = [25.0, 30.0, None]
ch_vals = [72.2, 60.0]
ks = [5, 10, 15]            # SMOTER nearest-neighbour counts
deltas = [0.1, 0.5, 1.0]    # Gaussian-noise amplitudes
overs = [0.5, 0.75]         # WERCS oversampling fractions
unders = [0.5, 0.75]        # WERCS undersampling fractions
sizes = [300, 600]          # REBAGG bootstrap sample sizes
sample_methods = ['balance', 'extreme', 'average']
size_methods = ['balance', 'variation']
all_params = {}
# Hyperparameter combinations (grid search)
all_params['RO'] = list(itertools.product(cl_vals, ch_vals, sample_methods))
all_params['SMOTER'] = list(itertools.product(cl_vals, ch_vals, sample_methods, ks))
all_params['GN'] = list(itertools.product(cl_vals, ch_vals, sample_methods, deltas))
all_params['WERCS'] = list(itertools.product(cl_vals, ch_vals, overs, unders))
all_params['WERCS-GN'] = list(itertools.product(cl_vals, ch_vals, overs, unders, deltas))
all_params['REBAGG-RO'] = list(itertools.product(cl_vals, ch_vals, size_methods,
                                                 sizes))
all_params['REBAGG-SMOTER'] = list(itertools.product(cl_vals, ch_vals, size_methods,
                                                     sizes, ks))
all_params['REBAGG-GN'] = list(itertools.product(cl_vals, ch_vals, size_methods,
                                                 sizes, deltas))
all_params['REBAGG-WERCS'] = list(itertools.product(cl_vals, ch_vals, sizes, overs,
                                                    unders))
all_params['REBAGG-WERCS-GN'] = list(itertools.product(cl_vals, ch_vals, sizes, overs,
                                                       unders, deltas))
strategies = list(all_params.keys())
# Evaluate performance for a single strategy and hyperparameter combination
#===========================================================================#
bins = [30, 50, 65, 85] # For splitting target values into bins
m = 100 # Number of regressors in REBAGG ensemble
# Specify strategy and param (instead of a lengthy for loop of combinations)
# NOTE: this file is a per-job HPC template; the strategy name and the param
# index are substituted per generated script.
strategy = 'WERCS-GN' # Replace WERCS-GN for this calculation
params = all_params[strategy]
param = params[10] # Replace 10 for this calculation
# Implement calculation for only specified strategy and param
r2_store, mse_store, mcc_store, f1_store = [], [], [], [] # Empty lists for storing results
mse_bins_store = []
# Monte Carlo cross validation (MCCV) loop
# rrr doubles as the random seed for the split, the resampler and the model.
for rrr in range(50):
    # Resample validation set (uniform distribution)
    train_indices, test_indices = resreg.uniform_test_split(X, y, bins=bins,
                                        bin_test_size=70, verbose=False,
                                        random_state=rrr)
    X_train, y_train = X[train_indices,:], y[train_indices]
    X_test, y_test = X[test_indices,:], y[test_indices]
    # Unpack hyperparameters, resample training data, and fit regressors
    # REBAGG builds its own ensemble from single trees; the plain strategies
    # use a small random forest directly.
    reg = DecisionTreeRegressor(random_state=rrr) if 'REBAGG' in strategy else \
        RandomForestRegressor(n_estimators=10, n_jobs=-1, random_state=rrr)
    if strategy=='RO':
        cl, ch, sample_method = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        X_train, y_train = resreg.random_oversample(X_train, y_train, relevance,
                    relevance_threshold=0.5, over=sample_method,
                    random_state=rrr)
        reg.fit(X_train, y_train)
    elif strategy=='SMOTER':
        cl, ch, sample_method, k = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        X_train, y_train = resreg.smoter(X_train, y_train, relevance,
                    relevance_threshold=0.5, k=k, over=sample_method,
                    random_state=rrr)
        reg.fit(X_train, y_train)
    elif strategy=='GN':
        cl, ch, sample_method, delta = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        X_train, y_train = resreg.gaussian_noise(X_train, y_train, relevance,
                    relevance_threshold=0.5, delta=delta, over=sample_method,
                    random_state=rrr)
        reg.fit(X_train, y_train)
    elif strategy=='WERCS':
        cl, ch, over, under = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        X_train, y_train = resreg.wercs(X_train, y_train, relevance, over=over,
                   under=under, noise=False, random_state=rrr)
        reg.fit(X_train, y_train)
    elif strategy=='WERCS-GN':
        cl, ch, over, under, delta = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        X_train, y_train = resreg.wercs(X_train, y_train, relevance, over=over,
                   under=under, noise=True, delta=delta, random_state=rrr)
        reg.fit(X_train, y_train)
    elif strategy=='REBAGG-RO':
        cl, ch, size_method, s = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
        rebagg.fit(X_train, y_train, relevance, relevance_threshold=0.5,
                   sample_method='random_oversample', size_method=size_method,
                   random_state=rrr)
    elif strategy=='REBAGG-SMOTER':
        cl, ch, size_method, s, k = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
        rebagg.fit(X_train, y_train, relevance, relevance_threshold=0.5,
                   sample_method='smoter', size_method=size_method, k=k,
                   random_state=rrr)
    elif strategy=='REBAGG-GN':
        cl, ch, size_method, s, delta = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
        rebagg.fit(X_train, y_train, relevance, relevance_threshold=0.5,
                   sample_method='gaussian', size_method=size_method, delta=delta,
                   random_state=rrr)
    elif strategy=='REBAGG-WERCS':
        cl, ch, s, over, under = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
        rebagg.fit(X_train, y_train, relevance=relevance, sample_method='wercs',
                   over=over, under=under, random_state=rrr)
    elif strategy=='REBAGG-WERCS-GN':
        cl, ch, s, over, under, delta = param
        relevance = resreg.sigmoid_relevance(y_train, cl=cl, ch=ch)
        rebagg = resreg.Rebagg(m=m, s=s, base_reg=reg)
        rebagg.fit(X_train, y_train, relevance=relevance, sample_method='wercs-gn',
                   over=over, under=under, delta=delta, random_state=rrr)
    # Validate fitted regressors on uniform validation set
    if 'REBAGG' in strategy:
        y_pred = rebagg.predict(X_test)
    else:
        y_pred = reg.predict(X_test)
    # Evaluate regressor performance on validation set
    r2 = r2_score(y_test, y_pred)
    mse = mean_squared_error(y_test, y_pred)
    mcc = resreg.matthews_corrcoef(y_test, y_pred, bins)
    relevance_true = resreg.sigmoid_relevance(y_test, cl=None, ch=65)
    relevance_pred = resreg.sigmoid_relevance(y_pred, cl=None, ch=65)
    f1 = resreg.f1_score(y_test, y_pred, error_threshold=5,
                         relevance_true=relevance_true, relevance_pred=relevance_pred,
                         relevance_threshold=0.5, k=1e4)
    mse_bins = resreg.bin_performance(y_test, y_pred, bins, metric='MSE')
    # Store performance results
    r2_store.append(r2)
    mse_store.append(mse)
    mcc_store.append(mcc)
    f1_store.append(f1)
    mse_bins_store.append(mse_bins)
# Performance statistics
# Aggregate the 50 MCCV repetitions into mean/std per metric.
r2_mean, r2_std = np.mean(r2_store), np.std(r2_store)
mse_mean, mse_std = np.mean(mse_store), np.std(mse_store)
f1_mean, f1_std = np.mean(f1_store), np.std(f1_store)
mcc_mean, mcc_std = np.mean(mcc_store), np.std(mcc_store)
mse_bins_store = pd.DataFrame(mse_bins_store)
mse_bins_mean, mse_bins_std = np.mean(mse_bins_store, axis=0), np.std(mse_bins_store, axis=0)
# Combine all performance data and write to excel spreadsheet
means = [r2_mean, mse_mean, f1_mean, mcc_mean] + list(mse_bins_mean)
stds = [r2_std, mse_std, f1_std, mcc_std] + list(mse_bins_std)
store = [param] + means + stds
# Save performance results as a binary file (to be read and analyzed later)
# The file name encodes the strategy and the param index of this HPC job.
joblib.dump(store, f'hpc/joblib_files/{strategy}_{10}.pkl')
| [
"japhethgado@gmail.com"
] | japhethgado@gmail.com |
58b86a62d88299e881d276fb2e91397a3746ba21 | 59b3f3e3c082bf0891e8a117251607dac71c7e9c | /dockit/tests/serializers/common.py | 5d6ecc9d49b6c25711ef94d27ed3c6c6403344cd | [
"BSD-3-Clause"
] | permissive | cuker/django-dockit | 55a42af69b4dd41e941fe07ebc70a7a0826bd253 | 406734280ca6b55f66b73b3b4ec5e97ba58f045d | refs/heads/master | 2021-01-17T22:53:38.324005 | 2013-07-18T09:40:35 | 2013-07-18T09:40:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | from dockit import schema
from django.contrib.contenttypes.models import ContentType
class ChildDocument(schema.Document):
charfield = schema.CharField()
def create_natural_key(self):
return {'charfield': self.charfield}
class ChildSchema(schema.Schema):
ct = schema.ModelReferenceField(ContentType)
class ParentDocument(schema.Document):
title = schema.CharField()
subdocument = schema.ReferenceField(ChildDocument)
subschema = schema.SchemaField(ChildSchema)
| [
"jasonk@cukerinteractive.com"
] | jasonk@cukerinteractive.com |
6f8b773c73ffac7563cbe3b92544e43378c79726 | f216c56154fbc00b089aec6e7d74ecd94b1feab2 | /frontendadmin/views.py | 64b18c150e6f29e521d4a59dcb8311988b3264bf | [
"BSD-3-Clause"
] | permissive | yupengyan/django-frontendadmin | f53f7d47a93f06a3577c9234966ec0c8e24fe040 | 53d8ed1fdcd1ef466fb9c8b38ccb2abb77978b1e | refs/heads/master | 2021-01-19T19:59:38.509503 | 2009-10-30T03:05:03 | 2009-11-02T12:52:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,021 | py | # -*- coding: utf-8 -*-
from django.contrib.admin import site
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.forms.models import modelform_factory
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.template.loader import get_template
from django.template import TemplateDoesNotExist
from django.utils.translation import ugettext
from django.views.decorators.cache import never_cache
from django.utils.importlib import import_module
from django.conf import settings
from django.forms import CharField
from forms import DeleteRequestForm, FrontendAdminModelForm
EXCLUDES = getattr(settings, 'FRONTEND_EXCLUDES', {})
FIELDS = getattr(settings, 'FRONTEND_FIELDS', {})
FORMS = getattr(settings, 'FRONTEND_FORMS', {})
def import_function(s):
    """
    Import a function given the string formatted as
    `module_name.function_name` (eg `django.utils.text.capfirst`)
    """
    module_path, _, attr_name = s.rpartition('.')
    module = import_module(module_path)
    return getattr(module, attr_name)
def check_permission(request, mode_name, app_label, model_name):
    '''
    Check for proper permissions. mode_name may be either add, change or delete.
    '''
    user = request.user
    codename = '{0}.{1}_{2}'.format(app_label, mode_name, model_name)
    return user.is_active and user.has_perm(codename)
def _get_instance(request, mode_name, app_label, model_name, instance_id=None,
                  form=None,
                  form_fields=None,
                  form_exclude=None):
    '''
    Returns ``(model, instance_form)`` — plus the instance itself when a
    primary key (``instance_id``) is given — for ``app_label.model_name``.

    If the user has no permission to add, change or delete the object, or the
    model cannot be resolved, an ``HttpResponseForbidden`` is returned instead.
    '''
    # Check for permission to add/change/delete this object
    if not check_permission(request, mode_name, app_label, model_name):
        return HttpResponseForbidden('You have no permission to do this!')

    try:
        model = get_model(app_label, model_name)
    # Model does not exist
    except AttributeError:
        return HttpResponseForbidden('This model does not exist!')
    # BUG FIX: get_model() returns None for an unknown model rather than
    # raising, which previously crashed later inside modelform_factory().
    if model is None:
        return HttpResponseForbidden('This model does not exist!')

    label = '%s.%s' % (app_label, model_name)

    # Pick the base form: FRONTEND_FORMS setting > registered admin form >
    # explicit ``form`` argument > generic fallback.
    if label in FORMS and not form:
        form = import_function(FORMS[label])
    elif model in site._registry and not form:
        form = site._registry[model].form
    elif form is None:
        form = FrontendAdminModelForm

    # Settings-based field restrictions override the caller's arguments.
    if label in EXCLUDES:
        form_exclude = EXCLUDES[label]
    if label in FIELDS:
        form_fields = FIELDS[label]

    instance_form = modelform_factory(model, form=form,
                                      fields=form_fields, exclude=form_exclude)

    # if instance_id is set, grab this model object
    if instance_id:
        instance = model.objects.get(pk=instance_id)
        return model, instance_form, instance
    return model, instance_form
def _handle_cancel(request, instance=None):
    '''
    Handles clicks on the 'Cancel' button in forms. Returns a redirect to the
    last page the user came from (``next`` GET parameter). If not given, to
    the detail-view of the object. Last fallback is a redirect to the common
    success page. Returns ``None`` when the form was not cancelled.
    '''
    # Only act when the submitted form carries the '_cancel' button value.
    if request.POST.get('_cancel', False):
        if request.GET.get('next', False):
            return HttpResponseRedirect(request.GET.get('next'))
        if instance and hasattr(instance, 'get_absolute_url'):
            return HttpResponseRedirect(instance.get_absolute_url())
        return HttpResponseRedirect(reverse('frontendadmin_success'))
    return None
def _handle_response(request, instance=None):
    '''
    Handles redirects for completed form actions. Returns a redirect to the
    last page the user came from (``next`` parameter). If not given, to the
    detail-view of the object. Last fallback is a redirect to the common
    success page.
    '''
    if 'next' in request.REQUEST:
        return HttpResponseRedirect(request.REQUEST['next'])
    if instance and hasattr(instance, 'get_absolute_url'):
        return HttpResponseRedirect(instance.get_absolute_url())
    return HttpResponseRedirect(reverse('frontendadmin_success'))
def _find_template(template_name, app_label=None, model_name=None):
    """
    Finds a template_name for the given, optional ``app_label`` . ``model_name``.
    Prefers a model-specific template
    (``frontendadmin/<app>_<model>_<name>``) and falls back to the generic
    ``frontendadmin/<name>`` when the specific one does not exist.
    """
    if app_label is None and model_name is None:
        return 'frontendadmin/%s' % template_name
    try:
        name = 'frontendadmin/%s_%s_%s' % (app_label, model_name, template_name)
        # get_template() is only used as an existence probe here.
        get_template(name)
        return name
    except TemplateDoesNotExist:
        return 'frontendadmin/%s' % template_name
def _get_template(request, app_label=None, model_name=None):
    '''
    Returns wether the ajax or the normal (full blown) template.
    '''
    if request.is_ajax():
        return _find_template('form_ajax.html', app_label, model_name)
    return _find_template('form.html', app_label, model_name)
@never_cache
@login_required
def add(request, app_label, model_name, mode_name='add',
        form_fields=None,
        form_exclude=None):
    '''
    Generic frontend view that creates a new ``app_label.model_name``
    instance. Requires the ``<app_label>.add_<model_name>`` permission;
    otherwise an ``HttpResponseForbidden`` is returned.
    '''
    # Get model and instance_form for arguments (may be a 403 response).
    instance_return = _get_instance(request, mode_name, app_label, model_name,
                                    form_fields=form_fields,
                                    form_exclude=form_exclude)
    if isinstance(instance_return, HttpResponseForbidden):
        return instance_return
    model, instance_form = instance_return

    # Handle cancel request
    cancel = _handle_cancel(request)
    if cancel:
        return cancel

    if request.method == 'POST':
        form = instance_form(request.POST, request.FILES)
        if form.is_valid():
            instance = form.save()
            # Give the user a nice message
            request.user.message_set.create(
                message=ugettext(u'Your %(model_name)s was added successfully' % \
                    {'model_name': model._meta.verbose_name}))
            # Return to last page
            if request.is_ajax():
                return success(request)
            # BUG FIX: the original called the misspelled ``_handle_repsonse``,
            # raising NameError after every successful non-ajax save.
            return _handle_response(request, instance)
    else:
        form = instance_form()

    template_context = {
        'action': 'add',
        'action_url': request.build_absolute_uri(),
        'model_title': model._meta.verbose_name,
        'form': form
    }

    return render_to_response(
        _get_template(request, app_label, model_name),
        template_context,
        RequestContext(request)
    )
@never_cache
@login_required
def change(request, app_label, model_name, instance_id, mode_name='change',
           form_fields=None,
           form_exclude=None):
    '''
    Generic frontend view that edits an existing ``app_label.model_name``
    instance identified by ``instance_id``.
    '''
    # Resolve model, form class and instance (may be a 403 response).
    result = _get_instance(request, mode_name, app_label, model_name,
                           instance_id,
                           form_fields=form_fields,
                           form_exclude=form_exclude)
    if isinstance(result, HttpResponseForbidden):
        return result
    model, instance_form, instance = result

    # Bail out early if the user pressed 'Cancel'.
    cancel_response = _handle_cancel(request)
    if cancel_response:
        return cancel_response

    if request.method == 'POST':
        form = instance_form(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            instance = form.save()
            # Give the user a nice message
            message = ugettext(u'Your %(model_name)s was changed successfully' % \
                {'model_name': model._meta.verbose_name})
            request.user.message_set.create(message=message)
            # Return to success page
            if request.is_ajax():
                return success(request)
            return _handle_response(request, instance)
    else:
        form = instance_form(instance=instance)

    return render_to_response(
        _get_template(request, app_label, model_name),
        {
            'action': 'change',
            'action_url': request.build_absolute_uri(),
            'model_title': model._meta.verbose_name,
            'form': form,
        },
        RequestContext(request)
    )
@never_cache
@login_required
def delete(request, app_label, model_name, instance_id,
           delete_form=DeleteRequestForm):
    '''
    Generic frontend view that deletes an ``app_label.model_name`` instance
    after the user confirms via ``delete_form``.
    '''
    # Get model, instance_form and instance for arguments.
    # BUG FIX: the original passed ``model_name`` as the permission mode, so
    # it checked ``<app>.<model>_<model>`` instead of ``<app>.delete_<model>``,
    # denying access to every user without that (non-existent) permission.
    instance_return = _get_instance(request, 'delete', app_label, model_name, instance_id)
    if isinstance(instance_return, HttpResponseForbidden):
        return instance_return
    model, instance_form, instance = instance_return

    # Handle cancel request
    cancel = _handle_cancel(request)
    if cancel:
        return cancel

    if request.method == 'POST':
        form = delete_form(request.POST)
        if form.is_valid():
            instance.delete()
            # Give the user a nice message
            request.user.message_set.create(
                message=ugettext(u'Your %(model_name)s was deleted.' % \
                    {'model_name': model._meta.verbose_name}))
            # Return to last page
            if request.is_ajax():
                return success_delete(request)
            return _handle_response(request, instance)
    else:
        form = delete_form()

    template_context = {
        'action': 'delete',
        'action_url': request.build_absolute_uri(),
        'model_title': model._meta.verbose_name,
        'form': form,
    }

    return render_to_response(
        _get_template(request, None, None),
        template_context,
        RequestContext(request)
    )
def success(request, template_name='success.html', template_ajax='success_ajax.html'):
    '''
    First, a view would redirect to the last page the user came from. If
    this is not available (because somebody fiddled in the url), we redirect
    to this common success page.

    Normally a user should never see this page.
    '''
    # Ajax requests get the lightweight partial; full requests the full page.
    template = _find_template(request.is_ajax() and template_ajax or template_name)
    return render_to_response(template, {}, RequestContext(request))
def success_delete(request, template_name='success_delete.html', template_ajax='success_delete_ajax.html'):
    '''
    Normally a view would redirect to the last page. After delete from a object
    in a detail-view, there is no "last page" so we redirect to a unique, shiny
    success-page.
    '''
    # Ajax requests get the lightweight partial; full requests the full page.
    template = _find_template(request.is_ajax() and template_ajax or template_name)
    return render_to_response(template, {}, RequestContext(request))
| [
"martin@mahner.org"
] | martin@mahner.org |
e98a870a3708af61cf12893b76db95dce8ce711a | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_09_01_polycrystal_FIP/main_plt.py | 881fb484e611cde890483c3bf1796684fce491c5 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | # import plot_correlation as pltcorr
import plot_explained_variance as pev
import plot_pc_map_3d as pltmap3d
import plot_pc_map as pltmap
import plot_dendrogram as pd
import plot_err_v_pc as pevp
import plot_linkage_check as plc
import plot_evd as pe
import plot_evd_predicted as pep
from constants import const
import matplotlib.pyplot as plt
# Pull the run configuration (sample names/ids/sizes) from the shared
# constants module.
C = const()
names = C['names']
sid = C['sid_split']
ns = C['ns_split']
# Hvec = [6, 15, 41, 90]
# H: number of GSH basis functions used throughout; deg: polynomial degree.
Hvec = [6]
H = 6
deg = 2
# """Plot an autocorrelation"""
# sn = 0
# iA = 1
# iB = 1
# pltcorr.pltcorr(ns_cal[0], sid_cal[0], sn, iA, iB)
"""Plot the percentage explained variance"""
pev.variance([.5, 15, 40, 105], Hvec)
"""Plot the microstructures in PC space"""
pcA = 0
pcB = 1
pcC = 2
pltmap.pltmap(H, pcA, pcB)
pltmap3d.pltmap(H, pcA, pcB, pcC)
"""Plot a dendrogram"""
pd.pltdend(ns, sid, H)
"""Plot the errors versus number of PCs and polynomial order"""
emax = 100
pevp.plterr('mu', emax, deg, ['meanerr'], Hvec)
pevp.plterr('mu', emax, deg, ['LOOCV'], Hvec)
pevp.plterr('sigma', emax, deg, ['meanerr'], Hvec)
pevp.plterr('sigma', emax, deg, ['LOOCV'], Hvec)
"""Plot the predicted versus actual values of the property of interest"""
indx1 = plc.plot_check('mu', n_pc=2, n_poly=3, H=H, erv=10)
indx2 = plc.plot_check('sigma', n_pc=2, n_poly=3, H=H, erv=10)
"""Plot the FIP EVDs versus the predicted FIP EVDs"""
pe.pltevd(H)
pep.pltevd(indx1, indx2, H)
# Show all accumulated matplotlib figures at once.
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
abb33774e1c174d956e5753d33f6f6a895bfc3c5 | ff4d26332da8b4d31689a68c97c06eca19cc4260 | /projectEuler/webScraping/problemTemplates/135.py | 644ba12b6d7d882d9abaf7cf859a982632096169 | [] | no_license | nickfang/classes | cf1b64686fb34909f6ffface0f669fa88256d20c | 6869deaa5a24782c5a69c7aa41875faf2553e013 | refs/heads/master | 2023-01-04T00:43:31.351247 | 2019-12-30T21:04:12 | 2019-12-30T21:04:12 | 100,035,808 | 0 | 0 | null | 2023-01-03T20:59:30 | 2017-08-11T13:41:17 | HTML | UTF-8 | Python | false | false | 548 | py | # Same differences
#
#Given the positive integers, x, y, and z, are consecutive terms of an arithmetic progression, the least value of the positive integer, n, for which the equation, x^2 − y2 − z2 = n, has exactly two solutions is n = 27:
#34^2 − 272 − 202 = 122 − 92 − 62 = 27
#It turns out that n = 1155 is the least value which has exactly ten solutions.
#How many values of n less than one million have exactly ten distinct solutions?
#
import time
startTime = time.time()
print('Elapsed time: ' + str(time.time()-startTime)) | [
"fang.nicholas@gmail.com"
] | fang.nicholas@gmail.com |
525d43ca59cc5097c97503cf5b04764728628052 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Quote18/HQ_18_135.py | 7dd1a834e9b79a4fe2ce51b8c71d9f8035bfa820 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_135(xtp_test_case):
    # Quote-API regression case (Python 2 syntax): subscribing to the order
    # book with an empty ticker on exchange_id 2 must be rejected with the
    # "unknown security" error.
    def subOrderBook(self, Api, stk_info, case_name, rs_expect):
        """Subscribe to the order book and compare the callback error to rs_expect."""
        print Api.GetApiVersion()
        def on_order_book(data, error, last):
            # Callback invoked by the quote API; only the error payload matters.
            self.print_msg(case_name, rs_expect, error)
        Api.setSubOrderBookHandle(on_order_book)
        Api.SubscribeOrderBook(stk_info)
        # Give the asynchronous callback time to arrive before the test ends.
        time.sleep(1)
    def print_msg(self, case_name, rs_expect, error):
        """Log pass/fail and assert the actual error equals the expectation."""
        if rs_expect == error:
            logger.warning('{0}测试正确!'.format(case_name))
        else:
            logger.error('{0}测试错误!'.format(case_name))
        self.assertEqual(error, rs_expect)
    def test_HQ_18_135(self):
        """Empty-ticker subscription should fail with error_id 11200003."""
        pyname = 'HQ_18_135'
        client_id = 6
        Api = XTPQuoteApi(client_id)
        Api.Login()
        stk_info = {'ticker': '', 'exchange_id': 2}
        self.subOrderBook(Api, stk_info, pyname,
                          {'error_id': 11200003, 'error_msg': 'unknown security'})  # 4
        Api.Logout()
Api.Logout()
if __name__=='__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
7c6354037f0da0a887c94e3f57e7aa0cf8dbb064 | c7d3c8f2667b73e68878253a95d034fd7f1f0583 | /env/Lib/site-packages/tests/unit/gapic/dialogflowcx_v3/test_pages.py | eb3f34131dbc6181409743510ebc9d01381cbd3b | [] | no_license | jeevana28/ivrchatbot | e57e9b94b2b6c201e79d27036eca2e6c1f5deb56 | fe5d281ebf774f46861b8f8eaea0494baf115f67 | refs/heads/master | 2023-06-07T01:20:40.547119 | 2021-07-06T15:47:15 | 2021-07-06T15:47:15 | 361,155,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81,586 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
from google.api_core import exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3.services.pages import PagesAsyncClient
from google.cloud.dialogflowcx_v3.services.pages import PagesClient
from google.cloud.dialogflowcx_v3.services.pages import pagers
from google.cloud.dialogflowcx_v3.services.pages import transports
from google.cloud.dialogflowcx_v3.types import fulfillment
from google.cloud.dialogflowcx_v3.types import page
from google.cloud.dialogflowcx_v3.types import page as gcdc_page
from google.cloud.dialogflowcx_v3.types import response_message
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import struct_pb2 as struct # type: ignore
def client_cert_source_callback():
    """Dummy mTLS client-certificate source returning a static (cert, key) pair."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    # Swap a localhost default endpoint for a dummy hostname so the mTLS
    # endpoint derivation can be exercised; otherwise keep the real default.
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint should insert '.mtls' into googleapis hosts
    (idempotently) and leave None and non-googleapis hosts untouched."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert PagesClient._get_default_mtls_endpoint(None) is None
    assert PagesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert (
        PagesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    )
    assert (
        PagesClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        PagesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert PagesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [PagesClient, PagesAsyncClient,])
def test_pages_client_from_service_account_info(client_class):
    """from_service_account_info should build a client using the credentials
    produced by the (mocked) service-account factory."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize("client_class", [PagesClient, PagesAsyncClient,])
def test_pages_client_from_service_account_file(client_class):
    """Both from_service_account_file and its from_service_account_json alias
    should build a client using the (mocked) factory's credentials."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == "dialogflow.googleapis.com:443"
def test_pages_client_get_transport_class():
    """get_transport_class should default to a known transport and resolve the
    'grpc' name to the gRPC transport."""
    transport = PagesClient.get_transport_class()
    available_transports = [
        transports.PagesGrpcTransport,
    ]
    assert transport in available_transports

    transport = PagesClient.get_transport_class("grpc")
    assert transport == transports.PagesGrpcTransport
# End-to-end check of client_options handling: explicit transport objects,
# transport selection by name, api_endpoint overrides, the
# GOOGLE_API_USE_MTLS_ENDPOINT / GOOGLE_API_USE_CLIENT_CERTIFICATE env vars,
# and quota_project_id plumbing into the transport constructor.
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc"),
        (PagesAsyncClient, transports.PagesGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
@mock.patch.object(
    PagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesClient)
)
@mock.patch.object(
    PagesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesAsyncClient)
)
def test_pages_client_client_options(client_class, transport_class, transport_name):
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(PagesClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(PagesClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class()
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
# Verifies endpoint/client-cert autoswitching when GOOGLE_API_USE_MTLS_ENDPOINT
# is "auto": the mTLS endpoint and a client cert source are used only when
# GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a cert source is available.
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc", "true"),
        (
            PagesAsyncClient,
            transports.PagesGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (PagesClient, transports.PagesGrpcTransport, "grpc", "false"),
        (
            PagesAsyncClient,
            transports.PagesGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    PagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesClient)
)
@mock.patch.object(
    PagesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PagesAsyncClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_pages_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    # NOTE(review): `client` here is still the instance bound in
                    # the previous `with` block; only class-level DEFAULT_*
                    # endpoint attributes are read before it is rebound below.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class()
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class()
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
# `scopes` passed via client_options must be forwarded to the transport.
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc"),
        (PagesAsyncClient, transports.PagesGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_pages_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
# `credentials_file` passed via client_options must be forwarded to the
# transport (with `credentials` left as None).
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (PagesClient, transports.PagesGrpcTransport, "grpc"),
        (PagesAsyncClient, transports.PagesGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_pages_client_client_options_credentials_file(
    client_class, transport_class, transport_name
):
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
# client_options may be a plain dict; its api_endpoint must still reach the
# transport constructor.
def test_pages_client_client_options_from_dict():
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = PagesClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
# ListPages happy path: the stub receives a ListPagesRequest and the response
# is wrapped in a ListPagesPager exposing next_page_token.
def test_list_pages(transport: str = "grpc", request_type=page.ListPagesRequest):
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.ListPagesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_pages(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.ListPagesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPagesPager)
    assert response.next_page_token == "next_page_token_value"
# A plain dict request must be coerced to a ListPagesRequest.
def test_list_pages_from_dict():
    test_list_pages(request_type=dict)
def test_list_pages_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        client.list_pages()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call still sends a default-constructed request message.
        assert args[0] == page.ListPagesRequest()
# Async variant of the ListPages happy path; the stub's awaitable is faked
# with FakeUnaryUnaryCall and the result is an AsyncPager.
@pytest.mark.asyncio
async def test_list_pages_async(
    transport: str = "grpc_asyncio", request_type=page.ListPagesRequest
):
    client = PagesAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.ListPagesResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_pages(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.ListPagesRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPagesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
# Async client must also accept a plain dict request.
@pytest.mark.asyncio
async def test_list_pages_async_from_dict():
    await test_list_pages_async(request_type=dict)
# Routing: the `parent` field must be echoed into the x-goog-request-params
# gRPC metadata header.
def test_list_pages_field_headers():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.ListPagesRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        call.return_value = page.ListPagesResponse()
        client.list_pages(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Async counterpart of the routing-header check for ListPages.
@pytest.mark.asyncio
async def test_list_pages_field_headers_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.ListPagesRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.ListPagesResponse()
        )
        await client.list_pages(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Flattened kwargs (parent=...) must populate the corresponding request field.
def test_list_pages_flattened():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.ListPagesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_pages(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_pages_flattened_error():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_pages(
            page.ListPagesRequest(), parent="parent_value",
        )
# Async variant: flattened kwargs must populate the request fields.
@pytest.mark.asyncio
async def test_list_pages_flattened_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A bare ListPagesResponse assignment here would be a dead store --
        # the awaitable wrapper below is what the async surface consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.ListPagesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_pages(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_pages_flattened_error_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_pages(
            page.ListPagesRequest(), parent="parent_value",
        )
# The pager must iterate across all fake pages (6 Page items total) and
# carry the routing metadata it was created with.
def test_list_pages_pager():
    # NOTE(review): AnonymousCredentials is passed as a class (no parentheses)
    # here, unlike sibling tests -- appears intentional in the generated code;
    # confirm before "fixing".
    client = PagesClient(credentials=credentials.AnonymousCredentials,)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_pages(request={})

        assert pager._metadata == metadata

        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, page.Page) for i in results)
# Iterating pager.pages must yield raw pages whose next_page_token values
# follow the faked sequence (final page has an empty token).
def test_list_pages_pages():
    client = PagesClient(credentials=credentials.AnonymousCredentials,)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_pages), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        pages = list(client.list_pages(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
# Async pager: `async for` over the pager must yield all 6 faked Page items.
@pytest.mark.asyncio
async def test_list_pages_async_pager():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials,)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pages), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        async_pager = await client.list_pages(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, page.Page) for i in responses)
# Async pager: iterating .pages must expose each raw page's next_page_token.
@pytest.mark.asyncio
async def test_list_pages_async_pages():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials,)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_pages), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            page.ListPagesResponse(
                pages=[page.Page(), page.Page(), page.Page(),], next_page_token="abc",
            ),
            page.ListPagesResponse(pages=[], next_page_token="def",),
            page.ListPagesResponse(pages=[page.Page(),], next_page_token="ghi",),
            page.ListPagesResponse(pages=[page.Page(), page.Page(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_pages(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
# GetPage happy path: the stub receives a GetPageRequest and the fields of
# the faked Page surface on the response.
def test_get_page(transport: str = "grpc", request_type=page.GetPageRequest):
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.Page(
            name="name_value",
            display_name="display_name_value",
            transition_route_groups=["transition_route_groups_value"],
        )
        response = client.get_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.GetPageRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
# A plain dict request must be coerced to a GetPageRequest.
def test_get_page_from_dict():
    test_get_page(request_type=dict)
def test_get_page_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        client.get_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call still sends a default-constructed request message.
        assert args[0] == page.GetPageRequest()
# Async variant of the GetPage happy path.
@pytest.mark.asyncio
async def test_get_page_async(
    transport: str = "grpc_asyncio", request_type=page.GetPageRequest
):
    client = PagesAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            page.Page(
                name="name_value",
                display_name="display_name_value",
                transition_route_groups=["transition_route_groups_value"],
            )
        )
        response = await client.get_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.GetPageRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
# Async client must also accept a plain dict request.
@pytest.mark.asyncio
async def test_get_page_async_from_dict():
    await test_get_page_async(request_type=dict)
# Routing: the `name` field must be echoed into the x-goog-request-params
# gRPC metadata header.
def test_get_page_field_headers():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.GetPageRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        call.return_value = page.Page()
        client.get_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
# Async counterpart of the routing-header check for GetPage.
@pytest.mark.asyncio
async def test_get_page_field_headers_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.GetPageRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(page.Page())
        await client.get_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
# Flattened kwargs (name=...) must populate the corresponding request field.
def test_get_page_flattened():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = page.Page()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_page(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_get_page_flattened_error():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_page(
            page.GetPageRequest(), name="name_value",
        )
# Async variant: flattened kwargs must populate the request fields.
@pytest.mark.asyncio
async def test_get_page_flattened_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A bare page.Page() assignment here would be a dead store -- the
        # awaitable wrapper below is what the async surface consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(page.Page())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_page(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_page_flattened_error_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_page(
            page.GetPageRequest(), name="name_value",
        )
# CreatePage happy path: the stub receives a CreatePageRequest and the fields
# of the faked gcdc_page.Page surface on the response.
def test_create_page(transport: str = "grpc", request_type=gcdc_page.CreatePageRequest):
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page(
            name="name_value",
            display_name="display_name_value",
            transition_route_groups=["transition_route_groups_value"],
        )
        response = client.create_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.CreatePageRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
# A plain dict request must be coerced to a CreatePageRequest.
def test_create_page_from_dict():
    test_create_page(request_type=dict)
def test_create_page_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        client.create_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty call still sends a default-constructed request message.
        assert args[0] == gcdc_page.CreatePageRequest()
# Async variant of the CreatePage happy path.
@pytest.mark.asyncio
async def test_create_page_async(
    transport: str = "grpc_asyncio", request_type=gcdc_page.CreatePageRequest
):
    client = PagesAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_page.Page(
                name="name_value",
                display_name="display_name_value",
                transition_route_groups=["transition_route_groups_value"],
            )
        )
        response = await client.create_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.CreatePageRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gcdc_page.Page)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.transition_route_groups == ["transition_route_groups_value"]
# Async client must also accept a plain dict request.
@pytest.mark.asyncio
async def test_create_page_async_from_dict():
    await test_create_page_async(request_type=dict)
# Routing: the `parent` field must be echoed into the x-goog-request-params
# gRPC metadata header.
def test_create_page_field_headers():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.CreatePageRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        call.return_value = gcdc_page.Page()
        client.create_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Async counterpart of the routing-header check for CreatePage.
@pytest.mark.asyncio
async def test_create_page_field_headers_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.CreatePageRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        await client.create_page(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
# Flattened kwargs (parent=..., page=...) must populate the corresponding
# request fields.
def test_create_page_flattened():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_page(
            parent="parent_value", page=gcdc_page.Page(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].page == gcdc_page.Page(name="name_value")
def test_create_page_flattened_error():
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_page(
            gcdc_page.CreatePageRequest(),
            parent="parent_value",
            page=gcdc_page.Page(name="name_value"),
        )
# Async variant: flattened kwargs must populate the request fields.
@pytest.mark.asyncio
async def test_create_page_flattened_async():
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (A bare gcdc_page.Page() assignment here would be a dead store --
        # the awaitable wrapper below is what the async surface consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_page(
            parent="parent_value", page=gcdc_page.Page(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
        assert args[0].page == gcdc_page.Page(name="name_value")
@pytest.mark.asyncio
async def test_create_page_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_page(
            gcdc_page.CreatePageRequest(),
            parent="parent_value",
            page=gcdc_page.Page(name="name_value"),
        )
def test_update_page(transport: str = "grpc", request_type=gcdc_page.UpdatePageRequest):
    """update_page sends the expected request and unmarshals the Page response."""
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page(
            name="name_value",
            display_name="display_name_value",
            transition_route_groups=["transition_route_groups_value"],
        )
        response = client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.UpdatePageRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, gcdc_page.Page)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.transition_route_groups == ["transition_route_groups_value"]
def test_update_page_from_dict():
    """update_page also accepts the request as a plain dict."""
    test_update_page(request_type=dict)
def test_update_page_empty_call():
    """Calling update_page with no arguments sends a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        client.update_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.UpdatePageRequest()
@pytest.mark.asyncio
async def test_update_page_async(
    transport: str = "grpc_asyncio", request_type=gcdc_page.UpdatePageRequest
):
    """Async update_page sends the expected request and unmarshals the Page."""
    client = PagesAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            gcdc_page.Page(
                name="name_value",
                display_name="display_name_value",
                transition_route_groups=["transition_route_groups_value"],
            )
        )
        response = await client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == gcdc_page.UpdatePageRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, gcdc_page.Page)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.transition_route_groups == ["transition_route_groups_value"]
@pytest.mark.asyncio
async def test_update_page_async_from_dict():
    """Async update_page also accepts the request as a plain dict."""
    await test_update_page_async(request_type=dict)
def test_update_page_field_headers():
    """update_page forwards page.name as an x-goog-request-params header."""
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.UpdatePageRequest()
    request.page.name = "page.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        call.return_value = gcdc_page.Page()
        client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "page.name=page.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_page_field_headers_async():
    """Async update_page forwards page.name as an x-goog-request-params header."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = gcdc_page.UpdatePageRequest()
    request.page.name = "page.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        await client.update_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "page.name=page.name/value",) in kw["metadata"]
def test_update_page_flattened():
    """update_page accepts flattened page/update_mask args and maps them."""
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = gcdc_page.Page()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_page(
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].page == gcdc_page.Page(name="name_value")
        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
def test_update_page_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_page(
            gcdc_page.UpdatePageRequest(),
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_page_flattened_async():
    """Async update_page accepts flattened page/update_mask args and maps them."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_page), "__call__") as call:
        # Designate an appropriate return value for the call.  (The generated
        # plain-Page assignment that preceded this one was dead code — it was
        # immediately overwritten — so it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_page.Page())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_page(
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].page == gcdc_page.Page(name="name_value")
        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_page_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_page(
            gcdc_page.UpdatePageRequest(),
            page=gcdc_page.Page(name="name_value"),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
        )
def test_delete_page(transport: str = "grpc", request_type=page.DeletePageRequest):
    """delete_page sends the expected request and returns None (Empty RPC)."""
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.DeletePageRequest()
        # Establish that the response is the type that we expect.
        assert response is None
def test_delete_page_from_dict():
    """delete_page also accepts the request as a plain dict."""
    test_delete_page(request_type=dict)
def test_delete_page_empty_call():
    """Calling delete_page with no arguments sends a default request proto."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        client.delete_page()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.DeletePageRequest()
@pytest.mark.asyncio
async def test_delete_page_async(
    transport: str = "grpc_asyncio", request_type=page.DeletePageRequest
):
    """Async delete_page sends the expected request and returns None."""
    client = PagesAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == page.DeletePageRequest()
        # Establish that the response is the type that we expect.
        assert response is None
@pytest.mark.asyncio
async def test_delete_page_async_from_dict():
    """Async delete_page also accepts the request as a plain dict."""
    await test_delete_page_async(request_type=dict)
def test_delete_page_field_headers():
    """delete_page forwards name as an x-goog-request-params header."""
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.DeletePageRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        call.return_value = None
        client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_page_field_headers_async():
    """Async delete_page forwards name as an x-goog-request-params header."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = page.DeletePageRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_page(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_page_flattened():
    """delete_page accepts a flattened name arg and maps it onto the request."""
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_page(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
def test_delete_page_flattened_error():
    """Mixing a request object with a flattened field raises ValueError."""
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_page(
            page.DeletePageRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_page_flattened_async():
    """Async delete_page accepts a flattened name arg and maps it onto the request."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_page), "__call__") as call:
        # Designate an appropriate return value for the call.  (The generated
        # `call.return_value = None` that preceded this one was dead code — it
        # was immediately overwritten — so it has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_page(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_page_flattened_error_async():
    """Async: mixing a request object with a flattened field raises ValueError."""
    client = PagesAsyncClient(credentials=credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_page(
            page.DeletePageRequest(), name="name_value",
        )
def test_credentials_transport_error():
    """Credentials, credentials_file, or scopes cannot be combined with a transport."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = PagesClient(
            credentials=credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = PagesClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = PagesClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client built around a pre-made transport exposes that same instance."""
    custom_transport = transports.PagesGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    assert PagesClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a non-empty grpc_channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.PagesGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
    transport = transports.PagesGrpcAsyncIOTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The synchronous client defaults to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = PagesClient(credentials=credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.PagesGrpcTransport,)
def test_pages_base_transport_error():
    """Supplying both credentials and credentials_file raises DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    # (the constructor result is irrelevant here, so it is not bound to a name;
    # the previous unused `transport =` assignment was removed).
    with pytest.raises(exceptions.DuplicateCredentialArgs):
        transports.PagesTransport(
            credentials=credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_pages_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.PagesTransport(
            credentials=credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "list_pages",
        "get_page",
        "create_page",
        "update_page",
        "delete_page",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
def test_pages_base_transport_with_credentials_file():
    """credentials_file is loaded with the Dialogflow scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        auth, "load_credentials_from_file"
    ) as load_creds, mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.PagesTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
def test_pages_base_transport_with_adc():
    """The base transport falls back to ADC when no credentials are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(auth, "default") as adc, mock.patch(
        "google.cloud.dialogflowcx_v3.services.pages.transports.PagesTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transport = transports.PagesTransport()
        adc.assert_called_once()
def test_pages_auth_adc():
    """The client requests ADC with the Dialogflow scopes when unconfigured."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        PagesClient()
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id=None,
        )
def test_pages_transport_auth_adc():
    """The gRPC transport requests ADC with scopes and the quota project."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(auth, "default") as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.PagesGrpcTransport(
            host="squid.clam.whelk", quota_project_id="octopus"
        )
        adc.assert_called_once_with(
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport],
)
def test_pages_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS: explicit ssl_channel_credentials wins; otherwise the cert callback is used."""
    cred = credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=(
                "https://www.googleapis.com/auth/cloud-platform",
                "https://www.googleapis.com/auth/dialogflow",
            ),
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_pages_host_no_port():
    """An api_endpoint without an explicit port resolves to port 443."""
    opts = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com")
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "dialogflow.googleapis.com:443"
def test_pages_host_with_port():
    """An api_endpoint with an explicit port keeps that port."""
    opts = client_options.ClientOptions(api_endpoint="dialogflow.googleapis.com:8000")
    client = PagesClient(
        credentials=credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_pages_grpc_transport_channel():
    """A caller-supplied channel is adopted by the sync transport unchanged."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.PagesGrpcTransport(host="squid.clam.whelk", channel=channel,)
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` is the correct identity check (was `== None`, PEP 8 / E711).
    assert transport._ssl_channel_credentials is None
def test_pages_grpc_asyncio_transport_channel():
    """A caller-supplied channel is adopted by the asyncio transport unchanged."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.PagesGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` is the correct identity check (was `== None`, PEP 8 / E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport],
)
def test_pages_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint + client_cert_source still build an mTLS channel."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/dialogflow",
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.PagesGrpcTransport, transports.PagesGrpcAsyncIOTransport],
)
def test_pages_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint without a cert source uses ADC SSL credentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    "https://www.googleapis.com/auth/cloud-platform",
                    "https://www.googleapis.com/auth/dialogflow",
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_entity_type_path():
    """entity_type_path renders the entity type resource name."""
    parts = ("squid", "clam", "whelk", "octopus")
    rendered = PagesClient.entity_type_path(*parts)
    assert (
        rendered
        == "projects/%s/locations/%s/agents/%s/entityTypes/%s" % parts
    )
def test_parse_entity_type_path():
    """parse_entity_type_path inverts entity_type_path."""
    fields = {
        "project": "oyster",
        "location": "nudibranch",
        "agent": "cuttlefish",
        "entity_type": "mussel",
    }
    # Check that the path construction is reversible.
    rendered = PagesClient.entity_type_path(**fields)
    assert PagesClient.parse_entity_type_path(rendered) == fields
def test_flow_path():
    """flow_path renders the flow resource name from its components."""
    parts = ("winkle", "nautilus", "scallop", "abalone")
    rendered = PagesClient.flow_path(*parts)
    assert rendered == "projects/%s/locations/%s/agents/%s/flows/%s" % parts
def test_parse_flow_path():
    """parse_flow_path inverts flow_path."""
    fields = {
        "project": "squid",
        "location": "clam",
        "agent": "whelk",
        "flow": "octopus",
    }
    # Check that the path construction is reversible.
    rendered = PagesClient.flow_path(**fields)
    assert PagesClient.parse_flow_path(rendered) == fields
def test_intent_path():
    """intent_path renders the intent resource name from its components."""
    parts = ("oyster", "nudibranch", "cuttlefish", "mussel")
    rendered = PagesClient.intent_path(*parts)
    assert rendered == "projects/%s/locations/%s/agents/%s/intents/%s" % parts
def test_parse_intent_path():
    """parse_intent_path inverts intent_path."""
    fields = {
        "project": "winkle",
        "location": "nautilus",
        "agent": "scallop",
        "intent": "abalone",
    }
    # Check that the path construction is reversible.
    rendered = PagesClient.intent_path(**fields)
    assert PagesClient.parse_intent_path(rendered) == fields
def test_page_path():
    """page_path renders the page resource name from its components."""
    parts = ("squid", "clam", "whelk", "octopus", "oyster")
    rendered = PagesClient.page_path(*parts)
    assert (
        rendered
        == "projects/%s/locations/%s/agents/%s/flows/%s/pages/%s" % parts
    )
def test_parse_page_path():
    """parse_page_path inverts page_path."""
    fields = {
        "project": "nudibranch",
        "location": "cuttlefish",
        "agent": "mussel",
        "flow": "winkle",
        "page": "nautilus",
    }
    # Check that the path construction is reversible.
    rendered = PagesClient.page_path(**fields)
    assert PagesClient.parse_page_path(rendered) == fields
def test_transition_route_group_path():
    """transition_route_group_path renders the transition route group name."""
    parts = ("scallop", "abalone", "squid", "clam", "whelk")
    rendered = PagesClient.transition_route_group_path(*parts)
    template = (
        "projects/%s/locations/%s/agents/%s/flows/%s"
        "/transitionRouteGroups/%s"
    )
    assert rendered == template % parts
def test_parse_transition_route_group_path():
    """parse_transition_route_group_path inverts transition_route_group_path."""
    fields = {
        "project": "octopus",
        "location": "oyster",
        "agent": "nudibranch",
        "flow": "cuttlefish",
        "transition_route_group": "mussel",
    }
    # Check that the path construction is reversible.
    rendered = PagesClient.transition_route_group_path(**fields)
    assert PagesClient.parse_transition_route_group_path(rendered) == fields
def test_webhook_path():
    """webhook_path renders the webhook resource name from its components."""
    parts = ("winkle", "nautilus", "scallop", "abalone")
    rendered = PagesClient.webhook_path(*parts)
    assert rendered == "projects/%s/locations/%s/agents/%s/webhooks/%s" % parts
def test_parse_webhook_path():
    """parse_webhook_path inverts webhook_path."""
    fields = {
        "project": "squid",
        "location": "clam",
        "agent": "whelk",
        "webhook": "octopus",
    }
    # Check that the path construction is reversible.
    rendered = PagesClient.webhook_path(**fields)
    assert PagesClient.parse_webhook_path(rendered) == fields
def test_common_billing_account_path():
    """common_billing_account_path renders a billingAccounts/... name."""
    assert (
        PagesClient.common_billing_account_path("oyster")
        == "billingAccounts/oyster"
    )
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    fields = {"billing_account": "nudibranch"}
    # Check that the path construction is reversible.
    rendered = PagesClient.common_billing_account_path(**fields)
    assert PagesClient.parse_common_billing_account_path(rendered) == fields
def test_common_folder_path():
    """common_folder_path renders a folders/... name."""
    assert PagesClient.common_folder_path("cuttlefish") == "folders/cuttlefish"
def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    fields = {"folder": "mussel"}
    # Check that the path construction is reversible.
    rendered = PagesClient.common_folder_path(**fields)
    assert PagesClient.parse_common_folder_path(rendered) == fields
def test_common_organization_path():
    """common_organization_path renders an organizations/... name."""
    assert (
        PagesClient.common_organization_path("winkle") == "organizations/winkle"
    )
def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    fields = {"organization": "nautilus"}
    # Check that the path construction is reversible.
    rendered = PagesClient.common_organization_path(**fields)
    assert PagesClient.parse_common_organization_path(rendered) == fields
def test_common_project_path():
    """common_project_path renders a projects/... name."""
    assert PagesClient.common_project_path("scallop") == "projects/scallop"
def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    fields = {"project": "abalone"}
    # Check that the path construction is reversible.
    rendered = PagesClient.common_project_path(**fields)
    assert PagesClient.parse_common_project_path(rendered) == fields
def test_common_location_path():
    """common_location_path() renders projects/{project}/locations/{location}."""
    project = "squid"
    location = "clam"
    expected = f"projects/{project}/locations/{location}"
    assert PagesClient.common_location_path(project, location) == expected
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    components = {"project": "whelk", "location": "octopus"}
    rendered = PagesClient.common_location_path(**components)
    assert PagesClient.parse_common_location_path(rendered) == components
def test_client_withDEFAULT_CLIENT_INFO():
    """Client construction forwards client_info to _prep_wrapped_messages."""
    info = gapic_v1.client_info.ClientInfo()
    # Direct client construction.
    with mock.patch.object(transports.PagesTransport, "_prep_wrapped_messages") as prep:
        PagesClient(
            credentials=credentials.AnonymousCredentials(), client_info=info,
        )
        prep.assert_called_once_with(info)
    # Construction through the resolved transport class.
    with mock.patch.object(transports.PagesTransport, "_prep_wrapped_messages") as prep:
        transport_cls = PagesClient.get_transport_class()
        transport_cls(
            credentials=credentials.AnonymousCredentials(), client_info=info,
        )
        prep.assert_called_once_with(info)
| [
"50260923+jeevanakruthi@users.noreply.github.com"
] | 50260923+jeevanakruthi@users.noreply.github.com |
d57b2dfdaf0801d20f664f6d022bce430e4b2b95 | 86813bf514f3e0257f92207f40a68443f08ee44b | /0892 三维形体的表面积/0892 三维形体的表面积.py | 28d502d4a4003699f10cb5c26e011ee620470549 | [] | no_license | Aurora-yuan/Leetcode_Python3 | 4ce56679b48862c87addc8cd870cdd525c9d926c | 720bb530850febc2aa67a56a7a0b3a85ab37f415 | refs/heads/master | 2021-07-12T13:23:19.399155 | 2020-10-21T03:14:36 | 2020-10-21T03:14:36 | 212,998,500 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | #label: maths difficulty: easy
class Solution:
    def surfaceArea(self, grid: List[List[int]]) -> int:
        """Total surface area of towers of unit cubes placed on an N x N grid.

        A tower of height h contributes 2 + 4*h faces (top, bottom, sides);
        each pair of adjacent towers hides 2 * min(h1, h2) touching faces.
        """
        area = 0
        for r, row in enumerate(grid):
            for c, height in enumerate(row):
                if height > 0:
                    area += 2 + 4 * height
                    if r > 0:
                        area -= 2 * min(height, grid[r - 1][c])
                    if c > 0:
                        area -= 2 * min(height, row[c - 1])
        return area
| [
"noreply@github.com"
] | Aurora-yuan.noreply@github.com |
23942b0d1b41fd4f12a183199851c68f55ddcee6 | 7e41d5ec2f8ba704c99bbb252a216566fa0e7ce3 | /Clases/Estadistica/centralizacion.py | 26fffe21b157bcf0a85b3c5175119168738d30dd | [
"MIT"
] | permissive | juanpanu-zz/PM_DataScience | 11cf1fab4939ba415bbae28b134182e7f3108e37 | 24e71616dae692e931e95cd3815ca88fa9b8a46a | refs/heads/master | 2023-01-03T01:15:50.893425 | 2020-10-24T16:32:16 | 2020-10-24T16:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | datos= [0,0,0,0,0,0,0,0,0,0,0,0,0
,1,1,1,1,1,1,1,1,1,1
,2,2,2,2,2,2,2
,3,3,3,3,3,3
,4,4]
def media(datos):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total, cuenta = sum(datos), len(datos)
    return total / cuenta
def mediana(datos):
    """Median of an already-sorted, non-empty sequence.

    Fixes two defects in the previous version: the odd-length branch
    indexed the list with a float (``(len+1)/2``), which raises TypeError
    in Python 3, and the even-length branch averaged the upper-middle
    element with itself instead of the two middle elements.
    """
    n = len(datos)
    mitad = n // 2
    if n % 2 == 0:
        # Even length: average the two middle elements.
        return (datos[mitad - 1] + datos[mitad]) / 2
    # Odd length: the single middle element is the median.
    return datos[mitad]
if __name__ == '__main__':
    # Report both centralisation measures for the sample data.
    for medida in (media, mediana):
        print(medida(datos))
"juanpa.nb@gmail.com"
] | juanpa.nb@gmail.com |
02d7211f3b1a728472c1ffef0d6d0e717bc29ca3 | df982b09cb71edeb2f306d5b966c13a45b9a9e70 | /src/encode_task_trimmomatic.py | 83bd7b94706fa278c3d5f819498ba86cdab5b840 | [
"MIT"
] | permissive | Fnyasimi/chip-seq-pipeline2 | 7857c752abbb6fa9c1b3e2e19e54776cdf2583b5 | 15d87e4dfd6a4fdf1419b17a1f25fcde75252e1c | refs/heads/master | 2020-12-15T07:52:35.261071 | 2020-05-10T10:40:36 | 2020-05-10T10:40:36 | 235,036,517 | 0 | 0 | MIT | 2020-05-10T10:40:37 | 2020-01-20T06:47:59 | null | UTF-8 | Python | false | false | 6,081 | py | #!/usr/bin/env python
# ENCODE DCC Trimmomatic wrapper
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
from encode_lib_common import (
assert_file_not_empty, log, ls_l, mkdir_p, rm_f,
run_shell_cmd, strip_ext_fastq)
from encode_lib_genomic import (
locate_trimmomatic)
def parse_arguments(debug=False):
    """Define and parse command-line arguments for the Trimmomatic wrapper.

    Returns the parsed argparse.Namespace. Also sets the log level and
    logs the raw command line. Raises ValueError when --crop-length is
    not a positive integer.
    """
    parser = argparse.ArgumentParser(
        prog='ENCODE DCC Trimmomatic wrapper.')
    parser.add_argument('--fastq1',
                        help='FASTQ R1 to be trimmed.')
    parser.add_argument('--fastq2',
                        help='FASTQ R2 to be trimmed.')
    parser.add_argument('--paired-end', action="store_true",
                        help='Paired-end FASTQs.')
    parser.add_argument('--crop-length', type=int, required=True,
                        help='Number of basepair to crop.'
                        'Trimmomatic\'s parameter CROP.')
    parser.add_argument('--crop-length-tol', type=int, default=2,
                        help='Crop length tolerance to keep shorter reads '
                        'around the crop length. '
                        'Trimmomatic\'s parameter MINLEN will be --crop-length '
                        '- abs(--crop-length-tol).')
    parser.add_argument('--out-dir-R1', default='', type=str,
                        help='Output directory for cropped R1 fastq.')
    parser.add_argument('--out-dir-R2', default='', type=str,
                        help='Output directory for cropped R2 fastq.')
    parser.add_argument('--trimmomatic-java-heap',
                        help='Trimmomatic\'s Java max. heap: java -jar Trimmomatic.jar '
                        '-Xmx[MAX_HEAP]')
    parser.add_argument('--nth', type=int, default=1,
                        help='Number of threads to parallelize.')
    parser.add_argument('--log-level', default='INFO',
                        # Fixed: 'CRITICAL' appeared twice and 'ERROR' was
                        # mis-ordered in the original choices list.
                        choices=['NOTSET', 'DEBUG', 'INFO',
                                 'WARNING', 'ERROR', 'CRITICAL'],
                        help='Log level')
    args = parser.parse_args()
    # Reject zero AND negative crop lengths (the old `if not args.crop_length`
    # only caught 0).
    if args.crop_length <= 0:
        raise ValueError('Crop length must be > 0.')
    log.setLevel(args.log_level)
    log.info(sys.argv)
    return args
def trimmomatic_se(fastq1, crop_length, crop_length_tol, out_dir,
                   nth=1, java_heap=None):
    """Crop single-end FASTQ reads to `crop_length` with Trimmomatic.

    Returns the path of the cropped .fastq.gz file written to `out_dir`.
    Reads shorter than crop_length - abs(crop_length_tol) are dropped
    (Trimmomatic MINLEN).
    """
    out_prefix = os.path.join(out_dir,
                              os.path.basename(strip_ext_fastq(fastq1)))
    tol = abs(crop_length_tol)
    min_length = crop_length - tol
    cropped = '{p}.crop_{cl}-{tol}bp.fastq.gz'.format(
        p=out_prefix, cl=crop_length, tol=tol)
    java_heap_param = '-Xmx6G' if java_heap is None else '-Xmx{}'.format(java_heap)
    cmd = ('java -XX:ParallelGCThreads=1 {param} -jar {jar} SE -threads {nth} '
           '{fq1} {cropped} MINLEN:{ml} CROP:{cl}').format(
        param=java_heap_param,
        jar=locate_trimmomatic(),
        nth=nth,
        fq1=fastq1,
        cropped=cropped,
        ml=min_length,
        cl=crop_length)
    run_shell_cmd(cmd)
    return cropped
def trimmomatic_pe(fastq1, fastq2, crop_length, crop_length_tol, out_dir_R1, out_dir_R2,
                   nth=1, java_heap=None):
    """Crop paired-end FASTQ reads to `crop_length` with Trimmomatic.

    Returns the paths of the cropped (paired) R1 and R2 .fastq.gz files.
    The unpaired outputs Trimmomatic requires are written to temporary
    files and deleted afterwards.
    """
    prefix_r1 = os.path.join(
        out_dir_R1, os.path.basename(strip_ext_fastq(fastq1)))
    prefix_r2 = os.path.join(
        out_dir_R2, os.path.basename(strip_ext_fastq(fastq2)))
    tol = abs(crop_length_tol)
    min_length = crop_length - tol
    cropped_R1 = '{p}.crop_{cl}-{tol}bp.fastq.gz'.format(
        p=prefix_r1, cl=crop_length, tol=tol)
    cropped_R2 = '{p}.crop_{cl}-{tol}bp.fastq.gz'.format(
        p=prefix_r2, cl=crop_length, tol=tol)
    unpaired_R1 = '{}.tmp'.format(cropped_R1)
    unpaired_R2 = '{}.tmp'.format(cropped_R2)
    java_heap_param = '-Xmx6G' if java_heap is None else '-Xmx{}'.format(java_heap)
    cmd = ('java -XX:ParallelGCThreads=1 {param} -jar {jar} PE -threads {nth} '
           '{fq1} {fq2} {cropped1} {tmp_cropped1} {cropped2} {tmp_cropped2} '
           'MINLEN:{ml} CROP:{cl}').format(
        param=java_heap_param,
        jar=locate_trimmomatic(),
        nth=nth,
        fq1=fastq1,
        fq2=fastq2,
        cropped1=cropped_R1,
        tmp_cropped1=unpaired_R1,
        cropped2=cropped_R2,
        tmp_cropped2=unpaired_R2,
        ml=min_length,
        cl=crop_length)
    run_shell_cmd(cmd)
    rm_f([unpaired_R1, unpaired_R2])
    return cropped_R1, cropped_R2
def main():
    """Entry point: parse args, crop the FASTQ(s), and sanity-check the output."""
    args = parse_arguments()

    log.info('Initializing and making output directory...')
    mkdir_p(args.out_dir_R1)
    if args.paired_end:
        mkdir_p(args.out_dir_R2)

    log.info(
        'Cropping fastqs with Trimmomatic... '
        'crop_length={cl}, crop_length_tol={clt}'.format(
            cl=args.crop_length,
            clt=args.crop_length_tol))
    if args.paired_end:
        cropped_R1, _ = trimmomatic_pe(
            args.fastq1, args.fastq2,
            args.crop_length, args.crop_length_tol,
            args.out_dir_R1, args.out_dir_R2,
            args.nth,
            args.trimmomatic_java_heap)
    else:
        cropped_R1 = trimmomatic_se(
            args.fastq1,
            args.crop_length, args.crop_length_tol,
            args.out_dir_R1,
            args.nth,
            args.trimmomatic_java_heap)

    log.info('List all files in output directory...')
    ls_l(args.out_dir_R1)
    if args.paired_end:
        ls_l(args.out_dir_R2)

    # Checking R1 is sufficient: Trimmomatic drops reads pairwise.
    log.info('Checking if output is empty...')
    assert_file_not_empty(cropped_R1, help=
        'No reads in FASTQ after cropping. crop_length might be too high? '
        'While cropping, Trimmomatic (with MINLEN=crop_length-abs(crop_length_tol)) '
        'excludes all reads SHORTER than crop_length.')
    log.info('All done.')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"leepc12@gmail.com"
] | leepc12@gmail.com |
7de9384fbd2f2372f2d386386c9cd8ff2c6cbc27 | 591a05e50f2515f6bd4605de6ed9ed7d3936ad9d | /welib/wt_theory/tests/test_wakeexpansion.py | 54e4f50b1cb6405abe136bc768d734c18a01b8ff | [
"MIT"
] | permissive | ebranlard/welib | 679edeec85feb629dc27047a62422d469c6e0081 | 3486e87c6348e9580099fe5c360138e762ab3ea9 | refs/heads/main | 2023-08-09T13:31:40.253283 | 2023-06-16T18:17:09 | 2023-06-16T18:17:09 | 153,533,129 | 50 | 25 | MIT | 2023-06-16T18:17:11 | 2018-10-17T22:47:46 | Python | UTF-8 | Python | false | false | 1,027 | py | import unittest
import numpy as np
from welib.wt_theory.wakeexpansion import *
class TestExpansion(unittest.TestCase):
    """Checks for the wake-expansion helper functions."""

    def test_downstreamD(self):
        """Analytical and interpolated downstream distances must agree."""
        ct = 0.8
        frac = 0.5
        full_expansion = wake_expansion_momentum(CT=ct)
        target_expansion = 1 + frac * (full_expansion - 1)
        x_analytical = downstreamDistanceForGivenExpansion(
            ct, target_expansion, model='cylinder', method='analytical')
        x_numerical = downstreamDistanceForGivenExpansion(
            ct, target_expansion, model='cylinder', method='interp')
        np.testing.assert_almost_equal(x_analytical, x_numerical, 5)

    def test_methods(self):
        """Momentum-theory expansion and cylinder-model radii match references."""
        ct = 0.8
        rw0 = wake_expansion_momentum(CT=ct)
        np.testing.assert_almost_equal(rw0, 1.27201965, 7)
        positions = [0, 1, 20]  # downstream distance, r/R
        radii = wake_expansion(positions, CT=ct, model='cylinder')
        np.testing.assert_almost_equal(radii, [1, 1.17048, 1.27153], 5)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"emmanuel.branlard@nrel.gov"
] | emmanuel.branlard@nrel.gov |
515e5f55d7782b81daf3473bf6affdc0b76a7cbe | 1f5f8f95530003c6c66419519d78cb52d21f65c0 | /projects/golem_api/pages/page.py | 4181623ca86611c093bc53b9c670ae1e4e098000 | [] | no_license | golemhq/golem-tests | c5d3ab04b1ea3755d8b812229feb60f513d039ac | dff8fd3a606c3d1ef8667aece6fddef8ac441230 | refs/heads/master | 2023-08-17T23:05:26.286718 | 2021-10-04T20:34:17 | 2021-10-04T20:34:17 | 105,579,436 | 4 | 1 | null | 2018-11-19T00:14:24 | 2017-10-02T20:05:55 | Python | UTF-8 | Python | false | false | 2,597 | py | import requests
from projects.golem_api.pages.utils import url, headers
# URL paths of the Golem web API endpoints used by the helpers below.
DELETE_PAGE_ENDPOINT = '/page/delete'
DUPLICATE_PAGE_ENDPOINT = '/page/duplicate'
RENAME_PAGE_ENDPOINT = '/page/rename'
PAGE_COMPONENTS_ENDPOINT = '/page/components'
SAVE_PAGE_ENDPOINT = '/page/save'
SAVE_PAGE_CODE_ENDPOINT = '/page/code/save'
RENAME_PAGE_DIRECTORY_ENDPOINT = '/page/directory/rename'
DELETE_PAGE_DIRECTORY_ENDPOINT = '/page/directory/delete'
def delete_page(project_name, page_name, user=None):
    """Delete a page; issues DELETE /page/delete and returns the response."""
    payload = {'project': project_name, 'fullPath': page_name}
    return requests.delete(url(DELETE_PAGE_ENDPOINT), headers=headers(user),
                           json=payload)
def duplicate_page(project_name, page_name, new_page_name, user=None):
    """Copy a page under a new name; issues POST /page/duplicate."""
    payload = {
        'project': project_name,
        'fullPath': page_name,
        'newFileFullPath': new_page_name,
    }
    return requests.post(url(DUPLICATE_PAGE_ENDPOINT), headers=headers(user),
                         json=payload)
def rename_page(project_name, page_name, new_page_name, user=None):
    """Rename a page; issues POST /page/rename."""
    payload = {
        'project': project_name,
        'fullFilename': page_name,
        'newFullFilename': new_page_name,
    }
    return requests.post(url(RENAME_PAGE_ENDPOINT), headers=headers(user),
                         json=payload)
def get_page_components(project_name, page_name, user=None):
    """Fetch a page's components; issues GET /page/components."""
    query = {'project': project_name, 'page': page_name}
    return requests.get(url(PAGE_COMPONENTS_ENDPOINT), headers=headers(user),
                        params=query)
def save_page(project_name, page_name, elements, functions, import_lines, user=None):
    """Persist a page's elements, functions and imports; issues PUT /page/save."""
    payload = {
        'project': project_name,
        'pageName': page_name,
        'elements': elements,
        'functions': functions,
        'importLines': import_lines,
    }
    return requests.put(url(SAVE_PAGE_ENDPOINT), headers=headers(user), json=payload)
def save_page_code(project_name, page_name, content, user=None):
    """Save a page's raw source code; issues PUT /page/code/save."""
    payload = {
        'project': project_name,
        'pageName': page_name,
        'content': content,
    }
    return requests.put(url(SAVE_PAGE_CODE_ENDPOINT), headers=headers(user),
                        json=payload)
def rename_page_directory(project_name, dir_name, new_dir_name, user=None):
    """Rename a pages directory; issues POST /page/directory/rename."""
    payload = {
        'project': project_name,
        'fullDirname': dir_name,
        'newFullDirname': new_dir_name,
    }
    return requests.post(url(RENAME_PAGE_DIRECTORY_ENDPOINT), headers=headers(user),
                         json=payload)
def delete_page_directory(project_name, dir_name, user=None):
    """Delete a pages directory; issues DELETE /page/directory/delete."""
    payload = {'project': project_name, 'fullDirname': dir_name}
    return requests.delete(url(DELETE_PAGE_DIRECTORY_ENDPOINT), headers=headers(user),
                           json=payload)
| [
"luciano@lucianorenzi.com"
] | luciano@lucianorenzi.com |
ecd23624ad4bf1a877b5602da7d072f654ced6f2 | 19316c08712a502b1124f2b55cb98bfcbcca7af5 | /dev/python/2018-07-25 findcrash.py | 3d319feb710cd4cc991faf1eb345cf56d8c88d28 | [
"MIT"
] | permissive | swharden/pyABF | 49a50d53015c50f1d5524242d4192718e6f7ccfa | 06247e01ca3c19f5419c3b9b2207ee544e30dbc5 | refs/heads/main | 2023-08-28T02:31:59.540224 | 2023-08-17T16:34:48 | 2023-08-17T16:34:48 | 109,707,040 | 92 | 39 | MIT | 2023-04-06T00:37:29 | 2017-11-06T14:39:21 | Jupyter Notebook | UTF-8 | Python | false | false | 179 | py | """
Boilerplate dev test
"""
from imports import *
if __name__ == "__main__":
abf = pyabf.ABF(PATH_DATA+"/180415_aaron_temp.abf")
print(abf.sweepY)
print(abf.sweepC) | [
"swharden@gmail.com"
] | swharden@gmail.com |
3a30c4238d79e5f91a766e65bc38e75f764384bd | 59129c8fee701270a7a69cc03d876834f567597a | /olvidado/.i3pystatus.caladan.py | fdb15af7e800b5bc31251bb794b506c25e0ef9e0 | [] | no_license | jmberros/dotfiles | 0d79e35fc30fe3669464bc979e64bb6a365ab3f6 | 7c12c4e70b25b4c932a160c2142a132eecca5b1d | refs/heads/master | 2023-08-31T15:33:11.801980 | 2023-08-29T00:48:24 | 2023-08-29T00:48:24 | 10,116,001 | 37 | 26 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | # -*- coding: utf-8 -*-
import subprocess
from i3pystatus import Status
status = Status(standalone=True)
status.register("clock",
color="#cccccc",
format="%H:%M, %A %-d %B",)
status.register("pulseaudio",
format="🔊 {volume}",)
status.register("load",
color="#bbbbbb",
critical_limit=8,
format="💻 {avg1} / {avg5}")
status.register("mem",
divisor=10**9,
color="#4CAF50",
format="{used_mem} / {avail_mem} Gb")
status.register("now_playing",
player="clementine",
color="#ffc080",
format='{artist}: "{title}" {song_elapsed}/{song_length}')
status.register("network",
interface="en0",
format_up="{bytes_sent} kB/s↑ {bytes_recv} kB/s↓",
format_down="Internet DOWN",
dynamic_color = True,
start_color="gray",
end_color="yellow",
color_down="#ff2222",
)
status.register("disk",
path="/home/juan",
color="#bbbbbb",
#format="{used} / {total}G [ {avail}G ]",)
format="🏠 {avail}G",)
status.register("disk",
path="/",
color="#bbbbbb",
#format="{used} / {total}G [ {avail}G ]",)
format="/ {avail}G",)
status.run()
| [
"juanmaberros@gmail.com"
] | juanmaberros@gmail.com |
803157c722e9a3da82f6f61490f4b508d74f77c0 | 22b3f1851bf4da5fc8837b31cc276e95f92c7a33 | /deeppy/expr/util.py | 62e19c43957f6237f2f401406e30ca2d0bbf294e | [
"MIT"
] | permissive | nagyistoce/deeppy | db34eda7d4d14077c577ef081ed3edf2b9d00add | f7d073aef9a7070a841d66f34046414c88b01812 | refs/heads/master | 2020-12-11T04:01:49.877773 | 2015-11-19T14:34:43 | 2015-11-19T14:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | import numpy as np
from .base import Identity
# Registry of printable statistics: name -> (printf-style format, reducer
# applied to the array being printed).
_measures = {
    'mean': ('%.2e', np.mean),
    'std': ('%.2e', np.std),
    'shape': ('%s', lambda x: str(x.shape)),
    'absnorm': ('%.2e', lambda x: np.sum(np.fabs(x))),
}
class Print(Identity):
    """Identity pass-through that periodically prints statistics of the
    array flowing through it during fprop (and optionally bprop).

    `measures` entries (name -> (format, reducer)) extend/override the
    module-level `_measures` registry.
    """

    def __init__(self, rate=1, label=None, fprop=True, bprop=False,
                 measures={}):
        self.i = 0
        self.rate = rate
        self.label = label
        self.print_fprop = fprop
        self.print_bprop = bprop
        self.measures = measures

    def setup(self):
        super(Print, self).setup()
        # Default the label to the upstream expression's class name.
        if self.label is None:
            self.label = self.x.__class__.__name__

    def _message(self, val):
        stats = dict(_measures, **self.measures)
        parts = [self.label + ' ']
        for name, (fmt, reducer) in stats.items():
            parts.append(' ' + name + ':' + (fmt % reducer(val)))
        return ''.join(parts)

    def fprop(self):
        super(Print, self).fprop()
        self.i += 1
        due = (self.i - 1) % self.rate == 0
        if self.print_fprop and due:
            print(self._message(np.array(self.out)))

    def bprop(self):
        if self.print_bprop and (self.i - 1) % self.rate == 0:
            print(self._message(np.array(self.out)))
        super(Print, self).bprop()
| [
"anders.bll@gmail.com"
] | anders.bll@gmail.com |
989916c72b04b69f2d18bd53f0fe27d6ee9f484a | ce63cec7c28611bb0c43bd503996718716246538 | /reagent/lite/optimizer.py | 4ca6afecffa83d8c892abaace84105dfede74af0 | [
"BSD-3-Clause"
] | permissive | mcx/ReAgent | 70cbf5484656c8bdf722155e0eacac0385a3e276 | 57b58a8b3a6b74bb87a197b73a6cd108ddad895e | refs/heads/master | 2023-08-10T15:37:02.664394 | 2021-10-14T01:52:49 | 2021-10-14T01:53:55 | 329,295,166 | 0 | 0 | BSD-3-Clause | 2021-10-14T02:16:26 | 2021-01-13T12:04:53 | Python | UTF-8 | Python | false | false | 46,935 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import heapq
import logging
from collections import defaultdict, deque
from math import floor
from typing import Callable, Dict, Tuple, Optional, List, Any
import nevergrad as ng
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from nevergrad.parametrization.choice import Choice
logger = logging.getLogger(__name__)
# Default multiplicative temperature decay used by logit-based optimizers.
ANNEAL_RATE = 0.9997
# Default Adam learning rate.
LEARNING_RATE = 0.001
# Default number of solutions sampled per optimization step.
BATCH_SIZE = 512
# People rarely need more than that
MAX_NUM_BEST_SOLUTIONS = 1000
# Near-zero temperature used as the default for (effectively greedy) sampling.
GREEDY_TEMP = 0.0001
def sample_from_logits(
    keyed_logits: Dict[str, nn.Parameter], batch_size: int, temp: float
) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
    """Draw `batch_size` joint samples from per-key categorical logits.

    Returns (choice indices per key, joint log-probabilities of shape
    (batch_size, 1)); the joint log-prob sums the per-key log-probs.
    """
    log_probs = torch.zeros(batch_size, 1)
    solutions = {}
    for key, logits in keyed_logits.items():
        probs = F.softmax(logits / temp, dim=-1).squeeze(0)
        choices = torch.multinomial(probs, batch_size, replacement=True)
        log_probs = log_probs + torch.log(probs[choices].reshape(-1, 1))
        solutions[key] = choices
    return solutions, log_probs
def obj_func_scaler(
    obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]],
    exp_offset_and_scale: Optional[Tuple[float, float]],
) -> Optional[Callable]:
    """
    Wrap `obj_func` so that it returns (raw_reward, scaled_reward).

    When an (offset, scale) pair is given, scaled_reward is
    exp((reward - offset) / scale) — exponentiating helps optimizers
    escape local minima; otherwise scaled_reward equals the raw reward.
    Returns None when obj_func is None.
    """
    if obj_func is None:
        return None

    if exp_offset_and_scale is None:
        def wrapped(*args, **kwargs):
            raw = obj_func(*args, **kwargs)
            return raw, raw
    else:
        offset, scale = exp_offset_and_scale

        def wrapped(*args, **kwargs):
            raw = obj_func(*args, **kwargs)
            return raw, torch.exp((raw - offset) / scale)

    return wrapped
def _num_of_params(model: nn.Module) -> int:
return len(torch.cat([p.flatten() for p in model.parameters()]))
def sol_to_tensors(
    sampled_sol: Dict[str, torch.Tensor], input_param: ng.p.Dict
) -> torch.Tensor:
    """Concatenate one-hot encodings of the sampled choice indices.

    Keys are processed in sorted order so the column layout is
    deterministic; the result has shape (batch_size, sum of num_choices).
    """
    encoded = []
    for key in sorted(sampled_sol.keys()):
        # pyre-fixme[16]: `Parameter` has no attribute `choices`.
        n_choices = len(input_param[key].choices)
        encoded.append(
            F.one_hot(sampled_sol[key], num_classes=n_choices).type(torch.FloatTensor)
        )
    return torch.cat(encoded, dim=-1)
class BestResultsQueue:
    """Keep the `max_len` solutions with the lowest rewards seen so far."""

    def __init__(self, max_len: int) -> None:
        self.max_len = max_len
        # reward -> set of stringified solutions, used for de-duplication.
        self.reward_sol_dict = defaultdict(set)
        self.heap = []

    def insert(self, reward: torch.Tensor, sol: Dict[str, torch.Tensor]) -> None:
        # The heap keeps the N largest *negated* rewards, i.e. the N lowest
        # rewards, since every optimizer here is a minimizer.
        reward = -reward
        key = str(sol)
        if reward in self.reward_sol_dict and key in self.reward_sol_dict[reward]:
            # Exact duplicate of a stored solution: ignore.
            return
        self.reward_sol_dict[reward].add(key)
        entry = (reward, key, sol)
        if len(self.heap) < self.max_len:
            heapq.heappush(self.heap, entry)
        else:
            evicted_r, evicted_key, _ = heapq.heappushpop(self.heap, entry)
            self.reward_sol_dict[evicted_r].remove(evicted_key)

    def topk(self, k: int) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
        # Best (lowest-reward) first; rewards are un-negated on the way out.
        best = heapq.nlargest(min(k, len(self.heap)), self.heap)
        return [(-entry[0], entry[2]) for entry in best]
class ComboOptimizerBase:
    """Base class for optimizers over discrete (nevergrad Choice) spaces.

    Subclasses implement sample_internal(), update_params() and
    _optimize_step(); this base wires them together and tracks the
    lowest-reward solutions seen (all optimizers here minimize).
    """
    def __init__(
        self,
        param: ng.p.Dict,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        batch_size: int = BATCH_SIZE,
        obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
    ) -> None:
        # Only categorical (Choice) parameters are supported.
        for k in param:
            assert isinstance(
                param[k], Choice
            ), "Only support discrete parameterization now"
        self.param = param
        # Wrapped so calls return (raw_reward, scaled_reward); None stays None.
        self.obj_func = obj_func_scaler(obj_func, obj_exp_offset_scale)
        self.batch_size = batch_size
        self.obj_exp_scale = obj_exp_offset_scale
        # Result of the latest sample_internal(), cleared by update_params().
        self.last_sample_internal_res = None
        self.best_sols = BestResultsQueue(MAX_NUM_BEST_SOLUTIONS)
        self._init()
    def _init(self) -> None:
        # Optional subclass hook, called at the end of __init__.
        pass
    def optimize_step(self) -> Tuple:
        """Run one sampling + learning round and record the sampled solutions."""
        assert self.obj_func is not None, (
            "obj_func not provided. Can't call optimize_step() for optimization. "
            "You have to perform manual optimization, i.e., call sample_internal() then update_params()"
        )
        all_results = self._optimize_step()
        # By convention the first two entries are (solutions, rewards).
        sampled_solutions, sampled_reward = all_results[0], all_results[1]
        self._maintain_best_solutions(sampled_solutions, sampled_reward)
        return all_results
    def _maintain_best_solutions(
        self, sampled_sols: Dict[str, torch.Tensor], sampled_reward: torch.Tensor
    ) -> None:
        # Insert every (reward, solution) pair into the bounded queue.
        for idx in range(len(sampled_reward)):
            r = sampled_reward[idx].item()
            sol = {k: sampled_sols[k][idx] for k in sampled_sols}
            self.best_sols.insert(r, sol)
    def best_solutions(
        self, k: int = 1
    ) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
        """
        k solutions with the smallest rewards
        Return is a list of tuples (reward, solution)
        """
        return self.best_sols.topk(k)
    @abc.abstractmethod
    def _optimize_step(self) -> Tuple:
        """
        The main component of ComboOptimizer.optimize_step(). The user only
        needs to loop over optimizer_step() until the budget runs out.
        _optimize_step() will call sample_internal() and update_params()
        to perform sampling and parameter updating
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def sample_internal(
        self,
        batch_size: Optional[int] = None,
    ) -> Tuple:
        """
        Record and return sampled solutions and any other important
        information for learning.
        It samples self.batch_size number of solutions, unless batch_size is provided.
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def update_params(
        self,
        reward: torch.Tensor,
    ) -> None:
        """
        Update model parameters by reward. Reward is objective function
        values evaluated on the solutions sampled by sample_internal()
        """
        raise NotImplementedError()
    def sample(
        self, batch_size: int, temp: Optional[float] = None
    ) -> Dict[str, torch.Tensor]:
        """
        Return sampled solutions, keyed by parameter names.
        For discrete parameters, the values are choice indices;
        For continuous parameters, the values are sampled float vectors.
        This function is usually called after learning is done.
        """
        raise NotImplementedError()
    def indices_to_raw_choices(
        self, sampled_sol: Dict[str, torch.Tensor]
    ) -> List[Dict[str, str]]:
        """Map sampled choice indices back to their raw choice values, one dict per sampled row."""
        batch_size = list(sampled_sol.values())[0].shape[0]
        sampled_sol_i_vals = []
        for i in range(batch_size):
            sampled_sol_i = {k: sampled_sol[k][i] for k in sampled_sol}
            sampled_sol_i_val = {
                # pyre-fixme[16]: `Parameter` has no attribute `choices`.
                k: self.param[k].choices.value[v]
                for k, v in sampled_sol_i.items()
            }
            sampled_sol_i_vals.append(sampled_sol_i_val)
        return sampled_sol_i_vals
class RandomSearchOptimizer(ComboOptimizerBase):
    """
    Find the best solution to minimize a black-box function by random search
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            a function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled choice
            indices as the value (of shape (batch_size, ))
        sampling_weights (Optional[Dict[str, np.ndarray]]):
            Instead of uniform sampling, we sample solutions with preferred
            weights. Key: choice name, value: sampling weights
    Example:
        >>> _ = torch.manual_seed(0)
        >>> np.random.seed(0)
        >>> BATCH_SIZE = 4
        >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
        >>>
        >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
        ...     reward = torch.ones(BATCH_SIZE, 1)
        ...     for i in range(BATCH_SIZE):
        ...         # the best action is "red"
        ...         if sampled_sol['choice1'][i] == 2:
        ...             reward[i, 0] = 0.0
        ...     return reward
        ...
        >>> optimizer = RandomSearchOptimizer(ng_param, obj_func, batch_size=BATCH_SIZE)
        >>> for i in range(10):
        ...     res = optimizer.optimize_step()
        ...
        >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
        >>> assert best_reward == 0
        >>> assert best_choice['choice1'] == 2
    """
    def __init__(
        self,
        param: ng.p.Dict,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        batch_size: int = BATCH_SIZE,
        sampling_weights: Optional[Dict[str, np.ndarray]] = None,
    ) -> None:
        self.sampling_weights = sampling_weights
        super().__init__(
            param,
            obj_func,
            batch_size,
        )
    def sample(
        self, batch_size: int, temp: Optional[float] = None
    ) -> Dict[str, torch.Tensor]:
        """Sample choice indices uniformly, or by self.sampling_weights when given."""
        assert temp is None, "temp is not used in Random Search"
        sampled_sol = {}
        for k, param in self.param.items():
            # pyre-fixme[16]: `Parameter` has no attribute `choices`.
            num_choices = len(param.choices)
            if self.sampling_weights is None:
                sampled_sol[k] = torch.randint(num_choices, (batch_size,))
            else:
                weight = self.sampling_weights[k]
                sampled_sol[k] = torch.tensor(
                    np.random.choice(num_choices, batch_size, replace=True, p=weight)
                )
        return sampled_sol
    def sample_internal(
        self, batch_size: Optional[int] = None
    ) -> Tuple[Dict[str, torch.Tensor]]:
        # Random search keeps no learnable state; just sample and cache it.
        batch_size = batch_size or self.batch_size
        sampled_sol = self.sample(batch_size, temp=None)
        self.last_sample_internal_res = sampled_sol
        return (sampled_sol,)
    def update_params(self, reward: torch.Tensor):
        # Nothing to learn; only clear the cached sample.
        self.last_sample_internal_res = None
    def _optimize_step(self) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        sampled_solutions = self.sample_internal(self.batch_size)[0]
        # obj_func returns (raw_reward, scaled_reward); only the raw one is used here.
        sampled_reward, _ = self.obj_func(sampled_solutions)
        sampled_reward = sampled_reward.detach()
        self.update_params(sampled_reward)
        return sampled_solutions, sampled_reward
class NeverGradOptimizer(ComboOptimizerBase):
    """
    Minimize a black-box function using NeverGrad, Rapin & Teytaud, 2018.
    https://facebookresearch.github.io/nevergrad/.
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        estimated_budgets (int): estimated number of budgets (objective evaluation
            times) for nevergrad to perform auto tuning.
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            a function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled choice
            indices as the value (of shape (batch_size, ))
        optimizer_name (Optional[str]): ng optimizer to be used specifically
            All possible nevergrad optimizers are available at:
            https://facebookresearch.github.io/nevergrad/optimization.html#choosing-an-optimizer.
            If not specified, we use the meta optimizer NGOpt
    Example:
        >>> _ = torch.manual_seed(0)
        >>> np.random.seed(0)
        >>> BATCH_SIZE = 4
        >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
        >>>
        >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
        ...     reward = torch.ones(BATCH_SIZE, 1)
        ...     for i in range(BATCH_SIZE):
        ...         # the best action is "red"
        ...         if sampled_sol['choice1'][i] == 2:
        ...             reward[i, 0] = 0.0
        ...     return reward
        ...
        >>> estimated_budgets = 40
        >>> optimizer = NeverGradOptimizer(
        ...     ng_param, estimated_budgets, obj_func, batch_size=BATCH_SIZE,
        ... )
        >>>
        >>> for i in range(10):
        ...     res = optimizer.optimize_step()
        ...
        >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
        >>> assert best_reward == 0
        >>> assert best_choice['choice1'] == 2
    """
    def __init__(
        self,
        param: ng.p.Dict,
        estimated_budgets: int,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        batch_size: int = BATCH_SIZE,
        optimizer_name: Optional[str] = None,
    ) -> None:
        self.estimated_budgets = estimated_budgets
        self.optimizer_name = optimizer_name
        # Both populated in _init(): the ng optimizer instance, and a map
        # from each choice name to {raw choice value: index}.
        self.optimizer = None
        self.choice_to_index = {}
        super().__init__(
            param,
            obj_func,
            batch_size,
        )
    def _init(self) -> None:
        # Fall back to nevergrad's meta-optimizer when none is specified.
        optimizer_name = self.optimizer_name or "NGOpt"
        logger.info(f"Nevergrad uses {optimizer_name} optimizer")
        self.optimizer = ng.optimizers.registry[optimizer_name](
            parametrization=self.param,
            budget=self.estimated_budgets,
            num_workers=self.batch_size,
        )
        for k, param in self.param.items():
            # pyre-fixme[16]: `Parameter` has no attribute `choices`.
            self.choice_to_index[k] = {v: i for i, v in enumerate(param.choices.value)}
    def sample(
        self, batch_size: int, temp: Optional[float] = None
    ) -> Dict[str, torch.Tensor]:
        """Ask nevergrad for batch_size candidates, returned as choice indices."""
        # NOTE(review): assert message says "Random Search" — appears copied
        # from RandomSearchOptimizer.sample.
        assert temp is None, "temp is not used in Random Search"
        ng_sols_idx = {k: torch.zeros(batch_size) for k in self.param}
        for i in range(batch_size):
            ng_sol = self.optimizer.ask().value
            for k in ng_sol:
                ng_sols_idx[k][i] = self.choice_to_index[k][ng_sol[k]]
        return ng_sols_idx
    def sample_internal(self, batch_size: Optional[int] = None) -> Tuple:
        """
        Return sampled solutions in two formats.
        (1) our own format, which is a dictionary and consistent with other optimizers.
        The dictionary has choice names as the key and sampled choice indices as the
        value (of shape (batch_size, ))
        (2) nevergrad format returned by optimizer.ask()
        """
        batch_size = batch_size or self.batch_size
        ng_sols_idx = {k: torch.zeros(batch_size, dtype=torch.long) for k in self.param}
        ng_sols_raw = []
        for i in range(batch_size):
            ng_sol = self.optimizer.ask()
            ng_sols_raw.append(ng_sol)
            ng_sol_val = ng_sol.value
            for k in ng_sol_val:
                ng_sols_idx[k][i] = self.choice_to_index[k][ng_sol_val[k]]
        self.last_sample_internal_res = (ng_sols_idx, ng_sols_raw)
        return ng_sols_idx, ng_sols_raw
    def update_params(self, reward: torch.Tensor) -> None:
        # Report each (candidate, reward) pair back to nevergrad.
        _, sampled_sols = self.last_sample_internal_res
        for ng_sol, r in zip(sampled_sols, reward):
            self.optimizer.tell(ng_sol, r.item())
        self.last_sample_internal_res = None
    def _optimize_step(self) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        sampled_sol_idxs, sampled_sols = self.sample_internal(self.batch_size)
        sampled_reward, _ = self.obj_func(sampled_sol_idxs)
        sampled_reward = sampled_reward.detach()
        self.update_params(sampled_reward)
        return sampled_sol_idxs, sampled_reward
class LogitBasedComboOptimizerBase(ComboOptimizerBase):
    """Base for optimizers that learn one logits vector per Choice parameter,
    trained with Adam and sampled with a temperature schedule."""
    def __init__(
        self,
        param: ng.p.Dict,
        start_temp: float,
        min_temp: float,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        learning_rate: float = LEARNING_RATE,
        anneal_rate: float = ANNEAL_RATE,
        batch_size: int = BATCH_SIZE,
        obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
    ) -> None:
        # Temperature starts at start_temp; subclasses are expected to anneal
        # it towards min_temp using anneal_rate.
        self.temp = start_temp
        self.min_temp = min_temp
        self.anneal_rate = anneal_rate
        self.learning_rate = learning_rate
        # One learnable logits row of shape (1, num_choices) per parameter,
        # created in _init().
        self.logits: Dict[str, nn.Parameter] = {}
        self.optimizer = None
        super().__init__(
            param,
            obj_func,
            batch_size,
            obj_exp_offset_scale,
        )
    def _init(self) -> None:
        parameters = []
        for k in self.param.keys():
            v = self.param[k]
            if isinstance(v, ng.p.Choice):
                # One logit per possible choice value.
                logits_shape = len(v.choices)
                self.logits[k] = nn.Parameter(torch.randn(1, logits_shape))
                parameters.append(self.logits[k])
            else:
                raise NotImplementedError()
        self.optimizer = torch.optim.Adam(parameters, lr=self.learning_rate)
    def sample(
        self, batch_size: int, temp: Optional[float] = GREEDY_TEMP
    ) -> Dict[str, torch.Tensor]:
        """Sample solutions from the learned logits; the default near-zero
        temperature makes sampling effectively greedy."""
        assert temp is not None, "temp is needed for sampling logits"
        sampled_solutions, _ = sample_from_logits(self.logits, batch_size, temp)
        return sampled_solutions
def sample_gumbel(shape: Tuple[int, ...], eps: float = 1e-20) -> torch.Tensor:
    """Draw standard Gumbel noise of the given shape via inverse transform."""
    uniform = torch.rand(shape)
    # G = -log(-log(U)); eps guards both logs against log(0).
    return -torch.log(-torch.log(uniform + eps) + eps)
def gumbel_softmax(logits: torch.Tensor, temperature: float) -> torch.Tensor:
    """Return a Gumbel-softmax (relaxed one-hot) sample for ``logits``.

    Adds Gumbel noise to the logits and applies a temperature-scaled softmax
    over the last dimension; lower temperatures give harder samples.
    """
    noisy_logits = sample_gumbel(logits.size()) + logits
    scaled = noisy_logits / temperature
    return F.softmax(scaled, dim=-1)
class GumbelSoftmaxOptimizer(LogitBasedComboOptimizerBase):
    """
    Minimize a differentiable objective function which takes in categorical inputs.
    The method is based on Categorical Reparameterization with Gumbel-Softmax,
    Jang, Gu, & Poole, 2016. https://arxiv.org/abs/1611.01144.
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            an analytical function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled gumbel-softmax
            distributions of shape (batch_size, num_choices) as the value
        start_temp: starting temperature
        min_temp: minimal temperature (towards the end of learning) for sampling gumbel-softmax
        update_params_within_optimizer (bool): If False, skip updating parameters within this
            Optimizer. The Gumbel-softmax parameters will be updated in external systems.
    Example:
        >>> _ = torch.manual_seed(0)
        >>> np.random.seed(0)
        >>> BATCH_SIZE = 4
        >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
        >>>
        >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
        ...     # best action is "red"
        ...     reward = torch.mm(sampled_sol['choice1'], torch.tensor([[1.], [1.], [0.]]))
        ...     return reward
        ...
        >>> optimizer = GumbelSoftmaxOptimizer(
        ...     ng_param, obj_func, anneal_rate=0.9, batch_size=BATCH_SIZE, learning_rate=0.1
        ... )
        ...
        >>> for i in range(30):
        ...     res = optimizer.optimize_step()
        ...
        >>> assert optimizer.sample(1)['choice1'] == 2
    """
    def __init__(
        self,
        param: ng.p.Dict,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        start_temp: float = 1.0,
        min_temp: float = 0.1,
        learning_rate: float = LEARNING_RATE,
        anneal_rate: float = ANNEAL_RATE,
        batch_size: int = BATCH_SIZE,
        update_params_within_optimizer: bool = True,
    ) -> None:
        # When False, this optimizer only produces relaxed samples; gradient
        # updates on the logits are performed by an external system.
        self.update_params_within_optimizer = update_params_within_optimizer
        super().__init__(
            param,
            start_temp,
            min_temp,
            obj_func,
            learning_rate,
            anneal_rate,
            batch_size,
            # no reward scaling in gumbel softmax
            obj_exp_offset_scale=None,
        )
    def sample_internal(
        self, batch_size: Optional[int] = None
    ) -> Tuple[Dict[str, torch.Tensor]]:
        """Draw differentiable Gumbel-softmax samples, one
        (batch_size, num_choices) tensor per choice name."""
        batch_size = batch_size or self.batch_size
        sampled_softmax_vals = {}
        for k, logits in self.logits.items():
            sampled_softmax_vals[k] = gumbel_softmax(
                logits.repeat(batch_size, 1), self.temp
            )
        self.last_sample_internal_res = sampled_softmax_vals
        return (sampled_softmax_vals,)
    def update_params(self, reward: torch.Tensor) -> None:
        """Backpropagate the mean reward through the relaxed samples into the
        logits, then anneal the temperature."""
        if self.update_params_within_optimizer:
            reward_mean = reward.mean()
            # The reward must still be connected to the logits' autograd graph.
            assert reward_mean.requires_grad
            self.optimizer.zero_grad()
            reward_mean.backward()
            self.optimizer.step()
        self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
        self.last_sample_internal_res = None
    def _optimize_step(self) -> Tuple:
        sampled_softmax_vals = self.sample_internal(self.batch_size)[0]
        sampled_reward, _ = self.obj_func(sampled_softmax_vals)
        self.update_params(sampled_reward)
        # Return detached copies so callers cannot mutate live autograd state.
        sampled_softmax_vals = {
            k: v.detach().clone() for k, v in sampled_softmax_vals.items()
        }
        logits = {k: v.detach().clone() for k, v in self.logits.items()}
        return sampled_softmax_vals, sampled_reward, logits
class PolicyGradientOptimizer(LogitBasedComboOptimizerBase):
    """
    Minimize a black-box objective function which takes in categorical inputs.
    The method is based on REINFORCE, Williams, 1992.
    https://link.springer.com/article/10.1007/BF00992696
    In this method, the action distribution is a joint distribution of multiple
    *independent* softmax distributions, each corresponding to one discrete
    choice type.
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            a function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled choice
            indices as the value (of shape (batch_size, ))
    Example:
        >>> _ = torch.manual_seed(0)
        >>> np.random.seed(0)
        >>> BATCH_SIZE = 16
        >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
        >>>
        >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
        ...     reward = torch.ones(BATCH_SIZE, 1)
        ...     for i in range(BATCH_SIZE):
        ...         # the best action is "red"
        ...         if sampled_sol['choice1'][i] == 2:
        ...             reward[i, 0] = 0.0
        ...     return reward
        ...
        >>> optimizer = PolicyGradientOptimizer(
        ...     ng_param, obj_func, batch_size=BATCH_SIZE, learning_rate=0.1
        ... )
        >>> for i in range(30):
        ...     res = optimizer.optimize_step()
        ...
        >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
        >>> assert best_reward == 0
        >>> assert best_choice['choice1'] == 2
        >>> assert optimizer.sample(1)['choice1'] == 2
    """
    def __init__(
        self,
        param: ng.p.Dict,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        # default (start_temp=min_temp=1.0): no temperature change for policy gradient
        start_temp: float = 1.0,
        min_temp: float = 1.0,
        learning_rate: float = LEARNING_RATE,
        anneal_rate: float = ANNEAL_RATE,
        batch_size: int = BATCH_SIZE,
        obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
    ) -> None:
        super().__init__(
            param,
            start_temp,
            min_temp,
            obj_func,
            learning_rate,
            anneal_rate,
            batch_size,
            obj_exp_offset_scale,
        )
    def sample(
        self, batch_size: int, temp: Optional[float] = GREEDY_TEMP
    ) -> Dict[str, torch.Tensor]:
        """Sample choice indices from the current softmax policy."""
        assert temp is not None, "temp is needed for sampling logits"
        sampled_solutions, _ = sample_from_logits(self.logits, batch_size, temp)
        return sampled_solutions
    def sample_internal(
        self,
        batch_size: Optional[int] = None,
    ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        """Sample actions plus their (differentiable) log-probabilities,
        caching both for the subsequent ``update_params`` call."""
        batch_size = batch_size or self.batch_size
        sampled_solutions, sampled_log_probs = sample_from_logits(
            self.logits,
            batch_size,
            self.temp,
        )
        self.last_sample_internal_res = sampled_solutions, sampled_log_probs
        return sampled_solutions, sampled_log_probs
    def update_params(self, reward: torch.Tensor) -> None:
        """One REINFORCE step: minimize E[advantage * log_prob]."""
        _, sampled_log_probs = self.last_sample_internal_res
        if self.batch_size == 1:
            adv = reward
        else:
            # Mean-reward baseline reduces gradient variance.
            adv = reward - torch.mean(reward)
        assert not adv.requires_grad
        assert sampled_log_probs.requires_grad
        assert sampled_log_probs.shape == adv.shape == reward.shape
        assert adv.ndim == 2
        assert adv.shape[-1] == 1
        loss = (adv * sampled_log_probs).mean()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
        self.last_sample_internal_res = None
    def _optimize_step(self) -> Tuple:
        sampled_solutions, sampled_log_probs = self.sample_internal(self.batch_size)
        sampled_reward, sampled_scaled_reward = self.obj_func(sampled_solutions)
        sampled_reward, sampled_scaled_reward = (
            sampled_reward.detach(),
            sampled_scaled_reward.detach(),
        )
        # The (optionally) scaled reward drives learning; the raw reward is
        # returned for bookkeeping.
        self.update_params(sampled_scaled_reward)
        return sampled_solutions, sampled_reward, sampled_log_probs
def shuffle_exp_replay(exp_replay: List[Any]) -> Any:
    """Lazily yield the items of ``exp_replay`` in a uniformly random order.

    The permutation is drawn from numpy's global RNG when iteration starts;
    the input list itself is not modified.
    """
    order = np.random.permutation(len(exp_replay))
    yield from (exp_replay[position] for position in order)
class QLearningOptimizer(ComboOptimizerBase):
    """
    Treat the problem of minimizing a black-box function as a sequential decision problem,
    and solve it by Deep Q-Learning. See "Human-Level Control through Deep Reinforcement
    Learning", Mnih et al., 2015. https://www.nature.com/articles/nature14236.
    In each episode step, Q-learning makes a decision for one categorical input. The reward
    is given only at the end of the episode, which is the value of the black-box function
    at the input determined by the choices made at all steps.
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        start_temp (float): the starting exploration rate in epsilon-greedy sampling
        min_temp (float): the minimal exploration rate in epsilon-greedy
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            a function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled choice
            indices as the value (of shape (batch_size, ))
        model_dim (int): hidden layer size for the q-network: input -> model_dim -> model_dim -> output
        num_batches_per_learning (int): the number of batches sampled from replay buffer
            for q-learning.
        replay_size (int): the maximum batches held in the replay buffer. Note, a problem instance of n
            choices will generate n batches in the replay buffer.
    Example:
        >>> _ = torch.manual_seed(0)
        >>> np.random.seed(0)
        >>> BATCH_SIZE = 4
        >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
        >>>
        >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]):
        ...     reward = torch.ones(BATCH_SIZE, 1)
        ...     for i in range(BATCH_SIZE):
        ...         # the best action is "red"
        ...         if sampled_sol['choice1'][i] == 2:
        ...             reward[i, 0] = 0.0
        ...     return reward
        ...
        >>> optimizer = QLearningOptimizer(ng_param, obj_func, batch_size=BATCH_SIZE)
        >>> for i in range(10):
        ...     res = optimizer.optimize_step()
        ...
        >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0]
        >>> assert best_reward == 0
        >>> assert best_choice['choice1'] == 2
        >>> assert optimizer.sample(1)['choice1'] == 2
    """
    def __init__(
        self,
        param: ng.p.Dict,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        start_temp: float = 1.0,
        min_temp: float = 0.1,
        learning_rate: float = LEARNING_RATE,
        anneal_rate: float = ANNEAL_RATE,
        batch_size: int = BATCH_SIZE,
        model_dim: int = 128,
        obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
        num_batches_per_learning: int = 10,
        replay_size: int = 100,
    ) -> None:
        self.model_dim = model_dim
        # Fixed decision order over the choice names: one episode step per key.
        self.sorted_keys = sorted(param.keys())
        assert (
            start_temp <= 1.0 and start_temp > 0
        ), "Starting temperature for epsilon-greedy should be between (0, 1]"
        assert (
            min_temp <= start_temp and min_temp >= 0
        ), "Minimum temperature for epsilon-greedy should be between [0, start_temp]"
        self.temp = start_temp
        self.min_temp = min_temp
        self.learning_rate = learning_rate
        self.anneal_rate = anneal_rate
        self.num_batches_per_learning = num_batches_per_learning
        self.replay_size = replay_size
        # Bounded FIFO replay buffer; old transitions fall off the left.
        self.exp_replay = deque([], maxlen=replay_size)
        # input_dim is the total one-hot width over all choices, set in _init().
        self.input_dim = 0
        self.q_net = None
        self.optimizer = None
        super().__init__(
            param,
            obj_func,
            batch_size,
            obj_exp_offset_scale,
        )
    def _init(self) -> None:
        # State-action encoding: concatenated one-hot segments, one per choice.
        for k in self.sorted_keys:
            v = self.param[k]
            if isinstance(v, ng.p.Choice):
                num_choices = len(v.choices)
                self.input_dim += num_choices
            else:
                raise NotImplementedError()
        self.q_net = nn.Sequential(
            *[
                nn.Linear(self.input_dim, self.model_dim),
                nn.ReLU(),
                nn.Linear(self.model_dim, self.model_dim),
                nn.ReLU(),
                nn.Linear(self.model_dim, 1),
            ]
        )
        for p in self.q_net.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        self.optimizer = torch.optim.Adam(
            self.q_net.parameters(), lr=self.learning_rate
        )
        logger.info(f"Number of total params: {_num_of_params(self.q_net)}")
    def sample_internal(
        self,
        batch_size: Optional[int] = None,
    ) -> Tuple[Dict[str, torch.Tensor], List[Any]]:
        """Epsilon-greedy rollout at the current exploration rate."""
        batch_size = batch_size or self.batch_size
        return self._sample_internal(batch_size, self.temp)
    def _sample_internal(
        self,
        batch_size: int,
        temp: float,
    ) -> Tuple[Dict[str, torch.Tensor], List[Any]]:
        """Roll out one episode per batch element, choosing each categorical
        input epsilon-greedily (with probability ``temp`` act randomly),
        and collect the transitions for the replay buffer."""
        logger.info(f"Explore with temp={temp}")
        sampled_solutions: Dict[str, torch.Tensor] = {}
        exp_replay = []
        acc_input_dim = 0
        # The first cur_state_action is a dummy vector of all -1
        cur_state_action = torch.full((batch_size, self.input_dim), -1).float()
        for k in self.sorted_keys:
            v = self.param[k]
            # pyre-fixme[16]: `Parameter` has no attribute `choices`.
            num_choices = len(v.choices)
            # Enumerate every possible next action as a candidate state-action
            # pair: shape (batch_size, num_choices, input_dim).
            next_state_action_all_pairs = cur_state_action.repeat_interleave(
                num_choices, dim=0
            ).reshape(batch_size, num_choices, self.input_dim)
            # Write the one-hot action into this choice's segment.
            next_state_action_all_pairs[
                :, :, acc_input_dim : acc_input_dim + num_choices
            ] = torch.eye(num_choices)
            q_values = (
                self.q_net(next_state_action_all_pairs)
                .detach()
                .reshape(batch_size, num_choices)
            )
            q_actions = q_values.argmax(dim=1)
            random_actions = torch.randint(num_choices, (batch_size,))
            explore_prob = torch.rand(batch_size)
            selected_action = (
                (explore_prob <= temp) * random_actions
                + (explore_prob > temp) * q_actions
            ).long()
            sampled_solutions[k] = selected_action
            # the last element is terminal indicator
            exp_replay.append((cur_state_action, next_state_action_all_pairs, False))
            cur_state_action = next_state_action_all_pairs[
                torch.arange(batch_size), selected_action
            ]
            acc_input_dim += num_choices
        # add dummy next_state_action_all_pairs and terminal indicator
        # NOTE(review): cur_state_action is 2-D here, so squeeze(1) is a no-op
        # unless input_dim == 1 — confirm whether that squeeze is intended.
        exp_replay.append((cur_state_action, cur_state_action.squeeze(1), True))
        # the first element is not useful
        exp_replay.pop(0)
        self.last_sample_internal_res = (sampled_solutions, exp_replay)
        return sampled_solutions, exp_replay
    def sample(
        self, batch_size: int, temp: Optional[float] = GREEDY_TEMP
    ) -> Dict[str, torch.Tensor]:
        """Sample solutions at an explicit exploration rate (near-greedy by
        default)."""
        assert temp is not None, "temp is needed for epsilon greedy"
        sampled_solutions, _ = self._sample_internal(batch_size, temp)
        return sampled_solutions
    def update_params(self, reward: torch.Tensor) -> None:
        """Append the latest episode to the replay buffer and run TD updates
        on up to ``num_batches_per_learning`` shuffled replay batches."""
        _, exp_replay = self.last_sample_internal_res
        # insert reward placeholder to exp replay
        # exp replay now has the format:
        # (cur_state_action, next_state_action_all_pairs, terminal, reward)
        self.exp_replay.extend([[*exp, None] for exp in exp_replay])
        # Only the terminal transition (the one just appended last) carries the
        # episode's reward; intermediate transitions keep reward=None.
        self.exp_replay[-1][-1] = reward
        assert len(exp_replay) == len(self.sorted_keys)
        avg_td_loss = []
        for i, (
            cur_state_action,
            next_state_action_all_pairs,
            terminal,
            r,
        ) in enumerate(shuffle_exp_replay(self.exp_replay)):
            q = self.q_net(cur_state_action)
            if terminal:
                # negate reward to be consistent with other optimizers.
                # reward returned by obj_func is to be minimized
                # but q-learning tries to maxmize accumulated rewards
                loss = F.mse_loss(q, -r)
            else:
                q_next = self.q_net(next_state_action_all_pairs).detach()
                # assume gamma=1 (no discounting)
                # NOTE(review): q is (batch, 1) while q_next.max(dim=1).values
                # looks like (batch,) — mse_loss will broadcast; confirm intended.
                loss = F.mse_loss(q, q_next.max(dim=1).values)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            avg_td_loss.append(loss.detach())
            if i == self.num_batches_per_learning - 1:
                break
        avg_td_loss = np.mean(avg_td_loss)
        logger.info(f"Avg td loss: {avg_td_loss}")
        self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
        self.last_sample_internal_res = None
    def _optimize_step(
        self,
    ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        sampled_solutions, exp_replay = self.sample_internal(self.batch_size)
        sampled_reward, sampled_scaled_reward = self.obj_func(sampled_solutions)
        sampled_reward, sampled_scaled_reward = (
            sampled_reward.detach(),
            sampled_scaled_reward.detach(),
        )
        self.update_params(sampled_scaled_reward)
        return sampled_solutions, sampled_reward
class BayesianOptimizer(ComboOptimizerBase):
    """
    Bayesian Optimization with mutation optimization and an acquisition function.
    The method is motivated from BANANAS, White, 2020.
    https://arxiv.org/abs/1910.11858
    In this method, the searching is based on mutation over the current best solutions.
    The acquisition function (e.g., ITS) estimates the expected improvement.
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            a function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled choice
            indices as the value (of shape (batch_size, ))
        acq_type (str): type of acquisition function.
        mutation_type (str): type of mutation, e.g., random.
        temp (float): percentage of mutation - how many variables will be mutated.
    """
    def __init__(
        self,
        param: ng.p.Dict,
        start_temp: float,
        min_temp: float,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        acq_type: str = "its",
        mutation_type: str = "random",
        anneal_rate: float = ANNEAL_RATE,
        batch_size: int = BATCH_SIZE,
        obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
    ) -> None:
        # temp here is the mutation ratio (fraction of variables mutated),
        # annealed from start_temp towards min_temp.
        self.start_temp = start_temp
        self.min_temp = min_temp
        self.temp = start_temp
        self.acq_type = acq_type
        self.mutation_type = mutation_type
        self.anneal_rate = anneal_rate
        super().__init__(
            param,
            obj_func,
            batch_size,
            obj_exp_offset_scale,
        )
    def sample(
        self, batch_size: int, temp: Optional[float] = None
    ) -> Dict[str, torch.Tensor]:
        """
        Applies a type of mutation, e.g., random mutation, on the best solutions recorded so far.
        For example, with random mutation, variables are randomly selected,
        and their values are randomly set with respect to their domains.
        """
        assert temp is not None, "temp is needed for Bayesian Optimizer"
        best_solutions = self.best_solutions(batch_size)
        # Fewer than batch_size solutions may have been recorded so far.
        batch_size = len(best_solutions)
        sampled_sol = [sol for _, sol in best_solutions]
        sampled_solutions = {}
        for k in sorted(self.param.keys()):
            sampled_solutions[k] = torch.cat([sol[k].reshape(1) for sol in sampled_sol])
        if self.mutation_type == "random":
            # For each batch element, pick floor(temp * num_vars) distinct
            # variables to re-randomize.
            mutated_keys = [
                np.random.choice(
                    sorted(self.param.keys()),
                    floor(temp * len(self.param)),
                    replace=False,
                )
                for _ in range(batch_size)
            ]
            mutated_solutions = {}
            for key in sorted(self.param.keys()):
                mutated_solutions[key] = sampled_solutions[key].clone()
                indices = torch.tensor(
                    [idx for idx, k in enumerate(mutated_keys) if key in k]
                )
                if len(indices):
                    mutated_solutions[key][indices] = torch.randint(
                        # pyre-fixme[16]: `Parameter` has no attribute `choices`.
                        len(self.param[key].choices),
                        (len(indices),),
                    )
        else:
            raise NotImplementedError()
        return mutated_solutions
    def acquisition(
        self,
        acq_type: str,
        sampled_sol: Dict[str, torch.Tensor],
        predictor: List[nn.Module],
    ) -> torch.Tensor:
        """Score candidate solutions with the ensemble; "its" draws one
        Thompson sample per candidate from the ensemble's predictive
        mean/std (lower score = more promising, matching minimization)."""
        assert predictor is not None
        batch_tensors = sol_to_tensors(sampled_sol, self.param)
        if acq_type == "its":
            with torch.no_grad():
                predictions = torch.stack([net(batch_tensors) for net in predictor])
                acquisition_reward = torch.normal(
                    torch.mean(predictions, dim=0), torch.std(predictions, dim=0)
                )
        else:
            raise NotImplementedError()
        return acquisition_reward.view(-1)
class BayesianMLPEnsemblerOptimizer(BayesianOptimizer):
    """
    Bayesian Optimizer with an ensemble of MLP networks, random mutation, and ITS.
    The method is motivated by the BANANAS optimization method, White, 2019.
    https://arxiv.org/abs/1910.11858.
    The mutation rate (temp) starts from start_temp and decreases over time
    with anneal_rate. Its lowest possible value is min_temp.
    Thus, initially the algorithm explores mutations with a higher mutation rate (more variables are randomly mutated).
    As time passes, the algorithm exploits the best solutions recorded so far (fewer variables are mutated).
    Args:
        param (ng.p.Dict): a nevergrad dictionary for specifying input choices
        obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]):
            a function which consumes sampled solutions and returns
            rewards as tensors of shape (batch_size, 1).
            The input dictionary has choice names as the key and sampled choice
            indices as the value (of shape (batch_size, ))
        acq_type (str): type of acquisition function.
        mutation_type (str): type of mutation, e.g., random.
        num_mutations (int): number of best solutions recorded so far that will be mutated.
        num_ensemble (int): number of predictors.
        start_temp (float): initial temperature (ratio) for mutation, e.g., with 1.0 all variables will be initially mutated.
        min_temp (float): lowest temperature (ratio) for mutation, e.g., with 0.0 no mutation will occur.
    """
    def __init__(
        self,
        param: ng.p.Dict,
        start_temp: float = 1.0,
        min_temp: float = 0.0,
        obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None,
        acq_type: str = "its",
        mutation_type: str = "random",
        anneal_rate: float = ANNEAL_RATE,
        num_mutations: int = 50,
        epochs: int = 1,
        learning_rate: float = LEARNING_RATE,
        batch_size: int = BATCH_SIZE,
        obj_exp_offset_scale: Optional[Tuple[float, float]] = None,
        model_dim: int = 128,
        num_ensemble: int = 5,
    ) -> None:
        self.temp = start_temp
        self.num_mutations = num_mutations
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.model_dim = model_dim
        self.num_ensemble = num_ensemble
        # Total one-hot input width; accumulated in _init().
        self.input_dim = 0
        self.predictor = None
        super().__init__(
            param,
            start_temp,
            min_temp,
            obj_func,
            acq_type,
            mutation_type,
            anneal_rate,
            batch_size,
            obj_exp_offset_scale,
        )
    def _init(self) -> None:
        """Seed the search with num_mutations random solutions and build +
        pre-train the MLP ensemble on them."""
        # initial population
        sampled_solutions = {}
        for k, param in self.param.items():
            if isinstance(param, ng.p.Choice):
                num_choices = len(param.choices)
                self.input_dim += num_choices
                sampled_solutions[k] = torch.randint(num_choices, (self.num_mutations,))
            else:
                raise NotImplementedError()
        # predictor
        self.predictor = []
        for _ in range(self.num_ensemble):
            model = nn.Sequential(
                *[
                    nn.Linear(self.input_dim, self.model_dim),
                    nn.LeakyReLU(),
                    nn.Linear(self.model_dim, self.model_dim),
                    nn.LeakyReLU(),
                    nn.Linear(self.model_dim, 1),
                ]
            )
            for p in model.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)
            self.predictor.append(model)
        sampled_reward, _ = self.obj_func(sampled_solutions)
        sampled_reward = sampled_reward.detach()
        self._maintain_best_solutions(sampled_solutions, sampled_reward)
        self.update_predictor(sampled_solutions, sampled_reward)
    def sample_internal(
        self,
        batch_size: Optional[int] = None,
    ) -> Tuple[Dict[str, torch.Tensor]]:
        """Mutate the best solutions, score candidates with the acquisition
        function, and keep the batch_size most promising ones."""
        batch_size = batch_size or self.batch_size
        mutated_solutions = self.sample(self.num_mutations, self.temp)
        # Ascending sort: lowest acquisition score first (minimization).
        _, indices = torch.sort(
            self.acquisition(self.acq_type, mutated_solutions, self.predictor), dim=0
        )
        sampled_solutions = {}
        for key in sorted(self.param.keys()):
            sampled_solutions[key] = mutated_solutions[key][indices[:batch_size]]
        self.last_sample_internal_res = sampled_solutions
        return (sampled_solutions,)
    def update_predictor(
        self, sampled_solutions: Dict[str, torch.Tensor], sampled_reward: torch.Tensor
    ) -> float:
        """Fit every ensemble member on the sampled (solution, reward) pairs
        and return the mean MSE loss across all training steps.

        NOTE(review): the original annotation said List[float], but the method
        returns the scalar mean of the collected losses (a numpy scalar).
        """
        x = sol_to_tensors(sampled_solutions, self.param)
        y = sampled_reward
        losses = []
        for model in self.predictor:
            model.train()
            # A fresh Adam per call: optimizer state is not carried across updates.
            optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate)
            for _ in range(self.epochs):
                pred = model(x)
                loss = F.mse_loss(pred, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses.append(loss.detach())
            model.eval()
        return np.mean(losses)
    def update_params(self, reward: torch.Tensor) -> None:
        # Only anneal the mutation ratio; the predictor is updated separately.
        self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp)
        self.last_sample_internal_res = None
    def _optimize_step(self) -> Tuple:
        sampled_solutions = self.sample_internal(self.batch_size)[0]
        sampled_reward, _ = self.obj_func(sampled_solutions)
        sampled_reward = sampled_reward.detach()
        loss = self.update_predictor(sampled_solutions, sampled_reward)
        self.update_params(sampled_reward)
        return sampled_solutions, sampled_reward, loss
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
6fff296d0c4099761ec2cf15b9a4f8bf629a1a65 | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.9/service_advisor.py | bb93a78ea3459b895c9e4d4937a9cde5a51bdf2d | [
"MIT",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 6,392 | py | #!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os
import traceback
import inspect
# Local imports
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
if "BASE_SERVICE_ADVISOR" in os.environ:
PARENT_FILE = os.environ["BASE_SERVICE_ADVISOR"]
try:
  # Load the stack-level base service_advisor module from PARENT_FILE so this
  # file can subclass service_advisor.ServiceAdvisor below.
  with open(PARENT_FILE, 'rb') as fp:
    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
  traceback.print_exc()
  # Parenthesized print works under both Python 2 and Python 3, unlike the
  # former statement form "print ...".
  print("Failed to load parent")
class ZookeeperServiceAdvisor(service_advisor.ServiceAdvisor):
  """Ambari stack service advisor for ZooKeeper: component-layout validation
  plus configuration recommendations (zoo.cfg dataDir mount placement)."""
  def __init__(self, *args, **kwargs):
    self.as_super = super(ZookeeperServiceAdvisor, self)
    self.as_super.__init__(*args, **kwargs)
    self.initialize_logger("ZookeeperServiceAdvisor")
    # Register every ZooKeeper-specific customization with the base advisor.
    self.modifyMastersWithMultipleInstances()
    self.modifyCardinalitiesDict()
    self.modifyHeapSizeProperties()
    self.modifyNotValuableComponents()
    self.modifyComponentsNotPreferableOnServer()
    self.modifyComponentLayoutSchemes()
  def modifyMastersWithMultipleInstances(self):
    """
    Modify the set of masters with multiple instances.
    Must be overridden in child class.
    """
    self.mastersWithMultipleInstances.add("ZOOKEEPER_SERVER")
  def modifyCardinalitiesDict(self):
    """
    Modify the dictionary of cardinalities.
    Must be overridden in child class.
    """
    # A ZooKeeper quorum needs at least 3 servers.
    self.cardinalitiesDict["ZOOKEEPER_SERVER"] = {"min": 3}
  def modifyHeapSizeProperties(self):
    """
    Modify the dictionary of heap size properties.
    Must be overridden in child class.
    """
    self.heap_size_properties = {"ZOOKEEPER_SERVER": [{"config-name": "zookeeper-env",
                                                       "property": "zk_server_heapsize",
                                                       "default": "1024m"}]}
  def modifyNotValuableComponents(self):
    """
    Modify the set of components whose host assignment is based on other services.
    Must be overridden in child class.
    """
    # Nothing to do
    pass
  def modifyComponentsNotPreferableOnServer(self):
    """
    Modify the set of components that are not preferable on the server.
    Must be overridden in child class.
    """
    # Nothing to do
    pass
  def modifyComponentLayoutSchemes(self):
    """
    Modify layout scheme dictionaries for components.
    The scheme dictionary basically maps the number of hosts to
    host index where component should exist.
    Must be overridden in child class.
    """
    # Nothing to do
    pass
  def getServiceComponentLayoutValidations(self, services, hosts):
    """
    Get a list of errors. Zookeeper does not have any validations in this version.
    """
    self.logger.info("Class: %s, Method: %s. Validating Service Component Layout." %
                     (self.__class__.__name__, inspect.stack()[0][3]))
    return self.getServiceComponentCardinalityValidations(services, hosts, "ZOOKEEPER")
  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
    """
    Recommend configurations to set. Zookeeper does not have any recommendations in this version.
    """
    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                     (self.__class__.__name__, inspect.stack()[0][3]))
    self.recommendConfigurations(configurations, clusterData, services, hosts)
  def recommendConfigurations(self, configurations, clusterData, services, hosts):
    """
    Recommend configurations for this service.
    """
    self.logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
                     (self.__class__.__name__, inspect.stack()[0][3]))
    self.logger.info("Setting zoo.cfg to default dataDir to /hadoop/zookeeper on the best matching mount")
    # (property, component, default path, mount strategy)
    zk_mount_properties = [
      ("dataDir", "ZOOKEEPER_SERVER", "/hadoop/zookeeper", "single"),
    ]
    self.updateMountProperties("zoo.cfg", zk_mount_properties, configurations, services, hosts)
  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
    """
    Validate configurations for the service. Return a list of errors.
    """
    self.logger.info("Class: %s, Method: %s. Validating Configurations." %
                     (self.__class__.__name__, inspect.stack()[0][3]))
    items = []
    # Example of validating by calling helper methods
    '''
    configType = "zookeeper-env"
    method = self.someMethodInThisClass
    resultItems = self.validateConfigurationsForSite(configurations, recommendedDefaults, services, hosts, configType, method)
    items.extend(resultItems)
    method = self.anotherMethodInThisClass
    resultItems = self.validateConfigurationsForSite(configurations, recommendedDefaults, services, hosts, configType, method)
    items.extend(resultItems)
    '''
    return items
  '''
  def someMethodInThisClass(self, properties, recommendedDefaults, configurations, services, hosts):
    validationItems = []
    validationItems.append({"config-name": "zookeeper-env", "item": self.getErrorItem("My custom message 1")})
    return self.toConfigurationValidationProblems(validationItems, "zookeeper-env")
  def anotherMethodInThisClass(self, properties, recommendedDefaults, configurations, services, hosts):
    validationItems = []
    validationItems.append({"config-name": "zookeeper-env", "item": self.getErrorItem("My custom message 2")})
    return self.toConfigurationValidationProblems(validationItems, "zookeeper-env")
  '''
''' | [
"ijarvis@sina.com"
] | ijarvis@sina.com |
7890d7748d77cc57c6b36016d9bbadd55f92a3b6 | 063fbbeb14bec58e25147484bfeae0d73124525e | /python/common.py | 0fd5366a8677754812c037d6e85ffb518fb4836e | [
"MIT"
] | permissive | mit-gfx/py_pbrt | 2bd9f60ee2fa5a35d338259747254b39f972029d | 853382447da449be6dcc38ba0f570508600f4698 | refs/heads/master | 2023-03-05T10:55:12.107096 | 2021-02-21T05:19:12 | 2021-02-21T05:19:12 | 340,816,570 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,836 | py | import numpy as np
def ndarray(val):
    """Coerce *val* (scalar, list, tuple, or array) to a float64 numpy array."""
    converted = np.asarray(val, dtype=np.float64)
    return converted
###############################################################################
# Pretty print.
###############################################################################
def print_error(*message):
    """Print *message* to stdout in red, prefixed with 'ERROR '."""
    args = ('\033[91m', 'ERROR ') + message + ('\033[0m',)
    print(*args)
def print_ok(*message):
    """Print *message* to stdout in green."""
    args = ('\033[92m',) + message + ('\033[0m',)
    print(*args)
def print_warning(*message):
    """Print *message* to stdout in yellow."""
    args = ('\033[93m',) + message + ('\033[0m',)
    print(*args)
def print_info(*message):
    """Print *message* to stdout in cyan."""
    args = ('\033[96m',) + message + ('\033[0m',)
    print(*args)
class PrettyTabular(object):
    """Render aligned tabular console output.

    ``head`` maps column name -> format spec (e.g. ``'{:3.3f}'``); the same
    mapping, in insertion order, drives both the header line and every row.
    """

    def __init__(self, head):
        # Ordered mapping: column name -> format string.
        self.head = head

    def head_string(self):
        """Return the header line with each column name centered over its column."""
        pieces = []
        for name, fmt in self.head.items():
            # Probe the rendered cell width with a dummy value; string specs
            # ('s' in the spec) need a string sample instead of a number.
            sample = fmt.format('0') if 's' in fmt else fmt.format(0)
            width = max(len(sample), len(name)) + 2
            pieces.append('{:^{}}'.format(name, width))
        return ''.join(pieces)

    def row_string(self, row_data):
        """Return one data row (dict of column name -> value) formatted to
        line up with the header."""
        pieces = []
        for name, fmt in self.head.items():
            cell = fmt.format(row_data[name])
            width = max(len(name), len(cell)) + 2
            pieces.append(' ' * (width - len(cell) - 1) + cell + ' ')
        return ''.join(pieces)
###############################################################################
# Folder.
###############################################################################
import shutil
import os
def create_folder(folder_name, exist_ok=False):
if not exist_ok and os.path.isdir(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name, exist_ok=exist_ok)
def delete_folder(folder_name):
    """Recursively delete ``folder_name`` and all of its contents."""
    shutil.rmtree(folder_name)
###############################################################################
# Rotation.
###############################################################################
# Input (rpy): a 3D vector (roll, pitch, yaw).
# Output (R): a 3 x 3 rotation matrix.
def rpy_to_rotation(rpy):
    """Convert (roll, pitch, yaw) Euler angles to a 3x3 rotation matrix.

    Applies roll (about x), then pitch (about y), then yaw (about z):
    R = Rz(yaw) @ Ry(pitch) @ Rx(roll).
    """
    rpy = ndarray(rpy).ravel()
    assert rpy.size == 3
    roll, pitch, yaw = rpy
    cos, sin = np.cos, np.sin
    rot_x = ndarray([
        [1, 0, 0],
        [0, cos(roll), -sin(roll)],
        [0, sin(roll), cos(roll)],
    ])
    rot_y = ndarray([
        [cos(pitch), 0, sin(pitch)],
        [0, 1, 0],
        [-sin(pitch), 0, cos(pitch)],
    ])
    rot_z = ndarray([
        [cos(yaw), -sin(yaw), 0],
        [sin(yaw), cos(yaw), 0],
        [0, 0, 1],
    ])
    return rot_z @ rot_y @ rot_x
###############################################################################
# Export videos.
###############################################################################
import imageio
def export_gif(folder_name, gif_name, fps, name_prefix=''):
    """Assemble the .png frames in *folder_name* into a gif.

    Only files starting with *name_prefix* are used, in sorted order.
    A non-positive *fps* lets imageio pick its default frame rate.
    """
    frames = []
    for entry in os.listdir(folder_name):
        full = os.path.join(folder_name, entry)
        if os.path.isfile(full) and entry.startswith(name_prefix) and entry.endswith('.png'):
            frames.append(full)
    frames.sort()
    images = [imageio.imread(name) for name in frames]
    if fps > 0:
        imageio.mimsave(gif_name, images, fps=fps)
    else:
        imageio.mimsave(gif_name, images)
from pathlib import Path
def export_mp4(folder_name, mp4_name, fps, name_prefix=''):
    """Stitch the matching .png frames in *folder_name* into an mp4.

    Frames are copied into a scratch folder with consecutive zero-padded
    names because ffmpeg's ``%08d.png`` input pattern requires it; the
    scratch folder is removed afterwards.
    """
    frames = []
    for entry in os.listdir(folder_name):
        full = os.path.join(folder_name, entry)
        if os.path.isfile(full) and entry.startswith(name_prefix) and entry.endswith('.png'):
            frames.append(full)
    frames.sort()
    tmp_folder = Path('_export_mp4')
    create_folder(tmp_folder, exist_ok=False)
    for index, frame in enumerate(frames):
        shutil.copyfile(frame, tmp_folder / '{:08d}.png'.format(index))
    os.system('ffmpeg -r ' + str(fps) + ' -i ' + str(tmp_folder / '%08d.png') + ' -vcodec mpeg4 -y ' + str(mp4_name))
    delete_folder(tmp_folder)
"taodu@csail.mit.edu"
] | taodu@csail.mit.edu |
797144fdad73105f67b0ff7bb1599091430fcaac | 85235f02e9674877cfcca8976076e26e39e1ca9e | /ForMark/singleton.py | 07cc54c959af4c7a6a368a0152eb749c0a0c22f5 | [] | no_license | zhaolixiang/ForMark | dd4e4bd2effb0d5085001c8e88d4a9811c100698 | 9bb83348fbb84addca2a40d5f9edeeec4bf9e5c3 | refs/heads/master | 2022-12-23T11:17:18.260110 | 2020-04-24T08:14:57 | 2020-04-24T08:14:57 | 250,973,339 | 0 | 0 | null | 2022-12-08T04:01:47 | 2020-03-29T07:01:47 | Python | UTF-8 | Python | false | false | 226 | py | # 单例模式
class Singleton(type):
    """Metaclass implementing the singleton pattern.

    A class declared with ``metaclass=Singleton`` is instantiated at most
    once; later calls return the cached instance.

    Bug fix: the original inherited from ``object``, so ``super().__call__``
    resolved to a non-existent ``object.__call__`` for instances and raised
    AttributeError — the code only works as a metaclass (base ``type``),
    which the ``cls`` parameter name already implied.
    """
    def __call__(cls, *args, **kwargs):
        # Build and cache the instance on the first call only.
        if not hasattr(cls, '_instance'):
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance
| [
"1782980833@qq.com"
] | 1782980833@qq.com |
e7379a055c87d6291e48f91d2fb474d83a71a427 | 2f44cecd8fc447c9e2f2d9f55abdea36ebb40cc5 | /剑指offer2/11.py | a2ed7b1898b111ad89eba9553fab224aca071393 | [] | no_license | yuzumei/leetcode | 751a234b429131169e3eaf4594ffeb3b94f6ab34 | b6708b03c92ec92e89fc7ecf13f1995dee346657 | refs/heads/master | 2023-07-28T05:48:53.192948 | 2021-09-11T06:16:07 | 2021-09-11T06:16:07 | 365,780,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding:utf-8 -*-
# @Author : Yuzu
# @Time : 2021/9/5 21:09
# @file : 11.py
class Solution:
    def findMaxLength(self, nums) -> int:
        """Return the length of the longest contiguous subarray of *nums*
        containing an equal number of 0s and 1s.

        Uses a running balance (+1 for 1, -1 for 0); two indices with the
        same balance bound a balanced subarray, so only the first index per
        balance value needs to be remembered. O(n) time, O(n) space.

        Improvement over the original: the input list is no longer mutated
        in place (the original overwrote *nums* with prefix sums).
        """
        first_seen = {0: -1}  # balance -> earliest index; -1 handles prefixes
        best = 0
        balance = 0
        for i, v in enumerate(nums):
            balance += 1 if v else -1
            if balance in first_seen:
                best = max(best, i - first_seen[balance])
            else:
                first_seen[balance] = i
        return best
# Quick manual check: longest balanced subarray of [0, 1, 0] has length 2.
solver = Solution()
print(solver.findMaxLength([0, 1, 0]))
"973802530@qq.com"
] | 973802530@qq.com |
16c24fcee56307c226d8c936e065ef10f9edcb43 | 8f35dbebd8fe0fe7eacb2bbcffa6e8c96c9bb506 | /inc/console.py | 701b71839e4509fe01a9feb68bd8fd0ddfba0798 | [
"MIT"
] | permissive | tulibraries/combine | 57280d374a622543ef34da479c721b0b935230aa | eb100ea17193d65485aa6c4a7f05a41b4cab7515 | refs/heads/master | 2020-07-03T16:54:23.618414 | 2019-09-26T15:53:59 | 2019-09-26T15:53:59 | 201,976,306 | 1 | 0 | MIT | 2019-09-26T16:09:16 | 2019-08-12T17:18:32 | JavaScript | UTF-8 | Python | false | false | 755 | py | # convenience methods for Django's shell_plus
import os
from core.models import *
# get Record instance
def get_r(id):
    """Return the Record instance with the given id."""
    record = Record.objects.get(id=id)
    return record
# get Job instance
def get_j(id):
    """Return the Job instance with the given primary key."""
    job = Job.objects.get(pk=int(id))
    return job
# get CombineJob instance
def get_cj(id):
    """Return the CombineJob instance for the given job id."""
    combine_job = CombineJob.get_combine_job(int(id))
    return combine_job
# get RecordGroup instance
def get_rg(id):
    """Return the RecordGroup instance with the given primary key."""
    record_group = RecordGroup.objects.get(pk=int(id))
    return record_group
# get Organization instance
def get_o(id):
    """Return the Organization instance with the given primary key."""
    organization = Organization.objects.get(pk=int(id))
    return organization
# tail livy
def tail_livy():
    """Follow the Livy stderr log (blocks until interrupted)."""
    command = 'tail -f /var/log/livy/livy.stderr'
    os.system(command)
# tail celery
def tail_celery():
    """Follow the Celery stdout log (blocks until interrupted)."""
    command = 'tail -f /var/log/celery.stdout'
    os.system(command)
# get StateIO instance
def get_sio(id):
    """Return the StateIO instance with the given id."""
    state_io = StateIO.objects.get(id=id)
    return state_io
| [
"ghukill@gmail.com"
] | ghukill@gmail.com |
16f565b451e48fa90ec01e19fbee6dd257f35ac6 | f2ff79ab3d0b1328c66b834826cd311dc8eb5cc2 | /tests/test_clilib.py | 21bc633216f98ccef50a0228ce464d3a38575341 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | sambfloyd/clldutils | 2bc8251d94105f46c192de156741e623c7c33b62 | 92ab4cab4f9a39e0d6f20f09ef75e0ea4a11d025 | refs/heads/master | 2021-01-16T08:58:14.877413 | 2020-02-14T10:30:47 | 2020-02-14T10:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,218 | py | import pathlib
import argparse
import importlib
import pytest
from clldutils.clilib import *
from clldutils.path import sys_path
def test_get_parser_and_subparser():
    """get_parser_and_subparsers returns a truthy (parser, subparsers) pair."""
    result = get_parser_and_subparsers('a')
    assert result
def test_register_subcommands(fixtures_dir, mocker):
    """Commands from the package and from entry points are both registered,
    and subcommand help honours the default formatter (raw description +
    default values) unless an explicit formatter_class is passed."""
    with sys_path(fixtures_dir):
        pkg = importlib.import_module('commands')
    # Fake entry point whose load() yields the same commands package.
    class EP:
        name = 'abc'
        def load(self):
            return pkg
    mocker.patch(
        'clldutils.clilib.pkg_resources',
        mocker.Mock(iter_entry_points=mocker.Mock(return_value=[EP()])))
    parser, sp = get_parser_and_subparsers('a')
    res = register_subcommands(sp, pkg, entry_point='x')
    # Package command plus the entry-point-prefixed variant.
    assert 'cmd' in res
    assert 'abc.cmd' in res
    help = None
    # Dig the 'cmd' subparser out of argparse internals to inspect its help.
    subparsers_actions = [
        action for action in parser._actions
        if isinstance(action, argparse._SubParsersAction)]
    for subparsers_action in subparsers_actions:
        for choice, subparser in subparsers_action.choices.items():
            if choice == 'cmd':
                help = subparser.format_help()
    # Make sure a RawDescription formatter is used:
    assert 'Test command\n- formatted' in help
    # Make sure default values are formatted:
    assert 'o (default: x)' in help
    # With a plain HelpFormatter neither behaviour applies.
    res = register_subcommands(sp, pkg, formatter_class=argparse.HelpFormatter)
    help = None
    subparsers_actions = [
        action for action in parser._actions
        if isinstance(action, argparse._SubParsersAction)]
    for subparsers_action in subparsers_actions:
        for choice, subparser in subparsers_action.choices.items():
            if choice == 'cmd':
                help = subparser.format_help()
    # Make sure a RawDescription formatter is used:
    assert 'Test command\n- formatted' not in help
    # Make sure default values are formatted:
    assert 'o (default: x)' not in help
def test_register_subcommands_error(fixtures_dir, mocker, recwarn):
    """An entry point whose load() raises is skipped with a warning."""
    with sys_path(fixtures_dir):
        pkg = importlib.import_module('commands')

    class FailingEntryPoint:
        name = 'abc'

        def load(self):
            raise ImportError()

    mocker.patch(
        'clldutils.clilib.pkg_resources',
        mocker.Mock(iter_entry_points=mocker.Mock(return_value=[FailingEntryPoint()])))
    _, sub = get_parser_and_subparsers('a')
    commands = register_subcommands(sub, pkg, entry_point='x')
    assert 'abc.cmd' not in commands
    assert recwarn.pop(UserWarning)
def test_ArgumentParser(capsys):
    """End-to-end behaviour of ArgumentParserWithLogging: help output,
    command dispatch, ParserError reporting, unknown-command handling,
    and commands registered via the @command decorator."""
    def cmd(args):
        """
        docstring
        """
        if len(args.args) < 1:
            raise ParserError('not enough arguments')
        print(args.args[0])
    parser = ArgumentParserWithLogging('pkg', cmd)
    # 'help cmd' prints the command's docstring.
    parser.main(args=['help', 'cmd'])
    out, err = capsys.readouterr()
    assert 'docstring' in out
    # A successful call echoes its argument and returns 0.
    parser.main(args=['cmd', 'arg'])
    out, err = capsys.readouterr()
    assert 'arg' in out
    assert parser.main(args=['cmd', 'arg']) == 0
    # ParserError is reported on stdout instead of propagating.
    parser.main(args=['cmd'])
    out, err = capsys.readouterr()
    assert 'not enough arguments' in out
    # Unknown commands yield a non-zero exit and an 'invalid ...' message.
    assert parser.main(args=['x']) != 0
    out, err = capsys.readouterr()
    assert out.startswith('invalid')
    # Commands can also be registered globally via the decorator,
    # optionally with an explicit name and usage string.
    @command()
    def ls(args):
        """
        my name is ls
        """
        return
    @command(name='list', usage='my name is {0}'.format('list'))
    def f(args):
        """
        """
        return
    parser = ArgumentParserWithLogging('pkg')
    parser.main(args=['help', 'ls'])
    out, err = capsys.readouterr()
    assert 'my name is ls' in out
    parser.main(args=['help', 'list'])
    out, err = capsys.readouterr()
    assert 'my name is list' in out
    assert parser.main(args=['ls', 'arg']) == 0
    assert parser.main(args=['list', 'arg']) == 0
def test_cmd_error():
    """Exceptions from a command propagate unless catch_all=True, which
    turns them into exit code 1."""
    from clldutils.clilib import ArgumentParser

    def failing(args):
        raise ValueError

    parser = ArgumentParser('pkg', failing)
    with pytest.raises(ValueError):
        parser.main(args=['failing'])
    assert parser.main(args=['failing'], catch_all=True) == 1
def test_confirm(capsys, mocker):
    """confirm() returns the default on empty input and re-prompts on
    unrecognised answers."""
    from clldutils.clilib import confirm

    # Empty answer falls back to the default.
    mocker.patch('clldutils.clilib.input', mocker.Mock(return_value=''))
    assert confirm('a?')
    assert not confirm('a?', default=False)

    # An invalid answer ('x') triggers a re-prompt before 'y' is accepted.
    answers = mocker.Mock(side_effect=['x', 'y'])
    mocker.patch('clldutils.clilib.input', answers)
    assert confirm('a?')
    captured = capsys.readouterr()
    assert 'Please respond' in captured.out
def test_Table(capsys):
    """Table prints the collected rows in the requested format on exit."""
    ns = argparse.Namespace(format='simple')
    with Table(ns, 'a') as table:
        table.append(['x'])
    captured = capsys.readouterr()
    assert captured.out == 'a\n---\nx\n'
def test_add_format():
    """add_format attaches its option to an existing parser without error."""
    parser, _unused = get_parser_and_subparsers('c')
    add_format(parser)
def test_PathType(tmpdir):
    """PathType(type='file') yields pathlib.Path for existing files and
    exits for missing paths or directories."""
    parser = argparse.ArgumentParser()
    parser.add_argument('a', type=PathType(type='file'))

    parsed = parser.parse_args([__file__])
    assert isinstance(parsed.a, pathlib.Path)

    for bad in ('x', str(tmpdir)):
        with pytest.raises(SystemExit):
            parser.parse_args([bad])
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
03058f63b0c755961d200bef835c5dd5e9f9612b | e1f0b70333a2070f711b8f38741c52b17c9bbc16 | /day-1/nested-loop.py | 713216c02b49434c6325653f49b29d7761a0598a | [
"MIT"
] | permissive | jongfranco/python-workshop-3 | c9bfb7726669f8527ae8a99f855005b70df30b02 | 7a9ab7acc9202ca0a3e48c825cd516fcf0a1a747 | refs/heads/master | 2022-11-21T15:38:14.645641 | 2020-07-15T18:40:15 | 2020-07-15T18:40:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | for i in range(1, 5):
for j in range(2, 6):
print(str(i) + ' x ' + str(j) + ' = ' + str(i * j))
| [
"anish_bt2k16@dtu.ac.in"
] | anish_bt2k16@dtu.ac.in |
e24e1885f44d205d6a98fab50b4e827710fcbca5 | afcf8a43e0e20c8748a6fe6629235c993c563b40 | /applications/audit-trail/ow/virtualenv/bin/rst2s5.py | 16eeb27f5dba664496b497d58715c8f47286b027 | [] | no_license | jjkotni/faas-benchmarks | dafd0857809e4ff7b1701646799d03517bc7afc2 | 3a22603bc4340d39e610921514d4f75c9f95aec0 | refs/heads/master | 2022-12-12T17:07:47.183247 | 2020-05-31T04:04:53 | 2020-05-31T04:04:53 | 247,883,437 | 0 | 1 | null | 2022-12-08T05:27:06 | 2020-03-17T05:02:32 | Python | UTF-8 | Python | false | false | 705 | py | #!/home/kjj/faas-benchmarks/applications/audit-trail/batching/virtualenv/bin/python3.5
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
# Best-effort: switch to the user's preferred locale so docutils handles
# non-ASCII I/O correctly; ignore failures and keep the default "C" locale.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline, default_description
# Tool description shown by --help, prefixed to docutils' generic text.
description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)
# Parse sys.argv and run the Docutils publisher with the S5 writer.
publish_cmdline(writer_name='s5', description=description)
| [
"kjjswaroop@gmail.com"
] | kjjswaroop@gmail.com |
ffe6d854d8d1aedcc34bedb819dc158015905c8e | bb970bbe151d7ac48d090d86fe1f02c6ed546f25 | /arouse/_dj/conf/__init__.py | 3ecd0cc2268e18e4daa2d70e2fef331cc127a941 | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | thektulu/arouse | 95016b4028c2b8e9b35c5062a175ad04286703b6 | 97cadf9d17c14adf919660ab19771a17adc6bcea | refs/heads/master | 2021-01-13T12:51:15.888494 | 2017-01-09T21:43:32 | 2017-01-09T21:43:32 | 78,466,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,904 | py | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from arouse._dj.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time
from arouse._dj.conf import global_settings
from arouse._dj.core.exceptions import ImproperlyConfigured
from arouse._dj.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. " % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return sorted(
s for s in list(self.__dict__) + dir(self.default_settings)
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return (deleted or set_locally or set_on_default)
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
| [
"michal.s.zukowski@gmail.com"
] | michal.s.zukowski@gmail.com |
2dbb8694809973b8675987ec2313935492d5ead1 | 4e1ff54c2f2a21fd6d8e34f2bc3d6dc9990ffa0e | /model/batch_norm_default.py | 3a3c2b7b624283639aa5d6ca6003cb87b3f4cde4 | [] | no_license | haoxiangsnr/A-Convolutional-Recurrent-Neural-Network-for-Real-Time-Speech-Enhancement | 2a037da46f2c89c368cd41b2cba89519cdf471cb | 31610a5b6b398b90ae6b42701ee6cf0e8dcfe871 | refs/heads/master | 2021-07-13T09:08:26.370828 | 2020-09-05T00:50:51 | 2020-09-05T00:50:51 | 201,885,023 | 259 | 55 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | import torch.nn as nn
import torch.nn.functional as F
import torch
class CRNN(nn.Module):
    """Convolutional recurrent network for speech enhancement.

    Input:  [batch, channels=1-compatible (B, T, F)] — a (B, T, F) spectrogram;
            a channel dim is added internally.
    Output: [batch, T, F] with F == 161 given the fixed conv geometry.

    Encoder: 5 strided convs halving the frequency axis each step
    (161 -> 80 -> 39 -> 19 -> 9 -> 4, 256 channels -> 1024 features).
    A 2-layer LSTM models time, and a mirrored decoder with skip
    connections (concatenated encoder features) restores the resolution.

    Fixes vs. the original:
    * ``forward`` no longer mutates the caller's tensor in place
      (``x.unsqueeze_(1)`` -> ``x.unsqueeze(1)``).
    * The final ``squeeze()`` is now ``squeeze(1)`` so a batch of size 1
      keeps its batch dimension, matching the documented output shape.
    """

    def __init__(self):
        super(CRNN, self).__init__()
        # Encoder: each conv halves the frequency axis (kernel (1,3), stride (1,2)).
        self.bn0 = nn.BatchNorm2d(num_features=1)
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(1, 3), stride=(1, 2))
        self.bn1 = nn.BatchNorm2d(num_features=16)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(1, 3), stride=(1, 2))
        self.bn2 = nn.BatchNorm2d(num_features=32)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(1, 3), stride=(1, 2))
        self.bn3 = nn.BatchNorm2d(num_features=64)
        self.conv4 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(1, 3), stride=(1, 2))
        self.bn4 = nn.BatchNorm2d(num_features=128)
        self.conv5 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(1, 3), stride=(1, 2))
        self.bn5 = nn.BatchNorm2d(num_features=256)

        # Temporal model over the flattened 256ch x 4freq = 1024 features.
        self.LSTM1 = nn.LSTM(input_size=1024, hidden_size=1024, num_layers=2, batch_first=True)

        # Decoder: transposed convs mirror the encoder; inputs are doubled
        # in channels because encoder features are concatenated (skips).
        self.convT1 = nn.ConvTranspose2d(in_channels=512, out_channels=128, kernel_size=(1, 3), stride=(1, 2))
        self.bnT1 = nn.BatchNorm2d(num_features=128)
        self.convT2 = nn.ConvTranspose2d(in_channels=256, out_channels=64, kernel_size=(1, 3), stride=(1, 2))
        self.bnT2 = nn.BatchNorm2d(num_features=64)
        self.convT3 = nn.ConvTranspose2d(in_channels=128, out_channels=32, kernel_size=(1, 3), stride=(1, 2))
        self.bnT3 = nn.BatchNorm2d(num_features=32)
        # output_padding=1: otherwise the width works out to 79 instead of 80.
        self.convT4 = nn.ConvTranspose2d(in_channels=64, out_channels=16, kernel_size=(1, 3), stride=(1, 2), output_padding=(0, 1))
        self.bnT4 = nn.BatchNorm2d(num_features=16)
        self.convT5 = nn.ConvTranspose2d(in_channels=32, out_channels=1, kernel_size=(1, 3), stride=(1, 2))
        self.bnT5 = nn.BatchNorm2d(num_features=1)

    def forward(self, x):
        # Add a channel dim without mutating the caller's tensor: (B, 1, T, F).
        x = self.bn0(x.unsqueeze(1))
        x1 = F.elu(self.bn1(self.conv1(x)))
        x2 = F.elu(self.bn2(self.conv2(x1)))
        x3 = F.elu(self.bn3(self.conv3(x2)))
        x4 = F.elu(self.bn4(self.conv4(x3)))
        x5 = F.elu(self.bn5(self.conv5(x4)))

        # (B, C, T, F) -> (B, T, C*F) for the batch-first LSTM.
        out5 = x5.permute(0, 2, 1, 3)
        out5 = out5.reshape(out5.size()[0], out5.size()[1], -1)
        lstm, (hn, cn) = self.LSTM1(out5)
        # (B, T, 1024) -> (B, 256, T, 4) to re-enter the conv stack.
        output = lstm.reshape(lstm.size()[0], lstm.size()[1], 256, -1)
        output = output.permute(0, 2, 1, 3)

        # Decoder with encoder skip connections (channel-wise concat).
        res = torch.cat((output, x5), 1)
        res1 = F.elu(self.bnT1(self.convT1(res)))
        res1 = torch.cat((res1, x4), 1)
        res2 = F.elu(self.bnT2(self.convT2(res1)))
        res2 = torch.cat((res2, x3), 1)
        res3 = F.elu(self.bnT3(self.convT3(res2)))
        res3 = torch.cat((res3, x2), 1)
        res4 = F.elu(self.bnT4(self.convT4(res3)))
        res4 = torch.cat((res4, x1), 1)
        # (B, 1, T, F); drop only the channel dim so batch==1 survives.
        res5 = F.relu(self.bnT5(self.convT5(res4)))
        return res5.squeeze(1)
| [
"haoxiangsnr@gmail.com"
] | haoxiangsnr@gmail.com |
824ffbe6bcb7970e95b94c794a7386665de41747 | 3996539eae965e8e3cf9bd194123989741825525 | /PhysicsTools/JetMCAlgos/matchGenHFHadron_cfi.py | dc80c83017b34f44fcee282a9cc724b02a224333 | [] | no_license | cms-sw/cmssw-cfipython | 01990ea8fcb97a57f0b0cc44a8bf5cde59af2d98 | 25ee4c810103c4a507ca1b949109399a23a524c5 | refs/heads/CMSSW_11_2_X | 2023-09-01T16:56:00.658845 | 2022-06-20T22:49:19 | 2022-06-20T22:49:19 | 136,184,115 | 1 | 0 | null | 2022-10-19T14:04:01 | 2018-06-05T13:47:28 | Python | UTF-8 | Python | false | false | 336 | py | import FWCore.ParameterSet.Config as cms
# Auto-generated cfi for the GenHFHadronMatcher EDProducer.
# flavour=5 configures matching of b-flavoured hadrons; required InputTags
# (genParticles, jetFlavourInfos) must be set by the importing config.
matchGenHFHadron = cms.EDProducer('GenHFHadronMatcher',
    genParticles = cms.required.InputTag,
    jetFlavourInfos = cms.required.InputTag,
    noBBbarResonances = cms.bool(True),
    onlyJetClusteredHadrons = cms.bool(False),
    flavour = cms.int32(5),
    mightGet = cms.optional.untracked.vstring
)
| [
"cmsbuild@cern.ch"
] | cmsbuild@cern.ch |
6cd99f4cccde01fec5a66af6c83a998cd6dcf091 | e50954bb35fbc377a1c9a6842fa4ceb6b6234b39 | /zips/plugin.video.wargames/resources/lib/scrapersource/scenedown_mv_tv.py | 9fd1378892301541c1acaec6be953b03008cc793 | [] | no_license | staycanuca/BUILDONLY | f213e242ed869475668933ac7b6ee2d4e8508bbc | f87684bf0111a1079b0e1184e1bfca3f2c5348ed | refs/heads/master | 2021-05-09T04:54:22.747154 | 2018-01-28T08:55:07 | 2018-01-28T08:55:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,537 | py | # -*- coding: utf-8 -*-
'''
Add-on
Copyright (C) 2016
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.scrapermods import cleantitle
from resources.lib.scrapermods import client
from resources.lib.scrapermods import debrid
class source:
    """Scraper source for scenedown.in (Python 2 / urllib-style API).

    movie/tvshow/episode build opaque querystring "url" tokens; sources()
    searches the site's RSS feed and yields host-link dicts; resolve() is
    a pass-through. Broad bare excepts are deliberate best-effort scraping.
    """
    def __init__(self):
        self.domains = ['scenedown.in']
        self.base_link = 'http://scenedown.in'
        self.search_link = '/search/%s/feed/rss2/'
    def movie(self, imdb, title, year):
        # Encode the movie identity as a querystring token; None on failure.
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        # Encode the show identity as a querystring token; None on failure.
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend the show token with episode-level fields.
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        """Search the RSS feed and return a list of candidate source dicts.

        Debrid-only provider: bails out early when debrid is disabled.
        """
        try:
            sources = []
            if url == None: return sources
            if debrid.status() == False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr is the token a valid release name must contain:
            # SxxExx for episodes, the year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            posts = client.parseDOM(r, 'item')
            hostDict = hostprDict + hostDict
            items = []
            # Pass 1: collect (title, link, size) triples from each RSS item.
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'enclosure', ret='url', attrs={'type': 'video.+?'})
                    if not u: raise Exception()
                    c = client.parseDOM(post, 'content.+?')[0]
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'
                    u = client.parseDOM(c, 'a', ret='href')
                    items += [(t, i, s) for i in u]
                except:
                    pass
            # Pass 2: validate each link against title/episode, derive quality
            # and size info, and keep only links on known hosters.
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                    if any(i in ['extras'] for i in fmt): raise Exception()
                    if '1080p' in fmt: quality = '1080p'
                    elif '720p' in fmt: quality = 'HD'
                    else: quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
                    info = []
                    if '3d' in fmt: info.append('3D')
                    # Normalize the release size to GB for display; best-effort.
                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
                    info = ' | '.join(info)
                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'provider': 'Scenedown', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass
            # Prefer non-CAM sources when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            return sources
    def resolve(self, url):
        # Links are already direct hoster URLs; nothing to resolve.
        return url
| [
"biglad@mgawow.co.uk"
] | biglad@mgawow.co.uk |
dc758b6a7277e9de59e2c57cdf69fdd4bce8dd25 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/market_tools/tools/complain/mobile_views.py | 98be8a1fab60f3f9c0eaab6a009dbd7b4ff0c26a | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | # -*- coding: utf-8 -*-
__author__ = 'chuter'
import os
from datetime import datetime, timedelta
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.template import Context, RequestContext
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.shortcuts import render_to_response
from core.exceptionutil import unicode_full_stack
from modules.member.util import get_member
from models import *
from webapp.modules.cms.models import SpecialArticle
from modules.member.models import *
# Components of this module's directory path; the last component is the
# tool's folder name, used to build its template directory below.
template_path_items = os.path.dirname(__file__).split(os.sep)
# Template directory (relative to the template loader roots).
TEMPLATE_DIR = '%s/templates' % template_path_items[-1]
# Page size for paginated listings (not referenced in this view module).
COUNT_PER_PAGE = 15
def get_settings(request):
    """Render the member-complain page for the settings id in the query string.

    If the MemberComplainSettings row is gone, a "deleted data" page is
    rendered instead.
    """
    settings_id = int(request.GET['settings_id'])
    template = '%s/complain/webapp/member_complain.html' % TEMPLATE_DIR
    # Best-effort member lookup: requests outside WeChat may have none.
    try:
        member = request.member
    except:
        member = None
    hide_non_member_cover = False
    try:
        complain_settings = MemberComplainSettings.objects.get(id=settings_id)
        if complain_settings.is_non_member:
            hide_non_member_cover = True
    except:
        # Settings record no longer exists: show the deleted-data page.
        context = RequestContext(request, {
            'is_deleted_data': True
        })
        return render_to_response(template, context)
    request.should_hide_footer = True
    context = RequestContext(request, {
        'page_title': u'用户反馈',
        'member': member,
        'member_complain_settings': complain_settings,
        'hide_non_member_cover': hide_non_member_cover
    })
    return render_to_response(template, context)
| [
"jiangzhe@weizoom.com"
] | jiangzhe@weizoom.com |
4d8da9b345c887bd27fb0d8b57d52da3c1595cb0 | 9ed325dd1cf60458135796b0df32bf0877481134 | /marketplace/migrations/0001_initial.py | c93fc67edd1ee09ca220d5ba40a15a30283a48ad | [] | no_license | codingspider/konetos | ca64b25cb8fa43a44913b9e58067c271ec0d1756 | d484284287e16f807530af11ce1d2918e05d3d42 | refs/heads/master | 2023-02-10T04:48:53.764000 | 2020-12-30T12:13:30 | 2020-12-30T12:13:30 | 324,984,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,090 | py | # Generated by Django 2.2 on 2020-12-24 06:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the marketplace app: creates the
    # Category, Order, Product and ProductSlider tables. Do not edit the
    # field definitions by hand; generate a follow-up migration instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('status', models.CharField(blank=True, max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('price', models.FloatField(blank=True, null=True)),
                ('status', models.CharField(blank=True, max_length=255, null=True)),
                ('user_id', models.IntegerField(blank=True, null=True)),
                ('qty', models.IntegerField(blank=True, null=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('image', models.ImageField(blank=True, null=True, upload_to='products/')),
                ('price', models.FloatField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('status', models.CharField(blank=True, max_length=255, null=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, null=True)),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='marketplace.Category')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ProductSlider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, null=True)),
                ('image', models.ImageField(blank=True, null=True, upload_to='sliders/')),
                ('status', models.CharField(blank=True, max_length=255, null=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, null=True)),
                ('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='marketplace.Product')),
            ],
        ),
    ]
| [
"engrokon.rok@gmail.com"
] | engrokon.rok@gmail.com |
39597b6730f6fb6296e2187603b26ceff65fd6e2 | 1944f0b25a19080832933b78d9c191ceb212a62b | /minggu-13/praktik/src/8_7.py | 237368f6f3fbc297c07bd72a1f6199bb694db043 | [] | no_license | satriang/bigdata | b0432683dde5b3cb6c3b2e22c8ce80530b32cb67 | 4075ced73978ae7d1169a42ffead640b94b0fe04 | refs/heads/master | 2020-03-28T15:34:43.885548 | 2019-01-07T22:41:32 | 2019-01-07T22:41:32 | 148,607,481 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | #!/usr/bin/env python
# coding: utf-8

# In[1]:

import pandas as pd
import numpy as np

# In[7]:

# Load the state/fruit table, using the first CSV column as the row index.
# Then reshape from wide to long: stack() moves the fruit columns into the
# row index, rename_axis labels the two index levels, and reset_index turns
# the stacked values into a 'weight' column.
# NOTE(review): the reshaped result is not assigned to anything, so outside
# a notebook (where the last expression is displayed) this line has no effect.
state_fruit = pd.read_csv('data/state_fruit.csv', index_col=0)
state_fruit.stack() .rename_axis(['state', 'fruit']) .reset_index(name='weight')

# In[ ]:
| [
"ngestusatria7@gmail.com"
] | ngestusatria7@gmail.com |
f202eedd79a9e367604c1e9a49f05eb86ad60345 | 5dbf8039a281c4ba13e9cb19453a4bace0f2b6bd | /billing/admin.py | 76a5c2e8c6c9d2b1f7c499cb34b4b446e54278b0 | [] | no_license | adamtlord/ease | e605d901fc944d48212c4283998e1b4995c09324 | 51e7e0e79e21dad6fa7bdd360cd0a5c0ba3c9d41 | refs/heads/master | 2021-01-18T22:24:12.146024 | 2019-07-30T17:48:14 | 2019-07-30T17:48:14 | 72,482,564 | 0 | 0 | null | 2019-01-02T17:15:51 | 2016-10-31T22:15:56 | CSS | UTF-8 | Python | false | false | 1,242 | py | from django.contrib import admin
from billing.models import Plan, StripeCustomer, Invoice, GroupMembership, Balance, Gift, Subscription
class SubscriptionAdmin(admin.ModelAdmin):
    """Admin options for Subscription: creation timestamp is display-only."""
    readonly_fields = ('date_created',)
class GroupMembershipAdmin(admin.ModelAdmin):
    """Admin options for GroupMembership."""
    # Raw-id widgets avoid rendering a full <select> for large FK tables.
    raw_id_fields = ("address", "user", "ride_account", "subscription_account")
class StripeCustomerAdmin(admin.ModelAdmin):
    """Admin options for StripeCustomer."""
    # Change-list columns: object repr plus the key Stripe identifiers.
    list_display = ["__unicode__", "stripe_id", "last_4_digits"]
    # Searchable by the customer's own name or by the name on any related
    # subscription/ride/plain customer record.
    search_fields = ["first_name",
                     "last_name",
                     "subscription_customer__first_name",
                     "subscription_customer__last_name",
                     "ride_customer__first_name",
                     "ride_customer__last_name",
                     "customer__first_name",
                     "customer__last_name"]
    raw_id_fields = ["customer"]
class BalanceAdmin(admin.ModelAdmin):
    """Admin options for Balance."""
    # Raw-id widget for the StripeCustomer FK to keep the form lightweight.
    raw_id_fields = ['stripe_customer']
# Register the billing models with the admin site; models with customized
# options use the ModelAdmin subclasses defined above.
admin.site.register(Plan)
admin.site.register(StripeCustomer, StripeCustomerAdmin)
admin.site.register(Invoice)
admin.site.register(GroupMembership, GroupMembershipAdmin)
admin.site.register(Balance, BalanceAdmin)
admin.site.register(Gift)
admin.site.register(Subscription, SubscriptionAdmin)
| [
"adam.lord@gmail.com"
] | adam.lord@gmail.com |
ba891550744ddb67f7c86b7185ac71745e52b0c3 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/tutorial/gui/Scaleform/meta/TutorialBattleStatisticMeta.py | 15582a969c3a38f9a7ba3204478e72fb0e9e2039 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 957 | py | # 2017.05.04 15:28:00 Střední Evropa (letní čas)
# Embedded file name: scripts/client/tutorial/gui/Scaleform/meta/TutorialBattleStatisticMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class TutorialBattleStatisticMeta(AbstractWindowView):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends AbstractWindowView
    """

    def restart(self):
        # Stub generated from the view description: logs an error because a
        # concrete subclass is expected to override this method.
        self._printOverrideError('restart')

    def showVideoDialog(self):
        # Stub generated from the view description: logs an error because a
        # concrete subclass is expected to override this method.
        self._printOverrideError('showVideoDialog')

    def as_setDataS(self, data):
        # Forward `data` to the Flash display object, but only once the
        # DAAPI bridge has been initialized; returns None otherwise.
        if self._isDAAPIInited():
            return self.flashObject.as_setData(data)
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\tutorial\gui\Scaleform\meta\TutorialBattleStatisticMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:28:00 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
62b8df2cdf56956f30f95202944153f596347fa4 | f98f0ae6a318c8efd8aeac770b000534d1c6ab23 | /da_rnn/metrics.py | df615fe802d6bb0b9419a30454e1d4242d5d2a0b | [
"Apache-2.0"
] | permissive | kimsse0430/dual_stage_attention_rnn | aff71111785af91c47f371b785dab5b41fc0d4e7 | 08744ee2cfa3dc71fb1c9da895e879708cea805e | refs/heads/master | 2022-04-19T01:47:21.123150 | 2020-03-09T15:27:26 | 2020-03-09T15:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | """ Metrics utilities """
import numpy as np
class Metrics:
""" Metrics to evaluate prediction performance """
def __init__(self):
pass
def get_metrics_dict(self, predictions, labels):
""" Return the metrics result in dict """
res = dict()
res['rmse'] = self.rmse(predictions, labels)
res['mae'] = self.mae(predictions, labels)
res['mape'] = self.mape(predictions, labels)
return res
@staticmethod
def rmse(predictions, labels):
""" RMSE ratio """
return np.sqrt(np.mean(np.subtract(predictions, labels) ** 2))
@staticmethod
def mae(predictions, labels):
""" MAE ratio """
return np.mean(np.abs(predictions - labels))
@staticmethod
def mape(predictions, labels):
""" MAPE ratio """
return np.mean(np.abs(np.subtract(predictions, labels) / labels))
@staticmethod
def metrics_dict_to_str(metrics_dict):
""" Convert metrics to a string to show in the console """
eval_info = ''
for key, value in metrics_dict.items():
eval_info += '{0} : {1}, '.format(key, value)
return eval_info[:-1]
| [
"siqiao_xue@163.com"
] | siqiao_xue@163.com |
c937b25f03ec54e88b2deaeb51f830ade524d655 | b6d459c0201b307cd82556dba8d7cfb34da0e90e | /tensorflow/python/eager/polymorphic_function/concrete_function.py | 9fc579e7eb3252f96daa06cd9c9203fe3f40ae2f | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | melG81/tensorflow | 9641f69d1909c99c7c9fe8f8f6c052e9be7a6b1a | 940570002097f5173db75740ae74e0d8bb3f7223 | refs/heads/master | 2023-08-29T06:58:35.483170 | 2023-05-31T14:00:04 | 2023-05-31T14:04:50 | 133,704,084 | 0 | 0 | Apache-2.0 | 2023-05-27T18:51:13 | 2018-05-16T17:52:33 | C++ | UTF-8 | Python | false | false | 83,409 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Implementation for Monomorphic Functions (including Differentiable ones)."""
import collections
import pprint
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.function.polymorphism import function_type as function_type_lib
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.eager import forwardprop_util
from tensorflow.python.eager import record
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.eager.polymorphic_function import atomic_function
from tensorflow.python.eager.polymorphic_function import attributes as attributes_lib
from tensorflow.python.eager.polymorphic_function import function_type_utils
from tensorflow.python.eager.polymorphic_function import saved_model_exported_concrete
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import trace
from tensorflow.python.trackable import base as trackable
from tensorflow.python.types import core
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
def _is_type_subset(a, b):
  """True if `b` is a subset of type `a`, or if `a` is not a TypeSpec."""
  # Anything that is not a TypeSpec is treated as accepting any type.
  if not isinstance(a, type_spec.TypeSpec):
    return True
  return a.most_specific_compatible_type(b) == a
def _parse_func_attrs(attributes):
  """Convert the keyword arguments into function_def attributes.

  Only primitive values (bool, int, float, str/bytes) and pre-built
  AttrValue protos are supported.

  Args:
    attributes: the dictionary of attributes.

  Returns:
    A dict mapping each attribute name to its AttrValue proto.

  Raises:
    ValueError: If the kwargs contains unallowlisted name or unsupported value
      types.
  """
  parsed = {}
  for name, value in attributes.items():
    if name not in attributes_lib.MONOMORPHIC_FUNCTION_ALLOWLIST:
      raise ValueError(
          f"ConcreteFunction does not support `{name}` as an attribute.")
    if isinstance(value, attr_value_pb2.AttrValue):
      parsed[name] = value
    elif isinstance(value, bool):
      # The bool branch must precede the int branch: bool subclasses int.
      parsed[name] = attr_value_pb2.AttrValue(b=value)
    elif isinstance(value, int):
      parsed[name] = attr_value_pb2.AttrValue(i=value)
    elif isinstance(value, float):
      parsed[name] = attr_value_pb2.AttrValue(f=value)
    elif isinstance(value, (str, bytes)):
      parsed[name] = attr_value_pb2.AttrValue(s=compat.as_bytes(value))
    else:
      raise ValueError(f"Attribute {name} must be bool, int, float, string, or "
                       f"AttrValue. Got {type(value)}.")
  return parsed
# Name prefixes used to tag generated functions by role; consumed by the
# _forward_name/_backward_name/_inference_name helpers below.
_FORWARD_PREFIX = "__forward_"
_BACKWARD_PREFIX = "__backward_"
_INFERENCE_PREFIX = "__inference_"
def _forward_name(n):
  """The name of a generated forward defun named n (unique per call)."""
  return f"{_FORWARD_PREFIX}{n}_{ops.uid()}"
def _backward_name(n):
  """The name of a generated backward defun named n (unique per call)."""
  return f"{_BACKWARD_PREFIX}{n}_{ops.uid()}"
def _inference_name(n):
  """The name of a forward-but-no-gradient defun named n (unique per call)."""
  return f"{_INFERENCE_PREFIX}{n}_{ops.uid()}"
def _create_forward_backward_with_graph(attrs, forward_graph, backwards_graph):
  """Creates forward and backward functions from the function graphs."""
  fwd_name = _forward_name(forward_graph.name)
  # Attributes shared by both functions, minus "_implements": the rewritten
  # forward/backward signatures expose intermediate tensors, so they no
  # longer have the stable signature an "_implements" annotation promises
  # to downstream optimizers. See:
  # https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md#appendix-future-support-for-optimizing-gradient-functions
  shared_attrs = dict(attrs)
  shared_attrs.pop(attributes_lib.IMPLEMENTS, None)

  # The backward function records which forward function it belongs to.
  bwd_attrs = _parse_func_attrs(
      {attributes_lib.FORWARD_FUNCTION: fwd_name})
  bwd_attrs.update(shared_attrs)
  bwd_fn = ConcreteFunction(backwards_graph, attrs=bwd_attrs)

  # And the forward function records its backward counterpart.
  fwd_attrs = _parse_func_attrs(
      {attributes_lib.BACKWARD_FUNCTION: bwd_fn.name})
  fwd_attrs.update(shared_attrs)
  fwd_fn = atomic_function.from_func_graph(
      fwd_name, forward_graph, forward_graph.inputs, forward_graph.outputs,
      fwd_attrs)
  return fwd_fn, bwd_fn
class _DelayedRewriteGradientFunctions(object):
  """Caches forward/backward functions with a delayed forward rewrite.

  The inference function is used as-is until a gradient is actually
  requested; only then is its call op rewritten into a forward call with
  extra side outputs that the backward function consumes.
  """

  def __init__(self, func_graph, attrs, func_graph_deleter):
    """Construct an inference function and initialize caches."""
    # A map from the number of forward function outputs with accepted gradients
    # to forward and backward functions, used to cache non-tape backward
    # function generation.
    self._cached_function_pairs = {}
    self._func_graph = func_graph
    self._inference_function = atomic_function.from_func_graph(
        _inference_name(self._func_graph.name), self._func_graph,
        self._func_graph.inputs, self._func_graph.outputs, attrs)
    self._attrs = attrs
    self._gradient_name = None
    # Note that the FuncGraph is mutated later, so we need to inspect it now to
    # figure out the user-specified outputs of the inference function.
    self._num_inference_outputs = len(self._func_graph.outputs)
    self._func_graph_deleter = func_graph_deleter

  def forward_backward(self, num_doutputs=None):
    """A possibly-cached pair of forward and backward functions."""
    if num_doutputs is None:
      num_doutputs = self._num_inference_outputs
    # Cache key is the number of gradient-accepting outputs: higher-order
    # gradients add side outputs and so need a distinct forward/backward pair.
    forward_backward = self._cached_function_pairs.get(num_doutputs)
    if forward_backward is not None:
      return forward_backward
    forward, backward = self._construct_forward_backward(num_doutputs)
    self._cached_function_pairs[num_doutputs] = (forward, backward)
    return forward, backward

  def _construct_forward_backward(self, num_doutputs):
    """Constructs a pair of forward and backward functions.

    Args:
      num_doutputs: The constructed backprop function will take output gradients
        for the first `num_doutputs` outputs of the forward function. Defaults
        to the number of outputs for the inference function, but when
        higher-order gradients are computed this will increase to include side
        outputs.

    Returns:
      A pair of (forward_function, backward_function):
        forward_function: A re-generated inference function (an
          AtomicFunction) to account for new side outputs, if any extra
          were required when building the backward pass.
        backward_function: A ConcreteFunction that Takes `num_doutputs`
          arguments and returns gradients with respect to inputs of the forward
          function.
    """
    # Only trainable outputs can receive gradients.
    trainable_outputs = [
        output for output in self._func_graph.outputs[:num_doutputs]
        if backprop_util.IsTrainable(output)]

    # One placeholder spec per trainable output: the backward function's
    # arguments are the incoming output gradients.
    signature = []
    for t in trainable_outputs:
      signature.append(
          tensor_spec.TensorSpec(*default_gradient.shape_and_dtype(t)))

    def _backprop_function(*grad_ys):
      with ops.device(None):
        return gradients_util._GradientsHelper(  # pylint: disable=protected-access
            trainable_outputs,
            self._func_graph.inputs,
            grad_ys=grad_ys,
            src_graph=self._func_graph)

    with self._func_graph.as_default():
      backwards_graph = func_graph_module.FuncGraph(
          _backward_name(self._func_graph.name))
      func_graph_module.func_graph_from_py_func(
          name=backwards_graph.name,
          python_func=_backprop_function,
          args=[], kwargs={},
          signature=signature,
          func_graph=backwards_graph)
      backwards_graph_captures = backwards_graph.external_captures
      # Intermediate tensors of the forward graph that backprop needs must
      # become extra (side) outputs of the forward function.
      captures_from_forward = [
          c for c in backwards_graph_captures if
          not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph]

      existing_outputs = object_identity.ObjectIdentitySet(
          self._func_graph.outputs)
      for capture in captures_from_forward:
        if capture not in existing_outputs:
          existing_outputs.add(capture)
          self._func_graph.outputs.append(capture)

      forward_function, backward_function = _create_forward_backward_with_graph(
          self._attrs, self._func_graph, backwards_graph)
      return forward_function, backward_function

  def _rewrite_forward_and_call_backward(self, op, *doutputs):
    """Add outputs to the forward call and feed them to the grad function."""
    forward_function, backwards_function = self.forward_backward(len(doutputs))
    if not backwards_function.outputs:
      # No trainable gradients to compute; return the (None-filled) structure.
      return backwards_function.structured_outputs

    op.graph._add_function_recursive(forward_function)  # pylint: disable=protected-access

    # pylint: disable=protected-access
    # Rewrite an inference call op to be a forward call op
    op._set_func_attr("f", forward_function.name)
    op._set_type_list_attr(
        "Tout",
        [
            o.dtype.as_datatype_enum
            for o in forward_function.function_type.flat_outputs
        ],
    )
    # Append the new side outputs to the existing call op.
    truncated_outputs = forward_function.function_type.flat_outputs[
        len(op.outputs) :
    ]
    op._add_outputs(
        [o.dtype.as_datatype_enum for o in truncated_outputs],
        [o.shape for o in truncated_outputs],
    )
    for i in range(len(op.outputs)):
      output_type = forward_function.function_type.flat_outputs[i]
      handle_data = output_type.dtype._handle_data
      if handle_data:
        handle_data_util.set_handle_data(op.outputs[i], handle_data)
    # pylint: enable=protected-access

    # Map the backward function's captures (graph-internal tensors) to the
    # corresponding concrete outputs of the rewritten call op.
    capture_mapping = dict(
        zip((ops.tensor_id(t) for t in self._func_graph.outputs), op.outputs))
    remapped_captures = [
        capture_mapping.get(ops.tensor_id(capture), capture)
        for capture in backwards_function.captured_inputs
    ]

    # Replace Nones with zeros since we're calling a graph function which
    # expects numeric inputs.
    cleaned_doutputs = []
    for doutput, placeholder in zip(doutputs, self._func_graph.outputs):
      if backprop_util.IsTrainable(placeholder):
        if isinstance(doutput, indexed_slices.IndexedSlices):
          # Gradient passed to a backward ConcreteFunction must be tf.Tensor,
          # so we convert tf.IndexedSlices to tf.Tensor.
          cleaned_doutputs.append(ops.convert_to_tensor(doutput))
        elif doutput is not None:
          cleaned_doutputs.append(doutput)
        else:
          cleaned_doutputs.append(default_gradient.zeros_like(placeholder))

    # Compute the gradients using the side outputs
    return backwards_function._call_flat(  # pylint: disable=protected-access
        cleaned_doutputs, remapped_captures)

  def get_gradient_function(self):
    """Returns gradient function.

    The gradient rewrites an inference call op to a forward call op, but does
    not modify a pre-existing forward call op. It then computes the gradient
    from the output's gradients and the side outputs of the forward op.
    """
    return self._rewrite_forward_and_call_backward

  def forward(self, inference_args=None, input_tangents=None):
    """A forward function with only user-specified outputs.

    The call operation for the returned inference function can be rewritten
    into a forward function. This only happens if the backward function (from
    the `backward` method) ends up being used to compute gradients.

    This approach avoids constructing unnecessary graphs, but it only works if
    we are calling this function when not executing eagerly.

    Args:
      inference_args: A flat list of Tensors, arguments to the inference
        function. Unused, but taken for compatibility with
        _TapeGradientFunctions.
      input_tangents: A flat list of Tensors, jvps associated with
        `inference_args`. Unused; if required, tape functions must be used
        instead.

    Returns:
      An atomic_function.AtomicFunction.
    """
    del inference_args  # unused
    if input_tangents:
      # This class does not support special-cased forwardprop. The arguments
      # are here for compatibility with _TapeGradientFunctions.
      raise errors.InternalError("unexpectedly got forwardprop information in "
                                 "a class that does not support forwardprop.")
    return self._inference_function

  def _backward(self, outputs):
    """Fetch a backward function for `outputs` from the forward function."""
    def _backward_function(*args):
      # The rewrite happens lazily, only when this closure is invoked by the
      # tape with actual output gradients.
      call_op = outputs[0].op
      return self._rewrite_forward_and_call_backward(call_op, *args)
    return _backward_function, outputs

  def record(self, flat_outputs, inference_args, input_tangents):
    """Record the function call operation.

    _DelayedRewriteGradientFunctions supports only first-order backprop tape
    gradients (and then only when graph building). It does not work with
    higher-order tape gradients or forward autodiff, but does work with
    higher-order symbolic gradients (tf.gradients).

    Args:
      flat_outputs: The result of running `forward`.
      inference_args: A flat list of Tensors with inference inputs to the
        operation.
      input_tangents: A flat list of Tensors with input tangents consumed by
        the operation.
    """
    backward_function, to_record = self._backward(flat_outputs)
    record.record_operation(
        self._inference_function.cached_definition.signature.name,
        to_record,
        inference_args + input_tangents,
        backward_function,
    )
# Contains information about a forward function wrapped to compute jvps.
_ForwardWrapper = collections.namedtuple(
"_ForwardWrapper", (
# The wrapper Graph.
"graph",
# A flat list of non-tangent Tensor outputs from the wrapped forward
# function.
"outputs",
# Indices for output tangents, same format as
# forwardprop_util.pack_tangents.
"output_indices",
# A flat list of tangents for `outputs`.
"output_tangents"))
class _TapeGradientFunctions(object):
"""Caches forward and backward functions compatible with eager gradients.
In contrast to the delayed-rewrite approach in
`_DelayedRewriteGradientFunctions` which only works with delayed execution,
the forward function generated by this class has a fixed set of outputs which
may be preserved by a tape in order to compute gradients later.
This class is abstract; its child classes differ in how many side outputs of
the forward function their backward function accepts gradients for, which
determines whether higher-order tape gradients are possible.
"""
def __init__(self, func_graph, attrs, func_graph_deleter,
forwardprop_input_indices, delayed_rewrite_functions,
need_gradients_for_jvps):
self._func_graph = func_graph
self._forward_graph = None
self._attrs = attrs
self._forward = None
self._backward = None
self._num_outputs = len(func_graph.outputs)
self._func_graph_deleter = func_graph_deleter
self._forwardprop_input_indices = forwardprop_input_indices
self._forwardprop_output_indices = None
self._num_forwardprop_outputs = 0
self._num_inference_outputs = len(func_graph.outputs)
self._num_trainable_inference_outputs = len(
[t for t in func_graph.outputs if backprop_util.IsTrainable(t)])
self._delayed_rewrite_functions = delayed_rewrite_functions
self._need_gradients_for_jvps = need_gradients_for_jvps
def _build_functions_for_outputs(
self, outputs, inference_args, input_tangents):
"""Forward+backward functions where the backward function sees `outputs`."""
# First figure out which of `outputs` are trainable. We'll accept gradients
# for each of these in the backward function.
trainable_outputs = []
trainable_indices = []
for index, output in enumerate(outputs):
if backprop_util.IsTrainable(output):
trainable_outputs.append(output)
trainable_indices.append(index)
backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
with backwards_graph.as_default():
gradients_wrt_outputs = []
for output in trainable_outputs:
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
output)
gradient_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
handle_data_util.copy_handle_data(output, gradient_placeholder)
gradients_wrt_outputs.append(gradient_placeholder)
with ops.device(None):
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
trainable_outputs,
self._func_graph.inputs,
grad_ys=gradients_wrt_outputs,
src_graph=self._func_graph)
if input_tangents:
# Convert IndexedSlices to dense tensors (as we do elsewhere for
# function gradients). Our C++ bindings don't know how to handle them
# currently.
gradients_wrt_inputs = nest.map_structure(
lambda x: ops.convert_to_tensor(x) if x is not None else None,
gradients_wrt_inputs)
captures_from_forward = [
c for c in backwards_graph.external_captures
if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph
]
existing_outputs = object_identity.ObjectIdentitySet(
self._func_graph.outputs)
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
self._func_graph.outputs.append(capture)
# The ordering of `backwards_graph.inputs` is important: inputs of
# `backward_function` correspond to outputs (including
# side outputs) of `self._tape_forward_function`.
backwards_graph.inputs = (
gradients_wrt_outputs + backwards_graph.internal_captures)
backwards_graph.outputs.extend(
grad
for grad in nest.flatten(gradients_wrt_inputs, expand_composites=True)
if grad is not None)
backwards_graph.structured_outputs = gradients_wrt_inputs
forward_function, backward_function = _create_forward_backward_with_graph(
self._attrs, self._func_graph, backwards_graph)
if not input_tangents:
# There is no need to special-case forwardprop, so we can return the
# forward+backward pair we've created without further wrapping.
return (forward_function, self._func_graph, backward_function,
# No forwardprop outputs.
None, 0)
forward_wrapper = self._wrap_forward_function_with_jvps(
forward_function, backward_function, inference_args, input_tangents)
(wrapped_backwards_graph,
forward_wrapper) = self._wrap_backward_function_with_jvp_backprop(
backward_function, gradients_wrt_outputs, forward_wrapper)
# Now that we've added new captures, we need to make sure forward outputs
# are in the same order the backward function expects them to be in:
# [inference outputs] + [jvps] + [side outputs] + [captures].
forward_wrapper = self._shuffle_forward_outputs(forward_wrapper)
(wrapped_forward_function,
wrapped_backward_function) = _create_forward_backward_with_graph(
self._attrs, forward_wrapper.graph, wrapped_backwards_graph)
if (len(inference_args) + len(input_tangents)
!= len(forward_wrapper.graph.inputs)):
raise errors.InternalError(
f"The forward graph had {len(forward_wrapper.graph.inputs)} inputs, "
f"but we expected {len(inference_args) + len(input_tangents)} "
f"({len(inference_args)} inference inputs and "
f"{len(input_tangents)} input tangents).")
return (wrapped_forward_function, forward_wrapper.graph,
wrapped_backward_function, forward_wrapper.output_indices,
len(forward_wrapper.output_tangents))
def _wrap_forward_function_with_jvps(
self, forward_function, backward_function,
inference_args, input_tangents):
"""Adds inline JVP computation to a forward function."""
forward_wrapper_graph = func_graph_module.FuncGraph(
_forward_name(self._func_graph.name))
with forward_wrapper_graph.as_default():
# Tell forward accumulators to free up space for new JVP computations,
# since one may be in the process of computing a JVP (if that computation
# triggered this function building).
#
# We'll make symbolic versions of input JVPs, run the forward function
# under forward accumulators to get symbolic output JVPs, then set those
# as outputs of the new wrapped forward function.
with forwardprop_util.push_forwardprop_state():
forward_captures = {
ops.tensor_id(internal): external
for external, internal in self._func_graph.captures}
for input_index, real_input in enumerate(self._func_graph.inputs):
# This loop is more or less equivalent to running tf.identity on each
# of self._func_graph.inputs. However, doing that also captures jvps
# for resource handles, which confuses the jvp capturing code below
# (since primal inputs are interwoven with jvp inputs).
input_placeholder = array_ops.placeholder(
dtype=real_input.dtype,
shape=real_input.shape)
capture = forward_captures.get(ops.tensor_id(real_input))
if capture is not None:
forward_wrapper_graph.add_capture(capture, input_placeholder)
if capture.dtype == dtypes.resource:
handle_data_util.copy_handle_data(capture, input_placeholder)
else:
forward_wrapper_graph.inputs.append(input_placeholder)
for inp, arg in zip(forward_wrapper_graph.inputs, inference_args):
record.record_operation(
"captured_value", [inp], [arg],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
num_inference_inputs = len(inference_args)
for tape_indices in self._forwardprop_input_indices:
for input_index, jvp_index in tape_indices:
input_placeholder = forward_wrapper_graph.inputs[input_index]
if len(forward_wrapper_graph.inputs) != jvp_index:
raise errors.InternalError(
f"Expected {jvp_index} forward graph inputs, "
f"got {len(forward_wrapper_graph.inputs)}.")
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
input_placeholder)
jvp_placeholder = graph_placeholder(gradient_dtype, gradient_shape)
external_jvp = input_tangents[jvp_index - num_inference_inputs]
forward_wrapper_graph.add_capture(external_jvp, jvp_placeholder)
tensor_shape.TensorShape(
external_jvp.shape).assert_is_compatible_with(
jvp_placeholder.shape)
record.record_operation(
"captured_value",
[jvp_placeholder],
[external_jvp],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
forward_inputs = forward_wrapper_graph.inputs[:num_inference_inputs]
gradient_function = (
self._delayed_rewrite_functions._rewrite_forward_and_call_backward) # pylint: disable=protected-access
with ops.get_default_graph()._override_gradient_function( # pylint: disable=protected-access
{"PartitionedCall": gradient_function,
"StatefulPartitionedCall": gradient_function}):
forward_outputs = forward_function(*forward_inputs)
if isinstance(forward_outputs, ops.Operation):
# _wrapped_backward_function expects a list, but if the function has
# no outputs its call() returns an Operation. We need to undo that
# so we don't cause problems later.
forward_outputs = []
py_backward, _ = self._wrap_backward_function(
self._func_graph, backward_function, forward_outputs)
# We will never request backward tape gradients for this operation
# directly since we're wrapping the call; forwardprop will call the
# backward function (and nested forward accumulators may build
# higher-order gradients), but any watching GradientTapes should ignore
# it.
#
# TODO(allenl): It might be better to explicitly stop backward recording
# so we don't use the second-order tape cases unnecessarily.
record.record_operation_forwardprop_only(
forward_function.cached_definition.signature.name,
forward_outputs, forward_inputs, py_backward, None)
output_indices, output_tangents = (
pywrap_tfe.TFE_Py_PackJVPs(forward_outputs))
output_tangents = [forward_wrapper_graph.capture(t)
for t in output_tangents]
return _ForwardWrapper(
graph=forward_wrapper_graph, outputs=forward_outputs,
output_indices=output_indices, output_tangents=output_tangents)
def _wrap_backward_function_with_jvp_backprop(
self, backward_function, gradients_wrt_outputs, forward_wrapper):
"""Wraps `backward_function` to include gradients for JVPs."""
wrapped_backwards_graph = func_graph_module.FuncGraph(
_backward_name(self._func_graph.name))
with wrapped_backwards_graph.as_default():
py_backward, recorded_outputs = self._wrap_backward_function(
self._func_graph, backward_function, forward_wrapper.outputs)
trainable_index = 0
forward_doutputs = []
doutput_args = []
for output in recorded_outputs:
if backprop_util.IsTrainable(output):
doutput = gradients_wrt_outputs[trainable_index]
doutput_placeholder = graph_placeholder(doutput.dtype, doutput.shape)
doutput_args.append(doutput_placeholder)
forward_doutputs.append(doutput_placeholder)
trainable_index += 1
else:
doutput_args.append(None)
dinputs = py_backward(*doutput_args)
existing_outputs = object_identity.ObjectIdentitySet(
forward_wrapper.outputs + forward_wrapper.output_tangents)
num_processed_output_tangents = 0
gradients_wrt_output_tangents = []
tangent_doutputs = []
output_tangents = forward_wrapper.output_tangents
output_indices = forward_wrapper.output_indices
if self._need_gradients_for_jvps:
# TODO(allenl): Consider using a throwaway graph to avoid extra gradient
# evaluations; gradients for jvps may have common subgraphs.
while num_processed_output_tangents != len(output_tangents):
for output in output_tangents[num_processed_output_tangents:]:
gradient_shape, gradient_dtype = default_gradient.shape_and_dtype(
output)
placeholder = graph_placeholder(gradient_dtype, gradient_shape)
gradients_wrt_output_tangents.append(placeholder)
tangent_doutputs.append(placeholder)
num_processed_output_tangents = len(output_tangents)
with ops.device(None):
gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access
output_tangents,
forward_wrapper.graph.inputs,
grad_ys=gradients_wrt_output_tangents,
src_graph=forward_wrapper.graph)
dinputs = [
backprop_util.AggregateIndexedSlicesGradients((existing, new))
for existing, new in zip(dinputs, gradients_wrt_inputs)
if existing is not None or new is not None]
dinputs.extend(gradients_wrt_inputs[len(dinputs):])
captures_from_forward = [
c for c in wrapped_backwards_graph.external_captures
if (not isinstance(c, ops.EagerTensor)
and c.graph is forward_wrapper.graph)]
for capture in captures_from_forward:
if capture not in existing_outputs:
existing_outputs.add(capture)
forward_wrapper.outputs.append(capture)
output_indices, output_tangents = (
forwardprop_util.pack_tangents(forward_wrapper.outputs))
output_tangents = [forward_wrapper.graph.capture(t)
for t in output_tangents]
for t in output_tangents:
existing_outputs.add(t)
wrapped_backwards_graph.inputs = (
forward_doutputs[:self._num_trainable_inference_outputs]
+ tangent_doutputs
+ forward_doutputs[self._num_trainable_inference_outputs:]
+ wrapped_backwards_graph.internal_captures)
wrapped_backwards_graph.structured_outputs = dinputs
wrapped_backwards_graph.outputs = [t for t in dinputs if t is not None]
return (wrapped_backwards_graph,
forward_wrapper._replace(output_indices=output_indices,
output_tangents=output_tangents))
def _shuffle_forward_outputs(self, forward_wrapper):
"""Reorders function outputs so captures are last."""
def _index_map(original):
if original < self._num_inference_outputs:
return original
if original >= len(forward_wrapper.outputs):
return (original - len(forward_wrapper.outputs)
+ self._num_inference_outputs)
return original + len(forward_wrapper.output_tangents)
output_indices = nest.map_structure(
_index_map, forward_wrapper.output_indices)
forward_wrapper.graph.outputs = (
forward_wrapper.outputs[:self._num_inference_outputs]
+ forward_wrapper.output_tangents
+ forward_wrapper.outputs[self._num_inference_outputs:])
return forward_wrapper._replace(output_indices=output_indices)
def forward(self, inference_args, input_tangents):
"""Construct or fetch a forward function with side-outputs.
When graph building without a tape active, symbolic gradients rely on
regenerating the backward function for higher-order gradients (to account
for new side outputs of the rewritten forward function call). Thus there is
no fixed backward function for this case. However, when a tape is active
(eager or graph building), we generate fixed backward and forward functions
at forward function call time.
This difference between the tape and non-tape cases is to avoid building
unneeded backward functions while graph building (where we may or may not
eventually need gradients).
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A forward atomic_function.AtomicFunction.
"""
if self._forward is None:
(self._forward, self._forward_graph, self._backward,
self._forwardprop_output_indices, self._num_forwardprop_outputs) = (
self._forward_and_backward_functions(inference_args, input_tangents))
return self._forward
def _wrap_backward_function(self, forward_graph, backward, outputs):
"""Create a backward function given `outputs` from the forward function."""
capture_mapping = dict(
zip((ops.tensor_id(t) for t in forward_graph.outputs), outputs))
captured_inputs = backward.captured_inputs
remapped_captures = [
capture_mapping.get(ops.tensor_id(capture), capture)
for capture in captured_inputs
]
if any(t.graph is forward_graph for t in remapped_captures
if not isinstance(t, ops.EagerTensor)):
incorrect_mapping = [t for t in remapped_captures
if (not isinstance(t, ops.EagerTensor) and
t.graph is not forward_graph)]
raise errors.InternalError("Failed to map all backward graph captures to "
"the forward graph. Incorrectly mapped: "
f"{incorrect_mapping}.")
# We may need to use zeros_like to get a zero for variant Tensors with
# unconnected gradients. We do that in advance so we don't have to hold on
# to the outputs themselves, which may not be needed otherwise.
variant_zeros_like = {}
backward_function_inputs = (len(backward.inputs) - len(captured_inputs))
recorded_outputs = []
trainable_recorded_outputs = 0
skip_positions = []
if self._num_forwardprop_outputs and not self._need_gradients_for_jvps:
relevant_outputs = (
outputs[:self._num_inference_outputs]
+ outputs[self._num_inference_outputs
+ self._num_forwardprop_outputs:])
else:
relevant_outputs = outputs
for output_index, output in enumerate(relevant_outputs):
if trainable_recorded_outputs < backward_function_inputs:
recorded_outputs.append(output)
if backprop_util.IsTrainable(output):
trainable_recorded_outputs += 1
else:
skip_positions.append(output_index)
if output.dtype == dtypes.variant:
variant_zeros_like[output_index] = default_gradient.zeros_like(output)
def _backward_function_wrapper(*args):
"""Process output gradients and call the backward function."""
if not backward.outputs:
return backward.structured_outputs
processed_args = []
input_index = 0
for output_index, arg in enumerate(args):
# Convert IndexedSlices to dense tensors. The IndexedSlices optimization
# is only really effective when doing tf.gather(variable) as the
# adjoint functions for most operations are unlikely to preserve the
# sparsity in IndexedSlices.
if isinstance(arg, indexed_slices.IndexedSlices):
arg = ops.convert_to_tensor(arg)
if output_index in skip_positions:
continue
if arg is None:
# We're calling a (non-polymorphic) ConcreteFunction, so we need to
# have a Tensor value for each Tensor we thought would be trainable
# based on its dtype, even if it ended up being unconnected.
input_placeholder = backward.inputs[
input_index]
if input_placeholder.dtype == dtypes.variant:
arg = variant_zeros_like[output_index]
else:
arg = array_ops.zeros(
*default_gradient.shape_and_dtype(input_placeholder))
processed_args.append(arg)
input_index += 1
if input_index >= backward_function_inputs:
break
return backward._call_flat( # pylint: disable=protected-access
processed_args, remapped_captures)
return _backward_function_wrapper, recorded_outputs
  def record(self, flat_outputs, inference_args, input_tangents):
    """Record the function call operation.

    For backprop, indicates the backward function to use and which new Tensors
    must be watched. For forwardprop from eager, the function call itself will
    have produced tangents which need to be recorded.

    Args:
      flat_outputs: The result of running `forward`.
      inference_args: A flat list of Tensors with inference inputs to the
        operation.
      input_tangents: A flat list of Tensors with input tangents consumed by
        the operation.
    """
    backward_function, to_record = self._wrap_backward_function(
        self._forward_graph, self._backward, flat_outputs)
    if self._forwardprop_output_indices:
      # Forwardprop was active: record a backprop-only operation (inference
      # inputs only) and a forwardprop-only operation (inference + tangent
      # inputs) so each kind of accumulator sees the right edges.
      record.record_operation_backprop_only(
          self._forward.cached_definition.signature.name,
          to_record, inference_args,
          backward_function)
      record.record_operation_forwardprop_only(
          self._forward.cached_definition.signature.name,
          flat_outputs, inference_args + input_tangents,
          backward_function,
          self._forwardprop_output_indices)
    else:
      # No forwardprop outputs: a single record covers both tape kinds.
      record.record_operation(self._forward.cached_definition.signature.name,
                              to_record, inference_args + input_tangents,
                              backward_function)
class _FirstOrderTapeGradientFunctions(_TapeGradientFunctions):
  """Caches tape-friendly functions for first-order gradients."""

  def __init__(self, func_graph, attrs, func_graph_deleter,
               forwardprop_input_indices, delayed_rewrite_functions,
               need_gradients_for_jvps):
    super().__init__(func_graph, attrs, func_graph_deleter,
                     forwardprop_input_indices, delayed_rewrite_functions,
                     need_gradients_for_jvps)
    # NOTE(review): these attributes are re-assigned from the same constructor
    # arguments passed to the base class — confirm whether the base class
    # already stores them and these lines are redundant.
    self._func_graph_deleter = func_graph_deleter
    self._forwardprop_input_indices = forwardprop_input_indices

  def _forward_and_backward_functions(self, inference_args, input_tangents):
    """Shortcut for when only first-order gradients are required.

    The returned backward function does not accept gradients with respect to
    side output of forward_function. This is fine as long as the user can't
    possibly request second order tape gradients, as when they've used a single
    non-persistent GradientTape. Since we don't need the backward function to
    take gradients with respect to side outputs, we can skip some potentially
    slow graph building.

    Args:
      inference_args: A flat list of Tensors, arguments to the inference
        function.
      input_tangents: A flat list of Tensors, jvps associated with
        `inference_args`.

    Returns:
      A tuple of (forward_function, backward_function):
        forward_function: Takes the same inputs as the inference function, but
          returns side outputs used by backward_function in addition to the
          inference function's outputs.
        backward_function: Takes side outputs from forward_function and
          gradients with respect to the "real" outputs of forward_function and
          returns gradients with respect to the inputs.
    """
    # Only the "real" (inference) outputs get gradients for first-order tapes;
    # side outputs added for backprop never need their own gradients here.
    outputs = self._func_graph.outputs[:self._num_inference_outputs]
    return self._build_functions_for_outputs(
        outputs, inference_args, input_tangents)
class _HigherOrderTapeGradientFunctions(_TapeGradientFunctions):
  """Caches tape-friendly functions for higher-order gradients."""

  # TODO(b/136189779): Cond/while under a tape may need similar logic. Consider
  # generalizing if so.
  def _forward_and_backward_functions(self, inference_args, input_tangents):
    """Forward and backward functions suitable for higher-order gradients.

    Unlike in `_FirstOrderTapeGradientFunctions`, the backward function built
    by this method accepts gradients for all of the outputs of the returned
    forward function, including side outputs.

    Args:
      inference_args: A flat list of Tensors, arguments to the inference
        function.
      input_tangents: A flat list of Tensors, jvps associated with
        `inference_args`.

    Returns:
      A tuple of (forward_function, backward_function):
        forward_function: Takes the same inputs as the inference function, but
          returns side outputs used by backward_function in addition to the
          inference function's outputs.
        backward_function: Takes side outputs from forward_function and
          gradients with respect to all of its outputs, real and side. Returns
          gradients with respect to the inputs.
    """
    outputs = []
    iteration_count = 0
    # First we need to figure out how many side outputs from the forward pass
    # will be required. We do this in a temporary graph to avoid actually
    # running multiple copies of the backward pass (one per _GradientsHelper
    # call).
    #
    # While computing gradients, the backward function captures Tensors from
    # the forward function. We add these as side outputs of the original
    # function. However, we then need to accept output gradients with respect
    # to these side outputs for higher order gradients to work. Thus we loop
    # until the number of outputs of the function stabilizes. Note that this
    # is only required for tape gradients, where we need to declare in advance
    # all of the forward op's outputs: symbolic gradients with tf.gradients
    # instead rely on regenerating backward functions when higher-order
    # gradients are requested.
    while (len(outputs) < len(self._func_graph.outputs)
           # It's possible for gradient generation to add new ops to the
           # forward pass. If all of the new outputs are non-trainable, there's
           # no reason to continue.
           and any(backprop_util.IsTrainable(output)
                   for output in self._func_graph.outputs[len(outputs):])):
      iteration_count += 1
      if iteration_count >= 20 and iteration_count % 5 == 0:
        # Warn periodically (not every iteration) once convergence looks slow.
        new_op_with_trainable_output = None
        num_new_trainable_outputs = 0
        for output in self._func_graph.outputs[len(outputs):]:
          if backprop_util.IsTrainable(output):
            num_new_trainable_outputs += 1
            new_op_with_trainable_output = output.op
        logging.warning(
            ("Determining side outputs for the function '{}' is taking longer "
             "than expected ({} iterations, typically this converges in 5 or "
             "so). This could indicate that a gradient registration is adding "
             "new ops to the forward pass every time gradients are generated. "
             "{} new trainable output(s) were added this iteration, one from "
             "the following op:\n {}\nThis may indicate a TensorFlow bug, or "
             "an issue in a tf.custom_gradient.")
            .format(
                self._func_graph.name, iteration_count,
                num_new_trainable_outputs, new_op_with_trainable_output))
      outputs = list(self._func_graph.outputs)
      # Build (and discard) the functions purely to let gradient generation
      # add any new side outputs to `self._func_graph`; the loop condition
      # re-checks whether the output list grew.
      self._build_functions_for_outputs(
          outputs, inference_args, input_tangents)

    # The output count has stabilized: build the final functions for real.
    (forward_function, forward_graph,
     backward_function, output_indices, num_output_tangents) = (
         self._build_functions_for_outputs(
             outputs, inference_args, input_tangents))
    if (len(self._func_graph.outputs) > len(outputs)
        and any(backprop_util.IsTrainable(output)
                for output in self._func_graph.outputs[len(outputs):])):
      raise errors.InternalError(
          "Unexpectedly added new outputs to the forward function when "
          "building the backward function: "
          f"{self._func_graph.outputs[len(outputs):]}.")
    return (forward_function, forward_graph, backward_function, output_indices,
            num_output_tangents)
class _ForwardBackwardCall(object):
"""Holds the state of a function call between execution and recording."""
__slots__ = [
"_functions", "_inference_args", "_input_tangents", "_tape_watching"
]
def __init__(self, functions, inference_args, input_tangents, tape_watching):
"""Collects information about the function call.
Args:
functions: An object which produces forward and backward functions, either
a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object.
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
tape_watching: Boolean, with True indicating that recording is necessary.
"""
self._functions = functions
self._inference_args = inference_args
self._input_tangents = input_tangents
self._tape_watching = tape_watching
def forward(self):
"""Builds or retrieves a forward function for this call."""
forward_function = self._functions.forward(
self._inference_args, self._input_tangents)
return forward_function, self._inference_args + self._input_tangents
def record(self, flat_outputs):
"""Given outputs from the execution of `forward`, records the operation."""
if (self._tape_watching
and not isinstance(flat_outputs, ops.Operation)
and flat_outputs is not None):
# We only record function calls which have outputs, and then only when a
# tape is watching.
self._functions.record(
flat_outputs, self._inference_args, self._input_tangents)
class ConcreteFunction(core.ConcreteFunction, trackable.Trackable):
  """A `tf.types.experimental.ConcreteFunction` created from `tf.function`."""

  def __init__(
      self, func_graph, attrs=None, shared_func_graph=True, function_type=None
  ):
    """Initialize a `ConcreteFunction`.

    Args:
      func_graph: An instance of FuncGraph: the function body to wrap.
      attrs: (optional) dict mapping names of attributes to their AttrValue
        values. Attributes in `attrs` will be included in this function's
        definition.
      shared_func_graph: If False, the ConcreteFunction takes ownership of
        `func_graph` and will break reference cycles when it is deleted. This
        makes the FuncGraph inoperable.
      function_type: Defines the structured input/output contract.

    Raises:
      ValueError: If number of input_placeholders is not equal to the number
        of function inputs.
    """
    # _arg_keywords and _num_positional_args define the flat signature. They
    # are assigned after construction.
    self._arg_keywords = None
    self._num_positional_args = None

    self._func_graph = func_graph
    self._captured_inputs = (
        self._func_graph.external_captures
        + self._func_graph.deferred_external_captures
    )
    self._function_type = function_type

    if attrs and attributes_lib.IMPLEMENTS in attrs:
      # The alternative is to silently drop "implements" tag
      # but it seems likely it would lead to hard to catch bugs.
      # Another alternative is to make func_body to preserve the order
      # of arguments if variables are present. Yet another option
      # is to automatically replace variables as arguments to functions
      # to v.read_value() whenever "implements" tag is present
      # Anytime we annotate existing function we probably want to wrap
      # it with safe read_value for backward compatibility.
      has_resource_vars = any(
          inp.dtype == dtypes.resource for inp in self.inputs)
      # Fix: the original message read "...function use use
      # variable.read_value()." (duplicated word).
      assert not any((has_resource_vars, self._captured_inputs)), (
          'Function {name} has "{attr}={value}" attribute and thus can not '
          "depend on any tensors outside of its signature or modify variables. "
          "\n\nNote: variables are always captured and cause function "
          "re-tracing for every variable called.\n"
          "  inputs: {inputs}\n  captures: {captured}\n\n"
          "To pass a variable to such function, "
          "use variable.read_value().".format(
              name=func_graph.name,
              attr=attributes_lib.IMPLEMENTS,
              value=attrs[attributes_lib.IMPLEMENTS],
              inputs=self.inputs,
              captured=self._captured_inputs))
    self._output_shapes = tuple(
        output.shape for output in self._func_graph.outputs)
    self._attrs = _parse_func_attrs(attrs or {})

    if shared_func_graph:
      self._garbage_collector = None
    else:
      self._garbage_collector = ConcreteFunctionGarbageCollector(func_graph)

    # Pairs of forward and backward functions used for computing gradients.
    #
    # These each get a reference to the FuncGraph deleter since they use the
    # FuncGraph directly.
    self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions(
        func_graph, self._attrs, self._garbage_collector)
    self._first_order_tape_functions = {}
    self._higher_order_tape_functions = {}
    # Cache the inference function to avoid a (Python) function call when not
    # building gradients.
    self._inference_function = self._delayed_rewrite_functions.forward()
  @property
  def function_type(self):
    """Return the FunctionType associated with this ConcreteFunction."""
    # TODO(fmuham): Ensure this is never None.
    return self._function_type
  # TODO(fmuham): Remove this property.
  @property
  def _function_spec(self):
    """Legacy FunctionSpec view derived from `function_type`, or None."""
    if self.function_type is None:
      return None
    # NOTE(review): a *set* of default values (not a name->default mapping) is
    # passed here — confirm FunctionSpec expects that shape for this argument.
    return function_type_utils.FunctionSpec(
        self.function_type,
        {
            p.default
            for p in self.function_type.parameters.values()
            if p.optional
        },
        False,
        name=self.name,
    )
  @property
  def variables(self):
    """Sequence of variables for this function."""
    # Returned as a tuple so callers cannot mutate the underlying list.
    return tuple(self._func_graph.variables)
  def set_variables(self, variables):
    """Replaces the variables tracked by the underlying FuncGraph."""
    self._func_graph.variables = variables
  @property
  def trainable_variables(self):
    """Sequence of trainable variables for this function."""
    return tuple(self._func_graph.trainable_variables)
  def __call__(self, *args, **kwargs):
    """Executes the wrapped function.

    ConcreteFunctions have two signatures:

    * The signature of the original function wrapped by this ConcreteFunction.
    * A flat signature, where each argument accepts a single Tensor.

    The original function signature is generally preferred, but the flat input
    signature is supported for backward compatibility.

    ### Original Function Signature

    When calling a ConcreteFunction with the signature of the original
    function, each argument must match the type or value that was used when
    the ConcreteFunction's graph was traced. In particular:

    * Tensor arguments (including CompositeTensors, such as RaggedTensor) must
      have matching `TypeSpec`s.
    * Non-Tensor arguments (such as booleans or ints) must have equal values.
    * Nested arguments (such as lists, tuples, or dictionaries) must have the
      same nesting structure; and each nested value must have a matching type
      or value.

    The default value for any arguments that were traced with non-Tensor
    values is the value that was used in the trace. Arguments that were traced
    with tensor arguments do not have a default value (even if the original
    function had a default value for that argument).

    ### Flat Signature

    When calling a ConcreteFunction with the flat signature, the arguments
    correspond to the flattened component tensors of the arguments that were
    used to construct the ConcreteFunction. Parameter names are assigned based
    on `TensorSpec.name` (when specified) or the original argument names (with
    suffixes automatically added for nested arguments or composite tensors
    with multiple components).

    Args:
      *args: Positional arguments to the concrete function.
      **kwargs: Keyword arguments to the concrete function.

    Returns:
      The result of applying the TF function on the given Tensors.

    Raises:
      AssertionError: If this `ConcreteFunction` was not created through
        `get_concrete_function`.
      TypeError: If the arguments do not match the function's signature.
    """
    # All dispatch logic (structured vs. flat signature) lives in _call_impl.
    return self._call_impl(args, kwargs)
def _call_impl(self, args, kwargs):
"""See `__call__` for details."""
with trace.Trace(self._func_graph.name, tf_function_call="concrete"):
# Construct the list of input tensors: check if the structured signature
# applies first; and if not, then use the flat signature.
if self.function_type is not None:
try:
return self._call_with_structured_signature(args, kwargs)
except TypeError as structured_err:
try:
return self._call_with_flat_signature(args, kwargs)
except TypeError:
raise structured_err
return self._call_with_flat_signature(args, kwargs)
  def _call_with_flat_signature(self, args, kwargs):
    """Executes the wrapped function with the flat signature.

    Args:
      args: Positional arguments to the concrete function.
      kwargs: Keyword arguments to the concrete function.

    Returns:
      The result of applying the function on the Tensors/Variables contained
      in `args` and `kwargs`.

    Raises:
      TypeError: if `args` and `kwargs` do not match the flat signature of
        this `ConcreteFunction`.
    """
    if len(args) > self._num_positional_args:
      raise TypeError(
          f"{self._flat_signature_summary()} takes {self._num_positional_args} "
          f"positional arguments, got {len(args)}.")
    args = list(args)
    kwargs = dict(kwargs)
    # Keyword names are normalized so they match the sanitized argument names
    # recorded in self._arg_keywords.
    kwargs = {
        function_type_lib.sanitize_arg_name(k): v for k, v in kwargs.items()
    }
    # Fold remaining keywords into positional slots, in declared order.
    for keyword in self._arg_keywords[len(args):]:
      try:
        args.append(
            kwargs.pop(
                function_type_lib.sanitize_arg_name(compat.as_str(keyword))))
      except KeyError:
        specified_keywords = (
            list(self._arg_keywords[:len(args)]) + list(kwargs.keys()))
        missing_required_args = sorted(
            set(self._arg_keywords) - set(specified_keywords))
        raise TypeError(f"{self._flat_signature_summary()} missing required "
                        f"arguments: {', '.join(missing_required_args)}.")
    if kwargs:
      # Anything left in kwargs is either a duplicate of a positional or an
      # unknown keyword; distinguish the two for a clearer error.
      positional_arg_keywords = set(self._arg_keywords[:len(args)])
      for unused_key in kwargs:
        if unused_key in positional_arg_keywords:
          raise TypeError(f"{self._flat_signature_summary()} got two values "
                          f"for '{unused_key}'.")
      raise TypeError(f"{self._flat_signature_summary()} got unexpected "
                      f"keyword arguments: {', '.join(sorted(kwargs))}.")
    for i, arg in enumerate(args):
      if not isinstance(
          arg, (ops.Tensor, resource_variable_ops.BaseResourceVariable)):
        raise TypeError(f"{self._flat_signature_summary()}: expected argument "
                        f"#{i}(zero-based) to be a Tensor; "
                        f"got {type(arg).__name__} ({arg}).")
    return self._call_flat(args, self.captured_inputs)
def _call_with_structured_signature(self, args, kwargs):
"""Executes the wrapped function with the structured signature.
Args:
args: Positional arguments to the concrete function.
kwargs: Keyword arguments to the concrete function.
Returns:
The result of applying the function on the Tensors/Variables contained in
`args` and `kwargs`.
Raises:
TypeError: if `args` and `kwargs` do not match the structured signature
of this `ConcreteFunction`.
"""
args, kwargs, filtered_flat_args = (
function_type_utils.canonicalize_function_inputs(
args, kwargs, self.function_type)
)
return self._call_flat(
filtered_flat_args,
captured_inputs=self.captured_inputs)
  def _call_flat(self, args, captured_inputs):
    """Executes the wrapped function.

    Args:
      args: a list of Tensors or Variables. Arguments from the Python function
        should be filtered before calling this method: objects aside from
        Tensors, CompositeTensors, and Variables are ignored. Any
        CompositeTensors other than ResourceVariables should be expanded
        before calling this method.
      captured_inputs: the captured inputs that are also part of the input
        args to the actual execution. By default, it should be
        self._captured_inputs.

    Returns:
      The result of applying the TF function to `args`.

    Raises:
      ValueError: If `args` contains anything other than Tensors or Variables.
    """
    ctx = context.context()
    executing_eagerly = ctx.executing_eagerly()

    # Copy saveable status of function's graph to current FuncGraph.
    default_graph = ops.get_default_graph()
    if default_graph.building_function and not self._func_graph.saveable:
      default_graph.mark_as_unsaveable(self._func_graph.saving_errors)

    if (record.could_possibly_record() or
        hasattr(default_graph, "watch_variable")):
      # A tape (or a graph-level variable watcher) may be active: report every
      # variable this function reads so it is tracked.
      for v in self._func_graph.variables:
        resource_variable_ops.variable_accessed(v)

    tensor_inputs = []
    variables_used = set([])
    for i, arg in enumerate(args):
      if isinstance(arg, resource_variable_ops.BaseResourceVariable):
        # We can pass a variable more than once, and in this case we need to
        # pass its handle only once.
        if id(arg.handle) in variables_used:
          continue
        resource_variable_ops.variable_accessed(arg)
        tensor_inputs.append(arg.handle)
        variables_used.add(id(arg.handle))
      elif isinstance(arg, ops.Tensor):
        tensor_inputs.append(arg)
      else:
        raise ValueError(f"{i:d}-th input {arg} must be a Tensor, got "
                         f"{type(arg)} when calling {self._func_graph.name}.")

    if not executing_eagerly:
      for i, tensor_input in enumerate(tensor_inputs):
        # Can not compare shapes in these cases
        # TODO(b/216506654): Consider moving this check elsewhere and making
        # it work for all types (e.g. by including shape for Variables).
        if (tensor_input.dtype == dtypes.resource or
            tensor_input.dtype == dtypes.variant):
          continue

        # If we're graph building, shape inference is on. We check for input
        # compatibility up front to avoid hard to debug incompatibilities
        # later.
        graph_input_shape = tensor_shape.TensorShape(
            self._func_graph.inputs[i].shape)
        if not graph_input_shape.is_compatible_with(tensor_input.shape):
          raise ValueError(
              f"Tensor {tensor_input} is not compatible with the shape this "
              f"function was traced with. Expected shape "
              f"{self._func_graph.inputs[i].shape}, but got shape "
              f"{tensor_input.shape}.\n\nIf you called get_concrete_function, "
              f"you may need to pass a tf.TensorSpec(..., shape=...) with a "
              f"less specific shape, having None on axes which can vary.")

    args = tensor_inputs + captured_inputs
    possible_gradient_type = gradients_util.PossibleTapeGradientTypes(args)
    if (possible_gradient_type == gradients_util.POSSIBLE_GRADIENT_TYPES_NONE
        and executing_eagerly):
      # No tape is watching; skip to running the function.
      return self._build_call_outputs(self._inference_function(*args))
    forward_backward = self._select_forward_and_backward_functions(
        args,
        possible_gradient_type,
        executing_eagerly)
    forward_function, args_with_tangents = forward_backward.forward()
    if executing_eagerly:
      flat_outputs = forward_function(*args_with_tangents)
    else:
      # While graph building, route PartitionedCall gradients through the
      # rewritten forward/backward pair instead of the default registration.
      with default_graph._override_gradient_function(  # pylint: disable=protected-access
          {"PartitionedCall": self._get_gradient_function(),
           "StatefulPartitionedCall": self._get_gradient_function()}):
        flat_outputs = forward_function(*args_with_tangents)
    forward_backward.record(flat_outputs)
    return self._build_call_outputs(flat_outputs)
  @property
  def name(self):
    """`ConcreteFunction` name."""
    # Delegates to the cached forward (inference) function's name.
    return self._delayed_rewrite_functions.forward().name
  @property
  def graph(self):
    """Returns the graph from which this function was constructed."""
    return self._func_graph
  @property
  def inputs(self):
    """Returns tensors in `self.graph` corresponding to arguments."""
    return self._func_graph.inputs
  @property
  def structured_input_signature(self):
    """Returns structured signature for this concrete function.

    Returns:
      A tuple `(args, kwargs)`, where:

      * `args` is a tuple that specifies the expected type or value each for
        positional argument.
      * `kwargs` is a dictionary that specifies the expected type or value
        for each keyword-only argument.

      The type or value for each argument is specified using one of the
      following:

      * A `tf.TypeSpec`, indicating that a Tensor or other TensorFlow-native
        value is expected.
      * A Python value, such as an integer, indicating that an equal value
        is expected.
      * A nested structure of `tf.TypeSpec`s and Python values, indicating
        that a corresponding nested structure is expected.
    """
    return self._func_graph.structured_input_signature
  @property
  def outputs(self):
    """Returns tensors in `self.graph` corresponding to returned tensors."""
    return self._func_graph.outputs
  @property
  def structured_outputs(self):
    """Returns outputs in `self.graph` as returned by the original function."""
    return self._func_graph.structured_outputs
  def set_external_captures(self, captures):
    """Updates the function capture values.

    The new values must have tensor types and shapes consistent with the
    original captures of the concrete function, but it is allowed to change a
    value captured with a deferred one and vice-versa.

    Args:
      captures: A list of tensors or closures. Tensors are value captures, and
        closures are call-time (deferred captures).
    """
    # TODO(wxinyi): 1. verify that the new captures' type spec is compatible
    # with the original's. However, doing so requires MirroredVariable captures
    # initialized. 2. replace the original/new captures/deferred
    # captures in the wrapped graph. Doing such for a capture-to-deferred
    # capture replacement requires more arguments than the deferred capture
    # itself, e.g. default value, spec.
    self._captured_inputs = captures
  def replace_capture_with_deferred_capture(self,
                                            tensor,
                                            closure,
                                            spec,
                                            placeholder=None,
                                            default_value=None):
    """Replaces existing capture `tensor` with a deferred capture `closure`.

    This API replaces the capture `tensor` from the concrete function's
    captured inputs list, and places the deferred capture `closure` in
    its spot so the order of captured inputs is preserved. This is important
    because the old `tensor` and the new `closure` will have the same internal
    placeholder, which can be passed through the `placeholder` argument, or
    skipped, in which case we find the placeholder from internal inputs by
    indexing `tensor` in the external captured inputs list. Thus, it is
    important that the new deferred capture has output spec (specified by the
    `spec` argument) compatible with the internal placeholder (`placeholder`)
    and the original capture (`tensor`).

    For example,

    ```python
    bool_captured_tensor = tf.constant(True)
    float_captured_tensor = tf.constant([3.], dtype=tf.float32)
    value = tf.constant([2.], dtype=tf.float32)

    @tf.function
    def fn():
      deferred_tensor = ops.get_default_graph().capture_call_time_value(
          lambda: value,
          tf.TensorSpec(shape=(1,), dtype=tf.float32))
      if bool_captured_tensor:
        return deferred_tensor
      else:
        return deferred_tensor + float_captured_tensor

    concrete_fn = fn.get_concrete_function()
    print(concrete_fn())  # tf.Tensor([2.], shape=(1,), dtype=float32)

    new_bool_captured_tensor = constant_op.constant(False)
    def bool_closure():
      return new_bool_captured_tensor

    concrete_fn.replace_capture_with_deferred_capture(
        bool_captured_tensor,
        bool_closure,
        spec=tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool))

    print(concrete_fn())  # tf.Tensor([5.], shape=(1,), dtype=float32)
    ```

    Args:
      tensor: Tensor already captured. This `tensor` should be listed in
        concrete_function.captured_inputs except when it's empty such as when
        the concrete function is restored from SavedModel.
      closure: function which takes no arguments, to be evaluated at function
        call time, returning a nest of tensors compatible with `spec`.
      spec: nest of TypeSpec for the value to capture.
      placeholder: optional. The internal placeholder corresponding to the
        captured `tensor` and the new `closure`.
      default_value: optional value to use in environments that cannot safely
        evaluate closure.
    """
    # Locate `tensor` by identity (not equality) in the external captures, so
    # the closure is inserted at exactly the same position.
    capture_index = None
    for i, capture in enumerate(self._captured_inputs):
      if id(tensor) == id(capture):
        capture_index = i
        break

    if placeholder is None:
      if capture_index is None:
        raise ValueError(
            f"Did not find `tensor` argument {tensor} in the ConcreteFunction's"
            " captured inputs list, and did not receive a placeholder argument."
            " Thus we're unable to infer the internal placeholder. ")
      # Captures occupy the tail of `self.inputs`, so the placeholder for the
      # i-th capture sits at offset i from the start of that tail.
      placeholder = self.inputs[-len(self._captured_inputs) + capture_index]

    if not (spec.is_compatible_with(tensor) or
            spec.is_compatible_with(placeholder)):
      raise ValueError(
          f"Attempting to substitute closure with spec {spec} that's "
          f"incompatible with the original capture {tensor} or the internal "
          f"placeholder {placeholder}.")

    self._func_graph.replace_capture_with_deferred_capture(
        tensor=tensor,
        closure=closure,
        spec=spec,
        placeholder=placeholder,
        default_value=default_value)

    if capture_index is not None:
      self._captured_inputs[capture_index] = closure
@property
def captured_inputs(self):
"""Returns external Tensors captured by this function.
self.__call__(*args) passes `args + self.captured_inputs` to the function.
"""
return nest.flatten(
[x() if callable(x) else x for x in self._captured_inputs],
expand_composites=True)
  @property
  def function_def(self):
    """Returns a `FunctionDef` object representing this function."""
    return self._delayed_rewrite_functions.forward().cached_definition
  @property
  def output_shapes(self):
    """The function's output shapes."""
    # Composite tensors are expanded to their components first; structured
    # outputs without a `shape` attribute map to an unknown TensorShape.
    return nest.map_structure(
        lambda x: getattr(x, "shape", tensor_shape.TensorShape(None)),
        composite_tensor.replace_composites_with_components(
            self._func_graph.structured_outputs),
        expand_composites=False)
  @property
  def output_dtypes(self):
    """The function's output dtypes (None for non-tensor outputs)."""
    # TODO(akshayka): Consider removing this.
    return nest.map_structure(
        lambda x: x.dtype if x is not None else None,
        composite_tensor.replace_composites_with_components(
            self._func_graph.structured_outputs),
        expand_composites=False)
  def add_to_graph(self, g=None, overwrite=False):
    """Registers the function, adds it to the graph g or default graph.

    Args:
      g: If specified, registers the function with this graph. Defaults to the
        current context (either the default graph or the eager context).
      overwrite: A bool. If True, its forward function will overwrite
        any existing function of the same signature name in the graph `g`.
    """
    # NOTE(review): `overwrite` is accepted but not used in this body — confirm
    # whether it should be forwarded to `_add_function_recursive`.
    # If we are not executing eagerly, adds the function to default graph if no
    # graph is specified.
    # In case of eager execution, function definition gets added to context
    # during construction itself.
    if not context.executing_eagerly() and not g:
      g = ops.get_default_graph()

    if g is not None:
      g._add_function_recursive(self._delayed_rewrite_functions.forward())  # pylint: disable=protected-access
  def add_gradient_functions_to_graph(self, g=None):
    """Add forward/backward functions to graph `g` or the current context."""
    if not context.executing_eagerly() and not g:
      g = ops.get_default_graph()
    # NOTE(review): unlike `add_to_graph`, `g` is not checked for None before
    # use, so calling this eagerly without passing a graph would fail — confirm
    # callers always supply one in eager mode.
    g._add_function_recursive(self._delayed_rewrite_functions.forward())  # pylint: disable=protected-access
    forward_function, backward_function = (
        self._delayed_rewrite_functions.forward_backward())
    g._add_function_recursive(forward_function)  # pylint: disable=protected-access
    backward_function.add_to_graph(g)
  def _get_gradient_function(self):
    """Returns gradient function. It will be lazily created at first call."""
    # Delegates to the delayed-rewrite machinery, which rewrites the forward
    # pass and invokes the generated backward function on demand.
    return self._delayed_rewrite_functions._rewrite_forward_and_call_backward  # pylint: disable=protected-access
  def _select_forward_and_backward_functions(
      self, args, possible_gradient_type, executing_eagerly):
    """Selects forward and backward functions based on the calling context.

    The forward function computes the "real" function outputs, `self._outputs`,
    and any extra values needed by the corresponding backward function.

    Args:
      args: A flat list of Tensors with all of the inputs to the forward
        function (including user-specified and captured inputs).
      possible_gradient_type: One of gradients_util.POSSIBLE_GRADIENT_TYPES_*.
      executing_eagerly: Boolean, the value of context.executing_eagerly().

    Returns:
      An object with a `forward` method returning a tuple of (forward_function :
      AtomicFunction, augmented_arguments : List), and a corresponding
      `record` method which takes outputs from the forward function and records
      the operation. forward_function should be called with augmented_arguments.
    """
    if executing_eagerly:
      input_tangents = forwardprop_util.pack_tangents(args)
    else:
      input_tangents = forwardprop_util.TangentInfo()
    need_gradients_for_jvps = record.should_record_backprop(
        input_tangents.tangents)
    # Allows re-use of forward and backward function pairs depending on the
    # tapes and forward accumulators watching its inputs.
    cache_key = (need_gradients_for_jvps, input_tangents.indices)
    if (possible_gradient_type
        == gradients_util.POSSIBLE_GRADIENT_TYPES_FIRST_ORDER):
      if input_tangents.indices or executing_eagerly:
        # There is a single non-persistent tape active, so the user can only
        # request first-order gradients from a tape. We can spend less time
        # graph building since we know this.
        #
        # We may still end up computing higher-order gradients, but that'd be
        # through `tf.gradients`, which can re-write the forward pass and so
        # needs no preparation here.
        functions = self._first_order_tape_functions.get(cache_key, None)
        if functions is None:
          # Cache miss: build (and memoize) a first-order pair for this key.
          functions = _FirstOrderTapeGradientFunctions(
              self._func_graph, self._attrs, self._garbage_collector,
              forwardprop_input_indices=input_tangents.indices,
              delayed_rewrite_functions=self._delayed_rewrite_functions,
              need_gradients_for_jvps=need_gradients_for_jvps)
          self._first_order_tape_functions[cache_key] = functions
        return _ForwardBackwardCall(
            functions, args, input_tangents.tangents, tape_watching=True)
      else:
        # We can avoid computing second-order gradients in some cases by doing a
        # delayed rewrite when graph building. Since we know we'll only compute
        # first-order tape gradients, the delayed rewrite is safe: we won't need
        # to tell the tape about side outputs.
        #
        # TODO(allenl): This case is really dirty. It would be better if we
        # could temporarily pop all of the current tapes to avoid
        # accidentally taking second-order gradients.
        return _ForwardBackwardCall(
            self._delayed_rewrite_functions, args, input_tangents.tangents,
            tape_watching=True)
    elif (possible_gradient_type
          == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER):
      # Either there's a persistent tape watching, or there are multiple nested
      # tapes. Either way, the user may request higher-order gradients. We'll
      # spend a bit more time and make sure higher-order gradients are correct.
      functions = self._higher_order_tape_functions.get(
          cache_key, None)
      if functions is None:
        functions = _HigherOrderTapeGradientFunctions(
            self._func_graph, self._attrs, self._garbage_collector,
            forwardprop_input_indices=input_tangents.indices,
            delayed_rewrite_functions=self._delayed_rewrite_functions,
            need_gradients_for_jvps=need_gradients_for_jvps)
        self._higher_order_tape_functions[cache_key] = functions
      return _ForwardBackwardCall(functions, args, input_tangents.tangents,
                                  tape_watching=True)
    # else possible_gradient_type == POSSIBLE_GRADIENT_TYPES_NONE, meaning no
    # tape is recording.
    return _ForwardBackwardCall(
        self._delayed_rewrite_functions, args, input_tangents.tangents,
        tape_watching=False)
  def _build_call_outputs(self, result):
    """Maps the fdef output list to actual output structure.

    Args:
      result: Output lists defined by FunctionDef.
    Returns:
      The actual call output.
    """
    # TODO(jlchu): call C++ version in function.cc when speed is improved
    if self._func_graph.structured_outputs is None:
      return result

    # Replace outputs with results, skipping over any 'None' values.
    outputs_list = nest.flatten(
        self._func_graph.structured_outputs, expand_composites=True)
    j = 0  # index into `result`/`self.outputs`, which omit the None entries
    for i, o in enumerate(outputs_list):
      if o is not None:
        handle_data_util.copy_handle_data(self.outputs[j], result[j])
        outputs_list[i] = result[j]
        j += 1
    # Re-assemble the flat results into the original structured shape.
    ret = nest.pack_sequence_as(self._func_graph.structured_outputs,
                                outputs_list, expand_composites=True)
    return ret
@property
def _as_name_attr_list(self):
"""Returns a `NameAttrList` representing this function."""
ret = attr_value_pb2.NameAttrList(name=self.name)
for name, value in self._attrs.items():
ret.attr[name].CopyFrom(value)
return ret
  def _structured_signature_summary(self, default_values=False):
    """Returns a string summarizing this function's structured signature.

    Args:
      default_values: If true, then include default values in the signature.

    Returns:
      A `string`.
    """
    # Note: we can't just use str(self.function_type), because
    # that would show "BOUND_VALUE" as the default value for all arguments.
    assert self.function_type is not None
    arg_specs, kwarg_specs = self.structured_input_signature
    arg_names = function_type_utils.to_arg_names(self.function_type)

    # If an explicit input_signature is provided to @tf.function, then any
    # arguments with defaults that are not covered by that explicit signature
    # are simply dropped from the signature.
    # TODO(b/159639913) Look into whether dropping arguments with default values
    # from the signature is the right thing to do.
    arg_names = arg_names[:len(arg_specs)]
    if default_values:
      for i in range(len(arg_names)):
        # Specs without a TypeSpec are bound literal values; display them as
        # "name=value" defaults.
        if not _contains_type_spec(arg_specs[i]):
          arg_names[i] += "={}".format(arg_specs[i])
    if kwarg_specs:
      # "*" separates positional arguments from keyword-only arguments.
      arg_names.append("*")
      for name, spec in kwarg_specs.items():
        arg_names.append(name)
        if default_values and not _contains_type_spec(spec):
          arg_names[-1] += "={}".format(spec)
    signature = f"{self._func_graph.name}({', '.join(arg_names)})"
    return signature
def _flat_signature_summary(self):
"""Returns a string summarizing this function's flat signature."""
assert self._arg_keywords is not None
assert self._num_positional_args is not None
arg_names = self._arg_keywords
if self._num_positional_args > len(arg_names):
arg_names.extend(
"<arg{}>".format(i + 1)
for i in range(len(arg_names), self._num_positional_args))
return f"{self._func_graph.name}({', '.join(arg_names)})"
  def pretty_printed_signature(self, verbose=True):
    """Returns a string summarizing the signature of this concrete function."""
    if not verbose:
      return self._structured_signature_summary(default_values=True)

    def pretty_print_spec(spec):
      """Returns a string describing the spec for a single argument."""
      if isinstance(spec, tensor_spec.TensorSpec):
        return "{} Tensor, shape={}".format(spec.dtype.name, spec.shape)
      elif nest.is_nested(spec):
        # Replace each leaf with a numbered marker, print the structure, then
        # describe each marker on its own line.
        pieces = nest.flatten(spec, expand_composites=False)
        markers = [_Marker("<{}>".format(i + 1)) for i in range(len(pieces))]
        structure = nest.pack_sequence_as(spec, markers)
        # Ensure dictionaries are sorted by key (for determinism)
        result = pprint.pformat(structure, width=10000)
        for (marker, piece) in zip(markers, pieces):
          result += "\n {}: {}".format(marker, pretty_print_spec(piece))
        return result
      else:
        return repr(spec)

    lines = [self._structured_signature_summary(default_values=True)]
    arg_specs, kwarg_specs = self.structured_input_signature
    names = function_type_utils.to_arg_names(self.function_type)

    # If an explicit input_signature is provided to @tf.function, then any
    # arguments with defaults that are not covered by that explicit signature
    # are simply dropped from the signature.
    # TODO(b/159639913) Look into whether dropping arguments with default values
    # from the signature is the right thing to do.

    # Note: we can skip bound args, since we already displayed their bound
    # value in the signature summary.
    arg_details = []
    for (name, spec) in zip(names[:len(arg_specs)], list(arg_specs)):
      if _contains_type_spec(spec):
        arg_details.append(" {}: {}".format(name, pretty_print_spec(spec)))

    if kwarg_specs:
      for kwarg in sorted(kwarg_specs):
        spec = kwarg_specs[kwarg]
        if _contains_type_spec(spec):
          arg_details.append(" {}: {}".format(
              kwarg, pretty_print_spec(spec)))

    if arg_details:
      lines.append(" Args:")
      lines.extend(arg_details)
    lines.append(" Returns:")

    def spec_from_value(value):
      # For loaded function, structured_outputs are already specs.
      if isinstance(value, type_spec.TypeSpec):
        return value
      return type_spec.type_spec_from_value(value)

    lines.append(" {}".format(
        pretty_print_spec(
            nest.map_structure(spec_from_value, self.structured_outputs))))

    return "\n".join(lines)
def __repr__(self):
if self.function_type is not None:
return "<ConcreteFunction {} at 0x{:X}>".format(
self.pretty_printed_signature(verbose=False), id(self))
elif not (self._num_positional_args is None or self._arg_keywords is None):
return "<ConcreteFunction {} at 0x{:X}>".format(
self._flat_signature_summary(), id(self))
else:
return object.__repr__(self)
def __str__(self):
if self.function_type is not None:
return "ConcreteFunction {}".format(self.pretty_printed_signature())
else:
return self.__repr__()
  def _trackable_children(self, save_type="checkpoint", **kwargs):
    """Implements `Trackable`."""
    if save_type == "checkpoint":
      # Checkpoint dependencies do not include functions at all. Users
      # expect the checkpointed variables to be saved using the model
      # architecture, e.g. `model.layers[1].kernel` or `model.variables`.
      return {}

    # SavedModel export: expose (most) graph captures as children.
    captured_trackables = {}
    for n, (capture, _) in enumerate(self.graph.captures):
      if (capture.dtype not in (dtypes.variant, dtypes.resource) and
          not resource_variable_ops.is_resource_variable(capture)):
        # Variant/resource type tensors are skipped since we have no way of
        # getting the `Trackable` wrapper for these tensors. The wrappers are
        # expected to be elsewhere in the saved object graph.
        # TODO(b/223866972): Directly encode/decode tensor captures.
        # Resource variable captures are also skipped at this time, to maintain
        # existing behavior.
        # TODO(b/217979389): Return the non-constant captures as children.
        captured_trackables[f"capture_{n}"] = capture
    return captured_trackables
  def _deserialization_dependencies(self, children):
    """Implements `Trackable`: declares every child as a load dependency."""
    return children
  def _export_to_saved_model_graph(self, object_map, tensor_map,
                                   **unused_kwargs):
    """Implements `Trackable`: registers this function for SavedModel export."""
    if not self.graph.saveable:
      raise ValueError(
          (f"Unable to save function {self.name} for the following reason(s):\n"
           + "\n".join(self.graph.saving_errors)))
    self.add_to_graph()
    # Map this function to its exported wrapper; no extra tensors to export.
    object_map[self] = saved_model_exported_concrete.ExportedConcreteFunction(
        self, tensor_map)
    return []
# Register these Python types with the pywrap layer so that C++ code can
# recognize them when crossing the language boundary.
_pywrap_utils.RegisterType("Tensor", ops.Tensor)
_pywrap_utils.RegisterType("EagerTensor", ops.EagerTensor)
_pywrap_utils.RegisterType("IndexedSlices", indexed_slices.IndexedSlices)
class ConcreteFunctionGarbageCollector:
  """Cleans up reference cycles when a `ConcreteFunction` goes out of scope."""

  __slots__ = ["_func_graph"]

  def __init__(self, func_graph):
    self._func_graph = func_graph

  def release(self):
    """Call off the FuncGraph deletion."""
    self._func_graph = None

  def __del__(self):
    # Skip teardown if released, or if the module was torn down at exit.
    graph = self._func_graph
    if graph is None or func_graph_module is None:
      return
    try:
      func_graph_module.dismantle_func_graph(graph)
    except:  # pylint: disable=bare-except
      pass
class _Marker(object):
"""Markers used to pretty-print nested args in function signatures."""
__slots__ = ["_s"]
def __init__(self, s):
self._s = s
def __repr__(self):
return str(self._s)
def _contains_type_spec(value):
  """Returns True if any leaf of `value` is a `TypeSpec` instance."""
  for leaf in nest.flatten(value):
    if isinstance(leaf, type_spec.TypeSpec):
      return True
  return False
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
3710d154cf29a95d73ae991be6631907e17e1022 | da2993b3aaa18bb35f42886b1f4d7f938d055ff5 | /backend/mysite/mysite/urls.py | c0f738c13e004c447355fc5fac05e8af36dd7f2c | [] | no_license | karthikeyansa/React_Django_AuthToken | 00838c6ef679b589ad38aba864b21a64478a33c4 | cab17eff3ef75ade389c33c5f9109fdbc366a8d3 | refs/heads/master | 2023-02-09T23:10:24.311347 | 2021-01-01T18:34:53 | 2021-01-01T18:34:53 | 304,940,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from django.contrib import admin
from django.urls import path,include
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('api/',include('app.urls')),  # application API routes
    path('auth/',obtain_auth_token)  # DRF endpoint: POST credentials, returns auth token
]
| [
"karthikeyansa39@gmail.com"
] | karthikeyansa39@gmail.com |
79c81670f0d6cb73535fd86bb43d6d32d320e3b6 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_Lag1Trend/cycle_0/ar_/test_artificial_128_None_Lag1Trend_0__0.py | c224a904afad41ec2d371e5e1389e07997af4970 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 263 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
0a37a74c812523a59805fb7704eefccb904cbfb9 | 35fa43655e18d18f2de898bce3a96456b05468de | /fernet_fields/test/settings/sqlite.py | 7ded7cba6f65dacc0789763741bf0df033f41288 | [
"BSD-3-Clause"
] | permissive | singular-labs/django-fernet-fields | b077588ba5f99383b9312c1ebbcb2c104be0f1e4 | f2d57ef179409cee554a177a9f7d90974acd14ed | refs/heads/master | 2023-08-17T18:59:58.627042 | 2023-08-14T12:06:48 | 2023-08-14T12:06:48 | 253,502,252 | 0 | 0 | BSD-3-Clause | 2023-08-14T12:06:50 | 2020-04-06T13:13:54 | Python | UTF-8 | Python | false | false | 293 | py | from .base import * # noqa
import os
HERE = os.path.dirname(os.path.abspath(__file__))
DB = os.path.join(HERE, 'testdb.sqlite')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB,
'TEST': {
'NAME': DB,
},
},
}
| [
"carl@oddbird.net"
] | carl@oddbird.net |
cb6d6b0ee4a364082c3ad8f3a2aa5e19fb59a36c | 8032f671147f62ce91d6a42be5bafebdfeb236f9 | /tests/test_01_dxf_entities/test_133_sun.py | 415a0407d23625c42466f7e16ff56c0d3ecf7231 | [
"MIT"
] | permissive | mamofejo/ezdxf | 3ebcd9afae06e53d56a8622f8406e2c9a95e4971 | bd5a08a85608360266eb8702d48638195c72c247 | refs/heads/master | 2023-02-26T22:04:48.798010 | 2021-02-05T14:06:28 | 2021-02-05T14:06:28 | 336,305,662 | 0 | 0 | MIT | 2021-02-05T15:29:09 | 2021-02-05T15:08:54 | null | UTF-8 | Python | false | false | 2,054 | py | # Copyright (c) 2019 Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf.entities.sun import Sun
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
SUN = """0
SUN
5
0
330
0
100
AcDbSun
90
1
290
1
63
7
421
16777215
40
1.0
291
1
91
2456922
92
43200
292
0
70
0
71
256
280
1
"""
@pytest.fixture
def entity():
    # Parse the SUN fixture text into a fresh Sun entity for each test.
    return Sun.from_text(SUN)
def test_registered():
    # The SUN type must be registered with the global entity factory.
    from ezdxf.entities.factory import ENTITY_CLASSES
    assert 'SUN' in ENTITY_CLASSES
def test_default_init():
    # A bare Sun() has no handle/owner until placed in a document.
    entity = Sun()
    assert entity.dxftype() == 'SUN'
    assert entity.dxf.handle is None
    assert entity.dxf.owner is None
def test_default_new():
    # With empty dxfattribs, every attribute falls back to its default value.
    entity = Sun.new(handle='ABBA', owner='0', dxfattribs={
    })
    assert entity.dxf.version == 1
    assert entity.dxf.status == 1
    assert entity.dxf.color == 7
    assert entity.dxf.true_color == 16777215
    assert entity.dxf.intensity == 1
    assert entity.dxf.shadows == 1
    assert entity.dxf.julian_day == 2456922
    assert entity.dxf.time == 43200
    assert entity.dxf.daylight_savings_time == 0
    assert entity.dxf.shadow_type == 0
    assert entity.dxf.shadow_map_size == 256
    assert entity.dxf.shadow_softness == 1
def test_load_from_text(entity):
    # Values parsed from the SUN fixture must match the tag text above.
    assert entity.dxf.version == 1
    assert entity.dxf.status == 1
    assert entity.dxf.color == 7
    assert entity.dxf.true_color == 16777215
    assert entity.dxf.intensity == 1
    assert entity.dxf.shadows == 1
    assert entity.dxf.julian_day == 2456922
    assert entity.dxf.time == 43200
    assert entity.dxf.daylight_savings_time == 0
    assert entity.dxf.shadow_type == 0
    assert entity.dxf.shadow_map_size == 256
    assert entity.dxf.shadow_softness == 1
def test_write_dxf():
    # Exported tags must round-trip exactly to the original SUN tag text.
    entity = Sun.from_text(SUN)
    result = TagCollector.dxftags(entity)
    expected = basic_tags_from_text(SUN)
    assert result == expected
def test_sun():
    # A SUN created through a document lands in the OBJECTS section.
    doc = ezdxf.new('R2007')
    sun = doc.objects.new_entity('SUN', {})
    assert sun.dxftype() == 'SUN'
    assert sun.dxf.version == 1
| [
"mozman@gmx.at"
] | mozman@gmx.at |
a8d9113b291db397bd0c08f5ef6b239a0c3e074d | 1c4110a0bdbb888fd7a82579810cda2c73b52dba | /20210715 Pycharm/Pycharm/venv/Lib/site-packages/pandda/handlers.py | 81cc12234bd1d249abfacfb8a4705b7293be600f | [] | no_license | DrillND/python | d09786e2937a10c9c67170826131b8ee204e0b37 | f6aa1d4d29e4519f89a63af4c3c8f83ed60630ea | refs/heads/main | 2023-06-19T11:51:14.307597 | 2021-07-16T07:18:52 | 2021-07-16T07:18:52 | 355,095,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,558 | py | import os
import iotbx.pdb
import iotbx.mtz
import cctbx.maptbx
import scitbx.math.superpose
from bamboo.common import Meta, Info
from bamboo.common.file import FileManager
from bamboo.common.path import easy_directory
from giant.structure import make_label
from giant.structure.align import perform_flexible_alignment, find_nearest_calphas, transform_coordinates_with_flexible_alignment
from giant.xray.data import CrystalSummary
from giant.xray.symmetry import combine_hierarchies, generate_adjacent_symmetry_copies
########################################################################################################
#
# DATASET HANDLER CLASSES
#
########################################################################################################
class DatasetHandler(object):
    # Wraps a single crystallographic dataset (PDB model + optional MTZ
    # reflection data) so that common operations — alignment, coordinate
    # transforms, symmetry expansion — can be applied uniformly.

    def __init__(self, dataset_number, pdb_filename, mtz_filename=None, dataset_tag=None):
        """Create a dataset object to allow common functions to be applied easily to a number of datasets"""

        if pdb_filename: assert os.path.exists(pdb_filename), 'PDB file does not exist!'
        if mtz_filename: assert os.path.exists(mtz_filename), 'MTZ file does not exist!'

        # Store dataset number
        self.num = dataset_number
        # Store the tag for the dataset
        if dataset_tag:
            self.tag = dataset_tag
        else:
            # If num < 0 - mark as a reference dataset
            if self.num < 0: self.tag = 'REF{:05d}'.format(self.num)
            else: self.tag = 'D{:05d}'.format(self.num)

        # Output Directories
        self.output_handler = None
        self.child = None
        self.meta = Meta()

        ########################################################

        # Store filenames
        self._pdb_file = pdb_filename
        self._mtz_file = mtz_filename

        # PDB Objects
        self._structure = self.new_structure()

        # Data summaries
        if self._pdb_file: self.pdb_summary = None
        if self._mtz_file: self.mtz_summary = CrystalSummary.from_mtz(mtz_file=self._mtz_file)

        ########################################################

        # All Structure factors
        self.sfs = None
        # Truncated structure factors
        self.tr_sfs = None

        # Initialise other variables
        self.unit_cell = None
        self.space_group = None

        # Single matrix - global alignment
        self._global_rt_transform = None
        # Multiple matrices - local alignment
        self._local_rt_transforms = None

        ########################################################

        self.crystal_contact_generators = None

        ########################################################

        # Map of the clusters in the dataset
        self.events = []

    #####################################################################
    #                                                                   #
    #                       UTILITY FUNCTIONS                           #
    #                                                                   #
    #####################################################################

    def initialise_output_directory(self, outputdir):
        """Initialise a dataset output directory"""
        # Create a file and directory organiser
        self.output_handler = FileManager(rootdir=easy_directory(outputdir))

    def get_pickle_copy(self):
        """Get copy of self that can be pickled - some cctbx objects cannot be pickled..."""
        # NOTE(review): currently returns self unmodified (no copy is made);
        # confirm that all held cctbx members are actually picklable.
        return self

    def pdb_filename(self):
        # Path of the input PDB file.
        return self._pdb_file

    def mtz_filename(self):
        # Path of the input MTZ file (may be None).
        return self._mtz_file

    #####################################################################
    #                                                                   #
    #                       HIGH LEVEL OBJECTS                          #
    #                                                                   #
    #####################################################################

    def input(self):
        # iotbx.pdb input object for the stored structure.
        return self._structure.input

    def hierarchy(self):
        # iotbx.pdb hierarchy object for the stored structure.
        return self._structure.hierarchy

    def new_structure(self):
        """Generate a new copy of the input-hierarchy pair, from the pdb file"""
        return iotbx.pdb.hierarchy.input(file_name=self._pdb_file)

    def reflection_data(self):
        """Return an object containing the reflection data"""
        return iotbx.mtz.object(self._mtz_file)

    #####################################################################
    #                                                                   #
    #                         STRUCTURE STUFF                           #
    #                                                                   #
    #####################################################################

    def heavy_atom_sites(self):
        # Cartesian coordinates of the heavy (non-H) atoms.
        xray_structure = self.input().xray_structure_simple()
        return xray_structure.sites_cart().select(xray_structure.heavy_selection())

    def calpha_sites(self):
        # Cartesian coordinates of the C-alpha atoms only.
        xray_structure = self.input().xray_structure_simple()
        return xray_structure.sites_cart().select(xray_structure.backbone_selection(atom_names=['CA']))

    def backbone_sites(self):
        # Cartesian coordinates of all backbone atoms.
        xray_structure = self.input().xray_structure_simple()
        return xray_structure.sites_cart().select(xray_structure.backbone_selection())

    def calphas(self):
        """Get the calphas for the structure"""
        return self.hierarchy().select(self.hierarchy().atom_selection_cache().selection('pepnames and name CA'))

    def calpha_labels(self):
        """Return the labels of the calphas of the structure"""
        return [make_label(a) for a in self.calphas().atoms_with_labels()]

    def find_nearest_calpha(self, points, hierarchy=None):
        """Returns the labels of the nearest calpha for each of the given points"""
        if hierarchy is None: hierarchy = self.hierarchy()
        return find_nearest_calphas(hierarchy, coordinates=points)

    #####################################################################
    #                                                                   #
    #                         ALIGNMENT STUFF                           #
    #                                                                   #
    #####################################################################

    def set_global_alignment(self, alignment):
        # Store the single whole-structure alignment transform.
        self._global_rt_transform = alignment

    def global_alignment_transform(self):
        return self._global_rt_transform

    def set_local_alignments(self, alignment):
        # Store the per-residue (flexible) alignment transforms.
        self._local_rt_transforms = alignment

    def local_alignment_transforms(self):
        return self._local_rt_transforms

    def transform_coordinates(self, points, method, point_mappings=None, inverse=False):
        """Transform coordinates using contained alignments.

        method: 'global' uses the single RT transform; 'local' applies the
        flexible per-residue transforms (requires point_mappings).
        inverse: if True, apply the inverse mapping (reference -> dataset).
        """
        assert method in ['local','global'], 'METHOD NOT DEFINED: {!s}'.format(method)
        if method == 'global':
            if inverse: return self.global_alignment_transform().inverse() * points
            else: return self.global_alignment_transform() * points
        elif method == 'local':
            assert point_mappings is not None
            return transform_coordinates_with_flexible_alignment( alignments = self.local_alignment_transforms(),
                                                                  coordinates = points,
                                                                  mappings = point_mappings,
                                                                  inverse = inverse)

    def transform_from_reference(self, points, method, point_mappings=None):
        """Use alignment to map points from the reference frame into this dataset's frame (inverse transform)"""
        return self.transform_coordinates( points = points,
                                           method = method,
                                           point_mappings = point_mappings,
                                           inverse = True )

    def transform_to_reference(self, points, method, point_mappings=None):
        """Use alignment to map to reference frame from our frame"""
        # Without explicit mappings, assign each point to its nearest C-alpha.
        if point_mappings is None:
            point_mappings = self.find_nearest_calpha(points)
        return self.transform_coordinates( points = points,
                                           method = method,
                                           point_mappings = point_mappings,
                                           inverse = False )

    #####################################################################
    #                                                                   #
    #                         SYMMETRY STUFF                            #
    #                                                                   #
    #####################################################################

    def generate_symmetry_copies(self, rt_method=None, save_operators=True, buffer=10):
        """Generate the symmetry copies of the reference structure in the reference frame"""

        # Use symmetry operations to create the symmetry mates of the reference structure
        sym_ops, sym_hierarchies, chain_mappings = generate_adjacent_symmetry_copies( ref_hierarchy    = self.new_structure().hierarchy,
                                                                                      crystal_symmetry = self.input().crystal_symmetry(),
                                                                                      buffer_thickness = buffer )
        # Record the symmetry operations that generate the crystal contacts
        if save_operators: self.crystal_contact_generators = sym_ops

        # Create a combined hierarchy of the crystal contacts
        symmetry_root = combine_hierarchies(sym_hierarchies)
        # Transform to reference frame?
        if rt_method: symmetry_root.atoms().set_xyz(self.transform_to_reference(points=symmetry_root.atoms().extract_xyz(), method=rt_method))
        # Save coordinates
        self.symmetry_copies = symmetry_root
        return self.symmetry_copies
class ReferenceDatasetHandler(DatasetHandler):
    # Defaults: no origin shift, no binning configured.
    _origin_shift = (0,0,0)
    _binning = None

    def set_origin_shift(self, origin_shift):
        # Record the translation applied to bring this dataset to the origin.
        self._origin_shift = origin_shift
    def origin_shift(self):
        return self._origin_shift

    def set_map_scale(self, map_mean, map_rms):
        """Record the map mean and rms values"""
        self._map_mean = map_mean
        self._map_rms = map_rms
    def map_scale(self):
        # Returns (mean, rms) as recorded by set_map_scale.
        return (self._map_mean, self._map_rms)
########################################################################################################
#
# MAP HANDLER CLASSES
#
########################################################################################################
def map_handler(map_data, unit_cell):
    """Map handler for easy sampling of map"""
    # Out-of-bounds accesses are clamped to zero rather than raising.
    out_of_bounds_handle = cctbx.maptbx.out_of_bounds_clamp(0).as_handle()
    basic_map = cctbx.maptbx.basic_map(
        cctbx.maptbx.basic_map_unit_cell_flag(),
        map_data,
        map_data.focus(),
        unit_cell.orthogonalization_matrix(),
        out_of_bounds_handle,
        unit_cell,
    )
    return basic_map
########################################################################################################
#
# HANDLER FUNCTIONS
#
########################################################################################################
def align_dataset_to_reference(d_handler, ref_handler, method):
    """Calculate the rotation and translation needed to align one structure to another"""
    assert method in ['both','local','global'], 'METHOD NOT DEFINED: {!s}'.format(method)

    global_rt_transform = None
    local_rt_transforms = None

    # Global: least-squares superposition of the C-alpha sites.
    if method in ('global', 'both'):
        mov_sites = d_handler.calpha_sites()
        ref_sites = ref_handler.calpha_sites()
        assert len(mov_sites) == len(ref_sites)
        global_rt_transform = scitbx.math.superpose.least_squares_fit(
            reference_sites=ref_sites, other_sites=mov_sites).rt()

    # Local: flexible per-residue alignment within a 10A cutoff.
    if method in ('local', 'both'):
        local_rt_transforms = perform_flexible_alignment(
            mov_hierarchy=d_handler.hierarchy(),
            ref_hierarchy=ref_handler.hierarchy(),
            cutoff_radius=10)

    return global_rt_transform, local_rt_transforms
| [
"gornhub13@gmail.com"
] | gornhub13@gmail.com |
428acd38c50ae5ec8e63de2b7e9a6f24510905af | dd74129c42933062ca4a6304f9a715bd18f3806b | /setup.py | d40f460af6508730e4e85c2efdcba1be69c42c05 | [] | no_license | sensein/cmixf | 12d1873508835023a32e6898baee831ea2ef91f9 | 28bf47ee8c7d1ba4a5241bcc19563df75b578fb5 | refs/heads/master | 2022-07-31T15:59:03.878678 | 2020-05-25T20:55:12 | 2020-05-25T20:55:12 | 266,423,055 | 2 | 1 | null | 2020-05-25T20:55:13 | 2020-05-23T21:32:50 | Python | UTF-8 | Python | false | false | 715 | py | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""CMIXF parser
"""
import sys
from setuptools import setup
import versioneer
# Give setuptools a hint to complain if it's too old a version
# 30.3.0 allows us to put most metadata in setup.cfg
# Should match pyproject.toml
SETUP_REQUIRES = ["setuptools >= 30.3.0"]
# This enables setuptools to install wheel on-the-fly
SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
if __name__ == "__main__":
setup(
name="cmixf",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
setup_requires=SETUP_REQUIRES,
)
| [
"satra@mit.edu"
] | satra@mit.edu |
c9562ba1567ff3184f04a6950ae61145523b1927 | b017d566bf96da4e8251d9f37cc9ba6ed614b6b7 | /pyapp/webmux.py | dd74859faca27d550a5f798c2463bf28cb4b542d | [
"MIT"
] | permissive | mrbox24/webmux | a50f4372aabe1b8f2aad39e447b4a70bdcfef308 | 95bd462b412c5bba2ce4a8fcec9ef3268ed541d2 | refs/heads/master | 2020-11-27T17:19:15.798241 | 2019-12-22T09:06:36 | 2019-12-22T09:06:36 | 229,542,638 | 0 | 0 | null | 2019-12-22T09:03:29 | 2019-12-22T09:03:28 | null | UTF-8 | Python | false | false | 10,665 | py | #!/usr/bin/env python
from __future__ import print_function, absolute_import
import logging
import os, os.path, socket
import sys, subprocess, threading, time
import requests, re
import tornado.web
from tornado.netutil import bind_unix_socket
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from tornado.escape import json_decode
import tornado.options
import terminado
import traceback
STATIC_DIR = os.path.join(os.path.dirname(terminado.__file__), "_static")
TEMPLATE_DIR = os.path.dirname(__file__)
# This is the port we'll start handing things out at
port_base = 2023
# Global registry of known servers keyed by hostname; (re)built by
# reset_server_list() and updated as registrations arrive.
server_list = {}
def get_global_ip():
    """Poll checkip.dyndns.org until this host's global (WAN) IP is known.

    Runs in a daemon thread (started by reset_server_list) and replaces the
    'saba.us' placeholder in server_list['sophia']['global_ip'] once an IP
    address has been extracted from the page.
    """
    global server_list
    findTags = re.compile(r'<.*?>')
    findIP = re.compile(r'\d+\.\d+\.\d+\.\d+')
    while server_list['sophia']['global_ip'] == 'saba.us':
        try:
            # BUG FIX: `Response.text` is a property, not a method. The old
            # code called `.text()`, raising TypeError on every iteration,
            # which the bare `except` silently swallowed — so the global IP
            # was never discovered. Also add a timeout so the request cannot
            # hang forever.
            html = requests.get('http://checkip.dyndns.org', timeout=10).text
            ipaddress = findIP.search(findTags.sub('', html))
            if ipaddress is not None:
                server_list['sophia']['global_ip'] = ipaddress.group(0)
                logging.info("Found global IP to be %s"%(server_list['sophia']['global_ip']))
        except Exception:
            # Network errors are expected; pause briefly instead of
            # busy-looping at full speed.
            time.sleep(5)
def get_local_ip():
    """Return the LAN IP address of this machine.

    connect() on a UDP socket sends no packets; it merely selects the
    outgoing interface, whose address getsockname() then reports.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("1.1.1.1", 80))
        return s.getsockname()[0]
    finally:
        # BUG FIX: the socket was previously never closed (fd leak on every
        # call).
        s.close()
def reset_server_list():
    """Reset the global server registry to the single default 'sophia' entry.

    Also starts a daemon thread that resolves this host's global IP and
    patches it into the entry once found (see get_global_ip).
    """
    global server_list
    server_list = {
        'sophia': {
            'hostname': 'sophia',
            'host_port': 22,
            'webmux_port': 22,
            'global_ip': 'saba.us',
            'local_ip': get_local_ip(),
            'user': 'sabae',
            'direct': True,
            'last_direct_try': 1e100,
        }
    }

    t = threading.Thread(target=get_global_ip)
    t.daemon = True
    t.start()
def kill_all_tunnels():
    """
    Sometimes we just need to kill all the tunnels that have come in ever, so we
    don't rely upon our list, we instead ask `lsof` to look for all processes
    that are listening on the first 100 ports of our port_base and kill 'em all.
    """
    # -P/-n: numeric ports and addresses (no DNS/service-name lookups).
    lsof_cmd = "lsof -i:%d-%d -P -n"%(port_base, port_base+100)
    try:
        lsof_output = subprocess.check_output(lsof_cmd.split()).decode('utf-8')
    except subprocess.CalledProcessError:
        # lsof exits nonzero when nothing matches; treat as "no tunnels".
        return []
    except:
        traceback.print_exc(file=sys.stdout)
        logging.warning("Unable to probe active tunnels")
        return []

    # Second column of each lsof output row is the PID; skip the header,
    # dedupe, then kill each process.
    ssh_procs = list(set([l.split()[1] for l in lsof_output.split('\n')[1:] if l]))
    for p in ssh_procs:
        subprocess.call(["kill", p])
    return ssh_procs
class WebmuxTermManager(terminado.NamedTermManager):
    """Share terminals between websockets connected to the same endpoint.

    Terminals are keyed by the webmux port number of the registered server,
    so every browser tab opening /shell/<port> shares a single terminal.
    """
    def __init__(self, max_terminals=None, **kwargs):
        super(WebmuxTermManager, self).__init__(**kwargs)
        # BUG FIX: `max_terminals` was accepted here but never stored or
        # forwarded, so the limit checked in get_terminal() always saw the
        # base class default instead of the value passed by the caller.
        self.max_terminals = max_terminals

    def get_terminal(self, port_number):
        """Return the terminal for `port_number` (a string captured from the
        URL), creating an SSH session to the matching server if needed."""
        from terminado.management import MaxTerminalsReached
        # A terminal must always be requested for a concrete port.
        assert port_number is not None
        if port_number in self.terminals:
            return self.terminals[port_number]
        if self.max_terminals and len(self.terminals) >= self.max_terminals:
            raise MaxTerminalsReached(self.max_terminals)
        # Find server mapped to this port
        name = next(filter(lambda n: server_list[n]['webmux_port'] == int(port_number), server_list.keys()))
        s = server_list[name]
        # Create new terminal
        logging.info("Attempting to connect to: %s@%s:%d", s['user'], name, s['webmux_port'])
        # NOTE(review): mutating self.shell_command per request is racy if two
        # terminals are spawned concurrently -- confirm new_terminal() cannot
        # interleave here.
        self.shell_command = ["ssh", "-o", "UserKnownHostsFile /dev/null", "-o", "StrictHostKeyChecking no", "-p", port_number, s['user']+"@webmux.e.ip.saba.us"]
        term = self.new_terminal()
        term.term_name = port_number
        self.terminals[port_number] = term
        self.start_reading(term)
        return term
class IndexPageHandler(tornado.web.RequestHandler):
    """Render the index page"""
    def get(self):
        logging.info("Hit the index page")
        template_args = {"static": self.static_url, "server_list": server_list}
        return self.render("index.html", **template_args)
class RegistrationPageHandler(tornado.web.RequestHandler):
    """Return a port number for a hostname"""
    def post(self):
        """Register (or refresh) a server posted as a JSON body and reply
        with the webmux port assigned to it."""
        try:
            data = json_decode(self.request.body)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` and switched from the
            # deprecated logging.warn alias to logging.warning.
            logging.warning("Couldn't decode JSON body \"%s\" from IP %s"%(self.request.body, self.request.headers.get('X-Real-Ip')))
            return
        # Always update the 'global_ip'
        data['global_ip'] = self.request.headers.get("X-Real-IP")
        # Convert `host_port` to an integer
        # NOTE(review): a payload missing 'hostname' or 'host_port' raises
        # KeyError and yields an HTTP 500 -- confirm that is acceptable.
        data['host_port'] = int(data['host_port'])
        # If this hostname does not already exist in server_list, then initialize some sane defaults for `data`
        # before we put it into `server_list`.
        if not data['hostname'] in server_list:
            # Next free port: one past the highest webmux port assigned so
            # far, or port_base for the very first registration.
            port_number = max([server_list[k]['webmux_port'] for k in server_list] + [port_base - 1]) + 1
            data['webmux_port'] = port_number
            data['direct'] = False
            data['last_direct_try'] = 0
            server_list[data['hostname']] = data
        else:
            # Otherwise update server_list with the given data
            server_list[data['hostname']].update(data)
            data = server_list[data['hostname']]
        # Log out a little bit
        logging.info("Registered %s at %s:%d on webmux port %d"%(data['hostname'], data['global_ip'], data['host_port'], data['webmux_port']))
        self.write(str(data['webmux_port']))
class ResetPageHandler(tornado.web.RequestHandler):
    """Reset all SSH connections forwarding ports"""
    def get(self):
        killed = kill_all_tunnels()
        reset_server_list()
        message = "Killed %d live SSH tunnels"%(len(killed))
        logging.info(message)
        self.write(message)
class TerminalPageHandler(tornado.web.RequestHandler):
    """Render the /shell/[\\d]+ pages"""
    def get_host(self, port_number):
        """Map a webmux port (the string captured from the URL) back to a
        server hostname, falling back to a generic label."""
        for hostname in server_list:
            # BUG FIX: webmux_port is stored as an int while port_number
            # arrives as a string from the URL regex, so the old `==`
            # comparison never matched and the fallback label was always
            # returned.
            if server_list[hostname]['webmux_port'] == int(port_number):
                return hostname
        return "host on port " + port_number

    def get(self, port_number):
        return self.render("term.html", static=self.static_url,
                           ws_url_path="/_websocket/"+port_number,
                           hostname=self.get_host(port_number))
def sabanetify(hostname):
    """Derive a deterministic fd37:5040:: IPv6-style address for a hostname.

    The first 16 hex digits of the hostname's SHA-256 digest are split into
    four 4-digit groups and appended to the fixed prefix.
    """
    import hashlib
    digest = hashlib.sha256(hostname.encode('utf-8')).hexdigest()
    groups = (digest[:16][pos:pos + 4] for pos in range(0, 16, 4))
    return "fd37:5040::" + ":".join(groups)
class BashPageHandler(tornado.web.RequestHandler):
    """Render the /bash page"""
    def get(self):
        # Emits a bash snippet (intended to be `source`d by clients) that
        # defines one connection function per registered server, plus
        # .global/.local/.webmux/.sabanet variants for each transport.
        global server_list
        commands = "#webmuxbash\n"
        # Add some helpful tools at the beginning
        commands += """
# Helper function to see if we're on the same global subnet or not,
# (just checks if the X's are the same in X.X.X.Z, this is good enough
# 99% of the time)
same_global_subnet() {
if [[ -z "${GLOBAL_IP}" ]]; then
GLOBAL_IP="$(curl -s http://whatismyip.akamai.com)"
fi
[[ ${GLOBAL_IP%.*} == ${1%.*} ]]
}
# Check if an interface is "up"
if_up()
{
if [[ $(uname 2>/dev/null) == "Darwin" ]]; then
[[ -n $(ifconfig "$1" 2>/dev/null | grep -e "flags=.*UP[,>]") ]]
else
[[ -n $(ip address show "$1" up 2>/dev/null) ]]
fi
}
wireguard_up() { if_up $(wg show interfaces 2>/dev/null); }
"""
        for name in server_list:
            s = server_list[name]
            # Wraps a raw ssh command line in a bash function that sets the
            # terminal title around the session (title/tmux_escape are
            # expected to exist on the client).
            build_command = lambda name, prog: "function %s() { title %s; tmux_escape %s \"$@\"; title; }\n"%(name, name, prog)
            ssh_cmd = "ssh -A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
            # Add .global for connecting to global host IP directly
            prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['global_ip'])
            commands += build_command(name+".global", prog)
            # Add .local for connecting to local host IP directly
            prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], s['local_ip'])
            commands += build_command(name+".local", prog)
            # Add .webmux command for connecting to webmux reverse-tunnel
            prog = ssh_cmd + "-p %d %s@webmux.e.ip.saba.us"%(s['webmux_port'], s['user'])
            commands += build_command(name+".webmux", prog)
            # Add .sabanet command for connecting over wireguard
            prog = ssh_cmd + "-p %d %s@%s"%(s['host_port'], s['user'], sabanetify(name))
            commands += build_command(name+".sabanet", prog)
            # Dispatcher: prefer wireguard, then same-subnet local, then the
            # webmux reverse tunnel as the fallback.
            commands += """
function %s()
{
if wireguard_up; then
%s.sabanet "$@";
elif same_global_subnet "%s"; then
%s.local "$@";
else
%s.webmux "$@";
fi;
}
"""%(name, name, s['global_ip'], name, name)
        # Indentation inside the generated script is irrelevant: every line
        # is lstripped before being served.
        self.write('\n'.join([l.lstrip() for l in commands.split('\n')]))
if __name__ == "__main__":
    # Parse things like --loglevel
    tornado.options.parse_command_line()

    # One shared terminal manager, keyed by webmux port.
    manager = WebmuxTermManager(shell_command=["echo"], max_terminals=100)
    routes = [
        (r"/", IndexPageHandler),
        (r"/bash", BashPageHandler),
        (r"/reset", ResetPageHandler),
        (r"/register", RegistrationPageHandler),
        (r"/_websocket/(\w+)", terminado.TermSocket, {'term_manager': manager}),
        (r"/shell/([\d]+)/?", TerminalPageHandler),
        (r"/webmux_static/(.*)", tornado.web.StaticFileHandler, {'path': os.path.join(TEMPLATE_DIR, "webmux_static")}),
    ]
    app = tornado.web.Application(routes, static_path=STATIC_DIR,
                                  template_path=TEMPLATE_DIR,
                                  term_manager=manager, debug=True)
    app.listen(8888)

    try:
        # If we restarted or something, then be sure to cause all tunnels to reconnect
        reset_server_list()
        ssh_procs = kill_all_tunnels()
        logging.info("Killed %d SSH tunnels"%(len(ssh_procs)))
        logging.info("All systems operational, commander")
        IOLoop.current().start()
    except KeyboardInterrupt:
        logging.info("\nShutting down due to SIGINT")
    finally:
        manager.shutdown()
        IOLoop.current().close()
| [
"staticfloat@gmail.com"
] | staticfloat@gmail.com |
c80ae0ca01e06a0f390815cba89694ab583cb5b0 | 2edaaa8e2d11ac9ec02f3949e684fb5037719fbf | /Python/10 Days of Statistics/Central Limit Theorem 3.py | 9e56f0a395082cb5f24971407aca8733b87599f1 | [] | no_license | vipulsingh24/Hacker-Rank | 5419fb9b29780ad59fea96121a0d0888f1cdc152 | 789d72f5c3f6bf1536ab44c460c59733065823b7 | refs/heads/master | 2020-03-18T15:17:49.542451 | 2018-07-01T16:03:19 | 2018-07-01T16:03:19 | 134,899,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | '''
You have a sample of 100 values from a population with mean 500 and with standard deviation 80.
Compute the interval that covers the middle 95% of the distribution of the sample mean; in other words,
compute A and B such that P(A < x < B) = 0.95. Use the value of z = 1.96.
'''
import math
n = 100    # sample size
m = 500    # population mean
sd = 80    # population standard deviation
z = 1.96   # z-score covering the middle 95% of the distribution
moe = z * (sd / math.sqrt(n))  # margin of error of the sample mean
# Interval endpoints A (lower) and B (upper) such that P(A < x < B) = 0.95.
lower = round(m - moe, 2)
upper = round(m + moe, 2)
print(lower)
print(upper)
| [
"letsmailvipul@gmail.com"
] | letsmailvipul@gmail.com |
c96c4f6fc13242502ce3b163d701be75a220f796 | 97dae48fa3c613a84655c1c0b12cdc0db2c555bb | /algorithm/bitwise/add_two.py | de1cbc069f8c30b9d39c0879e14027c02d41b4b8 | [] | no_license | klknet/geeks4geeks | 6aa5841b15be41057dc987524721ea1ea37e02ea | d7d9099af7617a4000f38c75d2c7214bed570eda | refs/heads/master | 2021-07-12T06:34:30.048691 | 2020-06-22T07:51:14 | 2020-06-22T07:51:14 | 170,288,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | """
Add two numbers without using arithmetic operators.
"""
def add_two(x, y):
    """Add two non-negative integers using only bitwise operations.

    XOR produces the carry-less partial sum; AND shifted left produces the
    carries.  Repeat until no carries remain.
    NOTE(review): with Python's unbounded ints a negative carry chain never
    terminates, so this assumes non-negative inputs -- confirm callers.
    """
    total, carry = x, y
    while carry:
        total, carry = total ^ carry, (total & carry) << 1
    return total
def add_two_recur(x, y):
    """Recursive bitwise addition of two non-negative integers.

    Base case: nothing left to carry.  Otherwise recurse with the carry-less
    partial sum (x ^ y) and the carries ((x & y) << 1).
    """
    if y == 0:
        return x
    # BUG FIX: this previously called add_two() instead of recursing,
    # making it a thin wrapper around the iterative version rather than a
    # self-contained recursive implementation.
    return add_two_recur(x ^ y, (x & y) << 1)
def smallest(x, y, z):
    # Smallest of three values, expressed as nested pairwise minima.
    # Relies on my_min below, which is branchless but only valid when the
    # pairwise differences fit in 32 bits.
    return my_min(x, my_min(y, z))
def my_min(x, y):
    """Branchless minimum of two integers.

    (x - y) >> 31 is -1 (all ones) when x < y and 0 otherwise, provided the
    difference fits in 32 bits; masking the difference with that value
    selects either (x - y) or 0 to add to y.
    """
    diff = x - y
    return y + (diff & (diff >> 31))
# Quick smoke tests exercising the helpers above.
print(add_two(13, 19))
print(add_two_recur(13, 19))
print(smallest(12, -19, 2))
| [
"konglk@aliyun.com"
] | konglk@aliyun.com |
9e8d1c9052e9fd9c61e954143162bfaeaf8a867a | 222c5f0e36717a053bcfd61c7fcfd1e2975d52ad | /mypackage/test/test_mymodule.py | 73a50bc5152b51abd3e56f088075401b1e80ac4b | [] | no_license | ParmEd/test-jenkins-project | ea6ce801bb03d474323294a2c9c9b005736582ed | d05e12a06ea1acc90b6ff1382da7fdb536439454 | refs/heads/master | 2021-06-11T14:39:52.533680 | 2016-12-26T21:03:44 | 2016-12-26T21:03:44 | 77,406,843 | 0 | 0 | null | 2017-02-05T22:25:06 | 2016-12-26T21:04:22 | Python | UTF-8 | Python | false | false | 199 | py | from __future__ import absolute_import
from nose.tools import assert_equal
from ..mymodule import myfunction
def test_my_function():
    """ Tests my function """
    # myfunction takes no arguments and is expected to return 0.
    assert_equal(myfunction(), 0)
| [
"jason.swails@gmail.com"
] | jason.swails@gmail.com |
0a2ba5e6111f6bab5f5212a505cdb7146d25f4f4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/62/usersdata/195/32504/submittedfiles/ex1.py | 88cadca3ab6339c897432a8160b8d83ee8e60d87 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | from_future_ import division
# Read the three coefficients of a*x^2 + b*x + c = 0.
a = float(input('Digite a:'))
b = float(input('Digite b:'))
c = float(input('Digite c:'))
delta = (b*b) - (4*a*c)
if delta >= 0:
    # BUG FIX: the original wrote (-b + sqrt(delta))/2*a, which divides by 2
    # and then MULTIPLIES by a; the quadratic formula divides by (2*a).
    x1 = (-b + delta**(1/2)) / (2*a)
    x2 = (-b - delta**(1/2)) / (2*a)
    print('X1:%.2f' % x1)
    print('X2:%.2f' % x2)
else:
    # Negative discriminant: no real roots.
    print('SSR')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2f758c6bccce92a39ee512572b55a09aa2a0f479 | 2c7ae872d789701fafdeada20d0df8f07fd931c6 | /examples/12_write_program_scope.py | d45fb943d2329b2e76ccde63dcb059f254052435 | [
"Apache-2.0"
] | permissive | Ozsqhbj/pylogix | 55b78deb9c730e52f15bfa65d27844cd3bc3e12b | d6774690478334983d3695b367bd67233dc529d7 | refs/heads/master | 2022-11-08T11:40:58.428297 | 2022-11-01T18:37:17 | 2022-11-01T18:37:17 | 166,840,261 | 1 | 1 | Apache-2.0 | 2019-01-21T15:53:11 | 2019-01-21T15:53:11 | null | UTF-8 | Python | false | false | 626 | py | '''
the following import is only necessary because eip.py is not in this directory
'''
import sys
sys.path.append('..')
'''
Write a program scoped tag
I have a program named "MiscHMI" in my main task.
In MiscHMI, the tag I'm reading will be TimeArray[0]
You have to specify that the tag will be program scoped
by prefixing the tag name with "Program:" at the beginning,
then add the program name, finally the tag name. So our
example will look like this:
Program:MiscHMI.TimeArray[0]
'''
from pylogix import PLC
# Open a connection to the PLC, write 2019 into the program-scoped tag,
# and let the context manager close the connection on exit.
with PLC() as comm:
    comm.IPAddress = '192.168.1.9'  # address of the target PLC
    comm.Write('Program:MiscHMI.TimeArray[0]', 2019)
| [
"dmroeder@gmail.com"
] | dmroeder@gmail.com |
32efff5a989beba8a65027873b832074988be3d1 | 5125880d0b7af5cc86ab65f839f5db0d5cff9640 | /python/dex/__init__.py | 6850c436d32d6d30002244b9e4128e1f497b1430 | [
"BSD-3-Clause"
] | permissive | stjordanis/dex-lang | 8d9582f96cc02bf631cf325f3dde9a729ed9941e | d8540257c8e00c9d9e86f4b53190052b5a145b68 | refs/heads/main | 2023-02-02T12:12:42.068628 | 2020-12-19T21:57:04 | 2020-12-19T21:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,032 | py | # Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import itertools as it
import ctypes
import pathlib
import atexit
from enum import Enum
from typing import List
# NOTE(review): __all__ advertises 'execute', but no such name is defined in
# this module -- `from dex import *` would raise AttributeError; confirm the
# intended public API.
__all__ = ['execute']

# Locate and load the native Dex shared library shipped next to this file.
here = pathlib.Path(__file__).parent.absolute()
lib = ctypes.cdll.LoadLibrary(here / 'libDex.so')
def tagged_union(name: str, members: List[type]):
    """Build a ctypes struct modelling a Haskell-style tagged union.

    The resulting struct carries a 64-bit `tag` followed by a C union of the
    given member types (fields named t0, t1, ...); the `value` property
    dereferences whichever member the current tag selects.
    """
    fields = []
    for index, member in enumerate(members):
        fields.append((f"t{index}", member))
    payload_cls = type(name + "Payload", (ctypes.Union,), {"_fields_": fields})

    def _current(self):
        return getattr(self.payload, f"t{self.tag}")

    return type(name, (ctypes.Structure,), {
        "_fields_": [("tag", ctypes.c_uint64), ("payload", payload_cls)],
        "value": property(_current),
    })
# Literal scalar payloads supported by the Dex C API (i64/i32/i8/f64/f32).
CLit = tagged_union("Lit", [ctypes.c_int64, ctypes.c_int32, ctypes.c_int8, ctypes.c_double, ctypes.c_float])

class CRectArray(ctypes.Structure):
    # C-side view of a rectangular array: a raw data pointer plus pointers
    # to per-dimension shape and stride arrays (int64 each).
    _fields_ = [("data", ctypes.c_void_p),
                ("shape_ptr", ctypes.POINTER(ctypes.c_int64)),
                ("strides_ptr", ctypes.POINTER(ctypes.c_int64))]

# A Dex atom crossing the C boundary is either a literal or a rect array.
CAtom = tagged_union("CAtom", [CLit, CRectArray])
# The C side assumes this exact layout (tag word + 3-word payload).
assert ctypes.sizeof(CAtom) == 4 * 8

# Opaque handles owned by the Haskell runtime; only ever used via pointers.
class HsAtom(ctypes.Structure): pass
class HsContext(ctypes.Structure): pass
# ctypes prototypes for the Dex C API.  Convention: the Python-side `_foo`
# wraps the native `dexFoo` entry point.

# Runtime start/stop.
_init = lib.dexInit
_init.restype = None
_init.argtypes = []
_fini = lib.dexFini
_fini.restype = None
_fini.argtypes = []

# Context lifecycle.
_create_context = lib.dexCreateContext
_create_context.restype = ctypes.POINTER(HsContext)
_create_context.argtypes = []
_destroy_context = lib.dexDestroyContext
_destroy_context.restype = None
_destroy_context.argtypes = [ctypes.POINTER(HsContext)]

# Pretty-print an atom.
_print = lib.dexPrint
_print.restype = ctypes.c_char_p
_print.argtypes = [ctypes.POINTER(HsAtom)]

# Bind a name to an atom, producing a new context.
_insert = lib.dexInsert
_insert.restype = ctypes.POINTER(HsContext)
_insert.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p, ctypes.POINTER(HsAtom)]

# Evaluate source (whole module) / a single expression.
_eval = lib.dexEval
_eval.restype = ctypes.POINTER(HsContext)
_eval.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p]
_evalExpr = lib.dexEvalExpr
_evalExpr.restype = ctypes.POINTER(HsAtom)
_evalExpr.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p]

# Look up a name in a context.
_lookup = lib.dexLookup
_lookup.restype = ctypes.POINTER(HsAtom)
_lookup.argtypes = [ctypes.POINTER(HsContext), ctypes.c_char_p]

# Convert an atom into the CAtom struct above; returns nonzero on success.
_toCAtom = lib.dexToCAtom
_toCAtom.restype = ctypes.c_int
_toCAtom.argtypes = [ctypes.POINTER(HsAtom), ctypes.POINTER(CAtom)]

# Fetch the last error message recorded by the library.
_getError = lib.dexGetError
_getError.restype = ctypes.c_char_p
_getError.argtypes = []
# Start the Haskell runtime as soon as the module is imported.
_init()
# Flag flipped once the RTS is down so __del__ methods stop freeing handles.
_nofree = False

@atexit.register
def _teardown():
    """Shut down the Haskell runtime at interpreter exit."""
    global _nofree
    _fini()
    _nofree = True  # Don't destruct any Haskell objects after the RTS has been shutdown
def _as_cstr(x: str):
    """Encode a Python str as an ASCII ctypes char pointer."""
    encoded = x.encode('ascii')
    return ctypes.c_char_p(encoded)
def _from_cstr(cx):
    """Decode the bytes behind a ctypes char pointer into a Python str."""
    raw = cx.value
    return raw.decode('ascii')
class Module:
    """A Dex module: wraps the HsContext produced by evaluating `source`
    on top of the prelude.  Attribute access looks names up in the module
    and returns them as Atoms.
    """
    __slots__ = ('_as_parameter_',)
    def __init__(self, source):
        # _as_parameter_ lets instances be passed directly to ctypes calls.
        self._as_parameter_ = _eval(prelude, _as_cstr(source))
        if not self._as_parameter_:
            raise RuntimeError(_from_cstr(_getError()))
    def __del__(self):
        if _nofree:
            # The Haskell RTS has already shut down; freeing would crash.
            return
        _destroy_context(self)
    def __getattr__(self, name):
        # Look the name up in this module's context; a null result means
        # the library recorded an error for us to surface.
        result = _lookup(self, _as_cstr(name))
        if not result:
            raise RuntimeError(_from_cstr(_getError()))
        return Atom(result, self)
class Prelude(Module):
    """The root module: a fresh context rather than one evaluated on top of
    an existing prelude."""
    __slots__ = ()
    def __init__(self):
        # Bypasses Module.__init__ on purpose: the prelude IS the base context.
        self._as_parameter_ = _create_context()
        if not self._as_parameter_:
            raise RuntimeError("Failed to initialize prelude!")

# Singleton prelude shared by every Module and by eval() below.
prelude = Prelude()
def eval(expr: str, module=prelude, _env=None):
    """Evaluate a Dex expression and wrap the result in an Atom.

    `_env` is an internal override used by Atom.__call__ to evaluate inside
    a context that already contains the call arguments.
    NOTE(review): this shadows the builtin `eval` within this module --
    renaming would change the public API, so it is left as-is.
    """
    if _env is None:
        _env = module
    result = _evalExpr(_env, _as_cstr(expr))
    if not result:
        raise RuntimeError(_from_cstr(_getError()))
    return Atom(result, module)
class Atom:
    """A value living in the Dex runtime, tied to the module it came from."""
    __slots__ = ('_as_parameter_', 'module')
    def __init__(self, ptr, module):
        # _as_parameter_ lets the atom be passed straight into ctypes calls.
        self._as_parameter_ = ptr
        self.module = module
    def __del__(self):
        # TODO: Free
        pass
    def __repr__(self):
        return _print(self).decode('ascii')
    def __int__(self):
        return int(self._as_scalar())
    def __float__(self):
        return float(self._as_scalar())
    def _as_scalar(self):
        # Marshal the atom through the C boundary and require a literal.
        result = CAtom()
        success = _toCAtom(self, ctypes.pointer(result))
        if not success:
            raise RuntimeError(_from_cstr(_getError()))
        value = result.value
        if not isinstance(value, CLit):
            raise TypeError("Atom is not a scalar value")
        return value.value
    def __call__(self, *args):
        # TODO: Make those calls more hygenic
        # Bind self and each argument as python_arg0..N in successive
        # contexts, then evaluate the juxtaposition "python_arg0 ... argN".
        env = self.module
        for i, atom in enumerate(it.chain((self,), args)):
            # NB: Atoms can contain arbitrary references
            if atom.module is not prelude and atom.module is not self.module:
                raise RuntimeError("Mixing atoms coming from different Dex modules is not supported yet!")
            old_env, env = env, _insert(env, _as_cstr(f"python_arg{i}"), atom)
            # NOTE(review): on the first iteration old_env is the module's
            # own context, which its __del__ will destroy again -- confirm
            # this is not a double free.
            _destroy_context(old_env)
        return eval(" ".join(f"python_arg{i}" for i in range(len(args) + 1)), module=self.module, _env=env)
| [
"adam.paszke@gmail.com"
] | adam.paszke@gmail.com |
ca32970d230cf2969c530014e4f7b5cc8096dc67 | c057d05c60521096897f120c598b3ebcddb07c21 | /test/test_audit_category.py | 221b3f250fc45e5ddc03d3978cc922a8211c3bef | [] | no_license | ottomata/fec_python | 97e9ae13b37dc0e26421f7d3c682fcfb1753db4f | 70a50c0c1c90dd553a81f7b450b037646dfe4cb3 | refs/heads/master | 2022-09-27T03:22:57.054350 | 2020-06-05T16:48:59 | 2020-06-05T16:48:59 | 269,473,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,466 | py | # coding: utf-8
"""
OpenFEC
This API allows you to explore the way candidates and committees fund their campaigns. The FEC API is a RESTful web service supporting full-text and field-specific searches on FEC data. [Bulk downloads](https://www.fec.gov/data/advanced/?tab=bulk-data) are available on the current site. Information is tied to the underlying forms by file ID and image ID. Data is updated nightly. There is a lot of data, but a good place to start is to use search to find interesting candidates and committees. Then, you can use their IDs to find report or line item details with the other endpoints. If you are interested in individual donors, check out contributor information in schedule_a. Get an [API key here](https://api.data.gov/signup/). That will enable you to place up to 1,000 calls an hour. Each call is limited to 100 results per page. You can email questions, comments or a request to get a key for 120 calls per minute to [APIinfo@fec.gov](mailto:apiinfo@fec.gov). You can also ask questions and discuss the data in the [FEC data Google Group](https://groups.google.com/forum/#!forum/fec-data). API changes will also be added to this group in advance of the change. The model definitions and schema are available at [/swagger](/swagger/). This is useful for making wrappers and exploring the data. A few restrictions limit the way you can use FEC data. For example, you can’t use contributor lists for commercial purposes or to solicit donations. [Learn more here](https://www.fec.gov/updates/sale-or-use-contributor-information/). [View our source code](https://github.com/fecgov/openFEC). We welcome issues and pull requests! # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.audit_category import AuditCategory # noqa: E501
from swagger_client.rest import ApiException
class TestAuditCategory(unittest.TestCase):
    """AuditCategory unit test stubs"""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testAuditCategory(self):
        """Test AuditCategory"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.audit_category.AuditCategory()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"acotto@gmail.com"
] | acotto@gmail.com |
0a405906f42b2f5c2a7f3d2f861450fc9416b069 | b04183027d7fe10d034092fc5de349dbca4a2851 | /KuegeliFarbe/testServoWithGetCh.py | eb2ef4338e52d994d2b0b9fc766729385b8bc4f8 | [] | no_license | magictimelapse/RaspberryJamSchweiz | b96afb123c9b38de8f8eff8c42e0f28204958762 | 8ad4b30e04dfc081b6af60e8049cb73624e6073d | refs/heads/master | 2021-01-20T21:07:09.302708 | 2018-07-03T17:04:43 | 2018-07-03T17:04:43 | 64,651,932 | 1 | 3 | null | 2017-11-23T20:53:14 | 2016-08-01T08:58:18 | Python | UTF-8 | Python | false | false | 687 | py | from servoControl import auf,zu
from gibPfeilTaste import gibPfeilTaste
import time
# Servo registry: position name (German: right/left/bottom/top) -> PCA
# channel number and current open/closed state (True = open).
servos = {
    "rechts": {"number": 0, "state": False},
    "links": {"number": 1, "state": False},
    "unten": {"number": 2, "state": False},
    "oben": {"number": 3, "state": False}
}
def setServos(servos):
    """Drive every servo to match its recorded state: True -> open (auf),
    False -> closed (zu), pausing briefly after each move."""
    for position in servos:
        channel = servos[position]["number"]
        if servos[position]["state"]:
            auf(channel)
        else:
            zu(channel)
        time.sleep(0.25)
# Establish the initial (all closed) servo positions.
setServos(servos)
# Each arrow key press toggles the matching servo and re-applies all states.
while True:
    pfeil = gibPfeilTaste()
    print (pfeil)
    servos[pfeil]["state"] = not servos[pfeil]["state"]
    #states[number] = not states[number]
    setServos(servos)
| [
"michael.rissi@gmail.com"
] | michael.rissi@gmail.com |
b4bdbce3e32f22cf9feb607922ec0f459cf0538e | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/blink/renderer/core/scroll/DEPS | 7f381c044046741ceff3767fe86986d6fdfcbecd | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 120 | specific_include_rules = {
"mac_scrollbar_animator_impl.h": [
"+ui/native_theme/scrollbar_animator_mac.h",
],
}
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com | |
a6bc37dc9ad5bc6951bca4dc1f04117dbd420531 | 7ab35999f8a9fcdbc153176d50f38857eaf169b3 | /exercises/generators.py | 8a6ede0dbac143539a45d075fd1057a2472367b4 | [] | no_license | teknik-eksjo/iterators | 5a5e54f581f5d2e19fc93d67b975d533899bdc41 | b16ef34343ce28e38db1b8291fdce2e52bb8bcd8 | refs/heads/master | 2021-01-10T02:57:58.891394 | 2017-10-27T07:04:02 | 2017-10-27T07:04:02 | 48,051,745 | 1 | 20 | null | 2017-11-27T12:44:52 | 2015-12-15T15:31:18 | Python | UTF-8 | Python | false | false | 1,785 | py | """Övningar på generators."""
def cubes():
    """Generator yielding the cubes (i ** 3) of the positive integers.

    The underlying series is 1, 2, 3, 4, 5, 6, ...; the values produced are
    therefore 1, 8, 27, 64, 125, 216, ...  The series never ends.
    """
    i = 1
    while True:
        yield i ** 3
        i += 1
def primes():
    """Generator yielding the prime numbers.

    The expected series starts: 2, 3, 5, 7, 11, 13, 17, 19, 23, ...
    """
    found = []  # primes discovered so far, used for trial division
    candidate = 2
    while True:
        if all(candidate % p for p in found):
            found.append(candidate)
            yield candidate
        candidate += 1
def fibonacci():
    """Generator yielding the famous Fibonacci numbers.

    The series starts with 0 and 1; each following number is the sum of the
    two most recent: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ...
    """
    a, b = 0, 1
    while True:
        yield a
        a, b = b, a + b
def alphabet():
    """Generator yielding the names of the Hebrew letters in alphabetical
    order:

    Alef, Bet, Gimel, Dalet, He, Vav, Zayin, Het, Tet, Yod, Kaf, Lamed, Mem,
    Nun, Samekh, Ayin, Pe, Tsadi, Qof, Resh, Shin, Tav
    """
    names = ("Alef", "Bet", "Gimel", "Dalet", "He", "Vav", "Zayin", "Het",
             "Tet", "Yod", "Kaf", "Lamed", "Mem", "Nun", "Samekh", "Ayin",
             "Pe", "Tsadi", "Qof", "Resh", "Shin", "Tav")
    yield from names
def permutations(s):
    """Generator yielding every permutation of the input string.

    For 'abc' the values produced (in itertools order) are:
    'abc', 'acb', 'bac', 'bca', 'cab', 'cba'
    """
    import itertools
    for ordering in itertools.permutations(s):
        yield ''.join(ordering)
def look_and_say():
    """Generator implementing the look-and-say sequence (as digit strings).

    Each term is obtained by reading out the digit runs of the previous one:
    1 is read 'one 1' -> 11; 11 is read 'two 1s' -> 21; 21 is read
    'one 2, one 1' -> 1211; then 111221, 312211, ...
    """
    import itertools
    term = '1'
    while True:
        yield term
        # Describe each run of equal digits as "<count><digit>".
        term = ''.join(str(len(list(run))) + digit
                       for digit, run in itertools.groupby(term))
| [
"linus@etnolit.se"
] | linus@etnolit.se |
5e0e8fb64e239e682842f4872ffa5875bf6193ed | d85f3bfcc7efb3313bd77ba43abbde8527c731d9 | /ch03/ex3-4.py | f0c21790e3b61443d823170f444377a3dd28e0b7 | [] | no_license | freebz/Introducing-Python | 8c62767e88b89eb614abd3ea4cf19aae946f5379 | ecf2082946eac83072328a80ed1e06b416ef5170 | refs/heads/master | 2020-04-08T21:14:42.398462 | 2018-11-29T17:03:11 | 2018-11-29T17:03:11 | 159,736,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | # 3.5 셋
# 3.5 Sets
# 3.5.1 Creating a set: set()
empty_set = set()
empty_set
# set()
even_numbers = {0, 2, 4, 6, 8}
even_numbers
# {0, 2, 4, 6, 8}
odd_numbers = {1, 3, 5, 7, 9}
odd_numbers
# {1, 3, 5, 7, 9}
# 3.5.2 Converting other data types: set()
set( 'letters' )
# {'t', 'l', 's', 'e', 'r'}
set( ['Dasher', 'Dancer', 'Prancer', 'Mason-Dixon'] )
# {'Dancer', 'Dasher', 'Prancer', 'Mason-Dixon'}
set( ('Ummagumma', 'Echoes', 'Atom Heart Mother') )
# {'Atom Heart Mother', 'Ummagumma', 'Echoes'}
set( {'apple': 'red', 'orange': 'orange', 'cheery': 'red'} )
# {'cheery', 'apple', 'orange'}
# 3.5.3 Testing for membership with in
drinks = {
    'martini': {'vodka', 'vermouth'},
    'black russian': {'vodka', 'kahlua'},
    'white russian': {'cream', 'kahlua', 'vodka'},
    'manhattan': {'rye', 'vermouth', 'bitters'},
    'screwdriver': {'orange juice', 'vodka'}
}
for name, contents in drinks.items():
    if 'vodka' in contents:
        print(name)
# martini
# black russian
# white russian
# screwdriver
for name, contents in drinks.items():
    if 'vodka' in contents and not ('vermouth' in contents or
                                    'cream' in contents):
        print(name)
# black russian
# screwdriver
# 3.5.4 Combinations and operators
for name, contents in drinks.items():
    if contents & {'vermouth', 'orange juice'}:
        print(name)
# martini
# manhattan
# screwdriver
for name, contents in drinks.items():
    if 'vodka' in contents and not contents & {'vermouth', 'cream'}:
        print(name)
# black russian
# screwdriver
bruss = drinks['black russian']
wruss = drinks['white russian']
a = {1, 2}
b = {2, 3}
a & b
# {2}
a.intersection(b)
# {2}
bruss & wruss
# {'kahlua', 'vodka'}
a | b
# {1, 2, 3}
a.union(b)
# {1, 2, 3}
bruss | wruss
# {'vodka', 'cream', 'kahlua'}
a - b
# {1}
a.difference(b)
# {1}
bruss - wruss
# set()
wruss - bruss
# {'cream'}
a ^ b
# {1, 3}
a.symmetric_difference(b)
# {1, 3}
bruss ^ wruss
# {'cream'}
a <= b
# False
a.issubset(b)
# False
bruss <= wruss
# True
a <= a
# True
a.issubset(a)
# True
a < b
# False
a < a
# False
bruss < wruss
# True
a >= b
# False
a.issuperset(b)
# False
wruss >= bruss
# True
a >= a
# True
a.issuperset(a)
# True
a > b
# False
wruss > bruss
# True
| [
"freebz@hananet.net"
] | freebz@hananet.net |
b3bc967860631430270fe4a366e8ce79aa492caf | 531caac957596fc623e534bce734ef6b45be0b07 | /tests/operators/dynamic_shape/test_cast.py | 1ebaaf932f5e4f2f4265fd02ac81982bbe3f88da | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | wxyhv/akg | 02e64d81bbb84472e0bf1c57a691b688ea743d6e | fc9b6f5b6fa024da89bf90466a815359ca54015d | refs/heads/master | 2023-03-11T02:59:18.472826 | 2021-02-23T07:44:16 | 2021-02-23T07:44:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,995 | py | import boot
import pytest
def test_cast():
#boot.run("test_resnet50_cast_000", "cast_run", ((64, 128, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_001", "cast_run", ((32, 64, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_002", "cast_run", ((16, 32, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_003", "cast_run", ((4, 16, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_004", "cast_run", ((49, 4, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_005", "cast_run", ((32, 4, 112, 112, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_006", "cast_run", ((32, 4, 56, 56, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_007", "cast_run", ((32, 16, 56, 56, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_008", "cast_run", ((36, 4, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_009", "cast_run", ((4, 4, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_010", "cast_run", ((32, 4, 56, 56, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_011", "cast_run", ((16, 4, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_012", "cast_run", ((32, 16, 56, 56, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_013", "cast_run", ((32, 32, 28, 28, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_014", "cast_run", ((8, 32, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_015", "cast_run", ((72, 8, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_016", "cast_run", ((16, 8, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_017", "cast_run", ((32, 8, 56, 56, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_018", "cast_run", ((32, 8, 56, 56, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_019", "cast_run", ((32, 8, 28, 28, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_020", "cast_run", ((32, 8, 28, 28, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_021", "cast_run", ((32, 8, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_022", "cast_run", ((32, 32, 28, 28, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_023", "cast_run", ((32, 64, 14, 14, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_024", "cast_run", ((16, 64, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_025", "cast_run", ((144, 16, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_026", "cast_run", ((32, 16, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_027", "cast_run", ((32, 16, 28, 28, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_028", "cast_run", ((32, 16, 28, 28, 16), "float32", "float16"), "dynamic")
# Batch of dynamic-shape Cast operator test cases using ResNet-50 tensor shapes.
# NOTE(review): `boot` is defined earlier in this file (not visible in this
# chunk); each call presumably registers/executes one "cast_run" test with
# arguments (test name, runner name, (shape, src_dtype, dst_dtype), "dynamic")
# — confirm against the boot helper's signature.
# The commented-out cases below look temporarily disabled rather than obsolete;
# confirm with the test owner before deleting them.
#boot.run("test_resnet50_cast_029", "cast_run", ((32, 16, 14, 14, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_030", "cast_run", ((32, 16, 14, 14, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_031", "cast_run", ((64, 16, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_032", "cast_run", ((32, 64, 14, 14, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_033", "cast_run", ((32, 128, 7, 7, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_034", "cast_run", ((32, 128, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_035", "cast_run", ((288, 32, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_036", "cast_run", ((64, 32, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_037", "cast_run", ((32, 32, 14, 14, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_038", "cast_run", ((32, 32, 14, 14, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_039", "cast_run", ((32, 32, 7, 7, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_040", "cast_run", ((32, 32, 7, 7, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_041", "cast_run", ((128, 32, 16, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_042", "cast_run", ((32, 128, 7, 7, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_043", "cast_run", ((32, 4, 112, 112, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_044", "cast_run", ((32, 128, 1, 1, 16), "float32", "float16"), "dynamic")
#boot.run("test_resnet50_cast_045", "cast_run", ((32, 2048, 1, 1), "float16", "float32"), "dynamic")
# NOTE(review): numbering is not contiguous (045 -> 048, 075 -> 080); the
# missing case IDs may live elsewhere or were removed — confirm.
#boot.run("test_resnet50_cast_048", "cast_run", ((64, 128, 16, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_049", "cast_run", ((32, 64, 16, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_050", "cast_run", ((16, 32, 16, 16), "float16", "float32"), "dynamic")
#boot.run("test_resnet50_cast_051", "cast_run", ((4, 16, 16, 16), "float16", "float32"), "dynamic")
# Active cases: fp16 <-> fp32 casts over 4-D and 5-D fractal shapes.
boot.run("test_resnet50_cast_052", "cast_run", ((49, 4, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_053", "cast_run", ((36, 4, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_054", "cast_run", ((4, 4, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_055", "cast_run", ((16, 4, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_056", "cast_run", ((8, 32, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_057", "cast_run", ((72, 8, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_058", "cast_run", ((16, 8, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_059", "cast_run", ((32, 8, 56, 56, 16), "float32", "float16"), "dynamic")
boot.run("test_resnet50_cast_060", "cast_run", ((32, 8, 56, 56, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_061", "cast_run", ((32, 8, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_062", "cast_run", ((16, 64, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_063", "cast_run", ((144, 16, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_064", "cast_run", ((32, 16, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_065", "cast_run", ((32, 16, 28, 28, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_066", "cast_run", ((32, 16, 28, 28, 16), "float32", "float16"), "dynamic")
boot.run("test_resnet50_cast_067", "cast_run", ((64, 16, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_068", "cast_run", ((32, 128, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_069", "cast_run", ((288, 32, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_070", "cast_run", ((64, 32, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_071", "cast_run", ((32, 32, 14, 14, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_072", "cast_run", ((32, 32, 14, 14, 16), "float32", "float16"), "dynamic")
boot.run("test_resnet50_cast_073", "cast_run", ((128, 32, 16, 16), "float16", "float32"), "dynamic")
boot.run("test_resnet50_cast_074", "cast_run", ((32, 2048, 1, 1), "float32", "float16"), "dynamic")
boot.run("test_resnet50_cast_075", "cast_run", ((32, 128, 1, 1, 16), "float16", "float32"), "dynamic")
# Only case with a non-float dtype pair (bool -> int32).
boot.run("test_resnet50_cast_080", "cast_run", ((64, 128, 16, 16), "bool", "int32"), "dynamic")
| [
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
e9ebcdf8f07d730698b3da8ca557cdc0dc299750 | 2f63688febd21dc3ae6b19abfa79ad313c820154 | /0939_Minimum_Area_Rectangle/try_2.py | 773cc7d337543910410b251e4b4546fdfb4f8b73 | [] | no_license | novayo/LeetCode | cadd03587ee4ed6e35f60294070165afc1539ac8 | 54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7 | refs/heads/master | 2023-08-14T00:35:15.528520 | 2023-07-30T05:56:05 | 2023-07-30T05:56:05 | 200,248,146 | 8 | 1 | null | 2022-11-19T04:37:54 | 2019-08-02T14:24:19 | Python | UTF-8 | Python | false | false | 995 | py | class Solution:
def minAreaRect(self, points: List[List[int]]) -> int:
# 以x軸為主,去建立一個graph
def buildGraph():
graph = {}
for element in points:
if element[0] in graph:
graph[element[0]].add(element[1])
else:
graph[element[0]] = set()
graph[element[0]].add(element[1])
return graph
graph = buildGraph()
ans = float("inf")
# 找到不同的兩個點(找對角線),如果x1有出現在y2的x上且x2有出現在y1的x上的話,就成立
for e1 in points:
for e2 in points[points.index(e1)+1:]:
if e1[0] != e2[0] and e1[1] != e2[1]:
if e1[1] in graph[e2[0]] and e2[1] in graph[e1[0]]:
ans = min(ans, abs(e1[0] - e2[0]) * abs(e1[1] - e2[1]))
return 0 if ans == float("inf") else ans
| [
"f14051172@gs.ncku.edu.tw"
] | f14051172@gs.ncku.edu.tw |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.