blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77a82b1e7d4bc1d00910f61fcaba9536d4b9e2e4 | 89145800ada60f8d2d1b3200b6f384c1a4f8fff8 | /aparcamientos/migrations/0007_auto_20170513_2045.py | c46410372c612861e5c7097ab767691fd484be12 | [] | no_license | malozanom/X-Serv-Practica-Aparcamientos | 2f8f2cab9b9ca096ab3209d8fa6579aacbdce593 | d6da3af090aef7e8b0d23add7a5ff76f979d0311 | refs/heads/master | 2021-06-24T17:34:40.930085 | 2019-11-04T18:36:38 | 2019-11-04T18:36:38 | 90,887,116 | 0 | 0 | null | 2017-05-10T16:46:30 | 2017-05-10T16:46:30 | null | UTF-8 | Python | false | false | 598 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aparcamientos', '0006_auto_20170513_2038'),
]
operations = [
migrations.AlterField(
model_name='aparcamiento',
name='email',
field=models.EmailField(max_length=254, null=True),
),
migrations.AlterField(
model_name='aparcamiento',
name='telefono',
field=models.CharField(max_length=30, null=True),
),
]
| [
"you@example.com"
] | you@example.com |
6e1a191a96a7f8c72db1a5e68631517295612b77 | 81c5323d6a456479d32c9a2a2b9ab61484dd7922 | /otter/assign/r_adapter/tests.py | a1922198413bc3a05d3eb9c88d3baaf1309b1b53 | [
"BSD-3-Clause"
] | permissive | fperez/otter-grader | c488f339fa6db8577c09bbcba22d919519fb3fb5 | 6c9ded064b7c8bbd115d8ab54330f4400a564b17 | refs/heads/master | 2023-07-27T16:33:33.392295 | 2021-08-26T18:12:18 | 2021-08-26T18:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,289 | py | """
ottr test adapters for Otter Assign
"""
import re
import pprint
import yaml
import nbformat
from collections import namedtuple
from ..constants import BEGIN_TEST_CONFIG_REGEX, END_TEST_CONFIG_REGEX, OTTR_TEST_FILE_TEMPLATE, OTTR_TEST_NAME_REGEX, TEST_REGEX
from ..tests import write_test
from ..utils import get_source, lock
Test = namedtuple('Test', ['name', 'hidden', 'points', 'body', 'success_message', 'failure_message'])
def read_test(cell, question, assignment, rmd=False):
"""
Returns the contents of a test as a ``(name, hidden, body)`` named tuple
Args:
cell (``nbformat.NotebookNode``): a test cell
question (``dict``): question metadata
assignment (``otter.assign.assignment.Assignment``): the assignment configurations
rmd (``bool``, optional): whether the cell is from an Rmd file; if true, the first and last
lines of ``cell``'s source are trimmed, since they should be backtick delimeters
Returns:
``Test``: test named tuple
"""
if rmd:
source = get_source(cell)[1:-1]
else:
source = get_source(cell)
if source[0].lstrip().startswith("#"):
hidden = bool(re.search(r"hidden", source[0], flags=re.IGNORECASE))
else:
hidden = False
i = 0 if hidden else -1
if rmd:
i = 0
if re.match(BEGIN_TEST_CONFIG_REGEX, source[0], flags=re.IGNORECASE):
for i, line in enumerate(source):
if re.match(END_TEST_CONFIG_REGEX, line, flags=re.IGNORECASE):
break
config = yaml.full_load("\n".join(source[1:i]))
assert isinstance(config, dict), f"Invalid test config in cell {cell}"
else:
config = {}
test_name = config.get("name", None)
hidden = config.get("hidden", hidden)
points = config.get("points", None)
success_message = config.get("success_message", None)
failure_message = config.get("failure_message", None)
# for line in lines:
# match = re.match(OTTR_TEST_NAME_REGEX, line)
# if match:
# test_name = match.group(1)
# break
# assert test_name is not None, f"Could not parse test name:\n{cell}"
# TODO: hook up success_message and failure_message
# TODO: add parsing for TEST CONFIG blocks
return Test(test_name, hidden, None, '\n'.join(source[i+1:]), success_message, failure_message)
def gen_test_cell(question, tests, tests_dict, assignment):
"""
Parses a list of test named tuples and creates a single test file. Adds this test file as a value
to ``tests_dict`` with a key corresponding to the test's name, taken from ``question``. Returns
a code cell that runs the check on this test.
Args:
question (``dict``): question metadata
tests (``list`` of ``Test``): tests to be written
tests_dict (``dict``): the tests for this assignment
assignment (``otter.assign.assignment.Assignment``): the assignment configurations
Returns:
``nbformat.NotebookNode``: code cell calling ``ottr::check`` on this test
"""
cell = nbformat.v4.new_code_cell()
cell.source = ['. = ottr::check("tests/{}.R")'.format(question['name'])]
points = question.get('points', len(tests))
if points is None:
points = 1
if isinstance(points, (int, float)):
if points % len(tests) == 0:
points = [points // len(tests) for _ in range(len(tests))]
else:
points = [points / len(tests) for _ in range(len(tests))]
assert isinstance(points, list) and len(points) == len(tests), \
f"Points for question {question['name']} could not be parsed:\n{points}"
# update point values
tests = [tc._replace(points=p) for tc, p in zip(tests, points)]
test = gen_suite(question['name'], tests, points)
tests_dict[question['name']] = test
lock(cell)
return cell
def gen_suite(name, tests, points):
"""
Generates an R-formatted test file for ottr
Args:
name (``str``): the test name
tests (``list`` of ``Test``): the test case named tuples that define this test file
points (``float`` or ``int`` or ``list`` of ``float`` or ``int``): th points per question
Returns:
``str``: the rendered R test file
"""
template_data = {'name': name, 'test_cases': tests}
return OTTR_TEST_FILE_TEMPLATE.render(**template_data)
def remove_hidden_tests_from_dir(nb, test_dir, assignment, use_files=True):
"""
Rewrites test files in a directory to remove hidden tests
Args:
nb (``nbformat.NotebookNode``): the student notebook
test_dir (``pathlib.Path``): path to test files directory
assignment (``otter.assign.assignment.Assignment``): the assignment configurations
use_files (``bool``, optional): ignored for R assignments
"""
for f in test_dir.iterdir():
if f.suffix != '.R':
continue
with open(f) as f2:
test = f2.read()
test = re.sub(r" ottr::TestCase\$new\(\s*hidden = TRUE[\w\W]+?^ \),?", "", test, flags=re.MULTILINE)
test = re.sub(r",(\s* \))", r"\1", test, flags=re.MULTILINE) # removes a trailing comma if present
write_test({}, f, test, use_file=True)
| [
"cpyles@berkeley.edu"
] | cpyles@berkeley.edu |
71e9ae05007db3f56841804f0ffc628cd0c3a41a | 1b48b3980abbe11691310a7f35efef62bc0ae831 | /Qt/_dir/QListWidgetItem.py | f2062d8e84265d8f629e3b90a4e90f97e2ccdfeb | [] | no_license | FXTD-ODYSSEY/MayaScript | 7619b1ebbd664988a553167262c082cd01ab80d5 | 095d6587d6620469e0f1803d59a506682714da17 | refs/heads/master | 2022-11-05T08:37:16.417181 | 2022-10-31T11:50:26 | 2022-10-31T11:50:26 | 224,664,871 | 45 | 11 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | ['ItemType', 'Type', 'UserType', '__class__', '__delattr__', '__dict__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__le__', '__lshift__', '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__rlshift__', '__rrshift__', '__rshift__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'background', 'backgroundColor', 'checkState', 'clone', 'data', 'flags', 'font', 'foreground', 'icon', 'isHidden', 'isSelected', 'listWidget', 'read', 'setBackground', 'setBackgroundColor', 'setCheckState', 'setData', 'setFlags', 'setFont', 'setForeground', 'setHidden', 'setIcon', 'setSelected', 'setSizeHint', 'setStatusTip', 'setText', 'setTextAlignment', 'setTextColor', 'setToolTip', 'setWhatsThis', 'sizeHint', 'statusTip', 'text', 'textAlignment', 'textColor', 'toolTip', 'type', 'whatsThis', 'write']
| [
"timmyliang@tencent.com"
] | timmyliang@tencent.com |
6ecfa4e76bd00ac931f5a23055997abaa3f3c7c0 | 02952ddf96e7960a3faef74485f4ffc12bcf2973 | /projects/node_failure/grayscott_example.py | 70cc7692d34770f851f5fac30d67789e8b4e26c1 | [
"BSD-2-Clause"
] | permissive | danielru/pySDC | 5decca37e1ecea643fe21dac0f978e3fdaa24ac6 | 558b2b4db3aeb97e6a87e41cd4958a8a948af37a | refs/heads/master | 2020-12-25T10:58:57.215298 | 2017-03-21T06:45:59 | 2017-03-21T06:45:59 | 31,062,846 | 0 | 0 | null | 2015-02-20T11:52:33 | 2015-02-20T11:52:33 | null | UTF-8 | Python | false | false | 5,773 | py | import numpy as np
import projects.node_failure.emulate_hard_faults as ft
from projects.node_failure.allinclusive_classic_nonMPI_hard_faults import allinclusive_classic_nonMPI_hard_faults
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.sweeper_classes.generic_LU import generic_LU
from pySDC.implementations.datatype_classes.fenics_mesh import fenics_mesh
from pySDC.implementations.transfer_classes.TransferFenicsMesh import mesh_to_mesh_fenics
from pySDC.implementations.problem_classes.GrayScott_1D_FEniCS_implicit import fenics_grayscott
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_strategies):
"""
This routine generates the heatmaps showing the residual for node failures at different steps and iterations
"""
num_procs = 32
# setup parameters "in time"
t0 = 0
dt = 2.0
Tend = 1280.0
Nsteps = int((Tend - t0) / dt)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-07
level_params['dt'] = dt
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['finter'] = True
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = 'LU'
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# initialize problem parameters
problem_params = dict()
# problem_params['Du'] = 1.0
# problem_params['Dv'] = 0.01
# problem_params['A'] = 0.01
# problem_params['B'] = 0.10
# splitting pulses until steady state
# problem_params['Du'] = 1.0
# problem_params['Dv'] = 0.01
# problem_params['A'] = 0.02
# problem_params['B'] = 0.079
# splitting pulses until steady state
problem_params['Du'] = 1.0
problem_params['Dv'] = 0.01
problem_params['A'] = 0.09
problem_params['B'] = 0.086
problem_params['t0'] = t0 # ugly, but necessary to set up ProblemClass
problem_params['c_nvars'] = [256]
problem_params['family'] = 'CG'
problem_params['order'] = [4]
problem_params['refinements'] = [1, 0]
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = fenics_grayscott # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['dtype_u'] = fenics_mesh # pass data type for u
description['dtype_f'] = fenics_mesh # pass data type for f
description['sweeper_class'] = generic_LU # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_fenics # pass spatial transfer class
description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer
ft.hard_random = 0.03
controller = allinclusive_classic_nonMPI_hard_faults(num_procs=num_procs,
controller_params=controller_params,
description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
for strategy in ft_strategies:
print('------------------------------------------ working on strategy ', strategy)
ft.strategy = strategy
# read in reference data from clean run, will provide reproducable locations for faults
if strategy is not 'NOFAULT':
reffile = np.load('data/PFASST_GRAYSCOTT_stats_hf_NOFAULT_P16.npz')
ft.refdata = reffile['hard_stats']
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# get residuals of the run
extract_stats = filter_stats(stats, type='residual_post_iteration')
# find boundaries for x-,y- and c-axis as well as arrays
maxprocs = 0
maxiter = 0
minres = 0
maxres = -99
for k, v in extract_stats.items():
maxprocs = max(maxprocs, getattr(k, 'process'))
maxiter = max(maxiter, getattr(k, 'iter'))
minres = min(minres, np.log10(v))
maxres = max(maxres, np.log10(v))
# grep residuals and put into array
residual = np.zeros((maxiter, maxprocs + 1))
residual[:] = -99
for k, v in extract_stats.items():
step = getattr(k, 'process')
iter = getattr(k, 'iter')
if iter is not -1:
residual[iter - 1, step] = np.log10(v)
# stats magic: get niter (probably redundant with maxiter)
extract_stats = filter_stats(stats, level=-1, type='niter')
sortedlist_stats = sort_stats(extract_stats, sortby='process')
iter_count = np.zeros(Nsteps)
for item in sortedlist_stats:
iter_count[item[0]] = item[1]
print(iter_count)
np.savez('data/PFASST_GRAYSCOTT_stats_hf_' + ft.strategy + '_P' + str(num_procs), residual=residual,
iter_count=iter_count, hard_stats=ft.hard_stats)
if __name__ == "__main__":
# ft_strategies = ['SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
ft_strategies = ['NOFAULT']
main(ft_strategies=ft_strategies)
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
383a092bc9a555f76149adf252e0ffee79da8587 | c1120d1d6352f35dc988b9874b24cd30f83f2f58 | /jobs_admin/apps.py | 9dc1cab06b32ff7f0fe79c1f909690269471d036 | [] | no_license | andrem122/Invoice-Management | 70032d86cfdfb2ed21479baae3a8057f88b61047 | 7f7a617a39602a656ff54724c344745038f304b4 | refs/heads/master | 2022-12-11T19:19:47.898336 | 2020-10-01T01:39:52 | 2020-10-01T01:39:52 | 120,393,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class JobsAdminConfig(AppConfig):
name = 'jobs_admin'
| [
"andre.mashraghi@gmail.com"
] | andre.mashraghi@gmail.com |
4bc2fe82e33b7056ddfa4db9de4d352059f62de5 | 95aa541b9dcede42e6969947d06a974973dd052a | /ch10_thread_and_subprocess/manpage_server.py | 154595fe1845e23af8d4100d18008086d16f2c64 | [] | no_license | AstinCHOI/book_twisted | a693d5eaf7a826b26671e9e98d167f6808dcc0dc | dc5a5dff1ac432e2fb95670b683628e09d24774e | refs/heads/master | 2016-09-05T16:11:03.361046 | 2015-03-27T07:27:23 | 2015-03-27T07:27:23 | 28,185,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import sys
from twisted.internet import protocol, utils, reactor
from twisted.protocols.basic import LineReceiver
from twisted.python import log
class RunCommand(LineReceiver):
def lineReceived(self, line):
log.msg("Man pages requested for: %s" % (line,))
commands = line.strip().split(" ")
output = utils.getProcessOutput("man", commands, errortoo=True)
output.addCallback(self.writeSuccessResponse)
def writeSuccessResponse(self, result):
self.transport.write(result)
self.transport.loseConnection()
class RunCommandFactory(protocol.Factory):
def buildProtocol(self, addr):
return RunCommand()
log.startLogging(sys.stdout)
reactor.listenTCP(8000, RunCommandFactory())
reactor.run()
| [
"asciineo@gmail.com"
] | asciineo@gmail.com |
41c52d2f3a7f55a45be9dfe6db28d0dc6eccfa11 | b06978b6020ce3240912ba5c131c4f38a86d7996 | /Pycharm_files/Clara_projects/venv/bin/easy_install-3.7 | c2619aa12d88e86faba1383e852d858ca0ce059b | [] | no_license | mn4774jm/PycharmProjects | 95dc8ee6b89a85ba02d4134aa5b5bce11004647b | 886bcf2400abc9a1f797fe98d09241f99fa16322 | refs/heads/master | 2021-08-09T10:20:27.907847 | 2020-09-04T15:21:21 | 2020-09-04T15:21:21 | 219,878,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | 7 | #!/Users/tom/PycharmProjects/Clara_projects/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"mn4774jm@go.minneapolis.edu"
] | mn4774jm@go.minneapolis.edu |
808da2cebeee1cf74826f659edf3a3e4224feeb7 | 54f33f6026b6fb71caba620f4bf39ec48ad76422 | /trazabilidad/migrations/0004_auto_20200707_1259.py | 6674d2af94293ccf727b9c3873faf9e43c2fc171 | [] | no_license | geovanniberdugo/lab7 | 7d30458c64bc32e21c390d4836db76bf894609f5 | 8a945d937864961da3fcd1e8c4dbf7575febc58d | refs/heads/main | 2023-02-13T02:03:19.067815 | 2021-01-15T22:24:35 | 2021-01-15T22:24:35 | 330,033,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # Generated by Django 2.2.13 on 2020-07-07 17:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('trazabilidad', '0003_recepcion_responsable_tecnico'),
]
operations = [
migrations.AlterModelOptions(
name='reporte',
options={'permissions': [('can_generar_informe', 'Puede generar nuevo informe de resultado'), ('can_see_informe_resultados', 'Puede ver informe de resultado regenerado')], 'verbose_name': 'reporte', 'verbose_name_plural': 'reportes'},
),
]
| [
"geovanni.berdugo@gmail.com"
] | geovanni.berdugo@gmail.com |
336baca78251949d3080584964342725bd72e442 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/111/usersdata/224/64138/submittedfiles/av2_p3_m2.py | 6a24b72cb7c12e52bc9bb665c8c885fcad335b6c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # -*- coding: utf-8 -*-
import numpy as np
def L(A):
L=[]
for i in range(0,A.shape[0],1):
soma=0
for j in range(0,A.shape[1],1):
soma=soma+A[i,j]
L.append(soma)
return(L)
def C(A):
C=[]
for j in range(0,A.shape[1],1):
soma2=0
for i in range(0,A.shape[0],1):
soma2=soma2+A[i,j]
return(C)
def TERMO(c):
for i in range(0,len(c),1):
contador=0
for j in range(0,len(c),1):
if c[i]==c[j]:
contador=contador+1
if contador==1:
return i
def numero(Lista,Termo):
for i in range(0,len(Lista),1):
if i!=Termo:
numero=Lista[Termo]-lista[i]
return (Numero)
n=int(input('Dimensão da matriz A: '))
A=np.zeros((n,n))
for i in range(0,A.shape[0],1):
for j in range(0,A.shape[1],1):
A[i,j]=int(input('TERMO: '))
L=L(A)
C=C(A)
v=Termo(L)
w=Termo(C)
h=A[v,w]
m=h-(numero(L,v))
print('%.d' %m)
print('%.d' %h)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
98981c98e568239d5f45a9956b008fa6e08dddc3 | d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25 | /contests_cf/cgr13/a.py | 17700eeffc72067c1d7b3c7ce4a4409baea8f942 | [
"BSD-2-Clause"
] | permissive | stdiorion/competitive-programming | 5020a12b85f1e691ceb0cacd021606a9dc58b72c | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | refs/heads/main | 2023-03-27T01:13:42.691586 | 2021-03-08T08:05:53 | 2021-03-08T08:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | n, Q = map(int, input().split())
a = list(map(int, input().split()))
queries = [tuple(map(int, input().split())) for _ in range(Q)]
sa = sum(a)
for t, q in queries:
if t == 1:
if a[q - 1]:
sa -= 1
else:
sa += 1
a[q - 1] ^= 1
if t == 2:
print(int(sa >= q)) | [
"itkn1900@gmail.com"
] | itkn1900@gmail.com |
f1e1916d5ee16ab930b58883865ded11391e592b | 9d8e2dd4441c50b443390f76c899ad1f46c42c0e | /hacker_rank/warmup/acm_icpc.py | d89e3c9e0e9eacebe4f54393b2719d36db302d06 | [] | no_license | vikramjit-sidhu/algorithms | 186ec32de471386ce0fd6b469403199a5e3bbc6d | cace332fc8e952db76c19e200cc91ec8485ef14f | refs/heads/master | 2021-01-01T16:20:52.071495 | 2015-08-03T17:42:29 | 2015-08-03T17:42:29 | 29,119,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """
Hacker Rank - Algorithms Warmup
ACM ICPC team
https://www.hackerrank.com/challenges/acm-icpc-team
"""
def find_max_skillset(n, person_skills):
""" Using brute force method - O(n^2) """
max_or = 0 #the number representing the max no of topics a pair can have in common, using bitwise or gate
count_max_or = 0 #the number of pairs with max no of topics in common
len_list = len(person_skills)
for i in range(len_list):
for j in range(i+1, len_list):
or_op = bin(person_skills[i] | person_skills[j]).count('1')
if or_op > max_or:
count_max_or = 1
max_or = or_op
elif or_op == max_or:
count_max_or += 1
return (max_or, count_max_or)
def main():
n, m = (int(i) for i in input().strip().split(' '))
person_skills = []
for i in range(n):
person_skills.append(int(input().strip(), 2))
common_skills, count_skills = find_max_skillset(n, person_skills)
print(common_skills)
print(count_skills)
if __name__ == '__main__':
main() | [
"vikram.sidhu.007@gmail.com"
] | vikram.sidhu.007@gmail.com |
71b60f4bbb9e39e2541f0232e385b0b1d2e736a1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03150/s166029107.py | 7912d0c3c96249b3eef4d049d6bd02ec4463ca6b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | S = input()
N = len(S)
for i in range(N):
for j in range(i,N):
if S[:i]+S[j:] == "keyence":
print("YES")
exit()
print("NO")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f5c021dd4ad92eb1323ab6f3c80d395203333111 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/containerservice/azure-mgmt-containerservice/generated_samples/managed_clusters_reset_service_principal_profile.py | 739b2c854e6450765b65166d73c7b8c134a5715c | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,688 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-containerservice
# USAGE
python managed_clusters_reset_service_principal_profile.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ContainerServiceClient(
credential=DefaultAzureCredential(),
subscription_id="subid1",
)
client.managed_clusters.begin_reset_service_principal_profile(
resource_group_name="rg1",
resource_name="clustername1",
parameters={"clientId": "clientid", "secret": "secret"},
).result()
# x-ms-original-file: specification/containerservice/resource-manager/Microsoft.ContainerService/aks/stable/2023-04-01/examples/ManagedClustersResetServicePrincipalProfile.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
a50387cc2103e7bb54d4efc666435bc50c0049b5 | 3c5ae1e6dcd105354971c78ef279420b0b817ffa | /nova-master/nova/version.py | 01b6b47658d7e4e1fb5362ba57ca75420093f641 | [
"Apache-2.0"
] | permissive | Yunzhe-Sun/CentOS-openstack | e9e99360cedd46e9bf7f2f1b2dab22032c6b2aca | e22b8090e7bf748c38448a61793a23ca71ec91d4 | refs/heads/master | 2021-07-15T23:19:15.560323 | 2021-07-07T02:43:01 | 2021-07-07T02:43:01 | 218,758,174 | 0 | 1 | null | 2021-07-07T02:43:02 | 2019-10-31T12:13:49 | Python | UTF-8 | Python | false | false | 2,531 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from nova.openstack.common.gettextutils import _
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr.version.VersionInfo('nova')
version_string = version_info.version_string
def _load_config():
# Don't load in global context, since we can't assume
# these modules are accessible when distutils uses
# this module
import ConfigParser
from oslo.config import cfg
from nova.openstack.common import log as logging
global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
if loaded:
return
loaded = True
cfgfile = cfg.CONF.find_file("release")
if cfgfile is None:
return
try:
cfg = ConfigParser.RawConfigParser()
cfg.read(cfgfile)
NOVA_VENDOR = cfg.get("Nova", "vendor")
if cfg.has_option("Nova", "vendor"):
NOVA_VENDOR = cfg.get("Nova", "vendor")
NOVA_PRODUCT = cfg.get("Nova", "product")
if cfg.has_option("Nova", "product"):
NOVA_PRODUCT = cfg.get("Nova", "product")
NOVA_PACKAGE = cfg.get("Nova", "package")
if cfg.has_option("Nova", "package"):
NOVA_PACKAGE = cfg.get("Nova", "package")
except Exception as ex:
LOG = logging.getLogger(__name__)
LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"),
{'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
_load_config()
return NOVA_VENDOR
def product_string():
_load_config()
return NOVA_PRODUCT
def package_string():
_load_config()
return NOVA_PACKAGE
def version_string_with_package():
if package_string() is None:
return version_info.version_string()
else:
return "%s-%s" % (version_info.version_string(), package_string())
| [
"171194570@qq.com"
] | 171194570@qq.com |
10c80893e0962195bc013fc9cbf7d856336bd164 | 089f35b5a6ba8df05fbd5fe5540235074d36b465 | /python-basic-project/unit03/01.py | 52e904b1ad00339e95e2887544cca2195024058a | [
"MIT"
] | permissive | sangwon9591/learningspoons-bootcamp-finance | e42b5e2793a875a6800dbb21e1bf8793d6a0bf92 | 57a9971d9156e274372918bdea10eda090c65437 | refs/heads/master | 2023-04-15T04:57:30.708424 | 2021-04-21T14:12:05 | 2021-04-21T14:12:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | import pyupbit
price = pyupbit.get_current_price("KRW-BTC")
print(price) | [
"brayden.jo@outlook.com"
] | brayden.jo@outlook.com |
c489de6483130e1719c5238ee2f413453c3d8a12 | 5a281cb78335e06c631181720546f6876005d4e5 | /karbor-1.3.0/karbor/common/opts.py | 141e3d4b1f62c335e989a939c6679f778d7b5c9f | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 3,875 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import karbor.api.common
import karbor.api.v1.protectables
import karbor.api.v1.providers
import karbor.common.config
import karbor.db.api
import karbor.exception
import karbor.service
import karbor.services.operationengine.engine.executors.green_thread_executor as green_thread_executor # noqa
import karbor.services.operationengine.engine.executors.thread_pool_executor as thread_pool_executor # noqa
import karbor.services.operationengine.engine.triggers.timetrigger as time_trigger # noqa
import karbor.services.operationengine.karbor_client
import karbor.services.operationengine.manager
import karbor.services.operationengine.operations.base as base
import karbor.services.protection.clients.cinder
import karbor.services.protection.clients.glance
import karbor.services.protection.clients.manila
import karbor.services.protection.clients.neutron
import karbor.services.protection.clients.nova
import karbor.services.protection.flows.restore
import karbor.services.protection.flows.worker
import karbor.services.protection.manager
import karbor.wsgi.eventlet_server
__all__ = ['list_opts']
_opts = [
('clients_keystone', list(itertools.chain(
karbor.common.config.keystone_client_opts))),
('operationengine', list(itertools.chain(
green_thread_executor.green_thread_executor_opts,
karbor.services.operationengine.manager.trigger_manager_opts))),
('karbor_client', list(itertools.chain(
karbor.common.config.service_client_opts))),
('cinder_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.cinder.cinder_client_opts))),
('glance_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.glance.glance_client_opts))),
('manila_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.manila.manila_client_opts))),
('neutron_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.neutron.neutron_client_opts))),
('nova_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.nova.nova_client_opts))),
('DEFAULT', list(itertools.chain(
karbor.common.config.core_opts,
karbor.common.config.debug_opts,
karbor.common.config.global_opts,
karbor.api.common.api_common_opts,
karbor.api.v1.protectables.query_instance_filters_opts,
karbor.api.v1.providers.query_provider_filters_opts,
karbor.api.v1.providers.query_checkpoint_filters_opts,
karbor.db.api.db_opts,
thread_pool_executor.executor_opts,
time_trigger.time_trigger_opts,
base.record_operation_log_executor_opts,
karbor.services.protection.flows.restore.sync_status_opts,
karbor.services.protection.flows.worker.workflow_opts,
karbor.services.protection.manager.protection_manager_opts,
karbor.wsgi.eventlet_server.socket_opts,
karbor.exception.exc_log_opts,
karbor.service.service_opts)))]
def list_opts():
    """Return every (group name, option list) pair known to this module.

    Each option list is deep-copied so callers may mutate the result
    without affecting the module-level registry.
    """
    copied_pairs = []
    for group_name, group_opts in _opts:
        copied_pairs.append((group_name, copy.deepcopy(group_opts)))
    return copied_pairs
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
2f01de5c5e7a5e29462094ffdd55f586b4adf634 | 5739e542af6b6442491a8b077b4f3da1c17a6881 | /tensor2tensor/mesh_tensorflow/mtf_image_transformer.py | ab37e7a4de795124831b5408301f3d9712bf59c1 | [
"Apache-2.0"
] | permissive | scott-gray/tensor2tensor | 3c1acdcc335d86e28ea72bea89fcc18396aa9f56 | 3d418439076c1732b0703b7dbc1b6269aef7c469 | refs/heads/master | 2020-03-26T12:19:56.499115 | 2018-08-15T17:03:38 | 2018-08-15T17:04:13 | 144,887,126 | 5 | 1 | null | 2018-08-15T18:08:49 | 2018-08-15T18:08:49 | null | UTF-8 | Python | false | false | 9,756 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image Transformer model with model and data parallelism using MTF.
Integration of Mesh tensorflow with Image Transformer to do model parallelism.
Currently, this supports unconditional image generation. Specify a particular
architecture layout in the hparams that specifies how different dimensions are
split or replicated along the mesh dimensions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.mesh_tensorflow import mesh_tensorflow as mtf
from tensor2tensor.mesh_tensorflow import mtf_layers
from tensor2tensor.mesh_tensorflow import mtf_model
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_model
class MtfImageTransformer(mtf_model.MtfModel):
  """Transformer in mesh_tensorflow.

  Decoder-only image transformer whose graph is built with mesh
  tensorflow ops, so model/data parallelism is controlled by the
  mesh_shape / layout hparams rather than by the model code itself.
  """

  def set_activation_type(self):
    """Map hparams.activation_dtype (a string) to the tf dtype.

    Returns:
      One of tf.float32, tf.float16 or tf.bfloat16.

    Raises:
      ValueError: if hparams.activation_dtype is not one of the three
        supported strings.
    """
    hparams = self._hparams
    if hparams.activation_dtype == "float32":
      activation_dtype = tf.float32
    elif hparams.activation_dtype == "float16":
      activation_dtype = tf.float16
    elif hparams.activation_dtype == "bfloat16":
      activation_dtype = tf.bfloat16
    else:
      raise ValueError(
          "unknown hparams.activation_dtype %s" % hparams.activation_dtype)
    return activation_dtype

  def mtf_model_fn(self, features, mesh):
    """Build the decoder graph and the training loss on `mesh`.

    Args:
      features: dict of tf.Tensors. "targets" is always read; "inputs"
        is read only when the model has inputs and
        hparams.unconditional is False.
      mesh: the mtf.Mesh on which all mesh-tensorflow tensors are placed.

    Returns:
      A (logits, loss) pair of mtf tensors; loss is the mean softmax
      cross-entropy over the flattened image sequence.
    """
    features = copy.copy(features)
    tf.logging.info("features = %s" % features)
    hparams = self._hparams
    activation_dtype = self.set_activation_type()

    # We assume fixed vocab size for targets
    # NOTE(review): this value is overwritten below
    # (targets_vocab_size = 256 * num_channels), so this first
    # assignment appears unused -- confirm before removing.
    targets_vocab_size = self._problem_hparams.target_modality._vocab_size  # pylint: disable=protected-access
    targets = tf.to_int32(features["targets"])

    # Image preprocessing, reshape into a 1D sequence and shift right.
    length = hparams.img_len*hparams.img_len*hparams.num_channels
    targets = tf.reshape(targets, [hparams.batch_size, length])
    shifted_targets = common_layers.shift_right_2d(targets)

    # Declare all the dimensions
    model_dim = mtf.Dimension("model", hparams.hidden_size)
    batch_dim = mtf.Dimension("batch", hparams.batch_size)
    length_dim = mtf.Dimension("length", length)
    filter_dim = mtf.Dimension("filter_size", hparams.filter_size)
    kv_channels = mtf.Dimension("kv_channels", hparams.d_kv)
    heads = mtf.Dimension("heads", hparams.num_heads)

    def import_to_batch_by_length(x, name):
      # Lift a [batch, length] tf.Tensor into the mesh as an mtf.Tensor.
      return mtf.import_tf_tensor(
          mesh, x, mtf.Shape([batch_dim, length_dim]), name=name)

    def layer_prepostprocess_dropout(x):
      # Dropout with noise broadcast over the length dimension
      # (noise_shape omits length_dim).
      return mtf.dropout(
          x, keep_prob=1.0 - hparams.layer_prepostprocess_dropout,
          noise_shape=mtf.Shape([batch_dim, model_dim]))

    targets = import_to_batch_by_length(targets, "targets")
    shifted_targets = import_to_batch_by_length(
        shifted_targets, "shifted_targets")
    # NOTE(review): nothing in this method ever appends to extra_losses,
    # so the loss-accumulation loop at the bottom is currently a no-op.
    extra_losses = []

    # TODO(nikip): Verify conditional.
    if self.has_input and not hparams.unconditional:
      vocab_size = hparams.num_classes
      inputs_vocab_dim = mtf.Dimension("vocab", vocab_size)
      inputs = tf.squeeze(tf.to_int32(features["inputs"]), [2, 3])
      inputs = import_to_batch_by_length(inputs, "inputs")

      # Input embeddings
      inputs, _ = mtf_layers.embedding(
          inputs, inputs_vocab_dim, model_dim,
          activation_dtype=activation_dtype,
          name="inputs_embedding")

    # Create targets content and position embeddings.
    targets_position = mtf.range(mesh, length_dim, dtype=tf.int32)
    targets_vocab_size = 256 * hparams.num_channels
    targets_vocab_dim = mtf.Dimension("vocab", targets_vocab_size)
    outputs_vocab_dim = mtf.Dimension("output_vocab", 256)

    # Create embedding var for targets and positions and do a gather.
    targets_embedding_var = mtf.get_variable(
        mesh, "targets_embedding",
        mtf.Shape([targets_vocab_dim, model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=activation_dtype)

    # NOTE(review): the positional table is sized by the *vocab*
    # dimension but indexed by positions 0..length-1; confirm that
    # length <= targets_vocab_size, otherwise this gather reads out of
    # range.
    positional_embedding_var = mtf.get_variable(
        mesh, "positional_embedding",
        mtf.Shape([targets_vocab_dim, model_dim]),
        initializer=tf.random_normal_initializer(),
        activation_dtype=activation_dtype)

    x = (mtf.gather(targets_embedding_var, shifted_targets, targets_vocab_dim) +
         mtf.gather(
             positional_embedding_var, targets_position, targets_vocab_dim))

    # Image Transformer Decoder
    # [ self attention - ffn - residual + dropout] x n
    for layer in range(hparams.num_decoder_layers):
      layer_name = "decoder_layer_%d" % layer
      with tf.variable_scope(layer_name):
        # Self attention layer
        x += layer_prepostprocess_dropout(
            mtf_layers.masked_local_attention_1d(
                mtf_layers.layer_norm(x, model_dim, name="layer_norm_self_att"),
                None,
                kv_channels,
                heads,
                block_length=hparams.block_length,
                name="self_att"))
        # ffn layer
        x += layer_prepostprocess_dropout(mtf_layers.dense_relu_dense(
            mtf_layers.layer_norm(x, model_dim, name="layer_norm_ffn"),
            filter_dim, hparams.dropout, dropout_broadcast_dims=[length_dim]))

    x = mtf_layers.layer_norm(x, model_dim, name="decoder_final_layer_norm")

    # Calculate the logits and loss.
    logits = mtf_layers.dense(x, outputs_vocab_dim, name="logits")
    soft_targets = mtf.one_hot(
        targets, outputs_vocab_dim, dtype=activation_dtype)
    loss = mtf_layers.softmax_cross_entropy_with_logits(
        logits, soft_targets, outputs_vocab_dim)
    loss = mtf.reduce_mean(loss)
    for l in extra_losses:
      loss += l
    return logits, loss
@registry.register_hparams
def mtf_image_transformer_base():
  """Base hyperparameter set for the mesh image transformer."""
  hparams = common_hparams.basic_params1()

  # Overrides of hyperparameters that already exist on basic_params1.
  hparams.no_data_parallelism = True
  hparams.use_fixed_batch_size = True
  hparams.batch_size = 1
  hparams.max_length = 256
  hparams.hidden_size = 256
  hparams.label_smoothing = 0.0
  # Share weights between input and target embeddings
  hparams.shared_embedding = True
  # mixture of experts hparams
  hparams.moe_num_experts = 16
  hparams.moe_loss_coef = 1e-3
  hparams.shared_embedding_and_softmax_weights = True
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000

  # Hyperparameters new to this model, registered via add_hparam.
  extra_hparams = [
      # 8-way model-parallelism
      ("mesh_shape", "8"),
      ("layout", "vocab:0;filter_size:0;heads:0"),
      ("num_heads", 8),
      ("filter_size", 512),
      ("num_encoder_layers", 0),
      ("num_decoder_layers", 6),
      ("attention_key_size", 256),
      ("attention_value_size", 256),
      ("ffn_layer", "dense_relu_dense"),
      ("moe_overhead_train", 1.0),
      ("moe_overhead_eval", 2.0),
      ("d_kv", 32),
      # Image related hparams
      ("img_len", 32),
      ("num_channels", 3),
      ("unconditional", True),
      ("block_length", 128),
  ]
  for name, value in extra_hparams:
    hparams.add_hparam(name, value)
  return hparams
@registry.register_hparams
def mtf_image_transformer_tiny():
  """Tiny configuration for catching bugs locally."""
  hparams = mtf_image_transformer_base()
  # Shrink every dimension so the model builds and runs quickly;
  # use both data parallelism and model-parallelism on a 2x2 mesh.
  overrides = [
      ("hidden_size", 128),
      ("filter_size", 256),
      ("batch_size", 4),
      ("num_encoder_layers", 1),
      ("num_decoder_layers", 1),
      ("num_heads", 4),
      ("attention_key_size", 128),
      ("attention_value_size", 128),
      ("mesh_shape", "2.2"),
      ("layout", "batch:0;filter_size:1"),
  ]
  for name, value in overrides:
    setattr(hparams, name, value)
  return hparams
@registry.register_hparams
def mtf_image_transformer_single():
  """Very small model for a single device (no mesh splitting)."""
  hparams = mtf_image_transformer_tiny()
  # Empty mesh_shape / layout disable splitting entirely.
  overrides = [
      ("mesh_shape", ""),
      ("layout", ""),
      ("hidden_size", 32),
      ("filter_size", 32),
      ("batch_size", 1),
      ("num_encoder_layers", 1),
      ("num_decoder_layers", 1),
      ("num_heads", 2),
      ("attention_key_size", 32),
      ("attention_value_size", 32),
      ("block_length", 16),
  ]
  for name, value in overrides:
    setattr(hparams, name, value)
  return hparams
@registry.register_hparams
def mtf_image_transformer_base_single():
  """Base-sized model on a single device (no mesh splitting)."""
  hparams = mtf_image_transformer_base()
  for name, value in [("num_decoder_layers", 6),
                      ("filter_size", 256),
                      ("block_length", 128),
                      ("mesh_shape", ""),
                      ("layout", "")]:
    setattr(hparams, name, value)
  return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_moe():
  """Tiny model with a mixture-of-experts ffn layer on a 4-way mesh."""
  hparams = mtf_image_transformer_tiny()
  for name, value in [("mesh_shape", "4"),
                      ("layout", "batch:0,experts:0"),
                      ("ffn_layer", "moe")]:
    setattr(hparams, name, value)
  return hparams
@registry.register_hparams
def mtf_image_transformer_tiny_8gpu():
  """Tiny model with 8-way model parallelism."""
  hparams = mtf_image_transformer_tiny()
  for name, value in [("mesh_shape", "8"),
                      ("layout", "vocab:0;filter_size:0;heads:0")]:
    setattr(hparams, name, value)
  return hparams
@registry.register_hparams
def mtf_image_transformer_length_sharded():
  """Tiny model with the sequence length split across a 2-way mesh."""
  hparams = mtf_image_transformer_tiny()
  for name, value in [("mesh_shape", "2"), ("layout", "length:0")]:
    setattr(hparams, name, value)
  return hparams
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
d2a1e18f1dbe66e35809ad3d837e3d80d6088ed7 | 2dfec35bafb4a808771b723ca94c102240b5d8f4 | /align/alignProject/lineWriteExcel.py | d830271cf966eca71653032874f36f2d7fa74475 | [] | no_license | mengguiyouziyi/outsourcing | cde7169d94bcdde63091e92f91d140c72c1d184c | 140172ca792c9808cb69251468a14529b84ea380 | refs/heads/master | 2022-11-13T19:55:54.851329 | 2020-06-18T02:51:25 | 2020-06-18T02:51:25 | 273,120,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | import os
import xlwt
def toExcel(file_align, en_sen, zh_sen, xlsx):
"""
根据对齐文件 1 <-> 1,在英中断句文件中找到相对应的句子,写入到excel中
:param file_align: 对齐文件路径
:param en_sen: 英文断句文件路径
:param zh_sen: 中文断句文件路径
:param xlsx: 导出的excel文件路径
:return:
"""
current_path = os.getcwd()
# 筛选对应行号,入 xxx_num_list
align = open(os.path.join(current_path, '../file', file_align), 'r', encoding='utf-8')
src_num_list = []
tgt_num_list = []
for line in align.readlines():
if 'omitted' in line or ',' in line:
continue
line = line.strip()
src_num, tgt_num = line.split(' <=> ')
src_num_list.append(src_num)
tgt_num_list.append(tgt_num)
# 根据行号提取对照文本,入 xxx_list
src_sen_file = open(os.path.join(current_path, '../file', en_sen), 'r', encoding='utf-8')
tgt_sen_file = open(os.path.join(current_path, '../file', zh_sen), 'r', encoding='utf-8')
src_sen_list = src_sen_file.readlines()
src_list = [src_sen_list[int(i) - 1] for i in src_num_list]
tgt_sen_list = tgt_sen_file.readlines()
tgt_list = [tgt_sen_list[int(i) - 1] for i in tgt_num_list]
# 将对照文本写入文件
xlsx_file = os.path.join(current_path, '../file', xlsx)
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('align')
for i, en, zh in zip(range(len(src_list)), src_list, tgt_list):
worksheet.write(i, 0, en.strip())
worksheet.write(i, 1, zh.strip())
workbook.save(xlsx_file)
align.close()
src_sen_file.close()
tgt_sen_file.close()
| [
"775618369@qq.com"
] | 775618369@qq.com |
05a6452019d5174fb690d15af867475d68631d14 | 63bacb52d016cf7a237dacd79ba2861842c49ca9 | /test/test_entity_connections_api.py | a4ae054613ef7893641cedf678af4e98b49895ed | [] | no_license | arundharumar-optimizely/zuora-client-python | ee9667956b32b64b456920ad6246e02528fe6645 | a529a01364e41844c91f39df300c85c8d332912a | refs/heads/master | 2020-07-05T23:09:20.081816 | 2019-07-30T21:46:47 | 2019-07-30T21:46:47 | 202,811,594 | 0 | 0 | null | 2019-08-16T23:26:52 | 2019-08-16T23:26:52 | null | UTF-8 | Python | false | false | 41,022 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Zuora_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Responses_and_Errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | | creditTaxItems | 238.0 and earlier | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\") | Container for the taxation items of the credit memo item. | | taxItems | 238.0 and earlier | [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the debit memo item. | | taxationItems | 239.0 and later | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\"); [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the memo item. 
| #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation, Export ZOQL queries, and Data Query. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Feature | `Feature` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | 
Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Feature | `ProductFeature` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | 
`RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Subscription Product Feature | `SubscriptionProductFeature` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2019-07-26
Contact: docs@zuora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.api.entity_connections_api import EntityConnectionsApi # noqa: E501
from zuora_client.rest import ApiException
class TestEntityConnectionsApi(unittest.TestCase):
"""EntityConnectionsApi unit test stubs"""
def setUp(self):
self.api = zuora_client.api.entity_connections_api.EntityConnectionsApi() # noqa: E501
def tearDown(self):
pass
def test_g_et_entity_connections(self):
"""Test case for g_et_entity_connections
Multi-entity: Get connections # noqa: E501
"""
pass
def test_p_ost_entity_connections(self):
"""Test case for p_ost_entity_connections
Multi-entity: Initiate connection # noqa: E501
"""
pass
def test_p_ut_entity_connections_accept(self):
"""Test case for p_ut_entity_connections_accept
Multi-entity: Accept connection # noqa: E501
"""
pass
def test_p_ut_entity_connections_deny(self):
"""Test case for p_ut_entity_connections_deny
Multi-entity: Deny connection # noqa: E501
"""
pass
def test_p_ut_entity_connections_disconnect(self):
"""Test case for p_ut_entity_connections_disconnect
Multi-entity: Disconnect connection # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"brian.lucas@optimizely.com"
] | brian.lucas@optimizely.com |
cbea139f8853a006a1972d5ee7af4fcd57543eaa | 8be5929368bd987caf744c92f234884bf49d3d42 | /lib/app/reportdatasources/loader.py | 10ddd1e2dc402ed2c6599f4203b634578a5d1d95 | [
"BSD-3-Clause"
] | permissive | oleg-soroka-lt/noc | 9b670d67495f414d78c7080ad75f013ab1bf4dfb | c39422743f52bface39b54d5d915dcd621f83856 | refs/heads/master | 2023-08-21T03:06:32.235570 | 2021-10-20T10:22:02 | 2021-10-20T10:22:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | # ----------------------------------------------------------------------
# ReportDataSource Loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import logging
import os
# NOC modules
from noc.core.loader.base import BaseLoader
from .base import ReportDataSource
logger = logging.getLogger(__name__)
BASE_PREFIX = os.path.join("lib", "app", "reportdatasources")
class ReportDataSourceLoader(BaseLoader):
name = "reportdatasource"
base_cls = ReportDataSource
base_path = ("lib", "app", "reportdatasources")
ignored_names = {"base", "loader"}
# Create singleton object
loader = ReportDataSourceLoader()
| [
"aversanta@gmail.com"
] | aversanta@gmail.com |
2b16643e7e21bbfc7d0c383285dbed507442b4ec | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/accounts/migrations/0230_auto_20190813_0943.py | cad59fc29518cbca8c11ec4722683b2757ed0e2f | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.2.4 on 2019-08-13 07:43
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0229_auto_20190812_1402'),
]
operations = [
migrations.AlterModelManagers(
name='diventiuser',
managers=[
('objects', diventi.accounts.models.DiventiUserManager()),
],
),
]
| [
"flavius476@gmail.com"
] | flavius476@gmail.com |
d89e6017f9e196bd0d483c3a18cb9fb5229046ed | 04683108de1628e0e5286d424296eda7e340f62a | /test/functional/feature_proxy.py | ce5d0fbd4b3014d00e7a3f8f76e222adc4e45ea0 | [
"MIT"
] | permissive | youngseoka/youngseokcoin_dd | 0c52a036c1a724c351676e594f4da71178e2ec59 | ddfa2836901ce0346261118379e116042130d8de | refs/heads/master | 2022-07-11T23:13:14.226723 | 2020-05-14T17:59:07 | 2020-05-14T17:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,440 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Youngseokcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test youngseokcoind with different proxy configuration.
Test plan:
- Start youngseokcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on youngseokcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create youngseokcoinds that connect to them
- Manipulate the youngseokcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import YoungseokcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(YoungseokcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert isinstance(cmd, Socks5Command)
# Note: youngseokcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert isinstance(cmd, Socks5Command)
# Note: youngseokcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("youngseokcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert isinstance(cmd, Socks5Command)
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"youngseokcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert isinstance(cmd, Socks5Command)
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| [
"youngseokaaa@gmail.com"
] | youngseokaaa@gmail.com |
74fa365c1adbad9b80577e126779020c1164fc07 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_085029.80+325447.4/sdB_SDSSJ_085029.80+325447.4_coadd.py | 499208837b41dc63ff2c0793947849f68567605c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[132.624167,32.913167], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_SDSSJ_085029.80+325447.4/sdB_SDSSJ_085029.80+325447.4_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_SDSSJ_085029.80+325447.4/sdB_SDSSJ_085029.80+325447.4_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
f41e8f81b5f3338158a8eb1869dcfb2e36386ba3 | 871b090c3562669c6ff2fc92508419b680443fad | /Booking_Management/edl_management/migrations/0026_auto_20210622_2153.py | b4a828c53fcaeae9de2eabecc4d009fe221f5d14 | [] | no_license | maxcrup007/CLEM | 4ace1a28be7a0463d8f04a04c0e19d991d7cdb70 | 3a1cb35fce426f1826e8764487ad97c631ff59a9 | refs/heads/master | 2023-06-06T00:13:26.663741 | 2021-06-24T17:12:19 | 2021-06-24T17:12:19 | 379,998,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | # Generated by Django 3.0 on 2021-06-22 14:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('edl_management', '0025_auto_20210622_1351'),
]
operations = [
migrations.AlterField(
model_name='bookequipmentletter',
name='project',
field=models.ForeignKey(help_text='โครงการที่ทำอยู่', null=True, on_delete=django.db.models.deletion.CASCADE, to='edl_management.Project'),
),
]
| [
"36732487+maxcrup007@users.noreply.github.com"
] | 36732487+maxcrup007@users.noreply.github.com |
6fe1d3435b2d13a8d15eb7cc8183c60cff221de0 | bd6ae68d882cc40876b4f2906fa522ef807d1d89 | /tepy/openstack/glance/glance/db/__init__.py | e2120dd34c26f8e392620da9be0ff726eb6de972 | [
"Apache-2.0"
] | permissive | bopopescu/Projects-1 | 7ecc1f472f418b701bc40c8a58bab447e678c68f | 1b7fd99bf1ed96adb7a9486daf098947a9208f77 | refs/heads/master | 2022-11-19T18:31:32.863170 | 2018-02-26T02:09:09 | 2018-02-26T02:09:09 | 281,792,443 | 0 | 0 | null | 2020-07-22T22:06:26 | 2020-07-22T22:06:25 | null | UTF-8 | Python | false | false | 34,009 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
from wsme.rest import json
from glance.api.v2.model.metadef_property_type import PropertyType
from glance.common import crypt
from glance.common import exception
from glance.common import location_strategy
import glance.domain
import glance.domain.proxy
from glance.i18n import _
CONF = cfg.CONF
CONF.import_opt('image_size_cap', 'glance.common.config')
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
def get_api():
api = importutils.import_module(CONF.data_api)
if hasattr(api, 'configure'):
api.configure()
return api
def unwrap(db_api):
return db_api
# attributes common to all models
BASE_MODEL_ATTRS = set(['id', 'created_at', 'updated_at', 'deleted_at',
'deleted'])
IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size', 'virtual_size',
'disk_format', 'container_format',
'min_disk', 'min_ram', 'is_public',
'locations', 'checksum', 'owner',
'protected'])
class ImageRepo(object):
def __init__(self, context, db_api):
self.context = context
self.db_api = db_api
def get(self, image_id):
try:
db_api_image = dict(self.db_api.image_get(self.context, image_id))
if db_api_image['deleted']:
raise exception.ImageNotFound()
except (exception.ImageNotFound, exception.Forbidden):
msg = _("No image found with ID %s") % image_id
raise exception.ImageNotFound(msg)
tags = self.db_api.image_tag_get_all(self.context, image_id)
image = self._format_image_from_db(db_api_image, tags)
return ImageProxy(image, self.context, self.db_api)
def list(self, marker=None, limit=None, sort_key=None,
sort_dir=None, filters=None, member_status='accepted'):
sort_key = ['created_at'] if not sort_key else sort_key
sort_dir = ['desc'] if not sort_dir else sort_dir
db_api_images = self.db_api.image_get_all(
self.context, filters=filters, marker=marker, limit=limit,
sort_key=sort_key, sort_dir=sort_dir,
member_status=member_status, return_tag=True)
images = []
for db_api_image in db_api_images:
db_image = dict(db_api_image)
image = self._format_image_from_db(db_image, db_image['tags'])
images.append(image)
return images
def _format_image_from_db(self, db_image, db_tags):
properties = {}
for prop in db_image.pop('properties'):
# NOTE(markwash) db api requires us to filter deleted
if not prop['deleted']:
properties[prop['name']] = prop['value']
locations = [loc for loc in db_image['locations']
if loc['status'] == 'active']
if CONF.metadata_encryption_key:
key = CONF.metadata_encryption_key
for l in locations:
l['url'] = crypt.urlsafe_decrypt(key, l['url'])
return glance.domain.Image(
image_id=db_image['id'],
name=db_image['name'],
status=db_image['status'],
created_at=db_image['created_at'],
updated_at=db_image['updated_at'],
visibility=db_image['visibility'],
min_disk=db_image['min_disk'],
min_ram=db_image['min_ram'],
protected=db_image['protected'],
locations=location_strategy.get_ordered_locations(locations),
checksum=db_image['checksum'],
owner=db_image['owner'],
disk_format=db_image['disk_format'],
container_format=db_image['container_format'],
size=db_image['size'],
virtual_size=db_image['virtual_size'],
extra_properties=properties,
tags=db_tags
)
def _format_image_to_db(self, image):
    """Flatten a domain Image into the dict shape the DB API expects.

    When metadata encryption is enabled the location URLs are
    encrypted before being handed to the DB layer.
    """
    locations = image.locations
    if CONF.metadata_encryption_key:
        key = CONF.metadata_encryption_key
        locations = [{'url': crypt.urlsafe_encrypt(key, loc['url']),
                      'metadata': loc['metadata'],
                      'status': loc['status'],
                      # A brand-new location has no ID assigned yet.
                      'id': loc.get('id')}
                     for loc in locations]
    return {
        'id': image.image_id,
        'name': image.name,
        'status': image.status,
        'created_at': image.created_at,
        'min_disk': image.min_disk,
        'min_ram': image.min_ram,
        'protected': image.protected,
        'locations': locations,
        'checksum': image.checksum,
        'owner': image.owner,
        'disk_format': image.disk_format,
        'container_format': image.container_format,
        'size': image.size,
        'virtual_size': image.virtual_size,
        'visibility': image.visibility,
        'properties': dict(image.extra_properties),
    }
def add(self, image):
    """Insert a new image row (and its tags) and sync timestamps back."""
    values = self._format_image_to_db(image)
    size = values['size']
    if size is not None and size > CONF.image_size_cap:
        raise exception.ImageSizeLimitExceeded
    # _format_image_to_db leaves updated_at out on purpose; creation is
    # the one place it must be seeded explicitly.
    values['updated_at'] = image.updated_at
    created = self.db_api.image_create(self.context, values)
    self.db_api.image_tag_set_all(self.context,
                                  image.image_id, image.tags)
    image.created_at = created['created_at']
    image.updated_at = created['updated_at']
def save(self, image, from_state=None):
    """Persist changes to an existing image, replacing its properties.

    :param from_state: optional expected current status for an atomic
        状态-checked update (passed straight through to the DB API).
    :raises exception.ImageNotFound: if the image is gone or forbidden.
    """
    values = self._format_image_to_db(image)
    size = values['size']
    if size is not None and size > CONF.image_size_cap:
        raise exception.ImageSizeLimitExceeded
    try:
        updated = self.db_api.image_update(self.context,
                                           image.image_id,
                                           values,
                                           purge_props=True,
                                           from_state=from_state)
    except (exception.ImageNotFound, exception.Forbidden):
        msg = _("No image found with ID %s") % image.image_id
        raise exception.ImageNotFound(msg)
    self.db_api.image_tag_set_all(self.context, image.image_id,
                                  image.tags)
    image.updated_at = updated['updated_at']
def remove(self, image):
    """Destroy an image row and refresh the domain object's timestamp."""
    try:
        # Persist the final status before destroying the row.
        self.db_api.image_update(self.context, image.image_id,
                                 {'status': image.status},
                                 purge_props=True)
    except (exception.ImageNotFound, exception.Forbidden):
        msg = _("No image found with ID %s") % image.image_id
        raise exception.ImageNotFound(msg)
    # NOTE(markwash): don't update tags?
    destroyed = self.db_api.image_destroy(self.context, image.image_id)
    image.updated_at = destroyed['updated_at']
class ImageProxy(glance.domain.proxy.Image):
    """Domain Image proxy that also carries the request context and the
    DB API handle, so dependent repositories can be built from it."""

    def __init__(self, image, context, db_api):
        self.context = context
        self.db_api = db_api
        self.image = image
        super(ImageProxy, self).__init__(image)
class ImageMemberRepo(object):
    """Repository for the membership records of a single image.

    Every operation is scoped to ``image``; rows are translated between
    DB dicts and glance.domain.ImageMembership objects.
    """

    def __init__(self, context, db_api, image):
        self.context = context
        self.db_api = db_api
        self.image = image

    def _format_image_member_from_db(self, db_image_member):
        """Convert a member DB row into a domain ImageMembership."""
        return glance.domain.ImageMembership(
            id=db_image_member['id'],
            image_id=db_image_member['image_id'],
            member_id=db_image_member['member'],
            status=db_image_member['status'],
            created_at=db_image_member['created_at'],
            updated_at=db_image_member['updated_at']
        )

    def _format_image_member_to_db(self, image_member):
        """Flatten a domain ImageMembership into DB-API values."""
        image_member = {'image_id': self.image.image_id,
                        'member': image_member.member_id,
                        'status': image_member.status,
                        'created_at': image_member.created_at}
        return image_member

    def list(self):
        """Return all memberships of this image as domain objects."""
        db_members = self.db_api.image_member_find(
            self.context, image_id=self.image.image_id)
        image_members = []
        for db_member in db_members:
            image_members.append(self._format_image_member_from_db(db_member))
        return image_members

    def add(self, image_member):
        """Create (or resurrect) a membership for this image.

        :raises exception.Duplicate: if the member already exists.
        """
        try:
            self.get(image_member.member_id)
        except exception.NotFound:
            pass
        else:
            msg = _('The target member %(member_id)s is already '
                    'associated with image %(image_id)s.') % {
                'member_id': image_member.member_id,
                'image_id': self.image.image_id}
            raise exception.Duplicate(msg)

        image_member_values = self._format_image_member_to_db(image_member)
        # Note(shalq): find the image member including the member marked with
        # deleted. We will use only one record to represent membership between
        # the same image and member. The record of the deleted image member
        # will be reused, if it exists, update its properties instead of
        # creating a new one.
        members = self.db_api.image_member_find(self.context,
                                                image_id=self.image.image_id,
                                                member=image_member.member_id,
                                                include_deleted=True)
        if members:
            new_values = self.db_api.image_member_update(self.context,
                                                         members[0]['id'],
                                                         image_member_values)
        else:
            new_values = self.db_api.image_member_create(self.context,
                                                         image_member_values)
        image_member.created_at = new_values['created_at']
        image_member.updated_at = new_values['updated_at']
        image_member.id = new_values['id']

    def remove(self, image_member):
        """Delete the given membership row."""
        try:
            self.db_api.image_member_delete(self.context, image_member.id)
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified member %s could not be found")
            raise exception.NotFound(msg % image_member.id)

    def save(self, image_member, from_state=None):
        """Persist changes to an existing membership row."""
        image_member_values = self._format_image_member_to_db(image_member)
        try:
            new_values = self.db_api.image_member_update(self.context,
                                                         image_member.id,
                                                         image_member_values)
        except (exception.NotFound, exception.Forbidden):
            # Include the member id for parity with remove(); this used to
            # raise a bare NotFound with no message.
            msg = _("The specified member %s could not be found")
            raise exception.NotFound(msg % image_member.id)
        image_member.updated_at = new_values['updated_at']

    def get(self, member_id):
        """Return the membership for ``member_id`` or raise NotFound."""
        try:
            db_api_image_member = self.db_api.image_member_find(
                self.context,
                self.image.image_id,
                member_id)
            if not db_api_image_member:
                raise exception.NotFound()
        except (exception.NotFound, exception.Forbidden):
            # Same message as remove()/save() for consistency.
            msg = _("The specified member %s could not be found")
            raise exception.NotFound(msg % member_id)
        image_member = self._format_image_member_from_db(
            db_api_image_member[0])
        return image_member
class TaskRepo(object):
    """Repository translating task rows between the DB API and domain."""

    # Domain constructor keyword -> DB column, for full tasks and stubs.
    _TASK_COLUMNS = {'task_id': 'id', 'task_type': 'type',
                     'status': 'status', 'owner': 'owner',
                     'expires_at': 'expires_at', 'created_at': 'created_at',
                     'updated_at': 'updated_at', 'task_input': 'input',
                     'result': 'result', 'message': 'message'}
    _STUB_COLUMNS = {'task_id': 'id', 'task_type': 'type',
                     'status': 'status', 'owner': 'owner',
                     'expires_at': 'expires_at', 'created_at': 'created_at',
                     'updated_at': 'updated_at'}

    def __init__(self, context, db_api):
        self.context = context
        self.db_api = db_api

    def _format_task_from_db(self, db_task):
        """Hydrate a full domain Task from a DB row."""
        kwargs = dict((arg, db_task[col])
                      for arg, col in self._TASK_COLUMNS.items())
        return glance.domain.Task(**kwargs)

    def _format_task_stub_from_db(self, db_task):
        """Hydrate a lightweight TaskStub (no input/result/message)."""
        kwargs = dict((arg, db_task[col])
                      for arg, col in self._STUB_COLUMNS.items())
        return glance.domain.TaskStub(**kwargs)

    def _format_task_to_db(self, task):
        """Flatten a domain task into DB-API values."""
        return {'id': task.task_id,
                'type': task.type,
                'status': task.status,
                'input': task.task_input,
                'result': task.result,
                'owner': task.owner,
                'message': task.message,
                'expires_at': task.expires_at,
                'created_at': task.created_at,
                'updated_at': task.updated_at,
                }

    def get(self, task_id):
        """Fetch one task by id or raise NotFound."""
        try:
            row = self.db_api.task_get(self.context, task_id)
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(_('Could not find task %s') % task_id)
        return self._format_task_from_db(row)

    def list(self, marker=None, limit=None, sort_key='created_at',
             sort_dir='desc', filters=None):
        """Return task stubs matching the paging/filter options."""
        rows = self.db_api.task_get_all(self.context,
                                        filters=filters,
                                        marker=marker,
                                        limit=limit,
                                        sort_key=sort_key,
                                        sort_dir=sort_dir)
        return [self._format_task_stub_from_db(row) for row in rows]

    def save(self, task):
        """Persist changes to an existing task."""
        values = self._format_task_to_db(task)
        try:
            updated = self.db_api.task_update(self.context,
                                              task.task_id,
                                              values)
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(
                _('Could not find task %s') % task.task_id)
        task.updated_at = updated['updated_at']

    def add(self, task):
        """Create a new task row and sync timestamps back."""
        created = self.db_api.task_create(self.context,
                                          self._format_task_to_db(task))
        task.created_at = created['created_at']
        task.updated_at = created['updated_at']

    def remove(self, task):
        """Flush pending changes, then soft-delete the task row."""
        values = self._format_task_to_db(task)
        try:
            self.db_api.task_update(self.context, task.task_id, values)
            deleted = self.db_api.task_delete(self.context,
                                              task.task_id)
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(
                _('Could not find task %s') % task.task_id)
        task.updated_at = deleted['updated_at']
        task.deleted_at = deleted['deleted_at']
class MetadefNamespaceRepo(object):
    """Repository for metadata-definition namespaces."""

    # MetadefNamespace constructor keyword -> DB row column.
    _COLUMNS = {'namespace_id': 'id', 'namespace': 'namespace',
                'display_name': 'display_name',
                'description': 'description', 'owner': 'owner',
                'visibility': 'visibility', 'protected': 'protected',
                'created_at': 'created_at', 'updated_at': 'updated_at'}

    def __init__(self, context, db_api):
        self.context = context
        self.db_api = db_api

    def _format_namespace_from_db(self, namespace_obj):
        """Hydrate a domain MetadefNamespace from a DB row."""
        kwargs = dict((arg, namespace_obj[col])
                      for arg, col in self._COLUMNS.items())
        return glance.domain.MetadefNamespace(**kwargs)

    def _format_namespace_to_db(self, namespace_obj):
        """Flatten a domain namespace into DB-API values."""
        return {
            'namespace': namespace_obj.namespace,
            'display_name': namespace_obj.display_name,
            'description': namespace_obj.description,
            'visibility': namespace_obj.visibility,
            'protected': namespace_obj.protected,
            'owner': namespace_obj.owner
        }

    def add(self, namespace):
        """Create a new namespace row."""
        self.db_api.metadef_namespace_create(
            self.context,
            self._format_namespace_to_db(namespace)
        )

    def get(self, namespace):
        """Fetch one namespace by name or raise NotFound."""
        try:
            row = self.db_api.metadef_namespace_get(self.context,
                                                    namespace)
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(
                _('Could not find namespace %s') % namespace)
        return self._format_namespace_from_db(row)

    def list(self, marker=None, limit=None, sort_key='created_at',
             sort_dir='desc', filters=None):
        """Return namespaces matching the paging/filter options."""
        rows = self.db_api.metadef_namespace_get_all(
            self.context,
            marker=marker,
            limit=limit,
            sort_key=sort_key,
            sort_dir=sort_dir,
            filters=filters
        )
        return [self._format_namespace_from_db(row) for row in rows]

    def remove(self, namespace):
        """Delete the namespace row itself."""
        try:
            self.db_api.metadef_namespace_delete(self.context,
                                                 namespace.namespace)
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified namespace %s could not be found")
            raise exception.NotFound(msg % namespace.namespace)

    def _remove_content(self, namespace, db_delete):
        # Shared guard used by the remove_* content helpers below.
        try:
            db_delete(self.context, namespace.namespace)
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified namespace %s could not be found")
            raise exception.NotFound(msg % namespace.namespace)

    def remove_objects(self, namespace):
        """Delete every object owned by the namespace."""
        self._remove_content(
            namespace, self.db_api.metadef_object_delete_namespace_content)

    def remove_properties(self, namespace):
        """Delete every property owned by the namespace."""
        self._remove_content(
            namespace,
            self.db_api.metadef_property_delete_namespace_content)

    def remove_tags(self, namespace):
        """Delete every tag owned by the namespace."""
        self._remove_content(
            namespace, self.db_api.metadef_tag_delete_namespace_content)

    def object_count(self, namespace_name):
        """Number of metadef objects in the namespace."""
        return self.db_api.metadef_object_count(self.context,
                                                namespace_name)

    def property_count(self, namespace_name):
        """Number of metadef properties in the namespace."""
        return self.db_api.metadef_property_count(self.context,
                                                  namespace_name)

    def save(self, namespace):
        """Persist changes to an existing namespace; returns it."""
        try:
            self.db_api.metadef_namespace_update(
                self.context, namespace.namespace_id,
                self._format_namespace_to_db(namespace)
            )
        except exception.NotFound as e:
            raise exception.NotFound(explanation=e.msg)
        return namespace
class MetadefObjectRepo(object):
    """Repository for metadata-definition objects."""

    def __init__(self, context, db_api):
        self.context = context
        self.db_api = db_api
        self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api)

    def _format_metadef_object_from_db(self, metadata_object,
                                       namespace_entity):
        """Hydrate a domain MetadefObject from a DB row."""
        required_csv = metadata_object['required']
        required_list = required_csv.split(",") if required_csv else []
        # json_schema stores one serialized schema per property name;
        # deserialize each back into a PropertyType instance.
        json_props = metadata_object['json_schema']
        property_types = dict(
            (prop_name, json.fromjson(PropertyType, json_props[prop_name]))
            for prop_name in json_props)
        return glance.domain.MetadefObject(
            namespace=namespace_entity,
            object_id=metadata_object['id'],
            name=metadata_object['name'],
            required=required_list,
            description=metadata_object['description'],
            properties=property_types,
            created_at=metadata_object['created_at'],
            updated_at=metadata_object['updated_at']
        )

    def _format_metadef_object_to_db(self, metadata_object):
        """Flatten a domain MetadefObject into DB-API values."""
        required_csv = (",".join(metadata_object.required) if
                        metadata_object.required else None)
        # Serialize the PropertyType mapping back to JSON strings.
        db_schema = {}
        if metadata_object.properties:
            for prop_name, prop_type in metadata_object.properties.items():
                db_schema[prop_name] = json.tojson(PropertyType, prop_type)
        return {
            'name': metadata_object.name,
            'required': required_csv,
            'description': metadata_object.description,
            'json_schema': db_schema
        }

    def add(self, metadata_object):
        """Create a new object row under its namespace."""
        self.db_api.metadef_object_create(
            self.context,
            metadata_object.namespace,
            self._format_metadef_object_to_db(metadata_object)
        )

    def get(self, namespace, object_name):
        """Fetch one object by namespace and name or raise NotFound."""
        try:
            namespace_entity = self.meta_namespace_repo.get(namespace)
            row = self.db_api.metadef_object_get(self.context,
                                                 namespace,
                                                 object_name)
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(
                _('Could not find metadata object %s') % object_name)
        return self._format_metadef_object_from_db(row, namespace_entity)

    def list(self, marker=None, limit=None, sort_key='created_at',
             sort_dir='desc', filters=None):
        """Return every object in filters['namespace']."""
        namespace = filters['namespace']
        namespace_entity = self.meta_namespace_repo.get(namespace)
        rows = self.db_api.metadef_object_get_all(self.context, namespace)
        return [self._format_metadef_object_from_db(row, namespace_entity)
                for row in rows]

    def remove(self, metadata_object):
        """Delete the object row."""
        try:
            self.db_api.metadef_object_delete(
                self.context,
                metadata_object.namespace.namespace,
                metadata_object.name
            )
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified metadata object %s could not be found")
            raise exception.NotFound(msg % metadata_object.name)

    def save(self, metadata_object):
        """Persist changes to an existing object; returns it."""
        try:
            self.db_api.metadef_object_update(
                self.context, metadata_object.namespace.namespace,
                metadata_object.object_id,
                self._format_metadef_object_to_db(metadata_object))
        except exception.NotFound as e:
            raise exception.NotFound(explanation=e.msg)
        return metadata_object
class MetadefResourceTypeRepo(object):
    """Repository for resource-type associations of metadef namespaces."""

    def __init__(self, context, db_api):
        self.context = context
        self.db_api = db_api
        self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api)

    def _format_resource_type_from_db(self, resource_type, namespace):
        """Hydrate a domain MetadefResourceType from a DB row."""
        return glance.domain.MetadefResourceType(
            namespace=namespace,
            name=resource_type['name'],
            prefix=resource_type['prefix'],
            properties_target=resource_type['properties_target'],
            created_at=resource_type['created_at'],
            updated_at=resource_type['updated_at']
        )

    def _format_resource_type_to_db(self, resource_type):
        """Flatten a domain resource type into DB-API values."""
        return {
            'name': resource_type.name,
            'prefix': resource_type.prefix,
            'properties_target': resource_type.properties_target
        }

    def add(self, resource_type):
        """Associate a resource type with its namespace."""
        self.db_api.metadef_resource_type_association_create(
            self.context, resource_type.namespace,
            self._format_resource_type_to_db(resource_type)
        )

    def get(self, resource_type, namespace):
        """Fetch one association by resource-type and namespace name."""
        namespace_entity = self.meta_namespace_repo.get(namespace)
        row = self.db_api.metadef_resource_type_association_get(
            self.context,
            namespace,
            resource_type
        )
        return self._format_resource_type_from_db(row, namespace_entity)

    def list(self, filters=None):
        """List one namespace's associations, or the global catalog."""
        namespace = filters['namespace']
        if not namespace:
            # No namespace filter: list the global resource-type catalog,
            # which carries no namespace-scoped fields.
            rows = self.db_api.metadef_resource_type_get_all(self.context)
            return [glance.domain.MetadefResourceType(
                namespace=None,
                name=row['name'],
                prefix=None,
                properties_target=None,
                created_at=row['created_at'],
                updated_at=row['updated_at']
            ) for row in rows]
        namespace_entity = self.meta_namespace_repo.get(namespace)
        rows = (
            self.db_api.
            metadef_resource_type_association_get_all_by_namespace(
                self.context,
                namespace
            )
        )
        return [self._format_resource_type_from_db(row, namespace_entity)
                for row in rows]

    def remove(self, resource_type):
        """Delete the association row."""
        try:
            self.db_api.metadef_resource_type_association_delete(
                self.context, resource_type.namespace.namespace,
                resource_type.name)
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified resource type %s could not be found ")
            raise exception.NotFound(msg % resource_type.name)
class MetadefPropertyRepo(object):
    """Repository for namespace-level metadata-definition properties."""

    def __init__(self, context, db_api):
        self.context = context
        self.db_api = db_api
        self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api)

    def _format_metadef_property_from_db(self, property, namespace_entity):
        """Hydrate a domain MetadefProperty from a DB row."""
        return glance.domain.MetadefProperty(
            namespace=namespace_entity,
            property_id=property['id'],
            name=property['name'],
            schema=property['json_schema']
        )

    def _format_metadef_property_to_db(self, property):
        """Flatten a domain property into DB-API values."""
        return {
            'name': property.name,
            'json_schema': property.schema
        }

    def add(self, property):
        """Create one property row under its namespace."""
        self.db_api.metadef_property_create(
            self.context,
            property.namespace,
            self._format_metadef_property_to_db(property)
        )

    def get(self, namespace, property_name):
        """Fetch one property or raise NotFound."""
        try:
            namespace_entity = self.meta_namespace_repo.get(namespace)
            row = self.db_api.metadef_property_get(
                self.context,
                namespace,
                property_name
            )
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(
                _('Could not find property %s') % property_name)
        return self._format_metadef_property_from_db(row, namespace_entity)

    def list(self, marker=None, limit=None, sort_key='created_at',
             sort_dir='desc', filters=None):
        """Return every property in filters['namespace']."""
        namespace = filters['namespace']
        namespace_entity = self.meta_namespace_repo.get(namespace)
        rows = self.db_api.metadef_property_get_all(self.context,
                                                    namespace)
        return [self._format_metadef_property_from_db(row, namespace_entity)
                for row in rows]

    def remove(self, property):
        """Delete the property row."""
        try:
            self.db_api.metadef_property_delete(
                self.context, property.namespace.namespace, property.name)
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified property %s could not be found")
            raise exception.NotFound(msg % property.name)

    def save(self, property):
        """Persist changes to an existing property; returns it."""
        try:
            self.db_api.metadef_property_update(
                self.context, property.namespace.namespace,
                property.property_id,
                self._format_metadef_property_to_db(property)
            )
        except exception.NotFound as e:
            raise exception.NotFound(explanation=e.msg)
        return property
class MetadefTagRepo(object):
    """Repository for metadata-definition tags."""

    def __init__(self, context, db_api):
        self.context = context
        self.db_api = db_api
        self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api)

    def _format_metadef_tag_from_db(self, metadata_tag, namespace_entity):
        """Hydrate a domain MetadefTag from a DB row."""
        return glance.domain.MetadefTag(
            namespace=namespace_entity,
            tag_id=metadata_tag['id'],
            name=metadata_tag['name'],
            created_at=metadata_tag['created_at'],
            updated_at=metadata_tag['updated_at']
        )

    def _format_metadef_tag_to_db(self, metadata_tag):
        """Flatten a domain tag into DB-API values (name only)."""
        return {'name': metadata_tag.name}

    def add(self, metadata_tag):
        """Create one tag row under its namespace."""
        self.db_api.metadef_tag_create(
            self.context,
            metadata_tag.namespace,
            self._format_metadef_tag_to_db(metadata_tag)
        )

    def add_tags(self, metadata_tags):
        """Bulk-create tags under the first tag's namespace."""
        rows = []
        namespace = None
        for tag in metadata_tags:
            if namespace is None:
                # Remember the first namespace we see; all rows go there.
                namespace = tag.namespace
            rows.append(self._format_metadef_tag_to_db(tag))
        self.db_api.metadef_tag_create_tags(
            self.context, namespace, rows)

    def get(self, namespace, name):
        """Fetch one tag or raise NotFound."""
        try:
            namespace_entity = self.meta_namespace_repo.get(namespace)
            row = self.db_api.metadef_tag_get(
                self.context,
                namespace,
                name)
        except (exception.NotFound, exception.Forbidden):
            raise exception.NotFound(
                _('Could not find metadata tag %s') % name)
        return self._format_metadef_tag_from_db(row, namespace_entity)

    def list(self, marker=None, limit=None, sort_key='created_at',
             sort_dir='desc', filters=None):
        """Return tags in filters['namespace'] honoring paging options."""
        namespace = filters['namespace']
        namespace_entity = self.meta_namespace_repo.get(namespace)
        rows = self.db_api.metadef_tag_get_all(
            self.context, namespace, filters, marker, limit, sort_key,
            sort_dir)
        return [self._format_metadef_tag_from_db(row, namespace_entity)
                for row in rows]

    def remove(self, metadata_tag):
        """Delete the tag row."""
        try:
            self.db_api.metadef_tag_delete(
                self.context,
                metadata_tag.namespace.namespace,
                metadata_tag.name
            )
        except (exception.NotFound, exception.Forbidden):
            msg = _("The specified metadata tag %s could not be found")
            raise exception.NotFound(msg % metadata_tag.name)

    def save(self, metadata_tag):
        """Persist changes to an existing tag; returns it."""
        try:
            self.db_api.metadef_tag_update(
                self.context, metadata_tag.namespace.namespace,
                metadata_tag.tag_id,
                self._format_metadef_tag_to_db(metadata_tag))
        except exception.NotFound as e:
            raise exception.NotFound(explanation=e.msg)
        return metadata_tag
| [
"liujicun01@163.com"
] | liujicun01@163.com |
2f55c363d20f9a90e541fbc60990f6901e6e451d | 87274e9dd7cf48645b3bc6baa5e7572b9fcdcb0b | /ceshi/ceshi/spiders/lagou_spider.py | a471e1cd80f359b586c3576741c0fef3563362f2 | [] | no_license | ixiaoxinxin/xiaoxinxinPAPAPA | 1619652812cd1db4caa84528c814f58e3afa1ce4 | ac97c81f24e725ff5f33160b032e2f99c7f1dfe2 | refs/heads/master | 2021-05-02T01:27:20.706064 | 2017-02-05T14:24:13 | 2017-02-05T14:24:13 | 79,033,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.contrib.spiders import Rule,CrawlSpider
from scrapy.selector import Selector
class LagouSpider(Spider):
    """Scrapy (Python 2) spider that scrapes test-engineer job listings
    from lagou.com and prints the raw listing markup."""
    name = "lagou"
    allowed_domains = ["lagou.com"]
    start_urls = [# this attribute must be named exactly 'start_urls'; otherwise scrapy finds no URLs and nothing gets crawled
        "https://www.lagou.com/zhaopin/ceshigongchengshi/",
        # "https://www.lagou.com/zhaopin/zidonghuaceshi/",
        # "https://www.lagou.com/zhaopin/gongnengceshi/",
        # "https://www.lagou.com/zhaopin/xingnengceshi/",
        # "https://www.lagou.com/zhaopin/ceshikaifa/",
        # "https://www.lagou.com/zhaopin/heiheceshi/",
        # "https://www.lagou.com/zhaopin/shoujiceshi/"
        ]
    def parse(self, response):
        # Called once per downloaded listing page.
        #filename = response.url.split("/")[-2]  # use the last path segment as the file name to store the page
        #open(filename,'wb').write(response.body)
        sel = Selector(response)
        sites = sel.xpath('//*[@id="s_position_list"]/ul')
        for site in sites :
            title = site.xpath('//*[@id="s_position_list"]/ul/li').extract()
            #link = site.xpath('a/@href').extract()
            #desc = site.xpath('text()').extract()
            # convert the scraped Chinese results from Unicode escapes to readable utf-8
            #http://blog.csdn.net/cc7756789w/article/details/46049369
            s = str(title).replace('u\'','\'')
            print s.decode("unicode-escape")
            #print title
"295563386@qq.com"
] | 295563386@qq.com |
61d98a99974294a3d48ec28e780ce63457575bf4 | 272a8b0b38e4af5f22dd811040f0ca2b0b111c61 | /exp_scripts/loss_gan_noload3.py | 3d8162cf5e1b818f3e989bbee573ebe95b041078 | [] | no_license | jessemin/GeneGan | 1c1a97b6ab566a7c556ce1452e4c35530b0b626c | 2ad94e842cfaee531d7e13af7472b623bf96de30 | refs/heads/master | 2021-09-13T13:02:33.629138 | 2018-04-30T06:57:13 | 2018-04-30T06:57:13 | 112,046,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import os
os.chdir('../exp_notebooks')
os.system('python loss_gan.py\
-w=2001\
-save=loss_gan_noload_3\
-sample_num=10000\
-g_weight=0.5\
-mse_weight=0.5\
-g_lr=0.001\
-d_lr=0.001\
--smooth_rate=0.1\
-cuda=1')
| [
"jesikmin@stanford.edu"
] | jesikmin@stanford.edu |
dd881acb19392265ebaf229e8e5e36e1235eb54b | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/addons/hw_escpos/__init__.py | b69f45ed25a74e18525c21c133b53d0f06904456 | [
"MIT"
] | permissive | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | # -*- coding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
import controllers
import escpos
| [
"liuganghao@lztogether.com"
] | liuganghao@lztogether.com |
86c561dd371fef267ab970ef768b6ee1bf15dfa5 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_21984.py | d4bc79b9b96d2041bf6af93a6972d7b221585904 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,843 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((541.472, 468.081, 526.313), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((534.285, 504.685, 585.049), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((534.475, 549.831, 653.102), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((651.585, 530.029, 580.799), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((487.693, 652.184, 810.545), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((537.092, 485.931, 568.899), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((537.059, 484.551, 568.004), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((511.366, 472.651, 567.97), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((524.458, 460.941, 590.169), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((550.317, 450.482, 594.362), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((551.634, 430.344, 613.84), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((560.255, 404.426, 607.614), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((526.065, 482.591, 542.821), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((587.709, 325.194, 672.736), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((517.408, 469.655, 795.379), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((517.408, 469.655, 795.379), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((512.378, 464.977, 767.327), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((509.483, 461.965, 738.681), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((509.443, 460.145, 709.858), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((511.895, 462.513, 681.153), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((513.418, 472.682, 654.361), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((512.737, 485.69, 628.776), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((623.239, 358.35, 824.362), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((408.719, 617.7, 430.82), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((494.633, 524.609, 643.664), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((494.633, 524.609, 643.664), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((511.085, 543.632, 629.67), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((531.893, 562.606, 622.692), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((559.099, 560.079, 632.275), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((586.399, 505.311, 523.96), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((541.693, 614.727, 743.925), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((556.134, 511.717, 567.651), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((556.171, 511.738, 567.619), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((542.127, 516.957, 543.489), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((544.351, 489.55, 534.987), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((542.47, 462.147, 542.945), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((538.499, 441.192, 561.584), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((532.39, 423.059, 582.719), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((527.721, 405.445, 604.714), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((469.701, 467.201, 594.455), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((589.394, 343.99, 615.964), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((469.811, 512.591, 585.963), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((494.948, 520.889, 590.272), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((547.785, 540.805, 601.993), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((599.849, 562.16, 613.416), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((604.357, 566.448, 532.654), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((666.213, 587.557, 689.507), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((532.023, 538.244, 489.316), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((531.007, 546.014, 517.079), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((529.318, 552.068, 545.841), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((524.148, 569.479, 570.079), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((519.322, 584.037, 596.64), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((514.925, 595.775, 624.636), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((516.507, 530.963, 581.084), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((510.334, 661.384, 678.1), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
0d8a541cf7e89dc8bfa362e75441297927f3dbdb | 61afe17201589a61c39429602ca11e3fdacf47a9 | /Chapter3/Day23/16.进程间数据的共享_Array.py | c663d6e8677d235e387ba052b83e1d4875630641 | [] | no_license | Liunrestrained/Python- | ec09315c50b395497dd9b0f83219fef6355e9b21 | 6b2cb4ae74c59820c6eabc4b0e98961ef3b941b2 | refs/heads/main | 2023-07-17T14:16:12.084304 | 2021-08-28T14:05:12 | 2021-08-28T14:05:12 | 399,408,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from multiprocessing import Process, Value, Array
def f(data_array):
data_array[0] = 66
if __name__ == '__main__':
arr = Array('i', [11, 22, 33, 44]) # 数组:元素类型必须是int;只能是这么几个数据,不能多,也不能少
p = Process(target=f, args=(arr, ))
p.start()
p.join()
print(arr[:]) # [66, 22, 33, 44]
| [
"noreply@github.com"
] | Liunrestrained.noreply@github.com |
1072aa5aaa01fe24e899ea2b0f8a6e9a903d8087 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/periodicities/Business_Hour/Cycle_Business_Hour_25_BH_12.py | 110c2fc033a09aba57a6284c31765160ac140d2c | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 82 | py | import tests.periodicities.period_test as per
per.buildModel((12 , 'BH' , 25));
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
e890e6a1b3efbd2ecb9561220b3fdbe2a0c6c70d | 3e78bf8a64318c091837cab3c1288cafc3617310 | /scripts/Script.py | c3c3b28229d28ae56f80278c459c443cc3f2cd66 | [] | no_license | eddiedb6/rate | e6887a6f8324e79fafa0ccd75a280dcc883a18b7 | 9c54bf949348d74fe8c7e4b0d3a1abd56b936821 | refs/heads/master | 2022-03-10T12:04:53.025473 | 2019-11-06T10:15:42 | 2019-11-06T10:15:42 | 105,227,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | import sys
import os
import time
# __file__ will be AFW.py in auto/afw
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], "../../scripts"))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], "../../util"))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], "../.."))
from RateConfig import *
from ScriptRateSearch import *
from ScriptRateFetch import *
from DBUtil import *
from DumpUtil import *
# {
# "date": "2016-09-01",
# "currency": 2,
# "rate": 660
# }
rates = []
if ImportPath == "":
rates = FetchExchangeRateForUpdateFromDB()
else:
rates = FetchExchangeRateForUpdateFromFile()
# Open browser
browser = afw.OpenWebBrowser("Browser")
# Fetch each rate from network
for rate in rates:
# Open page
if not browser.OpenURL("URLRate"):
break
time.sleep(SleepSeconds)
if not SearchRate(browser, rate["date"], rate["currency"]):
break
time.sleep(SleepSeconds)
rateString = FetchRate(browser)
if rateString is not None:
rate["rate"] = float(rateString)
browser.Quit()
if ExportPath == "":
UpdateExchangeRateToDB(rates)
else:
UpdateExchangeRateToFile(rates)
# Report
failedRates = []
print("")
for rate in rates:
if "rate" in rate:
print("Get rate: " + str(rate["rate"]) + ", " + rate["date"] + ", " + str(rate["currency"]))
else:
failedRates.append(rate)
print("")
for rate in failedRates:
print("Failed to get rate: " + rate["date"] + ", " + str(rate["currency"]))
| [
"eddiedb6@gmail.com"
] | eddiedb6@gmail.com |
f610ac4877ded41728b8e65579ba101a3dbe0b45 | 7f31d42f80dd93d6f18baed192125991b3645d66 | /examples/scripts/x86/check_constraint1.py | d96398bde35cf6fda16fd4ffcb7f772c978a822b | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | jmorse/barf-project | e183aba9ab99d0b742c320cba8169704441777ed | 7b336f9747216b44773f01a57bafbe4cbdf3c838 | refs/heads/master | 2021-05-08T07:02:07.175186 | 2017-10-17T15:51:47 | 2017-10-17T15:51:47 | 106,693,149 | 0 | 0 | null | 2017-10-12T12:59:04 | 2017-10-12T12:59:03 | null | UTF-8 | Python | false | false | 2,459 | py | #! /usr/bin/env python
from barf import BARF
if __name__ == "__main__":
#
# Open file
#
barf = BARF("../../samples/bin/constraint1.x86")
#
# Check constraint
#
# 80483ed: 55 push ebp
# 80483ee: 89 e5 mov ebp,esp
# 80483f0: 83 ec 10 sub esp,0x10
# 80483f3: 8b 45 f8 mov eax,DWORD PTR [ebp-0x8]
# 80483f6: 8b 55 f4 mov edx,DWORD PTR [ebp-0xc]
# 80483f9: 01 d0 add eax,edx
# 80483fb: 83 c0 05 add eax,0x5
# 80483fe: 89 45 fc mov DWORD PTR [ebp-0x4],eax
# 8048401: 8b 45 fc mov eax,DWORD PTR [ebp-0x4]
# 8048404: c9 leave
# 8048405: c3 ret
print("[+] Adding instructions to the analyzer...")
for addr, asm_instr, reil_instrs in barf.translate(start=0x80483ed, end=0x8048401):
print("0x{0:08x} : {1}".format(addr, asm_instr))
for reil_instr in reil_instrs:
barf.code_analyzer.add_instruction(reil_instr)
print("[+] Adding pre and post conditions to the analyzer...")
ebp = barf.code_analyzer.get_register_expr("ebp", mode="post")
# Preconditions: set range for variable a and b
a = barf.code_analyzer.get_memory_expr(ebp-0x8, 4, mode="pre")
b = barf.code_analyzer.get_memory_expr(ebp-0xc, 4, mode="pre")
for constr in [a >= 2, a <= 100, b >= 2, b <= 100]:
barf.code_analyzer.add_constraint(constr)
# Postconditions: set desired value for the result
c = barf.code_analyzer.get_memory_expr(ebp-0x4, 4, mode="post")
for constr in [c >= 26, c <= 28]:
barf.code_analyzer.add_constraint(constr)
print("[+] Check for satisfiability...")
if barf.code_analyzer.check() == 'sat':
print("[+] Satisfiable! Possible assignments:")
# Get concrete value for expressions
a_val = barf.code_analyzer.get_expr_value(a)
b_val = barf.code_analyzer.get_expr_value(b)
c_val = barf.code_analyzer.get_expr_value(c)
# Print values
print("- a: {0:#010x} ({0})".format(a_val))
print("- b: {0:#010x} ({0})".format(b_val))
print("- c: {0:#010x} ({0})".format(c_val))
assert a_val + b_val + 5 == c_val
else:
print("[-] Unsatisfiable!")
| [
"cnheitman@fundacionsadosky.org.ar"
] | cnheitman@fundacionsadosky.org.ar |
61d05adc137915e86dd216e736831bf0cc5917fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03665/s821294954.py | b2af28f559909e7fcc451a213b7441c4c193ce2c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | def two_int():
N, K = map(int, input().split())
return N,K
def one_int():
return int(input())
def one_str():
return input()
def many_int():
return list(map(int, input().split()))
N, P = two_int()
A=many_int()
even = 0
odd = 0
for a in A:
if a%2==0:
even+=1
else:
odd+=1
import math
def comb(n,r):
return math.factorial(n) // (math.factorial(n-r) * math.factorial(r))
even_count=0
for i in range(even+1):
even_count += comb(even, i)
odd_count=0
if P==1:
for i in range(1, odd+1, 2):
odd_count += comb(odd, i)
elif P==0:
for i in range(2, odd+1, 2):
odd_count += comb(odd, i)
odd_count+=1
if odd_count != 0 and even_count != 0:
print(odd_count * even_count)
else:
print(odd_count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
912c4c70d6ec6508942c8a1919093486e3750e9c | 436177bf038f9941f67e351796668700ffd1cef2 | /venv/Lib/site-packages/sklearn/datasets/_samples_generator.py | 1d4ba96f5cfd87b62b93b714a4704ecb5533f151 | [] | no_license | python019/matplotlib_simple | 4359d35f174cd2946d96da4d086026661c3d1f9c | 32e9a8e773f9423153d73811f69822f9567e6de4 | refs/heads/main | 2023-08-22T18:17:38.883274 | 2021-10-07T15:55:50 | 2021-10-07T15:55:50 | 380,471,961 | 29 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63,054 | py | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
from collections.abc import Iterable
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.random import sample_without_replacement
from ..utils.validation import _deprecate_positional_args
def _generate_hypercube(samples, dimensions, rng):
    """Draw ``samples`` distinct binary vertices of a ``dimensions``-cube.

    For more than 30 dimensions, the leading columns are filled with
    independent random bits and only the trailing 30 columns are
    guaranteed to be jointly distinct.
    """
    if dimensions > 30:
        # Split off the high dimensions as plain random bits, then recurse
        # on a 30-dimensional cube whose vertex index fits in 32 bits.
        # NOTE: the rng.randint draw happens before the recursive call so
        # the random stream matches the historical implementation.
        extra_bits = rng.randint(2, size=(samples, dimensions - 30))
        low_bits = _generate_hypercube(samples, 30, rng)
        return np.hstack([extra_bits, low_bits])
    # Sample distinct vertex indices without replacement, view each
    # big-endian uint32 as 4 bytes, unpack to bits, and keep the last
    # `dimensions` bits of every 32-bit row.
    codes = sample_without_replacement(
        2 ** dimensions, samples, random_state=rng).astype(dtype='>u4',
                                                           copy=False)
    bits = np.unpackbits(codes.view('>u1')).reshape((-1, 32))
    return bits[:, -dimensions:]
@_deprecate_positional_args
def make_classification(n_samples=100, n_features=20, *, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.
    This initially creates clusters of points normally distributed (std=1)
    about vertices of an ``n_informative``-dimensional hypercube with sides of
    length ``2*class_sep`` and assigns an equal number of clusters to each
    class. It introduces interdependence between these features and adds
    various types of further noise to the data.
    Without shuffling, ``X`` horizontally stacks features in the following
    order: the primary ``n_informative`` features, followed by ``n_redundant``
    linear combinations of the informative features, followed by ``n_repeated``
    duplicates, drawn randomly with replacement from the informative and
    redundant features. The remaining features are filled with random noise.
    Thus, without shuffling, all useful features are contained in the columns
    ``X[:, :n_informative + n_redundant + n_repeated]``.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=20
        The total number of features. These comprise ``n_informative``
        informative features, ``n_redundant`` redundant features,
        ``n_repeated`` duplicated features and
        ``n_features-n_informative-n_redundant-n_repeated`` useless features
        drawn at random.
    n_informative : int, default=2
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension ``n_informative``. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.
    n_redundant : int, default=2
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.
    n_repeated : int, default=0
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.
    n_classes : int, default=2
        The number of classes (or labels) of the classification problem.
    n_clusters_per_class : int, default=2
        The number of clusters per class.
    weights : array-like of shape (n_classes,) or (n_classes - 1,),\
            default=None
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if ``len(weights) == n_classes - 1``,
        then the last class weight is automatically inferred.
        More than ``n_samples`` samples may be returned if the sum of
        ``weights`` exceeds 1. Note that the actual class proportions will
        not exactly match ``weights`` when ``flip_y`` isn't 0.
    flip_y : float, default=0.01
        The fraction of samples whose class is assigned randomly. Larger
        values introduce noise in the labels and make the classification
        task harder. Note that the default setting flip_y > 0 might lead
        to less than ``n_classes`` in y in some cases.
    class_sep : float, default=1.0
        The factor multiplying the hypercube size. Larger values spread
        out the clusters/classes and make the classification task easier.
    hypercube : bool, default=True
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.
    shift : float, ndarray of shape (n_features,) or None, default=0.0
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].
    scale : float, ndarray of shape (n_features,) or None, default=1.0
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.
    shuffle : bool, default=True
        Shuffle the samples and the features.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.
    y : ndarray of shape (n_samples,)
        The integer labels for class membership of each sample.
    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.
    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.
    See Also
    --------
    make_blobs : Simplified variant.
    make_multilabel_classification : Unrelated generator for multilabel tasks.
    """
    generator = check_random_state(random_state)
    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    # Use log2 to avoid overflow errors
    if n_informative < np.log2(n_classes * n_clusters_per_class):
        msg = "n_classes({}) * n_clusters_per_class({}) must be"
        msg += " smaller or equal 2**n_informative({})={}"
        raise ValueError(msg.format(n_classes, n_clusters_per_class,
                                    n_informative, 2**n_informative))
    if weights is not None:
        if len(weights) not in [n_classes, n_classes - 1]:
            raise ValueError("Weights specified but incompatible with number "
                             "of classes.")
        # Accept n_classes - 1 weights and infer the last one so the
        # weights sum to 1.
        if len(weights) == n_classes - 1:
            if isinstance(weights, list):
                weights = weights + [1.0 - sum(weights)]
            else:
                weights = np.resize(weights, n_classes)
                weights[-1] = 1.0 - sum(weights[:-1])
    else:
        weights = [1.0 / n_classes] * n_classes
    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class
    # Distribute samples among clusters by weight
    n_samples_per_cluster = [
        int(n_samples * weights[k % n_classes] / n_clusters_per_class)
        for k in range(n_clusters)]
    # Hand leftover samples (lost to the int truncation above) back out,
    # one per cluster, round-robin.
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=int)
    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float, copy=False)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)
    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)
    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster
        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance
        X_k += centroid  # shift the cluster to a vertex
    # Create redundant features as random linear combinations of the
    # informative columns.
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)
    # Repeat some features: indices drawn uniformly (with replacement)
    # from the informative + redundant columns.
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]
    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)
    # Randomly replace labels; note the rand vector is drawn for any
    # flip_y >= 0, even 0 (the mask is then all False).
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
    # Randomly shift and scale (scaling happens after shifting)
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift
    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale
    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)
        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
    return X, y
@_deprecate_positional_args
def make_multilabel_classification(n_samples=100, n_features=20, *,
                                   n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.
    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)
    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=20
        The total number of features.
    n_classes : int, default=5
        The number of classes of the classification problem.
    n_labels : int, default=2
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.
    length : int, default=50
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.
    allow_unlabeled : bool, default=True
        If ``True``, some instances might not belong to any class.
    sparse : bool, default=False
        If ``True``, return a sparse feature matrix
        .. versionadded:: 0.17
           parameter to allow *sparse* output.
    return_indicator : {'dense', 'sparse'} or False, default='dense'
        If ``'dense'`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.
    return_distributions : bool, default=False
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.
    Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        The label sets. Sparse matrix should be of CSR format.
    p_c : ndarray of shape (n_classes,)
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.
    p_w_c : ndarray of shape (n_features, n_classes)
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.
    """
    if n_classes < 1:
        raise ValueError(
            "'n_classes' should be an integer greater than 0. Got {} instead."
            .format(n_classes)
            )
    if length < 1:
        raise ValueError(
            "'length' should be an integer greater than 0. Got {} instead."
            .format(length)
            )
    generator = check_random_state(random_state)
    # Class prior p_c and per-class word distributions p_w_c are drawn at
    # random and normalised; cumulative_p_c is the prior's CDF, used below
    # for inverse-transform sampling of classes.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    p_w_c = generator.rand(n_features, n_classes)
    # Normalise each column so every class's word distribution sums to 1.
    p_w_c /= np.sum(p_w_c, axis=0)
    # Closure drawing one example as (word indices, label list); it closes
    # over generator, cumulative_p_c and p_w_c.
    def sample_example():
        _, n_classes = p_w_c.shape
        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)
        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)
        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)
        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y
        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y
    # Accumulate the CSR ingredients (column indices + row pointer) in
    # compact C arrays, one sample at a time.
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    # Collapse repeated (sample, word) entries into word counts.
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()
    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
@_deprecate_positional_args
def make_hastie_10_2(n_samples=12000, *, random_state=None):
    """Generate the binary classification data used in Hastie et al. 2009,
    Example 10.2.

    Each of the ten features is drawn independently from a standard
    normal distribution; the target is defined by::

        y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=12000
        The number of samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an
        int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 10)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of
           Statistical Learning Ed. 2", Springer, 2009.

    See Also
    --------
    make_gaussian_quantiles : A generalization of this dataset approach.
    """
    rng = check_random_state(random_state)
    X = rng.normal(size=(n_samples, 10))
    # Label is +1 outside the sphere of squared radius 9.34, -1 otherwise.
    y = np.where(np.sum(X ** 2.0, axis=1) > 9.34, 1.0, -1.0)
    return X, y
@_deprecate_positional_args
def make_regression(n_samples=100, n_features=100, *, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random regression problem.
    The input set can either be well conditioned (by default) or have a low
    rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
    more details.
    The output is generated by applying a (potentially biased) random linear
    regression model with `n_informative` nonzero regressors to the previously
    generated input and some gaussian centered noise with some adjustable
    scale.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.
    n_features : int, default=100
        The number of features.
    n_informative : int, default=10
        The number of informative features, i.e., the number of features used
        to build the linear model used to generate the output.
    n_targets : int, default=1
        The number of regression targets, i.e., the dimension of the y output
        vector associated with a sample. By default, the output is a scalar.
    bias : float, default=0.0
        The bias term in the underlying linear model.
    effective_rank : int, default=None
        if not None:
            The approximate number of singular vectors required to explain most
            of the input data by linear combinations. Using this kind of
            singular spectrum in the input allows the generator to reproduce
            the correlations often observed in practice.
        if None:
            The input set is well conditioned, centered and gaussian with
            unit variance.
    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile if `effective_rank` is not None. When a float, it should be
        between 0 and 1.
    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.
    shuffle : bool, default=True
        Shuffle the samples and the features.
    coef : bool, default=False
        If True, the coefficients of the underlying linear model are returned.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.
    y : ndarray of shape (n_samples,) or (n_samples, n_targets)
        The output values.
    coef : ndarray of shape (n_features,) or (n_features, n_targets)
        The coefficient of the underlying linear model. It is returned only if
        coef is True.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)
    if effective_rank is None:
        # Randomly generate a well conditioned input set
        X = generator.randn(n_samples, n_features)
    else:
        # Randomly generate a low rank, fat tail input set
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)
    # Generate a ground truth model with only n_informative features being non
    # zeros (the other features are not correlated to y and should be ignored
    # by a sparsifying regularizers such as L1 or elastic net); the non-zero
    # coefficients are drawn uniformly in [0, 100).
    ground_truth = np.zeros((n_features, n_targets))
    ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
                                                           n_targets)
    y = np.dot(X, ground_truth) + bias
    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)
    # Randomly permute samples and features
    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]
        # Keep the coefficients aligned with the permuted feature columns.
        ground_truth = ground_truth[indices]
    # Drop the trailing targets axis when n_targets == 1 so y is 1-D.
    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(ground_truth)
    else:
        return X, y
@_deprecate_positional_args
def make_circles(n_samples=100, *, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Generate a 2d dataset of a large circle containing a smaller circle.

    A simple toy dataset for visualising clustering and classification
    algorithms: label 0 points lie on a unit circle, label 1 points on a
    circle scaled down by ``factor``.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, the total number of points generated; for odd totals the
        inner circle receives one point more than the outer circle.
        If a two-element tuple, the number of points on the outer and
        inner circle respectively.

        .. versionchanged:: 0.23
           Added two-element tuple.

    shuffle : bool, default=True
        Whether to shuffle the samples.

    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and
        noise. Pass an int for reproducible output across multiple
        function calls. See :term:`Glossary <random_state>`.

    factor : float, default=.8
        Scale factor between inner and outer circle; must satisfy
        ``0 <= factor < 1``.

    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.
    """
    if factor >= 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    # Resolve the per-circle sample counts from either form of n_samples.
    if isinstance(n_samples, numbers.Integral):
        n_outer = n_samples // 2
        n_inner = n_samples - n_outer
    else:
        try:
            n_outer, n_inner = n_samples
        except ValueError as e:
            raise ValueError('`n_samples` can be either an int or '
                             'a two-element tuple.') from e

    rng = check_random_state(random_state)

    # endpoint=False avoids duplicating the angle-0 point at 2*pi.
    theta_outer = np.linspace(0, 2 * np.pi, n_outer, endpoint=False)
    theta_inner = np.linspace(0, 2 * np.pi, n_inner, endpoint=False)

    outer = np.column_stack([np.cos(theta_outer), np.sin(theta_outer)])
    inner = np.column_stack([np.cos(theta_inner) * factor,
                             np.sin(theta_inner) * factor])

    X = np.vstack([outer, inner])
    y = np.concatenate([np.zeros(n_outer, dtype=np.intp),
                        np.ones(n_inner, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)

    if noise is not None:
        X += rng.normal(scale=noise, size=X.shape)

    return X, y
@_deprecate_positional_args
def make_moons(n_samples=100, *, shuffle=True, noise=None, random_state=None):
    """Generate two interleaving half-circle ("moon") shaped clusters in 2d.

    A simple toy dataset for visualising clustering and classification
    algorithms: label 0 points trace the upper half of the unit circle,
    label 1 points trace a reflected half circle shifted to interleave
    with it.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int or tuple of shape (2,), dtype=int, default=100
        If int, the total number of points generated.
        If a two-element tuple, the number of points in each of the two
        moons.

        .. versionchanged:: 0.23
           Added two-element tuple.

    shuffle : bool, default=True
        Whether to shuffle the samples.

    noise : float, default=None
        Standard deviation of Gaussian noise added to the data.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and
        noise. Pass an int for reproducible output across multiple
        function calls. See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 2)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels (0 or 1) for class membership of each sample.
    """
    # Resolve the per-moon sample counts from either form of n_samples.
    if isinstance(n_samples, numbers.Integral):
        n_upper = n_samples // 2
        n_lower = n_samples - n_upper
    else:
        try:
            n_upper, n_lower = n_samples
        except ValueError as e:
            raise ValueError('`n_samples` can be either an int or '
                             'a two-element tuple.') from e

    rng = check_random_state(random_state)

    theta_upper = np.linspace(0, np.pi, n_upper)
    theta_lower = np.linspace(0, np.pi, n_lower)

    # Upper moon: the unit upper half circle. Lower moon: a reflected half
    # circle shifted right by 1 and down by 0.5 so the two interleave.
    upper = np.column_stack([np.cos(theta_upper), np.sin(theta_upper)])
    lower = np.column_stack([1 - np.cos(theta_lower),
                             1 - np.sin(theta_lower) - .5])

    X = np.vstack([upper, lower])
    y = np.concatenate([np.zeros(n_upper, dtype=np.intp),
                        np.ones(n_lower, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=rng)

    if noise is not None:
        X += rng.normal(scale=noise, size=X.shape)

    return X, y
@_deprecate_positional_args
def make_blobs(n_samples=100, n_features=2, *, centers=None, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None,
               return_centers=False):
    """Generate isotropic Gaussian blobs for clustering.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int or array-like, default=100
        If int, it is the total number of points equally divided among
        clusters.
        If array-like, each element of the sequence indicates
        the number of samples per cluster.
        .. versionchanged:: v0.20
            one can now pass an array-like to the ``n_samples`` parameter
    n_features : int, default=2
        The number of features for each sample.
    centers : int or ndarray of shape (n_centers, n_features), default=None
        The number of centers to generate, or the fixed center locations.
        If n_samples is an int and centers is None, 3 centers are generated.
        If n_samples is array-like, centers must be
        either None or an array of length equal to the length of n_samples.
    cluster_std : float or array-like of float, default=1.0
        The standard deviation of the clusters.
    center_box : tuple of float (min, max), default=(-10.0, 10.0)
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : bool, default=True
        Shuffle the samples.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    return_centers : bool, default=False
        If True, then return the centers of each cluster
        .. versionadded:: 0.23
    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.
    y : ndarray of shape (n_samples,)
        The integer labels for cluster membership of each sample.
    centers : ndarray of shape (n_centers, n_features)
        The centers of each cluster. Only returned if
        ``return_centers=True``.
    Examples
    --------
    >>> from sklearn.datasets import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    >>> X, y = make_blobs(n_samples=[3, 3, 4], centers=None, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 1, 2, 0, 2, 2, 2, 1, 1, 0])
    See Also
    --------
    make_classification : A more intricate variant.
    """
    generator = check_random_state(random_state)
    if isinstance(n_samples, numbers.Integral):
        # Set n_centers by looking at centers arg
        if centers is None:
            centers = 3
        if isinstance(centers, numbers.Integral):
            n_centers = centers
            # Draw random center locations inside the bounding box.
            centers = generator.uniform(center_box[0], center_box[1],
                                        size=(n_centers, n_features))
        else:
            # Fixed center locations were supplied; they also fix n_features.
            centers = check_array(centers)
            n_features = centers.shape[1]
            n_centers = centers.shape[0]
    else:
        # Set n_centers by looking at [n_samples] arg
        n_centers = len(n_samples)
        if centers is None:
            centers = generator.uniform(center_box[0], center_box[1],
                                        size=(n_centers, n_features))
        # Validate that `centers` is array-like and matches len(n_samples).
        try:
            assert len(centers) == n_centers
        except TypeError as e:
            raise ValueError("Parameter `centers` must be array-like. "
                             "Got {!r} instead".format(centers)) from e
        except AssertionError as e:
            raise ValueError(
                f"Length of `n_samples` not consistent with number of "
                f"centers. Got n_samples = {n_samples} and centers = {centers}"
            ) from e
        else:
            centers = check_array(centers)
            n_features = centers.shape[1]
    # stds: if cluster_std is given as list, it must be consistent
    # with the n_centers
    if (hasattr(cluster_std, "__len__") and len(cluster_std) != n_centers):
        raise ValueError("Length of `clusters_std` not consistent with "
                         "number of centers. Got centers = {} "
                         "and cluster_std = {}".format(centers, cluster_std))
    if isinstance(cluster_std, numbers.Real):
        # Broadcast a scalar std to one value per center.
        cluster_std = np.full(len(centers), cluster_std)
    X = []
    y = []
    if isinstance(n_samples, Iterable):
        n_samples_per_center = n_samples
    else:
        # Split n_samples as evenly as possible; the first
        # (n_samples % n_centers) clusters get one extra point.
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers
        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1
    # Sample each blob from an isotropic normal around its center.
    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        X.append(generator.normal(loc=centers[i], scale=std,
                                  size=(n, n_features)))
        y += [i] * n
    X = np.concatenate(X)
    y = np.array(y)
    if shuffle:
        # np.sum handles both the int and the array-like n_samples cases.
        total_n_samples = np.sum(n_samples)
        indices = np.arange(total_n_samples)
        generator.shuffle(indices)
        X = X[indices]
        y = y[indices]
    if return_centers:
        return X, y, centers
    else:
        return X, y
@_deprecate_positional_args
def make_friedman1(n_samples=100, n_features=10, *, noise=0.0,
                   random_state=None):
    """Generate the "Friedman #1" regression problem.

    Inputs `X` are independent features uniformly distributed on [0, 1].
    The output is::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Only the first 5 of the `n_features` features are used to compute `y`;
    the remaining features are independent of `y`, so `n_features` must be
    at least 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features. Should be at least 5.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    generator = check_random_state(random_state)

    X = generator.rand(n_samples, n_features)
    # Only the first five columns contribute to the target.
    y = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
         + 20 * (X[:, 2] - 0.5) ** 2
         + 10 * X[:, 3]
         + 5 * X[:, 4]
         + noise * generator.randn(n_samples))
    return X, y
@_deprecate_positional_args
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #2" regression problem.

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
               - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    # Stretch each uniform [0, 1) column onto its documented interval.
    X = generator.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    inner = X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])
    y = ((X[:, 0] ** 2 + inner ** 2) ** 0.5
         + noise * generator.randn(n_samples))
    return X, y
@_deprecate_positional_args
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
    """Generate the "Friedman #3" regression problem.

    Inputs `X` are 4 independent features uniformly distributed on the
    intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The output `y` is created according to the formula::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    noise : float, default=0.0
        The standard deviation of the gaussian noise applied to the output.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset noise. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 4)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    # Stretch each uniform [0, 1) column onto its documented interval.
    X = generator.rand(n_samples, 4)
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    ratio = (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    y = np.arctan(ratio) + noise * generator.randn(n_samples)
    return X, y
@_deprecate_positional_args
def make_low_rank_matrix(n_samples=100, n_features=100, *, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    Most of the variance can be explained by a bell-shaped curve of width
    effective_rank: the low rank part of the singular values profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    The remaining singular values' tail is fat, decreasing as::

        tail_strength * exp(-0.1 * i / effective_rank).

    The low rank part of the profile can be considered the structured
    signal part of the data while the tail can be considered the noisy
    part of the data that cannot be summarized by a low number of linear
    components (singular vectors). This kind of singular profile is often
    seen in practice, for instance:

    - gray level pictures of faces
    - TF-IDF vectors of text documents crawled from the web

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=100
        The number of features.

    effective_rank : int, default=10
        The approximate number of singular vectors required to explain most of
        the data by linear combinations.

    tail_strength : float, default=0.5
        The relative importance of the fat noisy tail of the singular values
        profile. The value should be between 0 and 1.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Random orthonormal bases for the left and right singular vectors.
    left, _ = linalg.qr(generator.randn(n_samples, n), mode='economic',
                        check_finite=False)
    right, _ = linalg.qr(generator.randn(n_features, n), mode='economic',
                         check_finite=False)

    # Assemble the singular value profile: bell-shaped signal + fat tail.
    index = np.arange(n, dtype=np.float64)
    signal = ((1 - tail_strength) *
              np.exp(-1.0 * (index / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * index / effective_rank)
    sigma = np.identity(n) * (signal + tail)

    # Recombine: X = U @ S @ V.T
    return np.dot(np.dot(left, sigma), right.T)
@_deprecate_positional_args
def make_sparse_coded_signal(n_samples, *, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix ``Y = DX`` where ``D`` has shape
    (n_features, n_components), ``X`` has shape (n_components, n_samples)
    and each column of ``X`` has exactly ``n_nonzero_coefs`` non-zero
    elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        Number of samples to generate

    n_components : int
        Number of components in the dictionary

    n_features : int
        Number of features of the dataset to generate

    n_nonzero_coefs : int
        Number of active (non-zero) coefficients in each sample

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    data : ndarray of shape (n_features, n_samples)
        The encoded signal (Y).

    dictionary : ndarray of shape (n_features, n_components)
        The dictionary with normalized components (D).

    code : ndarray of shape (n_components, n_samples)
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # Dictionary with unit-norm columns (atoms).
    dictionary = generator.randn(n_features, n_components)
    dictionary /= np.sqrt(np.sum((dictionary ** 2), axis=0))

    # Sparse code: choose a random support of size n_nonzero_coefs per
    # column and fill it with Gaussian coefficients.
    code = np.zeros((n_components, n_samples))
    for col in range(n_samples):
        support = np.arange(n_components)
        generator.shuffle(support)
        support = support[:n_nonzero_coefs]
        code[support, col] = generator.randn(n_nonzero_coefs)

    # Encode the signal.
    signal = np.dot(dictionary, code)

    return map(np.squeeze, (signal, dictionary, code))
@_deprecate_positional_args
def make_sparse_uncorrelated(n_samples=100, n_features=10, *,
                             random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    This dataset is described in Celeux et al [1]. as::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative. The remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of samples.

    n_features : int, default=10
        The number of features.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The input samples.

    y : ndarray of shape (n_samples,)
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)

    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
    # Only the first four features enter the conditional mean of y.
    conditional_mean = (X[:, 0] +
                        2 * X[:, 1] -
                        2 * X[:, 2] -
                        1.5 * X[:, 3])
    y = generator.normal(loc=conditional_mean, scale=np.ones(n_samples))
    return X, y
@_deprecate_positional_args
def make_spd_matrix(n_dim, *, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_dim, n_dim)
        The random symmetric, positive-definite matrix.

    See Also
    --------
    make_sparse_spd_matrix
    """
    generator = check_random_state(random_state)

    # Take the SVD of a random Gram matrix A.T @ A, then recombine it with
    # singular values shifted into (1, 2) to guarantee positive definiteness.
    rand_mat = generator.rand(n_dim, n_dim)
    U, _, Vt = linalg.svd(np.dot(rand_mat.T, rand_mat), check_finite=False)
    return np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), Vt)
@_deprecate_positional_args
def make_sparse_spd_matrix(dim=1, *, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    dim : int, default=1
        The size of the random matrix to generate.
    alpha : float, default=0.95
        The probability that a coefficient is zero (see notes). Larger values
        enforce more sparsity. The value should be in the range 0 and 1.
    norm_diag : bool, default=False
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1
    smallest_coef : float, default=0.1
        The value of the smallest coefficient between 0 and 1.
    largest_coef : float, default=0.9
        The value of the largest coefficient between 0 and 1.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.
    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.
    See Also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)
    # Start the Cholesky factor from -I; a sparse strictly-lower-triangular
    # perturbation is added below.
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # Zero out entries below alpha; redraw the survivors uniformly in
    # [smallest_coef, largest_coef].  (Entries exactly equal to alpha are
    # left untouched -- a probability-zero event for continuous draws.)
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower triangle so chol stays triangular.
    aux = np.tril(aux, k=-1)
    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # prec = chol.T @ chol is symmetric positive definite by construction.
    prec = np.dot(chol.T, chol)
    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)
        # Scale rows and columns so the diagonal becomes all ones.
        prec *= d
        prec *= d.T
    return prec
@_deprecate_positional_args
def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the S curve.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
    """
    generator = check_random_state(random_state)

    # Roll parameter t; (x, z) trace the spiral while the middle axis is a
    # uniform "width" coordinate along the roll.
    t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
    width = 21 * generator.rand(1, n_samples)

    X = np.concatenate((t * np.cos(t), width, t * np.sin(t)))
    X += noise * generator.randn(3, n_samples)

    return X.T, np.squeeze(t)
@_deprecate_positional_args
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, default=100
        The number of sample points on the S curve.

    noise : float, default=0.0
        The standard deviation of the gaussian noise.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, 3)
        The points.

    t : ndarray of shape (n_samples,)
        The univariate position of the sample according to the main dimension
        of the points in the manifold.
    """
    generator = check_random_state(random_state)

    # Curve parameter in (-1.5pi, 1.5pi); (x, z) draw the "S" shape while
    # the middle axis is a uniform depth coordinate.
    t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
    depth = 2.0 * generator.rand(1, n_samples)

    X = np.concatenate((np.sin(t), depth, np.sign(t) * (np.cos(t) - 1)))
    X += noise * generator.randn(3, n_samples)

    return X.T, np.squeeze(t)
@_deprecate_positional_args
def make_gaussian_quantiles(*, mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    r"""Generate isotropic Gaussian and label samples by quantile.

    This classification dataset is constructed by taking a multi-dimensional
    standard normal distribution and defining classes separated by nested
    concentric multi-dimensional spheres such that roughly equal numbers of
    samples are in each class (quantiles of the :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : ndarray of shape (n_features,), default=None
        The mean of the multi-dimensional normal distribution.
        If None then use the origin (0, 0, ...).

    cov : float, default=1.0
        The covariance matrix will be this value times the unit matrix. This
        dataset only produces symmetric normal distributions.

    n_samples : int, default=100
        The total number of points equally divided among classes.

    n_features : int, default=2
        The number of features for each sample.

    n_classes : int, default=3
        The number of classes

    shuffle : bool, default=True
        Shuffle the samples.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    X : ndarray of shape (n_samples, n_features)
        The generated samples.

    y : ndarray of shape (n_samples,)
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Draw from an isotropic multivariate normal centred at `mean`.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Order the samples by squared distance from the mean ...
    order = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]

    # ... and label them by quantile; any leftover samples (when n_samples
    # is not a multiple of n_classes) go to the outermost class.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    """Randomly permute both the rows and the columns of ``data``.

    Returns the permuted array together with the row and column index
    arrays used, so callers can apply the same permutation to labels.
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_order = generator.permutation(n_rows)
    col_order = generator.permutation(n_cols)
    return data[row_order][:, col_order], row_order, col_order
@_deprecate_positional_args
def make_biclusters(shape, n_clusters, *, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    shape : iterable of shape (n_rows, n_cols)
        The shape of the result.
    n_clusters : int
        The number of biclusters.
    noise : float, default=0.0
        The standard deviation of the gaussian noise.
    minval : int, default=10
        Minimum value of a bicluster.
    maxval : int, default=100
        Maximum value of a bicluster.
    shuffle : bool, default=True
        Shuffle the samples.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.
    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.
    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.
    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.
    See Also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    # One constant value per bicluster.
    consts = generator.uniform(minval, maxval, n_clusters)
    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    # Expand the per-cluster sizes into a label for every row/column.
    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_clusters), col_sizes)))
    result = np.zeros(shape, dtype=np.float64)
    # Fill each diagonal block (rows and columns with matching label).
    for i in range(n_clusters):
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]
    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)
    if shuffle:
        # Permute rows and columns, keeping the labels in sync.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]
    # Boolean membership indicators, one row per bicluster.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
    return result, rows, cols
@_deprecate_positional_args
def make_checkerboard(shape, n_clusters, *, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    shape : tuple of shape (n_rows, n_cols)
        The shape of the result.
    n_clusters : int or array-like or shape (n_row_clusters, n_column_clusters)
        The number of row and column clusters.
    noise : float, default=0.0
        The standard deviation of the gaussian noise.
    minval : int, default=10
        Minimum value of a bicluster.
    maxval : int, default=100
        Maximum value of a bicluster.
    shuffle : bool, default=True
        Shuffle the samples.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    X : ndarray of shape `shape`
        The generated array.
    rows : ndarray of shape (n_clusters, X.shape[0])
        The indicators for cluster membership of each row.
    cols : ndarray of shape (n_clusters, X.shape[1])
        The indicators for cluster membership of each column.
    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.
    See Also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)
    # A scalar n_clusters means the same count for rows and columns.
    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters
    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))
    # Expand the per-cluster sizes into a label for every row/column.
    row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_row_clusters), row_sizes)))
    col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
                                zip(range(n_col_clusters), col_sizes)))
    result = np.zeros(shape, dtype=np.float64)
    # Each (row cluster, column cluster) block gets its own constant value.
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)
    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)
    if shuffle:
        # Permute rows and columns, keeping the labels in sync.
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]
    # One indicator row per (row cluster, column cluster) bicluster, in
    # row-major order over the cluster grid.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
    return result, rows, cols
| [
"82611064+python019@users.noreply.github.com"
] | 82611064+python019@users.noreply.github.com |
868c53f25cc4f5ef1a654a7bb8fa9c5eb7ad3c89 | 141d1fb160fcfb4294d4b0572216033218da702d | /exec -l /bin/zsh/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/dns/v1beta2/dns_v1beta2_client.py | e66c6f897750b41139ac64b7e930be260e2c63b3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sudocams/tech-club | 1f2d74c4aedde18853c2b4b729ff3ca5908e76a5 | c8540954b11a6fd838427e959e38965a084b2a4c | refs/heads/master | 2021-07-15T03:04:40.397799 | 2020-12-01T20:05:55 | 2020-12-01T20:05:55 | 245,985,795 | 0 | 1 | null | 2021-04-30T21:04:39 | 2020-03-09T08:51:41 | Python | UTF-8 | Python | false | false | 24,714 | py | """Generated client library for dns version v1beta2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.dns.v1beta2 import dns_v1beta2_messages as messages
class DnsV1beta2(base_api.BaseApiClient):
"""Generated client library for service dns version v1beta2."""
MESSAGES_MODULE = messages
BASE_URL = u'https://dns.googleapis.com/dns/v1beta2/'
MTLS_BASE_URL = u''
_PACKAGE = u'dns'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/ndev.clouddns.readonly', u'https://www.googleapis.com/auth/ndev.clouddns.readwrite']
_VERSION = u'v1beta2'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'DnsV1beta2'
_URL_VERSION = u'v1beta2'
_API_KEY = None
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None, response_encoding=None):
    """Create a new dns handle."""
    url = url or self.BASE_URL
    # Transport, auth and serialization setup is delegated to the apitools
    # base client; this generated subclass only supplies the constants above.
    super(DnsV1beta2, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # One service stub per API collection exposed by the DNS API.
    self.changes = self.ChangesService(self)
    self.dnsKeys = self.DnsKeysService(self)
    self.managedZoneOperations = self.ManagedZoneOperationsService(self)
    self.managedZones = self.ManagedZonesService(self)
    self.policies = self.PoliciesService(self)
    self.projects = self.ProjectsService(self)
    self.resourceRecordSets = self.ResourceRecordSetsService(self)
  class ChangesService(base_api.BaseApiService):
    """Service class for the changes resource."""
    _NAME = u'changes'
    def __init__(self, client):
      super(DnsV1beta2.ChangesService, self).__init__(client)
      # No resumable-upload methods exist on this service.
      self._upload_configs = {
          }
    def Create(self, request, global_params=None):
      r"""Create method for the changes service.
      Args:
        request: (DnsChangesCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Change) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)
    # Declarative HTTP mapping consumed by _RunMethod to build the request.
    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'dns.changes.create',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/changes',
        request_field=u'change',
        request_type_name=u'DnsChangesCreateRequest',
        response_type_name=u'Change',
        supports_download=False,
    )
    def Get(self, request, global_params=None):
      r"""Get method for the changes service.
      Args:
        request: (DnsChangesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Change) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.changes.get',
        ordered_params=[u'project', u'managedZone', u'changeId'],
        path_params=[u'changeId', u'managedZone', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/changes/{changeId}',
        request_field='',
        request_type_name=u'DnsChangesGetRequest',
        response_type_name=u'Change',
        supports_download=False,
    )
    def List(self, request, global_params=None):
      r"""List method for the changes service.
      Args:
        request: (DnsChangesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ChangesListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.changes.list',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'maxResults', u'pageToken', u'sortBy', u'sortOrder'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/changes',
        request_field='',
        request_type_name=u'DnsChangesListRequest',
        response_type_name=u'ChangesListResponse',
        supports_download=False,
    )
class DnsKeysService(base_api.BaseApiService):
    """Service class for the dnsKeys resource.

    Generated apitools service wrapper; methods dispatch via `_RunMethod`
    using the declarative `method_config` below each method.
    """

    _NAME = u'dnsKeys'

    def __init__(self, client):
        super(DnsV1beta2.DnsKeysService, self).__init__(client)
        # No media-upload endpoints on this resource.
        self._upload_configs = {}

    def Get(self, request, global_params=None):
        r"""Get method for the dnsKeys service.

        Args:
          request: (DnsDnsKeysGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DnsKey) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.dnsKeys.get',
        ordered_params=[u'project', u'managedZone', u'dnsKeyId'],
        path_params=[u'dnsKeyId', u'managedZone', u'project'],
        query_params=[u'clientOperationId', u'digestType'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}',
        request_field='',
        request_type_name=u'DnsDnsKeysGetRequest',
        response_type_name=u'DnsKey',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""List method for the dnsKeys service.

        Args:
          request: (DnsDnsKeysListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DnsKeysListResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.dnsKeys.list',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'digestType', u'maxResults', u'pageToken'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/dnsKeys',
        request_field='',
        request_type_name=u'DnsDnsKeysListRequest',
        response_type_name=u'DnsKeysListResponse',
        supports_download=False,
    )
class ManagedZoneOperationsService(base_api.BaseApiService):
    """Service class for the managedZoneOperations resource.

    Generated apitools service wrapper; methods dispatch via `_RunMethod`
    using the declarative `method_config` below each method.
    """

    _NAME = u'managedZoneOperations'

    def __init__(self, client):
        super(DnsV1beta2.ManagedZoneOperationsService, self).__init__(client)
        # No media-upload endpoints on this resource.
        self._upload_configs = {}

    def Get(self, request, global_params=None):
        r"""Get method for the managedZoneOperations service.

        Args:
          request: (DnsManagedZoneOperationsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.managedZoneOperations.get',
        ordered_params=[u'project', u'managedZone', u'operation'],
        path_params=[u'managedZone', u'operation', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/operations/{operation}',
        request_field='',
        request_type_name=u'DnsManagedZoneOperationsGetRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""List method for the managedZoneOperations service.

        Args:
          request: (DnsManagedZoneOperationsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ManagedZoneOperationsListResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.managedZoneOperations.list',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'maxResults', u'pageToken', u'sortBy'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/operations',
        request_field='',
        request_type_name=u'DnsManagedZoneOperationsListRequest',
        response_type_name=u'ManagedZoneOperationsListResponse',
        supports_download=False,
    )
class ManagedZonesService(base_api.BaseApiService):
    """Service class for the managedZones resource.

    Generated apitools service wrapper; methods dispatch via `_RunMethod`
    using the declarative `method_config` below each method.
    """

    _NAME = u'managedZones'

    def __init__(self, client):
        super(DnsV1beta2.ManagedZonesService, self).__init__(client)
        # No media-upload endpoints on this resource.
        self._upload_configs = {}

    def Create(self, request, global_params=None):
        r"""Create method for the managedZones service.

        Args:
          request: (DnsManagedZonesCreateRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ManagedZone) The response message.
        """
        config = self.GetMethodConfig('Create')
        return self._RunMethod(
            config, request, global_params=global_params)

    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'dns.managedZones.create',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones',
        request_field=u'managedZone',
        request_type_name=u'DnsManagedZonesCreateRequest',
        response_type_name=u'ManagedZone',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
        r"""Delete method for the managedZones service.

        Args:
          request: (DnsManagedZonesDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DnsManagedZonesDeleteResponse) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    Delete.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'DELETE',
        method_id=u'dns.managedZones.delete',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}',
        request_field='',
        request_type_name=u'DnsManagedZonesDeleteRequest',
        response_type_name=u'DnsManagedZonesDeleteResponse',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
        r"""Get method for the managedZones service.

        Args:
          request: (DnsManagedZonesGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ManagedZone) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.managedZones.get',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}',
        request_field='',
        request_type_name=u'DnsManagedZonesGetRequest',
        response_type_name=u'ManagedZone',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""List method for the managedZones service.

        Args:
          request: (DnsManagedZonesListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ManagedZonesListResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.managedZones.list',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[u'dnsName', u'maxResults', u'pageToken'],
        relative_path=u'projects/{project}/managedZones',
        request_field='',
        request_type_name=u'DnsManagedZonesListRequest',
        response_type_name=u'ManagedZonesListResponse',
        supports_download=False,
    )

    def Patch(self, request, global_params=None):
        r"""Patch method for the managedZones service.

        Args:
          request: (DnsManagedZonesPatchRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Patch')
        return self._RunMethod(
            config, request, global_params=global_params)

    Patch.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'PATCH',
        method_id=u'dns.managedZones.patch',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}',
        request_field=u'managedZoneResource',
        request_type_name=u'DnsManagedZonesPatchRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )

    def Update(self, request, global_params=None):
        r"""Update method for the managedZones service.

        Args:
          request: (DnsManagedZonesUpdateRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Operation) The response message.
        """
        config = self.GetMethodConfig('Update')
        return self._RunMethod(
            config, request, global_params=global_params)

    Update.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'PUT',
        method_id=u'dns.managedZones.update',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/managedZones/{managedZone}',
        request_field=u'managedZoneResource',
        request_type_name=u'DnsManagedZonesUpdateRequest',
        response_type_name=u'Operation',
        supports_download=False,
    )
class PoliciesService(base_api.BaseApiService):
    """Service class for the policies resource.

    Generated apitools service wrapper; methods dispatch via `_RunMethod`
    using the declarative `method_config` below each method.
    """

    _NAME = u'policies'

    def __init__(self, client):
        super(DnsV1beta2.PoliciesService, self).__init__(client)
        # No media-upload endpoints on this resource.
        self._upload_configs = {}

    def Create(self, request, global_params=None):
        r"""Create method for the policies service.

        Args:
          request: (DnsPoliciesCreateRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Policy) The response message.
        """
        config = self.GetMethodConfig('Create')
        return self._RunMethod(
            config, request, global_params=global_params)

    Create.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'dns.policies.create',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/policies',
        request_field=u'policy',
        request_type_name=u'DnsPoliciesCreateRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )

    def Delete(self, request, global_params=None):
        r"""Delete method for the policies service.

        Args:
          request: (DnsPoliciesDeleteRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (DnsPoliciesDeleteResponse) The response message.
        """
        config = self.GetMethodConfig('Delete')
        return self._RunMethod(
            config, request, global_params=global_params)

    Delete.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'DELETE',
        method_id=u'dns.policies.delete',
        ordered_params=[u'project', u'policy'],
        path_params=[u'policy', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/policies/{policy}',
        request_field='',
        request_type_name=u'DnsPoliciesDeleteRequest',
        response_type_name=u'DnsPoliciesDeleteResponse',
        supports_download=False,
    )

    def Get(self, request, global_params=None):
        r"""Get method for the policies service.

        Args:
          request: (DnsPoliciesGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Policy) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.policies.get',
        ordered_params=[u'project', u'policy'],
        path_params=[u'policy', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/policies/{policy}',
        request_field='',
        request_type_name=u'DnsPoliciesGetRequest',
        response_type_name=u'Policy',
        supports_download=False,
    )

    def List(self, request, global_params=None):
        r"""List method for the policies service.

        Args:
          request: (DnsPoliciesListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (PoliciesListResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.policies.list',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[u'maxResults', u'pageToken'],
        relative_path=u'projects/{project}/policies',
        request_field='',
        request_type_name=u'DnsPoliciesListRequest',
        response_type_name=u'PoliciesListResponse',
        supports_download=False,
    )

    def Patch(self, request, global_params=None):
        r"""Patch method for the policies service.

        Args:
          request: (DnsPoliciesPatchRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (PoliciesPatchResponse) The response message.
        """
        config = self.GetMethodConfig('Patch')
        return self._RunMethod(
            config, request, global_params=global_params)

    Patch.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'PATCH',
        method_id=u'dns.policies.patch',
        ordered_params=[u'project', u'policy'],
        path_params=[u'policy', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/policies/{policy}',
        request_field=u'policyResource',
        request_type_name=u'DnsPoliciesPatchRequest',
        response_type_name=u'PoliciesPatchResponse',
        supports_download=False,
    )

    def Update(self, request, global_params=None):
        r"""Update method for the policies service.

        Args:
          request: (DnsPoliciesUpdateRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (PoliciesUpdateResponse) The response message.
        """
        config = self.GetMethodConfig('Update')
        return self._RunMethod(
            config, request, global_params=global_params)

    Update.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'PUT',
        method_id=u'dns.policies.update',
        ordered_params=[u'project', u'policy'],
        path_params=[u'policy', u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}/policies/{policy}',
        request_field=u'policyResource',
        request_type_name=u'DnsPoliciesUpdateRequest',
        response_type_name=u'PoliciesUpdateResponse',
        supports_download=False,
    )
class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource.

    Generated apitools service wrapper; the single Get method dispatches
    via `_RunMethod` using the declarative `method_config` below it.
    """

    _NAME = u'projects'

    def __init__(self, client):
        super(DnsV1beta2.ProjectsService, self).__init__(client)
        # No media-upload endpoints on this resource.
        self._upload_configs = {}

    def Get(self, request, global_params=None):
        r"""Get method for the projects service.

        Args:
          request: (DnsProjectsGetRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (Project) The response message.
        """
        config = self.GetMethodConfig('Get')
        return self._RunMethod(
            config, request, global_params=global_params)

    Get.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.projects.get',
        ordered_params=[u'project'],
        path_params=[u'project'],
        query_params=[u'clientOperationId'],
        relative_path=u'projects/{project}',
        request_field='',
        request_type_name=u'DnsProjectsGetRequest',
        response_type_name=u'Project',
        supports_download=False,
    )
class ResourceRecordSetsService(base_api.BaseApiService):
    """Service class for the resourceRecordSets resource.

    Generated apitools service wrapper; the single List method dispatches
    via `_RunMethod` using the declarative `method_config` below it.
    """

    _NAME = u'resourceRecordSets'

    def __init__(self, client):
        super(DnsV1beta2.ResourceRecordSetsService, self).__init__(client)
        # No media-upload endpoints on this resource.
        self._upload_configs = {}

    def List(self, request, global_params=None):
        r"""List method for the resourceRecordSets service.

        Args:
          request: (DnsResourceRecordSetsListRequest) input message
          global_params: (StandardQueryParameters, default: None) global arguments
        Returns:
          (ResourceRecordSetsListResponse) The response message.
        """
        config = self.GetMethodConfig('List')
        return self._RunMethod(
            config, request, global_params=global_params)

    List.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'GET',
        method_id=u'dns.resourceRecordSets.list',
        ordered_params=[u'project', u'managedZone'],
        path_params=[u'managedZone', u'project'],
        query_params=[u'maxResults', u'name', u'pageToken', u'type'],
        relative_path=u'projects/{project}/managedZones/{managedZone}/rrsets',
        request_field='',
        request_type_name=u'DnsResourceRecordSetsListRequest',
        response_type_name=u'ResourceRecordSetsListResponse',
        supports_download=False,
    )
| [
"yogocamlus@gmail.com"
] | yogocamlus@gmail.com |
de13c796eb2ff43e6f5f5a08ccc5e6911a6a6749 | d2cfe3ef86d89d580f2a9670f9fc9b11c3e424b4 | /rvsml/EvaluateRVSML.py | 67a0ebf4c95d06d1ab04f36c3197a780b026bcac | [
"BSD-3-Clause"
] | permissive | ShuheiKuriki/SentEval_by_RVSML | 4e89ccd20fa6446c1656d76bafbbd79199b36e86 | b2cebb65c755bfec8ebb0a0400f0d69a5725bd6f | refs/heads/master | 2022-04-25T22:01:21.612920 | 2020-04-14T19:07:08 | 2020-04-14T19:07:08 | 254,819,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | import numpy as np
import time
from rvsml.NNClassifier import *
from rvsml.RVSML_OT_Learning import *
import logging
from array import array
def EvaluateRVSML_dtw(classnum, dim, trainset, trainsetnum, testset, testsetdata, testsetdatanum, testsetlabel, testsetnum, logname, logfile):
    """Learn an RVSML metric with DTW alignment and evaluate it with a 1-NN classifier.

    Args:
        classnum: number of classes in the training set.
        dim: feature dimensionality of each sequence frame.
        trainset: per-class list of training sequences (trainset[c][m] is an array).
        trainsetnum: per-class counts of training sequences.
        testset: unused here; kept for interface compatibility.
        testsetdata: list of test sequences.
        testsetdatanum: number of test sequences.
        testsetlabel: labels of the test sequences.
        testsetnum: unused here; kept for interface compatibility.
        logname: logger name to write progress to.
        logfile: path of the log file.
    """
    # rankdim = 58
    CVAL = 1
    logger = logging.getLogger(logname)
    logger.setLevel(10)  # level 10 == logging.DEBUG
    sh = logging.StreamHandler()
    logger.addHandler(sh)
    logging.basicConfig(filename=logfile, format="%(message)s", filemode='w')

    # Hyper-parameters for the RVSML optimization.
    delta = 1
    lambda1 = 50
    lambda2 = 0.1
    max_iters = 10
    err_limit = 10**(-2)

    class Options:
        """Bundle of optimization hyper-parameters passed to the learners."""

        def __init__(self, max_iters, err_limit, lambda1, lambda2, delta):
            self.max_iters = max_iters
            self.err_limit = err_limit
            self.lambda1 = lambda1
            self.lambda2 = lambda2
            self.delta = delta

    options = Options(max_iters, err_limit, lambda1, lambda2, delta)

    # Mean-centering of the data is currently disabled; the raw sets are used.
    trainset_m = trainset

    ## RVSML-DTW
    logger.info("data load done")
    logger.info("DTW start")
    templatenum = 4
    lambda0 = 0.0005
    tic = time.time()
    L = RVSML_OT_Learning_dtw(trainset_m, trainsetnum, dim, templatenum, lambda0, options, logname)
    RVSML_dtw_time = time.time() - tic
    logger.info("DTW learning done")

    ## classification with the learned metric: project every sequence through L.
    traindownset = [0] * classnum
    testdownsetdata = [0] * testsetdatanum
    for j in range(classnum):
        traindownset[j] = [0] * trainsetnum[j]
        for m in range(trainsetnum[j]):
            traindownset[j][m] = np.dot(trainset[j][m], L)
    for j in range(testsetdatanum):
        testdownsetdata[j] = np.dot(testsetdata[j], L)

    RVSML_dtw_macro, RVSML_dtw_micro, RVSML_dtw_acc, dtw_knn_time, dtw_knn_average_time = NNClassifier_dtw(
        classnum, traindownset, trainsetnum, testdownsetdata, testsetdatanum, testsetlabel, options, logname)
    RVSML_dtw_acc_1 = RVSML_dtw_acc[0]

    logger.info('Training time of RVSML instantiated by DTW is {:.4f} \n'.format(RVSML_dtw_time))
    logger.info('Classification using 1 nearest neighbor classifier with DTW distance:\n')
    logger.info('MAP macro is {:.4f}, micro is {:.4f} \n'.format(RVSML_dtw_macro, RVSML_dtw_micro))
    logger.info('dtw_knn_time is {:.4f} \n'.format(dtw_knn_time))
    logger.info('dtw_knn_average_time is {:.4f} \n'.format(dtw_knn_average_time))
    for acc in RVSML_dtw_acc:
        logger.info('Accuracy is {:.4f} \n'.format(acc))
    return
"shukuri.7336.8@gmail.com"
] | shukuri.7336.8@gmail.com |
c62305d533cfdea397699d4dcbbcc47d4d51441f | f63fdd673e5b9881a0395be83e0a6b5ccc7edbdb | /ReedsShepp_planner/drawing_pallet_jack.py | ac856f0c27793f58e4388e1c15825a7020aa8cfb | [] | no_license | karry3775/Motion-Planning-Python | 63ad4bf928c57b9ea6b8c19fceea16dbc2adb783 | 20ed30526bd03380ead8b29357e5bf7eb291ba9b | refs/heads/master | 2023-02-08T18:19:26.644696 | 2020-12-30T22:22:36 | 2020-12-30T22:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,109 | py | import matplotlib.pyplot as plt
import numpy as np
import math
def wrapToPi(theta):
    """Wrap an angle in radians into the interval [-pi, pi]."""
    return math.atan2(math.sin(theta), math.cos(theta))
def dpj(x, y, theta, steer):
    """Draw a pallet jack at pose (x, y, theta) with the steering wheel at angle `steer`.

    All geometry is defined in the vehicle frame and mapped into the world
    frame with homogeneous transforms; nothing is returned, the shapes are
    drawn onto the current matplotlib axes.
    """
    # plotting the center point and the heading / steering direction arrows
    wd = 1.5  # line width for the outline strokes
    plt.plot(x, y, 'co')
    plt.arrow(x, y, math.cos(theta), math.sin(theta), width=0.01, color='orange')
    plt.arrow(x, y, math.cos(wrapToPi(theta + steer)), math.sin(wrapToPi(theta + steer)), width=0.01, color='g')

    # wheel corner coordinates (vehicle frame, homogeneous)
    A = np.array([-0.3, 0.1, 1]).T
    B = np.array([0.3, 0.1, 1]).T
    C = np.array([-0.3, -0.1, 1]).T
    D = np.array([0.3, -0.1, 1]).T

    # transform the wheel by chassis heading plus steering angle
    T_wheel = np.array([[math.cos(wrapToPi(theta + steer)), -math.sin(wrapToPi(theta + steer)), x],
                        [math.sin(wrapToPi(theta + steer)), math.cos(wrapToPi(theta + steer)), y],
                        [0, 0, 1]])
    A = np.matmul(T_wheel, A)
    B = np.matmul(T_wheel, B)
    C = np.matmul(T_wheel, C)
    D = np.matmul(T_wheel, D)

    # front body coordinates
    a = np.array([0.5, -0.5, 1]).T
    b = np.array([0.5, 0.5, 1]).T
    c = np.array([0, 1, 1]).T
    d = np.array([0, -1, 1]).T
    e = np.array([-0.5, 1, 1]).T
    f = np.array([-0.5, -1, 1]).T

    # dotted front detail
    X = np.array([-0.5, 0.75, 1]).T
    Y = np.array([0, 0.75, 1]).T
    Z = np.array([0.25, 0.5, 1]).T
    W = np.array([0.25, -0.5, 1]).T
    U = np.array([0, -0.75, 1]).T
    V = np.array([-0.5, -0.75, 1]).T

    # back support
    g = np.array([-1.5, 1, 1]).T
    h = np.array([-1.5, -1, 1]).T
    i = np.array([-1.5, 0.75, 1]).T
    j = np.array([-1.5, 0.25, 1]).T
    k = np.array([-1.5, -0.25, 1]).T
    l = np.array([-1.5, -0.75, 1]).T

    # fork tips (first ends of the pallet forks)
    m = np.array([-4, 0.75, 1]).T
    n = np.array([-4, 0.25, 1]).T
    o = np.array([-4, -0.25, 1]).T
    p = np.array([-4, -0.75, 1]).T

    # tapered fork ends
    q = np.array([-4.5, 0.75 - 0.2, 1]).T
    r = np.array([-4.5, 0.25 + 0.2, 1]).T
    s = np.array([-4.5, -0.25 - 0.2, 1]).T
    t = np.array([-4.5, -0.75 + 0.2, 1]).T

    # chassis transform (heading only, no steering)
    T = np.array([[math.cos(wrapToPi(theta)), -math.sin(wrapToPi(theta)), x],
                  [math.sin(wrapToPi(theta)), math.cos(wrapToPi(theta)), y],
                  [0, 0, 1]])

    # front coordinates
    a = np.matmul(T, a)
    b = np.matmul(T, b)
    c = np.matmul(T, c)
    d = np.matmul(T, d)
    e = np.matmul(T, e)
    f = np.matmul(T, f)
    # dotted front
    X = np.matmul(T, X)
    Y = np.matmul(T, Y)
    Z = np.matmul(T, Z)
    W = np.matmul(T, W)
    U = np.matmul(T, U)
    V = np.matmul(T, V)
    # back support
    g = np.matmul(T, g)
    h = np.matmul(T, h)
    i = np.matmul(T, i)
    j = np.matmul(T, j)
    k = np.matmul(T, k)
    l = np.matmul(T, l)
    # fork tips
    m = np.matmul(T, m)
    n = np.matmul(T, n)
    o = np.matmul(T, o)
    p = np.matmul(T, p)
    # tapered ends
    q = np.matmul(T, q)
    r = np.matmul(T, r)
    s = np.matmul(T, s)
    t = np.matmul(T, t)

    # midpoint between the two inner fork tips, used for the reference dashed line
    back_center = [(n[0] + o[0]) / 2, (n[1] + o[1]) / 2]

    # filled polygons: grey forks, orange body, blue wheel
    plt.fill([r[0], q[0], m[0], i[0], j[0], n[0], r[0]], [r[1], q[1], m[1], i[1], j[1], n[1], r[1]], color='grey')
    plt.fill([s[0], o[0], k[0], l[0], p[0], t[0], s[0]], [s[1], o[1], k[1], l[1], p[1], t[1], s[1]], color='grey')
    plt.fill([g[0], e[0], f[0], h[0], g[0]], [g[1], e[1], f[1], h[1], g[1]], color='orange')
    plt.fill([e[0], c[0], b[0], a[0], d[0], f[0], e[0]], [e[1], c[1], b[1], a[1], d[1], f[1], e[1]], color='orange')
    plt.fill([A[0], B[0], D[0], C[0], A[0]], [A[1], B[1], D[1], C[1], A[1]], color='blue')

    # outline strokes of the front body
    plt.plot([a[0], b[0]], [a[1], b[1]], 'k', linewidth=wd)
    plt.plot([a[0], d[0]], [a[1], d[1]], 'k', linewidth=wd)
    plt.plot([c[0], b[0]], [c[1], b[1]], 'k', linewidth=wd)
    plt.plot([c[0], e[0]], [c[1], e[1]], 'k', linewidth=wd)
    plt.plot([d[0], f[0]], [d[1], f[1]], 'k', linewidth=wd)
    plt.plot([e[0], f[0]], [e[1], f[1]], 'k', linewidth=wd)

    # dashed green front detail
    plt.plot([X[0], Y[0]], [X[1], Y[1]], 'g--')
    plt.plot([Z[0], Y[0]], [Z[1], Y[1]], 'g--')
    plt.plot([Z[0], W[0]], [Z[1], W[1]], 'g--')
    plt.plot([U[0], W[0]], [U[1], W[1]], 'g--')
    plt.plot([U[0], V[0]], [U[1], V[1]], 'g--')

    # back support outline
    plt.plot([g[0], h[0]], [g[1], h[1]], 'k', linewidth=wd)
    plt.plot([g[0], e[0]], [g[1], e[1]], 'k', linewidth=wd)
    plt.plot([h[0], f[0]], [h[1], f[1]], 'k', linewidth=wd)
    plt.plot([i[0], l[0]], [i[1], l[1]], 'k', linewidth=wd)

    # fork outlines
    plt.plot([m[0], i[0]], [m[1], i[1]], 'k', linewidth=wd)
    plt.plot([n[0], j[0]], [n[1], j[1]], 'k', linewidth=wd)
    plt.plot([o[0], k[0]], [o[1], k[1]], 'k', linewidth=wd)
    plt.plot([p[0], l[0]], [p[1], l[1]], 'k', linewidth=wd)
    plt.plot([m[0], q[0]], [m[1], q[1]], 'k', linewidth=wd)
    plt.plot([q[0], r[0]], [q[1], r[1]], 'k', linewidth=wd)
    plt.plot([n[0], r[0]], [n[1], r[1]], 'k', linewidth=wd)
    plt.plot([o[0], s[0]], [o[1], s[1]], 'k', linewidth=wd)
    plt.plot([s[0], t[0]], [s[1], t[1]], 'k', linewidth=wd)
    plt.plot([p[0], t[0]], [p[1], t[1]], 'k', linewidth=wd)

    # wheel outline
    plt.plot([A[0], B[0]], [A[1], B[1]], 'k')
    plt.plot([A[0], C[0]], [A[1], C[1]], 'k')
    plt.plot([D[0], B[0]], [D[1], B[1]], 'k')
    plt.plot([D[0], C[0]], [D[1], C[1]], 'k')

    # dashed reference line from the fork midpoint to the vehicle center
    plt.plot([back_center[0], x], [back_center[1], y], 'o--', linewidth=wd)
    # plt.axes().set_aspect('equal','datalim')
    # plt.show()
# Example pose for manually exercising dpj(); the draw call itself is
# left commented out so importing this module has no plotting side effect.
x = 1
y = 2
theta = math.pi/6
steer = math.pi/3
# dpj(x,y,theta,steer)
| [
"kartikprakash3775@gmail.com"
] | kartikprakash3775@gmail.com |
f99707e466fb45f4b828045d6ea2f479b3607a9e | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4145/codes/1785_1593.py | bdc0b42949e4321e6639bacb31776578fbdc14d2 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from numpy import*
# Read a vector of grades and print its weighted mean, where grade k
# (0-based) is weighted by k+1 and the result is rounded to 2 decimals.
v = array(eval(input("vetor de notas: ")))
#v=input("vetor de notas: ")
#vv=arange(v,dtype ==float)
i = 1
n = 0
c = 0
d = 0
# Multiply each grade by its weight and accumulate the weight total.
while i < size(v):
    v[c] = v[c] * i
    c = c + 1
    d = d + i
    i = i + 1
m = sum(v) / d
#print(round(n/d,2))
print(round(m, 2))
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
0ef05a08b6f483dac468d83fbee9b86c0391c9de | 28a7feb26470d0d6743454222b74abadf05af449 | /dcase_util/utils/timer.py | 79247276c5e10e9961884ce814bfae551278dea5 | [
"MIT"
] | permissive | DCASE-REPO/dcase_util | de322c900388de66af3398af294e095e28392beb | a2694b0b9ad4592c9c27c935fb92b0e5751b8ab4 | refs/heads/master | 2023-06-08T08:06:23.525652 | 2022-06-09T22:05:38 | 2022-06-09T22:05:38 | 110,171,294 | 137 | 41 | MIT | 2022-06-09T22:05:39 | 2017-11-09T22:03:27 | Python | UTF-8 | Python | false | false | 1,526 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import datetime
import time
class Timer(object):
    """Simple wall-clock timer, usable directly or as a context manager."""

    def __init__(self):
        # Initialize internal properties
        self._start = None    # time.time() at the last start()
        self._elapsed = None  # seconds measured by the last stop()

    def start(self):
        """Start timer.

        Returns
        -------
        self

        """
        self._elapsed = None
        self._start = time.time()
        return self

    def stop(self):
        """Stop timer.

        Returns
        -------
        float
            Elapsed time in seconds since the timer was started

        """
        self._elapsed = (time.time() - self._start)
        return self._elapsed

    def elapsed(self):
        """Return elapsed time in seconds since timer was started.

        Can be used without stopping the timer.

        Returns
        -------
        float
            Seconds since timer was started

        """
        return time.time() - self._start

    def get_string(self, elapsed=None):
        """Get elapsed time in a string format.

        Parameters
        ----------
        elapsed : float
            Elapsed time in seconds. If None, the time since start() is used.
            Default value None

        Returns
        -------
        str
            Time delta between start and stop

        """
        if elapsed is None:
            elapsed = (time.time() - self._start)

        return str(datetime.timedelta(seconds=elapsed))

    def __enter__(self):
        # Return self so `with Timer() as t:` binds the timer instance.
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.stop()
| [
"toni.heittola@gmail.com"
] | toni.heittola@gmail.com |
22476f16fbbdba9d4cd70f408ea0265dce33ddf2 | e522dc3b8ae16fb6adf8c679c2fcd61e06979f29 | /example/serial_example_2.py | c0f03b4ff72605d2fc8273faa36f0f388dd3043c | [
"MIT"
] | permissive | amaork/raspi-io | 96e92330555e7700f54633f582efbc7620f8b10b | aaea4532569010a64f3c54036b9db7eb81515d1a | refs/heads/master | 2021-09-17T15:27:43.853195 | 2021-08-27T08:51:24 | 2021-08-27T08:51:24 | 94,192,125 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | #!/usr/bin/env python3.5
from raspi_io import Serial
import raspi_io.utility as utility
if __name__ == "__main__":
    cnt = 0
    # Open ttyS0 on the first raspi-io server found on the network and
    # poll it one byte at a time forever (1 s read timeout).
    port = Serial(utility.scan_server()[0], '/dev/ttyS0', 115200, timeout=1)
    while True:
        port.read(1)
| [
"amaork@gmail.com"
] | amaork@gmail.com |
153829a56bfdcb6f0bf9cf971b037d569141c8b2 | a8750439f200e4efc11715df797489f30e9828c6 | /codechef/CFR_383_2_742_A.py | b5af70d5e071990b82d467c9a9062dbe056f81f8 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py |
# -*- coding: utf-8 -*-
# @Date : 2018-10-02 08:00:37
# @Author : raj lath (oorja.halt@gmail.com)
# @Link : link
# @Version : 1.0.0
from sys import stdin
max_val=int(10e12)
min_val=int(-10e12)
def read_int() : return int(stdin.readline())
def read_ints() : return [int(x) for x in stdin.readline().split()]
def read_str() : return input()
def read_strs() : return [x for x in stdin.readline().split()]
print(pow(1378, read_int(), 10)) | [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
98039b25a467025b1eb7152af7fecb84cafc6724 | 4626631c5e68a13ed4dde041212da39d344d74d9 | /examples/scripts/define-user-roles.py | 8066ecfe60673ba22a2b7e702350628e5d50c9c2 | [
"MIT"
] | permissive | xod442/python-hpOneView | a1482677e3252dabf1e14f9349c119428331089f | b78fb81cba34992bb84ed3814aae04ce05ef913f | refs/heads/master | 2021-01-18T05:53:42.466348 | 2015-08-11T15:59:16 | 2015-08-11T15:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,513 | py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
if sys.version_info < (3, 4):
    raise Exception("Must use Python 3.4 or later")
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA (as 'no support') if it still needs acceptance.

    Any error while checking/accepting is printed but not propagated so the
    caller can still attempt a login.
    """
    # See if we need to accept the EULA before we try to log in
    try:
        if con.get_eula_status() is True:
            print("EULA display needed")
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credential dict; print on failure."""
    try:
        con.login(credential)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit pass through.
        print('Login failed')
def getrole(sec):
    """Fetch and pretty-print all users defined on the appliance."""
    users = sec.get_users()
    pprint(users)
def setroles(sec, name, roles):
    """Assign multiple roles (a list of role names) to user `name`.

    Valid roles:
      'Read only', 'Full', 'Backup administrator', 'Network administrator',
      'Server administrator', 'Storage administrator'
    """
    # Join the list before concatenating; `' role: ' + roles` raised a
    # TypeError when roles is a list.
    print('Set User: ' + name + ' roles: ' + ', '.join(roles))
    sec.set_user_roles(name, roles)
def setrole(sec, name, role):
    """Assign a single role to an existing user.

    Valid roles:
        'Read only', 'Full', 'Backup administrator', 'Network administrator',
        'Server administrator', 'Storage administrator'

    Args:
        sec: hpOneView security API wrapper.
        name: user name to modify.
        role: role name to assign.
    """
    # str() for consistency with setroles() and safety against non-string input;
    # output is unchanged for the normal string case.
    print('Set User: ' + name + ' role: ' + str(role))
    sec.set_user_role(name, role)
def main():
    """Parse command-line arguments and either list users or assign a role.

    With ``-g`` the appliance's user accounts are printed and the script
    exits; with ``-n NAME`` the named user is given the 'Read only' role.
    """
    parser = argparse.ArgumentParser(add_help=True,
                                     formatter_class=argparse.RawTextHelpFormatter,
                                     description='''
    Define User Roles
    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format''')
    # NOTE(review): -x/upass is accepted but never used in this script; kept
    # for command-line compatibility.
    parser.add_argument('-x', dest='upass', required=False,
                        help='''
    New user password''')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-g', dest='getrole', action='store_true',
                       help='''
    Display the users and exit''')
    group.add_argument('-n', dest='name',
                       help='''
    Username to add''')
    args = parser.parse_args()
    credential = {'userName': args.user, 'password': args.passwd}
    con = hpov.connection(args.host)
    # Only the security API is used below; the original also instantiated
    # hpov.servers/networking/settings wrappers that were never referenced.
    sec = hpov.security(con)
    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)
    login(con, credential)
    acceptEULA(con)
    if args.getrole:
        getrole(sec)
        sys.exit()
    setrole(sec, args.name, 'Read only')
    # setroles(sec, args.name, ['Backup administrator', 'Network administrator'])
# Script entry point: argparse (used by main) is imported here because it is
# only needed when the file is executed directly.  NOTE(review): sys is
# re-imported although it is already imported at module top — harmless.
if __name__ == '__main__':
    import sys
    import argparse
    sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"troy@debdev.org"
] | troy@debdev.org |
1a6c349482a32dd6f302abf1c4292833b754d0c1 | f151d2e8ce0f09069f76a2719fcc4bc106f90e15 | /config.py | afe05ab4494fe3e5491ab130e7960f456b01545f | [] | no_license | Ali-Khakpash/flask-admin | b8d71e85edb644f8f3754ea8bdbcc8f79e0425e3 | f2beab858368dabe5c9f48b2e41ff8ddbca0fdae | refs/heads/master | 2020-12-02T04:10:38.978578 | 2020-06-05T09:47:17 | 2020-06-05T09:47:17 | 230,882,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base Flask configuration shared by every environment."""
    #SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    # NOTE(review): a fresh random key is generated on every process start,
    # which invalidates existing sessions/CSRF tokens after each restart —
    # confirm this is intended before deploying.
    SECRET_KEY = os.urandom(24)
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
    # Admin address is taken from the environment; None when unset.
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    @staticmethod
    def init_app(app):
        # Hook for environment-specific initialisation; base class does nothing.
        pass
class DevelopmentConfig(Config):
    """Configuration for local development (debug mode enabled)."""
    DEBUG = True
class TestingConfig(Config):
    """Configuration used while running the test suite."""
    TESTING = True
    # NOTE(review): empty database URI — SQLAlchemy cannot connect until filled in.
    SQLALCHEMY_DATABASE_URI = ''
class ProductionConfig(Config):
    """Configuration for production deployments."""
    # NOTE(review): empty database URI — must be configured before use.
    SQLALCHEMY_DATABASE_URI = ''
# Map of configuration names (as passed to the app factory) to config classes.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
"ali.khakpash@gmail.com"
] | ali.khakpash@gmail.com |
c377866aad88f931fedcf91312e31f92701861f1 | 44e1f22280921216c8ef9acabec761cbe450030a | /src/models/train_funcs.py | 4e4419dcd6969838bb147df62c7aa30da36305f0 | [] | no_license | EstherWMaina/kenya-crop-mask | 69e727874ad2305f6a1fc159061f85f632b4f795 | b51c21e73c296b70ffa79ebd57162d23553e99a1 | refs/heads/master | 2023-06-15T09:50:48.156473 | 2021-06-01T19:04:15 | 2021-06-01T19:04:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from argparse import Namespace
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
def train_model(model: pl.LightningModule, hparams: Namespace) -> pl.LightningModule:
    """Fit *model* with early stopping on ``val_loss`` and return it.

    Args:
        model: LightningModule to train; it must log a ``val_loss`` metric.
        hparams: namespace providing ``patience``, ``max_epochs`` and
            ``data_folder``.

    Returns:
        The same model instance after training.
    """
    # Stop once val_loss has not improved for `patience` validation checks.
    early_stop_callback = EarlyStopping(
        monitor="val_loss", min_delta=0.00, patience=hparams.patience, verbose=True, mode="min",
    )
    # NOTE(review): `default_save_path` and `early_stop_callback` are arguments
    # of an old (<1.0) pytorch-lightning Trainer API — confirm the pinned version.
    trainer = pl.Trainer(
        default_save_path=hparams.data_folder,
        max_epochs=hparams.max_epochs,
        early_stop_callback=early_stop_callback,
    )
    trainer.fit(model)
    return model
| [
"gabriel.tseng@mail.mcgill.ca"
] | gabriel.tseng@mail.mcgill.ca |
8f99742151ed8f40985dfb4fd6b45b2ef1a0ae0d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/I_to_M_Gk3_no_pad/pyramid_tight_crop_size256_pad20_jit15/pyr_3s/bce_s001_tv_s0p1_L8/step11_L2345678.py | 621e5a5f3d41ee90f7032450b516ccccb9fcc450 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,834 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# 按F5執行時, 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~ 才可 import step10_a.py 喔!
code_exe_dir = os.path.dirname(code_exe_path) ### 目前執行 step10_b.py 的 dir
if(os.getcwd() != code_exe_dir): ### 如果 不是在 step10_b.py 的資料夾, 自動幫你切過去~
os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
import Exps_7_v3.I_to_M_Gk3_no_pad.pyramid_tight_crop_size256_pad20_jit15.pyr_0s.bce_s001_tv_s0p1_L8.step10_a as L8_0side
import Exps_7_v3.I_to_M_Gk3_no_pad.pyramid_tight_crop_size256_pad20_jit15.pyr_1s.bce_s001_tv_s0p1_L8.step10_a as L8_1side
import Exps_7_v3.I_to_M_Gk3_no_pad.pyramid_tight_crop_size256_pad20_jit15.pyr_2s.bce_s001_tv_s0p1_L8.step10_a as L8_2side
import step10_a as L8_3side
#################################################################################################################################################################################################################################################################################################################################################################################################
########
# 1side_1
########
ch032_1side_1__23side_all = [
[L8_1side.ch032_1side_1 , L8_3side.empty , ],
[L8_2side.ch032_1side_1__2side_1 , L8_3side.ch032_1side_1__2side_1__3side_1 , ],
]
########
# 1side_2
########
ch032_1side_2__23side_all = [
[L8_1side.ch032_1side_2 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_2__2side_1 , L8_3side.ch032_1side_2__2side_1__3side_1 , L8_3side.empty , ],
[L8_2side.ch032_1side_2__2side_2 , L8_3side.ch032_1side_2__2side_2__3side_1 , L8_3side.ch032_1side_2__2side_2__3side_2 , ],
]
########
# 1side_3
########
ch032_1side_3__23side_all = [
[L8_1side.ch032_1side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_3__2side_1 , L8_3side.ch032_1side_3__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_3__2side_2 , L8_3side.ch032_1side_3__2side_2__3side_1 , L8_3side.ch032_1side_3__2side_2__3side_2 , L8_3side.empty , ],
[L8_2side.ch032_1side_3__2side_3 , L8_3side.ch032_1side_3__2side_3__3side_1 , L8_3side.ch032_1side_3__2side_3__3side_2 , L8_3side.ch032_1side_3__2side_3__3side_3 , ],
]
########
# 1side_4
########
ch032_1side_4__23side_all = [
[L8_1side.ch032_1side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_1 , L8_3side.ch032_1side_4__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_2 , L8_3side.ch032_1side_4__2side_2__3side_1 , L8_3side.ch032_1side_4__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_3 , L8_3side.ch032_1side_4__2side_3__3side_1 , L8_3side.ch032_1side_4__2side_3__3side_2 , L8_3side.ch032_1side_4__2side_3__3side_3 , L8_3side.empty , ],
[L8_2side.ch032_1side_4__2side_4 , L8_3side.ch032_1side_4__2side_4__3side_1 , L8_3side.ch032_1side_4__2side_4__3side_2 , L8_3side.ch032_1side_4__2side_4__3side_3 , L8_3side.ch032_1side_4__2side_4__3side_4 , ],
]
########
# 1side_5
########
ch032_1side_5__23side_all = [
[L8_1side.ch032_1side_5 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_1 , L8_3side.ch032_1side_5__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_2 , L8_3side.ch032_1side_5__2side_2__3side_1 , L8_3side.ch032_1side_5__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_3 , L8_3side.ch032_1side_5__2side_3__3side_1 , L8_3side.ch032_1side_5__2side_3__3side_2 , L8_3side.ch032_1side_5__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_4 , L8_3side.ch032_1side_5__2side_4__3side_1 , L8_3side.ch032_1side_5__2side_4__3side_2 , L8_3side.ch032_1side_5__2side_4__3side_3 , L8_3side.ch032_1side_5__2side_4__3side_4 , L8_3side.empty , ],
[L8_2side.ch032_1side_5__2side_5 , L8_3side.ch032_1side_5__2side_5__3side_1 , L8_3side.ch032_1side_5__2side_5__3side_2 , L8_3side.ch032_1side_5__2side_5__3side_3 , L8_3side.ch032_1side_5__2side_5__3side_4 , L8_3side.ch032_1side_5__2side_5__3side_5 , ],
]
########
# 1side_6
########
ch032_1side_6__23side_all = [
[L8_1side.ch032_1side_6 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_1 , L8_3side.ch032_1side_6__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_2 , L8_3side.ch032_1side_6__2side_2__3side_1 , L8_3side.ch032_1side_6__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_3 , L8_3side.ch032_1side_6__2side_3__3side_1 , L8_3side.ch032_1side_6__2side_3__3side_2 , L8_3side.ch032_1side_6__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_4 , L8_3side.ch032_1side_6__2side_4__3side_1 , L8_3side.ch032_1side_6__2side_4__3side_2 , L8_3side.ch032_1side_6__2side_4__3side_3 , L8_3side.ch032_1side_6__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_5 , L8_3side.ch032_1side_6__2side_5__3side_1 , L8_3side.ch032_1side_6__2side_5__3side_2 , L8_3side.ch032_1side_6__2side_5__3side_3 , L8_3side.ch032_1side_6__2side_5__3side_4 , L8_3side.ch032_1side_6__2side_5__3side_5 , L8_3side.empty , ],
[L8_2side.ch032_1side_6__2side_6 , L8_3side.ch032_1side_6__2side_6__3side_1 , L8_3side.ch032_1side_6__2side_6__3side_2 , L8_3side.ch032_1side_6__2side_6__3side_3 , L8_3side.ch032_1side_6__2side_6__3side_4 , L8_3side.ch032_1side_6__2side_6__3side_5 , L8_3side.ch032_1side_6__2side_6__3side_6 , ],
]
########
# 1side_7
########
ch032_1side_7__23side_all = [
[L8_1side.ch032_1side_7 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_1 , L8_3side.ch032_1side_7__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_2 , L8_3side.ch032_1side_7__2side_2__3side_1 , L8_3side.ch032_1side_7__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_3 , L8_3side.ch032_1side_7__2side_3__3side_1 , L8_3side.ch032_1side_7__2side_3__3side_2 , L8_3side.ch032_1side_7__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_4 , L8_3side.ch032_1side_7__2side_4__3side_1 , L8_3side.ch032_1side_7__2side_4__3side_2 , L8_3side.ch032_1side_7__2side_4__3side_3 , L8_3side.ch032_1side_7__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_5 , L8_3side.ch032_1side_7__2side_5__3side_1 , L8_3side.ch032_1side_7__2side_5__3side_2 , L8_3side.ch032_1side_7__2side_5__3side_3 , L8_3side.ch032_1side_7__2side_5__3side_4 , L8_3side.ch032_1side_7__2side_5__3side_5 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_6 , L8_3side.ch032_1side_7__2side_6__3side_1 , L8_3side.ch032_1side_7__2side_6__3side_2 , L8_3side.ch032_1side_7__2side_6__3side_3 , L8_3side.ch032_1side_7__2side_6__3side_4 , L8_3side.ch032_1side_7__2side_6__3side_5 , L8_3side.ch032_1side_7__2side_6__3side_6 , L8_3side.empty , ],
[L8_2side.ch032_1side_7__2side_7 , L8_3side.ch032_1side_7__2side_7__3side_1 , L8_3side.ch032_1side_7__2side_7__3side_2 , L8_3side.ch032_1side_7__2side_7__3side_3 , L8_3side.ch032_1side_7__2side_7__3side_4 , L8_3side.ch032_1side_7__2side_7__3side_5 , L8_3side.ch032_1side_7__2side_7__3side_6 , L8_3side.ch032_1side_7__2side_7__3side_7 , ],
]
########
# 1side_8
########
ch032_1side_8__23side_all = [
[L8_1side.ch032_1side_8 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_1 , L8_3side.ch032_1side_8__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_2 , L8_3side.ch032_1side_8__2side_2__3side_1 , L8_3side.ch032_1side_8__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_3 , L8_3side.ch032_1side_8__2side_3__3side_1 , L8_3side.ch032_1side_8__2side_3__3side_2 , L8_3side.ch032_1side_8__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_4 , L8_3side.ch032_1side_8__2side_4__3side_1 , L8_3side.ch032_1side_8__2side_4__3side_2 , L8_3side.ch032_1side_8__2side_4__3side_3 , L8_3side.ch032_1side_8__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_5 , L8_3side.ch032_1side_8__2side_5__3side_1 , L8_3side.ch032_1side_8__2side_5__3side_2 , L8_3side.ch032_1side_8__2side_5__3side_3 , L8_3side.ch032_1side_8__2side_5__3side_4 , L8_3side.ch032_1side_8__2side_5__3side_5 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_6 , L8_3side.ch032_1side_8__2side_6__3side_1 , L8_3side.ch032_1side_8__2side_6__3side_2 , L8_3side.ch032_1side_8__2side_6__3side_3 , L8_3side.ch032_1side_8__2side_6__3side_4 , L8_3side.ch032_1side_8__2side_6__3side_5 , L8_3side.ch032_1side_8__2side_6__3side_6 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_7 , L8_3side.ch032_1side_8__2side_7__3side_1 , L8_3side.ch032_1side_8__2side_7__3side_2 , L8_3side.ch032_1side_8__2side_7__3side_3 , L8_3side.ch032_1side_8__2side_7__3side_4 , L8_3side.ch032_1side_8__2side_7__3side_5 , L8_3side.ch032_1side_8__2side_7__3side_6 , L8_3side.ch032_1side_8__2side_7__3side_7 , L8_3side.empty , ],
[L8_2side.ch032_1side_8__2side_8 , L8_3side.ch032_1side_8__2side_8__3side_1 , L8_3side.ch032_1side_8__2side_8__3side_2 , L8_3side.ch032_1side_8__2side_8__3side_3 , L8_3side.ch032_1side_8__2side_8__3side_4 , L8_3side.ch032_1side_8__2side_8__3side_5 , L8_3side.ch032_1side_8__2side_8__3side_6 , L8_3side.ch032_1side_8__2side_8__3side_7 , L8_3side.ch032_1side_8__2side_8__3side_8 , ],
]
########
# 1side_9
########
ch032_1side_9__23side_all = [
[L8_1side.ch032_1side_9 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_1 , L8_3side.ch032_1side_9__2side_1__3side_1 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_2 , L8_3side.ch032_1side_9__2side_2__3side_1 , L8_3side.ch032_1side_9__2side_2__3side_2 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_3 , L8_3side.ch032_1side_9__2side_3__3side_1 , L8_3side.ch032_1side_9__2side_3__3side_2 , L8_3side.ch032_1side_9__2side_3__3side_3 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_4 , L8_3side.ch032_1side_9__2side_4__3side_1 , L8_3side.ch032_1side_9__2side_4__3side_2 , L8_3side.ch032_1side_9__2side_4__3side_3 , L8_3side.ch032_1side_9__2side_4__3side_4 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_5 , L8_3side.ch032_1side_9__2side_5__3side_1 , L8_3side.ch032_1side_9__2side_5__3side_2 , L8_3side.ch032_1side_9__2side_5__3side_3 , L8_3side.ch032_1side_9__2side_5__3side_4 , L8_3side.ch032_1side_9__2side_5__3side_5 , L8_3side.empty , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_6 , L8_3side.ch032_1side_9__2side_6__3side_1 , L8_3side.ch032_1side_9__2side_6__3side_2 , L8_3side.ch032_1side_9__2side_6__3side_3 , L8_3side.ch032_1side_9__2side_6__3side_4 , L8_3side.ch032_1side_9__2side_6__3side_5 , L8_3side.ch032_1side_9__2side_6__3side_6 , L8_3side.empty , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_7 , L8_3side.ch032_1side_9__2side_7__3side_1 , L8_3side.ch032_1side_9__2side_7__3side_2 , L8_3side.ch032_1side_9__2side_7__3side_3 , L8_3side.ch032_1side_9__2side_7__3side_4 , L8_3side.ch032_1side_9__2side_7__3side_5 , L8_3side.ch032_1side_9__2side_7__3side_6 , L8_3side.ch032_1side_9__2side_7__3side_7 , L8_3side.empty , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_8 , L8_3side.ch032_1side_9__2side_8__3side_1 , L8_3side.ch032_1side_9__2side_8__3side_2 , L8_3side.ch032_1side_9__2side_8__3side_3 , L8_3side.ch032_1side_9__2side_8__3side_4 , L8_3side.ch032_1side_9__2side_8__3side_5 , L8_3side.ch032_1side_9__2side_8__3side_6 , L8_3side.ch032_1side_9__2side_8__3side_7 , L8_3side.ch032_1side_9__2side_8__3side_8 , L8_3side.empty , ],
[L8_2side.ch032_1side_9__2side_9 , L8_3side.ch032_1side_9__2side_9__3side_1 , L8_3side.ch032_1side_9__2side_9__3side_2 , L8_3side.ch032_1side_9__2side_9__3side_3 , L8_3side.ch032_1side_9__2side_9__3side_4 , L8_3side.ch032_1side_9__2side_9__3side_5 , L8_3side.ch032_1side_9__2side_9__3side_6 , L8_3side.ch032_1side_9__2side_9__3side_7 , L8_3side.ch032_1side_9__2side_9__3side_8 , L8_3side.ch032_1side_9__2side_9__3side_9 , ],
]
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
f09b8f651cefa953c0742615d4f262d8e9c98712 | e4ab984c6d27167849f6c6e2d8ced3c0ee167c7c | /Edabit/Say_Hello_to_Guests.py | f28b89fb446b0024e359066dc21b26dc4ffc8239 | [] | no_license | ravalrupalj/BrainTeasers | b3bc2a528edf05ef20291367f538cf214c832bf9 | c3a48453dda29fe016ff89f21f8ee8d0970a3cf3 | refs/heads/master | 2023-02-10T02:09:59.443901 | 2021-01-06T02:03:34 | 2021-01-06T02:03:34 | 255,720,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #Say Hello to Guests
#In this exercise you will have to:
#Take a list of names.
#Add "Hello" to every name.
#Make one big string with all greetings.
#The solution should be one string with a comma in between every "Hello (Name)".
#Each greeting has to be separated with a comma and a space.
#If you're given an empty list [], return an empty string "".
def greet_people(names):
    """Return one comma-separated string greeting every name.

    Each name becomes "Hello <name>"; greetings are joined with ", ".
    An empty list yields an empty string.
    """
    return ', '.join('Hello ' + name for name in names)
# Demo calls — the comment under each shows the expected output.
print(greet_people(["Joe"]))
#➞ "Hello Joe"
print(greet_people(["Angela", "Joe"]) )
#➞ "Hello Angela, Hello Joe"
print(greet_people(["Frank", "Angela", "Joe"]) )
#➞ "Hello Frank, Hello Angela, Hello Joe"
| [
"63676082+ravalrupalj@users.noreply.github.com"
] | 63676082+ravalrupalj@users.noreply.github.com |
db3ccdd045b929d6f0651b9eddf8263751f07e22 | 2b8f1b067a6602a6520e9846a2df8b83a359623a | /BOJ/단계별로 풀어보기/18_ 그리디 알고리즘/11047.py | 261066b1e4355ae315db35c339efe4c72692125d | [] | no_license | ymink716/PS | 3f9df821a1d4db110cd9d56b09b4c1d756951dd8 | e997ecf5a3bec1d840486b8d90b934ae1cbafe94 | refs/heads/master | 2023-08-18T18:21:45.416083 | 2023-08-16T07:26:18 | 2023-08-16T07:26:18 | 218,685,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | n, k = map(int, input().split())
coins = [int(input()) for _ in range(n)]
count = 0
for i in range(n-1, -1, -1):
if k >= coins[i]:
count += (k // coins[i])
k = k % coins[i]
print(count) | [
"ymink716@gmail.com"
] | ymink716@gmail.com |
f6c6735da757355ba46dec2b33f48b3df3694037 | 45129489b5556a70d3caa6020b0f035de8019a94 | /probn/01.04/27.py | 2b62b314c4fb4d838ef574020e8bdfce27aa20b8 | [] | no_license | trofik00777/EgeInformatics | 83d853b1e8fd1d1a11a9d1f06d809f31e6f986c0 | 60f2587a08d49ff696f50b68fe790e213c710b10 | refs/heads/main | 2023-06-06T13:27:34.627915 | 2021-06-23T20:15:28 | 2021-06-23T20:15:28 | 362,217,172 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | n = int(input())
h1, h2, h_a = 0, 0, 0
is_ch_nech = 0
diff = []
for _ in range(n):
a, b, c = sorted(map(int, input().split()))
h1 += a
h2 += b
h_a += c
if (a + b) % 2:
is_ch_nech += 1
ac, bc = c - a, c - b
k = []
if ac % 2:
k.append(ac)
if bc % 2:
k.append(bc)
if k:
diff.append(min(k))
diff.sort()
if h1 % 2 == 0 and h2 % 2 == 0:
print("cool")
print(h_a)
else:
if (h1 + h2) % 2 == 0:
if is_ch_nech > 1:
print("chnch")
print(h_a)
else:
print("not chnch")
print(h_a - diff[0] - diff[1])
else:
print("bad")
| [
"noreply@github.com"
] | trofik00777.noreply@github.com |
bcadedacfc6c370a4221aaeaa5438df25888438c | 13c2639490aa8cc3ecf891ae3422f0e105dd886e | /api/apps.py | c3118076780bbe0eedb6d061b647017eb6d9fecb | [] | no_license | maratovision/rest_api | d32fdfc8d5d8968d2c8ef77aaed05b25d6fa26a0 | b734f3cf1c626f4043dbaa0fa7a6f41ebf9cdcae | refs/heads/main | 2023-04-08T12:54:40.736337 | 2021-04-08T20:24:15 | 2021-04-08T20:24:15 | 356,038,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from django.apps import AppConfig
class RestConfig(AppConfig):
    """Django application configuration for the ``api`` app."""
    name = 'api'
| [
"maratovision@gmail.com"
] | maratovision@gmail.com |
f496c9ca6c2d179194b41af953114afe4dbcd9df | 32aa7d3f9a90bafaf0ff89d01fe71e970cbe64a6 | /pytext/torchscript/tensorizer/roberta.py | 7a72df0d120cb39d6dced64be2d96f6fddf2a497 | [
"BSD-3-Clause"
] | permissive | HarounH/pytext | 67135563939723c47076de3c4d213549ba6246b6 | 6dd8fccac0b366782b5319af6236d2d337a25b42 | refs/heads/master | 2020-09-12T21:34:35.749162 | 2019-11-18T23:07:53 | 2019-11-18T23:12:13 | 222,563,726 | 0 | 0 | NOASSERTION | 2019-11-18T23:17:42 | 2019-11-18T23:17:41 | null | UTF-8 | Python | false | false | 3,733 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional, Tuple
import torch
from pytext.torchscript.utils import pad_2d, pad_2d_mask
from .bert import ScriptBERTTensorizerBase
class ScriptRoBERTaTensorizer(ScriptBERTTensorizerBase):
    """TorchScript tensorizer for RoBERTa models.

    Unlike the BERT variant, every sentence is wrapped with its own
    BOS/EOS markers (``use_eos_token_for_bos=False``).
    """

    @torch.jit.script_method
    def _lookup_tokens(self, tokens: List[Tuple[str, int, int]]) -> List[int]:
        """Map (token, start, end) triples to vocab ids, discarding offsets."""
        # vocab_lookup returns (ids, start_indices, end_indices); only the ids
        # are needed here.
        return self.vocab_lookup(
            tokens,
            bos_idx=self.vocab.bos_idx,
            eos_idx=self.vocab.eos_idx,
            use_eos_token_for_bos=False,
            max_seq_len=self.max_seq_len,
        )[0]
class ScriptRoBERTaTensorizerWithIndices(ScriptBERTTensorizerBase):
    """RoBERTa tensorizer that also returns per-token character start/end
    offsets and position ids — presumably for span-level tasks (TODO confirm
    against callers).
    """

    @torch.jit.script_method
    def _lookup_tokens(
        self, tokens: List[Tuple[str, int, int]]
    ) -> Tuple[List[int], List[int], List[int]]:
        """Return (token ids, start offsets, end offsets) for one sentence."""
        return self.vocab_lookup(
            tokens,
            bos_idx=self.vocab.bos_idx,
            eos_idx=self.vocab.eos_idx,
            use_eos_token_for_bos=False,
            max_seq_len=self.max_seq_len,
        )

    @torch.jit.script_method
    def numberize(
        self, text_row: Optional[List[str]], token_row: Optional[List[List[str]]]
    ) -> Tuple[List[int], int, List[int], List[int], List[int]]:
        """Convert one example (raw texts or pre-tokenized rows) into
        (token ids, sequence length, start offsets, end offsets, positions).
        """
        token_ids: List[int] = []
        seq_len: int = 0
        start_indices: List[int] = []
        end_indices: List[int] = []
        positions: List[int] = []
        per_sentence_tokens: List[List[Tuple[str, int, int]]] = self.tokenize(
            text_row, token_row
        )
        # Concatenate all sentences into one flat sequence, wrapping each
        # sentence's ids via _wrap_numberized_tokens.
        for idx, per_sentence_token in enumerate(per_sentence_tokens):
            lookup_ids, start_ids, end_ids = self._lookup_tokens(per_sentence_token)
            lookup_ids = self._wrap_numberized_tokens(lookup_ids, idx)
            token_ids.extend(lookup_ids)
            start_indices.extend(start_ids)
            end_indices.extend(end_ids)
        seq_len = len(token_ids)
        # Position ids are simply 0..seq_len-1 over the concatenated sequence.
        positions = [i for i in range(seq_len)]
        return token_ids, seq_len, start_indices, end_indices, positions

    @torch.jit.script_method
    def tensorize(
        self,
        texts: Optional[List[List[str]]] = None,
        tokens: Optional[List[List[List[str]]]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Numberize a batch and pad everything to rectangular long tensors.

        Returns (tokens, pad_mask, start_indices, end_indices, positions),
        each shaped (batch, max_seq_len_in_batch).
        """
        tokens_2d: List[List[int]] = []
        seq_len_2d: List[int] = []
        start_indices_2d: List[List[int]] = []
        end_indices_2d: List[List[int]] = []
        positions_2d: List[List[int]] = []
        for idx in range(self.batch_size(texts, tokens)):
            numberized: Tuple[
                List[int], int, List[int], List[int], List[int]
            ] = self.numberize(
                self.get_texts_by_index(texts, idx),
                self.get_tokens_by_index(tokens, idx),
            )
            tokens_2d.append(numberized[0])
            seq_len_2d.append(numberized[1])
            start_indices_2d.append(numberized[2])
            end_indices_2d.append(numberized[3])
            positions_2d.append(numberized[4])
        # pad_2d_mask pads the token ids and also returns the padding mask.
        tokens, pad_mask = pad_2d_mask(tokens_2d, pad_value=self.vocab.pad_idx)
        start_indices = torch.tensor(
            pad_2d(start_indices_2d, seq_lens=seq_len_2d, pad_idx=self.vocab.pad_idx),
            dtype=torch.long,
        )
        end_indices = torch.tensor(
            pad_2d(end_indices_2d, seq_lens=seq_len_2d, pad_idx=self.vocab.pad_idx),
            dtype=torch.long,
        )
        positions = torch.tensor(
            pad_2d(positions_2d, seq_lens=seq_len_2d, pad_idx=self.vocab.pad_idx),
            dtype=torch.long,
        )
        return tokens, pad_mask, start_indices, end_indices, positions
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e7c56cdf9130241750530b46da969e1b3999db5a | 01b7728c138818a43a967b1129c7cf328d1620c2 | /built-in/tickets/create_ticket.py | 2136677d04b3cce6bf50f1f074970c639d8e3b94 | [] | no_license | lliurex/n4d | 62d303947bc4ae0ff20cb1f2217e532ef17091a5 | 09e5f15acbb1ce584a074dc7540959258c165f66 | refs/heads/master | 2023-08-31T23:34:04.859965 | 2023-04-14T11:17:12 | 2023-04-14T11:17:12 | 133,468,639 | 0 | 1 | null | 2023-02-16T08:57:32 | 2018-05-15T06:22:13 | Python | UTF-8 | Python | false | false | 344 | py | import n4d.responses
def create_ticket(self, user):
    """Create a login ticket for *user* via the tickets manager.

    Returns an n4d success response when the manager reports success,
    otherwise a failed-call response with error code -5.
    """
    created = self.tickets_manager.create_ticket(user)
    if not created:
        CREATE_TICKET_ERROR = -5
        return n4d.responses.build_failed_call_response(CREATE_TICKET_ERROR, "Failed to create ticket")
    return n4d.responses.build_successful_call_response(True, "Ticket created for user %s" % user)
| [
"hectorgh@gmail.com"
] | hectorgh@gmail.com |
3a67c44f85c9bc3d0ad12206c20f27c4d045cce5 | 8e123e4d2859cba2ad62ddaa58c7998e8a984e28 | /dojo_survey/urls.py | 6d7962f50d01d48a6beb2d6b249cac64d336a4b3 | [] | no_license | Ktlim83/Sample_Form_Django | c8bc94b114c766222a2d7ce3ff1374705ba79631 | 24f71f1318a5325b81a2a00d6f0c34b3af7b1afd | refs/heads/master | 2022-10-24T15:25:05.932983 | 2020-06-17T00:31:24 | 2020-06-17T00:31:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | """dojo_survey URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
# Delegate every URL to the dojo_form app's URLconf.
urlpatterns = [
    path('', include ('dojo_form.urls')),
]
"63866293+Kweef206@users.noreply.github.com"
] | 63866293+Kweef206@users.noreply.github.com |
c2a1ec30d6c85e49248724ac2e7f709f702411e3 | 9df267613fe858c7f8dac85255fdf1f592c8bdf9 | /image/image-open.py | 97f958ba4fadfede8820597af1b5e029d31ce44f | [
"MIT"
] | permissive | martinmcbride/python-imaging-book-examples | 6ad44067dd20919ff6e8af0f482bc245a8338756 | 37e4ccf9b7b2fc3ff75b1fdb9f772de452a843b2 | refs/heads/main | 2023-07-17T12:01:19.851394 | 2021-08-22T20:49:46 | 2021-08-22T20:49:46 | 365,778,031 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # Author: Martin McBride
# Created: 2021-07-17
# Copyright (C) 2021, Martin McBride
# License: MIT
# Open an image
from PIL import Image
# Open an image of any supported format
image = Image.open("boat-small.jpg")
image.close()
# Only open PNG or JPEG images
image = Image.open("boat-small.jpg", formats=['PNG', 'JPEG'])
image.close()
# Only open PNG images. This will fail, because it is a JPEG file
# (Image.open raises PIL.UnidentifiedImageError, so the close() below never runs).
image = Image.open("boat-small.jpg", formats=['PNG'])
image.close()
"mcbride.martin@gmail.com"
] | mcbride.martin@gmail.com |
b40b07274f7adcd1bfcfcbb81396f652db3b129c | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/1501.circle-and-rectangle-overlapping/1501.circle-and-rectangle-overlapping.py | fa66d6e63ecac5efece6056e67d08b33212fa829 | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | class Solution:
def checkOverlap(self, radius: int, x_center: int, y_center: int, x1: int, y1: int, x2: int, y2: int) -> bool:
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
219dea222cde58156d3ae38cb078aea71abb1c9c | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/test_michelson_coding_KT1G72.py | 981ba4b2284f21132fc049c53a0e55ab6f03c747 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 5,342 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1G72(TestCase):
    def setUp(self):
        # Show full diffs for the large Micheline/Michelson comparisons below.
        self.maxDiff = None
    def test_michelson_parse_code_KT1G72(self):
        """Michelson source (.tz) parses to the expected Micheline JSON."""
        expected = get_data(
            path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/code_KT1G72.json')
        actual = michelson_to_micheline(get_data(
            path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/code_KT1G72.tz'))
        self.assertEqual(expected, actual)
def test_michelson_format_code_KT1G72(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/code_KT1G72.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/code_KT1G72.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_code_KT1G72(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/code_KT1G72.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_storage_KT1G72(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/storage_KT1G72.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/storage_KT1G72.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_storage_KT1G72(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/storage_KT1G72.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/storage_KT1G72.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_storage_KT1G72(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/storage_KT1G72.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onmWHA(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onmWHA.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onmWHA.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onmWHA(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onmWHA.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onmWHA.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onmWHA(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onmWHA.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onnQTu(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onnQTu.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onnQTu.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onnQTu(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onnQTu.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onnQTu.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onnQTu(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_onnQTu.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooGRZm(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_ooGRZm.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_ooGRZm.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooGRZm(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_ooGRZm.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_ooGRZm.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooGRZm(self):
expected = get_data(
path='contracts/KT1G72fc8TP3C7WgnaMB8uG3ZbDgfkJNBWEr/parameter_ooGRZm.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
fdd6cd19067be398e034485ffed4ce17161ee1f1 | 5acc20092ee93935594a7e0522924245a43e5531 | /decision_trees/plot_tree_regression.py | d4307a1f717e2be72689a8cbfde8de070109e9ce | [] | no_license | shengchaohua/sklearn-examples | aae2332c4382a57a70c1887777c125e6dc4579d6 | 1dac6a9b5e703185a8da1df7c724022fbd56a9e4 | refs/heads/master | 2020-05-05T01:19:20.037746 | 2019-10-18T08:55:01 | 2019-10-18T08:55:01 | 179,599,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random data set: 80 sorted x-values in [0, 5) with noisy sin(x)
# targets.  NOTE: the rng.rand calls must keep this exact order so the fixed
# seed (1) reproduces the same data on every run.
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)  # shape (80, 1): feature matrix
y = np.sin(X).ravel()                     # shape (80,): clean targets
y[::5] += 3 * (0.5 - rng.rand(16))        # add noise to every 5th target
# Fit regression model: two trees of different depth to contrast
# under-fitting (depth 2) with over-fitting (depth 5).
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict on a dense grid over the same interval
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the training points and both fitted step functions
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| [
"shengchaohua163@163.com"
] | shengchaohua163@163.com |
a67a2f111943e37b2de75f874d927ab0bbffa793 | bf20548c143fdaecc1d8b5746dab142414b27786 | /galaxy-tool-make-otu-table/make_otu_table.py | 22524e4c043b77b7ef423080b9d6b2de61662c31 | [] | no_license | zeromtmu/galaxy-tool-temp-2019 | e9f58956b014e2e4e9260b028c14549f90756f05 | 704c3b850e8ddf5420dc458a0282717ab2268c40 | refs/heads/master | 2021-10-25T05:02:55.328975 | 2019-04-01T11:40:41 | 2019-04-01T11:40:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,777 | py | #!/usr/bin/python
"""
"""
import sys, os, argparse, string
import glob
from Bio import SeqIO
from subprocess import call, Popen, PIPE
# Retrieve the commandline arguments
parser = argparse.ArgumentParser(description='')
requiredArguments = parser.add_argument_group('required arguments')
# -i: zip archive containing the per-sample FASTQ/FASTA read files
requiredArguments.add_argument('-i', '--input', metavar='input zipfile', dest='inzip', type=str,
                    help='Inputfile in zip format', required=True)
# -t: whether the archive holds FASTQ or FASTA reads
requiredArguments.add_argument('-t', '--input_type', metavar='FASTQ or FASTA input', dest='input_type', type=str,
                    help='Sets the input type, FASTQ or FASTA', required=True)
# -c: clustering backend (USEARCH otus/unoise, VSEARCH, DADA2)
requiredArguments.add_argument('-c', '--cluster_command', metavar='otu or zotu(UNOISE)', dest='cluster', type=str,
                    help='Choice of clustering, usearch -cluster_otus or unoise', required=True, choices=['unoise', 'cluster_otus', 'vsearch', 'dada2','vsearch_unoise'])
# -of: working/output directory created by make_output_folders()
requiredArguments.add_argument('-of', '--folder_output', metavar='folder output', dest='out_folder', type=str,
                    help='Folder name for the output files', required=True)
# Optional tuning parameters (all passed to the external tools as strings)
requiredArguments.add_argument('-a', '--unoise_alpha', metavar='unoise_alpha', dest='unoise_alpha', type=str,
                    help='unoise_alpha value', required=False, nargs='?', default="2.0")
requiredArguments.add_argument('-cluster_id', '--cluster_id', metavar='Minimal cluster identity percentage', dest='clusterid', type=str,
                    help='Minimal cluster identity percentage', required=False, nargs='?', default="97")
requiredArguments.add_argument('-cluster_size', '--cluster_size', metavar='Minimal cluster size', dest='clustersize', type=str,
                    help='Minimal cluster size', required=False, nargs='?', default="1")
requiredArguments.add_argument('-abundance_minsize', metavar='minimal abundance', dest='abundance_minsize', type=str,
                    help='unoise minsize', required=False, nargs='?', default="1")
args = parser.parse_args()
def check_if_fasta(file):
    """Return True when *file* parses as FASTA with at least one record."""
    with open(file, "r") as handle:
        return any(SeqIO.parse(handle, "fasta"))
def extension_check(outputFolder):
    """Convert every uploaded read file to FASTA under <outputFolder>/fasta.

    FASTQ input is converted with awk; FASTA input is validated and renamed.
    Record headers are prefixed with "<samplename>." and all reads are
    appended to combined.fa (and combined_dada.fastq, kept for DADA2).
    Exits the tool when no usable input file is found.
    """
    files = [os.path.basename(x) for x in sorted(glob.glob(outputFolder + "/files/*"))]
    fileFound = False
    for x in files:
        if args.input_type == "FASTQ":
            if os.path.splitext(x)[1].lower() == ".fastq" or os.path.splitext(x)[1] == ".fq":
                # Sample name: extension dropped; "-", "." and " " mapped to "_" (Python 2 string.maketrans)
                fastafile = os.path.splitext(x)[0].translate((string.maketrans("-. ", "___"))) + ".fa"
                # FASTQ -> FASTA: keep the header (line 1 of 4) and the sequence (line 2 of 4)
                error = Popen(["awk '{if(NR%4==1) {printf(\">%s\\n\",substr($0,2));} else if(NR%4==2) print;}' " + outputFolder + "/files/" + x + " > "+outputFolder+"/fasta/" + fastafile], stdout=PIPE, stderr=PIPE, shell=True).communicate()[1].strip()
                admin_log(outputFolder, error=error, function="extension_check")
                #add new line after last sequence
                call(["sed -i '$a\\' "+outputFolder+"/fasta/" + fastafile], shell=True)
                #Add sample name to fasta file like >[samplename].description
                call(["sed 's/>/>" + fastafile[:-3] + "./' " + outputFolder + "/fasta/"+fastafile+" >> " + outputFolder + "/combined.fa"], shell=True)
                #DADA2 needs fastq files
                call(["cat " + outputFolder + "/files/"+x+" >> "+ outputFolder + "/combined_dada.fastq"], shell=True)
                fileFound = True
            else:
                admin_log(outputFolder, error=x+"\nWrong extension, no fastq file (.fastq, .fq) file will be ignored", function="extension_check")
        else:
            if check_if_fasta(outputFolder + "/files/" + x):
                fastafile = os.path.splitext(x)[0].translate((string.maketrans("-. ", "___"))) + ".fa"
                call(["mv", outputFolder + "/files/" + x, outputFolder + "/fasta/" + fastafile])
                call(["sed -i '$a\\' " + outputFolder + "/fasta/" + fastafile], shell=True)
                call(["sed 's/>/>" + fastafile[:-3] + "./' " + outputFolder + "/fasta/" + fastafile + " >> " + outputFolder + "/combined.fa"], shell=True)
                fileFound = True
            else:
                admin_log(outputFolder, error="This is not a fasta file, file will be ignored: " + x, function="extension_check")
    # Raw uploads are no longer needed once converted
    Popen(["rm", "-rf", outputFolder + "/files"], stdout=PIPE, stderr=PIPE)
    if not fileFound:
        admin_log(outputFolder, error="Tool stopped, no "+args.input_type+" files found", function="extension_check")
        exit()
def admin_log(outputFolder, out=None, error=None, function=""):
    """Append the stdout/stderr of one pipeline step to <outputFolder>/log.log."""
    divider = 60 * "="
    with open(outputFolder + "/log.log", 'a') as log_handle:
        if out:
            log_handle.write("%s \n%s\n%s\n\n" % (function, divider, out))
        if error:
            log_handle.write("%s\n%s\n%s\n\n" % (function, divider, error))
def remove_files(outputFolder):
    """Drop intermediate files so only the final results remain."""
    call(["rm", "-rf", outputFolder + "/fasta"])
    if args.cluster == "dada2":
        call(["rm", outputFolder + "/combined_dada.fastq", outputFolder + "/combined_dada_filtered.fastq"])
    else:
        call(["rm", outputFolder + "/combined.fa", outputFolder + "/uniques.fa"])
def vsearch_derep_fulllength(outputFolder):
    """Collapse combined.fa into unique sequences with size annotations."""
    command = ["vsearch", "--derep_fulllength", outputFolder + "/combined.fa",
               "--output", outputFolder + "/uniques.fa", "-sizeout"]
    out, error = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
    admin_log(outputFolder, out=out, error=error, function="derep_fulllength")
def usearch_cluster(outputFolder):
    """Cluster the dereplicated reads into OTU/zOTU centroid sequences.

    The backend is selected by args.cluster; every branch ends with the
    centroids written to <outputFolder>/otu_sequences.fa.
    """
    #sort by size
    out, error = Popen(["vsearch", "--sortbysize", outputFolder+"/uniques.fa", "--output", outputFolder+"/uniques_sorted.fa", "--minsize", args.abundance_minsize], stdout=PIPE, stderr=PIPE).communicate()
    admin_log(outputFolder, out=out, error=error, function="sortbysize")
    if args.cluster == "cluster_otus":
        # USEARCH UPARSE greedy OTU clustering
        out, error = Popen(["/home/galaxy/Tools/usearch/usearch11", "-cluster_otus", outputFolder+"/uniques_sorted.fa", "-uparseout", outputFolder+"/cluster_file.txt", "-otus", outputFolder+"/otu_sequences.fa", "-relabel", "Otu", "-fulldp"], stdout=PIPE, stderr=PIPE).communicate()
        admin_log(outputFolder, out=out, error=error, function="cluster_otus")
    if args.cluster == "unoise":
        # USEARCH UNOISE3 denoising; zOTU records are relabelled Otu1..OtuN below
        out, error = Popen(["/home/galaxy/Tools/usearch/usearch11","-unoise3", outputFolder+"/uniques_sorted.fa", "-unoise_alpha", args.unoise_alpha, "-minsize", args.abundance_minsize, "-tabbedout", outputFolder+"/cluster_file.txt", "-zotus", outputFolder+"/zotususearch.fa"], stdout=PIPE, stderr=PIPE).communicate()
        admin_log(outputFolder, out=out, error=error, function="unoise")
        count = 1
        with open(outputFolder + "/zotususearch.fa", "rU") as handle, open(outputFolder + "/otu_sequences.fa", 'a') as newotu:
            for record in SeqIO.parse(handle, "fasta"):
                newotu.write(">Otu" + str(count) + "\n")
                newotu.write(str(record.seq) + "\n")
                count += 1
        Popen(["rm", outputFolder + "/zotususearch.fa"])
    if args.cluster == "vsearch":
        # VSEARCH: de-novo chimera removal, then clustering at args.clusterid identity
        out, error = Popen(["vsearch", "--uchime_denovo", outputFolder+"/uniques_sorted.fa", "--sizein", "--fasta_width", "0", "--nonchimeras", outputFolder+"/non_chimera.fa"], stdout=PIPE, stderr=PIPE).communicate()
        admin_log(outputFolder, out=out, error=error, function="vsearch uchime")
        out, error = Popen(["vsearch", "--cluster_size", outputFolder+"/non_chimera.fa", "--id", args.clusterid, "--sizein", "--fasta_width", "0", "--relabel", "Otu", "--centroids", outputFolder+"/otu_sequences.fa"], stdout=PIPE, stderr=PIPE).communicate()
        admin_log(outputFolder, out=out, error=error, function="vsearch cluster")
        call(["rm", outputFolder + "/non_chimera.fa"])
    if args.cluster == "vsearch_unoise":
        # VSEARCH UNOISE denoising followed by uchime3 chimera removal and relabelling
        out, error = Popen(["vsearch", "--cluster_unoise", outputFolder+"/uniques_sorted.fa", "--unoise_alpha", args.unoise_alpha,"--minsize", args.abundance_minsize, "--centroids", outputFolder+"/zotusvsearch.fa"], stdout=PIPE, stderr=PIPE).communicate()
        admin_log(outputFolder, out=out, error=error, function="vsearch unoise")
        out, error = Popen(["vsearch", "--uchime3_denovo", outputFolder+"/zotusvsearch.fa","--fasta_width", "0", "--nonchimeras", outputFolder + "/otu_sequences_nochime.fa"], stdout=PIPE, stderr=PIPE).communicate()
        admin_log(outputFolder, out=out, error=error, function="vsearch uchime_denovo3")
        count = 1
        with open(outputFolder + "/otu_sequences_nochime.fa", "rU") as handle, open(outputFolder + "/otu_sequences.fa", 'a') as newotu:
            for record in SeqIO.parse(handle, "fasta"):
                newotu.write(">Otu" + str(count) + "\n")
                newotu.write(str(record.seq) + "\n")
                count += 1
        Popen(["rm", outputFolder + "/otu_sequences_nochime.fa"])
def dada2_cluster(outputFolder):
    """Filter out reads containing N bases, then run the DADA2 R script."""
    source_fastq = outputFolder + "/combined_dada.fastq"
    filtered_fastq = outputFolder + "/combined_dada_filtered.fastq"
    dropped = 0
    with open(source_fastq, "rU") as handle, open(filtered_fastq, "a") as output:
        for record in SeqIO.parse(handle, "fastq"):
            if "N" in str(record.seq):
                dropped += 1
            else:
                output.write(record.format("fastq"))
    admin_log(outputFolder, out="Sequences with N bases found and removed: " + str(dropped), function="remove N bases")
    command = ["Rscript", "/home/galaxy/Tools/galaxy-tool-make-otu-table/dada2.R",
               filtered_fastq, outputFolder + "/otu_sequences.fa"]
    out, error = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
    admin_log(outputFolder, out=out, error=error, function="dada2")
def usearch_otu_tab(outputFolder):
    """Map all reads back onto the OTU sequences (97% id) to build the table."""
    command = ["vsearch", "--usearch_global", outputFolder + "/combined.fa",
               "--db", outputFolder + "/otu_sequences.fa", "--id", "0.97",
               "--otutabout", outputFolder + "/otutab.txt",
               "--biomout", outputFolder + "/bioom.json"]
    out, error = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
    admin_log(outputFolder, out=out, error=error, function="otutab")
def zip_it_up(outputFolder):
    """Bundle every result file into <outputFolder>/all_output.zip."""
    command = ["zip", "-r", "-j", outputFolder + "/all_output.zip", outputFolder + "/"]
    out, error = Popen(command, stdout=PIPE, stderr=PIPE).communicate()
    admin_log(outputFolder, out=out, error=error, function="zip_it_up")
def send_output(outputFolder):
    """Move/copy results to the Galaxy output destinations given on the CLI.

    NOTE(review): the argument parser above does not define out, out_log,
    out_seq, out_otu_table or out_bioom_file, so plain attribute access
    raised AttributeError; getattr() guards make every destination optional
    until those options are (re)added to the parser.
    """
    if getattr(args, 'out', None):
        zip_it_up(outputFolder)
    if getattr(args, 'out_log', None):
        # Bug fix: admin_log() writes to log.log, not adminlog.log.
        call(["mv", outputFolder + "/log.log", args.out_log])
    if getattr(args, 'out_seq', None):
        call(["mv", outputFolder + "/otu_sequences.fa", args.out_seq])
    if getattr(args, 'out_otu_table', None):
        call(["mv", outputFolder + "/otutab.txt", args.out_otu_table])
    if getattr(args, 'out_bioom_file', None):
        call(["mv", outputFolder + "/bioom.json", args.out_bioom_file])
def make_output_folders(outputFolder):
    """Create the working directory layout used between pipeline steps.

    :param outputFolder: output folder path (parents are created as needed)

    Improvement: replaces the previous `mkdir` subprocess calls with
    os.makedirs, which is portable and does not shell out per directory.
    """
    for folder in (outputFolder, outputFolder + "/files", outputFolder + "/fasta"):
        if not os.path.isdir(folder):
            os.makedirs(folder)
def main():
    """Top-level pipeline: unpack input, convert reads, cluster, build the OTU table."""
    outputFolder = args.out_folder
    make_output_folders(outputFolder)
    out, error = Popen(["unzip", args.inzip, "-d", outputFolder.strip() + "/files"],
                       stdout=PIPE, stderr=PIPE).communicate()
    admin_log(outputFolder, out, error)
    extension_check(outputFolder)
    if args.cluster == "dada2":
        dada2_cluster(outputFolder)
    else:
        vsearch_derep_fulllength(outputFolder)
        usearch_cluster(outputFolder)
    usearch_otu_tab(outputFolder)
    remove_files(outputFolder)
    zip_it_up(outputFolder)
if __name__ == '__main__':
main()
| [
"martenhoogeveen@gmail.com"
] | martenhoogeveen@gmail.com |
ca9258e6924e408fc0176eaaf8cc9f02aa0c7bfa | 3844678a0fb3b1f0838fb04bc57fd93dee6ee631 | /siteApps/ecrisApps/css/ECRIS/script/glassman_set_value.py | fcce39d050b67b9e8f1089766248385506406dbb | [] | no_license | jeonghanlee/Work | daa9295da3af3ff6c3a68daf51fac804dd1942cd | bef817911ea29fe091547f001ac35ac3765d8258 | refs/heads/master | 2022-09-28T03:59:29.435017 | 2022-09-15T18:26:34 | 2022-09-15T18:26:34 | 91,843,357 | 3 | 0 | null | 2019-01-08T16:10:37 | 2017-05-19T20:34:36 | VHDL | UTF-8 | Python | false | false | 468 | py | from org.csstudio.opibuilder.scriptUtil import PVUtil
from org.csstudio.opibuilder.scriptUtil import ConsoleUtil
# Assemble the EPICS PV name "<SYS>-<SUBSYS>:<DEV>:v0set" from the display macros.
sys = widget.getMacroValue("SYS")
subsys = widget.getMacroValue("SUBSYS")
dev = widget.getMacroValue("DEV")
par = widget.getMacroValue("PAR")
pv_name = "%s-%s:%s:v0set" % (sys, subsys, dev)
# The voltage setpoint is typed into the text widget "text_set_voltage_<PAR>".
text_box = "text_set_voltage_" + par
value = display.getWidget(text_box).getPropertyValue("text")
PVUtil.writePV(pv_name, value)
#ConsoleUtil.writeInfo(value)
| [
"silee7103@gmail.com"
] | silee7103@gmail.com |
25f96cedec9f44d673e174f26f7009c567e4d75e | 6daaf3cecb19f95265188adc9afc97e640ede23c | /python_design/pythonprogram_design/Ch4/4-2-E55.py | 3183e603b8e24766d0c7c8793cf4ac19b5a5405c | [] | no_license | guoweifeng216/python | 723f1b29610d9f536a061243a64cf68e28a249be | 658de396ba13f80d7cb3ebd3785d32dabe4b611d | refs/heads/master | 2021-01-20T13:11:47.393514 | 2019-12-04T02:23:36 | 2019-12-04T02:23:36 | 90,457,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | def main():
## Sort New England states by land area.
NE = [("Maine", 30840, 1.329), ("Vermont", 9217, .626),
("New Hampshire", 8953, 1.321), ("Massachusetts", 7800, 6.646),
("Connecticut", 4842, 3.59), ("Rhode Island", 1044, 1.05)]
NE.sort(key=lambda state: state[1], reverse=True)
print("Sorted by land area in descending order:")
for state in NE:
print(state[0], " ", end="")
print()
main()
| [
"weifeng.guo@cnexlabs.com"
] | weifeng.guo@cnexlabs.com |
9682c7a2b7ccafa088bfb89d7104ae3684b1c697 | 3691259d4be62b60d8d52f38b36d6a24e5fd4536 | /libcloud/compute/drivers/ktucloud.py | 75a5eef6eafc0c674f4fdb81343b669158f21aba | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | chenjiang1985/libcloud | f385fac278777c2bbfedaf440d353c9ad9eb5c69 | 587212da626dfe0e2936737108bcc49d666cf4b4 | refs/heads/master | 2021-07-16T14:29:21.821490 | 2019-11-27T02:20:43 | 2019-11-27T02:20:43 | 222,844,781 | 1 | 2 | Apache-2.0 | 2020-10-27T22:06:36 | 2019-11-20T03:41:31 | Python | UTF-8 | Python | false | false | 3,610 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeImage, NodeSize
from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver
class KTUCloudNodeDriver(CloudStackNodeDriver):
    """Driver for the KT ucloud compute platform (CloudStack based).

    KTUCloud models images as "product types": each product type couples a
    service offering (the flavour) with a template and an optional disk
    offering.
    """

    # diskofferingid value meaning "no additional data disk".
    EMPTY_DISKOFFERINGID = '0'

    type = Provider.KTUCLOUD
    name = 'KTUCloud'
    website = 'https://ucloudbiz.olleh.com/'

    def list_images(self, location=None):
        """Return available images (KTUCloud product types).

        :param location: kept for API compatibility.
            NOTE(review): the previous implementation built a
            templatefilter/zoneid argument dict here but never passed it to
            the request, so dropping that dead code changes no behaviour.
        """
        imgs = self._sync_request(command='listAvailableProductTypes',
                                  method='GET')
        images = []
        for img in imgs['producttypes']:
            images.append(
                NodeImage(
                    img['serviceofferingid'],
                    img['serviceofferingdesc'],
                    self,
                    {'hypervisor': '',
                     'format': '',
                     'os': img['templatedesc'],
                     'templateid': img['templateid'],
                     'zoneid': img['zoneid']}
                )
            )
        return images

    def list_sizes(self, location=None):
        """Return node sizes; each size is a KTUCloud disk offering.

        Product types without a disk offering map to EMPTY_DISKOFFERINGID.
        """
        szs = self._sync_request('listAvailableProductTypes')
        sizes = []
        for sz in szs['producttypes']:
            diskofferingid = sz.get('diskofferingid',
                                    self.EMPTY_DISKOFFERINGID)
            # ram/disk/bandwidth/price are not exposed by the API: report 0.
            sizes.append(NodeSize(
                diskofferingid,
                sz['diskofferingdesc'],
                0, 0, 0, 0, self)
            )
        return sizes

    def create_node(self, name, size, image, location=None, **kwargs):
        """Deploy a new virtual machine.

        :param name:  display name of the node
        :param size:  NodeSize whose id is the disk offering id
        :param image: NodeImage from list_images (carries templateid and
                      zoneid in .extra)
        :keyword usageplantype: billing plan; defaults to 'hourly'
        """
        params = {'displayname': name,
                  'serviceofferingid': image.id,
                  'templateid': str(image.extra['templateid']),
                  'zoneid': str(image.extra['zoneid'])}

        # KTUCloud bills hourly unless the caller picks another plan.
        usageplantype = kwargs.pop('usageplantype', None)
        if usageplantype is None:
            usageplantype = 'hourly'
        params['usageplantype'] = usageplantype

        # Only request a data disk when the size carries a real offering.
        if size.id != self.EMPTY_DISKOFFERINGID:
            params['diskofferingid'] = size.id

        result = self._async_request(
            command='deployVirtualMachine',
            params=params,
            method='GET')

        node = result['virtualmachine']
        return Node(
            id=node['id'],
            name=node['displayname'],
            state=self.NODE_STATE_MAP[node['state']],
            public_ips=[],
            private_ips=[],
            driver=self,
            extra={
                'zoneid': image.extra['zoneid'],
                'ip_addresses': [],
                'forwarding_rules': [],
            }
        )
| [
"jacob.cj@alibaba-inc.com"
] | jacob.cj@alibaba-inc.com |
8b6ba1d95b208bc8087ef5dc27da02b1de14f438 | d7f64c3929642836691f4312cd936dd5647a2d80 | /ast.py | 93adf258d453a2a01bbb6c06fbdbc8a327e71293 | [] | no_license | folkol/imp | bb09528f5e1f2376ecaec6be94cbdee54fd306fe | ac1a085360d0f75f7326a731d51475cca386e923 | refs/heads/master | 2021-01-25T14:33:46.808721 | 2018-03-03T19:18:44 | 2018-03-03T19:18:44 | 123,711,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,844 | py | class ArithmeticExpression(object):
pass
class IntExp(ArithmeticExpression):
    """Integer literal node; evaluates to its constant value."""
    def __init__(self, i):
        self.i = i
    def eval(self, ignored):
        # The environment is irrelevant for a literal.
        return self.i
class Variable(ArithmeticExpression):
    """Variable reference; unbound names evaluate to 0 (IMP's default)."""

    def __init__(self, name):
        self.name = name

    def eval(self, env):
        """Look *name* up in the environment, defaulting missing names to 0."""
        # dict.get replaces the membership-test-plus-index double lookup.
        return env.get(self.name, 0)
class BinaryOperator(ArithmeticExpression):
    """Arithmetic node applying one of + - * / to two sub-expressions."""

    def __init__(self, op, left, right):
        self.op = op
        self.left = left
        self.right = right

    def eval(self, env):
        """Evaluate both operands, then combine them with self.op."""
        lhs = self.left.eval(env)
        rhs = self.right.eval(env)
        if self.op == '+':
            return lhs + rhs
        if self.op == '-':
            return lhs - rhs
        if self.op == '*':
            return lhs * rhs
        if self.op == '/':
            return lhs / rhs
        raise RuntimeError('unknown operator: ' + self.op)
class BooleanExpression(object):
    """Abstract base for AST nodes that evaluate to a truth value."""
    pass
class RelationalExpression(BooleanExpression):
    """Comparison node applying one of < <= > >= = != to two operands."""

    def __init__(self, op, left, right):
        self.op = op
        self.left = left
        self.right = right

    def eval(self, env):
        """Evaluate both operands, then compare them with self.op."""
        lhs = self.left.eval(env)
        rhs = self.right.eval(env)
        if self.op == '<':
            return lhs < rhs
        if self.op == '<=':
            return lhs <= rhs
        if self.op == '>':
            return lhs > rhs
        if self.op == '>=':
            return lhs >= rhs
        if self.op == '=':
            return lhs == rhs
        if self.op == '!=':
            return lhs != rhs
        raise RuntimeError('unknown operator: ' + self.op)
class And(BooleanExpression):
    """Logical conjunction of two boolean sub-expressions."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def eval(self, env):
        # Both sides are always evaluated before combining (no short-circuit).
        lhs = self.left.eval(env)
        rhs = self.right.eval(env)
        return lhs and rhs
class Or(BooleanExpression):
    """Logical disjunction of two boolean sub-expressions."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def eval(self, env):
        # Both sides are always evaluated before combining (no short-circuit).
        lhs = self.left.eval(env)
        rhs = self.right.eval(env)
        return lhs or rhs
class Not(BooleanExpression):
    """Logical negation of a boolean sub-expression."""

    def __init__(self, exp):
        self.exp = exp

    def eval(self, env):
        return not self.exp.eval(env)
class Statement(object):
    """Abstract base for AST nodes executed for their effect on the environment."""
    pass
class Assignment(Statement):
    """name := aexp — store the evaluated expression under *name* in env."""

    def __init__(self, name, aexp):
        self.name = name
        self.aexp = aexp

    def eval(self, env):
        env[self.name] = self.aexp.eval(env)
class CompoundStatement(Statement):
    """Sequencing: execute *first*, then *second*, against the same environment."""

    def __init__(self, first, second):
        self.first = first
        self.second = second

    def eval(self, env):
        for statement in (self.first, self.second):
            statement.eval(env)
class If(Statement):
    """Conditional statement; *false_stmt* may be None for a bare `if`."""

    def __init__(self, condition, true_stmt, false_stmt):
        self.condition = condition
        self.true_stmt = true_stmt
        self.false_stmt = false_stmt

    def eval(self, env):
        if self.condition.eval(env):
            self.true_stmt.eval(env)
        elif self.false_stmt:
            self.false_stmt.eval(env)
class While(Statement):
    """Loop: re-evaluate *condition* before every iteration of *body*."""

    def __init__(self, condition, body):
        self.condition = condition
        self.body = body

    def eval(self, env):
        while self.condition.eval(env):
            self.body.eval(env)
| [
"mattias4@kth.se"
] | mattias4@kth.se |
fc0b6f99fcb1be107a0ce98c126476be63146522 | e68a40e90c782edae9d8f89b827038cdc69933c4 | /res_bw/scripts/common/lib/plat-mac/lib-scriptpackages/_builtinsuites/builtin_suite.py | 44040300db7fa8e6ef5204fd93a23f5516945284 | [] | no_license | webiumsk/WOT-0.9.16 | 2486f8b632206b992232b59d1a50c770c137ad7d | 71813222818d33e73e414e66daa743bd7701492e | refs/heads/master | 2021-01-10T23:12:33.539240 | 2016-10-11T21:00:57 | 2016-10-11T21:00:57 | 70,634,922 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,654 | py | # 2016.10.11 22:22:11 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-mac/lib-scriptpackages/_builtinSuites/builtin_Suite.py
"""Suite builtin_Suite: Every application supports open, reopen, print, run, and quit
Level 1, version 1
"""
import aetools
import MacOS
_code = 'aevt'
class builtin_Suite_Events:
    """AppleEvent wrappers for the standard events: open, run, reopen, print, quit.

    NOTE(review): decompiled Python 2 code (`raise E, v`, dict.has_key) --
    do not modernise without re-testing against the client.
    NOTE(review): the shared mutable default `_attributes = {}` is handed to
    self.send(); assumed not mutated there -- confirm before relying on it.
    """

    def open(self, _object, _attributes = {}, **_arguments):
        """open: Open the specified object(s)
        Required argument: list of objects to open
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'aevt'
        _subcode = 'odoc'
        if _arguments:
            raise TypeError, 'No optional args expected'
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        if _arguments.has_key('----'):
            return _arguments['----']

    def run(self, _no_object = None, _attributes = {}, **_arguments):
        """run: Run an application. Most applications will open an empty, untitled window.
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'aevt'
        _subcode = 'oapp'
        if _arguments:
            raise TypeError, 'No optional args expected'
        if _no_object is not None:
            raise TypeError, 'No direct arg expected'
        _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        if _arguments.has_key('----'):
            return _arguments['----']
        else:
            return

    def reopen(self, _no_object = None, _attributes = {}, **_arguments):
        """reopen: Reactivate a running application. Some applications will open a new untitled window if no window is open.
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'aevt'
        _subcode = 'rapp'
        if _arguments:
            raise TypeError, 'No optional args expected'
        if _no_object is not None:
            raise TypeError, 'No direct arg expected'
        _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        if _arguments.has_key('----'):
            return _arguments['----']
        else:
            return

    def _print(self, _object, _attributes = {}, **_arguments):
        """print: Print the specified object(s)
        Required argument: list of objects to print
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'aevt'
        _subcode = 'pdoc'
        if _arguments:
            raise TypeError, 'No optional args expected'
        _arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        if _arguments.has_key('----'):
            return _arguments['----']

    # Maps the Python keyword argument to its 4-char AppleEvent key.
    _argmap_quit = {'saving': 'savo'}

    def quit(self, _no_object = None, _attributes = {}, **_arguments):
        """quit: Quit an application
        Keyword argument saving: specifies whether to save currently open documents
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'aevt'
        _subcode = 'quit'
        aetools.keysubst(_arguments, self._argmap_quit)
        if _no_object is not None:
            raise TypeError, 'No direct arg expected'
        aetools.enumsubst(_arguments, 'savo', _Enum_savo)
        _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        if _arguments.has_key('----'):
            return _arguments['----']
        else:
            return
# Keyword-to-AppleEvent-key map for a close event (not emitted by this suite).
_argmap_close = {'saving': 'savo',
 'saving_in': 'kfil'}
# Enumeration for the "saving" parameter of quit (note the padded 4-char codes).
_Enum_savo = {'yes': 'yes ',
 'no': 'no ',
 'ask': 'ask '}
# Empty declaration tables kept for aetools suite-loading compatibility.
_classdeclarations = {}
_propdeclarations = {}
_compdeclarations = {}
_enumdeclarations = {'savo': _Enum_savo}
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\plat-mac\lib-scriptpackages\_builtinsuites\builtin_suite.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:22:11 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
e4fdcb98e8edf08861cc723590e7f6122279a99c | b2ddc8011a4048d810bf4611c53b561293dd9452 | /testcube/settings.py | ec405de5927a291fe6e7c342a2d79d4ab3ae6c6a | [
"MIT"
] | permissive | RainYang0925/testcube | 677f6955d8a12e45b8c53037aad053482ab45f4f | a294c35a781e8495400ae7e04c342b326bc9c8ac | refs/heads/master | 2021-01-25T08:00:51.443311 | 2017-06-07T03:52:25 | 2017-06-07T03:52:25 | 93,696,358 | 1 | 0 | null | 2017-06-08T01:49:25 | 2017-06-08T01:49:25 | null | UTF-8 | Python | false | false | 3,973 | py | """
Django settings for testcube project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from os import environ
from os.path import join, dirname, abspath

# Directory holding this settings module, and the project root one level up.
SETTINGS_DIR = dirname(abspath(__file__))
BASE_DIR = dirname(SETTINGS_DIR)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the env var is spelled 'TESTCUBG_SECRET_KEY' (sic) — looks
# like a typo for 'TESTCUBE_...'; confirm before renaming, since existing
# deployments may export the misspelled name.
SECRET_KEY = environ.get('TESTCUBG_SECRET_KEY', 'hard to guess key')

# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is enabled by any of the truthy spellings below (case-insensitive).
DEBUG_VALUE = environ.get('TESTCUBE_DEBUG', '').lower()
DEBUG = DEBUG_VALUE in ('true', 'yes', 'y', 'enabled', '1')

# Comma-separated host whitelist; when unset, Django's default is kept.
if environ.get('TESTCUBE_ALLOWED_HOSTS'):
    ALLOWED_HOSTS = environ['TESTCUBE_ALLOWED_HOSTS'].split(',')

# Database and static-file settings, overridable via the environment, with
# sqlite3 / local-folder defaults suited to development.
DB_ENGINE = environ.get('TESTCUBE_DB_ENGINE') or 'django.db.backends.sqlite3'
DB_NAME = environ.get('TESTCUBE_DB_NAME') or 'db.sqlite3'
STATIC_URL = environ.get('TESTCUBE_STATIC_URL') or '/static/'
STATIC_ROOT = environ.get('TESTCUBE_STATIC_ROOT') or join(BASE_DIR, 'dist')
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps.
    'rest_framework',
    'bootstrapform',
    # Project apps.
    'testcube.core',
    'testcube.users'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'testcube.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live next to this settings module.
        'DIRS': [join(SETTINGS_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            # Template debug tracking follows the global DEBUG flag.
            'debug': DEBUG
        },
    },
]

WSGI_APPLICATION = 'testcube.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Engine/name come from the env-derived values computed above.
DATABASES = {
    'default': {
        'ENGINE': DB_ENGINE,
        'NAME': DB_NAME,
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_DIRS = [
    join(SETTINGS_DIR, 'static')
]

REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    'PAGE_SIZE': 10
}
| [
"toby.qin@live.com"
] | toby.qin@live.com |
223f66caff5c88c80988b760c208573cadc95bf5 | f7a20374403b55189cc5db6e8fa34d0ba290387c | /modules/process_launch/wizard/__init__.py | 0ef95b703baa1af1000819826388c416f26f5cc9 | [] | no_license | dark-ice/upink_modules | 1a7b5a165cc5e05396c62cf33c261b907c23e33c | c497bf87a39796f1df3877542359b1927bec3a76 | refs/heads/master | 2021-05-01T04:40:16.436666 | 2014-04-12T15:09:31 | 2014-04-12T15:09:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | __author__ = 'andrey'
import sla
| [
"karbanovich.andrey@gmail.com"
] | karbanovich.andrey@gmail.com |
3245aee47e4ac2131d988470aa907e073849400c | 7bd82b4fa83ca2442e204d3d2a721e3759f44baa | /project_name/admin.py | 590a114047ce337fde50dc3f4f01b9488a88c2c9 | [
"MIT"
] | permissive | rupin/heroku-django-template | 0b43455739772292feda6c5d89fbc0793799e64e | bdb117d73a7c1e85c6c82778784319787f15bacf | refs/heads/master | 2020-06-29T08:24:14.601479 | 2019-10-24T04:52:26 | 2019-10-24T04:52:26 | 200,485,262 | 0 | 0 | null | 2019-08-04T11:38:44 | 2019-08-04T11:38:43 | null | UTF-8 | Python | false | false | 494 | py | from import_export.admin import ImportExportModelAdmin
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import *
class CustomUserAdmin(UserAdmin):
    """Admin configuration for the project's CustomUser model.

    Reuses Django's stock UserAdmin behaviour but swaps in the custom
    creation/change forms bound to CustomUser.
    """
    add_form = CustomUserCreationForm  # form used on the "add user" page
    form = CustomUserChangeForm  # form used on the "change user" page
    model = CustomUser
    list_display = ['email', 'username',]  # changelist columns


# Expose CustomUser in the admin site under the configuration above.
admin.site.register(CustomUser, CustomUserAdmin)
| [
"rupin.chheda@gmail.com"
] | rupin.chheda@gmail.com |
cc9fdcd55675b096709fb144e7d45b92487833b2 | ba2dbc19e899faaa17b994a1224e455a3de5b9ad | /02 Data Science/2. Analysis/1. CSV/3pandas_value_meets_condition.py | d1d763c6537a94766d202df868af4bb5e45b9337 | [] | no_license | xsky21/bigdata2019 | 52d3dc9379a05ba794c53a28284de2168d0fc366 | 19464a6f8862b6e6e3d4e452e0dab85bdd954e40 | refs/heads/master | 2020-04-21T10:56:34.637812 | 2019-04-16T04:16:27 | 2019-04-16T04:16:27 | 169,503,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | #!/usr/bin/env python3
"""Filter a CSV of supplier purchases with pandas.

Usage: 3pandas_value_meets_condition.py <input_csv> <output_csv>

Keeps rows whose 'Supplier Name' contains 'Z' or whose 'Cost'
(a '$'-prefixed string in the input) exceeds 600.0, and writes
them to the output CSV without the index column.
"""
import pandas as pd
import sys

input_file = sys.argv[1]
output_file = sys.argv[2]

data_frame = pd.read_csv(input_file)
# Strip the leading '$' so Cost can be compared numerically.
# (An unused alias of the raw Cost column was removed here.)
data_frame['Cost'] = data_frame['Cost'].str.strip('$').astype(float)
# Row filter: supplier name contains 'Z' OR cost above 600.0.
data_frame_value_meets_condition = data_frame.loc[
    (data_frame['Supplier Name'].str.contains('Z')) | (data_frame['Cost'] > 600.0), :]
data_frame_value_meets_condition.to_csv(output_file, index=False)
| [
"studerande5@gmail.com"
] | studerande5@gmail.com |
f9771dc0b891f65fb7526cfd3c55da8d270457c1 | 9680ba23fd13b4bc0fc3ce0c9f02bb88c6da73e4 | /Brian Heinold (243) ile Python/p10506.py | bd4165bb54bc3dcc03caa0f97cad5ca92ee31c40 | [] | no_license | mnihatyavas/Python-uygulamalar | 694091545a24f50a40a2ef63a3d96354a57c8859 | 688e0dbde24b5605e045c8ec2a9c772ab5f0f244 | refs/heads/master | 2020-08-23T19:12:42.897039 | 2020-04-24T22:45:22 | 2020-04-24T22:45:22 | 216,670,169 | 0 | 0 | null | null | null | null | ISO-8859-9 | Python | false | false | 1,237 | py | # coding:iso-8859-9 Türkçe
from random import randint
from math import trunc
# (randint and trunc are imported but unused in this exercise)

# Read two comma-separated numbers with a single input() call; eval unpacks
# the pair.  NOTE(review): eval on raw user input is unsafe for untrusted input.
a, b = eval (input ('Virgüllerle ayrık 2 sayı girin: '))
if a<b: a,b=b,a # swap so a >= b; unchecked, can still yield odd-looking results
print (a, "+", b, "=", a+b)
print (a, "-", b, "=", a-b)
print (a, "*", b, "=", a*b)
print (a, "/", b, "=", a/b)
# The bare triple-quoted strings below are no-op expression statements that
# the author uses as multi-line comments; they are left verbatim.
""" Her 2 sayının da negatif olması durumunda, sonucun pozitif ve
sıfırdan büyük çıkmasını isterseniz negatif küçüğü üste alabilirsiniz:
Yani eğer a=-5, b=-15 ise a, b'den büyük olduğu halde
print (b, "/", a, "=", b/a)
ifadesi sonucu +3 yansıtır
"""
print (a, "^", b, "=", a**b)
print (a, "%", b, "=", a%b)
print (a, "yüzde", b, "= %", (a-b)/b*100)
""" Çoklu yorum satırı
Program adı: Tek ve çok satırlı python yorumları
Kodlayan: M.Nihat Yavaş
Tarih: 29.09.2018-23:23 """
""" veya
Çoklu yorum satırı
Program adı: Tek ve çok satırlı python yorumları
Kodlayan: M.Nihat Yavaş
Tarih: 29.09.2018-23:23
"""
# Here the same multi-line string is kept alive by assigning it to a name.
Çıktı=""" veya
Çoklu yorum satırı
Program adı: Tek ve çok satırlı python yorumları
Kodlayan: M.Nihat Yavaş
Tarih: 29.09.2018-23:23
"""
"noreply@github.com"
] | mnihatyavas.noreply@github.com |
0875ce4acd4136ebb9d8128af0cd76afdae5a06e | b4ca78134c296d8e03c39496bcc57369fd5f619b | /kubehub/views/k8s_cluster_view.py | 4305fb86a5f76432fac40d1bc2aa06664faecb27 | [] | no_license | dedicatted/kubehub-backend | 7f4b57033962f1ef8604a2cee0cf55bebd533ec9 | 3b944e462f5366b2dbad55063f325e4aa1b19b0e | refs/heads/master | 2023-02-05T04:44:50.213133 | 2020-06-10T15:02:03 | 2020-06-10T15:02:03 | 236,169,121 | 1 | 1 | null | 2023-01-24T23:19:38 | 2020-01-25T12:45:32 | Python | UTF-8 | Python | false | false | 5,236 | py | from django.http import JsonResponse
from django.forms.models import model_to_dict
from django.views.decorators.csrf import csrf_exempt
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import api_view, permission_classes
from json import loads
from ..models.proxmox_vm_group import ProxmoxVmGroup
from ..models.k8s_cluster import KubernetesCluster
from ..models.kubespray_deploy import KubesprayDeploy
from ..proxmox.vm_group_delete import vm_group_delete
from ..k8s_deploy.kubespray_deploy import kubespray_deploy
from ..serializers.vm_group_from_img_serializer import VmGroupFromImageSerializer
from ..serializers.k8s_cluster_serializer import KubernetesClusterSerializer
@api_view(['GET'])
@permission_classes([IsAuthenticated])
@csrf_exempt
def kubernetes_cluster_list(request):
    """List every Kubernetes cluster with its Kubespray deploy attempts.

    Returns {"kubernetes_cluster_list": [cluster_dict, ...]} where each
    cluster_dict carries a 'kubespray_deployments' list.  Any exception is
    reported as {"errors": {<ExceptionType>: [<message>]}} (still HTTP 200).
    """
    if request.method == 'GET':
        try:
            k8s_clusters = []
            for k8s_cluster in KubernetesCluster.objects.all():
                # Attach every deploy attempt recorded for this cluster.
                kubespray_deploy_list = KubesprayDeploy.objects.filter(k8s_cluster=k8s_cluster)
                k8s_cluster_dict = model_to_dict(k8s_cluster)
                k8s_cluster_dict['kubespray_deployments'] = [
                    model_to_dict(kubespray_deploy_attempt)
                    for kubespray_deploy_attempt in kubespray_deploy_list
                ]
                k8s_clusters.append(k8s_cluster_dict)
            return JsonResponse({'kubernetes_cluster_list': k8s_clusters})
        except Exception as e:
            return JsonResponse({'errors': {f'{type(e).__name__}': [str(e)]}})
@api_view(['POST'])
@permission_classes([IsAuthenticated])
@csrf_exempt
def kubernetes_cluster_add(request):
    """Create a cluster record, run the Kubespray deploy, and report it.

    Flow: validate the JSON body with KubernetesClusterSerializer, create
    the record, trigger kubespray_deploy(), then set the cluster status to
    'running' or 'error' depending on the deploy outcome.  Returns the
    created record as a dict, serializer errors, or an exception payload.
    """
    if request.method == 'POST':
        try:
            kubernetes_cluster = loads(request.body)
            kcs = KubernetesClusterSerializer(data=kubernetes_cluster)
            # NOTE(review): 'status' is set on the payload AFTER the serializer
            # was constructed — presumably the serializer reads the same dict
            # lazily during is_valid(); confirm this is intentional.
            kubernetes_cluster['status'] = 'deploying'
            if kcs.is_valid():
                kc = kcs.create(kcs.validated_data)
                deploy = kubespray_deploy(
                    k8s_cluster_id=kc.id
                )
                # Reflect the deploy outcome on the stored cluster status.
                if deploy.get('status') == 'successful':
                    k8s_cluster_status_update(
                        pk=kc.id,
                        status='running'
                    )
                else:
                    k8s_cluster_status_update(
                        pk=kc.id,
                        status='error'
                    )
                return JsonResponse(model_to_dict(kc))
            else:
                return JsonResponse({'errors': kcs.errors})
        except Exception as e:
            return JsonResponse({'errors': {f'{type(e).__name__}': [str(e)]}})
@api_view(['POST'])
@permission_classes([IsAuthenticated])
@csrf_exempt
def kubernetes_cluster_remove(request):
    """Tear down a cluster's VM group and delete both database records.

    Expects a JSON body with 'k8s_cluster_id'; resolves the linked VM group,
    marks both 'removing', deletes the VMs via vm_group_delete(), marks both
    'removed', then deletes the KubernetesCluster and ProxmoxVmGroup rows.
    On failure both records are marked 'error' and the exception is returned.
    """
    if request.method == 'POST':
        try:
            data = loads(request.body)
            # Resolve the VM group backing this cluster and pass it along.
            data['vm_group_id'] = KubernetesCluster.objects.get(pk=data['k8s_cluster_id']).vm_group.id
            vm_group_pk = data.get('vm_group_id')
            k8s_cluster_pk = data.get('k8s_cluster_id')
            try:
                k8s_cluster_status_update(
                    pk=k8s_cluster_pk,
                    status='removing'
                )
                vm_group_status_update(
                    pk=vm_group_pk,
                    status='removing'
                )
                delete = vm_group_delete(data)
                # NOTE(review): indentation of this section was reconstructed;
                # verify whether the record deletion below is meant to run only
                # when vm_group_delete() reports success.
                if delete:
                    vm_group_status_update(
                        pk=vm_group_pk,
                        status='removed'
                    )
                    k8s_cluster_status_update(
                        pk=k8s_cluster_pk,
                        status='removed'
                    )
                # Remove the DB records (cluster first, then its VM group).
                k8s_cluster_instance = KubernetesCluster.objects.get(pk=k8s_cluster_pk)
                k8s_cluster_instance.delete()
                vm_group_instance = ProxmoxVmGroup.objects.get(pk=vm_group_pk)
                vm_group_instance.delete()
                return JsonResponse({'deleted': model_to_dict(k8s_cluster_instance)})
            except Exception as e:
                # Flag both records as errored before reporting the failure.
                k8s_cluster_status_update(
                    pk=k8s_cluster_pk,
                    status='error'
                )
                vm_group_status_update(
                    pk=vm_group_pk,
                    status='error'
                )
                return JsonResponse({'errors': {f'{type(e).__name__}': [str(e)]}})
        except Exception as e:
            return JsonResponse({'errors': {f'{type(e).__name__}': [str(e)]}})
def k8s_cluster_status_update(pk, status):
    """Set the 'status' field of a KubernetesCluster via its serializer.

    Returns the updated record as a dict, or None when the partial
    payload fails serializer validation.
    """
    cluster = KubernetesCluster.objects.get(pk=pk)
    serializer = KubernetesClusterSerializer(data={'status': status}, partial=True)
    if not serializer.is_valid():
        return None
    updated = serializer.update(cluster, serializer.validated_data)
    return model_to_dict(updated)
def vm_group_status_update(pk, status):
    """Set the 'status' field of a ProxmoxVmGroup via its serializer.

    Returns the updated record as a dict, or None when the partial
    payload fails serializer validation.
    """
    group = ProxmoxVmGroup.objects.get(pk=pk)
    serializer = VmGroupFromImageSerializer(data={'status': status}, partial=True)
    if not serializer.is_valid():
        return None
    updated = serializer.update(group, serializer.validated_data)
    return model_to_dict(updated)
| [
"noreply@github.com"
] | dedicatted.noreply@github.com |
497314bfc946aa25317f6658dd9d4e8e9f00df30 | 2e3349340d12733892c6208b32ba955b4360d1db | /kunsplash/models/image_urls.py | 24794fa55f8b194bf76145715ce67f9bb4eb67ac | [] | no_license | kkristof200/py_unsplash | 4b8a6d7148dbd738de1caa44a551a30dce3a94eb | 9b4a428580de8e36a77e01b3f27501cbe73d1b83 | refs/heads/main | 2023-01-06T07:36:36.323854 | 2020-11-15T23:01:19 | 2020-11-15T23:01:19 | 313,140,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # ----------------------------------------------------------- class: ImageUrls ----------------------------------------------------------- #
class ImageUrls:
# ------------------------------------------------------------- Init ------------------------------------------------------------- #
def __init__(
self,
d: dict
):
self.raw = d['raw']
self.full = d['full']
self.regular = d['regular']
self.small = d['small']
self.thumb = d['thumb']
core = self.raw.split('?')[0]
self.hd = core + '?ixlib=rb-1.2.1&fm=jpg&crop=entropy&cs=tinysrgb&w={}&fit=max'.format(720)
self.fullHD = core + '?ixlib=rb-1.2.1&fm=jpg&crop=entropy&cs=tinysrgb&w={}&fit=max'.format(1080)
# ---------------------------------------------------------------------------------------------------------------------------------------- # | [
"kovacskristof200@gmail.com"
] | kovacskristof200@gmail.com |
f806d3225b073d92885ee4e935e1b1b677ff49af | b54d2b785d324828decd84941c7dbe6e1d2c5cf0 | /venv/Session10B.py | aadd65c5d21d207830a51fbf3430fbd0b8888195 | [] | no_license | ishantk/GW2019PA1 | 193c30dd930d17dacdd36f3adff20246c17ae6f9 | 120a63b05160a78a2c05c6e9f8561a7c02c6b88e | refs/heads/master | 2020-05-31T06:36:48.814161 | 2019-07-19T06:20:48 | 2019-07-19T06:20:48 | 190,145,890 | 14 | 15 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | class Parent:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
print(">> Parent Constructor Executed")
def showDetails(self):
print(">> Hello, ",self.fname, self.lname)
class Child(Parent): # Relationship -> IS-A
def __init__(self, fname, lname, vehicles, salary):
Parent.__init__(self, fname, lname)
self.vehicles = vehicles
self.salary = salary
print(">> Child Constructor Executed")
# Overriding
def showDetails(self):
Parent.showDetails(self)
print(">> Hello in Child, ", self.vehicles, self.salary)
print("Parent Class Dictionary: ",Parent.__dict__)
print("Child Class Dictionary: ",Child.__dict__)
ch = Child("John", "Watson", 2, 30000)
print(ch.__dict__)
ch.showDetails() # Child.showDetails(ch)
# Parent.showDetails(ch)
# Rule 2 : In Child to have customizations, we shall access Parent's Properties :)
# Funda : If same function with the same name is also available in Child -> OVERRIDING
| [
"er.ishant@gmail.com"
] | er.ishant@gmail.com |
406b3a67d50ad721e4827ee2929ce4120881cd16 | 133cbe0eeccd42d3e0f77de56a48032a4d8a5dd6 | /astropy_timeseries/io/kepler.py | decaccf371fcb10b59a170b8ac1c2919e8c9186c | [] | no_license | barentsen/astropy-timeseries | 5d1a4c4af8aaa4bd7271d0284d32b7101d4c2b2e | 1a0852efcf37854c4088d2cd3f86b1e38eb71b8f | refs/heads/master | 2020-05-01T02:08:36.803144 | 2019-02-25T16:04:07 | 2019-02-25T16:04:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | from astropy.io import registry
from astropy.table import Table
from astropy.time import Time
from ..sampled import TimeSeries
__all__ = ['kepler_fits_reader']
def kepler_fits_reader(filename):
    """Read a Kepler light-curve FITS file into a TimeSeries.

    Normalises the raw table (lower-case column names, 'e-/s' units mapped
    to 'electron/s'), converts the Kepler time offset (+2454833, TCB scale,
    JD format) into a Time column, and returns a TimeSeries whose time is
    displayed in ISOT format.
    """
    table = Table.read(filename, format='fits')

    for name in table.colnames:
        column = table[name]
        # Kepler writes electrons/second as 'e-/s'; use the parseable form.
        if column.unit == 'e-/s':
            column.unit = 'electron/s'
        table.rename_column(name, name.lower())

    # Kepler times are stored relative to JD 2454833 on the TCB scale.
    time = Time(table['time'].data + 2454833, scale='tcb', format='jd')
    table.remove_column('time')

    series = TimeSeries(time=time, data=table)
    series.time.format = 'isot'
    return series


registry.register_reader('kepler.fits', TimeSeries, kepler_fits_reader)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
3672299abcf38d60edba474bdaa16a910e9e2918 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/bar/marker/_reversescale.py | 0643f12996715a3baf7cbdc908afd0a358154f87 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 476 | py | import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the 'bar.marker.reversescale' plotly attribute."""

    def __init__(self, plotly_name='reversescale', parent_name='bar.marker', **kwargs):
        # Schema defaults; callers may override either via kwargs.
        edit_type = kwargs.pop('edit_type', 'plot')
        role = kwargs.pop('role', 'style')
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
e7cdbdb4afd52e49b81f4f1fbd4b703332ea426d | 05b0162d5ee7ab74f71ad4f21d5188a8735dfaef | /plugins/action/sg_mapping_deploy_all.py | 0c785231f7c774872d04b7ffa8364529096202eb | [
"MIT"
] | permissive | steinzi/ansible-ise | 567b2e6d04ce3ca6fbdbb6d0f15cd1913a1e215a | 0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0 | refs/heads/main | 2023-06-25T15:28:22.252820 | 2021-07-23T14:21:40 | 2021-07-23T14:21:40 | 388,820,896 | 0 | 0 | MIT | 2021-07-23T14:03:07 | 2021-07-23T14:03:06 | null | UTF-8 | Python | false | false | 2,511 | py | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
# ansible.utils is an optional collection; record whether its argspec
# validator is importable so ActionModule can fail with a clear message.
try:
    from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
        AnsibleArgSpecValidator,
    )
except ImportError:
    ANSIBLE_UTILS_IS_INSTALLED = False
else:
    ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.module_utils.ise import (
    ISESDK,
    ise_argument_spec,
)

# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module (this action takes none of its own)
argument_spec.update(dict(
))

# Conditional requirements between parameters (all empty for this module).
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
    """Action plugin invoking ISE 'deploy all IP-to-SGT mappings'."""

    def __init__(self, *args, **kwargs):
        # Fail early with an actionable message if ansible.utils is missing,
        # since _check_argspec depends on its validator.
        if not ANSIBLE_UTILS_IS_INSTALLED:
            raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
        super(ActionModule, self).__init__(*args, **kwargs)
        self._supports_async = True
        self._result = None

    # Checks the supplied parameters against the argument spec for this module
    def _check_argspec(self):
        aav = AnsibleArgSpecValidator(
            data=self._task.args,
            schema=dict(argument_spec=argument_spec),
            schema_format="argspec",
            schema_conditionals=dict(
                required_if=required_if,
                required_one_of=required_one_of,
                mutually_exclusive=mutually_exclusive,
                required_together=required_together,
            ),
            name=self._task.action,
        )
        # validate() may normalise args, so write them back to the task.
        valid, errors, self._task.args = aav.validate()
        if not valid:
            raise AnsibleActionFail(errors)

    def get_object(self, params):
        # The deploy-all API call takes no parameters; keep the empty-dict
        # builder for symmetry with the other generated action plugins.
        new_object = dict(
        )
        return new_object

    def run(self, tmp=None, task_vars=None):
        self._task.diff = False
        self._result = super(ActionModule, self).run(tmp, task_vars)
        # Read-only trigger call: report 'changed: false'.
        self._result["changed"] = False
        self._check_argspec()
        ise = ISESDK(params=self._task.args)
        response = ise.exec(
            family="ip_to_sgt_mapping",
            function='deploy_all_ip_to_sgt_mapping',
            params=self.get_object(self._task.args),
        ).response
        self._result.update(dict(ise_response=response))
        self._result.update(ise.exit_json())
        return self._result
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
f0191ceb0448c8b6eacede545dd5863c69667797 | cb1d59b57510d222efcfcd37e7e4e919b6746d6e | /python/serialize_and_deserialize_BST.py | b07ff5bbbf5e37248ebf473b88bb3d16d5bafbd1 | [] | no_license | pzmrzy/LeetCode | 416adb7c1066bc7b6870c6616de02bca161ef532 | ef8c9422c481aa3c482933318c785ad28dd7703e | refs/heads/master | 2021-06-05T14:32:33.178558 | 2021-05-17T03:35:49 | 2021-05-17T03:35:49 | 49,551,365 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    """Serialize/deserialize a binary search tree via its preorder listing.

    A BST is fully determined by its preorder traversal, because min/max
    bounds recover the left/right split during reconstruction.
    """

    def serialize(self, root):
        """Encodes a tree to a single string.
        :type root: TreeNode
        :rtype: str
        """
        ret = []
        # Collect node values in preorder (node, left subtree, right subtree).
        def preorder(root):
            if root:
                ret.append(root.val)
                preorder(root.left)
                preorder(root.right)
        preorder(root)
        return ' '.join(map(str, ret))

    def deserialize(self, data):
        """Decodes your encoded data to tree.
        :type data: str
        :rtype: TreeNode
        """
        nums = collections.deque(int(n) for n in data.split())
        # Consume values in preorder: a value belongs to the current subtree
        # only while it fits inside the (mmin, mmax) BST bounds.
        def build(mmin, mmax):
            if nums and mmin < nums[0] < mmax:
                n = nums.popleft()
                node = TreeNode(n)
                node.left = build(mmin, n)
                node.right = build(n, mmax)
                return node
        return build(float('-inf'), float('inf'))
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| [
"pzmrzy@gmail.com"
] | pzmrzy@gmail.com |
a25e58ffc121cdabeac8d4d10d327c475e84b51a | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_LOG/test_c142758.py | 9a8984479db4dc8ffc54e5244552c4f02ae21dfe | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_log import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 142758
def test_c142758(browser):
try:
login_web(browser, url=dev1)
edit_log_localdb_traffic_jyl(browser, traffic="5000", traffic_num="50")
edit_log_localdb_traffic_jyl(browser, traffic="800000", traffic_num="50")
loginfo1 = get_log(browser, 管理日志)
try:
assert "配置 [LogDB]对象成功" in loginfo1
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "配置 [LogDB]对象成功" in loginfo1
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
print(err)
reload(hostip=dev1)
rail_fail(test_run_id, test_id)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
f56c525467ade5e3001eaa43d9726b72abff18cd | 19a7cbef5ccfdba8cfcaab0ab2382faea37b0fc6 | /backend/task_profile/migrations/0001_initial.py | 6253f2bb0e045f5037b01bc5d7cb47da1e50101e | [] | no_license | crowdbotics-apps/test-2-20753 | 6457b3d87abda53344537f0595d92ad0283efda8 | 6f59ab7cef1bd3d31adea98d4ecb74ba8044a180 | refs/heads/master | 2022-12-17T22:50:24.918623 | 2020-09-27T13:14:18 | 2020-09-27T13:14:18 | 299,032,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,518 | py | # Generated by Django 2.2.16 on 2020-09-27 13:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="TaskerProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("mobile_number", models.CharField(max_length=20)),
("photo", models.URLField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
("last_updated", models.DateTimeField(auto_now=True)),
("last_login", models.DateTimeField(blank=True, null=True)),
("description", models.TextField(blank=True, null=True)),
("city", models.CharField(blank=True, max_length=50, null=True)),
("vehicle", models.CharField(blank=True, max_length=50, null=True)),
("closing_message", models.TextField(blank=True, null=True)),
("work_area_radius", models.FloatField(blank=True, null=True)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="taskerprofile_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Notification",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("type", models.CharField(max_length=20)),
("message", models.TextField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"user",
models.ManyToManyField(
related_name="notification_user", to=settings.AUTH_USER_MODEL
),
),
],
),
migrations.CreateModel(
name="InviteCode",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("code", models.CharField(max_length=20)),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="invitecode_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="CustomerProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("mobile_number", models.CharField(max_length=20)),
("photo", models.URLField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
("last_updated", models.DateTimeField(auto_now=True)),
("last_login", models.DateTimeField(blank=True, null=True)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="customerprofile_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c87ffea0e40fdb83f0cd6960f8e62d33f1362501 | c1b237c351a27da9c32e7193593c568f6ed29f90 | /src/pydae/urisi/vscs/ac3ph4wgfpidq.py | 037837f59e775fd798a7b7eb70000a7997c644e7 | [
"MIT"
] | permissive | pydae/pydae | 046123f95b38c54ff874e264af126020845c6e52 | bdf01490d768fd97dfc540e862359b4a4540a5a0 | refs/heads/master | 2023-07-08T08:35:59.349878 | 2023-07-05T18:15:59 | 2023-07-05T18:15:59 | 238,614,352 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,463 | py |
def ac3ph4wgfpidq(grid,vsc_data):
    '''
    VSC with 3 phase and 4 wire working in open loop as a grid former.

    Appends this converter's symbolic DAE contributions (states, algebraic
    variables, equations, inputs, outputs and parameter defaults) to the
    dictionaries held in grid.dae, and accumulates the converter into the
    grid's centre-of-inertia (COI) frequency computation.

    NOTE(review): `np` and `sym` are assumed to be numpy/sympy imported at
    module level (not visible in this chunk) — confirm.
    '''
    # Shared DAE containers mutated in place.
    params_dict = grid.dae['params']
    f_list = grid.dae['f']
    x_list = grid.dae['x']
    g_list = grid.dae['g']
    y_list = grid.dae['y']
    u_dict = grid.dae['u']
    h_dict = grid.dae['h_dict']

    # Fortescue operator and zero/positive/negative sequence transforms.
    alpha = np.exp(2.0/3*np.pi*1j)
    A_0a = np.array([[1, 1, 1],
                     [1, alpha**2, alpha],
                     [1, alpha, alpha**2]])  # NOTE(review): A_0a is unused below
    A_a0 = 1/3* np.array([[1, 1, 1],
                          [1, alpha, alpha**2],
                          [1, alpha**2, alpha]])

    omega_coi_i = 0
    HS_coi = 0

    # secondary frequency control
    omega_coi = sym.Symbol('omega_coi',real=True)
    xi_freq = sym.Symbol('xi_freq',real=True)
    K_agc = sym.Symbol('K_agc',real=True)

    name = vsc_data['bus']

    # inputs
    e_d_ref, e_q_ref = sym.symbols(f'e_d_{name}_ref,e_q_{name}_ref', real=True)
    omega_ref,p_c = sym.symbols(f'omega_{name}_ref,p_c_{name}', real=True)

    # parameters
    S_n,U_n,H,K_f,T_f,K_sec,K_delta = sym.symbols(f'S_n_{name},U_n_{name},H_{name},K_f_{name},T_f_{name},K_sec_{name},K_delta_{name}', real=True)
    R_s,R_sn,R_ng = sym.symbols(f'R_{name}_s,R_{name}_sn,R_{name}_ng', real=True)
    X_s,X_sn,X_ng = sym.symbols(f'X_{name}_s,X_{name}_sn,X_{name}_ng', real=True)
    T_e = sym.Symbol(f'T_e_{name}', real=True)
    K_p = sym.Symbol(f"K_p_{name}", real=True)
    T_p = sym.Symbol(f"T_p_{name}", real=True)
    T_c = sym.Symbol(f"T_c_{name}", real=True)
    T_w = sym.Symbol(f"T_w_{name}", real=True)
    R_v = sym.Symbol(f"R_v_{name}", real=True)
    X_v = sym.Symbol(f"X_v_{name}", real=True)
    Droop = sym.Symbol(f"Droop_{name}", real=True)

    # dynamical states
    phi = sym.Symbol(f'phi_{name}', real=True)
    xi_p = sym.Symbol(f'xi_p_{name}', real=True)
    p_ef = sym.Symbol(f'p_ef_{name}', real=True)
    De_ao_m,De_bo_m,De_co_m,De_no_m = sym.symbols(f'De_ao_m_{name},De_bo_m_{name},De_co_m_{name},De_no_m_{name}', real=True)
    omega = sym.Symbol(f'omega_{name}', real=True)

    # algebraic states
    #e_an_i,e_bn_i,e_cn_i,e_ng_i = sym.symbols(f'e_{name}_an_i,e_{name}_bn_i,e_{name}_cn_i,e_{name}_ng_i', real=True)
    v_sa_r,v_sb_r,v_sc_r,v_sn_r,v_og_r = sym.symbols(f'v_{name}_a_r,v_{name}_b_r,v_{name}_c_r,v_{name}_n_r,v_{name}_o_r', real=True)
    v_sa_i,v_sb_i,v_sc_i,v_sn_i,v_og_i = sym.symbols(f'v_{name}_a_i,v_{name}_b_i,v_{name}_c_i,v_{name}_n_i,v_{name}_o_i', real=True)
    i_sa_r,i_sb_r,i_sc_r,i_sn_r,i_ng_r = sym.symbols(f'i_vsc_{name}_a_r,i_vsc_{name}_b_r,i_vsc_{name}_c_r,i_vsc_{name}_n_r,i_vsc_{name}_ng_r', real=True)
    i_sa_i,i_sb_i,i_sc_i,i_sn_i,i_ng_i = sym.symbols(f'i_vsc_{name}_a_i,i_vsc_{name}_b_i,i_vsc_{name}_c_i,i_vsc_{name}_n_i,i_vsc_{name}_ng_i', real=True)
    omega_f = sym.Symbol(f'omega_f_{name}', real=True)
    p_ef = sym.Symbol(f'p_ef_{name}', real=True)
    p_m = sym.Symbol(f'p_m_{name}', real=True)
    p_c = sym.Symbol(f'p_c_{name}', real=True)
    p_cf= sym.Symbol(f'p_cf_{name}', real=True)

    # Per-phase, neutral and neutral-to-ground series impedances.
    Z_sa = R_s + 1j*X_s
    Z_sb = R_s + 1j*X_s
    Z_sc = R_s + 1j*X_s
    Z_sn = R_sn + 1j*X_sn
    Z_ng = R_ng + 1j*X_ng

    # Complex phasors assembled from their real/imaginary algebraic states.
    i_sa = i_sa_r + 1j*i_sa_i
    i_sb = i_sb_r + 1j*i_sb_i
    i_sc = i_sc_r + 1j*i_sc_i
    i_sn = i_sn_r + 1j*i_sn_i
    v_sa = v_sa_r + 1j*v_sa_i
    v_sb = v_sb_r + 1j*v_sb_i
    v_sc = v_sc_r + 1j*v_sc_i
    v_sn = v_sn_r + 1j*v_sn_i
    v_og = v_og_r + 1j*v_og_i

    # Positive-sequence current, expressed per-unit in the dq frame at angle phi.
    i_pos = 1/3*(i_sa + alpha*i_sb + alpha**2*i_sc)
    I_b = S_n/(np.sqrt(3)*U_n)
    i_dq = np.sqrt(2)*i_pos*sym.exp(1j*(phi-np.pi/2))/(np.sqrt(2)*I_b)
    i_sd = sym.re(i_dq)
    i_sq = sym.im(i_dq)

    # Virtual-impedance voltage drop applied to the dq voltage references.
    e_d = -i_sd*(R_v) + (X_v)*i_sq + e_d_ref
    e_q = -i_sq*(R_v) - (X_v)*i_sd + e_q_ref
    e_dq = e_d + 1j*e_q

    # Back-transform the dq reference to per-phase terminal voltages.
    v_tdq = U_n*sym.sqrt(1/3)*sym.sqrt(2)*e_dq*sym.exp(1j*(-phi-np.pi/2))
    v_ta = -v_tdq/sym.sqrt(2)
    v_tb = -v_tdq*alpha**2/sym.sqrt(2)
    v_tc = -v_tdq*alpha/sym.sqrt(2)
    e_no_cplx = 0.0

    # Kirchhoff voltage equations per phase/neutral and current balance at 'o'.
    eq_i_sa_cplx = v_og + v_ta - i_sa*Z_sa - v_sa # v_sa = v_sag
    eq_i_sb_cplx = v_og + v_tb - i_sb*Z_sb - v_sb
    eq_i_sc_cplx = v_og + v_tc - i_sc*Z_sc - v_sc
    eq_i_sn_cplx = v_og + e_no_cplx - i_sn*Z_sn - v_sn
    eq_v_og_cplx = i_sa + i_sb + i_sc + i_sn + v_og/Z_ng

    # Register real and imaginary parts as separate algebraic equations.
    g_list += [sym.re(eq_i_sa_cplx)]
    g_list += [sym.re(eq_i_sb_cplx)]
    g_list += [sym.re(eq_i_sc_cplx)]
    g_list += [sym.re(eq_i_sn_cplx)]
    g_list += [sym.re(eq_v_og_cplx)]
    g_list += [sym.im(eq_i_sa_cplx)]
    g_list += [sym.im(eq_i_sb_cplx)]
    g_list += [sym.im(eq_i_sc_cplx)]
    g_list += [sym.im(eq_i_sn_cplx)]
    g_list += [sym.im(eq_v_og_cplx)]
    y_list += [i_sa_r,i_sb_r,i_sc_r,i_sn_r,v_og_r]
    y_list += [i_sa_i,i_sb_i,i_sc_i,i_sn_i,v_og_i]

    # Inject the converter currents into the grid's node-voltage equations.
    y_ini_str = [str(item) for item in y_list]
    for ph in ['a','b','c','n']:
        i_s_r = sym.Symbol(f'i_vsc_{name}_{ph}_r', real=True)
        i_s_i = sym.Symbol(f'i_vsc_{name}_{ph}_i', real=True)
        g_list[y_ini_str.index(f'v_{name}_{ph}_r')] += i_s_r
        g_list[y_ini_str.index(f'v_{name}_{ph}_i')] += i_s_i
        i_s = i_s_r + 1j*i_s_i
        i_s_m = np.abs(i_s)
        h_dict.update({f'i_vsc_{name}_{ph}_m':i_s_m})

    # Outputs: terminal voltages and dq currents.
    h_dict.update({f'v_ta_{name}_r':sym.re(v_ta),f'v_tb_{name}_r':sym.re(v_tb),f'v_tc_{name}_r':sym.re(v_tc)})
    h_dict.update({f'v_ta_{name}_i':sym.im(v_ta),f'v_tb_{name}_i':sym.im(v_tb),f'v_tc_{name}_i':sym.im(v_tc)})
    h_dict.update({f'i_sd_{name}':i_sd,f'i_sq_{name}':i_sq})

    # Input defaults (400 V line-to-line nominal phase magnitude).
    V_1 = 400/np.sqrt(3)
    u_dict.update({f'e_{name}_ao_m':V_1,f'e_{name}_bo_m':V_1,f'e_{name}_co_m':V_1,f'e_{name}_no_m':0.0})
    u_dict.update({f'phi_a_{name}':0.0})
    u_dict.update({f'phi_b_{name}':0.0})
    u_dict.update({f'phi_c_{name}':0.0})
    u_dict.update({f'p_c_{name}':0.0})
    u_dict.update({f'omega_{name}_ref':1.0})
    u_dict.update({f'{str(e_d_ref)}': 0.0})
    u_dict.update({f'{str(e_q_ref)}':-1.0})

    # Parameter defaults pulled from the per-VSC data record.
    params_dict.update({f'X_v_{name}':vsc_data['X_v'],f'R_v_{name}':vsc_data['R_v']})
    params_dict.update({f'X_{name}_s':vsc_data['X'],f'R_{name}_s':vsc_data['R']})
    params_dict.update({f'X_{name}_sn':vsc_data['X_n'],f'R_{name}_sn':vsc_data['R_n']})
    params_dict.update({f'X_{name}_ng':vsc_data['X_ng'],f'R_{name}_ng':vsc_data['R_ng']})
    params_dict.update({f'S_n_{name}':vsc_data['S_n'],f'U_n_{name}':vsc_data['U_n']})
    params_dict.update({f'T_e_{name}':vsc_data['T_e']})
    params_dict.update({f'T_c_{name}':vsc_data['T_c']})
    params_dict.update({f'Droop_{name}':vsc_data['Droop']})
    params_dict.update({f'K_p_{name}':vsc_data['K_p']})
    params_dict.update({f'T_p_{name}':vsc_data['T_p']})
    params_dict.update({f'T_w_{name}':vsc_data['T_w']})
    params_dict.update({f'K_sec_{name}':vsc_data['K_sec']})
    params_dict.update({f'K_delta_{name}':vsc_data['K_delta']})

    # VSG PI
    # Sequence powers from the symmetrical-component voltages/currents.
    v_sabc = sym.Matrix([[v_sa],[v_sb],[v_sc]])
    i_sabc = sym.Matrix([[i_sa],[i_sb],[i_sc]])
    v_szpn = A_a0*v_sabc
    i_szpn = A_a0*i_sabc
    s_pos = 3*v_szpn[1]*sym.conjugate(i_szpn[1])
    s_neg = 3*v_szpn[2]*sym.conjugate(i_szpn[2])
    s_zer = 3*v_szpn[0]*sym.conjugate(i_szpn[0])
    p_pos = sym.re(s_pos)

    # Secondary (AGC) power contribution and PI power error.
    p_agc = K_agc*xi_freq
    p_r = K_sec*p_agc
    epsilon_p = p_m - p_ef

    ## dynamic equations
    dphi = 2*np.pi*50*(omega - omega_coi) - K_delta*phi
    dxi_p = epsilon_p
    dp_ef = 1/T_e*(p_pos/S_n - p_ef) # simulink: p_s_fil, T_e_f
    dp_cf = 1/T_c*(p_c - p_cf)
    domega_f = 1/T_w*(omega - omega_f)
    # NOTE(review): T_v and v_ra/v_rb/v_rc/v_rn are not defined anywhere in
    # this function (no Symbol declarations) — these four equations would
    # raise NameError unless they exist at module scope; confirm.
    dDe_ao_m = 1/T_v*(v_ra*V_1 - De_ao_m)
    dDe_bo_m = 1/T_v*(v_rb*V_1 - De_bo_m)
    dDe_co_m = 1/T_v*(v_rc*V_1 - De_co_m)
    # NOTE(review): suspected copy-paste — references De_co_m instead of
    # De_no_m; confirm intended dynamics for the neutral magnitude.
    dDe_no_m = 1/T_v*(v_rn*V_1 - De_co_m)

    # Primary droop power from the filtered frequency deviation.
    p_pri = 1/Droop*(omega_f - omega_ref)

    ## algebraic equations
    g_omega = -omega + K_p*(epsilon_p + xi_p/T_p) + 1
    g_p_m = -p_m + p_cf + p_r - p_pri

    g_list += [g_omega, g_p_m]
    y_list += [ omega, p_m]
    f_list += [dphi,dxi_p,dp_ef,dp_cf,domega_f,dDe_ao_m,dDe_bo_m,dDe_co_m,dDe_no_m]
    x_list += [ phi, xi_p, p_ef, p_cf, omega_f, De_ao_m, De_bo_m, De_co_m, De_no_m]

    # Sequence-power outputs and echoed inputs.
    h_dict.update({f'p_{name}_pos':sym.re(s_pos),f'p_{name}_neg':sym.re(s_neg),f'p_{name}_zer':sym.re(s_zer)})
    h_dict.update({str(p_c):p_c,str(omega_ref):omega_ref})
    h_dict.update({f'p_pri_{name}':p_pri})

    # COI computation
    # This converter weighs into the grid frequency with its rated power.
    HS_coi = S_n
    omega_coi_i = S_n*omega_f

    grid.omega_coi_h_i += omega_coi_i
    grid.hs_total += HS_coi
| [
"jmmauricio6@gmail.com"
] | jmmauricio6@gmail.com |
6f508176b3ca094892486adba3d54a24a15695e5 | 268568ff2d483f39de78a5b29d941ce499cace33 | /spyder/app/tests/test_tour.py | 8119b0810110005f285b4bacd55f36840f505b6a | [
"MIT"
] | permissive | MarkMoretto/spyder-master | 61e7f8007144562978da9c6adecaa3022758c56f | 5f8c64edc0bbd203a97607950b53a9fcec9d2f0b | refs/heads/master | 2023-01-10T16:34:37.825886 | 2020-08-07T19:07:56 | 2020-08-07T19:07:56 | 285,901,914 | 2 | 1 | MIT | 2022-12-20T13:46:41 | 2020-08-07T19:03:37 | Python | UTF-8 | Python | false | false | 519 | py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for tour.py
"""
# Test library imports
import pytest
# Local imports
from spyder.app.tour import TourTestWindow
@pytest.fixture
def tour(qtbot):
    """Create a TourTestWindow and register it with qtbot for cleanup."""
    tour = TourTestWindow()
    # addWidget ensures Qt closes/deletes the window when the test ends.
    qtbot.addWidget(tour)
    return tour
def test_tour(tour, qtbot):
    """Smoke test: the tour window can be shown without raising."""
    tour.show()
    # The fixture returned a live widget object; truthiness confirms it.
    assert tour

if __name__ == "__main__":
    pytest.main()
| [
"mark.moretto@forcepoint.com"
] | mark.moretto@forcepoint.com |
bf43e790286174e15d0347f8e37777dde97aa064 | b932ddc6d1187a795ef3c2b2af0ef5b186c8463f | /clients/views.py | 6522a5f469990285833849200e9da9f4437b00aa | [] | no_license | FlashBanistan/drf-property-management | 77f7ce487878b08298627e08dbaf5b9599768e73 | 016fb3e512dafa901de70e0b75ce0a6f6de38933 | refs/heads/master | 2021-11-16T18:55:48.314808 | 2020-09-09T03:13:36 | 2020-09-09T03:13:36 | 98,379,119 | 1 | 0 | null | 2021-09-22T17:37:36 | 2017-07-26T04:21:59 | Python | UTF-8 | Python | false | false | 979 | py | from rest_framework import viewsets
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
from .models import Client
from .serializers import ClientSerializer
@method_decorator(user_passes_test(lambda u: u.is_superuser), name="dispatch")
class ClientViewSet(viewsets.ModelViewSet):
    """Full CRUD over Client records; every action is restricted to superusers
    via the user_passes_test guard wrapped around dispatch()."""
    queryset = Client.objects.all()
    serializer_class = ClientSerializer
def client_from_request(request):
    """Return the Client associated with the request's authenticated user."""
    return request.user.client
class ClientAwareViewSet(viewsets.ModelViewSet):
    """Base viewset that scopes reads and creates to the requesting user's client."""

    def get_queryset(self):
        # Only rows belonging to the caller's client are visible.
        return super().get_queryset().filter(client=client_from_request(self.request))

    def perform_create(self, serializer, *args, **kwargs):
        client = client_from_request(self.request)
        serializer.save(client=client)
        # NOTE(review): DRF's default perform_create calls serializer.save()
        # again, so the instance is saved twice here — confirm this is intended.
        super(ClientAwareViewSet, self).perform_create(serializer, *args, **kwargs)
| [
"FlashBanistan66@gmail.com"
] | FlashBanistan66@gmail.com |
9d67d310c01d76d57b2c16257c4455828651f443 | 9e87897c988af634c3fddc42113992a65ec006f4 | /sandbox/pytorch/gmm.py | ffdc083af1b3e4f5ad2a1eb80ca3c16f29bd22be | [
"MIT"
] | permissive | luiarthur/cytof5 | 152eb06030785fdff90220f0d0a244a02204c2e9 | 6b4df5e9fd94bfd586e96579b8c618fdf6f913ed | refs/heads/master | 2021-07-20T13:39:45.821597 | 2021-03-02T23:27:35 | 2021-03-02T23:27:35 | 145,253,611 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # https://www.kaggle.com/aakashns/pytorch-basics-linear-regression-from-scratch
# https://angusturner.github.io/generative_models/2017/11/03/pytorch-gaussian-mixture-model.html
# https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html
import torch
import time
import math
from gmm_data_gen import genData
# Set random seed for reproducibility
torch.manual_seed(1)

# Set number of cpus to use
torch.set_num_threads(4)

# Define data type
dtype = torch.float64

# Define device-type (cpu / gpu)
device = torch.device("cpu")

# Param dimensions: number of mixture components
J = 3

data = genData()
y_data = torch.tensor(data['y'])
y_mean = torch.mean(y_data).item()
y_sd = torch.std(y_data).item()
# Centered-and-scaled observations; parameters are estimated on this scale
# and mapped back with y_mean / y_sd when printed.
y_cs = (y_data - y_mean) / y_sd
N = len(y_cs)

# Create random Tensors for weights.
# mu: component means; log_sig2: log-variances; logit_w: unnormalized
# mixture-weight logits (softmax-ed wherever they are used).
mu = torch.randn(J, device=device, dtype=dtype)
mu.requires_grad=True
log_sig2 = torch.empty(J, device=device, dtype=dtype).fill_(-5)
log_sig2.requires_grad=True
logit_w = torch.empty(J, device=device, dtype=dtype).fill_(1 / J)
logit_w.requires_grad=True
# logpdf of Normal
def lpdf_normal(x, m, v):
    """Log-density of Normal(mean=m, variance=v) evaluated at x."""
    quad_term = (x - m) ** 2 / (2 * v)
    log_norm_const = 0.5 * torch.log(2 * math.pi * v)
    return -(quad_term + log_norm_const)
def pdf_normal(x, m, v):
    """Density of Normal(mean=m, variance=v) at x, via exp of the log-density."""
    return torch.exp(lpdf_normal(x, m, v))
def lpdf_loginvgamma_kernel(x, a, b):
    """Unnormalized log-density (kernel) used as the prior on x = log(sigma^2);
    the name indicates a log-transformed inverse-gamma(a, b)."""
    return -(a * x + b * torch.exp(-x))
def loglike(yi, m, log_s2, logit_w):
    """Mixture log-likelihood of a single observation yi.

    Components have means m, variances exp(log_s2), and mixing weights
    softmax(logit_w); the sum over components is taken in log space.
    """
    component_lpdfs = lpdf_normal(yi, m, torch.exp(log_s2))
    log_weights = torch.log_softmax(logit_w, 0)
    # log-sum-exp is the numerically stable form of
    # log(softmax(logit_w).dot(pdf_normal(yi, m, exp(log_s2)))).
    return torch.logsumexp(component_lpdfs + log_weights, 0)
# loglike(y_data[0], mu, log_sig2, logit_w)
learning_rate = 1e-3
eps = 1E-8
optimizer = torch.optim.Adam([mu, log_sig2, logit_w], lr=learning_rate)
# Log-likelihood history; seeded with -inf so the first improvement check
# can never trigger the early stop.
ll_out = [-math.inf, ]
for t in range(100000):
    # zero out the gradient
    optimizer.zero_grad()

    # Forward pass: total log-likelihood over all (standardized) observations.
    ll = torch.stack([loglike(yi, mu, log_sig2, logit_w) for yi in y_cs]).sum()
    ll_out.append(ll.item())
    # Log-prior on the log-variances (log-inverse-gamma kernel, a=3, b=2).
    lp_logsig2 = lpdf_loginvgamma_kernel(log_sig2, 3, 2).sum()
    lp_logit_w = 0 # TODO
    lp = lp_logsig2 + lp_logit_w

    # Compute and print loss using operations on Tensors.
    # Loss is the negative mean log-posterior (MAP objective).
    log_post = ll + lp
    loss = -(log_post) / N

    # Early stop when the mean per-observation improvement drops below eps.
    # Note: the check runs before backward(), so the final iteration does
    # not update the parameters.
    ll_diff = ll_out[-1] - ll_out[-2]
    if ll_diff / N < eps:
        break
    else:
        print('ll mean improvement: {}'.format(ll_diff / N))
        print("{}: loglike: {}".format(t, ll.item() / N))
        print('mu: {}'.format(list(map(lambda m: m * y_sd + y_mean, mu.tolist()))))
        print('sig2: {}'.format(list(map(lambda s2: s2 * y_sd * y_sd, torch.exp(log_sig2).tolist()))))
        print('w: {}'.format(torch.softmax(logit_w, 0).tolist()))

    # Use autograd to compute the backward pass.
    loss.backward()

    # Update weights
    optimizer.step()
# SAME AS ABOVE.
#
# Update weights using gradient descent.
# with torch.no_grad():
# mu -= mu.grad * learning_rate
# log_sig2 -= log_sig2.grad * learning_rate
# logit_w -= logit_w.grad * learning_rate
#
# # Manually zero the gradients after updating weights
# mu.grad.zero_()
# log_sig2.grad.zero_()
# logit_w.grad.zero_()
| [
"luiarthur@gmail.com"
] | luiarthur@gmail.com |
a816b6057d723ea0010e5dff9f5f61c79b1e910f | 64e3b825b050d5e2a998e6bb809098e95d16b83c | /basemap_matplotlib_使用案例/003_basemap_绘制中国地图_GitHub上的中国地图数据.py | a2bf5acafa3f99bef68d273e5932c2fdb4c8aa52 | [] | no_license | jackyin68/wuhan_2019-nCoV | a119828c32e7b8479c68b6a48993ab3aeab98805 | 340909ad6d012863452c6a2ffc61e4c3f091d7be | refs/heads/master | 2022-04-10T03:09:59.726502 | 2020-03-17T07:40:42 | 2020-03-17T07:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | # -*- coding:utf-8 -*-
# project_xxx\venv\Scripts python
'''
Author: Felix
WeiXin: AXiaShuBai
Email: xiashubai@gmail.com
Blog: https://blog.csdn.net/u011318077
Date: 2020/1/31 15:31
Desc:
'''
# First import the plotting package and the map toolkit
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap

# Step 1: set the figure size and resolution
plt.figure(figsize=(16, 8), dpi=300)

# Step 2: create a map limited to China's longitude/latitude extent;
# `projection` selects the projection mode (Lambert conformal conic)
m = Basemap(llcrnrlon=77, llcrnrlat=14, urcrnrlon=140, urcrnrlat=51, projection='lcc', lat_1=33, lat_2=45, lon_0=100)

# Read the Chinese administrative-boundary shapefile (pre-built data from
# GitHub); `drawbounds` renders the boundary polygons.
# South Tibet and the islands are clearly marked — compared with the 002
# result this is richer and the South Tibet outline is more accurate.
m.readshapefile('../china_shapfiles/china-shapefiles-simple-version/china', 'china', drawbounds=True)
# Nine-dash-line map data
m.readshapefile('../china_shapfiles/china-shapefiles-simple-version/china_nine_dotted_line', 'china', drawbounds=True)

# Local map files were loaded above, so basemap's own coastline/border
# drawing is unnecessary and would only add clutter.
# Step 3: draw map line work such as coastlines and country borders
# m.drawcoastlines(linewidth=1,linestyle='solid',color='black') # draw coastlines
# m.drawcountries(linewidth=1,linestyle='solid',color='black') # draw country borders

# Step 4: show the figure
plt.show()
"18200116656@qq.com"
] | 18200116656@qq.com |
1046870de0c05a4a728d3ef2a523485617239418 | b0717aeda1942dd35221e668b5d793077c074169 | /env/lib/python3.7/site-packages/twilio/rest/messaging/v1/__init__.py | 3f1e4ca3869413abfe1a5e499e1c2a2bec60d919 | [
"MIT"
] | permissive | stevehind/sms-steve-server | 3fdeed6de19f29aeaeb587fe7341831036455a25 | 9b0dac19f2e6ccf6452e738017132d93e993870b | refs/heads/master | 2022-12-21T23:34:10.475296 | 2020-01-27T16:24:39 | 2020-01-27T16:24:39 | 231,842,510 | 0 | 0 | MIT | 2022-05-25T05:03:16 | 2020-01-04T23:25:23 | Python | UTF-8 | Python | false | false | 1,615 | py | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.messaging.v1.service import ServiceList
from twilio.rest.messaging.v1.session import SessionList
from twilio.rest.messaging.v1.webhook import WebhookList
class V1(Version):
    """The V1 version of the Messaging domain."""

    def __init__(self, domain):
        """
        Initialize the V1 version of Messaging

        :returns: V1 version of Messaging
        :rtype: twilio.rest.messaging.v1.V1.V1
        """
        super(V1, self).__init__(domain)
        self.version = 'v1'
        # Resource lists are constructed lazily by the properties below.
        self._services = None
        self._sessions = None
        self._webhooks = None

    @property
    def services(self):
        """
        :rtype: twilio.rest.messaging.v1.service.ServiceList
        """
        svc = self._services
        if svc is None:
            svc = self._services = ServiceList(self)
        return svc

    @property
    def sessions(self):
        """
        :rtype: twilio.rest.messaging.v1.session.SessionList
        """
        sess = self._sessions
        if sess is None:
            sess = self._sessions = SessionList(self)
        return sess

    @property
    def webhooks(self):
        """
        :rtype: twilio.rest.messaging.v1.webhook.WebhookList
        """
        hooks = self._webhooks
        if hooks is None:
            hooks = self._webhooks = WebhookList(self)
        return hooks

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Messaging.V1>'
| [
"steve.hind@gmail.com"
] | steve.hind@gmail.com |
b1f20e1c4908222e39f3237763cab46248ac30e6 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/83e76566-5f8e-4c9d-bea6-8a3c1519efe1__square_root.py | c1d2251d2f20561b14fcba31b5ee9d1797198cb8 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | """ Comparing different ways of finding square root
1st column number a
2nd col square root from written function
3rd col square root computed by math.sqrt
4th col absolute value of difference between the two estimates
"""
def findsquare(a):
    """Approximate the square root of ``a`` with Newton's (Heron's) method.

    Iterates ``x <- (x + a/x) / 2`` until two successive estimates differ
    by less than a fixed absolute tolerance.

    Raises:
        ValueError: if ``a`` is negative — the iteration never converges
            for negative input (the original code looped forever).
    """
    if a < 0:
        raise ValueError("cannot take the square root of a negative number")
    if a == 0:
        # The Newton update divides by x, so a == 0 must be special-cased;
        # the answer is exactly 0.
        return 0.0
    epsilon = 0.0000001
    x = a / 2.0
    while True:
        y = (x + a / x) / 2.0  # Newton step for f(x) = x**2 - a
        if abs(y - x) < epsilon:
            return y  # y is one full step more accurate than x
        x = y
def test_square(a):
    """Print a, the Newton estimate, math.sqrt(a), and their absolute gap.

    The padding expressions keep the four columns roughly aligned.
    NOTE: this file uses Python 2 print-statement syntax.
    """
    import math
    print a,
    print (10-len(str(a)))*' ',
    b = findsquare(a)
    print b,
    print (10-len(str(b)))*' ',
    c = math.sqrt(a)
    print c,
    print (10-len(str(c)))*' ',
    print abs(c - b)

# Compare the two square-root estimates on a few sample inputs.
test_square(35)
test_square(1001)
test_square(30000)
test_square(2)
| [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
e010a539cfed9506356780db88110ebfec463503 | 77dd413b4d4bcbe503785d9e16daacfc71febb78 | /salt/roster/flat.py | a562e16741c7cc622f385c18963d105b38e3cbe6 | [
"Apache-2.0"
] | permissive | amarnath/salt | 77bd308f431a8d70c6cf46212cfbb2b319019e63 | 0f458d7df5b9419ba9e8d68961f2ead197025d24 | refs/heads/develop | 2021-01-17T22:42:11.455863 | 2013-08-29T23:00:24 | 2013-08-29T23:00:24 | 12,478,872 | 0 | 2 | null | 2017-07-04T11:33:51 | 2013-08-30T05:36:29 | Python | UTF-8 | Python | false | false | 2,325 | py | '''
Read in the roster from a flat file using the renderer system
'''
# Import python libs
import os
import fnmatch
import re
# Import Salt libs
import salt.loader
from salt.template import compile_template
def targets(tgt, tgt_type='glob', **kwargs):
    '''
    Return the targets from the flat yaml file, checks opts for location but
    defaults to /etc/salt/roster
    '''
    conf_file = __opts__['conf_file']
    # If conf_file is a regular file (or does not exist), the roster lives
    # next to it; if it is an existing directory, the roster lives inside it.
    if os.path.isfile(conf_file) or not os.path.exists(conf_file):
        base_dir = os.path.dirname(conf_file)
    else:
        base_dir = conf_file
    template = os.path.join(base_dir, 'roster')
    renderers = salt.loader.render(__opts__, {})
    raw = compile_template(template, renderers, __opts__['renderer'], **kwargs)
    return RosterMatcher(raw, tgt, tgt_type, 'ipv4').targets()
class RosterMatcher(object):
    '''
    Matcher for the roster data structure
    '''
    def __init__(self, raw, tgt, tgt_type, ipv='ipv4'):
        # raw: mapping of minion id -> host string or config dict
        # tgt / tgt_type: target expression and matching style ('glob', 'pcre')
        self.tgt = tgt
        self.tgt_type = tgt_type
        self.raw = raw
        self.ipv = ipv

    def targets(self):
        '''
        Execute the correct tgt_type routine and return
        '''
        # Dispatch by method name: tgt_type 'glob' -> ret_glob_minions, etc.
        # An unknown tgt_type falls through to an empty result.
        try:
            return getattr(self, 'ret_{0}_minions'.format(self.tgt_type))()
        except AttributeError:
            return {}

    def ret_glob_minions(self):
        '''
        Return minions that match via glob
        '''
        minions = {}
        for minion in self.raw:
            if fnmatch.fnmatch(minion, self.tgt):
                data = self.get_data(minion)
                if data:
                    minions[minion] = data
        return minions

    def ret_pcre_minions(self):
        '''
        Return minions that match via pcre
        '''
        minions = {}
        for minion in self.raw:
            if re.match(self.tgt, minion):
                data = self.get_data(minion)
                if data:
                    minions[minion] = data
        return minions

    def get_data(self, minion):
        '''
        Return the configured ip
        '''
        # NOTE: `basestring` means this module targets Python 2; a plain
        # string entry is normalized into a {'host': ...} dict, a dict entry
        # is returned as-is, anything else is rejected.
        if isinstance(self.raw[minion], basestring):
            return {'host': self.raw[minion]}
        if isinstance(self.raw[minion], dict):
            return self.raw[minion]
        return False
| [
"thatch45@gmail.com"
] | thatch45@gmail.com |
1f052b4b2809f6daa1ec11d1b1292d2d680d53f5 | 4cdb3d1f9d0022284507877928d8f42d2fb0a5ee | /scripts/api/fetch_to_library.py | 6c497bcb402b8ba73fef0d49167a4497575da63e | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | phnmnl/galaxy | 9051eea65cc0885d6b3534f0ce7c4baea3b573e4 | 45a541f5c76c4d328c756b27ff58c9d17c072eeb | refs/heads/dev | 2020-12-28T21:39:38.829279 | 2018-03-22T18:13:01 | 2018-03-22T18:13:01 | 53,574,877 | 2 | 4 | NOASSERTION | 2018-10-09T09:17:06 | 2016-03-10T10:11:26 | Python | UTF-8 | Python | false | false | 1,013 | py | import argparse
import json
import requests
import yaml
def main():
    """Create a new Galaxy history and fetch the described targets into it."""
    parser = argparse.ArgumentParser(description='Upload a directory into a data library')
    parser.add_argument("-u", "--url", dest="url", required=True, help="Galaxy URL")
    parser.add_argument("-a", "--api", dest="api_key", required=True, help="API Key")
    parser.add_argument('target', metavar='FILE', type=str,
                        help='file describing data library to fetch')
    args = parser.parse_args()

    # Parse the YAML description of what should be fetched.
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted files — behavior kept as-is.
    with open(args.target, "r") as f:
        target_description = yaml.load(f)

    # Create a fresh history to receive the fetched data.
    history_response = requests.post(args.url + "/api/histories", data={'key': args.api_key})

    # Ask the fetch tool to load the targets into the new history.
    fetch_payload = {
        'key': args.api_key,
        'targets': json.dumps([target_description]),
        'history_id': history_response.json()["id"],
    }
    fetch_response = requests.post(args.url + '/api/tools/fetch', data=fetch_payload)
    print(fetch_response.content)


if __name__ == '__main__':
    main()
| [
"jmchilton@gmail.com"
] | jmchilton@gmail.com |
f44a44c690da1bfd0c74d3ad297d84b89c3b0f3e | fb19849a37b3fa11908bd1654d62b40b8f6bf227 | /udoy_4.py | 858fa8cec22f6cea676338288e0cc195875192ec | [] | no_license | udoy382/PyCourseBT | 51593b0e9775dd95b1ee7caa911818863837fabf | cff88fe33d7a8b313f0a2dbf4d218160db863b48 | refs/heads/main | 2023-03-25T03:44:09.173235 | 2021-03-25T14:29:19 | 2021-03-25T14:29:19 | 351,466,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | # Privacy in Classes
class __Private:
def __init__(self, a, b):
# self.__a = a
self._a = a
self.b = b
print("Private class is created")
def _sum(self):
# return self.__a + self.b
return self._a + self.b
priv = __Private(10, 20)
print(priv._sum())
print(priv._a) | [
"srudoy436@gmail.com"
] | srudoy436@gmail.com |
9f8f92e26f9d76cf09b8fc8dab1a2a0bd6f94e30 | 9fda7a515674a76c80874f9deb63a6c472195711 | /demo/office31/best_classifier/Amazon2Webcam/exp_Temp_FL_IW.py | 1eaf35e0bd93235aaff5a5495d9a14ccaf2abde6 | [
"Apache-2.0"
] | permissive | sangdon/calibration-under-covariateshift | c1f65062ed8a8df108747162b0385b69ad4c8a50 | b1ed33d253a0c2539f8dd910b9ffa63150a14383 | refs/heads/main | 2023-03-23T00:33:27.350912 | 2021-03-19T04:19:20 | 2021-03-19T04:19:20 | 347,825,644 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import os, sys
##--------------------------------------------------
from exp_Amazon2Webcam import exp
sys.path.append("../../")
sys.path.append("../../../../../")
## param
from params import Temp_FL_IW_AmazonParamParser as ParamParser
## model
from train_model import train_model as netS
from models.DAForecasters import SimpleDiscriminatorNet as netD
from models.DAForecasters import SimpleFNNForecaster as netF
from models.DAForecasters import DAForecaster_Temp_FL_IW as DAF
model_init_fn = lambda params: DAF(netS(False),
netD(params.F_n_hiddens, params.D_n_hiddens),
netF(params.n_features, params.F_n_hiddens, params.n_labels))
## algorithm
from calibration.DA import Temp_FL_IW as DACalibrator
##--------------------------------------------------
if __name__ == "__main__":
## meta
exp_name = os.path.splitext(os.path.basename(__file__))[0]
## run exps
exp(exp_name, ParamParser, model_init_fn, netS, DACalibrator)
| [
"ggdons@gmail.com"
] | ggdons@gmail.com |
d672e788c47c90bccd96eed4786b21e953f8ecf7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02594/s006908287.py | 7d015c45ecdc8404a7f9fdf11589075fde91de6c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | n = int(input())
if n >= 30 :
print("Yes")
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
29eb355dde09276ea14d9335e447f6f8488466b2 | fd4163b9032ea17ea34e7575855a33746c1772c6 | /src/azure-cli/azure/cli/command_modules/ams/tests/latest/test_ams_sp_scenarios.py | 55ca18445ab2a90c6d7f433ac505010554a6f9e2 | [
"MIT"
] | permissive | ebencarek/azure-cli | b1524ed579353ee61feb9a2ec88b134f7252d5cd | ede3aaa83aa4ac8e352e508408cada685f9f8275 | refs/heads/az-cli-private-env | 2021-06-02T04:51:29.831098 | 2020-05-01T00:15:31 | 2020-05-01T00:15:31 | 137,520,442 | 2 | 9 | MIT | 2020-05-01T00:15:33 | 2018-06-15T18:33:10 | Python | UTF-8 | Python | false | false | 2,501 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import mock
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
from azure_devtools.scenario_tests import AllowLargeResponse
class AmsSpTests(ScenarioTest):
    """Live-scenario tests for `az ams account sp` (service principal) commands."""

    @ResourceGroupPreparer()
    @StorageAccountPreparer(parameter_name='storage_account_for_create')
    @AllowLargeResponse()
    def test_ams_sp_create_reset(self, resource_group, storage_account_for_create):
        # GUID generation is patched so the recorded test replays deterministically.
        with mock.patch('azure.cli.command_modules.ams._utils._gen_guid', side_effect=self.create_guid):
            amsname = self.create_random_name(prefix='ams', length=12)

            self.kwargs.update({
                'amsname': amsname,
                'storageAccount': storage_account_for_create,
                'location': 'westus2'
            })

            # Create the Media Services account the service principal binds to.
            self.cmd('az ams account create -n {amsname} -g {rg} --storage-account {storageAccount} -l {location}', checks=[
                self.check('name', '{amsname}'),
                self.check('location', 'West US 2')
            ])

            spPassword = self.create_random_name(prefix='spp!', length=16)
            spNewPassword = self.create_random_name(prefix='spp!', length=16)

            self.kwargs.update({
                'spName': 'http://{}'.format(resource_group),
                'spPassword': spPassword,
                'spNewPassword': spNewPassword,
                'role': 'Owner'
            })

            try:
                # Create the SP, then rotate its credentials; each step checks
                # the returned secret and account metadata.
                self.cmd('az ams account sp create -a {amsname} -n {spName} -g {rg} -p {spPassword} --role {role}', checks=[
                    self.check('AadSecret', '{spPassword}'),
                    self.check('ResourceGroup', '{rg}'),
                    self.check('AccountName', '{amsname}')
                ])

                self.cmd('az ams account sp reset-credentials -a {amsname} -n {spName} -g {rg} -p {spNewPassword} --role {role}', checks=[
                    self.check('AadSecret', '{spNewPassword}'),
                    self.check('ResourceGroup', '{rg}'),
                    self.check('AccountName', '{amsname}')
                ])
            finally:
                # Always remove the AAD application so reruns start clean.
                self.cmd('ad app delete --id {spName}')
| [
"tjprescott@users.noreply.github.com"
] | tjprescott@users.noreply.github.com |
52c9a144a025ca9815e448130e5b5ffb0d68d30f | 12a5b72982291ac7c074210afc2c9dfe2c389709 | /online_judges/URI/Data_Structures/1069/code.py | a63f5171a71f9481b3f88fb686a8311ec953073a | [] | no_license | krantirk/Algorithms-and-code-for-competitive-programming. | 9b8c214758024daa246a1203e8f863fc76cfe847 | dcf29bf976024a9d1873eadc192ed59d25db968d | refs/heads/master | 2020-09-22T08:35:19.352751 | 2019-05-21T11:56:39 | 2019-05-21T11:56:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from Queue import LifoQueue
# NOTE: Python 2 script (raw_input, xrange, print statement).
n = int(raw_input())          # number of test cases
for i in xrange(n):
    s = raw_input()
    fila = LifoQueue()        # stack of unmatched '<' characters
    resposta = 0              # count of matched '<' '>' pairs ("diamonds")
    for e in s:
        if e == '<':
            fila.put(1)
        elif e == '>' and not fila.empty():
            # A '>' closes the most recent open '<'.
            fila.get()
            resposta += 1
    print resposta
| [
"mariannelinharesm@gmail.com"
] | mariannelinharesm@gmail.com |
02c7599b6c6cee78be601d154db584e40d18d55f | 0b0abc06caa25dd269e1855d3cc6c72d34dc436c | /escuela/visitante/migrations/0005_detalle.py | aee59eb1e0833cae2e4229a9d2672e655bc22458 | [] | no_license | escuela2021/escuelagithub | 0130589214681d1ff9da36ffafd8aafb99c9b96d | f35897d1918af3a22d66b163153fc72a927516e8 | refs/heads/master | 2023-09-03T21:36:00.513263 | 2021-11-11T18:38:08 | 2021-11-11T18:38:08 | 427,109,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # Generated by Django 3.2.4 on 2021-11-06 15:31
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django: creates the `detalle` model, which belongs to
    # a `tema` (FK, cascade delete) and carries optional media/text fields.

    dependencies = [
        ('visitante', '0004_tema'),
    ]

    operations = [
        migrations.CreateModel(
            name='detalle',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.TextField()),
                ('descripcion', models.TextField(blank=True, null=True)),
                ('imagen', models.ImageField(blank=True, null=True, upload_to='core')),
                ('dirurl', models.URLField(blank=True)),
                ('media', models.FileField(blank=True, null=True, upload_to='visitante')),
                ('texto', ckeditor.fields.RichTextField(blank=True, null=True)),
                ('tema', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='visitante.tema')),
            ],
        ),
    ]
| [
"gjangoinminutes@gmail.com"
] | gjangoinminutes@gmail.com |
1299544a9cd69674c4b9a5513dbb3441d4e300e5 | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/auth/core/ticket.py | 93fce93d029c09cdfef3a405f923fcb67d61ff8a | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 7,563 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from auth.config.ticket import TICKET_FLOW_CONFIG, TICKET_TYPE_CHOICES
from auth.constants import DISPLAY_STATUS, FAILED, STOPPED, SUCCEEDED
from auth.core.ticket_objects import (
ApprovalController,
BatchReCalcObj,
CommonTicketObj,
DataTokenTicketObj,
ProjectDataTicketObj,
ResourceGroupTicketObj,
RoleTicketObj,
)
from auth.core.ticket_serializer import TicketStateSerializer
from auth.exceptions import UnexpectedTicketTypeErr
from common.business import Business
from django.db import transaction
class TicketFactory:
    """Factory that binds a ticket type to its flow config and ticket object.

    The type-specific behaviour lives in the ticket objects (CommonTicketObj
    and friends); this class looks up the right one and dispatches to it.
    """

    TICKET_OBJECTS = [
        ProjectDataTicketObj,
        ResourceGroupTicketObj,
        RoleTicketObj,
        DataTokenTicketObj,
        BatchReCalcObj,
        CommonTicketObj,
    ]
    # Class-name -> class map, so flow config can name ticket objects as strings.
    TICKET_OBJECT_CONFIG = {cls.__name__: cls for cls in TICKET_OBJECTS}

    def __init__(self, ticket_type):
        self.ticket_type = ticket_type

        if ticket_type not in TICKET_FLOW_CONFIG:
            raise UnexpectedTicketTypeErr()

        _ticket_flow = TICKET_FLOW_CONFIG[ticket_type]
        self.ticket_obj = self.TICKET_OBJECT_CONFIG[_ticket_flow.ticket_object](ticket_type)

    def generate_ticket_data(self, data):
        """
        Generate the ticket payload for this ticket type
        @param data:
        @return:
        """
        return self.ticket_obj.generate_ticket_data(data)

    def add_permission(self, ticket):
        """
        Grant the permissions carried by the ticket
        @param ticket:
        @return:
        """
        self.ticket_obj.add_permission(ticket)

    def after_terminate(self, ticket, status):
        """
        Callback invoked after a ticket is terminated
        @param ticket:
        @param status:
        @return:
        """
        # Optional hook: only some ticket objects implement it.
        if hasattr(self.ticket_obj, "after_terminate"):
            self.ticket_obj.after_terminate(ticket, status)

    @classmethod
    def list_ticket_types(cls):
        """Return the ticket-type choices as [{'id': ..., 'name': ...}]."""
        return [{"id": item[0], "name": item[1]} for item in TICKET_TYPE_CHOICES]

    @classmethod
    def list_ticket_status(cls):
        """Return the displayable ticket statuses as [{'id': ..., 'name': ...}]."""
        return [{"id": item[0], "name": item[1]} for item in DISPLAY_STATUS]

    @classmethod
    def serialize_ticket_queryset(cls, data, many=False, show_display=False):
        """
        Serialize one ticket (many=False) or a ticket queryset (many=True)
        @param data:
        @param many:
        @param show_display:
        @return:
        """
        if many:
            # List queries do not need the display names for now
            results = []
            for item in data:
                results.append(cls(item.ticket_type).ticket_obj.SERIALIZER(item).data)
            return results
        else:
            # Detail queries additionally fill in the objects' display names
            result = cls(data.ticket_type).ticket_obj.SERIALIZER(data).data
            if show_display:
                result = cls(data.ticket_type).ticket_obj.SERIALIZER(data).wrap_permissions_display(result)
            return result

    @classmethod
    def serialize_state_queryset(cls, data, ticket_type=None, many=True, show_display=False):
        """
        Serialize ticket state nodes
        @param data:
        @param ticket_type: ticket type (optional; see comment below)
        @param many:
        @param show_display:
        @return:
        """
        if many:
            result = []
            # When a ticket_type is given, all states in the batch must share
            # that type; when it is not given, each state supplies its own.
            ticket_types = list({state.ticket.ticket_type for state in data})
            if ticket_type:
                if len(ticket_types) != 1:
                    raise UnexpectedTicketTypeErr()
                else:
                    if ticket_type != ticket_types[0]:
                        raise UnexpectedTicketTypeErr()
            else:
                if len(ticket_types) == 1:
                    ticket_type = ticket_types[0]
            for state in data:
                item = TicketStateSerializer(state).data
                item["ticket"] = cls(ticket_type or state.ticket.ticket_type).ticket_obj.SERIALIZER(state.ticket).data
                result.append(item)
            # Permission display names are not supported for mixed ticket
            # types (it would be slow and there is no need for it yet)
            if ticket_type and show_display:
                result = cls(ticket_type).ticket_obj.SERIALIZER.wrap_permissions_display(result)
        else:
            result = TicketStateSerializer(data).data
            result["ticket"] = cls(ticket_type).ticket_obj.SERIALIZER(data.ticket).data
            if show_display:
                result["ticket"]["permissions"] = (
                    cls(ticket_type).ticket_obj.SERIALIZER(data.ticket).wrap_permissions_display(result)
                )
        return result

    @classmethod
    def approve(cls, state, status, process_message, processed_by, add_biz_list=False):
        """
        Approve (or reject) a single ticket state
        @param state:
        @param status:
        @param process_message:
        @param processed_by:
        @param add_biz_list: when True, attach the business-name map to the
            serializer context
        @return:
        """
        context = None
        if add_biz_list:
            context = {"business": Business.get_name_dict()}
        ApprovalController.approve(state, status, process_message, processed_by)
        state_data = TicketStateSerializer(state).data
        state_data["ticket"] = cls(state.ticket.ticket_type).ticket_obj.SERIALIZER(state.ticket, context=context).data
        return state_data

    @classmethod
    def withdraw(cls, ticket, process_message, processed_by):
        """Withdraw an unfinished ticket on behalf of one of its owners."""
        ticket.has_owner_permission(processed_by, raise_exception=True)
        with transaction.atomic(using="basic"):
            if not ticket.is_process_finish():
                ticket.withdraw(process_message, processed_by)
        return cls.serialize_ticket_queryset(ticket)

    def get_content_for_notice(self, ticket):
        """
        Get the message content used for notifications
        @param [Ticket] ticket:
        @return:
        """
        return self.ticket_obj.get_content_for_notice(ticket)
def approve_finished(ticket, status, **kwargs):
    """Post-approval hook: grant permissions on success, run the termination
    callback when the ticket failed or was stopped."""
    if status == SUCCEEDED:
        TicketFactory(ticket.ticket_type).add_permission(ticket)
    if status in [FAILED, STOPPED]:
        TicketFactory(ticket.ticket_type).after_terminate(ticket, status)
| [
"terrencehan@tencent.com"
] | terrencehan@tencent.com |
060e3d3c52f7c8946fc6ce8307a4633642f9db33 | 7b437e095068fb3f615203e24b3af5c212162c0d | /enaml/widgets/container.py | 1421529ddfd2d0cd52357c3dcb904f215742c914 | [
"BSD-3-Clause"
] | permissive | ContinuumIO/enaml | d8200f97946e5139323d22fba32c05231c2b342a | 15c20b035a73187e8e66fa20a43c3a4372d008bd | refs/heads/master | 2023-06-26T16:16:56.291781 | 2013-03-26T21:13:52 | 2013-03-26T21:13:52 | 9,047,832 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,031 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import (
Bool, Constant, Coerced, ForwardTyped, Typed, observe, set_default
)
from enaml.core.declarative import d_
from enaml.layout.geometry import Box
from enaml.layout.layout_helpers import vbox
from .constraints_widget import (
ConstraintsWidget, ProxyConstraintsWidget, ConstraintMember
)
class ProxyContainer(ProxyConstraintsWidget):
    """ The abstract definition of a proxy Container object.

    Toolkit backends implement this interface to realize a Container.
    """
    #: A reference to the Container declaration. ForwardTyped defers the
    #: lookup because Container is defined later in this module.
    declaration = ForwardTyped(lambda: Container)
class Container(ConstraintsWidget):
""" A ConstraintsWidget subclass that provides functionality for
laying out constrainable children according to their system of
constraints.
The Container is the canonical component used to arrange child
widgets using constraints-based layout. Given a heierarchy of
components, the top-most Container will be charged with the actual
layout of the decendents. This allows constraints to cross the
boundaries of Containers, enabling powerful and flexible layouts.
There are widgets whose boundaries constraints may not cross. Some
examples of these would be a ScrollArea or a TabGroup. See the
documentation of a given container component as to whether or not
constraints may cross its boundaries.
"""
#: A boolean which indicates whether or not to allow the layout
#: ownership of this container to be transferred to an ancestor.
#: This is False by default, which means that every container
#: get its own layout solver. This improves speed and reduces
#: memory use (by keeping a solver's internal tableaux small)
#: but at the cost of not being able to share constraints
#: across Container boundaries. This flag must be explicitly
#: marked as True to enable sharing.
share_layout = d_(Bool(False))
#: A constant symbolic object that represents the internal left
#: boundary of the content area of the container.
contents_left = ConstraintMember()
#: A constant symbolic object that represents the internal right
#: boundary of the content area of the container.
contents_right = ConstraintMember()
#: A constant symbolic object that represents the internal top
#: boundary of the content area of the container.
contents_top = ConstraintMember()
#: A constant symbolic object that represents the internal bottom
#: boundary of the content area of the container.
contents_bottom = ConstraintMember()
#: A constant symbolic object that represents the internal width of
#: the content area of the container.
contents_width = Constant()
def _default_contents_width(self):
return self.contents_right - self.contents_left
#: A constant symbolic object that represents the internal height of
#: the content area of the container.
contents_height = Constant()
def _default_contents_height(self):
return self.contents_bottom - self.contents_top
#: A constant symbolic object that represents the internal center
#: along the vertical direction the content area of the container.
contents_v_center = Constant()
def _default_contents_v_center(self):
return self.contents_top + self.contents_height / 2.0
#: A constant symbolic object that represents the internal center
#: along the horizontal direction of the content area of the container.
contents_h_center = Constant()
def _default_contents_h_center(self):
return self.contents_left + self.contents_width / 2.0
#: A box object which holds the padding for this component. The
#: padding is the amount of space between the outer boundary box
#: and the content box. The default padding is (10, 10, 10, 10).
#: Certain subclasses, such as GroupBox, may provide additional
#: margin than what is specified by the padding.
padding = d_(Coerced(Box, (10, 10, 10, 10)))
#: Containers freely exapnd in width and height. The size hint
#: constraints for a Container are used when the container is
#: not sharing its layout. In these cases, expansion of the
#: container is typically desired.
hug_width = set_default('ignore')
hug_height = set_default('ignore')
#: A reference to the ProxyContainer object.
proxy = Typed(ProxyContainer)
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def widgets(self):
""" Get the child ConstraintsWidgets defined on the container.
"""
return [c for c in self.children if isinstance(c, ConstraintsWidget)]
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def child_added(self, child):
""" Handle the child added event on the container.
This event handler will request a relayout if the added child
is an instance of 'ConstraintsWidget'.
"""
super(Container, self).child_added(child)
if isinstance(child, ConstraintsWidget):
self.request_relayout()
def child_removed(self, child):
""" Handle the child removed event on the container.
This event handler will request a relayout if the removed child
is an instance of 'ConstraintsWidget'.
"""
super(Container, self).child_removed(child)
if isinstance(child, ConstraintsWidget):
self.request_relayout()
#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe(('share_layout', 'padding'))
def _layout_invalidated(self, change):
""" A private observer which invalidates the layout.
"""
# The superclass handler is sufficient.
super(Container, self)._layout_invalidated(change)
#--------------------------------------------------------------------------
# Constraints Generation
#--------------------------------------------------------------------------
def _get_default_constraints(self):
""" The default constraints for a Container.
This method supplies default vbox constraint to the children of
the container if other constraints are not given.
"""
cns = super(Container, self)._get_default_constraints()
ws = (c for c in self.children if isinstance(c, ConstraintsWidget))
cns.append(vbox(*ws))
return cns
| [
"sccolbert@gmail.com"
] | sccolbert@gmail.com |
87f5c24314b39490e8e74e0b1de8d082502121b4 | fd7720dfc136eb92dbff8cc31e0f83bb8bbced16 | /db/queries.py | 7180c0c49d977ee644b04f66463964b52d135018 | [] | no_license | Villux/golden_goal | d134a1660dd32f0b4d05f720993dd23f8a064faf | f36f4dd0297e2e52c0f990cb3ac134f70fc16780 | refs/heads/master | 2020-03-27T01:53:09.863147 | 2018-11-15T15:40:04 | 2018-11-15T15:40:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,326 | py | create_elo_table = '''CREATE TABLE elo_table
(id integer PRIMARY KEY AUTOINCREMENT,
date TIMESTAMP,
team text,
elo real,
match_id integer,
season_id INTEGER NOT NULL,
FOREIGN KEY(season_id) REFERENCES season_table(id),
FOREIGN KEY(match_id) REFERENCES match_table(id));'''
drop_elo_table = "DROP TABLE IF EXISTS elo_table;"
create_odds_table = '''CREATE TABLE odds_table
(id integer PRIMARY KEY AUTOINCREMENT,
date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
home_win REAL,
draw REAL,
away_win REAL,
description TEXT,
match_id integer,
FOREIGN KEY(match_id) REFERENCES match_table(id));'''
drop_odds_table = "DROP TABLE IF EXISTS odds_table;"
create_division_table = '''CREATE TABLE division_table
(id integer PRIMARY KEY AUTOINCREMENT,
data_tag text,
description text);'''
drop_division_table = "DROP TABLE IF EXISTS division_table;"
create_season_table = '''CREATE TABLE season_table
(id integer PRIMARY KEY AUTOINCREMENT,
start_date TIMESTAMP,
end_date TIMESTAMP,
description text,
division_id INTEGER,
FOREIGN KEY(division_id) REFERENCES division_table(id));'''
drop_season_table = "DROP TABLE IF EXISTS season_table;"
create_lineup_table = '''CREATE TABLE lineup_table
(id integer PRIMARY KEY AUTOINCREMENT,
url text,
hp1 integer,
hp2 integer,
hp3 integer,
hp4 integer,
hp5 integer,
hp6 integer,
hp7 integer,
hp8 integer,
hp9 integer,
hp10 integer,
hp11 integer,
hs1 integer,
hs2 integer,
hs3 integer,
hs4 integer,
hs5 integer,
hs6 integer,
hs7 integer,
ap1 integer,
ap2 integer,
ap3 integer,
ap4 integer,
ap5 integer,
ap6 integer,
ap7 integer,
ap8 integer,
ap9 integer,
ap10 integer,
ap11 integer,
as1 integer,
as2 integer,
as3 integer,
as4 integer,
as5 integer,
as6 integer,
as7 integer,
match_id integer UNIQUE,
FOREIGN KEY(hp1) REFERENCES player_identity_table(id),
FOREIGN KEY(hp2) REFERENCES player_identity_table(id),
FOREIGN KEY(hp3) REFERENCES player_identity_table(id),
FOREIGN KEY(hp4) REFERENCES player_identity_table(id),
FOREIGN KEY(hp5) REFERENCES player_identity_table(id),
FOREIGN KEY(hp6) REFERENCES player_identity_table(id),
FOREIGN KEY(hp7) REFERENCES player_identity_table(id),
FOREIGN KEY(hp8) REFERENCES player_identity_table(id),
FOREIGN KEY(hp9) REFERENCES player_identity_table(id),
FOREIGN KEY(hp10) REFERENCES player_identity_table(id),
FOREIGN KEY(hp11) REFERENCES player_identity_table(id),
FOREIGN KEY(hs1) REFERENCES player_identity_table(id),
FOREIGN KEY(hs2) REFERENCES player_identity_table(id),
FOREIGN KEY(hs3) REFERENCES player_identity_table(id),
FOREIGN KEY(hs4) REFERENCES player_identity_table(id),
FOREIGN KEY(hs5) REFERENCES player_identity_table(id),
FOREIGN KEY(hs6) REFERENCES player_identity_table(id),
FOREIGN KEY(hs7) REFERENCES player_identity_table(id),
FOREIGN KEY(ap1) REFERENCES player_identity_table(id),
FOREIGN KEY(ap2) REFERENCES player_identity_table(id),
FOREIGN KEY(ap3) REFERENCES player_identity_table(id),
FOREIGN KEY(ap4) REFERENCES player_identity_table(id),
FOREIGN KEY(ap5) REFERENCES player_identity_table(id),
FOREIGN KEY(ap6) REFERENCES player_identity_table(id),
FOREIGN KEY(ap7) REFERENCES player_identity_table(id),
FOREIGN KEY(ap8) REFERENCES player_identity_table(id),
FOREIGN KEY(ap9) REFERENCES player_identity_table(id),
FOREIGN KEY(ap10) REFERENCES player_identity_table(id),
FOREIGN KEY(ap11) REFERENCES player_identity_table(id),
FOREIGN KEY(as1) REFERENCES player_identity_table(id),
FOREIGN KEY(as2) REFERENCES player_identity_table(id),
FOREIGN KEY(as3) REFERENCES player_identity_table(id),
FOREIGN KEY(as4) REFERENCES player_identity_table(id),
FOREIGN KEY(as5) REFERENCES player_identity_table(id),
FOREIGN KEY(as6) REFERENCES player_identity_table(id),
FOREIGN KEY(as7) REFERENCES player_identity_table(id),
FOREIGN KEY(match_id) REFERENCES match_table(id));'''
drop_lineup_table = "DROP TABLE IF EXISTS lineup_table;"
create_match_id_index = "CREATE INDEX match_id_index ON lineup_table (match_id);"
create_player_identity_table = '''CREATE TABLE player_identity_table
(fifa_name text,
goalcom_name text,
fifa_id INTEGER PRIMARY KEY,
goalcom_url text);'''
drop_player_identity_table = "DROP TABLE IF EXISTS player_identity_table;"
create_fifa_id_index = "CREATE INDEX pit_fifa_id_index ON player_identity_table (fifa_id);"
| [
"villej.toiviainen@gmail.com"
] | villej.toiviainen@gmail.com |
ca79e5fd9f9270aabed432701bd25c1f14e6fdc8 | 29881fa0c087f3d3ce0e27fb51309384266203e1 | /price_register/forms.py | 62c7c641dab7ec7fc4fbd7a250098d5eaf789c6f | [] | no_license | aidant842/mymo | 0e5ec2a5c73b6755d994467e4afba10141f449ea | 877e7a38198d1b5effc6c3a63ad12e7166c20a77 | refs/heads/master | 2023-07-17T15:30:21.350974 | 2021-08-24T12:43:18 | 2021-08-24T12:43:18 | 340,033,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | from django import forms
class PropertyRegsiterFilter(forms.Form):
IE_COUNTY_CHOICES = [
(None, 'County'), ('carlow', 'Carlow'), ('cavan', 'Cavan'),
('clare', 'Clare'), ('cork', 'Cork'),
('donegal', 'Donegal'), ('dublin', 'Dublin'),
('galway', 'Galway'), ('kerry', 'Kerry'),
('kildare', 'Kildare'), ('kilkenny', 'Kilkenny'),
('laois', 'Laois'), ('leitrim', 'Leitrim'),
('limerick', 'Limerick'), ('longford', 'Longford'),
('louth', 'Louth'), ('mayo', 'Mayo'),
('meath', 'Meath'), ('monaghan', 'Monaghan'),
('offaly', 'Offaly'), ('roscommon', 'Roscommon'),
('sligo', 'Sligo'), ('tipperary', 'Tipperary'),
('waterford', 'Waterford'), ('westmeath', 'Westmeath'),
('wexford', 'Wexford'), ('wicklow', 'Wicklow'),
]
county = forms.CharField(widget=forms.Select
(choices=IE_COUNTY_CHOICES,
attrs={'class': 'form-select'}),
label='',
required=False)
area = forms.CharField(widget=forms.TextInput(
attrs={'placeholder': 'Area/Town'}),
label="",
required=False) | [
"aidant842@gmail.com"
] | aidant842@gmail.com |
734e216421a945afd8daef96e60cedf08644860b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_initial.py | d1cef3978d2c3fa4aabf672c80ed94a85932a083 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py |
#calss header
class _INITIAL():
def __init__(self,):
self.name = "INITIAL"
self.definitions = [u'the first letter of a name, especially when used to represent a name: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
573446d0eb0db0b59632fc96555a27bbc131d18f | bf8870d923adca9877d4b4dacef67f0a454727a8 | /codeforces.com/contest/133/a/pr.py | 63730bff7c0f31101ef62f87f15e0b187a1c0c68 | [] | no_license | artkpv/code-dojo | 6f35a785ee5ef826e0c2188b752134fb197b3082 | 0c9d37841e7fc206a2481e4640e1a024977c04c4 | refs/heads/master | 2023-02-08T22:55:07.393522 | 2023-01-26T16:43:33 | 2023-01-26T16:43:33 | 158,388,327 | 1 | 0 | null | 2023-01-26T08:39:46 | 2018-11-20T12:45:44 | C# | UTF-8 | Python | false | false | 400 | py | #!python3
from collections import deque, Counter
from itertools import combinations, permutations
from math import sqrt
import unittest
def read_int():
return int(input().strip())
def read_int_array():
return [int(i) for i in input().strip().split(' ')]
######################################################
s = input().strip()
print('YES' if any((c in 'HQ9') for c in s) else 'NO')
| [
"artyomkarpov@gmail.com"
] | artyomkarpov@gmail.com |
442f658071a199deed2ea39033e67851ce549cd2 | c50e5af8f72de6ef560ee6c0bbfa756087824c96 | /刷题/Leetcode/p974_Subarray_Sums_Divisible_by_K.py | 5515fb9f32761fd960718a1bc96ae72209273fe2 | [] | no_license | binghe2402/learnPython | 5a1beef9d446d8316aaa65f6cc9d8aee59ab4d1c | 2b9e21fe4a8eea0f8826c57287d59f9d8f3c87ce | refs/heads/master | 2022-05-27T03:32:12.750854 | 2022-03-19T08:00:19 | 2022-03-19T08:00:19 | 252,106,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | from typing import List
import collections
'''
前缀和
n*K = sum[i+1:j+1] = prefixSum[j]-prefixSum[i]
(prefixSum[i] - prefixSum[j]) % K == 0
prefixSum[i] % K == prefixSum[j] % K
'''
class Solution:
def subarraysDivByK(self, A: List[int], K: int) -> int:
prefixSum = 0
prefix_cnt = collections.Counter({0: 1})
cnt = 0
for i in A:
prefixSum += i
prefixSum_mod = prefixSum % K
cnt += prefix_cnt[prefixSum_mod]
prefix_cnt[prefixSum_mod] += 1
return cnt
A = [4, 5, 0, -2, -3, 1]
K = 5
s = Solution()
res = s.subarraysDivByK(A, K)
print(res)
| [
"binghe2402@hotmail.com"
] | binghe2402@hotmail.com |
bf36f0203b6e49922be55bdbe48c77f4e48f19a7 | 1e3dba48c257b8b17d21690239d51858e5009280 | /exit_server/config/wsgi.py | 2d6f7b7f4d45020bc3a0deda128c173c13eea691 | [] | no_license | JEJU-SAMDASU/EXIT-Server | e3eb87707e5f79b0399dc614507c8d5889a85a4a | ac1b73703c9fff54866c979a620fd22470345b07 | refs/heads/master | 2023-02-12T13:29:18.508066 | 2020-11-23T12:42:18 | 2020-11-23T12:42:18 | 315,288,514 | 0 | 0 | null | 2020-11-24T17:16:29 | 2020-11-23T11:07:47 | Python | UTF-8 | Python | false | false | 394 | py | """
WSGI config for exit_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
application = get_wsgi_application()
| [
"hanbin8269@gmail.com"
] | hanbin8269@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.