blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9567c8bcbcde190b17eefc5c683ea9c647bcc093 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/iotbx/detectors/context/endstation_max_iv_trial.py | 480a4fa17f0bc8627893d1fa007ce8b6d56b0c79 | [
"BSD-3-Clause",
"BSD-3-Clause-LBNL"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 10,153 | py | from __future__ import absolute_import, division, print_function
import math,time
from iotbx.detectors.context.camera_convention import Cameras
from iotbx.detectors.context.config_detector import ADSC910_at_BioCARS
class EndStation:
  """Describes a beamline end station: the camera convention, the
  goniometer rotation axis, and the MOSFLM keyword blocks kept in the
  'mosflm_detector' / 'mosflm_beamline' entries of self.mos."""

  def __init__(self):
    # Defaults: ALS-style beamline, camera convention #1, and a
    # horizontal anticlockwise rotation axis.
    self.mos = {
      'mosflm_detector': """#detector-take defaults\n""",
      'mosflm_beamline': beamlines['ALS'],
    }
    self.set_camera_convention(1)
    self.set_rotation_axis("ROTATION HORIZ ANTI")

  def set_camera_convention(self, number):
    # Wrap the integer convention code in a Cameras helper object.
    self.cam_con = Cameras(number)

  def camera_convention(self):
    return self.cam_con

  def set_rotation_axis(self, axis):
    # Accept either a symbolic axis name (resolved via rotation_lookup)
    # or a 3-component tuple giving the normalized direction directly.
    if type(axis) is str:
      self.rot_axi = rotation_lookup[axis]
      self.rot_axi_string = axis  # remembered for later ADSC special-casing
    else:
      self.rot_axi = axis

  def rotation_axis(self):
    return self.rot_axi

  def mosflm(self):
    return self.mos
# MOSFLM "#beam" keyword blocks (polarization, divergence, dispersion) for
# the synchrotron sources this module knows about; one of these strings is
# installed as EndStation.mos['mosflm_beamline'].
beamlines = {
  "ALS":
  """#beam
SYNCHROTRON POLARIZATION 0.9
DIVERGENCE 0.100 0.020
DISPERSION 0.0001
""",
  "Australian":
  """#beam
SYNCHROTRON POLAR 0.9
DIVERGENCE 0.11 0.001
DISPER 0.001
""",
  "CHESS":
  """#beam
SYNCHROTRON POLAR 0.89
DIVE 0.030 0.010
DISPER 0.0025
""",
  "RAXIS":
  """#beam
""",
  }
# Map from a human-readable rotation-axis description to the normalized
# 3-component direction vector of the goniometer spindle in the laboratory
# frame; used by EndStation.set_rotation_axis().
rotation_lookup = {
  "ROTATION HORIZ ANTI":(0,1,0),
  "ROTATION HORIZ CLOCK":(0,-1,0), #reverse phi
  "ROTATION VERT ANTI":(-1,0,0),
  "ROTATION VERT CLOCK":(1,0,0),
  }
#ALS Quantum 4:
#omega 90 rotation horiz anti fast horiz origin ur rectang type adsc
#rmin 5 rmax 188.0 ymax 188.0 xmax 188.0 xscan 188.0 yscan 188.0
def EndStation_from_ImageObject(imageobject,phil_params):
  """Build an EndStation describing the beamline that produced the image.

  Starts from the default configuration (camera convention 1, horizontal
  anticlockwise rotation) and then applies vendor- and serial-number-specific
  overrides for the rotation axis and for the MOSFLM detector/beamline
  keyword blocks.  Branch order matters: later matches overwrite the
  'mosflm_detector'/'mosflm_beamline' entries set by earlier ones.

  imageobject: detector image wrapper exposing vendortype, serial_number,
    parameters, size1, bin, pixel_size, beamx/beamy (per this function's
    usage; exact contract defined by the iotbx detector classes).
  phil_params: phil scope consulted for the goniometer_rotation override.
  Returns the configured EndStation instance.
  """
  endstation = EndStation()
  endstation.set_camera_convention(1)
  endstation.set_rotation_axis("ROTATION HORIZ ANTI")
  import six
  if isinstance(imageobject.parameters["DETECTOR_SN"],six.string_types) and \
    "S/N E-32-0105" in imageobject.parameters["DETECTOR_SN"]:
    # vertical goniometer axis at Max-IV
    endstation.set_rotation_axis("ROTATION VERT ANTI")
    print("MAX-IV Eiger 16M")
    endstation.mos['mosflm_detector'] = """
# Specific implementation for Max IV BioMAX
DETECTOR EIGE OMEGA 270
"""
  if imageobject.vendortype == "Bruker Proteus CCD":
    endstation.set_rotation_axis("ROTATION VERT ANTI")
    print("BRUKER rotation", endstation.rot_axi)
  if imageobject.vendortype == "RAXIS":
    endstation.set_rotation_axis("ROTATION VERT CLOCK")
  #clockwise horizontal phi at most CHESS beamlines
  #also at Australian Synchrotron
  if imageobject.vendortype == "ADSC" and \
     imageobject.serial_number in [406,414,441,448,457,471,924,928]:
    endstation.set_rotation_axis("ROTATION HORIZ CLOCK")
  if imageobject.vendortype == "ADSC" and imageobject.serial_number == 910:
    if ADSC910_at_BioCARS(imageobject):
      endstation.set_rotation_axis("ROTATION HORIZ CLOCK")
    else:
      endstation.set_rotation_axis("ROTATION VERT ANTI") # just a hypothesis
  #vertical phi at APS 19ID
  # NOTE(review): the comment above says "vertical phi" but the axis set
  # below is horizontal clockwise -- confirm against beamline geometry.
  if imageobject.vendortype == "ADSC" and \
     imageobject.serial_number in [914]:
    endstation.set_rotation_axis("ROTATION HORIZ CLOCK")
  if imageobject.vendortype == "RigakuSaturn":
    endstation.set_rotation_axis("ROTATION VERT ANTI")
  #change in phi axis rotation at CHESS A1
  if imageobject.vendortype == "ADSC" and \
     imageobject.serial_number in [441]:
    record_date = imageobject.parameters["DATE"]
    record_tse = time.mktime(time.strptime(record_date))
    cutoff_441 = time.mktime(time.strptime("Fri Oct 01 00:00:00 2004"))
    # Images recorded before the October 2004 changeover used the old
    # (anticlockwise) spindle direction.
    if record_tse < cutoff_441:
      endstation.set_rotation_axis("ROTATION HORIZ ANTI")
  # Special cases: Pringle-Shen goniometer at CHESS F3
  if imageobject.vendortype == "ADSC" and \
     imageobject.serial_number in [414] and \
     phil_params.goniometer_rotation.lower().find(
     'pringle-shen')>=0 and \
     'AXIS' in imageobject.parameters and \
     imageobject.parameters['AXIS']=='phi':
    endstation.set_rotation_axis("ROTATION VERT ANTI")
  # A nonzero OMEGA header value tilts the spindle: rotate the current axis
  # vector about the laboratory (0,1,0) direction by -OMEGA degrees.
  if 'OMEGA' in imageobject.parameters and \
     imageobject.parameters['OMEGA'] != 0.0 :
    omega = imageobject.parameters['OMEGA']
    from iotbx.detectors import rotate_vector_around
    endstation.set_rotation_axis(
      rotate_vector_around(endstation.rotation_axis(),
                           (0,1,0),-omega*math.pi/180.)
    )
  # The following block is reference material only (an unassigned string
  # literal, never executed): MOSFLM keyword blocks verified at CHESS.
  '''
#tested for CHESS F1 s/n 406:
SCANNER ROTATION HORIZ CLOCK FAST horizontal ORIGIN UR RECT TYPE ADSC
LIMITS RMIN 5 RMAX 137.2 XMAX 96.5 YMAX 97.5 XSCAN 94.0 YSCAN 94.0
BACKSTOP RADIUS 7.00 CENTRE 90.100 91.500
ADCOFFSET 20
NULLPIX 0
GAIN 0.300
#tested for CHESS F3 Pringle-Shen
SCANNER ROTATION VERT ANTI FAST horizontal ORIGIN UR RECT TYPE ADSC
LIMITS RMIN 5 RMAX 143.3 XMAX 106.8 YMAX 95.7 XSCAN 94.0 YSCAN 94.0
BACKSTOP RADIUS 4.00 CENTRE 106.600 94.200
GAIN 0.500
BIAS 5
'''
  if imageobject.vendortype == "MacScience":
    if imageobject.size1==3000:
      endstation.mos['mosflm_detector'] = """DETECTOR DIP2030\n"""
      endstation.set_rotation_axis("ROTATION HORIZ CLOCK")
  if imageobject.vendortype == "MARCCD":
    if imageobject.size1*imageobject.bin>=4096:
      parameters = {'sz' : imageobject.size1*imageobject.bin,
                    'pix': imageobject.pixel_size / imageobject.bin}
      parameters['scan']=parameters['pix']*parameters['sz']/2
      endstation.mos['mosflm_detector'] = """#MARCCD detector
LIMITS XMIN 0 XMAX xmax_tag YMIN 0 YMAX ymax_tag xscan %(scan)d yscan %(scan)d
SIZE %(sz)d %(sz)d HEADER 1 byte 4096
PIXEL %(pix)f
NULLPIX 0
#4Kx4K MarCCD format is unknown to MOSFLM, which apparently defaults
#the nullpix to 10. This is a problem for weak-background images.
"""%parameters
  '''the correct xmax_tag and ymax_tag are added later in the interface module'''
  #identification of specific beamlines at Spring8 with Reversephi:
  # BL41XU Mar MX225HE--Serial number 40
  # BL32XU Mar MX225HE--Serial number 31
  # rely on detector serial number, uncertain how to decode geometric description
  # of rotation axis within the header.
  if imageobject.parameters["DETECTOR_SN"] in [7]:
    endstation.set_rotation_axis("ROTATION HORIZ CLOCK")
    endstation.mos['mosflm_detector'] = """
# Specific implementation for APS SER-CAT BM22, chi=180 setting
DETECTOR MARCCD
DETECTOR REVERSEPHI
SIZE 4096 4096
PIXEL 0.07324 0.07324
"""
    endstation.mos['mosflm_beamline'] = """GAIN 0.37
POLARISATION 0.99
DIVE 0.0001 0.00001
DISPER 0.0001
"""
  if imageobject.parameters["DETECTOR_SN"] in [31,40]:
    endstation.set_rotation_axis("ROTATION HORIZ CLOCK")
    endstation.mos['mosflm_detector'] = """
# Specific implementation for Spring8 BL41XU Mar MX225HE
DETECTOR MARCCD
DETECTOR REVERSEPHI
SIZE 3072 3072
PIXEL 0.07324 0.07324
LIMITS RMIN 2 RMAX 159.1 XMAX 159.1 YMAX 112.5 XSCAN 159.1 YSCAN 159.1
"""
    endstation.mos['mosflm_beamline'] = """GAIN 0.37
POLARISATION 0.99
DIVE 0.0001 0.00001
DISPER 0.0001
!offset 0.0 0.0
"""
  # For ADSC detectors with a non-default rotation axis, synthesize the
  # SCANNER/LIMITS block from the image geometry (beam center and size).
  if imageobject.vendortype == "ADSC" and \
     endstation.rot_axi_string!="ROTATION HORIZ ANTI":
    # Rough idea of the MOSFLM LIMITS
    mmsize = imageobject.size1 * imageobject.pixel_size
    maxx = max( abs(imageobject.beamx-mmsize), abs(imageobject.beamx) )
    maxy = max( abs(imageobject.beamy-mmsize), abs(imageobject.beamy) )
    parameters = {'rotation': endstation.rot_axi_string,
                  'rmax': math.sqrt(maxx*maxx + maxy*maxy),
                  'xmax': maxx,
                  'ymax': maxy,
                  'xscan': mmsize/2.,
                  'yscan': mmsize/2.}
    if imageobject.serial_number in [457,928]: parameters['gain']=0.32
    else: parameters['gain']=0.30
    endstation.mos['mosflm_detector'] = """#detector
SCANNER %(rotation)s FAST horizontal ORIGIN UR RECT TYPE ADSC
LIMITS RMIN 5 RMAX %(rmax).1f XMAX %(xmax).1f YMAX %(ymax).1f XSCAN %(xscan).1f YSCAN %(yscan).1f
GAIN %(gain).3f
"""%parameters
    # Serial numbers 457/928 are at the Australian Synchrotron; the
    # remaining reverse-phi ADSC detectors get CHESS beam parameters.
    if imageobject.serial_number in [457,928]:
      endstation.mos['mosflm_beamline'] = beamlines['Australian']
    else:
      endstation.mos['mosflm_beamline'] = beamlines['CHESS']
  if imageobject.vendortype == "ADSC" and imageobject.serial_number == 910:
    if ADSC910_at_BioCARS(imageobject):
      endstation.mos['mosflm_detector']=endstation.mos['mosflm_detector']+\
        "#BIOCARS 14-BM-C S/N=910"
  if imageobject.vendortype in [ "RAXIS" ]:
    endstation.mos['mosflm_detector'] = """#detector
ADCOFFSet 5
"""
    if imageobject.serial_number.lower().find('dr. r-axis iv')==0:
      endstation.mos['mosflm_detector']=endstation.mos['mosflm_detector']+\
        'DETECTOR RAXISIV'
      #At least for MOSFLM 6.2.4, this seems to be important because it
      # allows the program to accept large standard deviations for spots
      # (above 32767). Otherwise MOSFLM can crash with SERIOUS ERROR message.
    # Both DTrek and Raxis formats have "RAXIS" vendortype but only Raxis has "head" attribute
    if "head" in imageobject.__dict__ and \
       imageobject.head['Device'].lower().find('r-axis2')==0:
      endstation.mos['mosflm_detector']=endstation.mos['mosflm_detector']+\
        'detector raxis'
    endstation.mos['mosflm_beamline'] = beamlines['RAXIS']
  if imageobject.vendortype in [ "CBF" ]:
    endstation.mos['mosflm_detector'] = """#detector cbf"""
  if imageobject.vendortype in [ "Pilatus-6M" ]:
    endstation.mos['mosflm_detector'] = """#detector Pilatus-6M"""
    #flags mosflm interface to include the start & angle fix, mosflm 7.0.3 & below
  if imageobject.vendortype in [ "MARIP" ]:
    endstation.mos['mosflm_detector'] = """#detector
ADCOFFSet 5
"""#Just a guess: image plates require an offset to insure stability
   # over long integration sweeps. Example: TM0064/11_20_01/1c3p3
  #additional information for universally specifying image size
  endstation.mos['mosflm_detector'] = endstation.mos['mosflm_detector'] + \
    "\n#UIS_PIXEL %(pix)f\n#UIS_SIZE %(sz)d"%{
      'sz' : imageobject.size1*imageobject.bin,
      'pix': imageobject.pixel_size / imageobject.bin}
  return endstation
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
9274bbf79907ef20466dc8880621cee52ca5aab4 | 0cf8c9fb5aa980cc4cd3549c0f2e6c834b79a8eb | /run.py | 82f02c8d9aa9d4f67d17214254c182c81a12c166 | [] | no_license | mehanig/stepic_video_stats | 0a80a37380d5e7995f05e00c2f01fa3fd231b83c | 3bd1a350632de21b04a93b54eea6a63e817057f9 | refs/heads/master | 2020-04-10T15:43:29.452334 | 2015-03-29T16:33:21 | 2015-03-29T16:33:21 | 32,393,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | #!stepicStat_Venv/bin/python
from app import app
# Launch the Flask development server with the interactive debugger enabled.
# NOTE(review): debug=True is for local development only; do not use in production.
app.run(debug=True)
| [
"mehanig@gmail.com"
] | mehanig@gmail.com |
6a11fda002aeaa9122f9038288d6b08cb58e12d2 | 561b2f8eace9e706cd441194e317faf816805475 | /build_isolated/turtlebot3_msgs/catkin_generated/stamps/turtlebot3_msgs/_setup_util.py.stamp | 949cecde3780e07a6cb8a2df14e3a6824cfe86c7 | [] | no_license | mis0butter/towr_dog | 99e0ae387e3c92cf1ba11bb922290ce5d6ed5b2a | 3add1e4279886a11901e965f558620d5a18a441e | refs/heads/master | 2023-04-03T13:16:31.359730 | 2021-04-13T13:47:43 | 2021-04-13T13:47:43 | 357,432,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,896 | stamp | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'
# Platform detection, used to pick shell syntax and library search variables.
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
    # while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
    # since Windows finds dll's via the PATH variable, prepend it with path to lib
    PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': PATH_TO_ADD_SUFFIX,
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    """Generate shell code that undoes workspace-related environment changes.

    For every variable listed in *env_var_subfolders*, compute the value it
    would have once all CMAKE_PREFIX_PATH workspace entries are removed and
    emit an assignment line for it.  *environ* is updated in place with the
    rolled-back values.  Modifications performed by environment hooks are
    not covered.
    """
    snapshot = copy.copy(environ)  # roll back against the original values
    shell_lines = []
    for var_name in sorted(env_var_subfolders.keys()):
        folders = env_var_subfolders[var_name]
        folders = folders if isinstance(folders, list) else [folders]
        rolled_back = _rollback_env_variable(snapshot, var_name, folders)
        if rolled_back is None:
            continue
        environ[var_name] = rolled_back
        shell_lines.append(assignment(var_name, rolled_back))
    if shell_lines:
        header = comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH')
        shell_lines.insert(0, header)
    return shell_lines
def _rollback_env_variable(environ, name, subfolders):
    """
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    """
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        if subfolder:
            # normalize: strip a single leading and trailing path separator
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # compare while ignoring one trailing separator on the env entry
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    # None signals "nothing changed" so callers can skip emitting an assignment
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    """Return all catkin workspace paths found in CMAKE_PREFIX_PATH.

    A path counts as a workspace when it contains the catkin marker file,
    or (if *include_fuerte*) when it starts with /opt/ros/fuerte, or (if
    *include_non_existing*) when it does not exist on disk at all.
    """
    prefix_path = environ.get('CMAKE_PREFIX_PATH', '')
    candidates = [p for p in prefix_path.split(os.pathsep) if p]
    workspaces = []
    for path in candidates:
        if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)):
            workspaces.append(path)
        elif include_fuerte and path.startswith('/opt/ros/fuerte'):
            workspaces.append(path)
        elif include_non_existing and not os.path.exists(path):
            workspaces.append(path)
    return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    """Generate shell code prepending workspace folders to each environment variable."""
    workspace_paths = [p for p in workspaces.split(os.pathsep) if p]
    output = [comment('prepend folders of workspaces to environment variables')]
    # CMAKE_PREFIX_PATH comes first and uses the workspace roots directly
    cmake_prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', workspace_paths, '')
    output.append(prepend(environ, 'CMAKE_PREFIX_PATH', cmake_prefix))
    remaining = sorted(k for k in env_var_subfolders.keys() if k != 'CMAKE_PREFIX_PATH')
    for key in remaining:
        prefix = _prefix_env_variable(environ, key, workspace_paths, env_var_subfolders[key])
        output.append(prepend(environ, key, prefix))
    return output
def _prefix_env_variable(environ, name, paths, subfolders):
"""
Return the prefix to prepend to the environment variable NAME.
Adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Return a shell statement assigning *value* to variable *key*."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Return *msg* formatted as a shell comment (REM on Windows, # elsewhere)."""
    marker = 'REM' if IS_WINDOWS else '#'
    return '%s %s' % (marker, msg)
def prepend(environ, key, prefix):
    """Return shell code prepending *prefix* to the value of variable *key*.

    Falls back to a plain assignment when the variable is unset or empty.
    """
    if not environ.get(key):
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    """Generate shell code with found environment hooks for the all workspaces."""
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    # Generic hooks (.sh/.bat) are emitted before shell-specific ones; a hook
    # in a later (higher-priority) workspace replaces a same-named hook from
    # an earlier workspace -- hence the reversed() iteration and the
    # by-filename bookkeeping below.
    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    # Export the hooks as numbered variables so the sourcing shell script
    # can iterate over them in order.
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # Build the list of prefix paths to consider.  The hard-coded value
        # below was baked in by catkin at generation time for this machine.
        if not args.local:
            # environment at generation time
            CMAKE_PREFIX_PATH = r'/home/junette/catkin_ws/devel_isolated/turtlebot3_gazebo;/home/junette/catkin_ws/devel_isolated/turtlebot3_description;/home/junette/catkin_ws/devel_isolated/turtlebot3;/home/junette/catkin_ws/devel_isolated/towr_ros;/home/junette/catkin_ws/devel_isolated/towr;/home/junette/catkin_ws/devel_isolated/spot_viz;/home/junette/catkin_ws/devel_isolated/spot_msgs;/home/junette/catkin_ws/devel_isolated/spot_driver;/home/junette/catkin_ws/devel_isolated/spot_description;/home/junette/catkin_ws/devel_isolated/r2d2;/home/junette/catkin_ws/devel_isolated/ifopt;/home/junette/catkin_ws/devel;/opt/ros/melodic'.split(';')
        else:
            # don't consider any other prefix path than this one
            CMAKE_PREFIX_PATH = []
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        # CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
        # base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
        if os.path.sep != '/':
            base_path = base_path.replace(os.path.sep, '/')
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        # Emit rollback assignments (unless extending), then the prepends,
        # then the environment-hook bookkeeping variables.
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| [
"junetter@gmail.com"
] | junetter@gmail.com |
326a44d068f321b42ae16056f6941771e9bcc1e0 | 066ee4df594a5dc90335d271b9d5a1b1e2a4d34c | /y/google-cloud-sdk/lib/googlecloudapis/manager/v1beta2/manager_v1beta2_client.py | 4195ac049ac88e88ad1debe3e551d61f61ec1551 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | ychen820/microblog | a2d82447525325ec58285c2e5db58b79cceaca1b | d379afa2db3582d5c3be652165f0e9e2e0c154c6 | refs/heads/master | 2021-01-20T05:58:48.424357 | 2015-04-28T22:03:09 | 2015-04-28T22:03:09 | 32,948,331 | 0 | 2 | BSD-3-Clause | 2020-07-25T05:04:35 | 2015-03-26T19:45:07 | Python | UTF-8 | Python | false | false | 10,754 | py | """Generated client library for manager version v1beta2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudapis.apitools.base.py import base_api
from googlecloudapis.manager.v1beta2 import manager_v1beta2_messages as messages
class ManagerV1beta2(base_api.BaseApiClient):
"""Generated client library for service manager version v1beta2."""
MESSAGES_MODULE = messages
_PACKAGE = u'manager'
_SCOPES = [u'https://www.googleapis.com/auth/appengine.admin', u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/compute', u'https://www.googleapis.com/auth/devstorage.read_write', u'https://www.googleapis.com/auth/ndev.cloudman', u'https://www.googleapis.com/auth/ndev.cloudman.readonly']
_VERSION = u'v1beta2'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = ''
_CLIENT_CLASS_NAME = u'ManagerV1beta2'
_URL_VERSION = u'v1beta2'
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new manager handle."""
url = url or u'https://www.googleapis.com/manager/v1beta2/'
super(ManagerV1beta2, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.deployments = self.DeploymentsService(self)
self.templates = self.TemplatesService(self)
class DeploymentsService(base_api.BaseApiService):
"""Service class for the deployments resource."""
_NAME = u'deployments'
def __init__(self, client):
super(ManagerV1beta2.DeploymentsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'manager.deployments.delete',
ordered_params=[u'projectId', u'region', u'deploymentName'],
path_params=[u'deploymentName', u'projectId', u'region'],
query_params=[],
relative_path=u'projects/{projectId}/regions/{region}/deployments/{deploymentName}',
request_field='',
request_type_name=u'ManagerDeploymentsDeleteRequest',
response_type_name=u'ManagerDeploymentsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'manager.deployments.get',
ordered_params=[u'projectId', u'region', u'deploymentName'],
path_params=[u'deploymentName', u'projectId', u'region'],
query_params=[],
relative_path=u'projects/{projectId}/regions/{region}/deployments/{deploymentName}',
request_field='',
request_type_name=u'ManagerDeploymentsGetRequest',
response_type_name=u'Deployment',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'manager.deployments.insert',
ordered_params=[u'projectId', u'region'],
path_params=[u'projectId', u'region'],
query_params=[],
relative_path=u'projects/{projectId}/regions/{region}/deployments',
request_field=u'deployment',
request_type_name=u'ManagerDeploymentsInsertRequest',
response_type_name=u'Deployment',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'manager.deployments.list',
ordered_params=[u'projectId', u'region'],
path_params=[u'projectId', u'region'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'projects/{projectId}/regions/{region}/deployments',
request_field='',
request_type_name=u'ManagerDeploymentsListRequest',
response_type_name=u'DeploymentsListResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Delete method for the deployments service.
Args:
request: (ManagerDeploymentsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ManagerDeploymentsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Get method for the deployments service.
Args:
request: (ManagerDeploymentsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Deployment) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Insert method for the deployments service.
Args:
request: (ManagerDeploymentsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Deployment) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""List method for the deployments service.
Args:
request: (ManagerDeploymentsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeploymentsListResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class TemplatesService(base_api.BaseApiService):
"""Service class for the templates resource."""
_NAME = u'templates'
def __init__(self, client):
super(ManagerV1beta2.TemplatesService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'manager.templates.delete',
ordered_params=[u'projectId', u'templateName'],
path_params=[u'projectId', u'templateName'],
query_params=[],
relative_path=u'projects/{projectId}/templates/{templateName}',
request_field='',
request_type_name=u'ManagerTemplatesDeleteRequest',
response_type_name=u'ManagerTemplatesDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'manager.templates.get',
ordered_params=[u'projectId', u'templateName'],
path_params=[u'projectId', u'templateName'],
query_params=[],
relative_path=u'projects/{projectId}/templates/{templateName}',
request_field='',
request_type_name=u'ManagerTemplatesGetRequest',
response_type_name=u'Template',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'manager.templates.insert',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/templates',
request_field=u'template',
request_type_name=u'ManagerTemplatesInsertRequest',
response_type_name=u'Template',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'manager.templates.list',
ordered_params=[u'projectId'],
path_params=[u'projectId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'projects/{projectId}/templates',
request_field='',
request_type_name=u'ManagerTemplatesListRequest',
response_type_name=u'TemplatesListResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Delete method for the templates service.
Args:
request: (ManagerTemplatesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ManagerTemplatesDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Get method for the templates service.
Args:
request: (ManagerTemplatesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Template) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Insert method for the templates service.
Args:
request: (ManagerTemplatesInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Template) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""List method for the templates service.
Args:
request: (ManagerTemplatesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TemplatesListResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
| [
"ychen207@binghamton.edu"
] | ychen207@binghamton.edu |
9d95e5c86e9e640853644c48332baf0b1fa301cb | 5ac3d74d9f22de56d115602c6a51b461e8f7cd0c | /transactions.py | 2ef0998f137dac439e47f5c10c90990e4ca8399c | [] | no_license | mxito3/ethereum-pos | ff907b8755e5f68f843db03284c30a30baf54432 | 5bebf49510f93629a8e28350b831cb90c2a53ce3 | refs/heads/master | 2020-04-15T20:08:26.664331 | 2014-12-29T18:05:47 | 2014-12-29T18:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,054 | py | """This file explains how we tell if a transaction is valid or not, it explains
how we update the database when new transactions are added to the blockchain."""
#Whether you are a signer depends on:
#5000=long_time*2-medium_time
#500=medium_time/2
#K-5000: how much money you had at this point.
#K-5000, -4500: random numbers selected here
#K-2500, -1000: random numbers revealed in this range
#K: sign on this block and make deposit and give hash(secret)
#K+2500, +3500: get reward. slasher is no longer possible. reveals secret
import blockchain, custom, copy, tools, forth
E_check=tools.E_check
def sigs_match(Sigs, Pubs, msg):
pubs=copy.deepcopy(Pubs)
sigs=copy.deepcopy(Sigs)
def match(sig, pubs, msg):
for p in pubs:
if tools.verify(msg, sig, p):
return {'bool':True, 'pub':p}
return {'bool':False}
for sig in sigs:
a=match(sig, pubs, msg)
if not a['bool']:
return False
sigs.remove(sig)
pubs.remove(a['pub'])
return True
def signature_check(tx):#verify that a transaction has a valid ECDSA signature on it.
tx_copy = copy.deepcopy(tx)
tx_copy.pop('signatures')
if len(tx['pubkeys']) == 0:
tools.log('pubkey error')
return False
if len(tx['signatures']) > len(tx['pubkeys']):
tools.log('sigs too long')
return False
msg = tools.det_hash(tx_copy)
if not sigs_match(copy.deepcopy(tx['signatures']),
copy.deepcopy(tx['pubkeys']), msg):
tools.log('sigs do not match')
return False
return True
def mint_verify(tx, txs, out, DB):
length=tools.local_get('length')
height=tools.local_get('height')
custom.block_fee(int(tx['height'])-height)
gap=int(tx['height'])-height
for t in txs:
if t['type']=='mint':
out[0]+='no mint repeats'
if not tools.fee_check(tx, txs, DB):
out[0]+='fee check error'
return False
if tx['on_block']!=length+1:
out[0]+='on wrong block'
return False
if len(filter(lambda x: x['type']=='mint', txs))>0:
out[0]+='too many mints'
return False
amount=tools.mint_cost(txs, gap)
if tx['amount']!=amount:
tools.log('have: ' +str(tx['amount']))
tools.log('need: ' +str(amount))
tools.log('that amount is too big')
return False
return True
def spend_verify(tx, txs, out, DB):
txaddr=tools.addr(tx)
'''
h=tx['recent_hash']
l=tools.local_get('length')
r=range(l-10, l)
r=filter(lambda l: l>0, r)
recent_blocks=map(lambda x:tools.db_get(x), r)
recent_hashes=map(lambda x: x['block_hash'], recent_blocks)
if h not in recent_hashes:
tools.log('recent hash error')
return False
recent_txs=[]
def f(b, recent_txs=recent_txs):
recent_txs=recent_txs+b['txs']
map(f, recent_blocks)
recent_txs=filter(lambda t: t['type']=='spend', recent_txs)
recent_txs=filter(lambda t: t['recent_hash']==h, recent_txs)
recent_txs=filter(lambda t: t['to']==tx['to'], recent_txs)
recent_txs=filter(lambda t: t['amount']==tx['amount'], recent_txs)
recent_txs=filter(lambda t: t['fee']==tx['fee'], recent_txs)
recent_txs=filter(lambda t: tools.addr(t)==txaddr, recent_txs)
if len(recent_txs)>0:
out[0]+='no repeated spends'
return False
'''
if not signature_check(tx):
out[0]+='signature check'
return False
if len(tx['to'])<=30:
out[0]+='that address is too short'
out[0]+='tx: ' +str(tx)
return False
if not tools.fee_check(tx, txs, DB):
out[0]+='fee check error'
return False
return True
def sign_verify(tx, txs, out, DB):#check the validity of a transaction of type sign.
a=tools.addr(tx)
B=tx['B']#verify a proof that addr(tx) actually owned that much money long*2-medium ago.
M=custom.all_money
address=tools.addr(tx)
block=tools.db_get(tx['on_block'])
num=max(0,tx['on_block']-(custom.long_time*2-custom.medium_time))
election_block=tools.db_get(num)
if not signature_check(tx):
out[0]+='signature check'
return False
if 'root_hash' not in election_block:
out[0]+='no root hash'
return False
v=tools.db_verify(election_block['root_hash'], address, tx['proof'])
if v==False:
tools.log('your address did not exist that long ago.')
return False
if v['amount']!=tx['B']:
tools.log('that is not how much money you had that long ago')
return False
if 'secret_hash' not in tx:
tools.log('need the hash of a secret')
return False
for t in txs:
if tools.addr(t)==address and t['type']=='sign':
#tools.log('can only have one sign tx per block')
return False
if len(tx['jackpots'])<1:
tools.log('insufficient jackpots')
return False
if not signature_check(tx):
out[0]+='signature check'
return False
length=tools.local_get('length')
if int(tx['on_block'])!=int(length+1):
out[0]+='this tx is for the wrong block. have '+str(length+1) +' need: ' +str(tx['on_block'])
return False
if tx['on_block']>0:
if not tx['prev']==tools.db_get(length)['block_hash']:
tools.log('must give hash of previous block')
return False
ran=tools.det_random(tx['on_block'])
for j in tx['jackpots']:
if type(j)!=int or j not in range(200):
tools.log('bad jackpot')
return False
if len(filter(lambda x: x==j, tx['jackpots']))!=1:
tools.log('no repeated jackpots')
return False
if not tools.winner(B, M, ran, address, j):
tools.log('that jackpot is not valid: '+str(j))
return False
if tx['amount']<custom.minimum_deposit:
tools.log('you have to deposit more than that')
return False
return True
def slasher_verify(tx, txs, out, DB):
address=tools.addr(tx)
acc=tools.db_get(address)
if acc['secrets'][str(tx['on_block'])]['slashed']:
tools.log('Someone already slashed them, or they already took the reward.')
return False
if not sign_verify(tx['tx1'], [], [''], {}):
tools.log('one was not a valid tx')
return False
if not sign_verify(tx['tx2'], [], [''], {}):
tools.log('two was not a valid tx')
return False
tx1=copy.deepcopy(tx['tx1'])
tx2=copy.deepcopy(tx['tx2'])
tx1.pop('signatures')
tx2.pop('signatures')
tx1=unpackage(package(tx1))
tx2=unpackage(package(tx2))
msg1=tools.det_hash(tx1)
msg2=tools.det_hash(tx2)
if msg1==msg2:
tools.log('this is the same tx twice...')
return False
if tx1['on_block']!=tx2['on_block']:
tools.log('these are on different lengths')
return False
return True
def sign_transaction(length, address):
if length<=0:
return {'secret_hash':0}
txs=tools.db_get(length)['txs']
txs=filter(lambda t: t['type']=='sign', txs)
txs=filter(lambda t: tools.addr(t)==address, txs)
return(txs[0])
def reward_verify(tx, txs, out, DB):
address=tools.addr(tx)
acc=tools.db_get(address)
relative_reward=tools.relative_reward(tx['on_block'], address)
sign_tx=sign_transaction(tx['on_block'], address)
length=tools.local_get('length')
if len(sign_tx['jackpots'])!=tx['jackpots']:
tools.log('wrong number of jackpots')
return False
if length-custom.long_time+custom.medium_time/2<tx['on_block']or length-custom.long_time-custom.medium_time/2>tx['on_block']:
tools.log('you did not wait the correct amount of time')
return False
if acc['secrets'][str(tx['on_block'])]['slashed']:
tools.log('you were slashed, or you already claimed your reward at this height')
return False
if tx['amount']!=relative_reward+sign_tx['amount']:
tools.log('reward wrong size')
return False
if sign_tx['secret_hash']!=tools.det_hash(tx['reveal']):
tools.log('entropy+salt does not match')
return False
if tx['reveal']['entropy'] not in [0,1]:
tools.log('entropy must be either 0 or 1')
return False
return True
def make_contract_verify(tx, txs, out, DB):
if tools.db_existence(tx['id']):
tools.log('contract already exists')
return False
contract={'gas':int(tx['amount'])-custom.make_contract_fee, 'mem':tx['mem'], 'stack':[]}
if contract['gas']<0:
tools.log('insufficient gas')
return False
return True
def contract_do_verify(tx, txs, out, DB):
contract=tools.db_get(tx['contract_id'])
if 'mem' not in contract:
tools.log('not a contract')
return False
contract['gas']=tx['amount']-custom.contract_do_fee
new_contract=forth.forth(tx['code'], forth.ex_language, contract)
tools.log('new contract: ' +str(new_contract))
if type(new_contract)==list:
tools.log('contract failed: '+str(new_contract))
return False
if new_contract==['not enough gas']:
tools.log(new_contract[0])
return False
if contract['mem']!=tx['old_mem']:
tools.log('contrac: ' +str(contract))
tools.log('tx: ' +str(tx))
tools.log('old mem does not match')
return False
tools.log('new contract: ' +str(new_contract))
if new_contract['gas']<0:
tools.log('insufficient gas')
return False
return True
tx_check = {'mint':mint_verify,
'spend':spend_verify,
'sign':sign_verify,
'slasher':slasher_verify,
'reward':reward_verify,
'make_contract':make_contract_verify,
'contract_do':contract_do_verify}
'''
1) give signer's deposit
*reward is proportional to deposit size.
2) sign
3) double-sign slash
4) claim reward
*reveal one bit of entropy
*vote on system constants?
'''
#------------------------------------------------------
adjust_int=tools.adjust_int
adjust_string=tools.adjust_string
adjust_dict=tools.adjust_dict
adjust_list=tools.adjust_list
symmetric_put=tools.symmetric_put
def mint(tx, DB, add_block):
address = tools.addr(tx)
adjust_int(['amount'], address, tx['amount'], DB, add_block)
def spend(tx, DB, add_block):
address = tools.addr(tx)
adjust_int(['amount'], address, -int(tx['amount']), DB, add_block)
adjust_int(['amount'], tx['to'], tx['amount'], DB, add_block)
#adjust_int(['amount'], address, -custom.fee, DB, add_block)
adjust_int(['amount'], address, -int(tx['fee']), DB, add_block)
def sign(tx, DB, add_block):#should include hash(entroy_bit and salt)
address = tools.addr(tx)
adjust_int(['amount'], address, -int(tx['amount']), DB, add_block)
adjust_dict(['secrets'], address, False, {str(tx['on_block']):{'slashed':False}}, DB, add_block)
def slasher(tx, DB, add_block):
address = tools.addr(tx)
adjust_string(['secrets', tx['on_block'], 'slashed'], tools.addr(tx['tx1']), False, True, DB, add_block)
adjust_int(['amount'], address, tx['amount']/5, DB, add_block)
#tx={'amount':10000, 'tx1': , 'tx2': , 'reward_address': }
#record
def reward(tx, DB, add_block):
address = tools.addr(tx)
length=tools.db_get('length')
adjust_string(['secrets', tx['on_block'], 'slashed'], address, False, True, DB, add_block)
adjust_dict(['entropy'], address, False, {str(tx['on_block']):{'power':tx['jackpots'],'vote':tx['reveal']}}, DB, add_block)
adjust_int(['amount'], address, tx['amount'], DB, add_block)#relative_reward(on_block)+signer_bond
def make_contract(tx, DB, add_block):
address = tools.addr(tx)
adjust_int(['amount'], address, -int(tx['amount']), DB, add_block)
contract={'gas':int(tx['amount'])-custom.make_contract_fee, 'mem':tx['mem'], 'stack':[]}
symmetric_put(tx['id'], contract, DB, add_block)
#put the contract into the database.
def contract_do(tx, DB, add_block):
address = tools.addr(tx)
contract=tools.db_get(tx['contract_id'])
contract['gas']=tx['amount']-custom.contract_do_fee
new_contract=forth.forth(tx['code'], forth.ex_language, contract)
tools.log('new contract: ' +str(new_contract))
new_contract['stack']=[]
adjust_int(['amount'], address, -int(tx['amount']), DB, add_block)
adjust_string(['mem'], tx['contract_id'], contract, new_contract['mem'], DB, add_block)
#{'cost':50000, 'code':'dup * + get'}#cost can be negative.
update = {'mint':mint,
'spend':spend,
'sign':sign,
'slasher':slasher,
'reward':reward,
'make_contract':make_contract,
'contract_do':contract_do}
#contract looks like:
#{'mem':{'a':'this is a forth script', 'b':'so is this', 'balance':'54'}}
#forth: + - / * ** % put get runfunc stop n-dup n-swap n-roll hash int unicode
#contract_do example
#I can use everything from forth besides state manipulation, and I can use the functions defined in the contract.
| [
"zack.bitcoin@gmail.com"
] | zack.bitcoin@gmail.com |
b1a906eb52a85db92cdc55bd8f3e89853c2de98c | 7502ef2b7a6d4b4c9b789b0042e7b69024ffcc91 | /mysite/polls/views.py | 67e3609ad07cf9b36ed9cc99e730f67ae8811c2e | [] | no_license | CodingNomads/django-polls | ec0aa6d99971e03bac4a5fcddb08611c3c3dc1d0 | c82ab82d2bcffa7fb70d2cbfe5b9e4e059553a40 | refs/heads/master | 2021-12-13T16:07:01.203035 | 2021-08-11T15:35:48 | 2021-08-11T15:35:48 | 244,676,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form including an error message.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"breuss.martin@gmail.com"
] | breuss.martin@gmail.com |
9c9107d0837a28fefe69b8a4bd7eb009bb12d1d6 | 6609c26b4ed72c156104ce282c3cf88c6aac59f6 | /chapter17/examples/advance_features.py | c985a4f23c81b33511c5f99681a5ca31ba3a8b13 | [
"MIT"
] | permissive | yordanivh/intro_to_cs_w_python | 4ab9dbbc2963b285b22cacb6648d1300fded18ce | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | refs/heads/master | 2020-09-06T12:25:23.362118 | 2020-02-14T14:07:07 | 2020-02-14T14:07:07 | 220,423,698 | 0 | 0 | MIT | 2020-02-14T14:07:08 | 2019-11-08T08:41:25 | Python | UTF-8 | Python | false | false | 2,808 | py | #aggregation
>>> cur.execute('SELECT SUM (Population) FROM PopByRegion')
<sqlite3.Cursor object at 0x7f88a81907a0>
>>> cur.fetchone()
(8965762,)
#grouping
>>> cur.execute('''SELECT Region, SUM (Population) FROM PopByCountry GROUP BY Region''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia', 1364389), ('North America', 661200)]
>>> cur.execute('''SELECT SUM (Population) FROM PopByCountry WHERE Region = "North America"''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[(661200,)]
>>> cur.execute('''SELECT SUM (Population) FROM PopByCountry WHERE Region = "Eastern Asia"''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[(1364389,)]
#self-joins
>>> cur.execute('''
... SELECT A.Country, B.Country
... FROM PopByCountry A INNER JOIN PopByCountry B
... WHERE (ABS(A.Population - B.Population) <=1000)
... AND (A.Country != B.Country)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Republic of Korea', 'Canada'), ('Bahamas', 'Greenland'), ('Canada', 'Republic of Korea'), ('Greenland', 'Bahamas')]
>>> cur.execute('''
... SELECT A.Country, B.Country
... FROM PopByCountry A INNER JOIN PopByCountry B
... WHERE (ABS(A.Population - B.Population) <=1000)
... AND (A.Country < B.Country)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Bahamas', 'Greenland'), ('Canada', 'Republic of Korea')]
#Nested Queries
>>> cur.execute('''SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population != 8764)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia',), ('North America',)]
>>> cur.execute('''SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population != 8764)''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia',), ('North America',)]
>>> cur.execute('''
... SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population = 8764)
... ''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('Eastern Asia',)]
>>> cur.execute('''
... SELECT DISTINCT Region
... FROM PopByCountry
... WHERE Region NOT IN
... (SELECT DISTINCT Region
... FROM PopByCountry
... WHERE (PopByCountry.Population = 8764))
... ''')
<sqlite3.Cursor object at 0x7f88a8190810>
>>> cur.fetchall()
[('North America',)]
# Transactions
>>>cur.execute('SELECT SignedOut FROM Books WHERE ISBN = ?', isbn)
>>>signedOut = cur.fetchone()[0]
>>>cur.execute('''UPDATE Books SET SignedOut = ?
... WHERE ISBN = ?''', signedOut + 1, isbn)
>>>cur.commit()
>>>cur.execute('SELECT SignedOut FROM Books WHERE ISBN = ?', isbn)
>>>signedOut = cur.fetchone()[0]
>>>cur.execute('''UPDATE Books SET SignedOut = ?
... WHERE ISBN = ?''', signedOut - 1, isbn)
>>>cur.commit()
| [
"yordan@hashicorp.com"
] | yordan@hashicorp.com |
4cead7c664d102e7b8701b6679c11251c93f3262 | 5b9035dbfe0750e9933728f9631ad7a183dd3429 | /02/00/iterdecode.py | d501e5c1c4f91e322d041afbce2e925b84f80cc2 | [
"CC0-1.0"
] | permissive | pylangstudy/201709 | 271efbd4f337d912d0ca958a621eb2a040091528 | 53d868786d7327a83bfa7f4149549c6f9855a6c6 | refs/heads/master | 2021-01-21T12:16:21.950493 | 2017-09-30T00:02:34 | 2017-09-30T00:02:34 | 102,058,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | #!python3.6
#encoding:utf-8
import codecs
def byte_iter():
for i in range(5): yield f'日本語_{i}'.encode()
for text in codecs.iterdecode(byte_iter(), encoding='utf-8'):
print(text, text.encode())
| [
"pylangstudy@yahoo.co.jp"
] | pylangstudy@yahoo.co.jp |
0b2eb5a06ed674ebafc5ebd22ced1e6d9153f00e | 8fb1d41797595550418ecfc0e7558f38254b4606 | /django/contrib/gis/geos/point.py | 1b7d7f23ed51892d5d963e44d8a40d42147743a0 | [
"BSD-3-Clause",
"MIT"
] | permissive | hunch/hunch-gift-app | 2aad70a9f18124bf0de02d7a125fa93c765da008 | 8c7cad24cc0d9900deb4175e6b768c64a3d7adcf | refs/heads/master | 2016-09-06T03:13:52.153974 | 2012-03-26T18:11:59 | 2012-03-26T18:11:59 | 1,191,221 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,388 | py | from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, (int, float, long)) and isinstance(y, (int, float, long)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, (int, float, long)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, i.next())
capi.cs_sety(cs, 0, i.next())
if ndim == 3: capi.cs_setz(cs, 0, i.next())
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
| [
"gleitz@hunch.com"
] | gleitz@hunch.com |
b7a9a1cf49e61232166b8257b59f9ac4df425cd5 | 0689ad04900b45e6ffb85756e65e96f30781558b | /pbase/day19/shili/mylist.py | 2fc8cc1d6d64d39221a007ecd54b4a6c488d1ecc | [] | no_license | lizhihui16/aaa | a5452b5d0de4c2ad6342fce1b8aef278d2d2943e | e8c38e012f6aa0bc05ac6481d6c3e2b4e9013b56 | refs/heads/master | 2020-04-24T01:05:19.266060 | 2019-02-20T01:43:51 | 2019-02-20T01:43:51 | 171,586,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py |
class MyList:
'''创建一个自定义列表类,此MyList内部用列表来存储信息'''
def __init__(self,iterable=()):
self.data=[x for x in iterable]
def __repr__(self):
return 'MyList(%s)'%self.data
def __len__(self):
'''方法必须返回整数'''
# return len(self.data)
return self.data.__len__()
def __abs__(self):
'''此方法实现把sekf的所有元素取绝对值后返回全为正数的自定义列表'''
lst=[abs(x) for x in self.data]
L=MyList(lst) #创建新的列表
return L
myl=MyList([1,-2,3,-4])
print(myl)
print(len(myl))
myl3=abs(myl)
print(myl3)
# myl2=MyList([])
# print(myl2) | [
"tarena@tedu.cn"
] | tarena@tedu.cn |
a286e36d05932f1d3e1eaefcc2ab40f45bb14270 | a6355ef8ddb4d31fb4ff45ae755f34482d8c0ff9 | /supervised/tuner/registry.py | 52b5099b6f1150df1f306046e536db9fdea3bc00 | [
"MIT"
] | permissive | michaelneale/mljar-supervised | d4d1b44f4cd5dcbdb36768c5186f2480a53ec3f7 | 8d1b5fdd56e994a7f13ec5f6d2033830744f3d6f | refs/heads/master | 2022-02-06T14:11:21.377791 | 2019-11-05T08:15:02 | 2019-11-05T08:15:02 | 220,161,447 | 0 | 0 | MIT | 2022-01-26T00:32:56 | 2019-11-07T05:51:34 | null | UTF-8 | Python | false | false | 754 | py | # tasks that can be handled by the package
BINARY_CLASSIFICATION = "binary_classification"
MULTICLASS_CLASSIFICATION = "multiclass_classification"
REGRESSION = "regression"
class ModelsRegistry:
registry = {
BINARY_CLASSIFICATION: {},
MULTICLASS_CLASSIFICATION: {},
REGRESSION: {},
}
@staticmethod
def add(task_name, model_class, model_params, required_preprocessing, additional):
model_information = {
"class": model_class,
"params": model_params,
"required_preprocessing": required_preprocessing,
"additional": additional,
}
ModelsRegistry.registry[task_name][
model_class.algorithm_short_name
] = model_information
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
fb46f01286494fc069a6e58459fe6f0a3d8c16ac | ac2529e9ce94dd99125b8920b4b70acddd6ac173 | /common/validators.py | 7a60858fe6302a4ac2264ffe0b6c6fe8566af9e2 | [] | no_license | maoxiaowang/test | d00f867551993d27c797a785fdc23e2a86e61165 | ae4b3dc831b5a35d9f620781389d349f28c5ee1b | refs/heads/master | 2020-03-10T08:12:09.708699 | 2018-07-08T08:42:56 | 2018-07-08T08:42:56 | 129,279,958 | 1 | 3 | null | 2018-06-16T12:16:52 | 2018-04-12T16:22:37 | CSS | UTF-8 | Python | false | false | 117 | py | # coding=utf-8
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
| [
"1225191678@qq.com"
] | 1225191678@qq.com |
f0ec882c3142a7b3f7479f8dea631fea8827f88a | 20741a0e27b88eb4396516788c1dd7acab7f1527 | /project/apps/config/models.py | 832fa099b90b4e90e478dff781c5f2ac9bc25537 | [] | no_license | yueyoum/lockscreen-image | 2b371f5e133f0e3ace7b3b4597a2633575027415 | 47351c5aafbd97e3c6862798a9666114c83f4fb9 | refs/heads/master | 2021-01-23T10:04:35.901591 | 2015-06-30T08:00:51 | 2015-06-30T08:00:51 | 29,383,019 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
from django.db import models
class Config(models.Model):
name = models.TextField(primary_key=True)
value = models.TextField()
class Meta:
db_table = 'project_config'
verbose_name = '设置'
verbose_name_plural = '设置'
@classmethod
def get_value(cls, key):
try:
return cls.objects.get(name=key).value
except cls.DoesNotExist:
return ''
| [
"yueyoum@gmail.com"
] | yueyoum@gmail.com |
1d9d95820ff83069e478a3b37183e8ded7518493 | 56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a | /applications/CoSimulationApplication/python_scripts/colors.py | 2269c3f01716035217f3aa062b303ce8ae2b8230 | [
"BSD-3-Clause"
] | permissive | KratosMultiphysics/Kratos | 82b902a2266625b25f17239b42da958611a4b9c5 | 366949ec4e3651702edc6ac3061d2988f10dd271 | refs/heads/master | 2023-08-30T20:31:37.818693 | 2023-08-30T18:01:01 | 2023-08-30T18:01:01 | 81,815,495 | 994 | 285 | NOASSERTION | 2023-09-14T13:22:43 | 2017-02-13T10:58:24 | C++ | UTF-8 | Python | false | false | 2,273 | py | PRINT_COLORS = False # Global var to specify if colors should be printed
def color_string(string2color, color_code):
if PRINT_COLORS:
return "\x1b["+color_code+"m" + str(string2color) + "\x1b[0m"
else:
return string2color
def bold(string2color):
return color_string(string2color, "1;1")
def italic(string2color):
return color_string(string2color, "1;3")
def darkify(string2color):
return bold(color_string(string2color, "1;2")) # bold is needed bcs it is removed otherwise
def underline(string2color):
return color_string(string2color, "1;4")
def blue(string2color):
return color_string(string2color, "1;34")
def darkblue(string2color):
return (darkify(blue(string2color)))
def red(string2color):
return color_string(string2color, "1;31")
def darkred(string2color):
return (darkify(red(string2color)))
def green(string2color):
return color_string(string2color, "1;32")
def darkgreen(string2color):
return (darkify(green(string2color)))
def yellow(string2color):
return color_string(string2color, "1;33")
def darkyellow(string2color):
return (darkify(yellow(string2color)))
def cyan(string2color):
return color_string(string2color, "1;36")
def darkcyan(string2color):
return (darkify(cyan(string2color)))
def magenta(string2color):
return color_string(string2color, "1;35")
def darkmagenta(string2color):
return (darkify(magenta(string2color)))
if __name__ == "__main__":
print("printing all color options:\n")
str2print = "MyCustomString"
PRINT_COLORS = True
print("print:", str2print)
print("bold:", bold(str2print))
print("italic:", italic(str2print))
print("darkify:", darkify(str2print))
print("underline:", underline(str2print))
print("blue:", blue(str2print))
print("darkblue:", darkblue(str2print))
print("red:", red(str2print))
print("darkred:", darkred(str2print))
print("green:", green(str2print))
print("darkgreen:", darkgreen(str2print))
print("yellow:", yellow(str2print))
print("darkyellow:", darkyellow(str2print))
print("cyan:", cyan(str2print))
print("darkcyan:", darkcyan(str2print))
print("magenta:", magenta(str2print))
print("darkmagenta:", darkmagenta(str2print))
| [
"philipp.bucher@tum.de"
] | philipp.bucher@tum.de |
ba01e58765e20324575a48ee31485025bc1c583a | 8993c3a75139d6a26b22701ab55b17b6f72c4a38 | /hyperion/model/image.py | f99f0aa95cced40feece602185d371c899216c05 | [
"BSD-2-Clause"
] | permissive | saethlin/hyperion | b5e196f88e1cd8bdd6df22c775520c60b8a56561 | 9ddd860267a4bf12a3b7389192bed692a78ca39a | refs/heads/master | 2020-06-01T13:56:33.162686 | 2019-06-07T20:36:14 | 2019-06-07T20:36:14 | 190,803,837 | 0 | 0 | null | 2019-06-07T20:20:21 | 2019-06-07T20:20:21 | null | UTF-8 | Python | false | false | 9,608 | py | import numpy as np
from astropy.extern import six
from ..util.functions import FreezableClass, is_numpy_array
from ..util.constants import c
class Image(FreezableClass):
"""
Class to represent an image or set of images
Parameters
----------
nu : ndarray
The frequencies at which the image is defined, in Hz
val : ndarray, optional
The values for the image. The last dimensions should match the number
of frequencies.
unc : ndarray, optional
The uncertainties for the image values. The last dimensions should
match the number of frequencies.
units : str
The units of the values
"""
def __init__(self, nu, val=None, unc=None, units=None):
self.nu = nu
self.val = val
self.unc = unc
self.units = units
self.x_min = None
self.x_max = None
self.y_min = None
self.y_max = None
self.lon_min = None
self.lon_max = None
self.lat_min = None
self.lat_max = None
self.d_min = None
self.d_max = None
self.distance = None
self.pix_area_sr = None
self.inside_observer = False
self._freeze()
@property
def nu(self):
"""
The frequencies for which the image is defined (in Hz).
"""
return self._nu
@nu.setter
def nu(self, value):
if type(value) in [list, tuple]:
value = np.array(value)
if value is None:
self._nu = value
elif isinstance(value, np.ndarray) and value.ndim == 1:
self._nu = value
else:
raise TypeError("nu should be a 1-d sequence")
@property
def val(self):
"""
The image values (fluxes, flux densities, surface brightness, or polarization) in the units given by the ``.unit`` property.
"""
return self._val
@val.setter
def val(self, value):
if type(value) in [list, tuple]:
value = np.array(value)
if value is None:
self._val = value
elif isinstance(value, np.ndarray) and value.ndim >= 1:
if self.nu is not None and len(self.nu) != value.shape[-1]:
raise ValueError("the last dimension of the value array should match the length of the nu array (expected {0} but found {1})".format(len(self.nu), value.shape[-1]))
else:
if hasattr(self, 'unc') and self.unc is not None:
if value.shape != self.unc.shape:
raise ValueError("dimensions should match that of unc")
self._val = value
else:
raise TypeError("val should be a multi-dimensional array")
@property
def unc(self):
"""
The uncertainties on the image values in the units given by the ``.unit`` property.
"""
return self._unc
@unc.setter
def unc(self, value):
if type(value) in [list, tuple]:
value = np.array(value)
if value is None:
self._unc = value
elif isinstance(value, np.ndarray) and value.ndim >= 1:
if self.nu is not None and len(self.nu) != value.shape[-1]:
raise ValueError("the last dimension of the unc array should match the length of the nu array (expected {0} but found {1})".format(len(self.nu), value.shape[-1]))
else:
if hasattr(self, 'val') and self.val is not None:
if value.shape != self.val.shape:
raise ValueError("dimensions should match that of val")
self._unc = value
else:
raise TypeError("unc should be a multi-dimensional array")
@property
def unit(self):
"""
The units of the image values.
"""
return self._unit
@unit.setter
def unit(self, value):
if value is None or isinstance(value, six.string_types):
self._unit = value
else:
raise ValueError("unit should be a string")
@property
def wav(self):
"""
The wavelengths for which the image is defined (in microns).
"""
if self.nu is None:
return None
else:
return c / self.nu * 1e4
def __iter__(self):
if self.unc is None:
return (x for x in [self.wav, self.val])
else:
return (x for x in [self.wav, self.val, self.unc])
@property
def x_min(self):
"""
Lower extent of the image in the x direction (in cm).
"""
return self._x_min
@x_min.setter
def x_min(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._x_min = value
else:
raise ValueError("x_min should be a real scalar value")
@property
def x_max(self):
"""
Upper extent of the image in the x direction (in cm).
"""
return self._x_max
@x_max.setter
def x_max(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._x_max = value
else:
raise ValueError("x_max should be a real scalar value")
@property
def y_min(self):
"""
Lower extent of the image in the y direction (in cm).
"""
return self._y_min
@y_min.setter
def y_min(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._y_min = value
else:
raise ValueError("y_min should be a real scalar value")
@property
def y_max(self):
"""
Upper extent of the image in the y direction (in cm).
"""
return self._y_max
@y_max.setter
def y_max(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._y_max = value
else:
raise ValueError("y_max should be a real scalar value")
@property
def lon_min(self):
"""
Lower extent of the image in the x direction (in degrees).
"""
return self._lon_min
@lon_min.setter
def lon_min(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._lon_min = value
else:
raise ValueError("lon_min should be a real scalar value")
@property
def lon_max(self):
"""
Upper extent of the image in the x direction (in degrees).
"""
return self._lon_max
@lon_max.setter
def lon_max(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._lon_max = value
else:
raise ValueError("lon_max should be a real scalar value")
@property
def lat_min(self):
"""
Lower extent of the image in the y direction (in degrees).
"""
return self._lat_min
@lat_min.setter
def lat_min(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._lat_min = value
else:
raise ValueError("lat_min should be a real scalar value")
@property
def lat_max(self):
"""
Upper extent of the image in the y direction (in degrees).
"""
return self._lat_max
@lat_max.setter
def lat_max(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._lat_max = value
else:
raise ValueError("lat_max should be a real scalar value")
@property
def d_min(self):
"""
Minimum depth used to define the SEDs (in cm).
"""
return self._d_min
@d_min.setter
def d_min(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._d_min = value
else:
raise ValueError("d_min should be a real scalar value")
@property
def d_max(self):
"""
Maximum depth used to define the SEDs (in cm).
"""
return self._d_max
@d_max.setter
def d_max(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._d_max = value
else:
raise ValueError("d_max should be a real scalar value")
@property
def distance(self):
"""
Distance assumed for the image (in cm).
"""
return self._distance
@distance.setter
def distance(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)):
self._distance = value
else:
raise ValueError("distance should be a real scalar value")
@property
def pix_area_sr(self):
"""
Pixel area (in steradians).
"""
return self._pix_area_sr
@pix_area_sr.setter
def pix_area_sr(self, value):
if value is None or (np.isscalar(value) and np.isreal(value)) or (is_numpy_array(value) and value.ndim == 2):
self._pix_area_sr = value
else:
raise ValueError("pix_area_sr should be a real scalar value or a 2-d array")
@property
def inside_observer(self):
"""
Whether the image was from an inside observer.
"""
return self._inside_observer
@inside_observer.setter
def inside_observer(self, value):
if value is None or type(value) is bool:
self._inside_observer = value
else:
raise ValueError("inside_observer should be a boolean")
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
71b166eca1b5d4658c1a7725081ee16a782a7cbd | 98032c5363d0904ba44e1b5c1b7aa0d31ed1d3f2 | /Chapter07/ch7/json_examples/json_cplx.py | 62a571ee8e31bf133df4dcb1f2adf45092c8410c | [
"MIT"
] | permissive | PacktPublishing/Learn-Python-Programming-Second-Edition | 7948b309f6e8b146a5eb5e8690b7865cb76136d5 | 54fee44ff1c696df0c7da1e3e84a6c2271a78904 | refs/heads/master | 2023-05-12T08:56:52.868686 | 2023-01-30T09:59:05 | 2023-01-30T09:59:05 | 138,018,499 | 65 | 44 | MIT | 2023-02-15T20:04:34 | 2018-06-20T10:41:13 | Jupyter Notebook | UTF-8 | Python | false | false | 679 | py | import json
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, complex):
return {
'_meta': '_complex',
'num': [obj.real, obj.imag],
}
return json.JSONEncoder.default(self, obj)
data = {
'an_int': 42,
'a_float': 3.14159265,
'a_complex': 3 + 4j,
}
json_data = json.dumps(data, cls=ComplexEncoder)
print(json_data)
def object_hook(obj):
try:
if obj['_meta'] == '_complex':
return complex(*obj['num'])
except (KeyError, TypeError):
return obj
data_out = json.loads(json_data, object_hook=object_hook)
print(data_out)
| [
"33118647+romydias@users.noreply.github.com"
] | 33118647+romydias@users.noreply.github.com |
4cf0bf7a0846e19918f7d5f57a6a82b1bebac764 | 0e47ec038441ed2adf3ac1dc123220ee4515c5a5 | /test/orm/test_events.py | 6957a903093bb2062dc6234408804c36a1b5019f | [
"MIT"
] | permissive | alfrcr/sqlalchemy | 9decac53cfc68d8467e089c9caddc56f0c7eb997 | 75969a81d64df631c81badac6fee5fc9a8e71af5 | refs/heads/master | 2020-12-19T20:07:41.003330 | 2020-01-23T02:41:59 | 2020-01-23T02:41:59 | 235,838,349 | 1 | 0 | MIT | 2020-01-23T16:38:33 | 2020-01-23T16:38:32 | null | UTF-8 | Python | false | false | 72,137 | py | import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import attributes
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import create_session
from sqlalchemy.orm import events
from sqlalchemy.orm import EXT_SKIP
from sqlalchemy.orm import instrumentation
from sqlalchemy.orm import Mapper
from sqlalchemy.orm import mapper
from sqlalchemy.orm import query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_not_
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from test.orm import _fixtures
class _RemoveListeners(object):
def teardown(self):
events.MapperEvents._clear()
events.InstanceEvents._clear()
events.SessionEvents._clear()
events.InstrumentationEvents._clear()
events.QueryEvents._clear()
super(_RemoveListeners, self).teardown()
class MapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
run_inserts = None
@classmethod
def define_tables(cls, metadata):
super(MapperEventsTest, cls).define_tables(metadata)
metadata.tables["users"].append_column(
Column("extra", Integer, default=5, onupdate=10)
)
def test_instance_event_listen(self):
"""test listen targets for instance events"""
users, addresses = self.tables.users, self.tables.addresses
canary = []
class A(object):
pass
class B(A):
pass
mapper(A, users)
mapper(
B, addresses, inherits=A, properties={"address_id": addresses.c.id}
)
def init_a(target, args, kwargs):
canary.append(("init_a", target))
def init_b(target, args, kwargs):
canary.append(("init_b", target))
def init_c(target, args, kwargs):
canary.append(("init_c", target))
def init_d(target, args, kwargs):
canary.append(("init_d", target))
def init_e(target, args, kwargs):
canary.append(("init_e", target))
event.listen(mapper, "init", init_a)
event.listen(Mapper, "init", init_b)
event.listen(class_mapper(A), "init", init_c)
event.listen(A, "init", init_d)
event.listen(A, "init", init_e, propagate=True)
a = A()
eq_(
canary,
[
("init_a", a),
("init_b", a),
("init_c", a),
("init_d", a),
("init_e", a),
],
)
# test propagate flag
canary[:] = []
b = B()
eq_(canary, [("init_a", b), ("init_b", b), ("init_e", b)])
def listen_all(self, mapper, **kw):
canary = []
def evt(meth):
def go(*args, **kwargs):
canary.append(meth)
return go
for meth in [
"init",
"init_failure",
"load",
"refresh",
"refresh_flush",
"expire",
"before_insert",
"after_insert",
"before_update",
"after_update",
"before_delete",
"after_delete",
]:
event.listen(mapper, meth, evt(meth), **kw)
return canary
def test_init_allow_kw_modify(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
@event.listens_for(User, "init")
def add_name(obj, args, kwargs):
kwargs["name"] = "ed"
u1 = User()
eq_(u1.name, "ed")
def test_init_failure_hook(self):
users = self.tables.users
class Thing(object):
def __init__(self, **kw):
if kw.get("fail"):
raise Exception("failure")
mapper(Thing, users)
canary = Mock()
event.listen(Thing, "init_failure", canary)
Thing()
eq_(canary.mock_calls, [])
assert_raises_message(Exception, "failure", Thing, fail=True)
eq_(canary.mock_calls, [call(ANY, (), {"fail": True})])
def test_listen_doesnt_force_compile(self):
User, users = self.classes.User, self.tables.users
m = mapper(
User,
users,
properties={
# intentionally non-existent class to ensure
# the lambda is not called, simulates a class from
# a not-yet-imported module
"addresses": relationship(lambda: ImNotAClass) # noqa
},
)
event.listen(User, "before_insert", lambda *a, **kw: None)
assert not m.configured
def test_basic(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
canary = self.listen_all(User)
named_canary = self.listen_all(User, named=True)
sess = create_session()
u = User(name="u1")
sess.add(u)
sess.flush()
sess.expire(u)
u = sess.query(User).get(u.id)
sess.expunge_all()
u = sess.query(User).get(u.id)
u.name = "u1 changed"
sess.flush()
sess.delete(u)
sess.flush()
expected = [
"init",
"before_insert",
"refresh_flush",
"after_insert",
"expire",
"refresh",
"load",
"before_update",
"refresh_flush",
"after_update",
"before_delete",
"after_delete",
]
eq_(canary, expected)
eq_(named_canary, expected)
def test_insert_before_configured(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
canary = Mock()
event.listen(mapper, "before_configured", canary.listen1)
event.listen(mapper, "before_configured", canary.listen2, insert=True)
event.listen(mapper, "before_configured", canary.listen3)
event.listen(mapper, "before_configured", canary.listen4, insert=True)
configure_mappers()
eq_(
canary.mock_calls,
[call.listen4(), call.listen2(), call.listen1(), call.listen3()],
)
def test_insert_flags(self):
users, User = self.tables.users, self.classes.User
m = mapper(User, users)
canary = Mock()
arg = Mock()
event.listen(m, "before_insert", canary.listen1)
event.listen(m, "before_insert", canary.listen2, insert=True)
event.listen(
m, "before_insert", canary.listen3, propagate=True, insert=True
)
event.listen(m, "load", canary.listen4)
event.listen(m, "load", canary.listen5, insert=True)
event.listen(m, "load", canary.listen6, propagate=True, insert=True)
User()
m.dispatch.before_insert(arg, arg, arg)
m.class_manager.dispatch.load(arg, arg)
eq_(
canary.mock_calls,
[
call.listen3(arg, arg, arg.obj()),
call.listen2(arg, arg, arg.obj()),
call.listen1(arg, arg, arg.obj()),
call.listen6(arg.obj(), arg),
call.listen5(arg.obj(), arg),
call.listen4(arg.obj(), arg),
],
)
def test_merge(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
canary = []
def load(obj, ctx):
canary.append("load")
event.listen(mapper, "load", load)
s = Session()
u = User(name="u1")
s.add(u)
s.commit()
s = Session()
u2 = s.merge(u)
s = Session()
u2 = s.merge(User(name="u2")) # noqa
s.commit()
s.query(User).order_by(User.id).first()
eq_(canary, ["load", "load", "load"])
def test_inheritance(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
class AdminUser(User):
pass
mapper(User, users)
mapper(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
canary1 = self.listen_all(User, propagate=True)
canary2 = self.listen_all(User)
canary3 = self.listen_all(AdminUser)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
canary1,
[
"init",
"before_insert",
"refresh_flush",
"after_insert",
"refresh",
"load",
"before_update",
"refresh_flush",
"after_update",
"before_delete",
"after_delete",
],
)
eq_(canary2, [])
eq_(
canary3,
[
"init",
"before_insert",
"refresh_flush",
"after_insert",
"refresh",
"load",
"before_update",
"refresh_flush",
"after_update",
"before_delete",
"after_delete",
],
)
def test_inheritance_subclass_deferred(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
canary1 = self.listen_all(User, propagate=True)
canary2 = self.listen_all(User)
class AdminUser(User):
pass
mapper(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
canary3 = self.listen_all(AdminUser)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
canary1,
[
"init",
"before_insert",
"refresh_flush",
"after_insert",
"refresh",
"load",
"before_update",
"refresh_flush",
"after_update",
"before_delete",
"after_delete",
],
)
eq_(canary2, [])
eq_(
canary3,
[
"init",
"before_insert",
"refresh_flush",
"after_insert",
"refresh",
"load",
"before_update",
"refresh_flush",
"after_update",
"before_delete",
"after_delete",
],
)
def test_before_after_only_collection(self):
"""before_update is called on parent for collection modifications,
after_update is called even if no columns were updated.
"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
mapper(
Item,
items,
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
)
mapper(Keyword, keywords)
canary1 = self.listen_all(Item)
canary2 = self.listen_all(Keyword)
sess = create_session()
i1 = Item(description="i1")
k1 = Keyword(name="k1")
sess.add(i1)
sess.add(k1)
sess.flush()
eq_(canary1, ["init", "before_insert", "after_insert"])
eq_(canary2, ["init", "before_insert", "after_insert"])
canary1[:] = []
canary2[:] = []
i1.keywords.append(k1)
sess.flush()
eq_(canary1, ["before_update", "after_update"])
eq_(canary2, [])
def test_before_after_configured_warn_on_non_mapper(self):
User, users = self.classes.User, self.tables.users
m1 = Mock()
mapper(User, users)
assert_raises_message(
sa.exc.SAWarning,
r"before_configured' and 'after_configured' ORM events only "
r"invoke with the mapper\(\) function or Mapper class as "
r"the target.",
event.listen,
User,
"before_configured",
m1,
)
assert_raises_message(
sa.exc.SAWarning,
r"before_configured' and 'after_configured' ORM events only "
r"invoke with the mapper\(\) function or Mapper class as "
r"the target.",
event.listen,
User,
"after_configured",
m1,
)
def test_before_after_configured(self):
User, users = self.classes.User, self.tables.users
m1 = Mock()
m2 = Mock()
mapper(User, users)
event.listen(mapper, "before_configured", m1)
event.listen(mapper, "after_configured", m2)
s = Session()
s.query(User)
eq_(m1.mock_calls, [call()])
eq_(m2.mock_calls, [call()])
def test_instrument_event(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
canary = []
def instrument_class(mapper, cls):
canary.append(cls)
event.listen(Mapper, "instrument_class", instrument_class)
mapper(User, users)
eq_(canary, [User])
mapper(Address, addresses)
eq_(canary, [User, Address])
def test_instrument_class_precedes_class_instrumentation(self):
users = self.tables.users
class MyClass(object):
pass
canary = Mock()
def my_init(self):
canary.init()
# mapper level event
@event.listens_for(mapper, "instrument_class")
def instrument_class(mp, class_):
canary.instrument_class(class_)
class_.__init__ = my_init
# instrumentationmanager event
@event.listens_for(object, "class_instrument")
def class_instrument(class_):
canary.class_instrument(class_)
mapper(MyClass, users)
m1 = MyClass()
assert attributes.instance_state(m1)
eq_(
[
call.instrument_class(MyClass),
call.class_instrument(MyClass),
call.init(),
],
canary.mock_calls,
)
def test_before_mapper_configured_event(self):
"""Test [ticket:4397].
This event is intended to allow a specific mapper to be skipped during
the configure step, by returning a value of
:attr:`.orm.interfaces.EXT_SKIP` which means the mapper will be skipped
within this configure run. The "new mappers" flag will remain set in
this case and the configure operation will occur again.
This event, and its return value, make it possible to query one base
while a different one still needs configuration, which cannot be
completed at this time.
"""
User, users = self.classes.User, self.tables.users
mapper(User, users)
AnotherBase = declarative_base()
class Animal(AnotherBase):
__tablename__ = "animal"
species = Column(String(30), primary_key=True)
__mapper_args__ = dict(
polymorphic_on="species", polymorphic_identity="Animal"
)
# Register the first classes and create their Mappers:
configure_mappers()
unconfigured = [m for m in _mapper_registry if not m.configured]
eq_(0, len(unconfigured))
# Declare a subclass, table and mapper, which refers to one that has
# not been loaded yet (Employer), and therefore cannot be configured:
class Mammal(Animal):
nonexistent = relationship("Nonexistent")
# These new classes should not be configured at this point:
unconfigured = [m for m in _mapper_registry if not m.configured]
eq_(1, len(unconfigured))
# Now try to query User, which is internally consistent. This query
# fails by default because Mammal needs to be configured, and cannot
# be:
def probe():
s = Session()
s.query(User)
assert_raises(sa.exc.InvalidRequestError, probe)
# If we disable configuring mappers while querying, then it succeeds:
@event.listens_for(
AnotherBase,
"before_mapper_configured",
propagate=True,
retval=True,
)
def disable_configure_mappers(mapper, cls):
return EXT_SKIP
probe()
class DeclarativeEventListenTest(
_RemoveListeners, fixtures.DeclarativeMappedTest
):
run_setup_classes = "each"
run_deletes = None
def test_inheritance_propagate_after_config(self):
# test [ticket:2949]
class A(self.DeclarativeBasic):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
class B(A):
pass
listen = Mock()
event.listen(self.DeclarativeBasic, "load", listen, propagate=True)
class C(B):
pass
m1 = A.__mapper__.class_manager
m2 = B.__mapper__.class_manager
m3 = C.__mapper__.class_manager
a1 = A()
b1 = B()
c1 = C()
m3.dispatch.load(c1._sa_instance_state, "c")
m2.dispatch.load(b1._sa_instance_state, "b")
m1.dispatch.load(a1._sa_instance_state, "a")
eq_(listen.mock_calls, [call(c1, "c"), call(b1, "b"), call(a1, "a")])
class DeferredMapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
""""test event listeners against unmapped classes.
This incurs special logic. Note if we ever do the "remove" case,
it has to get all of these, too.
"""
run_inserts = None
def test_deferred_map_event(self):
"""
1. mapper event listen on class
2. map class
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, raw=True)
m = mapper(User, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [5])
def test_deferred_map_event_subclass_propagate(self):
"""
1. mapper event listen on class, w propagate
2. map only subclass of class
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubSubUser(SubUser):
pass
canary = Mock()
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", canary, propagate=True, raw=True)
m = mapper(SubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary.mock_calls, [call(5, 6, 7)])
m2 = mapper(SubSubUser, users)
m2.dispatch.before_insert(8, 9, 10)
eq_(canary.mock_calls, [call(5, 6, 7), call(8, 9, 10)])
def test_deferred_map_event_subclass_no_propagate(self):
"""
1. mapper event listen on class, w/o propagate
2. map only subclass of class
3. event fire should not receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, propagate=False)
m = mapper(SubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [])
def test_deferred_map_event_subclass_post_mapping_propagate(self):
"""
1. map only subclass of class
2. mapper event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
m = mapper(SubUser, users)
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, propagate=True, raw=True)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [5])
def test_deferred_map_event_subclass_post_mapping_propagate_two(self):
"""
1. map only subclass of class
2. mapper event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubSubUser(SubUser):
pass
m = mapper(SubUser, users)
canary = Mock()
event.listen(User, "before_insert", canary, propagate=True, raw=True)
m2 = mapper(SubSubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary.mock_calls, [call(5, 6, 7)])
m2.dispatch.before_insert(8, 9, 10)
eq_(canary.mock_calls, [call(5, 6, 7), call(8, 9, 10)])
def test_deferred_instance_event_subclass_post_mapping_propagate(self):
"""
1. map only subclass of class
2. instance event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
m = mapper(SubUser, users)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=True, raw=True)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
def test_deferred_instance_event_plain(self):
"""
1. instance event listen on class, w/o propagate
2. map class
3. event fire should receive event
"""
users, User = (self.tables.users, self.classes.User)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, raw=True)
m = mapper(User, users)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
def test_deferred_instance_event_subclass_propagate_subclass_only(self):
"""
1. instance event listen on class, w propagate
2. map two subclasses of class
3. event fire on each class should receive one and only one event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubUser2(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=True, raw=True)
m = mapper(SubUser, users)
m2 = mapper(SubUser2, users)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
m2.class_manager.dispatch.load(5)
eq_(canary, [5, 5])
def test_deferred_instance_event_subclass_propagate_baseclass(self):
"""
1. instance event listen on class, w propagate
2. map one subclass of class, map base class, leave 2nd subclass
unmapped
3. event fire on sub should receive one and only one event
4. event fire on base should receive one and only one event
5. map 2nd subclass
6. event fire on 2nd subclass should receive one and only one event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
class SubUser2(User):
pass
canary = Mock()
event.listen(User, "load", canary, propagate=True, raw=False)
# reversing these fixes....
m = mapper(SubUser, users)
m2 = mapper(User, users)
instance = Mock()
m.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj())])
m2.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj()), call(instance.obj())])
m3 = mapper(SubUser2, users)
m3.class_manager.dispatch.load(instance)
eq_(
canary.mock_calls,
[call(instance.obj()), call(instance.obj()), call(instance.obj())],
)
def test_deferred_instance_event_subclass_no_propagate(self):
"""
1. instance event listen on class, w/o propagate
2. map subclass
3. event fire on subclass should not receive event
"""
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=False)
m = mapper(SubUser, users)
m.class_manager.dispatch.load(5)
eq_(canary, [])
def test_deferred_instrument_event(self):
User = self.classes.User
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
User
)
eq_(canary, [User])
def test_isolation_instrument_event(self):
User = self.classes.User
class Bar(object):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(Bar, "attribute_instrument", evt)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
User
)
eq_(canary, [])
@testing.requires.predictable_gc
def test_instrument_event_auto_remove(self):
class Bar(object):
pass
dispatch = instrumentation._instrumentation_factory.dispatch
assert not dispatch.attribute_instrument
event.listen(Bar, "attribute_instrument", lambda: None)
eq_(len(dispatch.attribute_instrument), 1)
del Bar
gc_collect()
assert not dispatch.attribute_instrument
def test_deferred_instrument_event_subclass_propagate(self):
User = self.classes.User
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt, propagate=True)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
SubUser
)
eq_(canary, [SubUser])
def test_deferred_instrument_event_subclass_no_propagate(self):
users, User = (self.tables.users, self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt, propagate=False)
mapper(SubUser, users)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(
5
)
eq_(canary, [])
class LoadTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _fixture(self):
User = self.classes.User
canary = []
def load(target, ctx):
canary.append("load")
def refresh(target, ctx, attrs):
canary.append(("refresh", attrs))
event.listen(User, "load", load)
event.listen(User, "refresh", refresh)
return canary
def test_just_loaded(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name="u1")
sess.add(u1)
sess.commit()
sess.close()
sess.query(User).first()
eq_(canary, ["load"])
def test_repeated_rows(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name="u1")
sess.add(u1)
sess.commit()
sess.close()
sess.query(User).union_all(sess.query(User)).all()
eq_(canary, ["load"])
class RemovalTest(_fixtures.FixtureTest):
run_inserts = None
def test_attr_propagated(self):
User = self.classes.User
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
class AdminUser(User):
pass
mapper(User, users)
mapper(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
fn = Mock()
event.listen(User.name, "set", fn, propagate=True)
au = AdminUser()
au.name = "ed"
eq_(fn.call_count, 1)
event.remove(User.name, "set", fn)
au.name = "jack"
eq_(fn.call_count, 1)
def test_unmapped_listen(self):
users = self.tables.users
class Foo(object):
pass
fn = Mock()
event.listen(Foo, "before_insert", fn, propagate=True)
class User(Foo):
pass
m = mapper(User, users)
u1 = User()
m.dispatch.before_insert(m, None, attributes.instance_state(u1))
eq_(fn.call_count, 1)
event.remove(Foo, "before_insert", fn)
# existing event is removed
m.dispatch.before_insert(m, None, attributes.instance_state(u1))
eq_(fn.call_count, 1)
# the _HoldEvents is also cleaned out
class Bar(Foo):
pass
m = mapper(Bar, users)
b1 = Bar()
m.dispatch.before_insert(m, None, attributes.instance_state(b1))
eq_(fn.call_count, 1)
def test_instance_event_listen_on_cls_before_map(self):
users = self.tables.users
fn = Mock()
class User(object):
pass
event.listen(User, "load", fn)
m = mapper(User, users)
u1 = User()
m.class_manager.dispatch.load(u1._sa_instance_state, "u1")
event.remove(User, "load", fn)
m.class_manager.dispatch.load(u1._sa_instance_state, "u2")
eq_(fn.mock_calls, [call(u1, "u1")])
class RefreshTest(_fixtures.FixtureTest):
    """Tests for the "load" and "refresh" instance-level events.

    "load" should fire only when an object is newly loaded into the
    Session; "refresh" fires when an already-present object has
    attributes re-populated, and reports the set of refreshed attribute
    names (or None for populate_existing()).
    """

    run_inserts = None

    @classmethod
    def setup_mappers(cls):
        User, users = cls.classes.User, cls.tables.users
        mapper(User, users)

    def _fixture(self):
        # Install load/refresh listeners that record their invocations
        # (and, for refresh, the attribute names) into "canary".
        User = self.classes.User

        canary = []

        def load(target, ctx):
            canary.append("load")

        def refresh(target, ctx, attrs):
            canary.append(("refresh", attrs))

        event.listen(User, "load", load)
        event.listen(User, "refresh", refresh)
        return canary

    def test_already_present(self):
        # an object already present and unexpired in the Session emits
        # neither "load" nor "refresh" when re-selected
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()

        sess.query(User).first()
        eq_(canary, [])

    def test_changes_reset(self):
        """test the contract of load/refresh such that history is reset.

        This has never been an official contract but we are testing it
        here to ensure it is maintained given the loading performance
        enhancements.

        """
        User = self.classes.User

        @event.listens_for(User, "load")
        def canary1(obj, context):
            obj.name = "new name!"

        @event.listens_for(User, "refresh")
        def canary2(obj, context, props):
            obj.name = "refreshed name!"

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()
        sess.close()

        u1 = sess.query(User).first()
        # the mutation made inside "load" shows up as pending history
        # but does not leave the instance dirty
        eq_(attributes.get_history(u1, "name"), ((), ["new name!"], ()))
        assert "name" not in attributes.instance_state(u1).committed_state
        assert u1 not in sess.dirty

        sess.expire(u1)
        u1.id
        # the mutation made inside "refresh" does leave the instance dirty
        eq_(attributes.get_history(u1, "name"), ((), ["refreshed name!"], ()))
        assert "name" not in attributes.instance_state(u1).committed_state
        assert u1 in sess.dirty

    def test_repeated_rows(self):
        # the same identity appearing in multiple result rows triggers
        # only one "refresh"
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        sess.query(User).union_all(sess.query(User)).all()
        eq_(canary, [("refresh", set(["id", "name"]))])

    def test_via_refresh_state(self):
        # touching an attribute of an expired (post-commit) object
        # triggers "refresh"
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        u1.name
        eq_(canary, [("refresh", set(["id", "name"]))])

    def test_was_expired(self):
        # re-selecting an explicitly expired object triggers "refresh"
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()
        sess.expire(u1)

        sess.query(User).first()
        eq_(canary, [("refresh", set(["id", "name"]))])

    def test_was_expired_via_commit(self):
        # commit() expires all instances; the subsequent SELECT refreshes
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        sess.query(User).first()
        eq_(canary, [("refresh", set(["id", "name"]))])

    def test_was_expired_attrs(self):
        # only the expired attribute names are reported to "refresh"
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()
        sess.expire(u1, ["name"])

        sess.query(User).first()
        eq_(canary, [("refresh", set(["name"]))])

    def test_populate_existing(self):
        # populate_existing() reports "refresh" with attrs=None
        User = self.classes.User
        canary = self._fixture()

        sess = Session()
        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        sess.query(User).populate_existing().first()
        eq_(canary, [("refresh", None)])
class SessionEventsTest(_RemoveListeners, _fixtures.FixtureTest):
    """Tests for Session-level events: where listeners may be attached
    (Session class, sessionmaker, scoped_session), the ordering of
    flush/commit/rollback hooks, attach-state hooks, and the bulk
    update/delete hooks.
    """

    run_inserts = None

    def test_class_listen(self):
        # a listener attached to the Session class is seen by sessions
        # created afterwards
        def my_listener(*arg, **kw):
            pass

        event.listen(Session, "before_flush", my_listener)

        s = Session()
        assert my_listener in s.dispatch.before_flush

    def test_sessionmaker_listen(self):
        """test that listen can be applied to individual
        scoped_session() classes."""

        def my_listener_one(*arg, **kw):
            pass

        def my_listener_two(*arg, **kw):
            pass

        S1 = sessionmaker()
        S2 = sessionmaker()

        event.listen(Session, "before_flush", my_listener_one)
        event.listen(S1, "before_flush", my_listener_two)

        # a listener on the Session class propagates to both factories;
        # a listener on S1 stays local to S1
        s1 = S1()
        assert my_listener_one in s1.dispatch.before_flush
        assert my_listener_two in s1.dispatch.before_flush

        s2 = S2()
        assert my_listener_one in s2.dispatch.before_flush
        assert my_listener_two not in s2.dispatch.before_flush

    def test_scoped_session_invalid_callable(self):
        # a scoped_session over a plain lambda cannot accept Session
        # events
        from sqlalchemy.orm import scoped_session

        def my_listener_one(*arg, **kw):
            pass

        scope = scoped_session(lambda: Session())

        assert_raises_message(
            sa.exc.ArgumentError,
            "Session event listen on a scoped_session requires that its "
            "creation callable is associated with the Session class.",
            event.listen,
            scope,
            "before_flush",
            my_listener_one,
        )

    def test_scoped_session_invalid_class(self):
        # same as above, with an arbitrary callable class as the factory
        from sqlalchemy.orm import scoped_session

        def my_listener_one(*arg, **kw):
            pass

        class NotASession(object):
            def __call__(self):
                return Session()

        scope = scoped_session(NotASession)

        assert_raises_message(
            sa.exc.ArgumentError,
            "Session event listen on a scoped_session requires that its "
            "creation callable is associated with the Session class.",
            event.listen,
            scope,
            "before_flush",
            my_listener_one,
        )

    def test_scoped_session_listen(self):
        # a scoped_session over a sessionmaker accepts events normally
        from sqlalchemy.orm import scoped_session

        def my_listener_one(*arg, **kw):
            pass

        scope = scoped_session(sessionmaker())
        event.listen(scope, "before_flush", my_listener_one)

        assert my_listener_one in scope().dispatch.before_flush

    def _listener_fixture(self, **kw):
        # Attach a recording listener to every session lifecycle event;
        # returns the Session and the ordered list of fired event names.
        canary = []

        def listener(name):
            def go(*arg, **kw):
                canary.append(name)

            return go

        sess = Session(**kw)
        for evt in [
            "after_transaction_create",
            "after_transaction_end",
            "before_commit",
            "after_commit",
            "after_rollback",
            "after_soft_rollback",
            "before_flush",
            "after_flush",
            "after_flush_postexec",
            "after_begin",
            "before_attach",
            "after_attach",
            "after_bulk_update",
            "after_bulk_delete",
        ]:
            event.listen(sess, evt, listener(evt))

        return sess, canary

    def test_flush_autocommit_hook(self):
        # in autocommit mode, flush() opens and commits its own
        # transaction, so commit hooks fire inside the flush
        User, users = self.classes.User, self.tables.users

        mapper(User, users)

        sess, canary = self._listener_fixture(
            autoflush=False, autocommit=True, expire_on_commit=False
        )

        u = User(name="u1")
        sess.add(u)
        sess.flush()
        eq_(
            canary,
            [
                "before_attach",
                "after_attach",
                "before_flush",
                "after_transaction_create",
                "after_begin",
                "after_flush",
                "after_flush_postexec",
                "before_commit",
                "after_commit",
                "after_transaction_end",
            ],
        )

    def test_rollback_hook(self):
        User, users = self.classes.User, self.tables.users
        sess, canary = self._listener_fixture()
        mapper(User, users)

        u = User(name="u1", id=1)
        sess.add(u)
        sess.commit()

        # inserting a duplicate primary key makes the second commit fail
        # and forces a rollback
        u2 = User(name="u1", id=1)
        sess.add(u2)
        assert_raises(sa.exc.SAWarning, sess.commit)
        sess.rollback()
        eq_(
            canary,
            [
                "after_transaction_create",  # changed in #5074
                "before_attach",
                "after_attach",
                "before_commit",
                "before_flush",
                "after_transaction_create",
                "after_begin",
                "after_flush",
                "after_flush_postexec",
                "after_transaction_end",
                "after_commit",
                "after_transaction_end",
                "after_transaction_create",
                "before_attach",
                "after_attach",
                "before_commit",
                "before_flush",
                "after_transaction_create",
                "after_begin",
                "after_rollback",
                "after_transaction_end",
                "after_soft_rollback",
                "after_transaction_end",
                "after_soft_rollback",
            ],
        )

    def test_can_use_session_in_outer_rollback_hook(self):
        # inside after_soft_rollback, the session must be usable again
        # once it reports is_active
        User, users = self.classes.User, self.tables.users
        mapper(User, users)

        sess = Session()

        assertions = []

        @event.listens_for(sess, "after_soft_rollback")
        def do_something(session, previous_transaction):
            if session.is_active:
                assertions.append("name" not in u.__dict__)
                assertions.append(u.name == "u1")

        u = User(name="u1", id=1)
        sess.add(u)
        sess.commit()

        u2 = User(name="u1", id=1)
        sess.add(u2)
        assert_raises(sa.exc.SAWarning, sess.commit)
        sess.rollback()
        eq_(assertions, [True, True])

    def test_flush_noautocommit_hook(self):
        # without autocommit, flush() runs inside the session's
        # transaction; no commit hooks fire
        User, users = self.classes.User, self.tables.users

        sess, canary = self._listener_fixture()

        mapper(User, users)

        u = User(name="u1")
        sess.add(u)
        sess.flush()
        eq_(
            canary,
            [
                "after_transaction_create",  # changed due to #5074
                "before_attach",
                "after_attach",
                "before_flush",
                "after_transaction_create",
                "after_begin",
                "after_flush",
                "after_flush_postexec",
                "after_transaction_end",
            ],
        )

    def test_flush_in_commit_hook(self):
        # commit() with pending changes triggers an implicit flush
        # between before_commit and after_commit
        User, users = self.classes.User, self.tables.users

        sess, canary = self._listener_fixture()
        mapper(User, users)

        u = User(name="u1")
        sess.add(u)
        sess.flush()
        canary[:] = []

        u.name = "ed"
        sess.commit()
        eq_(
            canary,
            [
                "before_commit",
                "before_flush",
                "after_transaction_create",
                "after_flush",
                "after_flush_postexec",
                "after_transaction_end",
                "after_commit",
                "after_transaction_end",
                # no longer autocreates after #5074
            ],
        )

    def test_state_before_attach(self):
        # before_attach fires before the instance is placed into the
        # session's collections
        User, users = self.classes.User, self.tables.users
        sess = Session()

        @event.listens_for(sess, "before_attach")
        def listener(session, inst):
            state = attributes.instance_state(inst)
            if state.key:
                assert state.key not in session.identity_map
            else:
                assert inst not in session.new

        mapper(User, users)
        u = User(name="u1")
        sess.add(u)
        sess.flush()
        sess.expunge(u)
        sess.add(u)

    def test_state_after_attach(self):
        # after_attach fires after the instance is in the session's
        # collections
        User, users = self.classes.User, self.tables.users
        sess = Session()

        @event.listens_for(sess, "after_attach")
        def listener(session, inst):
            state = attributes.instance_state(inst)
            if state.key:
                assert session.identity_map[state.key] is inst
            else:
                assert inst in session.new

        mapper(User, users)
        u = User(name="u1")
        sess.add(u)
        sess.flush()
        sess.expunge(u)
        sess.add(u)

    def test_standalone_on_commit_hook(self):
        # commit() with nothing to flush still emits the commit hooks
        sess, canary = self._listener_fixture()
        sess.commit()
        eq_(
            canary,
            [
                "after_transaction_create",  # moved to top due to #5074
                "before_commit",
                "after_commit",
                "after_transaction_end",
            ],
        )

    def test_on_bulk_update_hook(self):
        User, users = self.classes.User, self.tables.users

        sess = Session()
        canary = Mock()

        event.listen(sess, "after_begin", canary.after_begin)
        event.listen(sess, "after_bulk_update", canary.after_bulk_update)

        # the legacy four-argument listener signature is adapted from
        # the single UpdateContext argument
        def legacy(ses, qry, ctx, res):
            canary.after_bulk_update_legacy(ses, qry, ctx, res)

        event.listen(sess, "after_bulk_update", legacy)

        mapper(User, users)

        sess.query(User).update({"name": "foo"})

        eq_(canary.after_begin.call_count, 1)
        eq_(canary.after_bulk_update.call_count, 1)

        upd = canary.after_bulk_update.mock_calls[0][1][0]
        eq_(upd.session, sess)
        eq_(
            canary.after_bulk_update_legacy.mock_calls,
            [call(sess, upd.query, upd.context, upd.result)],
        )

    def test_on_bulk_delete_hook(self):
        User, users = self.classes.User, self.tables.users

        sess = Session()
        canary = Mock()

        event.listen(sess, "after_begin", canary.after_begin)
        event.listen(sess, "after_bulk_delete", canary.after_bulk_delete)

        # legacy four-argument signature, as above
        def legacy(ses, qry, ctx, res):
            canary.after_bulk_delete_legacy(ses, qry, ctx, res)

        event.listen(sess, "after_bulk_delete", legacy)

        mapper(User, users)

        sess.query(User).delete()

        eq_(canary.after_begin.call_count, 1)
        eq_(canary.after_bulk_delete.call_count, 1)

        upd = canary.after_bulk_delete.mock_calls[0][1][0]
        eq_(upd.session, sess)
        eq_(
            canary.after_bulk_delete_legacy.mock_calls,
            [call(sess, upd.query, upd.context, upd.result)],
        )

    def test_connection_emits_after_begin(self):
        sess, canary = self._listener_fixture(bind=testing.db)
        sess.connection()
        # changed due to #5074
        eq_(canary, ["after_transaction_create", "after_begin"])
        sess.close()

    def test_reentrant_flush(self):
        # calling flush() from within before_flush is rejected
        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        def before_flush(session, flush_context, objects):
            session.flush()

        sess = Session()
        event.listen(sess, "before_flush", before_flush)
        sess.add(User(name="foo"))
        assert_raises_message(
            sa.exc.InvalidRequestError, "already flushing", sess.flush
        )

    def test_before_flush_affects_flush_plan(self):
        # objects added/deleted inside before_flush take part in the
        # same flush
        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        def before_flush(session, flush_context, objects):
            for obj in list(session.new) + list(session.dirty):
                if isinstance(obj, User):
                    session.add(User(name="another %s" % obj.name))
            for obj in list(session.deleted):
                if isinstance(obj, User):
                    x = (
                        session.query(User)
                        .filter(User.name == "another %s" % obj.name)
                        .one()
                    )
                    session.delete(x)

        sess = Session()
        event.listen(sess, "before_flush", before_flush)

        u = User(name="u1")
        sess.add(u)
        sess.flush()
        eq_(
            sess.query(User).order_by(User.name).all(),
            [User(name="another u1"), User(name="u1")],
        )

        # a flush with nothing pending adds nothing further
        sess.flush()
        eq_(
            sess.query(User).order_by(User.name).all(),
            [User(name="another u1"), User(name="u1")],
        )

        u.name = "u2"
        sess.flush()
        eq_(
            sess.query(User).order_by(User.name).all(),
            [
                User(name="another u1"),
                User(name="another u2"),
                User(name="u2"),
            ],
        )

        sess.delete(u)
        sess.flush()
        eq_(
            sess.query(User).order_by(User.name).all(),
            [User(name="another u1")],
        )

    def test_before_flush_affects_dirty(self):
        # attribute changes made inside before_flush are included in
        # the same flush
        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        def before_flush(session, flush_context, objects):
            for obj in list(session.identity_map.values()):
                obj.name += " modified"

        sess = Session(autoflush=True)
        event.listen(sess, "before_flush", before_flush)

        u = User(name="u1")
        sess.add(u)
        sess.flush()
        eq_(sess.query(User).order_by(User.name).all(), [User(name="u1")])

        sess.add(User(name="u2"))
        sess.flush()
        sess.expunge_all()
        eq_(
            sess.query(User).order_by(User.name).all(),
            [User(name="u1 modified"), User(name="u2")],
        )

    def test_snapshot_still_present_after_commit(self):
        # loaded attributes remain available inside after_commit; the
        # expiration happens afterwards
        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        sess = Session()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        u1 = sess.query(User).first()

        @event.listens_for(sess, "after_commit")
        def assert_state(session):
            assert "name" in u1.__dict__
            eq_(u1.name, "u1")

        sess.commit()
        assert "name" not in u1.__dict__

    def test_snapshot_still_present_after_rollback(self):
        # loaded attributes remain available inside after_rollback; the
        # expiration happens afterwards
        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        sess = Session()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        u1 = sess.query(User).first()

        @event.listens_for(sess, "after_rollback")
        def assert_state(session):
            assert "name" in u1.__dict__
            eq_(u1.name, "u1")

        sess.rollback()
        assert "name" not in u1.__dict__
class SessionLifecycleEventsTest(_RemoveListeners, _fixtures.FixtureTest):
    """Tests for the object state transition events
    (transient/pending/persistent/deleted/detached), verifying both
    that each event fires and the state the object is in at the moment
    the listener is invoked.
    """

    run_inserts = None

    def _fixture(self, include_address=False):
        # Map User (optionally with a cascading Address relationship),
        # and return a start_events() callable that attaches a
        # Mock-backed listener to every lifecycle transition event.
        users, User = self.tables.users, self.classes.User

        if include_address:
            addresses, Address = self.tables.addresses, self.classes.Address
            mapper(
                User,
                users,
                properties={
                    "addresses": relationship(
                        Address, cascade="all, delete-orphan"
                    )
                },
            )
            mapper(Address, addresses)
        else:
            mapper(User, users)

        listener = Mock()

        sess = Session()

        def start_events():
            event.listen(
                sess, "transient_to_pending", listener.transient_to_pending
            )
            event.listen(
                sess, "pending_to_transient", listener.pending_to_transient
            )
            event.listen(
                sess,
                "persistent_to_transient",
                listener.persistent_to_transient,
            )
            event.listen(
                sess, "pending_to_persistent", listener.pending_to_persistent
            )
            event.listen(
                sess, "detached_to_persistent", listener.detached_to_persistent
            )
            event.listen(
                sess, "loaded_as_persistent", listener.loaded_as_persistent
            )
            event.listen(
                sess, "persistent_to_detached", listener.persistent_to_detached
            )
            event.listen(
                sess, "deleted_to_detached", listener.deleted_to_detached
            )
            event.listen(
                sess, "persistent_to_deleted", listener.persistent_to_deleted
            )
            event.listen(
                sess, "deleted_to_persistent", listener.deleted_to_persistent
            )
            return listener

        if include_address:
            return sess, User, Address, start_events
        else:
            return sess, User, start_events

    def test_transient_to_pending(self):
        # add() on a transient object fires transient_to_pending with
        # the object already in the session
        sess, User, start_events = self._fixture()

        listener = start_events()

        @event.listens_for(sess, "transient_to_pending")
        def trans_to_pending(session, instance):
            assert instance in session
            listener.flag_checked(instance)

        u1 = User(name="u1")
        sess.add(u1)

        eq_(
            listener.mock_calls,
            [call.transient_to_pending(sess, u1), call.flag_checked(u1)],
        )

    def test_pending_to_transient_via_rollback(self):
        # rolling back an unflushed pending object makes it transient
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)

        listener = start_events()

        @event.listens_for(sess, "pending_to_transient")
        def test_deleted_flag(session, instance):
            assert instance not in session
            listener.flag_checked(instance)

        sess.rollback()
        assert u1 not in sess

        eq_(
            listener.mock_calls,
            [call.pending_to_transient(sess, u1), call.flag_checked(u1)],
        )

    def test_pending_to_transient_via_expunge(self):
        # expunging an unflushed pending object makes it transient
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)

        listener = start_events()

        @event.listens_for(sess, "pending_to_transient")
        def test_deleted_flag(session, instance):
            assert instance not in session
            listener.flag_checked(instance)

        sess.expunge(u1)
        assert u1 not in sess

        eq_(
            listener.mock_calls,
            [call.pending_to_transient(sess, u1), call.flag_checked(u1)],
        )

    def test_pending_to_persistent(self):
        # flush of a pending object fires pending_to_persistent exactly
        # once; subsequent flushes of the same object do not re-fire
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)

        listener = start_events()

        @event.listens_for(sess, "pending_to_persistent")
        def test_flag(session, instance):
            assert instance in session
            assert instance._sa_instance_state.persistent
            assert instance._sa_instance_state.key in session.identity_map
            listener.flag_checked(instance)

        sess.flush()

        eq_(
            listener.mock_calls,
            [call.pending_to_persistent(sess, u1), call.flag_checked(u1)],
        )

        u1.name = "u2"
        sess.flush()

        # event was not called again
        eq_(
            listener.mock_calls,
            [call.pending_to_persistent(sess, u1), call.flag_checked(u1)],
        )

    def test_pending_to_persistent_del(self):
        # the event still delivers the object even when the caller's
        # reference has been dropped before the flush
        sess, User, start_events = self._fixture()

        @event.listens_for(sess, "pending_to_persistent")
        def pending_to_persistent(session, instance):
            listener.flag_checked(instance)
            # this is actually u1, because
            # we have a strong ref internally
            is_not_(None, instance)

        u1 = User(name="u1")
        sess.add(u1)

        u1_inst_state = u1._sa_instance_state
        del u1
        gc_collect()

        listener = start_events()

        sess.flush()

        eq_(
            listener.mock_calls,
            [
                call.flag_checked(u1_inst_state.obj()),
                call.pending_to_persistent(sess, u1_inst_state.obj()),
            ],
        )

    def test_persistent_to_deleted_del(self):
        # persistent_to_deleted also delivers the object when the
        # caller's reference has been dropped before the flush
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()

        listener = start_events()

        @event.listens_for(sess, "persistent_to_deleted")
        def persistent_to_deleted(session, instance):
            is_not_(None, instance)
            listener.flag_checked(instance)

        sess.delete(u1)
        u1_inst_state = u1._sa_instance_state

        del u1
        gc_collect()

        sess.flush()

        eq_(
            listener.mock_calls,
            [
                call.persistent_to_deleted(sess, u1_inst_state.obj()),
                call.flag_checked(u1_inst_state.obj()),
            ],
        )

    def test_detached_to_persistent(self):
        # re-adding an expunged (detached) object fires
        # detached_to_persistent
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()

        sess.expunge(u1)

        listener = start_events()

        @event.listens_for(sess, "detached_to_persistent")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance in session
            listener.flag_checked()

        sess.add(u1)

        eq_(
            listener.mock_calls,
            [call.detached_to_persistent(sess, u1), call.flag_checked()],
        )

    def test_loaded_as_persistent(self):
        # a freshly-loaded object fires loaded_as_persistent with fully
        # persistent state
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()
        sess.close()

        listener = start_events()

        @event.listens_for(sess, "loaded_as_persistent")
        def test_identity_flag(session, instance):
            assert instance in session
            assert instance._sa_instance_state.persistent
            assert instance._sa_instance_state.key in session.identity_map
            assert not instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            assert instance._sa_instance_state.persistent
            listener.flag_checked(instance)

        u1 = sess.query(User).filter_by(name="u1").one()

        eq_(
            listener.mock_calls,
            [call.loaded_as_persistent(sess, u1), call.flag_checked(u1)],
        )

    def test_detached_to_persistent_via_deleted(self):
        # delete() of a detached object first re-attaches it (fires
        # detached_to_persistent); the flush then fires
        # persistent_to_deleted
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()
        sess.close()

        listener = start_events()

        @event.listens_for(sess, "detached_to_persistent")
        def test_deleted_flag_persistent(session, instance):
            assert instance not in session.deleted
            assert instance in session
            assert not instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            assert instance._sa_instance_state.persistent
            listener.dtp_flag_checked(instance)

        @event.listens_for(sess, "persistent_to_deleted")
        def test_deleted_flag_detached(session, instance):
            assert instance not in session.deleted
            assert instance not in session
            assert not instance._sa_instance_state.persistent
            assert instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            listener.ptd_flag_checked(instance)

        sess.delete(u1)
        assert u1 in sess.deleted

        eq_(
            listener.mock_calls,
            [call.detached_to_persistent(sess, u1), call.dtp_flag_checked(u1)],
        )

        sess.flush()

        eq_(
            listener.mock_calls,
            [
                call.detached_to_persistent(sess, u1),
                call.dtp_flag_checked(u1),
                call.persistent_to_deleted(sess, u1),
                call.ptd_flag_checked(u1),
            ],
        )

    def test_detached_to_persistent_via_cascaded_delete(self):
        # delete() of a detached parent re-attaches both the parent and
        # its cascaded children
        sess, User, Address, start_events = self._fixture(include_address=True)

        u1 = User(name="u1")
        sess.add(u1)
        a1 = Address(email_address="e1")
        u1.addresses.append(a1)
        sess.commit()
        u1.addresses  # ensure u1.addresses refers to a1 before detachment
        sess.close()

        listener = start_events()

        @event.listens_for(sess, "detached_to_persistent")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance in session
            assert not instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            assert instance._sa_instance_state.persistent
            listener.flag_checked(instance)

        sess.delete(u1)
        assert u1 in sess.deleted
        assert a1 in sess.deleted

        eq_(
            listener.mock_calls,
            [
                call.detached_to_persistent(sess, u1),
                call.flag_checked(u1),
                call.detached_to_persistent(sess, a1),
                call.flag_checked(a1),
            ],
        )

        sess.flush()

    def test_persistent_to_deleted(self):
        # persistent_to_deleted fires at flush time, not at delete()
        # time
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        listener = start_events()

        @event.listens_for(sess, "persistent_to_deleted")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance not in session
            assert instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            assert not instance._sa_instance_state.persistent
            listener.flag_checked(instance)

        sess.delete(u1)
        assert u1 in sess.deleted

        eq_(listener.mock_calls, [])

        sess.flush()
        assert u1 not in sess

        eq_(
            listener.mock_calls,
            [call.persistent_to_deleted(sess, u1), call.flag_checked(u1)],
        )

    def test_persistent_to_detached_via_expunge(self):
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()

        listener = start_events()

        @event.listens_for(sess, "persistent_to_detached")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance not in session
            assert not instance._sa_instance_state.deleted
            assert instance._sa_instance_state.detached
            assert not instance._sa_instance_state.persistent
            listener.flag_checked(instance)

        assert u1 in sess
        sess.expunge(u1)
        assert u1 not in sess

        eq_(
            listener.mock_calls,
            [call.persistent_to_detached(sess, u1), call.flag_checked(u1)],
        )

    def test_persistent_to_detached_via_expunge_all(self):
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()

        listener = start_events()

        @event.listens_for(sess, "persistent_to_detached")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance not in session
            assert not instance._sa_instance_state.deleted
            assert instance._sa_instance_state.detached
            assert not instance._sa_instance_state.persistent
            listener.flag_checked(instance)

        assert u1 in sess
        sess.expunge_all()
        assert u1 not in sess

        eq_(
            listener.mock_calls,
            [call.persistent_to_detached(sess, u1), call.flag_checked(u1)],
        )

    def test_persistent_to_transient_via_rollback(self):
        # rollback of a flushed-but-uncommitted object makes it
        # transient again
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.flush()

        listener = start_events()

        @event.listens_for(sess, "persistent_to_transient")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance not in session
            assert not instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            assert not instance._sa_instance_state.persistent
            assert instance._sa_instance_state.transient
            listener.flag_checked(instance)

        sess.rollback()

        eq_(
            listener.mock_calls,
            [call.persistent_to_transient(sess, u1), call.flag_checked(u1)],
        )

    def test_deleted_to_persistent_via_rollback(self):
        # rollback of a flushed (but uncommitted) delete restores the
        # object to persistent
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        sess.delete(u1)
        sess.flush()

        listener = start_events()

        @event.listens_for(sess, "deleted_to_persistent")
        def test_deleted_flag(session, instance):
            assert instance not in session.deleted
            assert instance in session
            assert not instance._sa_instance_state.deleted
            assert not instance._sa_instance_state.detached
            assert instance._sa_instance_state.persistent
            listener.flag_checked(instance)

        assert u1 not in sess
        assert u1._sa_instance_state.deleted
        assert not u1._sa_instance_state.persistent
        assert not u1._sa_instance_state.detached

        sess.rollback()

        assert u1 in sess
        assert u1._sa_instance_state.persistent
        assert not u1._sa_instance_state.deleted
        assert not u1._sa_instance_state.detached

        eq_(
            listener.mock_calls,
            [call.deleted_to_persistent(sess, u1), call.flag_checked(u1)],
        )

    def test_deleted_to_detached_via_commit(self):
        # commit of a flushed delete finalizes the object as detached
        sess, User, start_events = self._fixture()

        u1 = User(name="u1")
        sess.add(u1)
        sess.commit()

        sess.delete(u1)
        sess.flush()

        listener = start_events()

        @event.listens_for(sess, "deleted_to_detached")
        def test_detached_flag(session, instance):
            assert instance not in session.deleted
            assert instance not in session
            assert not instance._sa_instance_state.deleted
            assert instance._sa_instance_state.detached
            listener.flag_checked(instance)

        assert u1 not in sess
        assert u1._sa_instance_state.deleted
        assert not u1._sa_instance_state.persistent
        assert not u1._sa_instance_state.detached

        sess.commit()

        assert u1 not in sess
        assert not u1._sa_instance_state.deleted
        assert u1._sa_instance_state.detached

        eq_(
            listener.mock_calls,
            [call.deleted_to_detached(sess, u1), call.flag_checked(u1)],
        )
class QueryEventsTest(
    _RemoveListeners,
    _fixtures.FixtureTest,
    AssertsCompiledSQL,
    testing.AssertsExecutionResults,
):
    """Tests for the Query "before_compile" family of events, which may
    rewrite the Query (when registered with retval=True) just before
    SQL is generated.
    """

    __dialect__ = "default"

    @classmethod
    def setup_mappers(cls):
        User = cls.classes.User
        users = cls.tables.users

        mapper(User, users)

    def test_before_compile(self):
        # a retval=True listener can add criteria to the query; here an
        # extra "id != 10" filter is appended for User entities
        @event.listens_for(query.Query, "before_compile", retval=True)
        def no_deleted(query):
            for desc in query.column_descriptions:
                if desc["type"] is User:
                    entity = desc["expr"]
                    query = query.filter(entity.id != 10)
            return query

        User = self.classes.User
        s = Session()

        q = s.query(User).filter_by(id=7)
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "WHERE users.id = :id_1 AND users.id != :id_2",
            checkparams={"id_2": 10, "id_1": 7},
        )

    def test_before_compile_no_retval(self):
        # without retval=True, the hook is purely observational and
        # fires on every compilation
        counter = [0]

        @event.listens_for(query.Query, "before_compile")
        def count(query):
            counter[0] += 1

        User = self.classes.User
        s = Session()

        q = s.query(User).filter_by(id=7)
        str(q)
        str(q)
        eq_(counter, [2])

    def test_alters_entities(self):
        # the hook can change the columns/entities the query returns
        User = self.classes.User

        @event.listens_for(query.Query, "before_compile", retval=True)
        def fn(query):
            return query.add_columns(User.name)

        s = Session()

        q = s.query(User.id).filter_by(id=7)
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "WHERE users.id = :id_1",
            checkparams={"id_1": 7},
        )
        eq_(q.all(), [(7, "jack")])

    def test_before_compile_update(self):
        # before_compile_update receives the UpdateContext and may both
        # add criteria and modify the values to be SET
        @event.listens_for(query.Query, "before_compile_update", retval=True)
        def no_deleted(query, update_context):
            assert update_context.query is query

            for desc in query.column_descriptions:
                if desc["type"] is User:
                    entity = desc["expr"]
                    query = query.filter(entity.id != 10)

                    update_context.values["name"] = (
                        update_context.values["name"] + "_modified"
                    )
            return query

        User = self.classes.User
        s = Session()

        with self.sql_execution_asserter() as asserter:
            s.query(User).filter_by(id=7).update({"name": "ed"})
        asserter.assert_(
            CompiledSQL(
                "UPDATE users SET name=:name WHERE "
                "users.id = :id_1 AND users.id != :id_2",
                [{"name": "ed_modified", "id_1": 7, "id_2": 10}],
            )
        )

    def test_before_compile_delete(self):
        # before_compile_delete receives the DeleteContext and may add
        # criteria to the DELETE
        @event.listens_for(query.Query, "before_compile_delete", retval=True)
        def no_deleted(query, delete_context):
            assert delete_context.query is query

            for desc in query.column_descriptions:
                if desc["type"] is User:
                    entity = desc["expr"]
                    query = query.filter(entity.id != 10)
            return query

        User = self.classes.User
        s = Session()

        # note this deletes no rows
        with self.sql_execution_asserter() as asserter:
            s.query(User).filter_by(id=10).delete()
        asserter.assert_(
            CompiledSQL(
                "DELETE FROM users WHERE "
                "users.id = :id_1 AND users.id != :id_2",
                [{"id_1": 10, "id_2": 10}],
            )
        )
class RefreshFlushInReturningTest(fixtures.MappedTest):
    """test [ticket:3427].

    this is a rework of the test for [ticket:3167] stated
    in test_unitofworkv2, which tests that returning doesn't trigger
    attribute events; the test here is *reversed* so that we test that
    it *does* trigger the new refresh_flush event.

    """

    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "test",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            # client-side default -> populated via "prefetch";
            # server-side default -> eligible for RETURNING
            Column("prefetch_val", Integer, default=5),
            Column("returning_val", Integer, server_default="5"),
        )

    @classmethod
    def setup_classes(cls):
        class Thing(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        Thing = cls.classes.Thing

        # eager_defaults=True makes the flush fetch server defaults,
        # which is what fires refresh_flush
        mapper(Thing, cls.tables.test, eager_defaults=True)

    def test_no_attr_events_flush(self):
        Thing = self.classes.Thing
        mock = Mock()
        event.listen(Thing, "refresh_flush", mock)
        t1 = Thing()
        s = Session()
        s.add(t1)
        s.flush()

        if testing.requires.returning.enabled:
            # ordering is deterministic in this test b.c. the routine
            # appends the "returning" params before the "prefetch"
            # ones.  if there were more than one attribute in each category,
            # then we'd have hash order issues.
            eq_(
                mock.mock_calls,
                [call(t1, ANY, ["returning_val", "prefetch_val"])],
            )
        else:
            eq_(mock.mock_calls, [call(t1, ANY, ["prefetch_val"])])

        eq_(t1.id, 1)
        eq_(t1.prefetch_val, 5)
        eq_(t1.returning_val, 5)
| [
"mike_mp@zzzcomputing.com"
] | mike_mp@zzzcomputing.com |
09931d10a4151654fe8242c78f83ed304088ce12 | 15e818aada2b18047fa895690bc1c2afda6d7273 | /config/m600/control/estimator.py | 498dc87935b2b4dd9089285612b5b81d22e03473 | [
"Apache-2.0"
] | permissive | ghomsy/makani | 4ee34c4248fb0ac355f65aaed35718b1f5eabecf | 818ae8b7119b200a28af6b3669a3045f30e0dc64 | refs/heads/master | 2023-01-11T18:46:21.939471 | 2020-11-10T00:23:31 | 2020-11-10T00:23:31 | 301,863,147 | 0 | 0 | Apache-2.0 | 2020-11-10T00:23:32 | 2020-10-06T21:51:21 | null | UTF-8 | Python | false | false | 21,009 | py | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Estimation parameters."""
from makani.config import mconfig
from makani.control import system_types as m
import numpy as np
from scipy import signal
def _CalcUpwashAnglePerCL(pos, wing_params):
  """Computes the upwash angle detected by the pitot due to wing lift.

  The upwash is computed assuming a constant strength bound vortex
  in the wing.  See Houghton & Carpenter Eq. 5.9 and the
  Kutta-Joukowsky theorem.

  Args:
    pos: Position of the pitot sensors (indexable as [x, y, z]).
    wing_params: Dict containing wing parameters; span 'b' and chord
        'c' are used here.

  Returns:
    delta_alpha: The amount that alpha changes due to upwash of the
        wing.
  """
  # Perpendicular distance from the pitot to the bound vortex line,
  # which runs along the span (the y axis).
  dist_to_vortex = np.hypot(pos[0], pos[2])
  # Spanwise offsets from the pitot to the two ends of the bound vortex.
  half_span_plus = pos[1] + wing_params['b'] / 2.0
  half_span_minus = pos[1] - wing_params['b'] / 2.0
  # We neglect the change in freestream velocity due to the induced
  # velocity and make a small angle approximation.
  delta_alpha = 0.5 * wing_params['c'] * (pos[0] / dist_to_vortex) * (
      half_span_plus / np.hypot(half_span_plus, dist_to_vortex)
      - half_span_minus / np.hypot(half_span_minus, dist_to_vortex)) / (
          4.0 * np.pi * dist_to_vortex)
  # Sanity check: the upwash correction should be small and non-negative.
  assert 0.0 <= delta_alpha <= 0.1
  return delta_alpha
def _CheckFilter(b, a, pass_band, max_phase):
  """Checks a digital filter's phase response over its pass band.

  Evaluates the filter's frequency response at 1000 evenly spaced
  frequencies from DC to pass_band [rad/sample] and verifies that the
  phase at DC is exactly zero and that the phase magnitude stays below
  max_phase [rad] throughout the band.

  Args:
    b: Filter numerator coefficients.
    a: Filter denominator coefficients.
    pass_band: Upper edge [rad/sample] of the band to check.
    max_phase: Maximum allowed phase magnitude [rad] in the band.

  Returns:
    True iff the DC phase is zero and the pass-band phase magnitude
    never reaches max_phase.
  """
  eval_freqs = np.linspace(0.0, pass_band, 1000)
  _, response = signal.freqz(b, a, eval_freqs)
  phases = np.angle(response)
  # Zero phase at DC rules out an inverted (or otherwise shifted)
  # response; the max-magnitude check bounds phase lag in the band.
  return phases[0] == 0.0 and np.max(np.abs(phases)) < max_phase
@mconfig.Config(deps={
    'common_params': 'common.common_params',
    'phys': 'common.physical_constants',
    'pitot': 'm600.pitot',
    'simple_aero': 'm600.control.simple_aero_model',
    'tether': mconfig.WING_MODEL + '.tether',
    'wing': mconfig.WING_MODEL + '.wing',
    'test_site': 'common.test_site'
})
def MakeParams(params):
  """Returns the estimator parameter dict.

  Args:
    params: Dict of resolved config dependencies; the available keys are
      those declared in the @mconfig.Config decorator above.

  Returns:
    Nested dict of estimator parameters (apparent wind, navigation,
    tether, ground station, joystick, weather, wind, etc.).
  """
  upwash_angle_per_cl = _CalcUpwashAnglePerCL(
      params['pitot']['pos'], params['wing'])
  # Coefficients for IIR filters on the rate gyros and accelerometers.
  # These filters attenuate motor vibrations while avoiding reducing
  # phase margin significantly (which is tested approximately by
  # _CheckFilter).
  #
  # The filter cutoff frequencies were hand-optimized to provide
  # sufficient rejection at the vibration frequencies without adding
  # significant phase lag.
  #
  # NOTE: The coefficients generated here should be updated in
  # generate_hover_controllers.m whenever these filters are adjusted.
  vibration_filter_b, vibration_filter_a = signal.butter(
      2, 8.3 * 2.0 * params['common_params']['ts'])
  assert _CheckFilter(vibration_filter_b, vibration_filter_a,
                      0.6 * 2.0 * np.pi * params['common_params']['ts'],
                      np.deg2rad(10.0))
  # Coefficients for IIR filters on the wing velocity.
  Vb_filter_b, Vb_filter_a = signal.butter(
      2, 3.3 * 2.0 * params['common_params']['ts'])
  assert _CheckFilter(Vb_filter_b, Vb_filter_a,
                      0.4 * 2.0 * np.pi * params['common_params']['ts'],
                      np.deg2rad(10.0))
  # Adjust the detwist axis offset depending on the test site.
  # This parameter is only used if adaptive_detwist_cmd in crosswind.py is set
  # to True. This axis is raised to ensure the tether departure direction
  # encircles the axis and also stays far to avoid fast commanded detwist
  # angles. Offshore, the axis needs to be higher up to compensate for the buoy
  # pitching motion.
  # Tuple membership instead of a throwaway single-element list literal.
  if params['test_site'] in (m.kTestSiteNorway,):
    detwist_axis_offset_deg = 20.0
  else:
    detwist_axis_offset_deg = 10.0
  return {
      # Times [s] to initialize the estimator.
      't_initialize': 11.0,
      # Initial payout [m] to set if kControlOptHardCodeInitialPayout
      # is enabled.
      'hard_coded_initial_payout': 0.0,
      'apparent_wind': {
          # Bias [rad] subtracted from the angle-of-attack estimate to
          # compensate for upwash effects.
          'pitot_upwash_alpha_bias': (
              params['simple_aero']['CL_0'] * upwash_angle_per_cl
          ),
          # Scale factor [rad/rad] to divide the angle-of-attack estimate by
          # to compensate for upwash effects.
          'pitot_upwash_alpha_scale': (
              1.0 + params['simple_aero']['dCL_dalpha'] * upwash_angle_per_cl
          ),
          # Speed [m/s] below/above which to not/fully trust the pitot.
          'v_low': 12.0,
          'v_high': 17.0,
          # Estimated alpha and beta [rad] below/above which to fully/not
          # trust the pitot.
          'ang_est_low': np.deg2rad(30.0),
          'ang_est_high': np.deg2rad(40.0),
          # Pitot or loadcell measured alpha and beta [rad] below/above
          # which to fully/not trust the pitot or loadcell.
          'ang_fly_low': np.deg2rad(20.0),
          'ang_fly_high': np.deg2rad(30.0),
          # Cutoff frequencies [Hz] for filtered apparent wind estimate.
          'fc_v': 1.0,
          'fc_alpha': 5.0,
          'fc_beta': 5.0,
          # Cutoff frequency [Hz] for complementary apparent wind filter.
          'fc_comp': 0.5,
      },
      'nav': {
          'attitude': {
              # Initial attitude quaternion.
              'q_g2b_0': [1.0, 0.0, 0.0, 0.0],
              # Coarse initialization proportional gains [rad/s], [#], [#].
              'coarse_init_kp': 10.0,
              'coarse_init_kp_acc': 0.2,
              'coarse_init_kp_mag': 0.8,
              # Initial standard deviation [rad] for attitude errors.
              'sigma_attitude_0': 0.5,
              # Initial standard deviation [rad/s] for gyro biases.
              'sigma_gyro_bias_0': 0.01,
              # Gyro angle random walk [rad/s/rtHz]. The ADIS16488 datasheet
              # indicates a typical gyro angle random walk of 0.26 deg/sqrt(hr).
              #   0.26 deg/sqrt(hour)
              #   = 0.26 * (pi/180 rad) / sqrt(hour)
              #   = 0.26 * pi/180 rad/sqrt(hour) * sqrt(hour)/sqrt(3600 s)
              #   = 0.26 * pi/180 / sqrt(3600) * rad/sqrt(s) * s/s
              #   = 0.26 * pi/180 / sqrt(3600) * rad/s/sqrt(Hz)
              #   = 7.5631e-05 rad/s/sqrt(Hz)
              'sigma_gyro_noise': 0.00075,  # Tuned.
              # Gyro bias instability [rad/s]. The ADIS16488 datasheet
              # indicates a typical gyro bias instability of 5.1 deg/hr.
              #   5.1 deg/hr
              #   = 5.1 * pi/180 rad / 3600 s
              #   = 2.4725e-05 rad/s
              'sigma_gyro_bias_instability': 4.9e-6,  # Tuned.
              # Time constant [s] for the gyro biases. This parameter relates
              # to sigma_gyro_bias_instability to describe a Gauss-Markov
              # process noise.
              'gyro_bias_time_constant': 3600.0,
              # Nominal angle-of-attack [rad] and angle-of-sideslip [rad]
              # for computing the apparent wind vector.
              'nominal_angle_of_attack': 0.0,
              'nominal_angle_of_sideslip': 0.0,
              # Multiplier [#] on the airspeed to give the standard
              # deviation for apparent wind vector updates.
              # Set per b/64558980#comment3 on 2017-10-06.
              'v_app_relative_err': 2.5,
              # Minimum airspeed [m/s] when calculating the standard
              # deviation for apparent wind vector updates.
              'v_app_relative_err_min_airspeed': 15.0,
              # Multiplier [#] on the norm of mag_g to give the
              # standard deviation for magnetometer updates.
              # Set per b/64558980#comment3 on 2017-10-06.
              'mag_relative_err': 0.75,
              # Multiplier [#] on the norm of g to give the standard deviation
              # for plumb bob gravity updates.
              'plumb_bob_relative_err': 0.4,
              # Multiplier [#] to account for the difference between
              # the magnitude of the accelerometer reading and the
              # expected value of g.
              'plumb_bob_g_err_scale': 7.0,
              # Maximum magnitude [rad/s] for gyro biases.
              'max_gyro_bias': 0.05,
              # Cutoff frequency [Hz] for the pre-filter applied before
              # treating the accelerometers as a vector measurement of gravity.
              'fc_acc': 5.0,
              # Correct attitude by comparing the apparent wind vector rotated
              # into ground coordinates to the ground station wind sensor. This
              # setting only affects crosswind attitude estimation.
              'enable_apparent_wind_correction': True,
              # Correct attitude by comparing the port-to-starboard (or
              # wingtip-to-center) GPS position vector rotated into body
              # coordinates against the body frame antenna locations.
              'enable_gps_vector_correction': True,
              # For the GPS vector attitude correction, this parameter defines
              # additional uncertainty to account for mounting and wing flex
              # errors in the physical location of each antenna.
              'wing_wingtip_to_center_sigma': 0.2,  # [m]
              # For the GPS vector attitude correction, this parameter defines
              # additional uncertainty to account for mounting and wing flex
              # errors in the physical location of each antenna.
              'wing_port_to_star_sigma': 0.2,  # [m]
              # For the GPS vector attitude correction, this parameter defines
              # the required ratio between the baseline and standard deviation
              # to apply the measurement correction. A ratio of 9.0 means that
              # the baseline must be 9.0 times the standard deviation. For a
              # 26 m baseline, sigma must be less than 2.89 m.
              'wing_vector_sigma_ratio': 9.0,
              # Reject measurements with a GPS position standard deviation
              # greater than this threshold.
              'max_gps_position_sigma': 0.1,  # [m]
              # Reject measurements by comparing the measured distance and the
              # known physical distance between the two antennas. When choosing
              # this parameter consider the scenario when the measurement error
              # is perpendicular to baseline: a 0.5 m error on a 12.8 m baseline
              # corresponds to a atan2(0.5, 12.8) = 2.2 deg error.
              'max_gps_vector_error': 0.1,  # [m]
              # Scale the difference between the measured distance and the known
              # physical distance between antennas to obtain a measurement
              # standard deviation.
              'gps_vector_relative_err': 1.0,  # [m]
              # Number of estimator cycles to consider the GPS vector solution
              # valid and therefore prevent application of the apparent wind
              # correction.
              'gps_vector_timeout_cycles': 500,
              # Reject measurements with a GPS compass standard deviation
              # greater than this threshold.
              'max_gps_compass_sigma': np.deg2rad(10.0)
          },
          'position': {
              'baro': {
                  'kalman_est': {
                      # Minimum and maximum filter cutoff-frequencies [Hz].
                      'fc_min': 0.0,
                      'fc_max': 1.0,
                      # Random walk variance for barometric altitude bias [m^2].
                      'Q': 0.01,
                  },
                  # Initial standard deviation [m] for the unknown
                  # altitude bias.
                  'sigma_Xg_z_bias_0': 100.0,
                  # Standard deviation [m] for the instantaneous
                  # altitude error for each pressure measurement.
                  'sigma_Xg_z': 4.0
              },
              'filter': {
                  # Initial velocity standard deviation [m/s].
                  'sigma_vel_g_0': 0.1,
                  # Multipliers [#] to apply to the GPS sigmas.
                  #
                  # Currently the position filter re-uses stale GPS
                  # data.  We multiply the reported standard
                  # deviations by the square-root of the GPS update
                  # period divided by the controller update period to
                  # approximately account for this re-use.
                  #
                  # TODO: Signal to the estimator when fresh
                  # GPS data arrives and remove this multiplier.
                  'gps_sigma_multiplier': (0.05 / 0.01)**0.5,
                  # Minimum GPS velocity [m/s] and position [m]
                  # standard deviations.
                  'min_gps_sigma_vel_g': 0.3,
                  'min_gps_sigma_pos_g': 0.1,
                  # Acceleration standard deviation [m/s^2].
                  #
                  # These values set the acceleration standard
                  # deviation (used for the process noise in the
                  # Kalman filter) for hover and all other flight
                  # modes.  These values are set by assuming a maximum
                  # of 12 degrees of attitude error and 60 [m/s^2] of
                  # crosswind acceleration.
                  'sigma_wing_accel_hover': 1.96,
                  'sigma_wing_accel_dynamic': 11.0,
                  # Whether or not to enable barometric altitude.
                  'baro_enabled': False,
                  # Whether or not to enable GLAS aiding.
                  'glas_enabled': False,
                  # Number of cycles to wait after losing GPS to begin applying
                  # GLAS corrections.
                  'gps_position_timeout_cycles': int(
                      10.0 / params['common_params']['ts']),
              },
              'glas': {
                  # Multiplier [#] on tether length to give a minimum
                  # position standard deviation.
                  'min_relative_sigma_pos_g': 0.025,
                  # Position sigma per weight-over-tension ratio [m].
                  #
                  # The reported position standard deviation from the GLAS
                  # estimate is taken to be inversely proportional to
                  # the tether tension.
                  #
                  # TODO: Investigate models that more
                  # accurately account for drag and inertial effects.
                  # One such implementation was removed by I91657020.
                  'sigma_per_weight_over_tension_ratio': 0.45,
                  # Limit on tether weight over tension [#] for use
                  # in the uncertainty model mentioned above.
                  'max_weight_over_tension_ratio': 0.5,
                  # Cutoff frequency [Hz] for updating the GSG biases.
                  'gsg_bias_fc': 0.003,
                  # Minimum tension [N] for updating the GSG biases.
                  #
                  # The bias correction uses a straight line tether
                  # model.  This threshold is set high enough that
                  # catenary effects are negligible.
                  'gsg_bias_tension_lim': 10.0 * (
                      params['tether']['length'] *
                      params['tether']['linear_density'] * params['phys']['g']),
                  # Limits on the GSG biases [rad].
                  'bias_low': {'azi': -np.deg2rad(40.0), 'ele': -0.1},
                  'bias_high': {'azi': np.deg2rad(40.0), 'ele': 0.1}
              },
              'gps': {
                  # Number [#] of control cycles to ignore GPS data for after
                  # a thrown error.
                  'outage_hold_num': 3,
                  # Distance [m] below which GPS position estimates
                  # are guaranteed to be considered in agreement.  The
                  # first number is for hover flight modes, and is
                  # selected to avoid jumps that could endanger a
                  # successful ascend or descend. The
                  # second number is for all other flight modes, and is
                  # set to be twice the difference in position
                  # expected due to a 0.05 second delay between the
                  # receivers when travelling 60 m/s.
                  'min_disagreement_distance_hover': 0.3,
                  'min_disagreement_distance': 6.0,
                  # Fraction of the minimum disagreement distance [#]
                  # from the current estimate that a given receiver
                  # must be before switching away from using its
                  # solution.
                  'disagreement_hysteresis_ratio': 0.5,
                  # Excess position uncertainty [m] added to the alternate GPS
                  # receiver to avoid chatter between receivers.
                  'sigma_hysteresis': 0.2,
                  # Relative threshold [#] used to compare GPS sigma's
                  # when deciding to switch between receivers.
                  'relative_sigma_threshold': 1.5,
              },
          },
          # Filter frequency [Hz] for velocity to determine when to use the
          # V_app vector.
          'fc_Vg': 5.0,
          # Cutoff frequency [Hz] for the norm of the acclerometer reading.
          'fc_acc_norm': 0.4,
          # See definition above.
          'vibration_filter_a': vibration_filter_a.tolist(),
          'vibration_filter_b': vibration_filter_b.tolist(),
          'Vb_filter_a': Vb_filter_a.tolist(),
          'Vb_filter_b': Vb_filter_b.tolist(),
          # Maximum value [m] for the norm of the sigma of position of the
          # position filter. Once we exceed this value, we declare the estimate
          # to be invalid.
          'max_valid_position_sigma_norm': 3.0,
      },
      'tether_anchor': {
          # Filter parameters for the tether anchor.
          'fc_near_perch': 0.1,
          'fc_far_from_perch': 0.01,
          # Payout values [m] over which the filter cutoff frequency is faded.
          'payout_near_perch': 0.0,
          'payout_far_from_perch': 100.0,
          'fc_lateral': 0.1,
          'zeta_lateral': np.sqrt(2.0) / 2.0,
      },
      'ground_station': {
          # Number [#] of controller iterations to debounce the ground station
          # mode.
          'num_debounce': 20,
      },
      'joystick': {
          # Number [#] of controller iterations to debounce joystick switches.
          'joystick_num_debounce': 11,
          # Throttle filter frequency [Hz].
          'fc_throttle': 1.0,
          # Pitch filter frequency [Hz].
          'fc_pitch': 1.0,
      },
      'perch_azi': {
          # Bound on the angular rate [rad/s] estimate for the perch azimuth.
          'max_angle_vel': 1.0,
          # Filter cutoff [Hz] for the perch azimuth angular rate.
          'fc_angle_vel': 1.0,
          # Damping ratio [#] for the filter on perch azimuth angular
          # rate.
          'damping_ratio_angle_vel': 1.0,
      },
      'tether_force': {
          # Cutoff frequency [Hz] for the filtered tension.
          'fc_tension': 1.0
      },
      'tether_ground_angles': {
          # Half-angle [rad] of the cone about the detwist axis in which to hold
          # the estimate of the tether detwist angle.
          'hold_cone_half_angle': np.deg2rad(1.0),
          # Angle [rad] that the axis used to compute the detwist angle is
          # offset upwards to ensure the tether departure direction encircles
          # this detwist axis.
          'detwist_axis_offset': np.deg2rad(detwist_axis_offset_deg),
      },
      'weather': {
          # Filter cutoff frequency [Hz] and damping ratio [#] for a
          # low-pass filter applied to air density measurements.
          'fc_rho': 1.0/60.0,
          'zeta_rho': 1.0,
      },
      'wind': {
          # Wind vector [m/s] to use when the kControlOptHardCodeWind
          # is enabled.
          #
          # TODO: This should probably be a system parameter.
          'hard_coded_wind_g': [-8.19152, -5.73576, 0.0],
          # Filter cutoff frequencies [Hz].
          'fc_initialize': 1.0,
          'fc_vector': 0.2,
          'fc_vector_slow': 0.01,
          'fc_speed': 0.01,
          'fc_speed_playbook': 0.005,
          'zeta_speed_playbook': 1.0,
          'fc_dir': 0.01,
          'zeta_dir': 1.0,
          'fc_dir_playbook': 0.005,
          'zeta_dir_playbook': 1.0,
          # Maximum allowable difference between wind aloft and ground wind.
          # Wind aloft direction will be saturated if greater.
          'playbook_aloft_azi_offset_max': np.deg2rad(20.0)
      },
  }
| [
"luislarco@google.com"
] | luislarco@google.com |
717db42d79ee7b0d3916cafd2ff9cbc961178823 | 89284da682f723c6aaad8ef6bba37ac31cd30c92 | /PythonTutorial/Advance/multiple_threads_and_processes/code/processes/shared_memory_array.py | f85b49bae8270d4d704ef6ec7d53d8bb2a7f8623 | [] | no_license | Danielyan86/Python-Study | 9d9912e0385c5b4d2b7272e9eaca542ff556dc1a | 782c1638eb9733a4be4acbc4c805a78f0fe77546 | refs/heads/master | 2023-03-17T13:26:31.865927 | 2023-03-05T12:30:07 | 2023-03-05T12:30:07 | 26,902,349 | 28 | 25 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | from multiprocessing import Process, Array
def cube(x):
    """Add one to every element of the indexable `x`, in place.

    NOTE(review): despite its name, this function increments each
    element rather than cubing it; the demo below relies on the
    increment behavior.
    """
    for idx in range(len(x)):
        x[idx] += 1
if __name__ == "__main__":
    # Array() initializes an empty shared array of int type with length 3.
    # The array is reused below: each child process adds 1 to every element.
    # You can use `arr` across different processes, just like Value; this is
    # essentially the shared-memory concept.
    # Note: 'd' denotes a double-precision float, while 'i' (as used in
    # Array("i", 3)) denotes a signed integer.
    arr = Array("i", 3)
    print(arr[:])  # A length-3 array, initialized to zeros.
    p = Process(target=cube, args=(arr,))
    p.start()
    p.join()
    print(arr[:])
    p = Process(target=cube, args=(arr,))
    p.start()
    p.join()
    print(arr[:])
| [
"516495459@qq.com"
] | 516495459@qq.com |
05a0a743111a1fb2bcc0bc354304dab8a950f35a | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/MMseg-swin/mmcv/mmcv/parallel/scatter_gather.py | af752be41e688283dae1aee634c26b49818ecea2 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,096 | py | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017
# All rights reserved.
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
from torch import Tensor
from torch.nn.parallel._functions import Scatter as OrigScatter
from ._functions import Scatter
from .data_container import DataContainer
ScatterInputs = Union[Tensor, DataContainer, tuple, list, dict]
def scatter(inputs: ScatterInputs,
            target_gpus: List[int],
            dim: int = 0) -> list:
    """Scatter inputs to target gpus.
    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    Args:
        inputs: A ``Tensor``, ``DataContainer``, or a (possibly nested)
            tuple/list/dict of them.
        target_gpus: Target device ids; ``[-1]`` selects the CPU path.
        dim: Tensor dimension along which to split. Defaults to 0.
    Returns:
        list: One entry per target device, each mirroring the structure
        of ``inputs``.
    """
    def scatter_map(obj):
        if isinstance(obj, Tensor):
            if target_gpus != [-1]:
                return OrigScatter.apply(target_gpus, None, dim, obj)
            else:
                # for CPU inference we use self-implemented scatter
                return Scatter.forward(target_gpus, obj)
        if isinstance(obj, DataContainer):
            if obj.cpu_only:
                # CPU-only containers are passed through without chunking.
                return obj.data
            else:
                return Scatter.forward(target_gpus, obj.data)
        # Containers recurse element-wise; zip(*...) regroups per-element
        # shards into one container per target device.
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            out = list(map(list, zip(*map(scatter_map, obj))))
            return out
        if isinstance(obj, dict) and len(obj) > 0:
            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
            return out
        # Non-container leaves are replicated verbatim on every target.
        return [obj for _ in target_gpus]
    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None  # type: ignore
def scatter_kwargs(inputs: ScatterInputs,
                   kwargs: ScatterInputs,
                   target_gpus: List[int],
                   dim: int = 0) -> Tuple[tuple, tuple]:
    """Scatter with support for kwargs dictionary."""
    # Empty/falsy containers are not scattered at all.
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter list so both have one entry per target device.
    gap = len(scattered_inputs) - len(scattered_kwargs)
    if gap > 0:
        scattered_kwargs.extend({} for _ in range(gap))  # type: ignore
    elif gap < 0:
        scattered_inputs.extend(() for _ in range(-gap))  # type: ignore
    return tuple(scattered_inputs), tuple(scattered_kwargs)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
eaed96112b1511de5d974f5a8149f020a7934fb2 | 669a514211247356ed9be3ad07d138a5afd1f889 | /LaboratorioDePOOUSP/venv/bin/pip3 | 5abfae87242e35257414efc2b615b165aba99c40 | [] | no_license | borin98/Projetos_Em_Python_Curso_USP | 89a0761ad6cbe6c178358332fcd676cda2313649 | 95e78549e133fb644be95e37b4d4a003ca8da401 | refs/heads/master | 2021-04-12T09:54:04.672567 | 2018-05-04T18:57:02 | 2018-05-04T18:57:02 | 126,911,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | #!/home/borin/PycharmProjects/LaboratorioDePOOUSP/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: it resolves the 'pip3'
# console_scripts entry point from the pinned pip distribution and runs it.
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the platform-specific '-script.py(w)' / '.exe' suffix that
    # setuptools wrappers may add to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
    )
| [
"borinmacedo@gmail.com"
] | borinmacedo@gmail.com | |
78d6dff4ab665fc0023f1433308f4bfc7502b9a2 | 971c5ae1d87cdfbb97723485c3d76c17395b82b0 | /x86-semantics/semantics_using_uifs/z3EquivFormulas/z3tutorial.py | f4c0553048e4978a3d5e415bc18edc7b4bb2407f | [
"NCSA"
] | permissive | mewbak/binary-decompilation | 7d0bf64d6cd01bfa5f5fc912d74a85ce81124959 | f58da4c53cd823edc4bbbad6b647dbcefd7e64f8 | refs/heads/master | 2020-04-16T06:08:14.983946 | 2019-01-06T17:21:50 | 2019-01-06T17:21:50 | 165,334,058 | 1 | 0 | NOASSERTION | 2019-01-12T01:42:16 | 2019-01-12T01:42:16 | null | UTF-8 | Python | false | false | 7,103 | py | from z3 import *
# x = Int('x')
# y = Int('y')
# solve(x > 2, y < 10, x + 2*y == 7)
#
# x = Int('x')
# y = Int('y')
# print simplify(x + y + 2*x + 3)
# print simplify(x < y + x + 2)
# print simplify(And(x + 1 >= 3, x**2 + x**2 + y**2 + 2 >= 5))
#
# x = Int('x')
# y = Int('y')
# print x**2 + y**2 >= 1
# set_option(html_mode=False)
# print x**2 + y**2 >= 1
# x = Int('x')
# y = Int('y')
# n = x + y >= 3
# print "num args: ", n.num_args()
# print "children: ", n.children()
# print "1st child:", n.arg(0)
# print "2nd child:", n.arg(1)
# print "operator: ", n.decl()
# print "op name: ", n.decl().name()
# x = Real('x')
# solve(x > 4, x < 0)
# p = Bool('p')
# q = Bool('q')
# r = Bool('r')
# solve(Implies(p, q), r == Not(q), Or(Not(p), r))
# p = Bool('p')
# q = Bool('q')
# print And(p, q, True)
# print simplify(And(p, q, True))
# print simplify(And(p, q, False))
# p = Bool('p')
# x = Real('x')
# solve(Or(x < 5, x > 10), Or(p, x**2 == 2), Not(p))
# x = Int('x')
# y = Int('y')
#
# s = Solver()
# print s
#
# s.add(x > 10, y == x + 2)
# print s
# print "Solving constraints in the solver s ..."
# print s.check()
#
# print "Create a new scope..."
# s.push()
# s.add(y < 11)
# print s
# print "Solving updated set of constraints..."
# print s.check()
#
# print "Restoring state..."
# s.pop()
# print s
# print "Solving restored set of constraints..."
# print s.check()
# x = Real('x')
# y = Real('y')
# s = Solver()
# s.add(x > 1, y > 1, Or(x + y > 3, x - y < 2))
# print "asserted constraints..."
# for c in s.assertions():
# print c
#
# print s.check()
# print "statistics for the last check method..."
# print s.statistics()
# # Traversing statistics
# for k, v in s.statistics():
# print "%s : %s" % (k, v)
# x, y, z = Reals('x y z')
# s = Solver()
# s.add(x > 1, y > 1, x + y > 3, z - x < 10)
# print s.check()
#
# m = s.model()
# print "x = %s" % m[x]
#
# print "traversing model..."
# for d in m.decls():
# print "%s = %s" % (d.name(), m[d])
# x = BitVec('x', 16)
# y = BitVec('y', 16)
# print x + 2
# # Internal representation
# print (x + 2).sexpr()
#
# # -1 is equal to 65535 for 16-bit integers
# print simplify(x + y - 1)
#
# # Creating bit-vector constants
# a = BitVecVal(-1, 16)
# b = BitVecVal(65535, 16)
# print simplify(a == b)
#
#
# # Create to bit-vectors of size 32
# x, y = BitVecs('x y', 32)
#
# solve(x + y == 2, x > 0, y > 0)
#
# solve(x & y == ~y)
#
# solve(x < 0)
#
# # using unsigned version of <
# solve(ULT(x, 0))
#
# # Create to bit-vectors of size 32
# x, y = BitVecs('x y', 32)
#
# solve(x >> 2 == 3)
#
# solve(x << 2 == 3)
#
# solve(x << 2 == 24)
# p, q = Bools('p q')
# demorgan = And(p, q) == Not(Or(Not(p), Not(q)))
# print demorgan
#
#
# print "Proving demorgan..."
# prove(demorgan)
#
# x = BitVec('t', 8)
# print x.sexpr()
# y = Extract(7, 4, x)
# z = Extract(3, 0, x)
# w = Concat(y, z)
# print y.sexpr()
# print z.sexpr()
# print w.sexpr()
# s = Solver();
# s.add(Not(w == x))
# print s.check()
# x = Int('x')
# y = Real('y')
# print (x + 1).sort()
# print (y + 1).sort()
# print (x >= 2).sort()
# x = Int('x')
# print "is expression: ", is_expr(x)
# n = x + 1
# print "is application:", is_app(n)
# print "decl: ", n.decl()
# print "num args: ", n.num_args()
# for i in range(n.num_args()):
# print "arg(", i, ") ->", n.arg(i)
# x = Int('x')
# x_d = x.decl()
# print "is_expr(x_d): ", is_expr(x_d)
# print "is_func_decl(x_d):", is_func_decl(x_d)
# print "x_d.name(): ", x_d.name()
# print "x_d.range(): ", x_d.range()
# print "x_d.arity(): ", x_d.arity()
# # x_d() creates an application with 0 arguments using x_d.
# print "eq(x_d(), x): ", eq(x_d(), x)
# print "\n"
# # f is a function from (Int, Real) to Bool
# f = Function('f', IntSort(), RealSort(), BoolSort())
# print "f.name(): ", f.name()
# print "f.range(): ", f.range()
# print "f.arity(): ", f.arity()
# for i in range(f.arity()):
# print "domain(", i, "): ", f.domain(i)
# # f(x, x) creates an application with 2 arguments using f.
# print f(x, x)
# print eq(f(x, x).decl(), f)
# x = Int('x')
# y = Int('y')
# max = If(x > y, (x, y))
# print simplify(max)
# a = BitVecVal(-1, 16)
# b = BitVecVal(65535, 16)
# print simplify(a + b)
# print a.size()
# x = Real("x")
#
# rule = x > 0
# goal = x < 0
#z3.prove(z3.Implies(rule, goal))
# z3.prove(Implies(And(rule, x != -1, x != -2), goal))
# p, q = Bools('p q')
# print simplify(Implies(p, q))
# print simplify(And(Implies(p, q), Implies(q,p)))
# print simplify(p == q)
#print parse_smt2_string('(declare-const x Int) (assert (> x 0)) (assert (< x 10))')
def prove(f):
    """Try to prove formula `f` by showing its negation is unsatisfiable.
    Prints the result; on a satisfiable negation it also prints a
    counterexample model, mirroring z3's built-in `prove` helper.
    (Single-argument parenthesized print is valid in both Python 2 and 3.)
    """
    s = Solver()
    s.add(Not(f))
    result = s.check()
    if result == unsat:
        print("proved")
    else:
        print("failed to prove")
        if result == sat:
            # A model exists only when the negation is satisfiable.
            print(s.model())
# a = BitVec('x', 4)
# b = BitVec('y', 4)
# z = Not(ULE(a, b) == (a <= b))
# prove(z)
# z3.prove(z)
#a = parse_smt2_string('(declare-const a (_ BitVec 4)) (declare-const b (_ BitVec 4)) (assert (not (= (bvule a b) (bvsle a b)))) ')
#print a
#print z3.prove(a)
#a = BitVecVal(0xff, 16)
#print a.sexpr()
# R1 = BitVec('R1', 64)
# R2 = BitVec('R2', 64)
# R3 = BitVec('R3', 64)
# CONST_BV_S8_V63 = BitVecVal(63, 8)
# CONST_BV_S8_V64 = BitVecVal(64, 8)
# CONST_BV_S1_V0 = BitVecVal(0, 1)
# CONST_BV_S64_V0 = BitVecVal(0, 64)
# CONST_BV_S64_VNEG1 = BitVecVal(-1, 64)
# CONST_BV_S57_V0 = BitVecVal(0x0, 57)
# CONST_BV_S8_V3f = BitVecVal(0x3f, 8)
#
# a = BitVecVal( BV2Int( CONST_BV_S8_V63 , is_signed=False), 64)
# Declarations
# CF = BitVec('CF', 1)
# PF = BitVec('PF', 1)
# AF = BitVec('AF', 1)
# ZF = BitVec('ZF', 1)
# SF = BitVec('SF', 1)
# OF = BitVec('OF', 1)
#
# RAX = BitVec('RAX', 64)
# RCX = BitVec('RCX', 64)
# ZERO1 = BitVecVal(0, 1)
# ONE1 = BitVecVal(1, 1)
#
# cf = (CF == ONE1)
# pf = (PF == ONE1)
# af = (AF == ONE1)
# zf = (ZF == ONE1)
# sf = (SF == ONE1)
# of = (OF == ONE1)
#
# cvt_int32_to_single = Function('f', IntSort(), Float32())
# XX = Function('t', BitVecSort(32), IntSort())
#
# print('[6;30;44m' + 'Opcode:vcvtdq2ps_xmm_xmm' + '[0m')
#
# R1 = BitVec('R1', 256)
# R2 = BitVec('R2', 256)
# CONST_BV_S128_V0 = BitVecVal(0, 128)
#
# #PK_R2 = Concat(CONST_BV_S128_V0, fpToIEEEBV (cvt_int32_to_single ( Extract (127, 96, R1))))
# #PS_R2 = Concat(CONST_BV_S128_V0, fpToIEEEBV (cvt_int32_to_single ( Extract (127, 96, R1))))
#
# PK_R2 = fpToIEEEBV (cvt_int32_to_single ( XX(Extract (127, 96, R1))))
# PS_R2 = fpToIEEEBV (cvt_int32_to_single ( XX(Extract (127, 96, R1))))
#
# print simplify(PK_R2)
# print simplify(PS_R2)
#
# print eq(PK_R2, PS_R2)
# print prove(PK_R2 == PS_R2)
######### Test <<
# X = BitVecVal(2, 8)
# ONE = BitVecVal(1, 1)
#
# Y = X << BitVecVal(Int2BV(1, 8), X.size())
# print Y
# print simplify(Y)
# Demo of uninterpreted functions vs. Python-level function definitions.
CONST_BV_S1_V1 = BitVecVal(0x1, 1)
b = Int('b')
# Y2 is an uninterpreted z3 function symbol Int x Int -> Int.
Y2 = Function('Y', IntSort(), IntSort(), IntSort())
a = Int('a')
# X is a plain Python identity function over z3 expressions.
def X(a):
    return a
# NOTE(review): `z3.prove` presumes `import z3` is in scope; only
# `from z3 import *` is visible in this file — confirm before running.
z3.prove( X(a) == a )
# Y1 swaps its arguments before applying the uninterpreted function Y2.
def Y1(a,b):
    return Y2(b,a)
z3.prove( Y1(a, b) == Y2(b, a) )
# X is redefined here, shadowing the identity version above.
def X(a):
    if(a==3):
        return 1
    else:
        return 2
# X(a) runs Python-side (a is a symbolic Int, so this takes the else
# branch); If(...) builds the symbolic conditional for comparison.
print(X(a) == If (a == 3, 1, 2))
| [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
b37d96fb5e943bddf86de20f27d7cc337526e4f7 | cd2ce8e913048a535d7680789d38a16f23ad04b3 | /server/apps/services/chargecallbacks/__init__.py | e24ff974f67c3803087a1314f203bc90a8adb3c8 | [] | no_license | wade333777/cocos-js-tips | 3758bbaccb168c1a7f4d17e243e8107cb9fbfb06 | 4f430d5631b1118ad251bdaf8384bc0dbdaf07b9 | refs/heads/master | 2021-01-20T20:35:48.273690 | 2016-06-22T10:02:15 | 2016-06-22T10:02:15 | 60,678,664 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | #!/usr/bin/env python
# encoding: utf-8
import whwj_pay_callback
import ez_pay_callback
| [
"1063776603@qq.com"
] | 1063776603@qq.com |
02caebbc080991e786b864bdcff5e6559bee9ffb | 930309163b930559929323647b8d82238724f392 | /arc025_2.py | 31eb79bf696c226f9cf49a3df1266da20ed55b4d | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py |
class Cumsum2d:
    """2-D prefix-sum (cumulative sum) table with rectangle-sum queries."""
    # Class-level storage for the most recently generated table.
    dp = None
    @staticmethod
    def generate(h, w, a):
        """Build and store dp, where dp[i][j] is the sum of a over the
        rectangle (0,0)..(i,j) inclusive; returns dp."""
        table = [[0] * w for _ in range(h)]
        for i in range(h):
            row_sum = 0
            for j in range(w):
                row_sum += a[i][j]
                # Row prefix plus the column total accumulated above.
                table[i][j] = row_sum + (table[i - 1][j] if i else 0)
        Cumsum2d.dp = table
        return table
    @staticmethod
    def calc(p, q, x, y):
        """Return the sum over the rectangle (p,q)..(x,y) inclusive,
        or 0 for an empty rectangle."""
        table = Cumsum2d.dp
        if p > x or q > y:
            return 0
        # Inclusion-exclusion on the four prefix rectangles.
        total = table[x][y]
        if p > 0:
            total -= table[p - 1][y]
        if q > 0:
            total -= table[x][q - 1]
        if p > 0 and q > 0:
            total += table[p - 1][q - 1]
        return total
import itertools
# Read an H x W grid of chocolate values from stdin.
H,W=map(int, input().split())
C = [list(map(int, input().split())) for _ in range(H)]
for h,w in itertools.product(range(H), range(W)):
    if (h+w)%2==0: # if this is a black-chocolate cell
        C[h][w] *= -1 # flip the sign
Cumsum2d.generate(H,W,C)
ans = 0
# A rectangle summing to zero has equal black and white totals; track the
# largest such area over all rectangles (p,q)-(x,y).
for p, q in itertools.product(range(H), range(W)):
    for x, y in itertools.product(range(p, H), range(q, W)):
        if Cumsum2d.calc(p,q,x,y) == 0:
            ans = max(ans, (x-p+1)*(y-q+1))
print(ans)
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
3f06d0b16de54e75a4262e8811413a8a3debcf2e | 86d33ca948fefd0e4dcea71fc8a53c6b062eca8b | /safe_relay_service/gas_station/models.py | 8aae69493abc13847eb7c3fcf076ef6b1184787d | [
"MIT"
] | permissive | energywebfoundation/safe-relay-service | f140f39375c1e916810e94b7289355cb6f1b00a5 | 7a34b2492a78027aa1131360c445d2de3ec1aaee | refs/heads/master | 2020-09-26T18:24:57.727244 | 2019-12-16T18:19:25 | 2019-12-16T18:19:25 | 226,312,414 | 0 | 0 | MIT | 2019-12-06T11:15:00 | 2019-12-06T11:14:59 | null | UTF-8 | Python | false | false | 920 | py | from django.db import models
from model_utils.models import TimeStampedModel
class GasPrice(TimeStampedModel):
    """Point-in-time snapshot of gas price estimates at several speed tiers.

    ``created``/``modified`` timestamps come from TimeStampedModel.
    NOTE(review): units are presumably wei, matching the usual gas-station
    oracle convention — confirm against the service that populates these rows.
    """

    # Price tiers, cheapest/slowest first.
    lowest = models.BigIntegerField()
    safe_low = models.BigIntegerField()
    standard = models.BigIntegerField()
    fast = models.BigIntegerField()
    fastest = models.BigIntegerField()

    class Meta:
        # Lets ``GasPrice.objects.latest()`` return the newest snapshot.
        get_latest_by = 'created'

    def __str__(self):
        # Human-readable one-line dump of all tiers, prefixed by timestamp.
        return '%s lowest=%d safe_low=%d standard=%d fast=%d fastest=%d' % (self.created,
                                                                            self.lowest,
                                                                            self.safe_low,
                                                                            self.standard,
                                                                            self.fast,
                                                                            self.fastest)
| [
"uxio@gnosis.pm"
] | uxio@gnosis.pm |
951f93f3ad78397dfe5c0b6c8d65393db383a88c | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/apps/scratch/alex/test/prototyped_from.py | c409e5d9084597782be40c6598ca405228c3cce5 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 1,311 | py | '''
Created on May 3, 2010
@author: alexander
'''
from enthought.traits.api import \
PrototypedFrom, Float, HasTraits, Instance, Str, DelegatesTo
class Parent ( HasTraits ):
    """Prototype object whose ``last_name`` is shared with Child instances."""

    first_name = Str
    last_name = Str

    def _last_name_changed(self, new):
        # Traits static notification handler: fires whenever last_name changes.
        print "Parent's last name changed to %s." % new
class Child ( HasTraits ):
    """Child whose ``last_name`` is prototyped from its ``father``."""

    father = Instance( Parent )
    first_name = Str
    # Alternatives kept for comparison with the prototype pattern:
    # last_name = DelegatesTo( 'father' )
    # last_name = DelegatesTo( 'father', listenable = False )
    last_name = PrototypedFrom( 'father' )

    def _last_name_changed(self, new):
        # Fires for changes seen on the child, whether set locally or
        # inherited from the prototype.
        print "Child's last name changed to %s." % new
# Demonstration of PrototypedFrom semantics (per the Traits docs): the child
# tracks the father's last_name until it is explicitly assigned, at which
# point a local "shadow" value takes over; deleting the shadow re-links it.
dad = Parent( first_name='William', last_name='Chase' )
son = Child( first_name='John', father=dad )

print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name

# No shadow yet: the change propagates from prototype to child.
dad.last_name='Jones'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name

# Assigning on the child creates a local shadow value...
son.last_name='Thomas'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name

# ...so prototype changes no longer reach the child.
dad.last_name='Riley'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name

# Deleting the shadow restores the prototype link.
del son.last_name
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name

dad.last_name='Simmons'
print 'dad.last_name', dad.last_name
print 'son.last_name', son.last_name
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
785e7dc3e4aec91b2973026a9f6193bc488cd7ad | 7dc495401ea92c4007e5ee6e19d05a0d2b75afab | /fae2/websiteResults/apps.py | af4bbd89d1f5f0e7bc7c79348c2f1ca778e52aff | [
"Apache-2.0"
] | permissive | scasagrande/fae2 | ed3ff3bdf9b533cd23891fd78beed7f8ac8b3de1 | 78e2f883e39014c2addef28871cf9b53ad74f585 | refs/heads/master | 2021-01-14T10:16:44.603672 | 2016-03-12T00:57:09 | 2016-03-12T00:57:09 | 53,964,802 | 0 | 0 | null | 2016-03-15T17:29:17 | 2016-03-15T17:29:17 | null | UTF-8 | Python | false | false | 103 | py | from django.apps import AppConfig
class WebsiteResultsConfig(AppConfig):
    """Django application configuration for the ``websiteResults`` app."""

    name = 'websiteResults'
| [
"jongund@illinois.edu"
] | jongund@illinois.edu |
592549a8b5a948839225f0f96a6df8c95c820d86 | c6778674b408fc6c560b046ce74b9672a29ed14c | /toasty/multi_tan.py | 40e8eb308471dd81c5f32b0887d5425cfe7ac12b | [
"MIT"
] | permissive | WorldWideTelescope/toasty | 0b91fc851a4870bd3cb3e20d16dfcfc929aa9170 | 3f0c5e02a6d437b04dfc092f1bd6539f58e9fa11 | refs/heads/master | 2023-09-05T16:28:55.199491 | 2023-08-18T00:17:51 | 2023-08-18T00:17:51 | 200,071,988 | 0 | 4 | MIT | 2023-08-18T00:17:53 | 2019-08-01T15:05:26 | Python | UTF-8 | Python | false | false | 12,251 | py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2022 the AAS WorldWide Telescope project
# Licensed under the MIT License.
"""
Generate tiles from a collection of images on a common TAN projection.
"""
__all__ = """
MultiTanProcessor
""".split()
from astropy.wcs import WCS
import numpy as np
import warnings
from .progress import progress_bar
from .study import StudyTiling
# FITS WCS keywords that must agree exactly across every input image for
# them to share a single common TAN projection.
MATCH_HEADERS = [
    "CTYPE1",
    "CTYPE2",
    "CRVAL1",
    "CRVAL2",
    "CDELT1",
    "CDELT2",
]

# In my sample set, some of these files vary minutely from one header to the
# next, so we save them but don't demand exact matching. We should use
# np.isclose() or whatever it is.
SAVE_HEADERS = [
    "PC1_1",
    "PC1_2",
    "PC2_1",
    "PC2_2",
]
class MultiTanDescriptor(object):
    """Per-input bookkeeping for one sub-image of the global TAN mosaic."""

    ident = None  # collection ID of the source image
    in_shape = None  # shape of the input image (height, width per usage below)

    # Pixel bounds of this image expressed relative to the shared CRPIX
    # reference point (zero-based; max values are inclusive).
    crxmin = None
    crxmax = None
    crymin = None
    crymax = None

    # Integer pixel bounds of this image within the global mosaic frame.
    imin = None
    imax = None
    jmin = None
    jmax = None

    sub_tiling = None  # StudyTiling covering just this image's footprint
class MultiTanProcessor(object):
    """
    Generate tiles from a collection of images on a common TAN projection.

    Some large astronomical images are stored as a collection of sub-images that
    share a common tangential projection, a format is that is nice and easy to
    convert into a WWT "study" tile pyramid. This class can process a collection
    of such images and break them into the highest-resolution layer of such a
    tile pyramid.
    """

    def __init__(self, collection):
        # `collection` yields image descriptions and pixel data for the inputs.
        self._collection = collection

    def compute_global_pixelization(self, builder):
        """
        Read the input images to determine the global pixelation of this data
        set.

        This function reads the FITS headers of all of the input data files to
        determine the overall image size and the parameters of its tiling as a
        WWT study. This should be pretty easy, because we've been assured that
        everything is on a nice common TAN, which is exactly what we need to end
        up with anyway.
        """
        self._descs = []
        ref_headers = None
        global_crxmin = None

        for desc in self._collection.descriptions():
            # For figuring out the tiling properties, we need to ensure that
            # we're working in top-down mode everywhere.
            desc.ensure_negative_parity()

            # Build up information for the common TAN projection.

            header = desc.wcs.to_header()

            if ref_headers is None:
                # First input: capture the reference projection parameters.
                ref_headers = {}

                for h in MATCH_HEADERS:
                    ref_headers[h] = header[h]

                for h in SAVE_HEADERS:
                    value = header.get(h)
                    if value is not None:
                        ref_headers[h] = value

                if ref_headers["CTYPE1"] != "RA---TAN":
                    raise Exception(
                        "all inputs must be in a TAN projection, but {} is not".format(
                            desc.collection_id
                        )
                    )

                if ref_headers["CTYPE2"] != "DEC--TAN":
                    raise Exception(
                        "all inputs must be in a TAN projection, but {} is not".format(
                            desc.collection_id
                        )
                    )
            else:
                # Subsequent inputs must match the reference exactly on the
                # MATCH_HEADERS keywords.
                for h in MATCH_HEADERS:
                    expected = ref_headers[h]
                    observed = header[h]

                    if observed != expected:
                        raise Exception(
                            "inputs are not on uniform WCS grid; in file {}, expected "
                            "value {} for header {} but observed {}".format(
                                desc.collection_id, expected, h, observed
                            )
                        )

            # FITS CRPIX is 1-based; convert to 0-based pixel indices.
            this_crpix1 = header["CRPIX1"] - 1
            this_crpix2 = header["CRPIX2"] - 1

            mtdesc = MultiTanDescriptor()
            mtdesc.ident = desc.collection_id
            mtdesc.in_shape = desc.shape

            # Bounds of this image relative to the shared reference pixel.
            mtdesc.crxmin = 0 - this_crpix1
            mtdesc.crxmax = (desc.shape[1] - 1) - this_crpix1
            mtdesc.crymin = 0 - this_crpix2
            mtdesc.crymax = (desc.shape[0] - 1) - this_crpix2

            if global_crxmin is None:
                global_crxmin = mtdesc.crxmin
                global_crxmax = mtdesc.crxmax
                global_crymin = mtdesc.crymin
                global_crymax = mtdesc.crymax
            else:
                global_crxmin = min(global_crxmin, mtdesc.crxmin)
                global_crxmax = max(global_crxmax, mtdesc.crxmax)
                global_crymin = min(global_crymin, mtdesc.crymin)
                global_crymax = max(global_crymax, mtdesc.crymax)

            self._descs.append(mtdesc)

        # We can now compute the global properties of the tiled TAN representation:

        width = int(global_crxmax - global_crxmin) + 1
        height = int(global_crymax - global_crymin) + 1
        self._tiling = StudyTiling(width, height)

        # NOTE(review): `this_crpix1`/`mtdesc` here are the leftovers from the
        # *last* loop iteration; combined with that image's offset this yields
        # the global reference pixel, but it assumes at least one input.
        ref_headers["CRPIX1"] = this_crpix1 + 1 + (mtdesc.crxmin - global_crxmin)
        ref_headers["CRPIX2"] = this_crpix2 + 1 + (mtdesc.crymin - global_crymin)
        wcs = WCS(ref_headers)

        self._tiling.apply_to_imageset(builder.imgset)
        builder.apply_wcs_info(wcs, width, height)

        # While we're here, figure out how each input will map onto the global
        # tiling. This makes sure that nothing funky happened during the
        # computation and allows us to know how many tiles we'll have to visit.

        self._n_todo = 0

        for desc in self._descs:
            desc.imin = int(np.floor(desc.crxmin - global_crxmin))
            desc.imax = int(np.ceil(desc.crxmax - global_crxmin))
            desc.jmin = int(np.floor(desc.crymin - global_crymin))
            desc.jmax = int(np.ceil(desc.crymax - global_crymin))

            # Compute the sub-tiling now so that we can count how many total
            # tiles we'll need to process.

            if desc.imax < desc.imin or desc.jmax < desc.jmin:
                raise Exception(
                    f"segment {desc.ident} maps to zero size in the global mosaic"
                )

            desc.sub_tiling = self._tiling.compute_for_subimage(
                desc.imin,
                desc.jmin,
                desc.imax + 1 - desc.imin,
                desc.jmax + 1 - desc.jmin,
            )

            self._n_todo += desc.sub_tiling.count_populated_positions()

        return self  # chaining convenience

    def tile(self, pio, parallel=None, cli_progress=False, **kwargs):
        """
        Tile the input images into the deepest layer of the pyramid.

        Parameters
        ----------
        pio : :class:`toasty.pyramid.PyramidIO`
            A :class:`~toasty.pyramid.PyramidIO` instance to manage the I/O with
            the tiles in the tile pyramid.
        parallel : integer or None (the default)
            The level of parallelization to use. If unspecified, defaults to using
            all CPUs. If the OS does not support fork-based multiprocessing,
            parallel processing is not possible and serial processing will be
            forced. Pass ``1`` to force serial processing.
        cli_progress : optional boolean, defaults False
            If true, a progress bar will be printed to the terminal.
        """
        from .par_util import resolve_parallelism

        parallel = resolve_parallelism(parallel)

        if parallel > 1:
            self._tile_parallel(pio, cli_progress, parallel, **kwargs)
        else:
            self._tile_serial(pio, cli_progress, **kwargs)

        # Since we used `pio.update_image()`, we should clean up the lockfiles
        # that were generated.
        pio.clean_lockfiles(self._tiling._tile_levels)

    def _tile_serial(self, pio, cli_progress, **kwargs):
        # Process every input image in sequence on this process.
        tile_parity_sign = pio.get_default_vertical_parity_sign()

        with progress_bar(total=self._n_todo, show=cli_progress) as progress:
            for image, desc in zip(self._collection.images(), self._descs):
                # Ensure that the image and the eventual tile agree on parity
                if image.get_parity_sign() != tile_parity_sign:
                    image.flip_parity()

                for (
                    pos,
                    width,
                    height,
                    image_x,
                    image_y,
                    tile_x,
                    tile_y,
                ) in desc.sub_tiling.generate_populated_positions():
                    # Tiling coordinate systems are always negative (top-down)
                    # parity, with the Y=0 tile at the top. But the actual data
                    # buffers might be positive (bottoms-up) parity -- this is
                    # the case for FITS. In those situations, we have to flip
                    # the vertical positioning. Because we have ensured that the
                    # source image and tile layouts agree, the needed tweak is
                    # actually quite minimal:
                    if tile_parity_sign == 1:
                        image_y = image.height - (image_y + height)
                        tile_y = 256 - (tile_y + height)

                    ix_idx = slice(image_x, image_x + width)
                    bx_idx = slice(tile_x, tile_x + width)
                    iy_idx = slice(image_y, image_y + height)
                    by_idx = slice(tile_y, tile_y + height)

                    with pio.update_image(
                        pos, masked_mode=image.mode, default="masked"
                    ) as basis:
                        image.update_into_maskable_buffer(
                            basis, iy_idx, ix_idx, by_idx, bx_idx
                        )

                    progress.update(1)

    def _tile_parallel(self, pio, cli_progress, parallel, **kwargs):
        import multiprocessing as mp

        # Start up the workers

        done_event = mp.Event()
        # Bounded queue applies backpressure so we don't hold every image in
        # memory at once.
        queue = mp.Queue(maxsize=2 * parallel)
        workers = []

        for _ in range(parallel):
            w = mp.Process(
                target=_mp_tile_worker, args=(queue, done_event, pio, kwargs)
            )
            w.daemon = True
            w.start()
            workers.append(w)

        # Send out them segments

        with progress_bar(total=len(self._descs), show=cli_progress) as progress:
            for image, desc in zip(self._collection.images(), self._descs):
                queue.put((image, desc))
                progress.update(1)

        # Finish up

        queue.close()
        queue.join_thread()
        done_event.set()

        for w in workers:
            w.join()
def _mp_tile_worker(queue, done_event, pio, _kwargs):
    """
    Generate and enqueue the tiles that need to be processed.

    Worker-process loop: pull (image, descriptor) pairs off `queue` and write
    each image's pixels into the appropriate deepest-layer tiles via `pio`.
    Exits once `done_event` is set and the queue has drained.
    """
    from queue import Empty

    tile_parity_sign = pio.get_default_vertical_parity_sign()

    while True:
        try:
            # un-pickling WCS objects always triggers warnings right now
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")

                image, desc = queue.get(True, timeout=1)
        except Empty:
            # Only stop once the producer has signalled completion; otherwise
            # the timeout is just a polling interval.
            if done_event.is_set():
                break
            continue

        # Make the image's row order agree with the tiles' row order.
        if image.get_parity_sign() != tile_parity_sign:
            image.flip_parity()

        for (
            pos,
            width,
            height,
            image_x,
            image_y,
            tile_x,
            tile_y,
        ) in desc.sub_tiling.generate_populated_positions():
            # Bottoms-up buffers need their vertical placement flipped; see
            # the detailed comment in MultiTanProcessor._tile_serial.
            if tile_parity_sign == 1:
                image_y = image.height - (image_y + height)
                tile_y = 256 - (tile_y + height)

            ix_idx = slice(image_x, image_x + width)
            bx_idx = slice(tile_x, tile_x + width)
            iy_idx = slice(image_y, image_y + height)
            by_idx = slice(tile_y, tile_y + height)

            with pio.update_image(
                pos, masked_mode=image.mode, default="masked"
            ) as basis:
                image.update_into_maskable_buffer(basis, iy_idx, ix_idx, by_idx, bx_idx)
| [
"peter@newton.cx"
] | peter@newton.cx |
8050bb2685427b930d2b9bf195e420e980ddfa6a | 3e77a86429ba0f6968f709e77e204cdfe920a041 | /python/python/src/python_problems/other_python_prob/split_ex.py | 197395da79cb77288a4fcac60b91f4ba222cd5c8 | [] | no_license | ramyamango123/test | a2d9bb6cafe8d7406b76eba526ddded2acf2a3b2 | 85420c9406109a72e1b1d455ea29a5cae9def5a3 | refs/heads/master | 2020-06-07T23:34:08.344051 | 2014-03-12T05:19:05 | 2014-03-12T05:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | import re
#t1 = raw_input("Enter a price:")
t1 = "$2.99 after rebate"
#x1 = regexpi("After rebate")
#print x1
if i in t1:
ts = t1.split(" ")
r1 = ts[0]
A1 = r1[1:]
print A1
else:
x1= t1[1:]
print x1 | [
"ramya@Ramyas-MacBook-Air.local"
] | ramya@Ramyas-MacBook-Air.local |
123cdcb3e7724202187438dd63b63dd01e104218 | 96ced1bf722a6b003a9388f2b1b82e7e2870cb5f | /platforma_inwestorow/scraper/urls.py | e265c4f93e0b2177c42b2e149197302cb15087cb | [] | no_license | topi-chan/scraper_app | bfb069b199e04249257c0968bb5ff3c274d3d0d3 | 4fe97e3ab5480f9365aa6373ca6338b34d221ca7 | refs/heads/master | 2023-02-24T07:19:30.116068 | 2021-01-28T00:36:41 | 2021-01-28T00:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | from django.urls import path
from . import views
# URL routes for the scraper app: the index page and the mailing-assignment
# endpoint, both served by views in this app.
urlpatterns = [
    path('', views.index, name='index'),
    path('mailing_assign', views.mailing_assign, name='mailing_assign')
]
| [
"maciej@top-project.com.pl"
] | maciej@top-project.com.pl |
d09b084170f96a5415905a1133d4562ed51f5774 | 677388ca1fc9b489e94598d392d1b584efa0c096 | /unpacking variables.py | ff9f5965f11eb9db6310847d13359f90a83ba125 | [] | no_license | robertruhiu/learn | 356b10a92536e8feca137e0ef15fdbac588785a7 | 6f32418144111ce52c23b79314c1cf2c59ee03b8 | refs/heads/master | 2021-01-23T16:05:38.468043 | 2017-09-14T10:37:29 | 2017-09-14T10:37:29 | 102,722,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | from sys import argv
script,first,second = argv
print "The script is called:",script
print"Your first variable is:",first
print"Your second variable is:",second
| [
"robertruhiu@gmail.com"
] | robertruhiu@gmail.com |
687978f653d19ee7a31533d6c0bb62eef063429e | c67603fed9ef7a1ebf5a41944e5f65d37e9ddfb3 | /lovcenbanka/spiders/spider.py | 76a1bbbbdccd8780f43fc061a4156a24c2c3f20b | [] | no_license | hristo-grudev/lovcenbanka | ff20c25f67ff9dfa89c8440cc79f5a741c317def | 1a80c2b5d3fff7a3bee869ad637f0ef5fe872878 | refs/heads/main | 2023-03-13T10:34:13.024062 | 2021-02-24T12:04:57 | 2021-02-24T12:04:57 | 341,887,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import LovcenbankaItem
from itemloaders.processors import TakeFirst
class LovcenbankaSpider(scrapy.Spider):
    """Crawl the lovcenbanka.me news listing and extract each post's
    title, body text, and date into LovcenbankaItem objects."""

    name = 'lovcenbanka'
    start_urls = ['https://lovcenbanka.me/me/novosti']

    def parse(self, response):
        # Follow every post linked from the listing page.
        post_links = response.xpath('//h2[@class="ba-blog-post-title"]/a/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)

        # Recurse through the listing pagination links.
        next_page = response.xpath('//div[@class="ba-blog-posts-pagination"]//a/@href').getall()
        yield from response.follow_all(next_page, self.parse)

    def parse_post(self, response):
        # Title and date are single nodes; the body is gathered from all
        # non-blank text nodes and joined into one string.
        title = response.xpath('//h1/text()').get()
        description = response.xpath('//div[@class="blog-content-wrapper"]//text()[normalize-space()]').getall()
        description = [p.strip() for p in description]
        description = ' '.join(description).strip()
        date = response.xpath('//span[@class="intro-post-date"]/text()').get()

        # TakeFirst collapses each loaded field to its first scalar value.
        item = ItemLoader(item=LovcenbankaItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
0a37618d4074892270ed144a3b76d507146dfb1f | 32dda10669e459cf37c31f426fa709001d2c75b0 | /atcoder/contest/solved/abc155_b.py | 897a7f9a1fa3b72ca3f84dc1f5154053ba979bcd | [] | no_license | fastso/learning-python | 3300f50d06871245d0bfcbe9d201224580f70852 | d21dbd1b9f31017cdb1ed9b9ffd1e53ffe326572 | refs/heads/master | 2023-02-10T14:43:53.726247 | 2023-01-26T10:14:59 | 2023-01-26T10:14:59 | 193,454,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | n = int(input())
a = list(map(int, input().split()))
for i in a:
if i % 2 == 0:
if i % 3 == 0:
continue
elif i % 5 == 0:
continue
else:
print('DENIED')
exit()
print('APPROVED')
| [
"fastso.biko@gmail.com"
] | fastso.biko@gmail.com |
702daecec153b661b5fca34283aeb67e8b4888ca | 25df9eca90070191be927a35e3285343dd3bfe9b | /main.py | 146f8a307569a9c075633d03b40e6d27ff5a5b29 | [] | no_license | minhnd3796/simple_pyobject_tutorial | 2639bea433212ca4470a5464210ebfce3727ccec | 340b797e7b06ea308652be60aff430ab0811fe28 | refs/heads/master | 2021-05-02T02:22:03.726342 | 2018-02-09T09:13:43 | 2018-02-09T09:13:43 | 120,881,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | import Id
name = Id.get_name()
addr = Id.get_addr()
dob = Id.get_dob()
print("name:", name)
print("addr:", addr)
print("dob:", dob) | [
"gordonnguyen3796@gmail.com"
] | gordonnguyen3796@gmail.com |
5c24e27f03722c90cd20e99b743b2fc90abc4ab9 | c98e9ebdb356360c6dbbfd8fcf5a809fc7c0b975 | /rockypages/admin.py | e2d08ee2907733c629af2b5e61b5db56ccc95387 | [] | no_license | Cerkinfo/homepage | c89f9f7154f2dc6c43109fee500d8a060f16b5a5 | 4b59b48af7e1e4e5b168883ff3f90cc0a9d3e481 | refs/heads/master | 2020-05-29T19:28:21.201760 | 2014-04-17T12:30:04 | 2014-04-17T12:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
from ckeditor.widgets import CKEditorWidget
from django.contrib.flatpages.admin import FlatpageForm
from reversion.admin import VersionAdmin
from flatblocks.models import FlatBlock
class FlatPageForm(forms.ModelForm):
    """Flatpage editing form that validates the URL and swaps the plain
    content textarea for a CKEditor rich-text widget."""

    url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/]+$',
        help_text = _("Example: '/about/contact/'. Make sure to have leading"
            " and trailing slashes."),
        # NOTE(review): the `error_message` kwarg was removed from form fields
        # in Django 1.8 (use error_messages={'invalid': ...}) — confirm the
        # Django version this project targets.
        error_message = _("This value must contain only letters, numbers,"
            " underscores, dashes or slashes."))
    content = forms.CharField(widget=CKEditorWidget(), label=_("Content"))

    class Meta:
        # NOTE(review): a ModelForm Meta without `fields`/`exclude` raises
        # ImproperlyConfigured on Django >= 1.8 — confirm target version.
        model = FlatPage
class FlatPageAdmin(VersionAdmin):
    """Reversion-tracked admin for flatpages, using the CKEditor form and
    mirroring the stock flatpage admin's layout."""

    form = FlatPageForm
    fieldsets = (
        (None, {'fields': ('url', 'title', 'content', 'sites')}),
        # Advanced options are collapsed by default.
        (_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
    )
    list_display = ('url', 'title')
    list_filter = ('sites', 'enable_comments', 'registration_required')
    search_fields = ('url', 'title')
class FlatBlockAdmin(VersionAdmin):
    """Reversion-tracked admin for flat blocks; no extra customization."""
    pass
# Replace the stock admin registrations with the reversion-aware admin
# classes defined in this module, so edits are version-tracked.
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
admin.site.unregister(FlatBlock)
admin.site.register(FlatBlock, FlatBlockAdmin)
| [
"nikita.marchant@gmail.com"
] | nikita.marchant@gmail.com |
b11ff45c86d0a9b296aa011ae26fc47f43f32ac2 | b958551bde844d2bfaa4dd7cb670c4ebab4ff17b | /week4/365_day_advanced.py | f0e79a956030ae39dc60eebefc7cb303568806a0 | [] | no_license | aslupin/cpe31-task | d97d3e495634826d6752c1ee82192cc58045a883 | c15ec9be94b899120db39b076a4d1c644f0af24b | refs/heads/master | 2021-01-15T09:18:54.615578 | 2017-11-17T12:53:23 | 2017-11-17T12:53:23 | 99,573,358 | 0 | 2 | null | 2017-09-20T06:25:31 | 2017-08-07T12:00:05 | Python | UTF-8 | Python | false | false | 782 | py | import math
movement = {"U":1,"R":1,"L":-1,"D":-1} # Const of movement
x = [0];y = [0] # list container for pattern
lastx=0;lasty=0;tmpx=0;tmpy=0 # declare var
check = False
fly = input()
here_x = int(input())
here_y = int(input())
for i in fly: # get each of pattern
if(i == 'U' or i == 'D'):tmpy += movement[i]
if(i == 'R' or i == 'L'):tmpx += movement[i]
if(not(tmpx in x and tmpy in y)):
x.append(tmpx)
y.append(tmpy)
if(here_x <= 0 and here_y <=0):bignum = math.fabs(min(here_x,here_y))
else:bignum = max(here_x,here_y) # big-number
for i in range(len(x)):
lastx = x[i] * bignum ;lasty = y[i] * bignum
for j in range(len(x)):
if(lastx + x[j] == here_x and lasty + y[j] == here_y):check = True
if(check):print('Y')
else:print('N') | [
"poon_arsene_lupin@hotmail.com"
] | poon_arsene_lupin@hotmail.com |
690c670821f3e1a878b4a4d3973fa7740182c1e8 | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/googlecloudsdk/third_party/apis/container/v1alpha1/container_v1alpha1_client.py | 98a609adb37c6f988f4f57da9a5f4a800362287c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 80,693 | py | """Generated client library for container version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.container.v1alpha1 import container_v1alpha1_messages as messages
class ContainerV1alpha1(base_api.BaseApiClient):
"""Generated client library for service container version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://container.googleapis.com/'
_PACKAGE = u'container'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform']
_VERSION = u'v1alpha1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ContainerV1alpha1'
_URL_VERSION = u'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new container handle."""
url = url or self.BASE_URL
super(ContainerV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_aggregated_usableSubnetworks = self.ProjectsAggregatedUsableSubnetworksService(self)
self.projects_aggregated = self.ProjectsAggregatedService(self)
self.projects_locations_clusters_nodePools = self.ProjectsLocationsClustersNodePoolsService(self)
self.projects_locations_clusters_well_known = self.ProjectsLocationsClustersWellKnownService(self)
self.projects_locations_clusters = self.ProjectsLocationsClustersService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects_zones_clusters_nodePools = self.ProjectsZonesClustersNodePoolsService(self)
self.projects_zones_clusters = self.ProjectsZonesClustersService(self)
self.projects_zones_operations = self.ProjectsZonesOperationsService(self)
self.projects_zones = self.ProjectsZonesService(self)
self.projects = self.ProjectsService(self)
class ProjectsAggregatedUsableSubnetworksService(base_api.BaseApiService):
"""Service class for the projects_aggregated_usableSubnetworks resource."""
_NAME = u'projects_aggregated_usableSubnetworks'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsAggregatedUsableSubnetworksService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
r"""Lists subnetworks that are usable for creating clusters in a project.
Args:
request: (ContainerProjectsAggregatedUsableSubnetworksListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListUsableSubnetworksResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/aggregated/usableSubnetworks',
http_method=u'GET',
method_id=u'container.projects.aggregated.usableSubnetworks.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'filter', u'pageSize', u'pageToken'],
relative_path=u'v1alpha1/{+parent}/aggregated/usableSubnetworks',
request_field='',
request_type_name=u'ContainerProjectsAggregatedUsableSubnetworksListRequest',
response_type_name=u'ListUsableSubnetworksResponse',
supports_download=False,
)
class ProjectsAggregatedService(base_api.BaseApiService):
"""Service class for the projects_aggregated resource."""
_NAME = u'projects_aggregated'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsAggregatedService, self).__init__(client)
self._upload_configs = {
}
class ProjectsLocationsClustersNodePoolsService(base_api.BaseApiService):
"""Service class for the projects_locations_clusters_nodePools resource."""
_NAME = u'projects_locations_clusters_nodePools'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsLocationsClustersNodePoolsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a node pool for a cluster.
Args:
request: (CreateNodePoolRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools',
http_method=u'POST',
method_id=u'container.projects.locations.clusters.nodePools.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1alpha1/{+parent}/nodePools',
request_field='<request>',
request_type_name=u'CreateNodePoolRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a node pool from a cluster.
Args:
request: (ContainerProjectsLocationsClustersNodePoolsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}',
http_method=u'DELETE',
method_id=u'container.projects.locations.clusters.nodePools.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
relative_path=u'v1alpha1/{+name}',
request_field='',
request_type_name=u'ContainerProjectsLocationsClustersNodePoolsDeleteRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves the node pool requested.
Args:
request: (ContainerProjectsLocationsClustersNodePoolsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(NodePool) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}',
http_method=u'GET',
method_id=u'container.projects.locations.clusters.nodePools.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
relative_path=u'v1alpha1/{+name}',
request_field='',
request_type_name=u'ContainerProjectsLocationsClustersNodePoolsGetRequest',
response_type_name=u'NodePool',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists the node pools for a cluster.
Args:
request: (ContainerProjectsLocationsClustersNodePoolsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListNodePoolsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools',
http_method=u'GET',
method_id=u'container.projects.locations.clusters.nodePools.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'clusterId', u'projectId', u'zone'],
relative_path=u'v1alpha1/{+parent}/nodePools',
request_field='',
request_type_name=u'ContainerProjectsLocationsClustersNodePoolsListRequest',
response_type_name=u'ListNodePoolsResponse',
supports_download=False,
)
def Rollback(self, request, global_params=None):
r"""Roll back the previously Aborted or Failed NodePool upgrade.
This will be an no-op if the last upgrade successfully completed.
Args:
request: (RollbackNodePoolUpgradeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Rollback')
return self._RunMethod(
config, request, global_params=global_params)
Rollback.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback',
http_method=u'POST',
method_id=u'container.projects.locations.clusters.nodePools.rollback',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha1/{+name}:rollback',
request_field='<request>',
request_type_name=u'RollbackNodePoolUpgradeRequest',
response_type_name=u'Operation',
supports_download=False,
)
def SetAutoscaling(self, request, global_params=None):
r"""Sets the autoscaling settings of a specific node pool.
Args:
request: (SetNodePoolAutoscalingRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetAutoscaling')
return self._RunMethod(
config, request, global_params=global_params)
SetAutoscaling.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setAutoscaling',
http_method=u'POST',
method_id=u'container.projects.locations.clusters.nodePools.setAutoscaling',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha1/{+name}:setAutoscaling',
request_field='<request>',
request_type_name=u'SetNodePoolAutoscalingRequest',
response_type_name=u'Operation',
supports_download=False,
)
def SetManagement(self, request, global_params=None):
  r"""Sets the NodeManagement options for a node pool.

  Args:
    request: (SetNodePoolManagementRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('SetManagement')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
SetManagement.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setManagement',
    http_method=u'POST',
    method_id=u'container.projects.locations.clusters.nodePools.setManagement',
    ordered_params=[u'name'],
    path_params=[u'name'],
    query_params=[],
    relative_path=u'v1alpha1/{+name}:setManagement',
    request_field='<request>',
    request_type_name=u'SetNodePoolManagementRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def SetSize(self, request, global_params=None):
  r"""Sets the size for a specific node pool.

  Args:
    request: (SetNodePoolSizeRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('SetSize')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
SetSize.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setSize',
    http_method=u'POST',
    method_id=u'container.projects.locations.clusters.nodePools.setSize',
    ordered_params=[u'name'],
    path_params=[u'name'],
    query_params=[],
    relative_path=u'v1alpha1/{+name}:setSize',
    request_field='<request>',
    request_type_name=u'SetNodePoolSizeRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def Update(self, request, global_params=None):
  r"""Updates the version and/or image type of a specific node pool.

  Args:
    request: (UpdateNodePoolRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('Update')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
Update.method_config = lambda: base_api.ApiMethodInfo(
    flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}',
    http_method=u'PUT',
    method_id=u'container.projects.locations.clusters.nodePools.update',
    ordered_params=[u'name'],
    path_params=[u'name'],
    query_params=[],
    relative_path=u'v1alpha1/{+name}',
    request_field='<request>',
    request_type_name=u'UpdateNodePoolRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
# Generated service stub for the cluster ".well-known" sub-resource
# (OIDC discovery endpoint).
class ProjectsLocationsClustersWellKnownService(base_api.BaseApiService):
  """Service class for the projects_locations_clusters_well_known resource."""

  _NAME = u'projects_locations_clusters_well_known'

  def __init__(self, client):
    super(ContainerV1alpha1.ProjectsLocationsClustersWellKnownService, self).__init__(client)
    self._upload_configs = {
        }

  def GetOpenid_configuration(self, request, global_params=None):
    r"""GetOpenIDConfig gets the OIDC discovery document for the cluster.

    See the OpenID Connect Discovery 1.0 specification for details.
    https://openid.net/specs/openid-connect-discovery-1_0.html
    This API is not yet intended for general use, and is not available for all
    clusters.

    Args:
      request: (ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (GetOpenIDConfigResponse) The response message.
    """
    config = self.GetMethodConfig('GetOpenid_configuration')
    return self._RunMethod(
        config, request, global_params=global_params)

  # Deferred construction of this method's HTTP routing metadata (generated).
  GetOpenid_configuration.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/.well-known/openid-configuration',
      http_method=u'GET',
      method_id=u'container.projects.locations.clusters.well-known.getOpenid-configuration',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[],
      relative_path=u'v1alpha1/{+parent}/.well-known/openid-configuration',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsClustersWellKnownGetOpenidConfigurationRequest',
      response_type_name=u'GetOpenIDConfigResponse',
      supports_download=False,
  )
# Generated service stub for the projects.locations.clusters resource.
# Each method issues one HTTP request described by its lazily-built
# method_config (same pattern for every method below).
class ProjectsLocationsClustersService(base_api.BaseApiService):
  """Service class for the projects_locations_clusters resource."""

  _NAME = u'projects_locations_clusters'

  def __init__(self, client):
    super(ContainerV1alpha1.ProjectsLocationsClustersService, self).__init__(client)
    self._upload_configs = {
        }

  def CompleteIpRotation(self, request, global_params=None):
    r"""Completes master IP rotation.

    Args:
      request: (CompleteIPRotationRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('CompleteIpRotation')
    return self._RunMethod(
        config, request, global_params=global_params)

  CompleteIpRotation.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:completeIpRotation',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.completeIpRotation',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:completeIpRotation',
      request_field='<request>',
      request_type_name=u'CompleteIPRotationRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def Create(self, request, global_params=None):
    r"""Creates a cluster, consisting of the specified number and type of Google.

    Compute Engine instances.
    By default, the cluster is created in the project's
    [default network](/compute/docs/networks-and-firewalls#networks).
    One firewall is added for the cluster. After cluster creation,
    the cluster creates routes for each node to allow the containers
    on that node to communicate with all other instances in the
    cluster.
    Finally, an entry is added to the project's global metadata indicating
    which CIDR range is being used by the cluster.

    Args:
      request: (CreateClusterRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('Create')
    return self._RunMethod(
        config, request, global_params=global_params)

  Create.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.create',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[],
      relative_path=u'v1alpha1/{+parent}/clusters',
      request_field='<request>',
      request_type_name=u'CreateClusterRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def Delete(self, request, global_params=None):
    r"""Deletes the cluster, including the Kubernetes endpoint and all worker.

    nodes.
    Firewalls and routes that were configured during cluster creation
    are also deleted.
    Other Google Compute Engine resources that might be in use by the cluster
    (e.g. load balancer resources) will not be deleted if they weren't present
    at the initial create time.

    Args:
      request: (ContainerProjectsLocationsClustersDeleteRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(
        config, request, global_params=global_params)

  Delete.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}',
      http_method=u'DELETE',
      method_id=u'container.projects.locations.clusters.delete',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'clusterId', u'projectId', u'zone'],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsClustersDeleteRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def Get(self, request, global_params=None):
    r"""Gets the details for a specific cluster.

    Args:
      request: (ContainerProjectsLocationsClustersGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Cluster) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}',
      http_method=u'GET',
      method_id=u'container.projects.locations.clusters.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'clusterId', u'projectId', u'zone'],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsClustersGetRequest',
      response_type_name=u'Cluster',
      supports_download=False,
  )

  def GetJwks(self, request, global_params=None):
    r"""GetJSONWebKeys gets the public component of the cluster signing keys in.

    JSON Web Key format.
    This API is not yet intended for general use, and is not available for all
    clusters.

    Args:
      request: (ContainerProjectsLocationsClustersGetJwksRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (GetJSONWebKeysResponse) The response message.
    """
    config = self.GetMethodConfig('GetJwks')
    return self._RunMethod(
        config, request, global_params=global_params)

  GetJwks.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/jwks',
      http_method=u'GET',
      method_id=u'container.projects.locations.clusters.getJwks',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[],
      relative_path=u'v1alpha1/{+parent}/jwks',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsClustersGetJwksRequest',
      response_type_name=u'GetJSONWebKeysResponse',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    r"""Lists all clusters owned by a project in either the specified zone or all.

    zones.

    Args:
      request: (ContainerProjectsLocationsClustersListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListClustersResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters',
      http_method=u'GET',
      method_id=u'container.projects.locations.clusters.list',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'projectId', u'zone'],
      relative_path=u'v1alpha1/{+parent}/clusters',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsClustersListRequest',
      response_type_name=u'ListClustersResponse',
      supports_download=False,
  )

  def SetAddons(self, request, global_params=None):
    r"""Sets the addons for a specific cluster.

    Args:
      request: (SetAddonsConfigRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetAddons')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetAddons.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setAddons',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setAddons',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setAddons',
      request_field='<request>',
      request_type_name=u'SetAddonsConfigRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetLegacyAbac(self, request, global_params=None):
    r"""Enables or disables the ABAC authorization mechanism on a cluster.

    Args:
      request: (SetLegacyAbacRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetLegacyAbac')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetLegacyAbac.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLegacyAbac',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setLegacyAbac',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setLegacyAbac',
      request_field='<request>',
      request_type_name=u'SetLegacyAbacRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetLocations(self, request, global_params=None):
    r"""Sets the locations for a specific cluster.

    Args:
      request: (SetLocationsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetLocations')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetLocations.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setLocations',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setLocations',
      request_field='<request>',
      request_type_name=u'SetLocationsRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetLogging(self, request, global_params=None):
    r"""Sets the logging service for a specific cluster.

    Args:
      request: (SetLoggingServiceRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetLogging')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetLogging.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLogging',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setLogging',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setLogging',
      request_field='<request>',
      request_type_name=u'SetLoggingServiceRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetMaintenancePolicy(self, request, global_params=None):
    r"""Sets the maintenance policy for a cluster.

    Args:
      request: (SetMaintenancePolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetMaintenancePolicy')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetMaintenancePolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMaintenancePolicy',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setMaintenancePolicy',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setMaintenancePolicy',
      request_field='<request>',
      request_type_name=u'SetMaintenancePolicyRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetMasterAuth(self, request, global_params=None):
    r"""Used to set master auth materials.

    Currently supports: changing the admin password for a specific cluster,
    either via password generation or an explicitly set password.
    Modify basic_auth.csv and reset the K8S API server.

    Args:
      request: (SetMasterAuthRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetMasterAuth')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetMasterAuth.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setMasterAuth',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setMasterAuth',
      request_field='<request>',
      request_type_name=u'SetMasterAuthRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetMonitoring(self, request, global_params=None):
    r"""Sets the monitoring service for a specific cluster.

    Args:
      request: (SetMonitoringServiceRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetMonitoring')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetMonitoring.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMonitoring',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setMonitoring',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setMonitoring',
      request_field='<request>',
      request_type_name=u'SetMonitoringServiceRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetNetworkPolicy(self, request, global_params=None):
    r"""Enables/Disables Network Policy for a cluster.

    Args:
      request: (SetNetworkPolicyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetNetworkPolicy')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetNetworkPolicy.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setNetworkPolicy',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setNetworkPolicy',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setNetworkPolicy',
      request_field='<request>',
      request_type_name=u'SetNetworkPolicyRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def SetResourceLabels(self, request, global_params=None):
    r"""Sets labels on a cluster.

    Args:
      request: (SetLabelsRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('SetResourceLabels')
    return self._RunMethod(
        config, request, global_params=global_params)

  SetResourceLabels.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setResourceLabels',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.setResourceLabels',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:setResourceLabels',
      request_field='<request>',
      request_type_name=u'SetLabelsRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def StartIpRotation(self, request, global_params=None):
    r"""Start master IP rotation.

    Args:
      request: (StartIPRotationRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('StartIpRotation')
    return self._RunMethod(
        config, request, global_params=global_params)

  StartIpRotation.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:startIpRotation',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.startIpRotation',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:startIpRotation',
      request_field='<request>',
      request_type_name=u'StartIPRotationRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def Update(self, request, global_params=None):
    r"""Updates the settings for a specific cluster.

    Args:
      request: (UpdateClusterRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('Update')
    return self._RunMethod(
        config, request, global_params=global_params)

  Update.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}',
      http_method=u'PUT',
      method_id=u'container.projects.locations.clusters.update',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}',
      request_field='<request>',
      request_type_name=u'UpdateClusterRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def UpdateMaster(self, request, global_params=None):
    r"""Updates the master for a specific cluster.

    Args:
      request: (UpdateMasterRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('UpdateMaster')
    return self._RunMethod(
        config, request, global_params=global_params)

  UpdateMaster.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:updateMaster',
      http_method=u'POST',
      method_id=u'container.projects.locations.clusters.updateMaster',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:updateMaster',
      request_field='<request>',
      request_type_name=u'UpdateMasterRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )
# Generated service stub for long-running operations under
# projects.locations.operations.
class ProjectsLocationsOperationsService(base_api.BaseApiService):
  """Service class for the projects_locations_operations resource."""

  _NAME = u'projects_locations_operations'

  def __init__(self, client):
    super(ContainerV1alpha1.ProjectsLocationsOperationsService, self).__init__(client)
    self._upload_configs = {
        }

  def Cancel(self, request, global_params=None):
    r"""Cancels the specified operation.

    Args:
      request: (CancelOperationRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Empty) The response message.
    """
    config = self.GetMethodConfig('Cancel')
    return self._RunMethod(
        config, request, global_params=global_params)

  # Deferred construction of this method's HTTP routing metadata (generated);
  # same pattern for each method below.
  Cancel.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
      http_method=u'POST',
      method_id=u'container.projects.locations.operations.cancel',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[],
      relative_path=u'v1alpha1/{+name}:cancel',
      request_field='<request>',
      request_type_name=u'CancelOperationRequest',
      response_type_name=u'Empty',
      supports_download=False,
  )

  def Get(self, request, global_params=None):
    r"""Gets the specified operation.

    Args:
      request: (ContainerProjectsLocationsOperationsGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (Operation) The response message.
    """
    config = self.GetMethodConfig('Get')
    return self._RunMethod(
        config, request, global_params=global_params)

  Get.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
      http_method=u'GET',
      method_id=u'container.projects.locations.operations.get',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'operationId', u'projectId', u'zone'],
      relative_path=u'v1alpha1/{+name}',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsOperationsGetRequest',
      response_type_name=u'Operation',
      supports_download=False,
  )

  def List(self, request, global_params=None):
    r"""Lists all operations in a project in a specific zone or all zones.

    Args:
      request: (ContainerProjectsLocationsOperationsListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ListOperationsResponse) The response message.
    """
    config = self.GetMethodConfig('List')
    return self._RunMethod(
        config, request, global_params=global_params)

  List.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/operations',
      http_method=u'GET',
      method_id=u'container.projects.locations.operations.list',
      ordered_params=[u'parent'],
      path_params=[u'parent'],
      query_params=[u'projectId', u'zone'],
      relative_path=u'v1alpha1/{+parent}/operations',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsOperationsListRequest',
      response_type_name=u'ListOperationsResponse',
      supports_download=False,
  )
# Generated service stub for the projects.locations resource.
class ProjectsLocationsService(base_api.BaseApiService):
  """Service class for the projects_locations resource."""

  _NAME = u'projects_locations'

  def __init__(self, client):
    super(ContainerV1alpha1.ProjectsLocationsService, self).__init__(client)
    self._upload_configs = {
        }

  def GetServerConfig(self, request, global_params=None):
    r"""Returns configuration info about the Kubernetes Engine service.

    Args:
      request: (ContainerProjectsLocationsGetServerConfigRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments
    Returns:
      (ServerConfig) The response message.
    """
    config = self.GetMethodConfig('GetServerConfig')
    return self._RunMethod(
        config, request, global_params=global_params)

  # Deferred construction of this method's HTTP routing metadata (generated).
  GetServerConfig.method_config = lambda: base_api.ApiMethodInfo(
      flat_path=u'v1alpha1/projects/{projectsId}/locations/{locationsId}/serverConfig',
      http_method=u'GET',
      method_id=u'container.projects.locations.getServerConfig',
      ordered_params=[u'name'],
      path_params=[u'name'],
      query_params=[u'projectId', u'zone'],
      relative_path=u'v1alpha1/{+name}/serverConfig',
      request_field='',
      request_type_name=u'ContainerProjectsLocationsGetServerConfigRequest',
      response_type_name=u'ServerConfig',
      supports_download=False,
  )
class ProjectsZonesClustersNodePoolsService(base_api.BaseApiService):
"""Service class for the projects_zones_clusters_nodePools resource."""
_NAME = u'projects_zones_clusters_nodePools'
def __init__(self, client):
  # Standard generated service initializer; this service defines no
  # resumable-upload configurations.
  super(ContainerV1alpha1.ProjectsZonesClustersNodePoolsService, self).__init__(client)
  self._upload_configs = {
      }
def Autoscaling(self, request, global_params=None):
  r"""Sets the autoscaling settings of a specific node pool.

  Args:
    request: (SetNodePoolAutoscalingRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('Autoscaling')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated);
# zone-based (legacy-style) path addressing, unlike the locations variants.
Autoscaling.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'container.projects.zones.clusters.nodePools.autoscaling',
    ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
    path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
    query_params=[],
    relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/autoscaling',
    request_field='<request>',
    request_type_name=u'SetNodePoolAutoscalingRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def Create(self, request, global_params=None):
  r"""Creates a node pool for a cluster.

  Args:
    request: (CreateNodePoolRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('Create')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
Create.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'container.projects.zones.clusters.nodePools.create',
    ordered_params=[u'projectId', u'zone', u'clusterId'],
    path_params=[u'clusterId', u'projectId', u'zone'],
    query_params=[],
    relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools',
    request_field='<request>',
    request_type_name=u'CreateNodePoolRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def Delete(self, request, global_params=None):
  r"""Deletes a node pool from a cluster.

  Args:
    request: (ContainerProjectsZonesClustersNodePoolsDeleteRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('Delete')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
Delete.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'DELETE',
    method_id=u'container.projects.zones.clusters.nodePools.delete',
    ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
    path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
    query_params=[u'name'],
    relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}',
    request_field='',
    request_type_name=u'ContainerProjectsZonesClustersNodePoolsDeleteRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def Get(self, request, global_params=None):
  r"""Retrieves the node pool requested.

  Args:
    request: (ContainerProjectsZonesClustersNodePoolsGetRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (NodePool) The response message.
  """
  config = self.GetMethodConfig('Get')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
Get.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'GET',
    method_id=u'container.projects.zones.clusters.nodePools.get',
    ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
    path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
    query_params=[u'name'],
    relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}',
    request_field='',
    request_type_name=u'ContainerProjectsZonesClustersNodePoolsGetRequest',
    response_type_name=u'NodePool',
    supports_download=False,
)
def List(self, request, global_params=None):
  r"""Lists the node pools for a cluster.

  Args:
    request: (ContainerProjectsZonesClustersNodePoolsListRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (ListNodePoolsResponse) The response message.
  """
  config = self.GetMethodConfig('List')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
List.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'GET',
    method_id=u'container.projects.zones.clusters.nodePools.list',
    ordered_params=[u'projectId', u'zone', u'clusterId'],
    path_params=[u'clusterId', u'projectId', u'zone'],
    query_params=[u'parent'],
    relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools',
    request_field='',
    request_type_name=u'ContainerProjectsZonesClustersNodePoolsListRequest',
    response_type_name=u'ListNodePoolsResponse',
    supports_download=False,
)
def Rollback(self, request, global_params=None):
  r"""Roll back the previously Aborted or Failed NodePool upgrade.

  This will be a no-op if the last upgrade successfully completed.

  Args:
    request: (RollbackNodePoolUpgradeRequest) input message
    global_params: (StandardQueryParameters, default: None) global arguments
  Returns:
    (Operation) The response message.
  """
  config = self.GetMethodConfig('Rollback')
  return self._RunMethod(
      config, request, global_params=global_params)

# Deferred construction of this method's HTTP routing metadata (generated).
Rollback.method_config = lambda: base_api.ApiMethodInfo(
    http_method=u'POST',
    method_id=u'container.projects.zones.clusters.nodePools.rollback',
    ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
    path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
    query_params=[],
    relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}:rollback',
    request_field='<request>',
    request_type_name=u'RollbackNodePoolUpgradeRequest',
    response_type_name=u'Operation',
    supports_download=False,
)
def SetManagement(self, request, global_params=None):
r"""Sets the NodeManagement options for a node pool.
Args:
request: (SetNodePoolManagementRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetManagement')
return self._RunMethod(
config, request, global_params=global_params)
SetManagement.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.nodePools.setManagement',
ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setManagement',
request_field='<request>',
request_type_name=u'SetNodePoolManagementRequest',
response_type_name=u'Operation',
supports_download=False,
)
def SetSize(self, request, global_params=None):
r"""Sets the size for a specific node pool.
Args:
request: (SetNodePoolSizeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetSize')
return self._RunMethod(
config, request, global_params=global_params)
SetSize.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.nodePools.setSize',
ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/setSize',
request_field='<request>',
request_type_name=u'SetNodePoolSizeRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates the version and/or iamge type of a specific node pool.
Args:
request: (UpdateNodePoolRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.nodePools.update',
ordered_params=[u'projectId', u'zone', u'clusterId', u'nodePoolId'],
path_params=[u'clusterId', u'nodePoolId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/nodePools/{nodePoolId}/update',
request_field='<request>',
request_type_name=u'UpdateNodePoolRequest',
response_type_name=u'Operation',
supports_download=False,
)
class ProjectsZonesClustersService(base_api.BaseApiService):
"""Service class for the projects_zones_clusters resource."""
_NAME = u'projects_zones_clusters'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsZonesClustersService, self).__init__(client)
self._upload_configs = {
}
def Addons(self, request, global_params=None):
r"""Sets the addons for a specific cluster.
Args:
request: (SetAddonsConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Addons')
return self._RunMethod(
config, request, global_params=global_params)
Addons.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.addons',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/addons',
request_field='<request>',
request_type_name=u'SetAddonsConfigRequest',
response_type_name=u'Operation',
supports_download=False,
)
def CompleteIpRotation(self, request, global_params=None):
r"""Completes master IP rotation.
Args:
request: (CompleteIPRotationRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('CompleteIpRotation')
return self._RunMethod(
config, request, global_params=global_params)
CompleteIpRotation.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.completeIpRotation',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:completeIpRotation',
request_field='<request>',
request_type_name=u'CompleteIPRotationRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Create(self, request, global_params=None):
r"""Creates a cluster, consisting of the specified number and type of Google.
Compute Engine instances.
By default, the cluster is created in the project's
[default network](/compute/docs/networks-and-firewalls#networks).
One firewall is added for the cluster. After cluster creation,
the cluster creates routes for each node to allow the containers
on that node to communicate with all other instances in the
cluster.
Finally, an entry is added to the project's global metadata indicating
which CIDR range is being used by the cluster.
Args:
request: (CreateClusterRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.create',
ordered_params=[u'projectId', u'zone'],
path_params=[u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters',
request_field='<request>',
request_type_name=u'CreateClusterRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the cluster, including the Kubernetes endpoint and all worker.
nodes.
Firewalls and routes that were configured during cluster creation
are also deleted.
Other Google Compute Engine resources that might be in use by the cluster
(e.g. load balancer resources) will not be deleted if they weren't present
at the initial create time.
Args:
request: (ContainerProjectsZonesClustersDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'container.projects.zones.clusters.delete',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[u'name'],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}',
request_field='',
request_type_name=u'ContainerProjectsZonesClustersDeleteRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the details for a specific cluster.
Args:
request: (ContainerProjectsZonesClustersGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Cluster) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'container.projects.zones.clusters.get',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[u'name'],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}',
request_field='',
request_type_name=u'ContainerProjectsZonesClustersGetRequest',
response_type_name=u'Cluster',
supports_download=False,
)
def LegacyAbac(self, request, global_params=None):
r"""Enables or disables the ABAC authorization mechanism on a cluster.
Args:
request: (SetLegacyAbacRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('LegacyAbac')
return self._RunMethod(
config, request, global_params=global_params)
LegacyAbac.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.legacyAbac',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/legacyAbac',
request_field='<request>',
request_type_name=u'SetLegacyAbacRequest',
response_type_name=u'Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists all clusters owned by a project in either the specified zone or all.
zones.
Args:
request: (ContainerProjectsZonesClustersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListClustersResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'container.projects.zones.clusters.list',
ordered_params=[u'projectId', u'zone'],
path_params=[u'projectId', u'zone'],
query_params=[u'parent'],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters',
request_field='',
request_type_name=u'ContainerProjectsZonesClustersListRequest',
response_type_name=u'ListClustersResponse',
supports_download=False,
)
def Locations(self, request, global_params=None):
r"""Sets the locations for a specific cluster.
Args:
request: (SetLocationsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Locations')
return self._RunMethod(
config, request, global_params=global_params)
Locations.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.locations',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/locations',
request_field='<request>',
request_type_name=u'SetLocationsRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Logging(self, request, global_params=None):
r"""Sets the logging service for a specific cluster.
Args:
request: (SetLoggingServiceRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Logging')
return self._RunMethod(
config, request, global_params=global_params)
Logging.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.logging',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/logging',
request_field='<request>',
request_type_name=u'SetLoggingServiceRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Master(self, request, global_params=None):
r"""Updates the master for a specific cluster.
Args:
request: (UpdateMasterRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Master')
return self._RunMethod(
config, request, global_params=global_params)
Master.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.master',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/master',
request_field='<request>',
request_type_name=u'UpdateMasterRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Monitoring(self, request, global_params=None):
r"""Sets the monitoring service for a specific cluster.
Args:
request: (SetMonitoringServiceRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Monitoring')
return self._RunMethod(
config, request, global_params=global_params)
Monitoring.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.monitoring',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/monitoring',
request_field='<request>',
request_type_name=u'SetMonitoringServiceRequest',
response_type_name=u'Operation',
supports_download=False,
)
def ResourceLabels(self, request, global_params=None):
r"""Sets labels on a cluster.
Args:
request: (SetLabelsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('ResourceLabels')
return self._RunMethod(
config, request, global_params=global_params)
ResourceLabels.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.resourceLabels',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}/resourceLabels',
request_field='<request>',
request_type_name=u'SetLabelsRequest',
response_type_name=u'Operation',
supports_download=False,
)
def SetMaintenancePolicy(self, request, global_params=None):
r"""Sets the maintenance policy for a cluster.
Args:
request: (SetMaintenancePolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetMaintenancePolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetMaintenancePolicy.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.setMaintenancePolicy',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMaintenancePolicy',
request_field='<request>',
request_type_name=u'SetMaintenancePolicyRequest',
response_type_name=u'Operation',
supports_download=False,
)
def SetMasterAuth(self, request, global_params=None):
r"""Used to set master auth materials. Currently supports :-.
Changing the admin password for a specific cluster.
This can be either via password generation or explicitly set.
Modify basic_auth.csv and reset the K8S API server.
Args:
request: (SetMasterAuthRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetMasterAuth')
return self._RunMethod(
config, request, global_params=global_params)
SetMasterAuth.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.setMasterAuth',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setMasterAuth',
request_field='<request>',
request_type_name=u'SetMasterAuthRequest',
response_type_name=u'Operation',
supports_download=False,
)
def SetNetworkPolicy(self, request, global_params=None):
r"""Enables/Disables Network Policy for a cluster.
Args:
request: (SetNetworkPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SetNetworkPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetNetworkPolicy.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.setNetworkPolicy',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:setNetworkPolicy',
request_field='<request>',
request_type_name=u'SetNetworkPolicyRequest',
response_type_name=u'Operation',
supports_download=False,
)
def StartIpRotation(self, request, global_params=None):
r"""Start master IP rotation.
Args:
request: (StartIPRotationRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('StartIpRotation')
return self._RunMethod(
config, request, global_params=global_params)
StartIpRotation.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.clusters.startIpRotation',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}:startIpRotation',
request_field='<request>',
request_type_name=u'StartIPRotationRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates the settings for a specific cluster.
Args:
request: (UpdateClusterRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'container.projects.zones.clusters.update',
ordered_params=[u'projectId', u'zone', u'clusterId'],
path_params=[u'clusterId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/clusters/{clusterId}',
request_field='<request>',
request_type_name=u'UpdateClusterRequest',
response_type_name=u'Operation',
supports_download=False,
)
class ProjectsZonesOperationsService(base_api.BaseApiService):
"""Service class for the projects_zones_operations resource."""
_NAME = u'projects_zones_operations'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsZonesOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Cancels the specified operation.
Args:
request: (CancelOperationRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'container.projects.zones.operations.cancel',
ordered_params=[u'projectId', u'zone', u'operationId'],
path_params=[u'operationId', u'projectId', u'zone'],
query_params=[],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/operations/{operationId}:cancel',
request_field='<request>',
request_type_name=u'CancelOperationRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the specified operation.
Args:
request: (ContainerProjectsZonesOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'container.projects.zones.operations.get',
ordered_params=[u'projectId', u'zone', u'operationId'],
path_params=[u'operationId', u'projectId', u'zone'],
query_params=[u'name'],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/operations/{operationId}',
request_field='',
request_type_name=u'ContainerProjectsZonesOperationsGetRequest',
response_type_name=u'Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists all operations in a project in a specific zone or all zones.
Args:
request: (ContainerProjectsZonesOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'container.projects.zones.operations.list',
ordered_params=[u'projectId', u'zone'],
path_params=[u'projectId', u'zone'],
query_params=[u'parent'],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/operations',
request_field='',
request_type_name=u'ContainerProjectsZonesOperationsListRequest',
response_type_name=u'ListOperationsResponse',
supports_download=False,
)
class ProjectsZonesService(base_api.BaseApiService):
"""Service class for the projects_zones resource."""
_NAME = u'projects_zones'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsZonesService, self).__init__(client)
self._upload_configs = {
}
def GetServerconfig(self, request, global_params=None):
r"""Returns configuration info about the Kubernetes Engine service.
Args:
request: (ContainerProjectsZonesGetServerconfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ServerConfig) The response message.
"""
config = self.GetMethodConfig('GetServerconfig')
return self._RunMethod(
config, request, global_params=global_params)
GetServerconfig.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'container.projects.zones.getServerconfig',
ordered_params=[u'projectId', u'zone'],
path_params=[u'projectId', u'zone'],
query_params=[u'name'],
relative_path=u'v1alpha1/projects/{projectId}/zones/{zone}/serverconfig',
request_field='',
request_type_name=u'ContainerProjectsZonesGetServerconfigRequest',
response_type_name=u'ServerConfig',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(ContainerV1alpha1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns NOT_FOUND error if.
the resource does not exist. Returns an empty policy if the resource exists
but does not have a policy set.
Authorization requires the Google IAM permission
`container.clusters.getIamPolicy` on the specified resource.
Args:
request: (ContainerProjectsGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleIamV1Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}:getIamPolicy',
http_method=u'POST',
method_id=u'container.projects.getIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1alpha1/{+resource}:getIamPolicy',
request_field=u'googleIamV1GetIamPolicyRequest',
request_type_name=u'ContainerProjectsGetIamPolicyRequest',
response_type_name=u'GoogleIamV1Policy',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy for a resource. Replaces any existing.
policy.
Authorization requires the Google IAM permission
'container.clusters.setIamPolicy' on the specified resource.
Args:
request: (ContainerProjectsSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleIamV1Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}:setIamPolicy',
http_method=u'POST',
method_id=u'container.projects.setIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1alpha1/{+resource}:setIamPolicy',
request_field=u'googleIamV1SetIamPolicyRequest',
request_type_name=u'ContainerProjectsSetIamPolicyRequest',
response_type_name=u'GoogleIamV1Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
There is no permission required to make this API call.
Args:
request: (ContainerProjectsTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleIamV1TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha1/projects/{projectsId}:testIamPermissions',
http_method=u'POST',
method_id=u'container.projects.testIamPermissions',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1alpha1/{+resource}:testIamPermissions',
request_field=u'googleIamV1TestIamPermissionsRequest',
request_type_name=u'ContainerProjectsTestIamPermissionsRequest',
response_type_name=u'GoogleIamV1TestIamPermissionsResponse',
supports_download=False,
)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
ce38fb44a64fcfae8083fd6ce3fe008b981d12e2 | 5de718a2ab00460f59621e1e3c100b37c0853f61 | /env/Lib/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py | 426f3e3ef410c599f02a37d452f04c4dfe17d460 | [] | no_license | HenryVo31/Connect | 3fd60d893edd1199663878b7b68505e57a410dd6 | 3783e5b4d6b58f19e37ccff66501cb78c35c1500 | refs/heads/master | 2023-02-13T14:21:12.692446 | 2021-01-08T21:40:16 | 2021-01-08T21:40:16 | 295,485,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,382 | py | # mssql/zxjdbc.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
.. note:: Jython is not supported by current versions of SQLAlchemy. The
zxjdbc dialect should be considered as experimental.
""" # noqa
from .base import MSDialect
from .base import MSExecutionContext
from ... import engine
from ...connectors.zxJDBC import ZxJDBCConnector
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (
self.isinsert or self.isupdate or self.isdelete
) and self.compiled.returning:
self._result_proxy = engine.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table
)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = "jtds:sqlserver"
jdbc_driver_name = "net.sourceforge.jtds.jdbc.Driver"
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x) for x in connection.connection.dbversion.split(".")
)
dialect = MSDialect_zxjdbc
| [
"trungsonvo2001@gmail.com"
] | trungsonvo2001@gmail.com |
714e7c3f7f9102593f2a2bbf1a7f7c6aad0e2d64 | 2cbe01c30ab6cb77973bc2b19b2e573481205ef2 | /poetry/console/commands/debug/resolve.py | ad90f4fac2026268f57ed5de72e8801e4e9b7124 | [
"GPL-3.0-or-later",
"LGPL-3.0-or-later",
"LGPL-2.1-only",
"LGPL-3.0-only",
"BSD-4-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-3.0-only",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | markovendelin/poetry | 9dc93d9db78b67eae71501761fdaf4db66d76a90 | de73fa07386be26d32bf15044fd81bf979787b9f | refs/heads/master | 2020-03-21T11:30:00.800918 | 2018-06-18T22:58:03 | 2018-06-18T22:58:03 | 138,509,629 | 0 | 0 | MIT | 2018-06-24T19:28:08 | 2018-06-24T19:28:08 | null | UTF-8 | Python | false | false | 3,628 | py | import re
from typing import List
from ..command import Command
class DebugResolveCommand(Command):
    """
    Debugs dependency resolution.
    debug:resolve
        { package?* : packages to resolve. }
        { --E|extras=* : Extras to activate for the dependency. }
        { --python= : Python version(s) to use for resolution. }
    """
    # NOTE: the docstring above is parsed by cleo to define the command's
    # name, arguments and options -- do not reformat it.

    _loggers = ["poetry.repositories.pypi_repository"]

    def handle(self):
        """Resolve the requested packages (or the whole project) and print the result."""
        from poetry.packages import Dependency
        from poetry.packages import ProjectPackage
        from poetry.puzzle import Solver
        from poetry.repositories.repository import Repository
        from poetry.semver import parse_constraint

        packages = self.argument("package")

        if not packages:
            # No explicit packages given: resolve the current project itself.
            package = self.poetry.package
        else:
            # Build a synthetic project package holding only the requested deps.
            requirements = self._determine_requirements(packages)
            requirements = self._format_requirements(requirements)

            # validate requirements format
            for constraint in requirements.values():
                parse_constraint(constraint)

            dependencies = []
            for name, constraint in requirements.items():
                dep = Dependency(name, constraint)

                # --extras may be repeated and/or contain space-separated lists.
                extras = []
                for extra in self.option("extras"):
                    if " " in extra:
                        extras += [e.strip() for e in extra.split(" ")]
                    else:
                        extras.append(extra)

                for ex in extras:
                    dep.extras.append(ex)

                dependencies.append(dep)

            package = ProjectPackage(
                self.poetry.package.name, self.poetry.package.version
            )
            package.python_versions = (
                self.option("python") or self.poetry.package.python_versions
            )

            for dep in dependencies:
                package.requires.append(dep)

        solver = Solver(
            package, self.poetry.pool, Repository(), Repository(), self.output
        )

        ops = solver.solve()

        self.line("")
        self.line("Resolution results:")
        self.line("")

        for op in ops:
            package = op.package
            self.line(
                " - <info>{}</info> (<comment>{}</comment>)".format(
                    package.name, package.version
                )
            )

    def _determine_requirements(self, requires):  # type: (List[str]) -> List[str]
        """Normalize name/version pairs into "name constraint" strings ("*" if absent)."""
        if not requires:
            return []

        requires = self._parse_name_version_pairs(requires)
        result = []
        for requirement in requires:
            if "version" not in requirement:
                requirement["version"] = "*"

            result.append("{} {}".format(requirement["name"], requirement["version"]))

        return result

    def _parse_name_version_pairs(self, pairs):  # type: (list) -> list
        """Split "name=constraint" / "name:constraint" / "name constraint" inputs.

        Returns a list of dicts carrying a "name" key and, when a constraint
        was supplied, a "version" key.
        """
        result = []

        for i in range(len(pairs)):
            # Normalize the first '=', ':' or ' ' separator to a single space.
            pair = re.sub("^([^=: ]+)[=: ](.*)$", "\\1 \\2", pairs[i].strip())
            pair = pair.strip()

            if " " in pair:
                # BUGFIX: use maxsplit=1 (was 2) so constraints that contain
                # spaces themselves (e.g. ">= 1.0") stay in one piece instead
                # of raising "too many values to unpack".
                name, version = pair.split(" ", 1)
                result.append({"name": name, "version": version})
            else:
                result.append({"name": pair})

        return result

    def _format_requirements(self, requirements):  # type: (List[str]) -> dict
        """Convert "name constraint" strings into a {name: constraint} mapping."""
        requires = {}
        requirements = self._parse_name_version_pairs(requirements)
        for requirement in requirements:
            requires[requirement["name"]] = requirement["version"]

        return requires
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
5b1119992f2bd6a9b3e95510d7c56c29898df158 | f4dd8aa4e5476ffde24e27273dd47913c7f9177a | /Dlv2_safe2/tests/parser/pasi-brew-eite-99-example-buy-car.test.py | 4b81a4af09c099eeba6884ec3e5b68744983223a | [
"Apache-2.0"
] | permissive | dave90/Dlv_safe2 | e56071ec1b07c45defda571cb721852e2391abfb | f127f413e3f35d599554e64aaa918bc1629985bc | refs/heads/master | 2020-05-30T10:44:13.473537 | 2015-07-12T12:35:22 | 2015-07-12T12:35:22 | 38,256,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | input = """
% Facts which are not disputable are declared as heads of 'top'
rule(top).
rule(r11).
rule(r12).
rule(r13).
rule(r21).
rule(r22).
rule(r23).
rule(r24).
rule(r25).
rule(r26).
rule(r27).
rule(r28).
rule(r29).
rule(r31).
rule(r32).
rule(r33).
rule(r34).
rule(r35).
rule(r36).
rule(r37).
rule(r38).
rule(r39).
rule(r41).
rule(r42).
rule(r43).
rule(r44).
rule(r45).
rule(r46).
rule(r47).
rule(r48).
rule(r49).
head(expensive_C,top).
head(safe_C,top).
head(safe_V,top).
head(nice_P,top).
head(fast_P,top).
head(neg_buy_C,r11). nbl(buy_C,r11). pbl(expensive_C,r11).
head(neg_buy_V,r12). nbl(buy_V,r12). pbl(expensive_V,r12).
head(neg_buy_P,r13). nbl(buy_P,r13). pbl(expensive_P,r13).
head(buy_C,r21). nbl(neg_buy_C,r21). pbl(safe_C,r21).
head(buy_V,r22). nbl(neg_buy_V,r22). pbl(safe_V,r22).
head(buy_P,r23). nbl(neg_buy_P,r23). pbl(safe_P,r23).
head(neg_buy_C,r24). pbl(buy_V,r24). pbl(safe_V,r24).
head(neg_buy_C,r27). pbl(buy_P,r27). pbl(safe_P,r27).
head(neg_buy_V,r25). pbl(buy_P,r25). pbl(safe_P,r25).
head(neg_buy_V,r28). pbl(buy_C,r28). pbl(safe_C,r28).
head(neg_buy_P,r26). pbl(buy_C,r26). pbl(safe_C,r26).
head(neg_buy_P,r29). pbl(buy_V,r29). pbl(safe_V,r29).
head(buy_C,r31). nbl(neg_buy_C,r31). pbl(nice_C,r31).
head(buy_V,r32). nbl(neg_buy_V,r32). pbl(nice_V,r32).
head(buy_P,r33). nbl(neg_buy_P,r33). pbl(nice_P,r33).
head(neg_buy_C,r34). pbl(buy_V,r34). pbl(nice_V,r34).
head(neg_buy_C,r37). pbl(buy_P,r37). pbl(nice_P,r37).
head(neg_buy_V,r35). pbl(buy_P,r35). pbl(nice_P,r35).
head(neg_buy_V,r38). pbl(buy_C,r38). pbl(nice_C,r38).
head(neg_buy_P,r36). pbl(buy_C,r36). pbl(nice_C,r36).
head(neg_buy_P,r39). pbl(buy_V,r39). pbl(nice_V,r39).
head(buy_C,r41). nbl(neg_buy_C,r41). pbl(fast_C,r41).
head(buy_V,r42). nbl(neg_buy_V,r42). pbl(fast_V,r42).
head(buy_P,r43). nbl(neg_buy_P,r43). pbl(fast_P,r43).
head(neg_buy_C,r44). pbl(buy_V,r44). pbl(fast_V,r44).
head(neg_buy_C,r47). pbl(buy_P,r47). pbl(fast_P,r47).
head(neg_buy_P,r46). pbl(buy_C,r46). pbl(fast_C,r46).
head(neg_buy_P,r49). pbl(buy_V,r49). pbl(fast_V,r49).
head(neg_buy_V,r45). pbl(buy_P,r45). pbl(fast_P,r45).
head(neg_buy_V,r48). pbl(buy_C,r48). pbl(fast_C,r48).
opp(buy_C,neg_buy_C).
opp(buy_V,neg_buy_V).
opp(buy_P,neg_buy_P).
% define preferences
level(0,top).
level(1,r11).
level(1,r12).
level(1,r13).
level(2,r21).
level(2,r22).
level(2,r23).
level(2,r24).
level(2,r25).
level(2,r26).
level(2,r27).
level(2,r28).
level(2,r29).
level(3,r31).
level(3,r32).
level(3,r33).
level(3,r34).
level(3,r35).
level(3,r36).
level(3,r37).
level(3,r38).
level(3,r39).
level(4,r41).
level(4,r42).
level(4,r43).
level(4,r44).
level(4,r45).
level(4,r46).
level(4,r47).
level(4,r48).
level(4,r49).
kl(0,1). kl(1,2). kl(2,3). kl(3,4).
pr(X,Y) :- kl(L1,L2), level(L1,X),level(L2,Y).
"""
output = """
% Facts which are not disputable are declared as heads of 'top'
rule(top).
rule(r11).
rule(r12).
rule(r13).
rule(r21).
rule(r22).
rule(r23).
rule(r24).
rule(r25).
rule(r26).
rule(r27).
rule(r28).
rule(r29).
rule(r31).
rule(r32).
rule(r33).
rule(r34).
rule(r35).
rule(r36).
rule(r37).
rule(r38).
rule(r39).
rule(r41).
rule(r42).
rule(r43).
rule(r44).
rule(r45).
rule(r46).
rule(r47).
rule(r48).
rule(r49).
head(expensive_C,top).
head(safe_C,top).
head(safe_V,top).
head(nice_P,top).
head(fast_P,top).
head(neg_buy_C,r11). nbl(buy_C,r11). pbl(expensive_C,r11).
head(neg_buy_V,r12). nbl(buy_V,r12). pbl(expensive_V,r12).
head(neg_buy_P,r13). nbl(buy_P,r13). pbl(expensive_P,r13).
head(buy_C,r21). nbl(neg_buy_C,r21). pbl(safe_C,r21).
head(buy_V,r22). nbl(neg_buy_V,r22). pbl(safe_V,r22).
head(buy_P,r23). nbl(neg_buy_P,r23). pbl(safe_P,r23).
head(neg_buy_C,r24). pbl(buy_V,r24). pbl(safe_V,r24).
head(neg_buy_C,r27). pbl(buy_P,r27). pbl(safe_P,r27).
head(neg_buy_V,r25). pbl(buy_P,r25). pbl(safe_P,r25).
head(neg_buy_V,r28). pbl(buy_C,r28). pbl(safe_C,r28).
head(neg_buy_P,r26). pbl(buy_C,r26). pbl(safe_C,r26).
head(neg_buy_P,r29). pbl(buy_V,r29). pbl(safe_V,r29).
head(buy_C,r31). nbl(neg_buy_C,r31). pbl(nice_C,r31).
head(buy_V,r32). nbl(neg_buy_V,r32). pbl(nice_V,r32).
head(buy_P,r33). nbl(neg_buy_P,r33). pbl(nice_P,r33).
head(neg_buy_C,r34). pbl(buy_V,r34). pbl(nice_V,r34).
head(neg_buy_C,r37). pbl(buy_P,r37). pbl(nice_P,r37).
head(neg_buy_V,r35). pbl(buy_P,r35). pbl(nice_P,r35).
head(neg_buy_V,r38). pbl(buy_C,r38). pbl(nice_C,r38).
head(neg_buy_P,r36). pbl(buy_C,r36). pbl(nice_C,r36).
head(neg_buy_P,r39). pbl(buy_V,r39). pbl(nice_V,r39).
head(buy_C,r41). nbl(neg_buy_C,r41). pbl(fast_C,r41).
head(buy_V,r42). nbl(neg_buy_V,r42). pbl(fast_V,r42).
head(buy_P,r43). nbl(neg_buy_P,r43). pbl(fast_P,r43).
head(neg_buy_C,r44). pbl(buy_V,r44). pbl(fast_V,r44).
head(neg_buy_C,r47). pbl(buy_P,r47). pbl(fast_P,r47).
head(neg_buy_P,r46). pbl(buy_C,r46). pbl(fast_C,r46).
head(neg_buy_P,r49). pbl(buy_V,r49). pbl(fast_V,r49).
head(neg_buy_V,r45). pbl(buy_P,r45). pbl(fast_P,r45).
head(neg_buy_V,r48). pbl(buy_C,r48). pbl(fast_C,r48).
opp(buy_C,neg_buy_C).
opp(buy_V,neg_buy_V).
opp(buy_P,neg_buy_P).
% define preferences
level(0,top).
level(1,r11).
level(1,r12).
level(1,r13).
level(2,r21).
level(2,r22).
level(2,r23).
level(2,r24).
level(2,r25).
level(2,r26).
level(2,r27).
level(2,r28).
level(2,r29).
level(3,r31).
level(3,r32).
level(3,r33).
level(3,r34).
level(3,r35).
level(3,r36).
level(3,r37).
level(3,r38).
level(3,r39).
level(4,r41).
level(4,r42).
level(4,r43).
level(4,r44).
level(4,r45).
level(4,r46).
level(4,r47).
level(4,r48).
level(4,r49).
kl(0,1). kl(1,2). kl(2,3). kl(3,4).
pr(X,Y) :- kl(L1,L2), level(L1,X),level(L2,Y).
"""
| [
"davide@davide-All-Series"
] | davide@davide-All-Series |
b28f52ffc894b04f3a0898a1e8524c6aa8dcebb3 | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/output/ch5_code/src/Stepik.5.8.CodeChallenge.LongestPathInArbitraryDAG.py | 6148405bd0771f1e1961245ad63c9942a73cc473 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | import re
from graph.Graph import Graph
# DON'T USE THIS -- THERE ARE CLEAN IMPLEMENTATIONS OF THE DIFFERENT ALGORITHMS IN THE SUBDIRECTORIES.
# DON'T USE THIS -- THERE ARE CLEAN IMPLEMENTATIONS OF THE DIFFERENT ALGORITHMS IN THE SUBDIRECTORIES.
# DON'T USE THIS -- THERE ARE CLEAN IMPLEMENTATIONS OF THE DIFFERENT ALGORITHMS IN THE SUBDIRECTORIES.
# Parse the input file: line 1 = start node, line 2 = end node, remaining
# lines are edges written as "in->out:weight".
with open('/home/user/Downloads/dataset_240303_7.txt', mode='r', encoding='utf-8') as f:
    data = f.read()
lines = data.strip().split('\n')
start_node = int(lines[0].strip())
end_node = int(lines[1].strip())
g = Graph()
next_e_id = 0  # monotonically increasing suffix used to make edge ids unique
for l in lines[2:]:
    # Splitting on '->' or ':' yields exactly (in_node, out_node, weight).
    in_node, out_node, edge_weight = [int(v) for v in re.split('->|:', l)]
    if not g.has_node(in_node):
        g.insert_node(in_node)
    if not g.has_node(out_node):
        g.insert_node(out_node)
    g.insert_edge(f'E{next_e_id}', in_node, out_node, edge_weight)
    next_e_id += 1
# Populate node weights and backtracking info. Each node's data is a tuple where [0] is the calculated weight and [1] is
# the edge the incoming connection that was chosen to calculate that weight (used for backtracking).
#
# start_node should be a root node. Initialize its weight to 0, but initialize all other root node weights to None.
# A None weight is used as a marker to skip over these because we don't want to consider them.
check_nodes = set()  # nodes whose max weight still needs computing
ready_nodes = set()  # nodes whose weight (or None marker) is final
for node in g.get_nodes(): # Add all roots with None weight and None backtracking edge.
    if g.get_in_degree(node) == 0:
        initial_weight = None
        g.update_node_data(node, (initial_weight, None))
        check_nodes |= {g.get_edge_to(e) for e in g.get_outputs(node)}
        ready_nodes |= {node}
g.update_node_data(start_node, (0, None)) # Overwrite start_node root with 0 weight and None backtracking edge
# Run the algorithm, populating node weights and backtracking edges
while len(check_nodes) > 0:
    # NOTE: the `break` below restarts the scan after each processed node;
    # simple but quadratic-ish compared to walking a topological order.
    for node in check_nodes:
        incoming_nodes = {g.get_edge_from(e) for e in g.get_inputs(node)}
        if incoming_nodes.issubset(ready_nodes):
            # All predecessors are finalized: choose the incoming edge that
            # maximizes accumulated weight.
            incoming_accum_weights = {}
            for edge in g.get_inputs(node):
                source_node = g.get_edge_from(edge)
                source_node_weight, _ = g.get_node_data(source_node)
                edge_weight = g.get_edge_data(edge)
                # Roots that aren't start_node were initialized to a weight of None -- if you see them, skip them.
                if source_node_weight is not None:
                    incoming_accum_weights[edge] = source_node_weight + edge_weight
            if len(incoming_accum_weights) == 0:
                # Unreachable from start_node: propagate the None marker.
                max_edge = None
                max_weight = None
            else:
                max_edge = max(incoming_accum_weights, key=lambda e: incoming_accum_weights[e])
                max_weight = incoming_accum_weights[max_edge]
            g.update_node_data(node, (max_weight, max_edge))
            check_nodes.remove(node)
            check_nodes |= {g.get_edge_to(e) for e in g.get_outputs(node)}
            ready_nodes |= {node}
            break
# Now backtrack from the end_node to start_node to get the path.
longest_path_length, _ = g.get_node_data(end_node)
longest_path = [end_node]
_, backtracking_edge = g.get_node_data(end_node)
while backtracking_edge is not None:
    prev_node = g.get_edge_from(backtracking_edge)
    longest_path.insert(0, prev_node)
    _, backtracking_edge = g.get_node_data(prev_node)
print(f'{longest_path_length}')
print(f'{"->".join([str(n) for n in longest_path])}')
| [
"offbynull@gmail.com"
] | offbynull@gmail.com |
4b74c5ebe349418204db0b31dfe8ddd4bc9fb347 | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /顾天媛2018010980/操作系统实验/作业1.py | 62b65b5fea82293af555a8a363ada38f3e1ddb78 | [] | no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 1,053 | py | # !/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
Author: Ty.Gu
Purpose: platform
Created: 24/6/2020
"""
# 作业1. 采用python语言获取操作系统信息;提示:使用Python内置platform工具包
import platform

# Print a summary of the host operating system via the stdlib platform module,
# one item per line (same order/output as querying each function directly).
os_facts = (
    platform.platform(),      # OS name + version, e.g. Windows-10-10.0.18362-SP0
    platform.system(),        # OS name only, e.g. Windows
    platform.version(),       # OS version number, e.g. 10.0.18362
    platform.architecture(),  # bitness tuple, e.g. ('64bit', 'WindowsPE')
    platform.machine(),       # machine type, e.g. AMD64
    platform.node(),          # computer's network name
    platform.processor(),     # CPU description string
    platform.uname(),         # all of the above combined in one namedtuple
)
for fact in os_facts:
    print(fact)
| [
"noreply@github.com"
] | wanghan79.noreply@github.com |
0d11e1af0a846188e4cfc990c895a7c257e27a99 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27236.py | 59ce135d1576318993ad7e9807ed5f24297e4cf2 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | # python - plotly: remove plotted data from graph
# NOTE(review): `z` is presumably a plotly stream/trace object created earlier
# (per the snippet's title); writing empty x/y lists clears the plotted data
# -- confirm against the surrounding context.
z.write(x=[], y=[])
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
006359c0751853ad080769a798f31b41f16548e4 | 248b1c62e3f06e82c6fc5ef557cc1af506763d8c | /cogs/give.py | 9aa565d35f362044d249dda6dbf9d27208029fe1 | [
"MIT"
] | permissive | virtualCrypto-discord/VCrypto-Utilities | bd091ba5fbccf7d59e137ee5ba5b2077bd01b6de | f735159cc45f8601f5d9a50f2c61ca6ec09d87ed | refs/heads/master | 2023-03-12T19:23:30.833926 | 2021-03-03T07:06:50 | 2021-03-03T07:06:50 | 344,033,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | from discord.ext import commands
from bot import MyBot
from virtualcrypto import AsyncVirtualCryptoClient
import discord
class Give(commands.Cog):
    """Admin-only cog that mass-distributes this guild's VirtualCrypto currency."""

    def __init__(self, bot: MyBot):
        self.bot = bot
        # Shortcut to the bot's VirtualCrypto API client.
        self.vcrypto: AsyncVirtualCryptoClient = self.bot.vcrypto

    @commands.command()
    @commands.has_permissions(administrator=True)
    async def give(self, ctx: commands.Context, amount: int, *, users):
        """メンションした相手全てに通貨を配布します。"""
        # The docstring above is the user-facing help text ("Distributes
        # currency to every mentioned member."); kept in Japanese on purpose.
        currency = await self.vcrypto.get_currency_by_guild(ctx.guild.id)
        if currency is None:
            # "No currency has been created on this server."
            await ctx.send("このサーバーでは通貨は作成されていません。")
            return
        # The `users` text argument is ignored; the actual recipients are the
        # members mentioned in the invoking message.
        users = ctx.message.mentions
        if len(users) > 15:
            # "You can only distribute to up to 15 members."
            await ctx.send("15人までに配布できます。")
            return
        balance = await self.bot.get_balance(ctx.guild.id)
        if balance.amount < amount * len(users):
            # "You are short <n><unit> of currency."
            await ctx.send(f"通貨が{amount * len(users) - balance.amount}{currency.unit}足りません。")
            return
        # "Distributing..."
        await ctx.send("配布しています...")
        for user in users:
            # One transaction per recipient, `amount` each.
            await self.vcrypto.create_user_transaction(
                unit=currency.unit,
                receiver_discord_id=user.id,
                amount=amount
            )
        await self.bot.refresh_cache()
        # "Distribution complete."
        await ctx.send("配布完了しました。")
def setup(bot):
    """Extension entry point used by discord.py's load_extension machinery."""
    cog = Give(bot)
    return bot.add_cog(cog)
| [
"sumito@izumita.com"
] | sumito@izumita.com |
e7c80e9ae3396015fadb7df42e1afe4f03dcf766 | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/.install/.backup/lib/third_party/ruamel/yaml/__init__.py | 9be208205ea694383b43ab60c013defe2f5e4b6e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 2,997 | py | # coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
# install_requires of ruamel.base is not really required but the old
# ruamel.base installed __init__.py, and thus a new version should
# be installed at some point
# Package metadata dict; presumably consumed by ruamel's shared setup tooling
# (setup.py), and `version_info` below also feeds this module's __version__.
_package_data = dict(
    full_package_name="ruamel.yaml",
    version_info=(0, 11, 11),
    author="Anthon van der Neut",
    author_email="a.van.der.neut@ruamel.eu",
    description="ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order", # NOQA
    entry_points=None,
    # Extra requirements keyed by Python version (ordereddict backport for py2).
    install_requires=dict(
        any=[],
        py26=["ruamel.ordereddict"],
        py27=["ruamel.ordereddict"]
    ),
    # C extension built from the bundled libyaml-derived sources under ext/.
    ext_modules=[dict(
        name="_ruamel_yaml",
        src=["ext/_ruamel_yaml.c", "ext/api.c", "ext/writer.c", "ext/dumper.c",
             "ext/loader.c", "ext/reader.c", "ext/scanner.c", "ext/parser.c",
             "ext/emitter.c"],
        lib=[],
        # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n' # NOQA
    )
    ],
    classifiers=[
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python :: Implementation :: Jython",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup"
    ],
    windows_wheels=True,
    read_the_docs='yaml',
)
# < from ruamel.util.new import _convert_version
def _convert_version(tup):
"""create a PEP 386 pseudo-format conformant string from tuple tup"""
ret_val = str(tup[0]) # first is always digit
next_sep = "." # separator for next extension, can be "" or "."
for x in tup[1:]:
if isinstance(x, int):
ret_val += next_sep + str(x)
next_sep = '.'
continue
first_letter = x[0].lower()
next_sep = ''
if first_letter in 'abcr':
ret_val += 'rc' if first_letter == 'r' else first_letter
elif first_letter in 'pd':
ret_val += '.post' if first_letter == 'p' else '.dev'
return ret_val
# <
# Derive the public version attributes from the metadata above, then drop the
# one-shot helper from the module namespace.
version_info = _package_data['version_info']
__version__ = _convert_version(version_info)

del _convert_version

# Prefer the C-accelerated loader/dumper when the _ruamel_yaml extension is
# importable; ValueError is also caught because Jython raises it here.
try:
    from .cyaml import * # NOQA
    __with_libyaml__ = True
except (ImportError, ValueError): # for Jython
    __with_libyaml__ = False

# body extracted to main.py
# Fall back to an absolute import where the relative form fails.
try:
    from .main import * # NOQA
except ImportError:
    from ruamel.yaml.main import * # NOQA
| [
"toork@uw.edu"
] | toork@uw.edu |
cdda675aa756d1891cbf54b705896010335f4458 | 9a45b5f0f5b58c0ce7b9db720965d47d7b322e98 | /wsgi.py | d72891380c56e4a320ef6bb8052a6a8e4a161b4c | [] | no_license | tiagocordeiro/flask-lme-chart | fd32034431fea9412ea27124ba3fb4de0bfdc930 | 6a534869034965bb02365585c600188fd1033f87 | refs/heads/master | 2023-09-01T17:10:54.652987 | 2021-08-10T18:54:29 | 2021-08-10T18:55:49 | 95,251,498 | 6 | 1 | null | 2023-09-07T10:32:09 | 2017-06-23T19:47:58 | Python | UTF-8 | Python | false | false | 65 | py | from lme.app import create_app
# WSGI entry point: expose the created app under both common attribute names
# ("application" for WSGI servers, "app" for Flask tooling).
application = app = create_app()
| [
"tiago@mulhergorila.com"
] | tiago@mulhergorila.com |
61deb11fc76a069f92fdd3010e5cabe67388e589 | ea6c97980ca32a61c325d0934e463399bed53b6a | /app/migrations/0005_auto_20210727_2343.py | 2b84a58e65de5d8bf67aacb653ae4ecc52e7bbea | [] | no_license | WambiruL/chat-app | 4201ffe97e3c565669e2212ab337ac28e27bce2b | 7e445afd61d5b01599fedeea60fbea33a3459d07 | refs/heads/master | 2023-06-26T06:57:30.467106 | 2021-07-29T08:27:49 | 2021-07-29T08:27:49 | 389,715,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-27 20:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema change: move from room-based chat to direct messages.

    Drops the Room model and Message's room/user links, then gives Message
    explicit sender/receiver user FKs plus a `seen` flag, ordered by
    creation time.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0004_auto_20210727_2300'),
    ]

    operations = [
        # Rooms are gone entirely.
        migrations.DeleteModel(
            name='Room',
        ),
        migrations.AlterModelOptions(
            name='message',
            options={'ordering': ['date_created']},
        ),
        migrations.RenameField(
            model_name='message',
            old_name='date',
            new_name='date_created',
        ),
        migrations.RemoveField(
            model_name='message',
            name='room',
        ),
        migrations.RemoveField(
            model_name='message',
            name='user',
        ),
        # Direct-message endpoints replace the old room/user pair.
        migrations.AddField(
            model_name='message',
            name='receiver',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='received_messages', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='message',
            name='seen',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='message',
            name='sender',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_messages', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='message',
            name='message',
            field=models.TextField(),
        ),
    ]
| [
"wambirulorraine@gmail.com"
] | wambirulorraine@gmail.com |
3f7ef408f371de3afba3738f038af48d2f611471 | 994216797f013a4a8926a5689aabd1653bf93a18 | /pkg/fibonacci.py | 2bcc9c0bb1f7b9295999beea7cc96da1577b469d | [] | no_license | Jamie-Cheon/Python | 2387a0e3abef7e7ed594e994d812faa1b322ce19 | 8f0351e58baae762b2cb2f5b2ce12d99358459e1 | refs/heads/master | 2022-06-15T13:47:42.368609 | 2020-05-08T09:27:21 | 2020-05-08T09:27:21 | 262,274,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | class Fibonacci:
    def __init__(self, title="fibonacci"):
        # Use the caller-supplied title if one is passed in; otherwise
        # default the title to "fibonacci".
        self.title = title
def fib(n):
a, b = 0, 1
while a < n:
print(a, end=' ')
a, b = b, a+b
print()
def fib2(n):
result = []
a, b = 0, 1
while a < n:
result.append(a)
a, b = b, a+b
return result
| [
"jamiecheon55@gmail.com"
] | jamiecheon55@gmail.com |
de522ff95f318f182e74c4886331fdfdbb87dc3e | cf54adda6874a4256401e9e4eb28f353b28ae74b | /python-modules/MySQLdb-dict-cursor.py | 41d807e759d6e536a2a6e039d2ab55a92e114269 | [] | no_license | oraant/study | c0ea4f1a7a8c3558c0eac4b4108bc681a54e8ebf | 7bce20f2ea191d904b4e932c8d0abe1b70a54f7e | refs/heads/master | 2020-09-23T02:08:07.279705 | 2016-11-21T06:30:26 | 2016-11-21T06:30:26 | 66,995,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # coding:utf-8
import MySQLdb
from MySQLdb.cursors import DictCursor
# Demo (Python 2 syntax): fetch rows as dicts instead of tuples.
conn = MySQLdb.connect('localhost','django','django','test',3306)
cursor = conn.cursor(cursorclass=DictCursor)  # DictCursor keys rows by column name
cursor.execute('select ID, name from ttt')
print cursor.fetchall()
# Conclusions:
# A NULL in the database becomes None in Python.
# Dict keys use the same case as the column list (uppercase stays uppercase).
| [
"oraant777@gmail.com"
] | oraant777@gmail.com |
e07f99f6027d5c036e5441b11219946a60d927ef | 0514c992dc9dd2c54bc757c8ca4487ca3a8434c5 | /miscell/demo.py | 2eacb3cdde0514b7505c11818392e249cd913b8f | [] | no_license | amalmhn/PythonDjangoProjects | 0878fffad4350d135197ceb0612dd0765b075bb7 | 3a065bb93135075c78a7bff965f83e605d8de4bc | refs/heads/master | 2023-04-25T05:50:16.768383 | 2021-05-03T06:50:06 | 2021-05-03T06:50:06 | 315,811,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | size=int(input("Enter the size of the stack"))
stk=[]  # backing list for the stack (capacity `size`, read from user input above)
top=0   # index of the next free slot == number of stored elements
n=1.    # menu-loop sentinel (note: a float literal); any non-zero keeps looping
def push(element):
    """Push `element` onto the global stack, refusing when it is full."""
    global top
    if top >= size:
        print('stack is full')
        return
    stk.insert(top, element)
    print('Element pushed')
    top += 1
    print('Top is now', top)
def pop():
    """Pop the most recently pushed element, refusing when the stack is empty."""
    global top
    if top <= 0:
        print('Stack is empty')
        return
    top -= 1
    print(stk[top], 'popped')
def display():
    """Print the stack contents from bottom to top, one element per line."""
    global top
    for element in stk[:top]:
        print(element)
# Interactive menu loop; `n` is re-read from the user at the end of each pass
# and the loop stops once 0 is entered.
while n!=0:
    option=int(input('Enter operation you want to perform 1)Push 2)Pop 3)Display'))
    if option==1:
        element=int(input('Enter the element'))
        push(element)
    elif option==2:
        pop()
    elif option==3:
        display()
    else:
        print('Invalid option')
n=int(input('Press "1" for continue, "0" for exit')) | [
"amalmhnofficial@gmail.com"
] | amalmhnofficial@gmail.com |
4423bfc12a256ce5fcc27ad43ef15cdf7d0537a8 | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/fluid/tests/unittests/test_bilinear_api.py | 24eae4797de85f371ed62e78c85b160f698ee9eb | [
"Apache-2.0"
] | permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 2,376 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
class TestBilinearAPI(unittest.TestCase):
    """Static-graph test: paddle.nn.Bilinear output shape under an Executor."""

    def test_api(self):
        with fluid.program_guard(fluid.default_startup_program(),
                                 fluid.default_main_program()):
            # Run on GPU when available, otherwise CPU.
            if core.is_compiled_with_cuda():
                place = core.CUDAPlace(0)
            else:
                place = core.CPUPlace()
            exe = fluid.Executor(place)
            # Graph placeholders for the two bilinear inputs.
            data1 = fluid.data(name='X1', shape=[5, 5], dtype='float32')
            data2 = fluid.data(name='X2', shape=[5, 4], dtype='float32')
            # Concrete values fed at run time.
            layer1 = np.random.random((5, 5)).astype('float32')
            layer2 = np.random.random((5, 4)).astype('float32')
            bilinear = paddle.nn.Bilinear(
                in1_features=5, in2_features=4, out_features=1000)
            ret = bilinear(data1, data2)
            exe.run(fluid.default_startup_program())
            ret_fetch = exe.run(feed={'X1': layer1,
                                      'X2': layer2},
                                fetch_list=[ret.name])
            # Batch of 5 -> (5, out_features).
            self.assertEqual(ret_fetch[0].shape, (5, 1000))
class TestBilinearAPIDygraph(unittest.TestCase):
    """Dygraph (eager-mode) test: paddle.nn.Bilinear output shape."""

    def test_api(self):
        paddle.disable_static()
        layer1 = np.random.random((5, 5)).astype('float32')
        layer2 = np.random.random((5, 4)).astype('float32')
        bilinear = paddle.nn.Bilinear(
            in1_features=5, in2_features=4, out_features=1000)
        ret = bilinear(paddle.to_tensor(layer1), paddle.to_tensor(layer2))
        # Batch of 5 -> [5, out_features].
        self.assertEqual(ret.shape, [5, 1000])
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | hutuxian.noreply@github.com |
e383d2fc173e8a7434d12f2956d71aa7370dc271 | f3aa6bf16293beb94c7f63df28e8dfd27c8b603f | /codes/contest/leetcode/3sum.py | 3347c3dd94b823257a8e51db0eb1d02325f52e1f | [] | no_license | Farenew/dirtysalt.github.io | b8a9ddd7787fd0659b478584682ec97e8e9be0b3 | 856e84adf22f6c82e55f5a7f843fbccfdf17109f | refs/heads/master | 2020-06-17T02:42:25.392147 | 2019-07-08T01:59:46 | 2019-07-08T01:59:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | #!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
# class Solution(object):
# def threeSum(self, nums):
# """
# :type nums: List[int]
# :rtype: List[List[int]]
# """
# nums.sort()
# n = len(nums)
# ans = []
# dedup = set()
#
# for i in range(n):
# target = 0 - nums[i]
# j, k = i + 1, n - 1
# while j < k:
# value = nums[j] + nums[k]
# if value == target:
# a, b, c = nums[i], nums[j], nums[k]
# value = (a, b, c)
# if value not in dedup:
# ans.append(value)
# dedup.add(value)
# j += 1
# elif value > target:
# k -= 1
# else:
# j += 1
# return ans
class Solution(object):
    def threeSum(self, nums):
        """Return all unique triples (a, b, c) from nums with a + b + c == 0.

        Sorts `nums` in place, then for each anchor element runs a two-pointer
        scan over the remaining suffix, skipping duplicate values at all three
        positions. Triples come back as ascending tuples.
        """
        nums.sort()
        size = len(nums)
        triples = []
        for anchor in range(size):
            # Skip repeated anchor values to avoid emitting duplicate triples.
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue
            want = -nums[anchor]
            lo, hi = anchor + 1, size - 1
            while lo < hi:
                pair_sum = nums[lo] + nums[hi]
                if pair_sum == want:
                    triples.append((nums[anchor], nums[lo], nums[hi]))
                    lo += 1
                    hi -= 1
                    # Step both pointers past duplicates of the matched pair.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
                elif pair_sum > want:
                    hi -= 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
                else:
                    lo += 1
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
        return triples
if __name__ == '__main__':
    # Smoke-test the solver on a few sample inputs and print the results.
    s = Solution()
    print(s.threeSum([-1, 0, 1, 2, -1, -4]))
    print(s.threeSum([-4, -2, -2, -2, 0, 1, 2, 2, 2, 3, 3, 4, 4, 6, 6]))
    print(s.threeSum([0] * 512))
| [
"dirtysalt1987@gmail.com"
] | dirtysalt1987@gmail.com |
7d38c9579b7e1f455d55e64c1c3aae3797d0cbdf | 6b791247919f7de90c8402abcca64b32edd7a29b | /lib/coginvasion/hood/DDSafeZoneLoader.py | 99815be7db2a7e56f45b1f5ada014a7d5b35295d | [
"Apache-2.0"
] | permissive | theclashingfritz/Cog-Invasion-Online-Dump | a9bce15c9f37b6776cecd80b309f3c9ec5b1ec36 | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | refs/heads/master | 2021-01-04T06:44:04.295001 | 2020-02-14T05:23:01 | 2020-02-14T05:23:01 | 240,434,213 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.hood.DDSafeZoneLoader
from lib.coginvasion.holiday.HolidayManager import HolidayType
import SafeZoneLoader, DDPlayground
class DDSafeZoneLoader(SafeZoneLoader.SafeZoneLoader):
    def __init__(self, hood, parentFSM, doneEvent):
        """Configure the Donald's Dock safe zone: playground class, music, DNA files, sfx."""
        SafeZoneLoader.SafeZoneLoader.__init__(self, hood, parentFSM, doneEvent)
        self.playground = DDPlayground.DDPlayground
        self.pgMusicFilename = 'phase_6/audio/bgm/DD_nbrhood.mid'
        self.interiorMusicFilename = 'phase_6/audio/bgm/DD_SZ_activity.mid'
        self.battleMusicFile = 'phase_3.5/audio/bgm/encntr_general_bg.mid'
        self.invasionMusicFiles = [
         'phase_12/audio/bgm/BossBot_CEO_v1.mid',
         'phase_9/audio/bgm/encntr_suit_winning.mid']
        self.tournamentMusicFiles = [
         'phase_3.5/audio/bgm/encntr_nfsmw_bg_1.ogg',
         'phase_3.5/audio/bgm/encntr_nfsmw_bg_2.ogg',
         'phase_3.5/audio/bgm/encntr_nfsmw_bg_3.ogg',
         'phase_3.5/audio/bgm/encntr_nfsmw_bg_4.ogg']
        self.bossBattleMusicFile = 'phase_7/audio/bgm/encntr_suit_winning_indoor.mid'
        self.dnaFile = 'phase_6/dna/donalds_dock_sz.pdna'
        self.szStorageDNAFile = 'phase_6/dna/storage_DD_sz.pdna'
        # Swap in the winter storage DNA while the Christmas holiday is active.
        self.szHolidayDNAFile = None
        if base.cr.holidayManager.getHoliday() == HolidayType.CHRISTMAS:
            self.szHolidayDNAFile = 'phase_6/dna/winter_storage_DD_sz.pdna'
        self.telescope = None
        self.birdNoise = 'phase_6/audio/sfx/SZ_DD_Seagull.ogg'
        return
    def load(self):
        """Load zone geometry, then stash the door-frame hole nodes on the HQ building."""
        SafeZoneLoader.SafeZoneLoader.load(self)
        hq = self.geom.find('**/*toon_landmark_hqDD*')
        hq.find('**/doorFrameHoleLeft_0').stash()
        hq.find('**/doorFrameHoleRight_0').stash()
        hq.find('**/doorFrameHoleLeft_1').stash()
        hq.find('**/doorFrameHoleRight_1').stash()
    def enter(self, requestStatus):
        # Run the base enter behavior, then enable the dock's white fog.
        SafeZoneLoader.SafeZoneLoader.enter(self, requestStatus)
        self.hood.setWhiteFog()
    def exit(self):
        # Clear the fog before delegating to the base class's exit handling.
        self.hood.setNoFog()
SafeZoneLoader.SafeZoneLoader.exit(self) | [
"theclashingfritz@users.noreply.github.com"
] | theclashingfritz@users.noreply.github.com |
e1db71bb2fd5a9128fd41ae825d57ec3fa777beb | ec635c82b4516c19c0f86489420222e3ee68ef72 | /nnlib/networks/resnet_cifar.py | 8c3b8bf14981b8c30c5cafec58f702f720d3b5ec | [
"MIT"
] | permissive | keyboardAnt/nnlib | 1e29443db7f71c5b87a36c923a32e9b15225f8cb | 8062403d6d5bd57af7047c68a295d19be980f8e7 | refs/heads/master | 2022-12-12T07:40:44.873705 | 2020-09-15T08:13:35 | 2020-09-15T08:13:35 | 295,259,611 | 1 | 0 | null | 2020-09-13T23:57:20 | 2020-09-13T23:57:19 | null | UTF-8 | Python | false | false | 4,323 | py | import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3-conv residual block for CIFAR ResNets (output channels = planes)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, norm_layer=nn.BatchNorm2d):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = norm_layer(planes)

        # Projection shortcut only when spatial size or channel count changes;
        # otherwise the residual is the identity (empty Sequential).
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                norm_layer(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return F.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block; output channels = 4 * planes."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, norm_layer=nn.BatchNorm2d):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = norm_layer(self.expansion*planes)

        # Projection shortcut when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                # BUGFIX: was `nn.norm_layer(...)`, which raised AttributeError
                # (torch.nn has no `norm_layer`) whenever a projection shortcut
                # was needed; use the injected norm_layer like everywhere else.
                norm_layer(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four residual stages (64/128/256/512
    planes), 4x4 average pooling and a linear classifier.

    ``block`` is the residual block class (e.g. BasicBlock or Bottleneck);
    ``num_blocks`` gives the number of blocks in each of the four stages."""

    def __init__(self, block, num_blocks, num_classes=10, norm_layer=nn.BatchNorm2d):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(64)
        # Stages 2-4 halve the spatial resolution via their first block.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, norm_layer=norm_layer)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, norm_layer=norm_layer)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, norm_layer=norm_layer)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride, norm_layer):
        """Stack ``num_blocks`` blocks; only the first block may downsample."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s, norm_layer=norm_layer))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        """Stem -> four stages -> 4x4 average pool -> flatten -> classifier."""
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        return self.linear(h.view(h.size(0), -1))
def resnet18(num_classes=10, norm_layer=nn.BatchNorm2d):
    """Build a ResNet-18 (BasicBlock, 2-2-2-2 blocks per stage)."""
    per_stage = [2, 2, 2, 2]
    return ResNet(BasicBlock, per_stage, num_classes=num_classes, norm_layer=norm_layer)
def resnet34(num_classes=10, norm_layer=nn.BatchNorm2d):
    """Build a ResNet-34 (BasicBlock, 3-4-6-3 blocks per stage)."""
    per_stage = [3, 4, 6, 3]
    return ResNet(BasicBlock, per_stage, num_classes=num_classes, norm_layer=norm_layer)
def resnet50(num_classes=10, norm_layer=nn.BatchNorm2d):
    """Build a ResNet-50 (Bottleneck, 3-4-6-3 blocks per stage)."""
    per_stage = [3, 4, 6, 3]
    return ResNet(Bottleneck, per_stage, num_classes=num_classes, norm_layer=norm_layer)
def resnet101(num_classes=10, norm_layer=nn.BatchNorm2d):
    """Build a ResNet-101 (Bottleneck, 3-4-23-3 blocks per stage)."""
    per_stage = [3, 4, 23, 3]
    return ResNet(Bottleneck, per_stage, num_classes=num_classes, norm_layer=norm_layer)
def resnet152(num_classes=10, norm_layer=nn.BatchNorm2d):
    """Build a ResNet-152 (Bottleneck, 3-8-36-3 blocks per stage)."""
    per_stage = [3, 8, 36, 3]
    return ResNet(Bottleneck, per_stage, num_classes=num_classes, norm_layer=norm_layer)
| [
"harhro@gmail.com"
] | harhro@gmail.com |
78c6b04fab58b471b567bedf62f83b9b7b4a6599 | 1af426c1eb2fc76624854e604cef3d568303f670 | /paper/fig_full_map.py | 7e3940eb04a59761b50997b0a03f5034c1c6c76d | [
"BSD-3-Clause"
] | permissive | ACTCollaboration/tilec | 51f0bbb8f83013fd4c3cdc95e33f8ba88b6a3e56 | 29cbd055c360c67b8676e1cf8534049c0f1dd16e | refs/heads/master | 2022-02-17T19:26:18.113206 | 2022-02-08T04:38:51 | 2022-02-08T04:38:51 | 152,158,730 | 1 | 1 | NOASSERTION | 2020-05-06T08:09:34 | 2018-10-08T23:17:23 | Jupyter Notebook | UTF-8 | Python | false | false | 4,267 | py | from __future__ import print_function
from orphics import maps,io,cosmology,catalogs
from pixell import enmap,reproject
import numpy as np
import os,sys,shutil
from soapack import interfaces as sints
import healpy as hp
# Release tags used to build the on-disk tILe-C product paths below.
version = "map_v1.2.0_joint"
cversion = "v1.2.0"
# Downgrade (pixel-averaging) factor passed to io.hplot.
down = 6
# nilc = hp.read_alm("/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_CompMap_Compton-SZMap-nilc-ymaps_2048_R2.00_alm.fits")
# annot = 'paper/all_planck_act.csv'
#annot = 'paper/all_planck_clusters.csv'
#annot = 'paper/public_clusters.csv'
#annot = None
# Per-region grid tick spacing forwarded to io.hplot's ``ticks`` argument.
t = {'deep56': 2, 'boss':4}
# Per-region pixel crops that trim the map edges before plotting.
sels = {'deep56':np.s_[...,220:-220,300:-300] , 'boss':np.s_[...,450:-450,500:-500]}
for region in ['boss','deep56']:
    # Component-separated products for this region: Compton-y map (yname),
    # y-deprojected CMB (cname), CMB (sname), their beams (*bname files) and
    # the analysis mask (mname).
    yname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_comptony_%s.fits" % (version,region,region,version)
    ybname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_comptony_%s_beam.txt" % (version,region,region,version)
    cname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_deprojects_comptony_%s.fits" % (version,region,region,version)
    cbname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_deprojects_comptony_%s_beam.txt" % (version,region,region,version)
    sname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_%s.fits" % (version,region,region,version)
    sbname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_single_tile_%s_cmb_%s_beam.txt" % (version,region,region,version)
    mname = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324//%s_%s/tilec_mask.fits" % (version,region)
    # shutil.copy(yname,"/scratch/r/rbond/msyriac/data/for_sigurd/")
    # shutil.copy(sname,"/scratch/r/rbond/msyriac/data/for_sigurd/")
    # shutil.copy(mname,"/scratch/r/rbond/msyriac/data/for_sigurd/")
    # continue
    mask = maps.binary_mask(enmap.read_map(mname))
    # Cluster catalogs concatenated for (optional) annotation overlays.
    # Planck
    cols = catalogs.load_fits("/scratch/r/rbond/msyriac/data/planck/data/J_A+A_594_A27.fits",['RAdeg','DEdeg'])
    ras = cols['RAdeg']
    decs = cols['DEdeg']
    # ACT
    cols = catalogs.load_fits("paper/E-D56Clusters.fits",['RAdeg','DECdeg'])
    ras = np.append(ras,cols['RAdeg'])
    decs = np.append(decs,cols['DECdeg'])
    # Region-dependent marker radius, line width and label font size.
    if region=='boss':
        radius = 10
        width = 2
        fontsize = 28
    elif region=='deep56':
        radius = 6
        width = 1
        fontsize = 16
    #annot = 'paper/temp_all_clusters.csv'
    annot = None  # annotations disabled for the final figure
    # catalogs.convert_catalog_to_enplot_annotate_file(annot,ras,
    #                                                  decs,radius=radius,width=width,
    #                                                  color='red',mask=mask,threshold=0.99)
    # dm = sints.PlanckHybrid(region=mask)
    # pmap = dm.get_splits(season=None,patch=None,arrays=['545'],ncomp=1,srcfree=False)[0,0,0]
    # Mask the maps before cropping and plotting.
    ymap = enmap.read_map(yname)*mask
    smap = enmap.read_map(sname)*mask
    # nmap = reproject.enmap_from_healpix(nilc, mask.shape, mask.wcs, ncomp=1, unit=1, lmax=0,
    #                                    rot="gal,equ", first=0, is_alm=True, return_alm=False, f_ell=None)
    # io.hplot(nmap[sels[region]],'fig_full_nmap_%s' % region,color='gray',grid=True,colorbar=True,
    #          annotate=annot,min=-1.25e-5,max=3.0e-5,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14)
    # io.hplot(pmap[sels[region]],'fig_full_pmap_%s' % region,color='planck',grid=True,colorbar=True,
    #          ticks=t[region],downgrade=down)
    # io.hplot(ymap[sels[region]],'fig_full_ymap_%s' % region,color='gray',grid=True,colorbar=True,
    #          annotate=annot,min=-1.25e-5,max=3.0e-5,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14,font_size=fontsize)
    # Compton-y figure: grayscale with a fixed color range shared by regions.
    io.hplot(ymap[sels[region]],'fig_full_ymap_%s' % region,color='gray',grid=True,colorbar=True,
             annotate=annot,min=-0.7e-5,max=2.0e-5,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14,font_size=fontsize)
    # CMB figure: Planck colormap with a symmetric +/-300 range
    # (presumably uK -- confirm against the tilec map units).
    io.hplot(smap[sels[region]],'fig_full_smap_%s' % region,color='planck',grid=True,colorbar=True,
             range=300,ticks=t[region],mask=0,downgrade=down,mask_tol=1e-14,font_size=fontsize)
| [
"mathewsyriac@gmail.com"
] | mathewsyriac@gmail.com |
261cfed4856dceb90491461d4404f4480a20f972 | 13cccbc1bbaec02f53d2f4e654d480512f6c2bb5 | /ds/segment-tree/stone.py | 08d8c7025ff275abc465f50eeeb164d642083106 | [] | no_license | sjdeak/interview-practice | 580cc61ec0d20d548bbc1e9ebebb4a64cd7ac2dc | 1746aaf5ab06603942f9c85c360e319c110d4df8 | refs/heads/master | 2020-07-20T21:06:23.864208 | 2019-09-08T10:54:16 | 2019-09-08T10:54:16 | 206,709,284 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,535 | py | # 黑白石头 https://www.jisuanke.com/course/804/41868
import unittest
class SegmentTreeIntervalSet:
    """Segment tree over a 0/1 array ("black/white stones" problem).

    Supports:
      * ``update(x, v)``         -- set position ``x`` to ``v`` (0 or 1)
      * ``intervalToggle(x, y)`` -- flip every value in ``[x, y]`` (0 <-> 1)
      * ``query(st, ed)``        -- longest run of consecutive 1s in ``[st, ed]``

    BUG FIX: the original stored plain interval sums (contradicting its own
    comment, which promised "longest run of consecutive 1s"), and
    ``intervalToggle`` referenced an undefined name ``val`` so every call
    raised NameError.  This version implements the documented
    longest-consecutive-ones semantics with a lazy "flip" tag: each node keeps
    prefix/suffix/best run lengths for both 1s and 0s, so flipping a node is
    just swapping the two sets of statistics.  The expectations in the Test
    case below are satisfied by these semantics.
    """

    def __init__(self, A):
        self.length = len(A)
        size = 4 * self.length
        # Per-node run statistics, kept for the 1s and (to support flips)
        # symmetrically for the 0s:
        #   maxK  = longest run of K anywhere in the node's segment
        #   preK  = run length of K touching the segment's left edge
        #   sufK  = run length of K touching the segment's right edge
        self.max1 = [0] * size
        self.pre1 = [0] * size
        self.suf1 = [0] * size
        self.max0 = [0] * size
        self.pre0 = [0] * size
        self.suf0 = [0] * size
        # lazySet[p] == 1 means the children of p still owe a pending flip.
        self.lazySet = [0] * size
        # Build by n point-updates, O(n log n) -- same strategy as before.
        for i, v in enumerate(A):
            self.update(i, v)

    def _flip_node(self, p):
        """Apply a flip to node p: the 1-run and 0-run statistics swap."""
        self.max1[p], self.max0[p] = self.max0[p], self.max1[p]
        self.pre1[p], self.pre0[p] = self.pre0[p], self.pre1[p]
        self.suf1[p], self.suf0[p] = self.suf0[p], self.suf1[p]
        self.lazySet[p] ^= 1

    def _down(self, p, l, r):
        """Push a pending flip on node p down to its two children."""
        if self.lazySet[p]:
            self._flip_node(p * 2 + 1)
            self._flip_node(p * 2 + 2)
            self.lazySet[p] = 0

    def _pull(self, p, llen, rlen):
        """Recompute node p from its children (llen/rlen = child lengths)."""
        lc, rc = p * 2 + 1, p * 2 + 2
        for pre, suf, mx in ((self.pre1, self.suf1, self.max1),
                             (self.pre0, self.suf0, self.max0)):
            # A prefix run spills into the right child only when it covers
            # the whole left child (and symmetrically for suffix runs).
            pre[p] = pre[lc] if pre[lc] < llen else llen + pre[rc]
            suf[p] = suf[rc] if suf[rc] < rlen else rlen + suf[lc]
            # Best run: entirely left, entirely right, or straddling the cut.
            mx[p] = max(mx[lc], mx[rc], suf[lc] + pre[rc])

    def query(self, st, ed):
        """Return the longest run of consecutive 1s within ``[st, ed]``."""

        def _query(p, l, r):
            # Returns (max1, pre1, suf1, seglen) for the part of [l, r]
            # lying inside [st, ed].
            if st <= l and r <= ed:
                return self.max1[p], self.pre1[p], self.suf1[p], r - l + 1
            self._down(p, l, r)
            mid = (l + r) // 2
            if ed <= mid:
                return _query(p * 2 + 1, l, mid)
            if st > mid:
                return _query(p * 2 + 2, mid + 1, r)
            lmax, lpre, lsuf, llen = _query(p * 2 + 1, l, mid)
            rmax, rpre, rsuf, rlen = _query(p * 2 + 2, mid + 1, r)
            best = max(lmax, rmax, lsuf + rpre)
            pre = lpre if lpre < llen else llen + rpre
            suf = rsuf if rsuf < rlen else rlen + lsuf
            return best, pre, suf, llen + rlen

        return _query(0, 0, self.length - 1)[0]

    def update(self, x, v):
        """Set position ``x`` to ``v`` (truthy -> 1, falsy -> 0)."""
        bit = 1 if v else 0

        def _update(p, l, r):
            if l == r:
                self.max1[p] = self.pre1[p] = self.suf1[p] = bit
                self.max0[p] = self.pre0[p] = self.suf0[p] = 1 - bit
                return
            self._down(p, l, r)
            mid = (l + r) // 2
            if x <= mid:
                _update(p * 2 + 1, l, mid)
            else:
                _update(p * 2 + 2, mid + 1, r)
            self._pull(p, mid - l + 1, r - mid)

        _update(0, 0, self.length - 1)

    def intervalToggle(self, x, y):
        """Flip (0 <-> 1) every stone in ``[x, y]``."""

        def _toggle(p, l, r):
            if x <= l and r <= y:
                # [l, r] is fully covered: flip this node, defer children.
                self._flip_node(p)
                return
            self._down(p, l, r)
            mid = (l + r) // 2
            if x <= mid:
                _toggle(p * 2 + 1, l, mid)
            if y > mid:
                _toggle(p * 2 + 2, mid + 1, r)
            self._pull(p, mid - l + 1, r - mid)

        _toggle(0, 0, self.length - 1)
class Test(unittest.TestCase):
    """Smoke test for SegmentTreeIntervalSet.

    NOTE(review): the expected values are consistent with ``query`` returning
    the *longest run of consecutive 1s* (e.g. [1, 0, 1, 0] -> 1 over [0, 3]),
    matching the comment in SegmentTreeIntervalSet.__init__ rather than a
    plain interval sum -- confirm against the intended problem statement.
    """
    def testSum(self):
        # Test data taken from "Pudge's Meat Hook" (translated from Chinese):
        # https://www.jisuanke.com/course/804/41866
        t = SegmentTreeIntervalSet([1, 0, 1, 0])
        self.assertEqual(1, t.query(0, 3))
        # Flip [1, 2]: array becomes [1, 1, 0, 0].
        t.intervalToggle(1, 2)
        self.assertEqual(2, t.query(0, 3))
        # Flip [2, 2]: array becomes [1, 1, 1, 0].
        t.intervalToggle(2, 2)
        self.assertEqual(0, t.query(3, 3))
        # self.assertEqual(t.query(0, 9), 25)
        # self.assertEqual(t.query(0, 1), 4)
        # t.update(0, 100)
        # self.assertEqual(t.query(0,1), 102)
if __name__ == "__main__":
    # To run a single test case, pre-set argv first, e.g.:
    # import sys;sys.argv = ['', 'Test.testInit']
    unittest.main()
# NOTE(review): the string below is dead scratch code for other segment-tree
# variants (SegmentTreeSum / SegmentTreeSumIntervalAdd) that are not defined
# in this file; it is an unused module-level string literal kept as-is.
"""
# t = SegmentTreeSum([1] * 10)
# t.intervalAdd(0, 4, 1)
t = SegmentTreeSumIntervalAdd([1] * 10)
# t.intervalAdd(0, 4, 1) # [2,2,2,2,2,1,1,1,1,1]
t.intervalSet(4, 8, 3) # [1,1,1,1,3,3,3,3,3,1]
# self.assertEqual(t.query(0, 9), 25)
print('t.data:', t.data)
print('t.query(0, 9):', t.query(0, 9))
print('t.lazySet:', t.lazySet)
"""
| [
"sjdeak@yahoo.com"
] | sjdeak@yahoo.com |
b9ce71918ff5f859c0e3130615e632b06972dabc | 509e9d64744f720392fda2b978d783f985c60824 | /python2.7/site-packages/numpy/lib/_iotools.py | a5419a22f81b6e42d7cd2b917a50bb9b08c7f2ee | [] | no_license | theideasmith/Instant-OpenCV-FFMPEG | 0560598fba630ded533b4e6c111c61c9b0b7502b | 234e359af245b4832b3e7ade6070e91c81b65de0 | refs/heads/master | 2021-01-11T20:39:33.443984 | 2017-01-16T23:09:46 | 2017-01-16T23:09:46 | 79,162,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1659736f7079dac7416a1915b72802e506a32c2717f3b731db4d61c567506fcd
size 32062
| [
"aclscientist@gmail.com"
] | aclscientist@gmail.com |
0fa8f09c57c7c3c70066e95565ea32012c2724da | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02608/s445397347.py | bb422d2ccfcddc22d586f269320686308b27139a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | n = int(input())
# Competitive-programming solution: for each i in 1..n, count ordered
# positive-integer triples (x, y, z) with x^2 + y^2 + z^2 + xy + yz + zx = i
# (n is read from stdin on the line above).
# x alone is bounded by sqrt(n), since x^2 <= the full expression <= n.
M = int(n**(0.5))
ans = [0]*(n+1)
for x in range(1,M+1):
    for y in range(1,10**2):
        for z in range(1,10**2):
            # Once the expression exceeds n, larger z only grows it: stop.
            if x**2+y**2+z**2+x*y+y*z+z*x > n:
                break
            ans[x**2+y**2+z**2+x*y+y*z+z*x] += 1
        # If even x^2 + y^2 already exceeds n, larger y cannot help: stop.
        if x**2+y**2 > n:
            break
# One answer per line for i = 1..n.
for i in range(n):
    print(ans[i+1])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
da5ce7e09a5a2f3deaaec9035d8013313459e201 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/dynatrace/azure-mgmt-dynatrace/azure/mgmt/dynatrace/operations/_single_sign_on_operations.py | 83d90f1e61aaae28dd3d0ec2bc05d3e16df832d3 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 26,706 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# ``Literal`` moved into the stdlib ``typing`` module in Python 3.8; fall back
# to ``typing_extensions`` on older interpreters.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
# Type of the optional ``cls`` response-customization hook accepted by every
# operation: it receives the pipeline response, the deserialized body, and
# the response headers.
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer used by the request-builder helpers below to encode
# URL path, query and header parameters; client-side validation is disabled
# (generated default for this SDK).
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, monitor_name: str, configuration_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT HttpRequest that creates or updates a single sign-on
    configuration on a Dynatrace monitor (AutoRest-generated helper; do not
    hand-edit logic -- regeneration will overwrite it)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Callers may override the api-version / content-type via kwargs.
    api_version: Literal["2021-09-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations/{configurationName}",
    ) # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "monitorName": _SERIALIZER.url("monitor_name", monitor_name, "str"),
        "configurationName": _SERIALIZER.url("configuration_name", configuration_name, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
    resource_group_name: str, monitor_name: str, configuration_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET HttpRequest that reads one single sign-on configuration
    of a Dynatrace monitor (AutoRest-generated helper; do not hand-edit
    logic -- regeneration will overwrite it)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Callers may override the api-version via kwargs.
    api_version: Literal["2021-09-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations/{configurationName}",
    ) # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "monitorName": _SERIALIZER.url("monitor_name", monitor_name, "str"),
        "configurationName": _SERIALIZER.url("configuration_name", configuration_name, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(resource_group_name: str, monitor_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET HttpRequest that lists all single sign-on configurations
    under a Dynatrace monitor (AutoRest-generated helper; do not hand-edit
    logic -- regeneration will overwrite it)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Callers may override the api-version via kwargs.
    api_version: Literal["2021-09-01"] = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations",
    ) # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "monitorName": _SERIALIZER.url("monitor_name", monitor_name, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SingleSignOnOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.dynatrace.DynatraceObservabilityMgmtClient`'s
:attr:`single_sign_on` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _create_or_update_initial(
self,
resource_group_name: str,
monitor_name: str,
configuration_name: str,
resource: Union[_models.DynatraceSingleSignOnResource, IO],
**kwargs: Any
) -> _models.DynatraceSingleSignOnResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-09-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DynatraceSingleSignOnResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(resource, (IO, bytes)):
_content = resource
else:
_json = self._serialize.body(resource, "DynatraceSingleSignOnResource")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
configuration_name=configuration_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DynatraceSingleSignOnResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DynatraceSingleSignOnResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations/{configurationName}"
}
@overload
def begin_create_or_update(
self,
resource_group_name: str,
monitor_name: str,
configuration_name: str,
resource: _models.DynatraceSingleSignOnResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DynatraceSingleSignOnResource]:
"""Create a DynatraceSingleSignOnResource.
Create a DynatraceSingleSignOnResource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param monitor_name: Monitor resource name. Required.
:type monitor_name: str
:param configuration_name: Single Sign On Configuration Name. Required.
:type configuration_name: str
:param resource: Resource create parameters. Required.
:type resource: ~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DynatraceSingleSignOnResource or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
monitor_name: str,
configuration_name: str,
resource: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DynatraceSingleSignOnResource]:
"""Create a DynatraceSingleSignOnResource.
Create a DynatraceSingleSignOnResource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param monitor_name: Monitor resource name. Required.
:type monitor_name: str
:param configuration_name: Single Sign On Configuration Name. Required.
:type configuration_name: str
:param resource: Resource create parameters. Required.
:type resource: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DynatraceSingleSignOnResource or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
monitor_name: str,
configuration_name: str,
resource: Union[_models.DynatraceSingleSignOnResource, IO],
**kwargs: Any
) -> LROPoller[_models.DynatraceSingleSignOnResource]:
"""Create a DynatraceSingleSignOnResource.
Create a DynatraceSingleSignOnResource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param monitor_name: Monitor resource name. Required.
:type monitor_name: str
:param configuration_name: Single Sign On Configuration Name. Required.
:type configuration_name: str
:param resource: Resource create parameters. Is either a model type or a IO type. Required.
:type resource: ~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DynatraceSingleSignOnResource or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-09-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DynatraceSingleSignOnResource] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
configuration_name=configuration_name,
resource=resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DynatraceSingleSignOnResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations/{configurationName}"
}
@distributed_trace
def get(
self, resource_group_name: str, monitor_name: str, configuration_name: str, **kwargs: Any
) -> _models.DynatraceSingleSignOnResource:
"""Get a DynatraceSingleSignOnResource.
Get a DynatraceSingleSignOnResource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param monitor_name: Monitor resource name. Required.
:type monitor_name: str
:param configuration_name: Single Sign On Configuration Name. Required.
:type configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DynatraceSingleSignOnResource or the result of cls(response)
:rtype: ~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-09-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.DynatraceSingleSignOnResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
configuration_name=configuration_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DynatraceSingleSignOnResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations/{configurationName}"
}
    @distributed_trace
    def list(
        self, resource_group_name: str, monitor_name: str, **kwargs: Any
    ) -> Iterable["_models.DynatraceSingleSignOnResource"]:
        """List all DynatraceSingleSignOnResource by monitorName.

        List all DynatraceSingleSignOnResource by monitorName.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Monitor resource name. Required.
        :type monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DynatraceSingleSignOnResource or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.dynatrace.models.DynatraceSingleSignOnResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Service API version; callers may override via kwargs or query params.
        api_version: Literal["2021-09-01"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.DynatraceSingleSignOnResourceListResult] = kwargs.pop("cls", None)

        # Map error status codes to the exception types raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # Build the request for the first page (from the URL template) or
            # for a continuation page (from the service-provided next_link).
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    monitor_name=monitor_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, page items).
            deserialized = self._deserialize("DynatraceSingleSignOnResourceListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page; raises HttpResponseError on any non-200 status.
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazily paged iterable; no request is made until iteration starts.
        return ItemPaged(get_next, extract_data)

    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations"
    }
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
721c4a68be7ac8de8f4e0a2b22853c7b5deb6663 | 8f9ea3f14bdf2187de759939b2bbc87fe68ccfc0 | /tensorflow/contrib/distributions/python/ops/batch_reshape.py | 4714caad69ee4341d259f6677decdd5842931834 | [
"Apache-2.0"
] | permissive | davidstanke/bazel-mvn-demo | 4ea43f0ba293a28b916a27eab5f0812e9b753c2c | cff14dddce15ea7152988da576673bd15bab6c6e | refs/heads/master | 2022-10-20T07:52:29.651851 | 2018-11-22T13:17:51 | 2018-11-22T13:17:51 | 157,782,756 | 2 | 0 | Apache-2.0 | 2022-10-04T23:47:05 | 2018-11-15T22:54:09 | C++ | UTF-8 | Python | false | false | 16,433 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The BatchReshape distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
__all__ = [
"BatchReshape",
]
class BatchReshape(distribution_lib.Distribution):
  """The Batch-Reshaping distribution.

  This "meta-distribution" reshapes the batch dimensions of another
  distribution.

  #### Examples

  ```python
  tfd = tf.contrib.distributions

  dtype = np.float32
  dims = 2
  new_batch_shape = [1, 2, -1]
  old_batch_shape = [6]

  scale = np.ones(old_batch_shape + [dims], dtype)
  mvn = tfd.MultivariateNormalDiag(scale_diag=scale)
  reshape_mvn = tfd.BatchReshape(
      distribution=mvn,
      batch_shape=new_batch_shape,
      validate_args=True)

  reshape_mvn.batch_shape
  # ==> [1, 2, 3]

  x = reshape_mvn.sample(sample_shape=[4, 5])
  x.shape
  # ==> [4, 5, 1, 2, 3, 2] == sample_shape + new_batch_shape + [dims]

  reshape_mvn.log_prob(x).shape
  # ==> [4, 5, 1, 2, 3] == sample_shape + new_batch_shape
  ```
  """

  def __init__(self,
               distribution,
               batch_shape,
               validate_args=False,
               allow_nan_stats=True,
               name=None):
    """Construct BatchReshape distribution.

    Args:
      distribution: The base distribution instance to reshape. Typically an
        instance of `Distribution`.
      batch_shape: Positive `int`-like vector-shaped `Tensor` representing
        the new shape of the batch dimensions. Up to one dimension may contain
        `-1`, meaning the remainder of the batch size.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: The name to give Ops created by the initializer.
        Default value: `"BatchReshape" + distribution.name`.

    Raises:
      ValueError: if `batch_shape` is not a vector.
      ValueError: if `batch_shape` has non-positive elements.
      ValueError: if `batch_shape` size is not the same as a
        `distribution.batch_shape` size.
    """
    parameters = dict(locals())
    name = name or "BatchReshape" + distribution.name
    with ops.name_scope(name, values=[batch_shape]) as name:
      # The unexpanded batch shape may contain up to one dimension of -1.
      self._batch_shape_unexpanded = ops.convert_to_tensor(
          batch_shape, dtype=dtypes.int32, name="batch_shape")
      validate_init_args_statically(distribution, self._batch_shape_unexpanded)
      # Resolve any -1 entry and collect graph-time validity assertions.
      batch_shape, batch_shape_static, runtime_assertions = calculate_reshape(
          distribution.batch_shape_tensor(), self._batch_shape_unexpanded,
          validate_args)
      self._distribution = distribution
      self._batch_shape_ = batch_shape
      self._batch_shape_static = batch_shape_static
      self._runtime_assertions = runtime_assertions
      super(BatchReshape, self).__init__(
          dtype=distribution.dtype,
          reparameterization_type=distribution.reparameterization_type,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=(
              [self._batch_shape_unexpanded] + distribution._graph_parents),  # pylint: disable=protected-access
          name=name)

  @property
  def distribution(self):
    # The wrapped base distribution.
    return self._distribution

  def _batch_shape_tensor(self):
    with ops.control_dependencies(self._runtime_assertions):
      return array_ops.identity(self._batch_shape_)

  def _batch_shape(self):
    return self._batch_shape_static

  def _event_shape_tensor(self):
    # Event shape is inherited unchanged from the base distribution.
    with ops.control_dependencies(self._runtime_assertions):
      return array_ops.identity(self.distribution.event_shape_tensor())

  def _event_shape(self):
    return self.distribution.event_shape

  def _sample_n(self, n, seed=None):
    # Sample from the base distribution, then reshape the batch dims only.
    with ops.control_dependencies(self._runtime_assertions):
      x = self.distribution.sample(sample_shape=n, seed=seed)
      new_shape = array_ops.concat(
          [
              [n],
              self._batch_shape_unexpanded,
              self.event_shape_tensor(),
          ],
          axis=0)
      return array_ops.reshape(x, new_shape)

  def _log_prob(self, x):
    return self._call_reshape_input_output(
        self.distribution.log_prob, x)

  def _prob(self, x):
    return self._call_reshape_input_output(
        self.distribution.prob, x)

  def _log_cdf(self, x):
    return self._call_reshape_input_output(
        self.distribution.log_cdf, x)

  def _cdf(self, x):
    return self._call_reshape_input_output(
        self.distribution.cdf, x)

  def _log_survival_function(self, x):
    return self._call_reshape_input_output(
        self.distribution.log_survival_function, x)

  def _survival_function(self, x):
    return self._call_reshape_input_output(
        self.distribution.survival_function, x)

  def _entropy(self):
    # Entropy is scalar per batch member, hence the scalar event shape.
    return self._call_and_reshape_output(
        self.distribution.entropy,
        [],
        [tensor_shape.scalar()])

  def _mean(self):
    return self._call_and_reshape_output(self.distribution.mean)

  def _mode(self):
    return self._call_and_reshape_output(self.distribution.mode)

  def _stddev(self):
    return self._call_and_reshape_output(self.distribution.stddev)

  def _variance(self):
    return self._call_and_reshape_output(self.distribution.variance)

  def _covariance(self):
    # Covariance has event shape `event_shape x event_shape`.
    return self._call_and_reshape_output(
        self.distribution.covariance,
        [self.event_shape_tensor()]*2,
        [self.event_shape]*2)

  def _sample_shape(self, x):
    """Computes graph and static `sample_shape`."""
    # sample_ndims = rank(x) - batch_ndims - event_ndims; each term is
    # computed statically when possible, otherwise as a graph op.
    x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
    event_ndims = (array_ops.size(self.event_shape_tensor())
                   if self.event_shape.ndims is None
                   else self.event_shape.ndims)
    batch_ndims = (
        array_ops.size(self._batch_shape_unexpanded)
        if self.batch_shape.ndims is None else self.batch_shape.ndims)
    sample_ndims = x_ndims - batch_ndims - event_ndims
    if isinstance(sample_ndims, int):
      static_sample_shape = x.shape[:sample_ndims]
    else:
      static_sample_shape = tensor_shape.TensorShape(None)
    if static_sample_shape.is_fully_defined():
      sample_shape = np.int32(static_sample_shape.as_list())
    else:
      sample_shape = array_ops.shape(x)[:sample_ndims]
    return sample_shape, static_sample_shape

  def _call_reshape_input_output(self, fn, x):
    """Calls `fn`, appropriately reshaping its input `x` and output."""
    # Flatten x's batch dims to the base distribution's batch shape, apply
    # fn, then reshape the result back to this distribution's batch shape.
    with ops.control_dependencies(
        self._runtime_assertions + self._validate_sample_arg(x)):
      sample_shape, static_sample_shape = self._sample_shape(x)
      old_shape = array_ops.concat([
          sample_shape,
          self.distribution.batch_shape_tensor(),
          self.event_shape_tensor(),
      ], axis=0)
      result = fn(array_ops.reshape(x, old_shape))
      new_shape = array_ops.concat(
          [
              sample_shape,
              self._batch_shape_unexpanded,
          ], axis=0)
      result = array_ops.reshape(result, new_shape)
      if (static_sample_shape.ndims is not None and
          self.batch_shape.ndims is not None):
        # Propagate the best statically-known shape to the output.
        new_shape = static_sample_shape.concatenate(self.batch_shape)
        result.set_shape(result.shape.merge_with(new_shape))
      return result

  def _call_and_reshape_output(
      self,
      fn,
      event_shape_list=None,
      static_event_shape_list=None):
    """Calls `fn` and appropriately reshapes its output."""
    # Used for statistics (mean, variance, ...) that take no sample input.
    with ops.control_dependencies(self._runtime_assertions):
      if event_shape_list is None:
        event_shape_list = [self._event_shape_tensor()]
      if static_event_shape_list is None:
        static_event_shape_list = [self.event_shape]
      new_shape = array_ops.concat(
          [self._batch_shape_unexpanded] + event_shape_list, axis=0)
      result = array_ops.reshape(fn(), new_shape)
      if (self.batch_shape.ndims is not None and
          self.event_shape.ndims is not None):
        event_shape = tensor_shape.TensorShape([])
        for rss in static_event_shape_list:
          event_shape = event_shape.concatenate(rss)
        static_shape = result.shape.merge_with(
            self.batch_shape.concatenate(event_shape))
        result.set_shape(static_shape)
      return result

  def _validate_sample_arg(self, x):
    """Helper which validates sample arg, e.g., input to `log_prob`."""
    # Broadcasting against the batch/event dims is NOT supported; this
    # raises (statically when possible, else via runtime asserts) if x's
    # trailing dims do not exactly match batch_shape + event_shape.
    with ops.name_scope(name="validate_sample_arg", values=[x]):
      x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
      event_ndims = (array_ops.size(self.event_shape_tensor())
                     if self.event_shape.ndims is None
                     else self.event_shape.ndims)
      batch_ndims = (
          array_ops.size(self._batch_shape_unexpanded)
          if self.batch_shape.ndims is None else self.batch_shape.ndims)
      expected_batch_event_ndims = batch_ndims + event_ndims

      if (isinstance(x_ndims, int) and
          isinstance(expected_batch_event_ndims, int)):
        if x_ndims < expected_batch_event_ndims:
          raise NotImplementedError(
              "Broadcasting is not supported; too few batch and event dims "
              "(expected at least {}, saw {}).".format(
                  expected_batch_event_ndims, x_ndims))
        ndims_assertion = []
      elif self.validate_args:
        ndims_assertion = [
            check_ops.assert_greater_equal(
                x_ndims,
                expected_batch_event_ndims,
                message=("Broadcasting is not supported; too few "
                         "batch and event dims."),
                name="assert_batch_and_event_ndims_large_enough"),
        ]

      if (self.batch_shape.is_fully_defined() and
          self.event_shape.is_fully_defined()):
        expected_batch_event_shape = np.int32(self.batch_shape.concatenate(
            self.event_shape).as_list())
      else:
        expected_batch_event_shape = array_ops.concat([
            self.batch_shape_tensor(),
            self.event_shape_tensor(),
        ], axis=0)

      sample_ndims = x_ndims - expected_batch_event_ndims
      if isinstance(sample_ndims, int):
        sample_ndims = max(sample_ndims, 0)
      if (isinstance(sample_ndims, int) and
          x.shape[sample_ndims:].is_fully_defined()):
        actual_batch_event_shape = np.int32(x.shape[sample_ndims:].as_list())
      else:
        sample_ndims = math_ops.maximum(sample_ndims, 0)
        actual_batch_event_shape = array_ops.shape(x)[sample_ndims:]

      if (isinstance(expected_batch_event_shape, np.ndarray) and
          isinstance(actual_batch_event_shape, np.ndarray)):
        if any(expected_batch_event_shape != actual_batch_event_shape):
          raise NotImplementedError("Broadcasting is not supported; "
                                    "unexpected batch and event shape "
                                    "(expected {}, saw {}).".format(
                                        expected_batch_event_shape,
                                        actual_batch_event_shape))
        # We need to set the final runtime-assertions to `ndims_assertion` since
        # its possible this assertion was created. We could add a condition to
        # only do so if `self.validate_args == True`, however this is redundant
        # as `ndims_assertion` already encodes this information.
        runtime_assertions = ndims_assertion
      elif self.validate_args:
        # We need to make the `ndims_assertion` a control dep because otherwise
        # TF itself might raise an exception owing to this assertion being
        # ill-defined, ie, one cannot even compare different rank Tensors.
        with ops.control_dependencies(ndims_assertion):
          shape_assertion = check_ops.assert_equal(
              expected_batch_event_shape,
              actual_batch_event_shape,
              message=("Broadcasting is not supported; "
                       "unexpected batch and event shape."),
              name="assert_batch_and_event_shape_same")
        runtime_assertions = [shape_assertion]
      else:
        runtime_assertions = []

      return runtime_assertions
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape).

  Args:
    original_shape: int vector `Tensor`, the shape being reshaped from.
    new_shape: int vector `Tensor`, the requested shape; may contain one -1.
    validate: Python `bool`; if `True`, build runtime assertion ops.
    name: optional name scope.

  Returns:
    Tuple of (expanded new shape with any -1 resolved, static `TensorShape`,
    list of validation ops — empty unless `validate` is `True`).
  """
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    # Fully static: no graph ops or runtime checks needed.
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with ops.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = math_ops.reduce_prod(original_shape)
    implicit_dim = math_ops.equal(new_shape, -1)
    # With exactly one -1 entry, prod(new_shape) == -(product of known dims),
    # so the implicit dimension is original_size divided by that product.
    size_implicit_dim = (
        original_size // math_ops.maximum(1, -math_ops.reduce_prod(new_shape)))
    new_ndims = array_ops.shape(new_shape)
    expanded_new_shape = array_ops.where(  # Assumes exactly one `-1`.
        implicit_dim, array_ops.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        check_ops.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        check_ops.assert_rank(
            new_shape, 1, message="New shape must be a vector."),
        check_ops.assert_less_equal(
            math_ops.count_nonzero(implicit_dim, dtype=dtypes.int32),
            1,
            message="At most one dimension can be unknown."),
        check_ops.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        check_ops.assert_equal(
            math_ops.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
def validate_init_args_statically(distribution, batch_shape):
  """Helper to __init__ which makes or raises assertions.

  Performs only graph-build-time (static) checks on the requested
  `batch_shape`: it must be a vector, its total size must agree with the
  base distribution's batch size, and every known element must be positive
  (or the single allowed -1).
  """
  ndims = batch_shape.shape.ndims
  if ndims is not None and ndims != 1:
    raise ValueError("`batch_shape` must be a vector "
                     "(saw rank: {}).".format(ndims))

  batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
  requested_size = batch_shape_static.num_elements()
  existing_size = distribution.batch_shape.num_elements()
  # Sizes are comparable only when both are statically known.
  if requested_size is not None and existing_size is not None:
    if requested_size != existing_size:
      raise ValueError("`batch_shape` size ({}) must match "
                       "`distribution.batch_shape` size ({}).".format(
                           requested_size, existing_size))

  if batch_shape_static.dims is not None:
    has_bad_dim = any(
        dim.value is not None and dim.value < 1 for dim in batch_shape_static)
    if has_bad_dim:
      raise ValueError("`batch_shape` elements must be >=-1.")
| [
"davidstanke@gmail.com"
] | davidstanke@gmail.com |
4795e440d021ea6c20464178c5e259094ae896c1 | f023692f73992354a0b7823d9c49ae730c95ab52 | /tool/datastructure.py | 153f8ccd287d1ff772731de54504c2d56135f836 | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 8,403 | py | class BinaryIndexedTree:
"""
l = [1, 2, 3, 4, 5, 6, 7, 8] のlistを例とした場合、
以下のような範囲での演算結果(sum)を配列に持つ。
1: [1, 2, 3, 4, 5, 6, 7, 8]
2: [1, 2, 3, 4]
3: [1, 2] [5, 6]
4: [1] [3] [5] [7]
1 ~ r までの結果S(r)を、各層で必要な演算済みのデータを使うことで log(N) で計算できる.
l ~ r までの結果は S(r) - S(l - 1) で同じくlog(N)計算できる.
データ構造の作成は N*log(N).
配列データは1始まりとして計算.
長さ n + 1 (0 ~ n) の配列にデータを持ち, データ内の対象要素を l ~ r とすると, 配列の r 番目が格納先となる.
また対象要素の数は r の LSB(Least Significant Bit) に一致する.
転倒数の計算にも使える.
"""
def __init__(self, n):
"""
:param n: num of date.
"""
self.num = n
self.tree = [0] * (n + 1)
def add(self, k, x):
"""
:param k: [1, self.num]
:param x: add num.
:return: None
"""
while k <= self.num:
self.tree[k] += x
k += k & -k
def sum(self, k):
"""
1 ~ k までの合計
:param k:
:return:
"""
re = 0
while k > 0:
re += self.tree[k]
k -= k & -k
return re
def sum_lr(self, l, r):
"""
sum of form l to r
:param l: 1 <= l <= r
:param r: l <= r <= self.num
:return:
"""
return self.sum(r) - self.sum(l - 1)
class SegTree:
    """Iterative segment tree for an associative operation with identity.

    Stores fold results over power-of-two aligned intervals so that any
    range query and any point update run in O(log n).

    Layout (1-indexed array of size 2 * num): node i covers the union of
    its children 2i and 2i+1; leaves live at indices num .. 2*num - 1.

    References:
      https://algo-logic.info/segment-tree/#toc_id_1
      https://qiita.com/takayg1/items/c811bd07c21923d7ec69
    """

    def __init__(self, elm, func, default):
        """Build the tree in O(n).

        :param elm: initial values.
        :param func: associative binary operation f(x, y).
        :param default: identity element of `func`.
        """
        # Round the leaf count up to a power of two.
        self.num = 1 << (len(elm) - 1).bit_length()
        self.func = func
        self.default = default
        self.tree = [default] * (2 * self.num)
        # Place the raw values at the leaves...
        for idx, value in enumerate(elm):
            self.tree[self.num + idx] = value
        # ...then fold children into each internal node, bottom-up.
        for node in range(self.num - 1, 0, -1):
            self.tree[node] = self.func(self.tree[2 * node],
                                        self.tree[2 * node + 1])

    def element(self, k):
        """Return the raw value stored at element index k (0-based)."""
        return self.tree[self.num + k]

    def update(self, k, x):
        """Set element k to x and refresh all ancestors, O(log n).

        :param k: 0-based element index.
        :param x: new value.
        """
        node = self.num + k
        self.tree[node] = x
        # Re-fold every ancestor on the path to the root.
        while node > 1:
            node //= 2
            self.tree[node] = self.func(self.tree[2 * node],
                                        self.tree[2 * node + 1])

    def query(self, l, r):
        """Fold `func` over the half-open range [l, r), 0-based.

        :param l: left bound (inclusive).
        :param r: right bound (exclusive).
        """
        acc_left = self.default
        acc_right = self.default
        l += self.num
        r += self.num
        # Climb both boundaries; absorb any node that sticks out of the
        # aligned interval, keeping left/right accumulators in order.
        while l < r:
            if l & 1:
                acc_left = self.func(acc_left, self.tree[l])
                l += 1
            if r & 1:
                acc_right = self.func(self.tree[r - 1], acc_right)
            l >>= 1
            r >>= 1
        return self.func(acc_left, acc_right)
class BinaryTrie:
    """Ordered multiset of non-negative integers backed by a binary trie.

    Each value is routed through a tree by its bits (most significant
    first), which lets rank/order queries run in O(b) where b is the bit
    width. TODO: known to be slow; candidate for optimization. All
    element indices are 1-based.

    Reference:
      https://kazuma8128.hatenablog.com/entry/2018/05/06/022654
    """

    def __init__(self, b):
        # b: number of bits used per value (values fit in b+1 bits here,
        # since recursion descends from bit b down to bit 0).
        self.bit_size = b
        self.b_node = BinaryNode()

    def insert(self, x):
        """Add one occurrence of x to the multiset."""
        self.b_node.add_node(x, self.bit_size)

    def delete(self, x):
        """Remove one occurrence of x from the multiset."""
        self.b_node.del_node(x, self.bit_size)

    def max_element(self):
        # Not implemented yet.
        pass

    def min_element(self):
        # Not implemented yet.
        pass

    def lower_bound(self, x):
        """Return the 1-based rank of the largest element <= x.

        Equivalently: the number of stored elements that are <= x.
        """
        return self.b_node.lower_bound(x, self.bit_size)

    def upper_bound(self, x):
        """Return the 1-based index of the smallest element >= x.

        Computed as total count minus the number of elements >= x, plus one.
        """
        at_least = self.b_node.upper_bound(x, self.bit_size)
        return self.b_node.num - at_least + 1

    def kth_element(self, k):
        """Return the value of the k-th smallest element (1-based)."""
        return self.b_node.kth_element(k, self.bit_size)
class BinaryNode:
"""
BinaryTrie 内で使用するサブクラス。
引数や戻り値の要素位置は1始まり。
"""
def __init__(self):
self.num = 0
self.pointer = [None, None]
def __add_pointer(self, x):
self.pointer[x] = \
BinaryNode() if self.pointer[x] is None else self.pointer[x]
def __del_pointer(self, x):
self.pointer[x] = None
def add_node(self, x, b):
"""
x をノードに追加する
:param x: 追加する値
:param b: 低から数えた時の深さ
:return:
"""
if b == -1:
self.num += 1
return self.num
t = x >> b & 1
self.__add_pointer(t)
self.pointer[t].add_node(x, b - 1)
self.num += 1
def del_node(self, x, b):
"""
x をノードから削除する
:param x: 削除する値
:param b: 低から数えた時の深さ
:return:
"""
if b == -1:
self.num = 0
return self.num
t = x >> b & 1
if self.pointer[t].del_node(x, b - 1) == 0:
self.__del_pointer(t)
self.num -= 1
return self.num
def upper_bound(self, x, b):
"""
x 以上の値の要素の個数
:param x: 検索値
:param b: 低から数えた時の深さ
:return:
"""
if b == -1:
return 1
re = 0
if x >> b & 1 == 1:
if self.pointer[1] is not None:
re += self.pointer[1].upper_bound(x, b - 1)
else:
if self.pointer[0] is not None:
re += self.pointer[0].upper_bound(x, b - 1)
if self.pointer[1] is not None:
re += self.pointer[1].num
return re
def lower_bound(self, x, b):
"""
x 以下の要素の個数
:param x: 検索値
:param b: 低から数えた時の深さ
:return:
"""
if b == -1:
return 1
re = 0
if x >> b & 1 == 1:
if self.pointer[0] is not None:
re += self.pointer[0].num
if self.pointer[1] is not None:
re += self.pointer[1].lower_bound(x, b - 1)
else:
if self.pointer[0] is not None:
re += self.pointer[0].lower_bound(x, b - 1)
return re
def kth_element(self, k, b):
"""
k番目の要素の値
:param k: 検索要素番号
:param b: 低から数えた時の深さ
:return:
"""
if b == -1:
return 0
re = 0
if self.pointer[0] is not None:
if k <= self.pointer[0].num:
re += self.pointer[0].kth_element(k, b - 1)
else:
re += 1 << b
re += self.pointer[1].kth_element(k - self.pointer[0].num,
b - 1)
else:
re += 1 << b
re += self.pointer[1].kth_element(k, b - 1)
return re
| [
"39874652+corutopi@users.noreply.github.com"
] | 39874652+corutopi@users.noreply.github.com |
a037c30b4cf938b6c88e676a97e3fb1218ac58f5 | 45c01f01483b09ff738be19df6b183ec9bf38504 | /bin/combinatorial_fitness.py | 0cc08316d553e9b5db3df4f9d799942fe0fcda81 | [
"MIT"
] | permissive | brianhie/viral-mutation | 77787e74cb3868ef227aca50b13b3a4c439d4564 | 81c80d41671670eb58cc46e957a1b0c4bf14856a | refs/heads/master | 2023-04-16T18:42:34.118126 | 2022-02-16T16:22:23 | 2022-02-16T16:22:23 | 247,753,138 | 117 | 46 | MIT | 2023-03-24T22:45:39 | 2020-03-16T15:52:08 | Python | UTF-8 | Python | false | false | 8,010 | py | from utils import Counter, SeqIO
from Bio.Seq import translate
import numpy as np
def load_doud2016():
    """Load per-site amino-acid preferences for WSN/1933 H1 HA (Doud 2016).

    Returns:
        (strains, seqs_fitness) where `strains` maps strain name to its
        wildtype sequence and `seqs_fitness` maps (mutant_sequence, strain)
        to a one-element list of measurement records; replicate values are
        collapsed to their median under 'fitness' / 'preference'.

    NOTE(review): reads data files relative to the working directory —
    presumably the repository root; confirm before reusing elsewhere.
    """
    strain = 'h1'
    fname = 'data/influenza/escape_doud2018/WSN1933_H1_HA.fa'
    wt_seq = SeqIO.read(fname, 'fasta').seq

    seqs_fitness = {}
    fname = ('data/influenza/fitness_doud2016/'
             'Supplemental_File_2_HApreferences.txt')
    with open(fname) as f:
        # Header row: first 4 columns are metadata, the rest are mutants.
        muts = f.readline().rstrip().split()[4:]
        for line in f:
            fields = line.rstrip().split()
            pos = int(fields[0]) - 1  # file positions are 1-based
            orig = fields[1]
            assert(wt_seq[pos] == orig)  # sanity: table agrees with wildtype
            data = [ float(field) for field in fields[3:] ]
            assert(len(muts) == len(data))
            for mut, pref in zip(muts, data):
                mutable = [ aa for aa in wt_seq ]
                assert(mut.startswith('PI_'))
                # Column header like 'PI_A' encodes mutating to residue 'A'.
                mutable[pos] = mut[-1]
                mut_seq = ''.join(mutable)
                assert(len(mut_seq) == len(wt_seq))

                if (mut_seq, strain) not in seqs_fitness:
                    seqs_fitness[(mut_seq, strain)] = [ {
                        'strain': strain,
                        'fitnesses': [ pref ],
                        'preferences': [ pref ],
                        'wildtype': wt_seq,
                        'mut_pos': [ pos ],
                    } ]
                else:
                    seqs_fitness[(mut_seq, strain)][0][
                        'fitnesses'].append(pref)
                    seqs_fitness[(mut_seq, strain)][0][
                        'preferences'].append(pref)

    # Collapse replicate measurements to their median.
    for fit_key in seqs_fitness:
        seqs_fitness[fit_key][0]['fitness'] = np.median(
            seqs_fitness[fit_key][0]['fitnesses']
        )
        seqs_fitness[fit_key][0]['preference'] = np.median(
            seqs_fitness[fit_key][0]['preferences']
        )

    return { strain: wt_seq }, seqs_fitness
def load_haddox2018():
    """Load HIV Env amino-acid preferences for BF520/BG505 (Haddox 2018).

    Returns:
        (strains, seqs_fitness) with the same structure as load_doud2016();
        replicate values are collapsed to their median.
    """
    strain_names = [ 'BF520', 'BG505' ]

    strains = {}
    seqs_fitness = {}
    for strain in strain_names:
        # Wildtype protein: translate the nucleotide record, drop stop codon.
        wt_seq = translate(SeqIO.read(
            'data/hiv/fitness_haddox2018/'
            '{}_env.fasta'.format(strain), 'fasta'
        ).seq).rstrip('*')
        strains[strain] = wt_seq

        # Map HXB2 reference coordinates to (wildtype residue, 0-based pos).
        fname = 'data/hiv/fitness_haddox2018/{}_to_HXB2.csv'.format(strain)
        pos_map = {}
        with open(fname) as f:
            f.readline() # Consume header.
            for line in f:
                fields = line.rstrip().split(',')
                pos_map[fields[1]] = (fields[2], int(fields[0]) - 1)

        fname = ('data/hiv/fitness_haddox2018/{}_avgprefs.csv'
                 .format(strain))
        with open(fname) as f:
            # Header row: first column is the site, the rest are mutants.
            mutants = f.readline().rstrip().split(',')[1:]
            for line in f:
                fields = line.rstrip().split(',')
                orig, pos = pos_map[fields[0]]
                assert(wt_seq[int(pos)] == orig)  # sanity check vs wildtype
                preferences = [ float(field) for field in fields[1:] ]
                assert(len(mutants) == len(preferences))
                for mut, pref in zip(mutants, preferences):
                    mutable = [ aa for aa in wt_seq ]
                    mutable[pos] = mut
                    mut_seq = ''.join(mutable)

                    if (mut_seq, strain) not in seqs_fitness:
                        seqs_fitness[(mut_seq, strain)] = [ {
                            'strain': strain,
                            'fitnesses': [ pref ],
                            'preferences': [ pref ],
                            'wildtype': wt_seq,
                            'mut_pos': [ pos ],
                        } ]
                    else:
                        seqs_fitness[(mut_seq, strain)][0][
                            'fitnesses'].append(pref)
                        seqs_fitness[(mut_seq, strain)][0][
                            'preferences'].append(pref)

    # Collapse replicate measurements to their median.
    for fit_key in seqs_fitness:
        seqs_fitness[fit_key][0]['fitness'] = np.median(
            seqs_fitness[fit_key][0]['fitnesses']
        )
        seqs_fitness[fit_key][0]['preference'] = np.median(
            seqs_fitness[fit_key][0]['preferences']
        )

    return strains, seqs_fitness
def load_wu2020():
    """Load combinatorial HA fitness data across six strains (Wu 2020).

    Mutations occur at six fixed HA positions; each measured variant is
    expanded into a full-length mutant sequence of its strain background.

    Returns:
        (wt_seqs, seqs_fitness): wildtype sequences keyed by strain name,
        and (mutant_sequence, strain) -> list of measurement records.
    """
    # HA numbering of the six mutated sites.
    mut_pos = [
        156, 158, 159, 190, 193, 196
    ]
    offset = 16 # Amino acids in prefix.
    # Convert to 0-based indices into the full sequence (signal peptide offset).
    mut_pos = [ pos - 1 + offset for pos in mut_pos ]
    names = [
        'HK68', 'Bk79', 'Bei89', 'Mos99', 'Bris07L194', 'NDako16',
    ]
    # Wildtype residues at the six sites, in the same order as `names`.
    wildtypes = [
        'KGSESV', 'EESENV', 'EEYENV', 'QKYDST', 'HKFDFA', 'HNSDFA',
    ]

    # Load full wildtype sequences.
    wt_seqs = {}
    fname = 'data/influenza/fitness_wu2020/wildtypes.fa'
    for record in SeqIO.parse(fname, 'fasta'):
        strain_idx = names.index(record.description)
        wt = wildtypes[strain_idx]
        # Sanity: FASTA sequence agrees with the expected residues.
        for aa, pos in zip(wt, mut_pos):
            assert(record.seq[pos] == aa)
        wt_seqs[names[strain_idx]] = record.seq

    # Load mutants.
    seqs_fitness = {}
    fname = 'data/influenza/fitness_wu2020/data_pref.tsv'
    with open(fname) as f:
        f.readline()
        for line in f:
            fields = line.rstrip().split('\t')
            mut, strain, fitness, preference = fields
            # Skip the P194 background; normalize the L194 strain name.
            if strain == 'Bris07P194':
                continue
            if strain == 'Bris07':
                strain = 'Bris07L194'
            # NOTE(review): fitness is overwritten with the preference
            # column here — confirm this is intentional.
            fitness = float(preference)
            preference = float(preference)
            strain_idx = names.index(strain)
            wt = wildtypes[strain_idx]
            full_seq = wt_seqs[strain]
            # Substitute the six-site variant into the full sequence.
            mutable = [ aa for aa in full_seq ]
            for aa_wt, aa, pos in zip(wt, mut, mut_pos):
                assert(mutable[pos] == aa_wt)
                mutable[pos] = aa
            mut_seq = ''.join(mutable)

            if (mut_seq, strain) not in seqs_fitness:
                seqs_fitness[(mut_seq, strain)] = []
            seqs_fitness[(mut_seq, strain)].append({
                'strain': strain,
                'fitness': fitness,
                'preference': preference,
                'wildtype': full_seq,
                'mut_pos': mut_pos,
            })

    return wt_seqs, seqs_fitness
def load_starr2020():
    """Load SARS-CoV-2 RBD/ACE2 binding affinities (Starr 2020).

    Binding is reported as log10(Ka); replicate measurements are collapsed
    to their median. Mutant positions in the CSV are RBD-local and offset
    by 330 into the full spike sequence.

    Returns:
        (strains, seqs_fitness) with the same structure as load_doud2016().
    """
    strain = 'sars_cov_2'
    wt_seq = SeqIO.read('data/cov/cov2_spike_wt.fasta', 'fasta').seq

    seqs_fitness = {}
    with open('data/cov/starr2020cov2/binding_Kds.csv') as f:
        f.readline()
        for line in f:
            fields = line.replace('"', '').rstrip().split(',')
            # 'NA' marks variants with no measurable binding; skip them.
            if fields[5] == 'NA':
                continue
            log10Ka = float(fields[5])
            # Second-to-last column holds space-separated mutations, e.g. 'N501Y'.
            mutants = fields[-2].split()
            mutable = [ aa for aa in wt_seq ]
            mut_pos = []
            for mutant in mutants:
                orig, mut = mutant[0], mutant[-1]
                # RBD-local 1-based position -> 0-based full-spike position.
                pos = int(mutant[1:-1]) - 1 + 330
                assert(wt_seq[pos] == orig)
                mutable[pos] = mut
                mut_pos.append(pos)
            mut_seq = ''.join(mutable)

            if (mut_seq, strain) not in seqs_fitness:
                seqs_fitness[(mut_seq, strain)] = [ {
                    'strain': strain,
                    'fitnesses': [ log10Ka ],
                    'preferences': [ log10Ka ],
                    'wildtype': wt_seq,
                    'mut_pos': mut_pos,
                } ]
            else:
                seqs_fitness[(mut_seq, strain)][0][
                    'fitnesses'].append(log10Ka)
                seqs_fitness[(mut_seq, strain)][0][
                    'preferences'].append(log10Ka)

    # Collapse replicate measurements to their median.
    for fit_key in seqs_fitness:
        seqs_fitness[fit_key][0]['fitness'] = np.median(
            seqs_fitness[fit_key][0]['fitnesses']
        )
        seqs_fitness[fit_key][0]['preference'] = np.median(
            seqs_fitness[fit_key][0]['preferences']
        )

    # NOTE(review): leftover debug print; consider removing or logging.
    print(len(seqs_fitness))

    return { strain: wt_seq }, seqs_fitness
if __name__ == '__main__':
    load_starr2020()
    # NOTE(review): this early exit makes the calls below unreachable dead
    # code — presumably a leftover from debugging; confirm and clean up.
    exit()
    load_doud2016()
    load_haddox2018()
    load_wu2020()
| [
"brianhie@mit.edu"
] | brianhie@mit.edu |
63c93fdc3a6a5121bd821bc2b6e51bfc13572d01 | 90af9781544352a3ae2e4f33f1c21cf4cd7a18ba | /scripts/sptk/libs/utils.py | 871478ece70e32e4d63464fecea3d8c21a3083c1 | [
"Apache-2.0"
] | permissive | ronggan/setk | 65d5a079185c69d29035a07449cfe5497c844203 | c1df07f5acb5d631ec4a6d6bdbae0507cc9c9dfc | refs/heads/master | 2020-06-14T08:44:32.387932 | 2019-06-07T12:47:52 | 2019-06-07T12:47:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,608 | py | #!/usr/bin/env python
# wujian@2018
import os
import math
import errno
import warnings
import logging
import librosa as audio_lib
# using wf to handle wave IO because it support better than librosa
import scipy.io.wavfile as wf
import scipy.signal as ss
import numpy as np
# Peak value of a signed 16-bit PCM sample; used for (de)normalization.
MAX_INT16 = np.iinfo(np.int16).max
# Small positive constant to avoid log(0)/division by zero in float32.
EPSILON = np.finfo(np.float32).eps

# Public API of this module.
# NOTE(review): "make_dir" is exported but not defined in this portion of
# the file — presumably defined further down; confirm.
__all__ = [
    "stft", "istft", "get_logger", "make_dir", "filekey", "write_wav",
    "read_wav"
]
def nfft(window_size):
    """Return the FFT size: the smallest power of two >= window_size."""
    exponent = math.ceil(math.log2(window_size))
    return 2**exponent
def cmat_abs(cmat):
    """Elementwise magnitude of a complex array.

    Computed as sqrt(real^2 + imag^2), which benchmarks faster than
    np.abs on complex input.

    :param cmat: complex ndarray.
    :raises RuntimeError: if the input is not complex-valued.
    """
    if not np.iscomplexobj(cmat):
        raise RuntimeError(
            "function cmat_abs expect complex as input, but got {}".format(
                cmat.dtype))
    real_part = cmat.real
    imag_part = cmat.imag
    return np.sqrt(real_part**2 + imag_part**2)
def write_wav(fname, samps, fs=16000, normalize=True):
    """
    Write wav files in int16, support single/multi-channel

    :param fname: output path; parent directories are created if missing.
    :param samps: samples, 1-D (mono) or 2-D; a channel-major [C, N]
        array is transposed to the [N, C] layout scipy expects.
    :param fs: sample rate in Hz.
    :param normalize: if True, samples are assumed in [-1, 1] and scaled
        to int16 range before casting.
    """
    if normalize:
        samps = samps * MAX_INT16
    # scipy.io.wavfile.write could write single/multi-channel files
    # for multi-channel, accept ndarray [Nsamples, Nchannels]
    if samps.ndim != 1 and samps.shape[0] < samps.shape[1]:
        samps = np.transpose(samps)
    samps = np.squeeze(samps)
    # same as MATLAB and kaldi
    samps_int16 = samps.astype(np.int16)
    fdir = os.path.dirname(fname)
    if fdir:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists + os.makedirs sequence.
        os.makedirs(fdir, exist_ok=True)
    # NOTE: librosa 0.6.0 seems could not write non-float narray
    # so use scipy.io.wavfile instead
    wf.write(fname, fs, samps_int16)
def read_wav(fname, normalize=True, return_rate=False):
    """
    Read wave files using scipy.io.wavfile(support multi-channel)

    :param fname: path to the wav file.
    :param normalize: if True, scale int16 samples into [-1, 1] like
        MATLAB and librosa.
    :param return_rate: if True, also return the sample rate.
    :return: samples as float64 ndarray (channel-major [C, N] for
        multi-channel input), optionally preceded by the sample rate.
    """
    # samps_int16: N x C or N
    # N: number of samples
    # C: number of channels
    samp_rate, samps_int16 = wf.read(fname)
    # np.float64 (== Python float) replaces the np.float alias, which was
    # deprecated in NumPy 1.20 and removed in 1.24.
    samps = samps_int16.astype(np.float64)
    # tranpose because I used to put channel axis first
    if samps.ndim != 1:
        samps = np.transpose(samps)
    # normalize like MATLAB and librosa
    if normalize:
        samps = samps / MAX_INT16
    if return_rate:
        return samp_rate, samps
    return samps
# return F x T or T x F(tranpose=True)
def stft(samps,
         frame_len=1024,
         frame_hop=256,
         round_power_of_two=True,
         center=False,
         window="hann",
         apply_abs=False,
         apply_log=False,
         apply_pow=False,
         transpose=True):
    """
    STFT wrapper, using librosa

    :param samps: mono samples, 1-D ndarray.
    :param frame_len: analysis window length in samples.
    :param frame_hop: hop size in samples.
    :param round_power_of_two: pad FFT size up to the next power of two.
    :param center: librosa centering flag.
    :param window: window name, or "sqrthann" for a square-root Hann.
    :param apply_abs: return magnitudes instead of complex values.
    :param apply_log: return log-magnitudes (implies apply_abs).
    :param apply_pow: square the (magnitude) spectrogram.
    :param transpose: return T x F instead of F x T.
    """
    if apply_log and not apply_abs:
        warnings.warn("Ignore apply_abs=False because apply_log=True")
        apply_abs = True
    if samps.ndim != 1:
        raise RuntimeError("Invalid shape, librosa.stft accepts mono input")
    # pad fft size to power of two or left it same as frame length
    n_fft = nfft(frame_len) if round_power_of_two else frame_len
    if window == "sqrthann":
        # sym=False gives a periodic window, suitable for spectral analysis.
        window = ss.hann(frame_len, sym=False)**0.5
    # orignal stft accept samps(vector) and return matrix shape as F x T
    # NOTE for librosa.stft:
    # 1) win_length <= n_fft
    # 2) if win_length is None, win_length = n_fft
    # 3) if win_length < n_fft, pad window to n_fft
    stft_mat = audio_lib.stft(samps,
                              n_fft,
                              frame_hop,
                              win_length=frame_len,
                              window=window,
                              center=center)
    # stft_mat: F x T or N x F x T
    if apply_abs:
        stft_mat = cmat_abs(stft_mat)
    if apply_pow:
        stft_mat = np.power(stft_mat, 2)
    if apply_log:
        # Clamp by EPSILON to keep log() finite on silent frames.
        stft_mat = np.log(np.maximum(stft_mat, EPSILON))
    if transpose:
        stft_mat = np.transpose(stft_mat)
    return stft_mat
# accept F x T or T x F(tranpose=True)
def istft(stft_mat,
          frame_len=1024,
          frame_hop=256,
          center=False,
          window="hann",
          transpose=True,
          norm=None,
          power=None,
          nsamps=None):
    """
    iSTFT wrapper, using librosa.

    Parameters
    ----------
    stft_mat : np.ndarray
        Complex STFT matrix, F x T (or T x F when transpose=True).
    frame_len : int
        Synthesis window length in samples.
    frame_hop : int
        Hop between successive frames in samples.
    center : bool
        Passed through to librosa.istft.
    window : str or np.ndarray
        Window name, or "sqrthann" for a square-root Hann window.
    transpose : bool
        Input is T x F and must be transposed first.
    norm : float or None
        If set, rescale so the infinity-norm of the output matches it.
    power : float or None
        If set, rescale so the average power of the output matches it.
    nsamps : int or None
        Exact number of output samples (librosa's `length`).
    """
    if transpose:
        stft_mat = np.transpose(stft_mat)
    if window == "sqrthann":
        window = ss.hann(frame_len, sym=False)**0.5
    # librosa >= 0.10 makes every argument after the STFT matrix keyword-only,
    # so pass hop_length by name (also valid on older releases).
    samps = audio_lib.istft(stft_mat,
                            hop_length=frame_hop,
                            win_length=frame_len,
                            window=window,
                            center=center,
                            length=nsamps)
    # keep same amplitude
    if norm:
        samps_norm = np.linalg.norm(samps, np.inf)
        samps = samps * norm / (samps_norm + EPSILON)
    # keep same power
    if power:
        samps_pow = np.linalg.norm(samps, 2)**2 / samps.size
        samps = samps * np.sqrt(power / samps_pow)
    return samps
def griffin_lim(magnitude,
                frame_len=1024,
                frame_hop=256,
                window="hann",
                center=True,
                transpose=True,
                epochs=100):
    """
    Griffin-Lim phase reconstruction: starting from a random phase,
    alternate iSTFT/STFT for `epochs` rounds, keeping the given magnitude
    and the re-estimated phase each round.

    Note: nondeterministic (uses np.random for the initial phase).
    """
    # TxF -> FxT
    if transpose:
        magnitude = np.transpose(magnitude)
    n_fft = nfft(frame_len)
    angle = np.exp(2j * np.pi * np.random.rand(*magnitude.shape))
    # librosa >= 0.10 keyword-only arguments: pass hop/win lengths by name
    # (also valid on older releases).
    samps = audio_lib.istft(magnitude * angle,
                            hop_length=frame_hop,
                            win_length=frame_len,
                            window=window,
                            center=center)
    for _ in range(epochs):
        stft_mat = audio_lib.stft(samps,
                                  n_fft=n_fft,
                                  hop_length=frame_hop,
                                  win_length=frame_len,
                                  window=window,
                                  center=center)
        angle = np.exp(1j * np.angle(stft_mat))
        samps = audio_lib.istft(magnitude * angle,
                                hop_length=frame_hop,
                                win_length=frame_len,
                                window=window,
                                center=center)
    return samps
def filekey(path):
    """
    Return unique index from file name: the basename with its final
    ".ext" removed (names without a dot are returned unchanged).
    """
    fname = os.path.basename(path)
    if not fname:
        raise ValueError("{}(Is directory path?)".format(path))
    # strip only the last extension; "a.b.c" -> "a.b", "abc" -> "abc"
    return fname.rsplit(".", 1)[0] if "." in fname else fname
def get_logger(
        name,
        format_str="%(asctime)s [%(pathname)s:%(lineno)s - %(levelname)s ] %(message)s",
        date_format="%Y-%m-%d %H:%M:%S",
        file=False):
    """
    Get logger instance.

    Parameters
    ----------
    name : str
        Logger name; when file=True it is also used as the log file path.
    format_str, date_format : str
        Format applied to the attached handler.
    file : bool
        Log to a file named `name` instead of the console.
    """
    def get_handler(handler):
        handler.setLevel(logging.INFO)
        formatter = logging.Formatter(fmt=format_str, datefmt=date_format)
        handler.setFormatter(formatter)
        return handler

    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Fix 1: only attach a handler once -- repeated calls with the same name
    # previously stacked handlers and duplicated every log line.
    # Fix 2: the console handler now also goes through get_handler, so
    # format_str/date_format are no longer silently ignored for file=False.
    if not logger.handlers:
        if file:
            logger.addHandler(get_handler(logging.FileHandler(name)))
        else:
            logger.addHandler(get_handler(logging.StreamHandler()))
    return logger
def make_dir(fdir):
    """
    Create directory *fdir* (with parents), tolerating empty paths and
    directories that already exist.
    """
    if not fdir or os.path.exists(fdir):
        return
    try:
        os.makedirs(fdir)
    except OSError as exc:
        # a concurrent creator is fine; anything else is reported
        if exc.errno != errno.EEXIST:
            raise RuntimeError("Error exists when mkdir -p {}".format(fdir))
"funcwj@foxmail.com"
] | funcwj@foxmail.com |
64c3b35aa6ebf57c25ec3b1db9291ee0f006dc9f | ab4f74d127bfc89813ee359bb9c779eca5426ddc | /script/label_image.runfiles/protobuf_archive/python/google/protobuf/duration_pb2.py | 953dc9c34d0dfda7a5a39984789590d95ed8a71e | [
"MIT"
] | permissive | harshit-jain-git/ImageNET | cdfd5a340b62862ad8d1cc3b9a0f30cccc481744 | 1cd4c2b70917e4709ce75422c0205fe3735a1b01 | refs/heads/master | 2022-12-11T12:47:46.795376 | 2017-12-19T05:47:26 | 2017-12-19T05:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | /home/co/.cache/bazel/_bazel_co/2e35bede1f3fd334ff5ab28da2fc1540/execroot/org_tensorflow/bazel-out/k8-opt/genfiles/external/protobuf_archive/python/google/protobuf/duration_pb2.py | [
"harshitjain1371999@gmail.com"
] | harshitjain1371999@gmail.com |
7d7a1006b9e7cc31093802d2443ca503d2227810 | bd542286bccf42a61697c97e3eed86508ddab9c4 | /CarManagement/Car/migrations/0026_carimage.py | 8365a66ae44c74a2155a5fdf0763a574a83f7a55 | [] | no_license | mahadi-interconnection/DjangoReactCarManagement | 031929e9752db83fb0059be666f1acd8c64a1c03 | cbf2d04207b73956e5942894d78651a6dc22902e | refs/heads/master | 2020-06-03T19:43:34.984121 | 2019-06-09T17:38:58 | 2019-06-09T17:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # Generated by Django 2.0.5 on 2019-05-16 03:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the CarImage model, an image
    attached to a Car with a default placeholder picture."""
    dependencies = [
        ('Car', '0025_auto_20190513_1633'),
    ]
    operations = [
        migrations.CreateModel(
            name='CarImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(default='default.jpg', upload_to='')),
                ('car', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='Car.Car')),
            ],
        ),
    ]
| [
"streametanvir@gmail.com"
] | streametanvir@gmail.com |
ff524307c8551c7850b38755517425b7c3d311eb | be18547cef4591a551321a8c78cb6b28aafa3f0d | /pumpp/feature/fft.py | 7b7ea9255de43ed713bd5b208c64976dbbaf99cd | [
"ISC"
] | permissive | Manojkl/pumpp | f1f2a476a95af548f2096c8834ba308a6e3892bc | 18d3d843d5e5e505888057fed20e58e545f4baaa | refs/heads/master | 2022-11-29T01:40:47.159380 | 2019-08-21T12:20:31 | 2019-08-21T12:20:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,168 | py | #!/usr/bin/env python
"""STFT feature extractors"""
import numpy as np
from librosa import stft, magphase
from librosa import amplitude_to_db, get_duration
from librosa.util import fix_length
from .base import FeatureExtractor
from ._utils import phase_diff, to_dtype
__all__ = ['STFT', 'STFTMag', 'STFTPhaseDiff']
class STFT(FeatureExtractor):
    '''Short-time Fourier Transform (STFT) with both magnitude
    and phase.

    Attributes
    ----------
    name : str
        The name of this transformer
    sr : number > 0
        The sampling rate of audio
    hop_length : int > 0
        The hop length of STFT frames
    n_fft : int > 0
        The number of FFT bins per frame
    log : bool
        If `True`, scale magnitude in decibels.
        Otherwise use linear magnitude.
    conv : str
        Convolution mode
    dtype : np.dtype
        The data type for the output features.  Default is `float32`.
        Setting to `uint8` will produce quantized features.

    See Also
    --------
    STFTMag
    STFTPhaseDiff
    '''
    def __init__(self, name, sr, hop_length, n_fft, log=False, conv=None, dtype='float32'):
        super(STFT, self).__init__(name, sr, hop_length, conv=conv, dtype=dtype)
        self.n_fft = n_fft
        self.log = log
        self.register('mag', 1 + n_fft // 2, self.dtype)
        self.register('phase', 1 + n_fft // 2, self.dtype)

    def transform_audio(self, y):
        '''Compute the STFT magnitude and phase.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude
            data['phase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
        D = stft(y, hop_length=self.hop_length,
                 n_fft=self.n_fft)
        # librosa >= 0.10 makes `size` keyword-only in fix_length;
        # passing it by name also works on older releases.
        D = fix_length(D, size=n_frames)
        mag, phase = magphase(D)
        if self.log:
            mag = amplitude_to_db(mag, ref=np.max)
        return {'mag': to_dtype(mag.T[self.idx], self.dtype),
                'phase': to_dtype(np.angle(phase.T)[self.idx], self.dtype)}
class STFTPhaseDiff(STFT):
    '''STFT with phase differentials

    See Also
    --------
    STFT
    '''
    def __init__(self, *args, **kwargs):
        super(STFTPhaseDiff, self).__init__(*args, **kwargs)
        # replace the raw 'phase' field with a phase-differential field
        phase_field = self.pop('phase')
        self.register('dphase', 1 + self.n_fft // 2, phase_field.dtype)

    def transform_audio(self, y):
        '''Compute the STFT magnitude and phase differential.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT magnitude
            data['dphase'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                STFT phase differential
        '''
        n_frames = self.n_frames(get_duration(y=y, sr=self.sr))
        D = stft(y, hop_length=self.hop_length,
                 n_fft=self.n_fft)
        # librosa >= 0.10 makes `size` keyword-only in fix_length;
        # passing it by name also works on older releases.
        D = fix_length(D, size=n_frames)
        mag, phase = magphase(D)
        if self.log:
            mag = amplitude_to_db(mag, ref=np.max)
        phase = phase_diff(np.angle(phase.T)[self.idx], self.conv)
        return {'mag': to_dtype(mag.T[self.idx], self.dtype),
                'dphase': to_dtype(phase, self.dtype)}
class STFTMag(STFT):
    '''STFT with only magnitude.

    See Also
    --------
    STFT
    '''
    def __init__(self, *args, **kwargs):
        super(STFTMag, self).__init__(*args, **kwargs)
        # drop the phase field registered by the parent class
        self.pop('phase')

    def transform_audio(self, y):
        '''Compute the STFT magnitude only.

        Parameters
        ----------
        y : np.ndarray
            The audio buffer

        Returns
        -------
        data : dict
            data['mag'] : np.ndarray, shape=(n_frames, 1 + n_fft//2)
                The STFT magnitude
        '''
        features = super(STFTMag, self).transform_audio(y)
        del features['phase']
        return features
| [
"brian.mcfee@nyu.edu"
] | brian.mcfee@nyu.edu |
a44647075485b6c9ab58eac3f447cf8cbf4d581b | 5b2611b9b79f0dab5ea13ed9e7e1bcf3639d5d98 | /dec_keras/__init__.py | 221a7a0c8d9825a2d0409078ddb14d5269a68cd1 | [] | no_license | miclaraia/DEC-keras | 0e3ffb55689bd9d649d25ced4da82b0c6bdb57a4 | 7249f4e0ebf10130ed13030fc761837437ff698c | refs/heads/master | 2021-09-25T13:39:03.353484 | 2018-10-22T18:00:16 | 2018-10-22T18:00:16 | 111,464,256 | 0 | 0 | null | 2017-11-20T21:21:54 | 2017-11-20T21:21:54 | null | UTF-8 | Python | false | false | 28 | py | from dec_keras.DEC import *
| [
"micheal.laraia@gmail.com"
] | micheal.laraia@gmail.com |
332501bffec57d4ff4b2b4cca896d08b65b8d3ba | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/update_instance_option.py | 4de1ea36bdbe9710839a0c8d412c9b2d57353a81 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateInstanceOption:
    """Request model for updating an IEC edge instance (name/description).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # attribute names whose values are masked as "****" in to_dict()
    sensitive_list = []
    openapi_types = {
        'name': 'str',
        'description': 'str'
    }
    attribute_map = {
        'name': 'name',
        'description': 'description'
    }
    def __init__(self, name=None, description=None):
        """UpdateInstanceOption - a model defined in huaweicloud sdk"""
        self._name = None
        self._description = None
        self.discriminator = None
        # only assign through the property setters when a value was given
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
    @property
    def name(self):
        """Gets the name of this UpdateInstanceOption.

        Updated edge instance name; may only consist of Chinese characters,
        letters, digits and the characters "_", "-" and ".".

        :return: The name of this UpdateInstanceOption.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this UpdateInstanceOption.

        Updated edge instance name; may only consist of Chinese characters,
        letters, digits and the characters "_", "-" and ".".

        :param name: The name of this UpdateInstanceOption.
        :type: str
        """
        self._name = name
    @property
    def description(self):
        """Gets the description of this UpdateInstanceOption.

        Description; must not contain "<" or ">".

        :return: The description of this UpdateInstanceOption.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this UpdateInstanceOption.

        Description; must not contain "<" or ">".

        :param description: The description of this UpdateInstanceOption.
        :type: str
        """
        self._description = description
    def to_dict(self):
        """Returns the model properties as a dict, recursively converting
        nested models and masking attributes listed in sensitive_list."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model (JSON)."""
        import simplejson as json
        # Python 2 only: force UTF-8 default encoding before serializing
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateInstanceOption):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
57970b46f486cd918e78eb5d99b794e00dedb0cf | 67430c0de6ba62ff6faca80e5de5d9fedf45e2f1 | /to_mrp_backdate/models/mrp_workcenter_productivity.py | fa89c4bf74daae418dbca47f64ff51ea53ce1ea6 | [] | no_license | blue-connect/inl_extra_addons_oe13 | 2bc62d1eeeff3a450a0891f37aca614bca7050bd | 58144a02ce00abd3cf86dd3b84dfae8163eb6d26 | refs/heads/master | 2022-12-24T04:15:46.991096 | 2020-09-29T10:38:14 | 2020-09-29T10:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from odoo import api, fields, models
class MrpWorkcenterProductivity(models.Model):
    """Extends workcenter productivity with an optional backdate that is
    propagated through the context when blocking."""
    _inherit = 'mrp.workcenter.productivity'

    backdate = fields.Datetime(string='Backdate', help="If filled, this date and time will be used instead of the current date and time")

    def button_block(self):
        self.ensure_one()
        # When a backdate is set, route the call through a recordset whose
        # context carries the manual validation date/time.
        record = self
        if self.backdate:
            record = self.with_context(manual_validate_date_time=self.backdate)
        super(MrpWorkcenterProductivity, record).button_block()
| [
"kikin.kusumah@gmail.com"
] | kikin.kusumah@gmail.com |
377e861bde090e01e900aa2779b55f2f6fb308cd | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /未完成题目/1501-1550/1538/1538_Python_1.py | c467812968377363498155d449155bd2f2f4ffd6 | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # """
# This is the ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
class ArrayReader(object):
    """Judge-provided API stub for LeetCode 1538 -- not to be implemented."""
    # Compares the sum of arr[l..r] with the sum of arr[x..y]
    # return 1 if sum(arr[l..r]) > sum(arr[x..y])
    # return 0 if sum(arr[l..r]) == sum(arr[x..y])
    # return -1 if sum(arr[l..r]) < sum(arr[x..y])
    def compareSub(self, l: int, r: int, x: int, y: int) -> int:
        pass
    # Returns the length of the array
    def length(self) -> int:
        pass
class Solution:
    def guessMajority(self, reader: 'ArrayReader') -> int:
        # TODO: unimplemented stub (this file lives in the "unfinished
        # problems" folder); currently returns None.
        pass
if __name__ == "__main__":
    # Placeholder entry point -- nothing to run for this unfinished stub.
    pass
| [
"1278729001@qq.com"
] | 1278729001@qq.com |
d79939e1962f8f6b1da5021ec40c097fad37386b | 5352ad5f07ae81c6406d20c018f68d29788b2290 | /exps/standalone/diff_sims/mem_ops.py | 054efdf780b2cd1d8b08a13f850125a4a23530b4 | [
"Apache-2.0"
] | permissive | sillywalk/GRSan | 9b23c95c272fa06fcaaec4fee33e22eb523a9319 | a0adb1a90d41ff9006d8c1476546263f728b3c83 | refs/heads/master | 2020-06-08T20:53:24.667084 | 2019-06-27T14:56:40 | 2019-06-27T14:56:40 | 193,304,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,735 | py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
def pa(lb, arr):
    """Print label *lb* followed by *arr* in bracketed scientific notation."""
    body = ''.join('{:0.2E} '.format(value) for value in arr)
    sys.stdout.write(lb + ': [' + body + ']\n')
def pi(lb, *ints):
    """Print label *lb* followed by the given numbers in scientific notation."""
    body = ''.join('{:0.2E} '.format(value) for value in ints)
    sys.stdout.write(lb + ': ' + body + '\n')
def weighted_avg(arr):
    """Hamming-window-weighted average of *arr* (weights sum to 1)."""
    weights = np.hamming(len(arr))
    return np.dot(np.asarray(arr), weights / np.sum(weights))
def get_gauss(sigma):
    """Return a kernel g(x_j, i, j) = x_j * N(i - j; 0, sigma^2)."""
    norm_const = np.sqrt(2 * np.pi * sigma ** 2)

    def gaussian(x_j, i, j):
        return x_j * np.exp(-(i - j) ** 2 / (2 * sigma ** 2)) / norm_const
    return gaussian
def get_d_gauss_di(sigma):
    """Return the derivative of the Gaussian kernel with respect to i."""
    norm_const = np.sqrt(2 * np.pi * sigma ** 6)

    def d_gauss_di(x_j, i, j):
        diff = i - j
        return -x_j * diff * np.exp(-diff ** 2 / (2 * sigma ** 2)) / norm_const
    return d_gauss_di
def get_d_gauss_dj(sigma):
    """Return the derivative of the Gaussian kernel with respect to j.

    NOTE(review): a second get_d_gauss_dj with the opposite sign is
    defined later in this module and shadows this one -- confirm which
    version is intended.
    """
    norm_const = np.sqrt(2 * np.pi * sigma ** 6)

    def d_gauss_dj(x_j, i, j):
        diff = i - j
        return -x_j * diff * np.exp(-diff ** 2 / (2 * sigma ** 2)) / norm_const
    return d_gauss_dj
def abs_exp(x_j, i, j):
    """Laplacian-style kernel: scales x_j by exp(-|i - j|)."""
    distance = np.abs(i - j)
    return x_j * np.exp(-distance)
def d_abs_exp(x_j, i, j):
    """Derivative (w.r.t. i) of abs_exp: -x_j * exp(-|i - j|) * sign(i - j).

    The original expression divided (i - j) by |i - j|, which evaluates
    to nan (and warns) when i == j; np.sign yields the subgradient 0
    there instead, keeping downstream arithmetic finite.
    """
    return -x_j * np.exp(-np.abs(i - j)) * np.sign(i - j)
def sim_read(A, i, filt, use_log=False):
    """Soft (differentiable) memory read: superpose filt(A[j], i, j)
    over every memory cell j, evaluated at index positions i."""
    if use_log:
        print(max(A)*0.000001)  # debug print kept from the original
        A = np.log(A + max(A)*0.000001)
    acc = np.zeros(len(i))
    for j, cell in enumerate(A):
        acc = acc + filt(cell, i, j)
    return acc
def dyidi(A, i, d_filt = get_d_gauss_di(1.0), use_log=False):
    # Derivative of the soft read sim_read(A, i) w.r.t. the index i,
    # accumulated over every memory cell j.
    # NOTE(review): the default d_filt is built once at import time.
    if (use_log):
        A = np.log(A + max(A)*0.000001)
    # NOTE(review): numpy arrays have no attribute literally named "len",
    # so this getattr check is always False for arrays and the else-branch
    # runs; it still produces an array result via broadcasting.
    # hasattr(i, '__len__') was probably intended -- confirm.
    if (getattr(i, "len", False)):
        dy = np.zeros(len(i))
        for j in range(len(A)):
            dy += d_filt(A[j], i, j)
        return dy
    # if (type(i) == int):
    else:
        dy = 0.0
        for j in range(len(A)):
            dy += d_filt(A[j], i, j)
        return dy
def dread(A, ind, didxin, v=False):
    # Finite-difference estimate of d(read)/d(index): probes memory one
    # step of didxin to each side of ind and averages the two slopes.
    samples = [-1, 1]
    N = len(A)
    dydis = []
    for i, s in enumerate(samples):
        if (didxin != 0):
            # ASSUMPTION: wrap mem if beyond bounds (specific to crc)
            xs = int(ind + s*didxin)
            # valid index in 0:N
            modxs = xs % N
            # valid dind within -N:N
            boundedxs = modxs
            if (xs < 0):
                boundedxs -= N
            # NOTE(review): s*boundedxs can be 0 when xs wraps exactly to 0,
            # which would raise ZeroDivisionError -- confirm callers avoid it.
            dydis.append((int(A[modxs]) - int(A[ind]))/(s*boundedxs))
        else:
            dydis.append(0)
    dydi = weighted_avg(dydis)
    return dydi
def dreadxin(A, ind, didxin, v=False):
    # Finite-difference estimate of the read derivative w.r.t. the input:
    # probes one didxin step to each side of ind and averages both slopes.
    samples = [-1, 1]
    N = len(A)
    dydis = []
    for i, s in enumerate(samples):
        if (didxin != 0):
            # ASSUMPTION: wrap mem if beyond bounds (specific to crc)
            xs = int(ind + s*didxin)
            xs = xs % N
            # FIX: need to set s*didxin so derivative is wrt input not xin
            # currently inconsistent with other diff ops
            dydis.append((int(A[xs]) - int(A[ind]))/s)
        else:
            dydis.append(0)
    dydi = weighted_avg(dydis)
    # pi('DREAD: dydi didxin', dydi, didxin)
    return dydi
def dreadxin_sim(A, ind, xin_bytes, v=False):
    # Derivative estimate via direct simulation: re-runs a two-byte CRC
    # table-lookup pass with the input perturbed by +/-256 (high byte) and
    # compares the resulting table reads against the unperturbed one.
    def sim_crc_read2(byte_arr):
        # One CRC32-style update pass over byte_arr using lookup table A;
        # returns the final table index (v3) and table value (v4).
        value = 0xffffffff
        for b in byte_arr:
            v1 = b
            v2 = v1 ^ value
            v3 = v2 & 0xff
            v4 = A[v3]
            v5 = value >> 8
            value = v4 ^ v5
            # value = table[(ord(ch) ^ value) & 0xff] ^ (value >> 8)
        return v3, v4
    # sim with xin0-1, xin0+1
    # NOTE(review): int.to_bytes raises OverflowError if the perturbed value
    # leaves the 2-byte unsigned range -- confirm callers guarantee this.
    xins1 = int.to_bytes(int.from_bytes(xin_bytes, 'big') - 256, 2, 'big')
    xins2 = int.to_bytes(int.from_bytes(xin_bytes, 'big') + 256, 2, 'big')
    simx1, simy1 = sim_crc_read2(xins1)
    simx2, simy2 = sim_crc_read2(xins2)
    if v:
        pi('sr1', simx1, simy1)
        pi('sr2', simx2, simy2)
    x = ind
    y = A[ind]
    # central-difference average of the backward and forward slopes
    dydi1 = (int(simy1) - int(y))/-1
    dydi2 = (int(simy2) - int(y))/1
    dydi = (dydi1 + dydi2) / 2
    if v:
        print()
    return dydi
def viz_read(A, filt, d_filt):
    # Plot the memory contents (log scale) with the smoothed read
    # approximation, plus the derivative dy/di, side by side.
    plt.figure(figsize=(16,4))
    plt.subplot(1,2,1)
    plt.bar(np.arange(len(A)), np.log(A+max(A)*0.000001), width=0.25)
    plt.axhline(linewidth=1,color='gray')
    plt.title('Memory & Approximation:')
    all_i = np.linspace(0, len(A)-1, len(A)*25)
    # NOTE(review): sim_read is called without use_log even though the bars
    # are drawn on a log scale -- confirm this mismatch is intentional.
    y = sim_read(A, all_i, filt)
    plt.plot(all_i, y, 'r', linewidth=2)
    plt.xlabel('memory index')
    plt.subplot(1,2,2)
    plt.plot(all_i, dyidi(A, all_i, d_filt),
             linewidth=2)
    plt.axhline(linewidth=1,color='gray')
    plt.title('dydi')
    plt.xlabel('i')
    plt.ylabel('dydi')
def get_d_gauss_dj(sigma):
    # NOTE(review): this re-definition shadows the earlier get_d_gauss_dj
    # defined above and flips the sign of the result (positive here,
    # negative above) -- confirm which version is intended.
    def d_gauss_dj(x_j, i, j):
        return x_j / (np.sqrt(2*np.pi*sigma**6)) *\
                np.exp(-(i - j)**2/(2*sigma**2)) *\
                (i - j)
    return d_gauss_dj
def sim_write(x_j, i, j, filt):
    """Soft write: contribution of value x_j stored at j, evaluated at i."""
    return filt(x_j, i, j)
def dyidj(x_j, i, j, d_filt):
    """Derivative of the soft write w.r.t. j, applied to log(x_j)."""
    log_value = np.log(x_j)
    return d_filt(log_value, i, j)
def viz_write(A, i, j, filt, d_filt):
    # Plot memory with the smoothed write contribution of A[j] spread over
    # positions all_j, plus the derivative dyi/dj, side by side.
    plt.figure(figsize=(16,4))
    plt.subplot(1,2,1)
    plt.bar(np.arange(len(A)), A, width=0.25)
    plt.axhline(linewidth=1,color='gray')
    plt.title('Memory & Approximation:')
    all_j = np.linspace(0, len(A)-1, len(A)*25)
    y = sim_write(A[j], i, all_j, filt)
    plt.plot(all_j, y, 'r', linewidth=2)
    plt.xlabel('memory index')
    plt.subplot(1,2,2)
    plt.plot(all_j, dyidj(A[j], i, all_j, d_filt),
             linewidth=2)
    plt.axhline(linewidth=1,color='gray')
    plt.title('dyidj')
    plt.xlabel('j')
    plt.ylabel('dyidj')
| [
"i.m.ralk@gmail.com"
] | i.m.ralk@gmail.com |
c5d8e725634e1fb6e5a5fb1a9721ca6045aad126 | 24cee07743790afde5040c38ef95bb940451e2f6 | /acode/abc305/d/ans.py | 31437f4bd7bf9de4c38755d415269925e0b3c403 | [] | no_license | tinaba96/coding | fe903fb8740d115cf5a7f4ff5af73c7d16b9bce1 | d999bf5620e52fabce4e564c73b9f186e493b070 | refs/heads/master | 2023-09-01T02:24:33.476364 | 2023-08-30T15:01:47 | 2023-08-30T15:01:47 | 227,594,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | from bisect import *
# Competitive-programming solution: build prefix sums RA over every second
# interval of the event times A, then answer each (l, r) query from stdin
# as solv(r) - solv(l). A sentinel 10**9+1 terminates the event list.
n = int(input())
A = list(map(int, input().split())) + [10**9+1]
RA = [0]
for i, ai in enumerate(A):
    if i == 0: continue
    if i%2==0:
        RA.append(RA[-1] + A[i] - A[i-1])
    else:
        RA.append(RA[-1])
# print(RA)
def solv(r):
    # Accumulated interval total up to moment r (binary search over A).
    rp = bisect_right(A, r)
    # print(rp, RA[rp-1], A[rp-1], r)
    if rp%2 == 0:
        # r falls inside a counted interval: add the partial segment
        ret = RA[rp-1] + (r - A[rp-1])
    else:
        ret = RA[rp-1]
    return ret
q = int(input())
for _ in range(q):
    l, r = map(int, input().split())
    ret = solv(r) - solv(l)
    print(ret)
| [
"tinaba178.96@gmail.com"
] | tinaba178.96@gmail.com |
73a07f3af522e0ba840c37e388931825422ec3c0 | 08760dda1de398381f639ac82f70bd97a22288dc | /dicts/954_array_of_doubled_pairs.py | 1ece687e836f7062590851d0a6490593de01ef8f | [] | no_license | liketheflower/CSCI13200 | 9a8719f6ecb1295cee22bd8a4abd9556594a0d14 | 18053e4c2513ad22d26d7b4c0528b34494c0ed8b | refs/heads/master | 2020-07-11T18:37:45.797196 | 2019-12-10T17:25:58 | 2019-12-10T17:25:58 | 204,616,708 | 7 | 5 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | class Solution:
def canReorderDoubled(self, A: List[int]) -> bool:
neg, zero, pos = [], [], []
for a in A:
if a > 0:pos.append(a)
elif a<0:neg.append(-a)
else:zero.append(a)
if len(zero)%2!=0:return False
def check(a):
if not a:return True
if len(a)%2!=0:return False
cnt = collections.Counter(a)
for k in sorted(cnt.keys()):
if cnt[k]>0:
cnt[2*k] -= cnt[k]
if cnt[2*k]<0:return False
return True
return check(neg) and check(pos)
| [
"jim.morris.shen@gmail.com"
] | jim.morris.shen@gmail.com |
be57d8fe06e399091726d07a5f4e7a5ebfa57736 | 6aab2d11b3ab7619ee26319886dcfc771cbcaba5 | /0x11-python-network_1/4-hbtn_status.py | 74b861309d0771e6458caf2b759db25339833d7e | [] | no_license | IhebChatti/holbertonschool-higher_level_programming | ef592f25eb077e182a0295cb5f2f7d69c7a8ab67 | ca58262c6f82f98b2022344818e20d382cf82592 | refs/heads/master | 2022-12-18T10:06:30.443550 | 2020-09-24T17:31:30 | 2020-09-24T17:31:30 | 259,174,423 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #!/usr/bin/python3
"""[sumPython script that fetches
https://intranet.hbtn.io/statusmary]
"""
import requests
if __name__ == "__main__":
url = "https://intranet.hbtn.io/status"
req = requests.get(url)
req = req.text
print("Body response:")
print("\t- type: {}".format(type(req)))
print("\t- content: {}".format(req))
| [
"iheb.chatti@holbertonschool.com"
] | iheb.chatti@holbertonschool.com |
5047cb53e99c130ffc9ab800fd5d2f469be741cc | 0f4cacd40260137d3d0b3d1b34be58ac76fc8bd0 | /2016/advent2.py | 0d31b60fbf66e6737409d47930d2a72fe04da333 | [] | no_license | timrprobocom/advent-of-code | 45bc765e6ee84e8d015543b1f2fa3003c830e60e | dc4d8955f71a92f7e9c92a36caeb954c208c50e7 | refs/heads/master | 2023-01-06T07:19:03.509467 | 2022-12-27T18:28:30 | 2022-12-27T18:28:30 | 161,268,871 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | import sys
moves = """ULL
RRDDD
LURDL
UUUUD""".splitlines()
# Part-one sample: 3x3 keypad padded with space "walls"; start on key 5.
x,y = (2,2)
pad = ( '     ', ' 123 ', ' 456 ', ' 789 ', '     ' )
# Part two: real puzzle input and diamond keypad override the sample above.
moves = open('../Downloads/day2.txt').readlines()
x,y = (4,4)
pad = (
    '       ',
    '   1   ',
    '  234  ',
    ' 56789 ',
    '  ABC  ',
    '   D   ',
    '       ')
# Walk each instruction line; moves that would leave the pad (hit a space)
# are ignored. Print the key under the cursor after each line.
# NOTE(review): Python 2 print statement -- this script is Python 2 only.
for ln in moves:
    for c in ln.strip():
        nx,ny = x,y
        if c=='R': nx += 1
        elif c=='U': ny -= 1
        elif c=='L': nx -= 1
        elif c=='D': ny += 1
        if pad[ny][nx] != ' ':
            x,y=nx,ny
    print pad[y][x],
| [
"timr@probo.com"
] | timr@probo.com |
3cda652059bf909ad4e252db11faa152f6204318 | bf06bf980ef359615604d53567d1cc435a980b78 | /data/HW3/hw3_306.py | 1d990ae03ca4494d616153c28f0b26f761a9e0c2 | [] | no_license | am3030/IPT | dd22f5e104daa07a437efdf71fb58f55bcaf82d7 | 6851c19b2f25397f5d4079f66dbd19ba982245c5 | refs/heads/master | 2021-01-23T05:03:53.777868 | 2017-03-09T18:10:36 | 2017-03-09T18:10:36 | 86,270,526 | 0 | 0 | null | 2017-03-26T22:53:42 | 2017-03-26T22:53:42 | null | UTF-8 | Python | false | false | 958 | py |
# Phase-change points of water on each supported temperature scale.
KELVIN_FREEZING_POINT = 273.16
KELVIN_BOILING_POINT = 373.16
CELSIUS_FREEZING_POINT = 0
CELSIUS_BOILING_POINT = 100
def main():
    """Prompt for a temperature and scale, then report water's phase."""
    temp = float(input("Please enter the temperature: "))
    scale = str(input("Please enter 'C' for Celsius, or 'K' for Kelvin: "))
    # Select the phase-change points for the chosen scale.
    if scale == "C":
        limits = (CELSIUS_FREEZING_POINT, CELSIUS_BOILING_POINT)
    elif scale == "K":
        limits = (KELVIN_FREEZING_POINT, KELVIN_BOILING_POINT)
    else:
        return  # unrecognized scale: nothing is printed (as before)
    freezing, boiling = limits
    if temp <= freezing:
        print("At this temperature, water is a (frozen) solid.")
    elif temp < boiling:
        print("At this temperature, water is a liquid.")
    else:
        print("At this temperature, water is a gas.")
main()
| [
"mneary1@umbc.edu"
] | mneary1@umbc.edu |
f51148a2d076ea723fb98ec72996d851045fc639 | f048f66977ebcfd3973f5cb41911e5de8b1bf7f5 | /pullenti/ner/person/internal/PersonIdentityToken.py | 68af374a694ddaf3a64c276acf443b266f2d2e36 | [] | no_license | AAA1911/PullentiPython | e01223d2d8656a8fbcc0873446a12d7e5c913f4a | f25b228c8eef9b70acb1285f405c976542342319 | refs/heads/master | 2020-12-22T12:56:21.701229 | 2019-12-11T08:34:43 | 2019-12-11T08:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103,548 | py | # Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
import io
import typing
from pullenti.unisharp.Utils import Utils
from pullenti.unisharp.Misc import RefOutArgWrapper
from pullenti.ner.core.NounPhraseParseAttr import NounPhraseParseAttr
from pullenti.morph.MorphGender import MorphGender
from pullenti.morph.MorphWordForm import MorphWordForm
from pullenti.ner.MorphCollection import MorphCollection
from pullenti.ner.Token import Token
from pullenti.ner.MetaToken import MetaToken
from pullenti.morph.MorphNumber import MorphNumber
from pullenti.ner.core.GetTextAttr import GetTextAttr
from pullenti.ner.person.internal.PersonAttrToken import PersonAttrToken
from pullenti.ner.TextToken import TextToken
from pullenti.morph.MorphCase import MorphCase
from pullenti.ner.person.internal.FioTemplateType import FioTemplateType
from pullenti.ner.NumberToken import NumberToken
from pullenti.morph.MorphClass import MorphClass
from pullenti.ner.ReferentToken import ReferentToken
from pullenti.morph.MorphBaseInfo import MorphBaseInfo
from pullenti.ner.core.NumberHelper import NumberHelper
from pullenti.ner.Referent import Referent
from pullenti.ner.core.MiscHelper import MiscHelper
from pullenti.ner.person.internal.PersonMorphCollection import PersonMorphCollection
from pullenti.ner.core.NounPhraseHelper import NounPhraseHelper
from pullenti.ner.person.PersonReferent import PersonReferent
from pullenti.ner.core.BracketHelper import BracketHelper
from pullenti.ner.person.internal.PersonItemToken import PersonItemToken
from pullenti.ner.person.internal.PersonHelper import PersonHelper
class PersonIdentityToken(MetaToken):
def __init__(self, begin : 'Token', end : 'Token') -> None:
super().__init__(begin, end, None)
self.coef = 0
self.firstname = None;
self.lastname = None;
self.middlename = None;
self.ontology_person = None;
self.typ = FioTemplateType.UNDEFINED
@property
def probable_gender(self) -> 'MorphGender':
if (self.morph.gender == MorphGender.FEMINIE or self.morph.gender == MorphGender.MASCULINE):
return self.morph.gender
fem = 0
mus = 0
for i in range(2):
col = (self.firstname if i == 0 else self.lastname)
if (col is None):
continue
isf = False
ism = False
for v in col.items:
if ((((v.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
ism = True
if ((((v.gender) & (MorphGender.FEMINIE))) != (MorphGender.UNDEFINED)):
isf = True
if (ism):
mus += 1
if (isf):
fem += 1
if (mus > fem):
return MorphGender.MASCULINE
if (fem > mus):
return MorphGender.FEMINIE
return MorphGender.UNDEFINED
def __str__(self) -> str:
res = io.StringIO()
print("{0} {1}: {2}".format(self.coef, Utils.enumToString(self.typ), ("" if self.lastname is None else str(self.lastname))), end="", file=res, flush=True)
print(" {0} {1}; {2}".format(("" if self.firstname is None else str(self.firstname)), ("" if self.middlename is None else str(self.middlename)), str(self.morph)), end="", file=res, flush=True)
return Utils.toStringStringIO(res)
@staticmethod
def create_lastname(pit : 'PersonItemToken', inf : 'MorphBaseInfo') -> 'PersonMorphCollection':
res = PersonMorphCollection()
if (pit.lastname is None):
PersonIdentityToken.__set_value(res, pit.begin_token, inf)
else:
PersonIdentityToken.__set_value2(res, pit.lastname, inf)
return res
@staticmethod
def try_attach_latin_surname(pit : 'PersonItemToken', ontos : 'IntOntologyCollection') -> 'PersonReferent':
if (pit is None):
return None
if (pit.lastname is not None and ((pit.lastname.is_in_dictionary or pit.lastname.is_lastname_has_std_tail))):
p = PersonReferent()
p.add_slot(PersonReferent.ATTR_LASTNAME, pit.lastname.vars0_[0].value, False, 0)
return p
return None
@staticmethod
def try_attach_onto_for_single(pit : 'PersonItemToken', ontos : 'IntOntologyCollection') -> 'PersonReferent':
if ((pit is None or ontos is None or pit.value is None) or pit.typ == PersonItemToken.ItemType.INITIAL):
return None
if (len(ontos.items) > 30):
return None
p0 = None
cou = 0
fi = False
sur = True
for p in ontos.items:
if (isinstance(p.referent, PersonReferent)):
p00 = None
if (pit.firstname is not None):
for v in pit.firstname.vars0_:
if (p.referent.find_slot(PersonReferent.ATTR_FIRSTNAME, v.value, True) is not None):
p00 = (Utils.asObjectOrNull(p.referent, PersonReferent))
fi = True
break
if (pit.lastname is not None):
for v in pit.lastname.vars0_:
if (p.referent.find_slot(PersonReferent.ATTR_LASTNAME, v.value, True) is not None):
p00 = (Utils.asObjectOrNull(p.referent, PersonReferent))
sur = True
break
if (p00 is None):
if (p.referent.find_slot(PersonReferent.ATTR_FIRSTNAME, pit.value, True) is not None):
p00 = (Utils.asObjectOrNull(p.referent, PersonReferent))
fi = True
elif (p.referent.find_slot(PersonReferent.ATTR_LASTNAME, pit.value, True) is not None):
p00 = (Utils.asObjectOrNull(p.referent, PersonReferent))
sur = True
if (p00 is not None):
p0 = p00
cou += 1
if (p0 is not None and cou == 1):
if (fi):
li = list()
li.append(pit)
king = PersonIdentityToken.__try_attach_king(li, 0, pit.morph, False)
if (king is not None):
return None
return p0
return None
@staticmethod
def try_attach_onto_for_duble(pit0 : 'PersonItemToken', pit1 : 'PersonItemToken', ontos : 'IntOntologyCollection') -> 'PersonReferent':
if ((pit0 is None or pit0.firstname is None or pit1 is None) or pit1.middlename is None or ontos is None):
return None
if (len(ontos.items) > 100):
return None
p0 = None
cou = 0
for p in ontos.items:
if (p.referent is not None):
for v in pit0.firstname.vars0_:
if (p.referent.find_slot(PersonReferent.ATTR_FIRSTNAME, v.value, True) is None):
continue
if (p.referent.find_slot(PersonReferent.ATTR_MIDDLENAME, pit1.middlename.vars0_[0].value, True) is None):
continue
p0 = (Utils.asObjectOrNull(p.referent, PersonReferent))
cou += 1
break
if (p0 is not None and cou == 1):
return p0
return None
@staticmethod
def try_attach_onto_ext(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', ontos : 'ExtOntology') -> 'PersonIdentityToken':
if (ind >= len(pits) or pits[ind].typ == PersonItemToken.ItemType.INITIAL or ontos is None):
return None
if (len(ontos.items) > 1000):
return None
otl = ontos.attach_token(PersonReferent.OBJ_TYPENAME, pits[ind].begin_token)
return PersonIdentityToken.__try_attach_onto(pits, ind, inf, otl, False, False)
@staticmethod
def try_attach_onto_int(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', ontos : 'IntOntologyCollection') -> 'PersonIdentityToken':
if (ind >= len(pits) or pits[ind].typ == PersonItemToken.ItemType.INITIAL):
return None
if (len(ontos.items) > 1000):
return None
otl = ontos.try_attach(pits[ind].begin_token, None, False)
res = PersonIdentityToken.__try_attach_onto(pits, ind, inf, otl, False, False)
if (res is not None):
return res
return None
    @staticmethod
    def __try_attach_onto(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', otl : typing.List['IntOntologyToken'], is_local : bool, is_attr_before : bool) -> 'PersonIdentityToken':
        """Match the items around *ind* against ontology tokens *otl*.

        For every ontology token that ends exactly at pits[ind], several FIO
        template shapes (surname+initials, name+surname, asian, ...) are tried
        around the anchor; each candidate is kept only when the freshly built
        referent is compatible (can_be_equals) with the ontology person.
        Succeeds only when exactly one ontology person matched.
        NOTE(review): is_local is currently unused in this body.
        """
        if (otl is None or len(otl) == 0):
            return None
        res = list()
        onto_persons = list()
        if (otl is not None):
            for ot in otl:
                if (ot.end_token == pits[ind].end_token):
                    pers = Utils.asObjectOrNull(ot.item.referent, PersonReferent)
                    if (pers is None):
                        continue
                    if (pers in onto_persons):
                        continue
                    # termin with free word order: only a full identity match from position 0
                    if (ot.termin.ignore_terms_order):
                        if (ind != 0):
                            continue
                        pit = PersonIdentityToken.try_attach_identity(pits, inf)
                        if (pit is None):
                            continue
                        p = PersonReferent()
                        p._add_identity(pit.lastname)
                        pit.ontology_person = p
                        onto_persons.append(pers)
                        res.append(pit)
                        continue
                    # gender of the context must not contradict the ontology person
                    if (inf.gender == MorphGender.MASCULINE):
                        if (pers.is_female):
                            continue
                    elif (inf.gender == MorphGender.FEMINIE):
                        if (pers.is_male):
                            continue
                    inf0 = MorphBaseInfo._new2494(inf.case_, inf.gender)
                    if (not ot.morph.case_.is_undefined and inf0.case_ == MorphCase.ALL_CASES and ot.begin_token == ot.end_token):
                        inf0.case_ = ot.morph.case_
                    if (pers.is_male):
                        inf0.gender = MorphGender.MASCULINE
                    elif (pers.is_female):
                        inf0.gender = MorphGender.FEMINIE
                    # collect every FIO template that can be laid out around the anchor item
                    vars0_ = list()
                    if (ind > 1):
                        pit = PersonIdentityToken.__try_attachiisurname(pits, ind - 2, inf0)
                        if ((pit) is not None):
                            vars0_.append(pit)
                        pit = PersonIdentityToken.__try_attach_name_secname_surname(pits, ind - 2, inf0, False)
                        if ((pit) is not None):
                            vars0_.append(pit)
                    if (ind > 0):
                        pit = PersonIdentityToken.__try_attachiisurname(pits, ind - 1, inf0)
                        if ((pit) is not None):
                            vars0_.append(pit)
                        pit = PersonIdentityToken.__try_attach_name_surname(pits, ind - 1, inf0, False, is_attr_before)
                        if ((pit) is not None):
                            vars0_.append(pit)
                    if ((ind + 2) < len(pits)):
                        pit = PersonIdentityToken.__try_attach_surnameii(pits, ind, inf0)
                        if ((pit) is not None):
                            vars0_.append(pit)
                        pit = PersonIdentityToken.__try_attach_surname_name_secname(pits, ind, inf0, False, False)
                        if ((pit) is not None):
                            vars0_.append(pit)
                        if ((ind + 1) < len(pits)):
                            pit = PersonIdentityToken.__try_attach_surname_name(pits, ind, inf0, False)
                            if ((pit) is not None):
                                # surname+name competes with an existing surname+name+secname variant
                                pit0 = None
                                for v in vars0_:
                                    if (v.typ == FioTemplateType.SURNAMENAMESECNAME):
                                        pit0 = v
                                        break
                                if (pit0 is None or (pit0.coef < pit.coef)):
                                    vars0_.append(pit)
                            pit = PersonIdentityToken.__try_attach_asian(pits, ind, inf0, 3, False)
                            if ((pit) is not None):
                                vars0_.append(pit)
                    else:
                        pit = PersonIdentityToken.__try_attach_asian(pits, ind, inf0, 2, False)
                        if ((pit) is not None):
                            vars0_.append(pit)
                    # pick the best variant whose referent is compatible with the ontology person
                    pit = (None)
                    for v in vars0_:
                        if (v.coef < 0):
                            continue
                        p = PersonReferent()
                        if (v.ontology_person is not None):
                            p = v.ontology_person
                        else:
                            if (v.typ == FioTemplateType.ASIANNAME):
                                # NOTE(review): adds to the ontology person 'pers', not to the new 'p'
                                # (asymmetric with the branch below) — looks intentional in the
                                # generated source but worth confirming against the C# original
                                pers._add_identity(v.lastname)
                            else:
                                p._add_fio_identity(v.lastname, v.firstname, v.middlename)
                            v.ontology_person = p
                        if (not pers.can_be_equals(p, Referent.EqualType.WITHINONETEXT)):
                            # incompatible variant: discard the current best if it is not better
                            if (pit is not None and v.coef >= pit.coef):
                                pit = (None)
                        elif (pit is None):
                            pit = v
                        elif (pit.coef < v.coef):
                            pit = v
                    if (pit is None):
                        # fall back to a lone-surname match with a decent coefficient
                        pit = PersonIdentityToken.__try_attach_single_surname(pits, ind, inf0)
                        if (pit is None or (pit.coef < 2)):
                            continue
                        p = PersonReferent()
                        p._add_fio_identity(pit.lastname, None, None)
                        pit.ontology_person = p
                    onto_persons.append(pers)
                    res.append(pit)
        if (len(res) == 0):
            return None
        # only an unambiguous single match is merged and returned
        if (len(res) == 1):
            res[0].ontology_person.merge_slots(onto_persons[0], True)
            return res[0]
        return None
@staticmethod
def create_typ(pits : typing.List['PersonItemToken'], typ_ : 'FioTemplateType', inf : 'MorphBaseInfo') -> 'PersonIdentityToken':
if (typ_ == FioTemplateType.SURNAMENAMESECNAME):
return PersonIdentityToken.__try_attach_surname_name_secname(pits, 0, inf, False, True)
return None
@staticmethod
def sort(li : typing.List['PersonIdentityToken']) -> None:
if (li is not None and len(li) > 1):
k = 0
while k < len(li):
ch = False
i = 0
while i < (len(li) - 1):
if (li[i].coef < li[i + 1].coef):
ch = True
v = li[i]
li[i] = li[i + 1]
li[i + 1] = v
i += 1
if (not ch):
break
k += 1
    @staticmethod
    def try_attach_for_ext_onto(pits : typing.List['PersonItemToken']) -> typing.List['PersonIdentityToken']:
        """Relaxed attachment used for external-ontology entries.

        Handles exactly three shapes: value+initial+value (name I. surname),
        value+value+value (surname name patronymic) and value+value
        (name/surname pair in either order). Each accepted shape gets a fixed
        coefficient of 2. Returns a single-element list or None.
        """
        pit = None
        if (len(pits) == 3):
            if (pits[0].typ == PersonItemToken.ItemType.VALUE and pits[1].typ == PersonItemToken.ItemType.INITIAL and pits[2].typ == PersonItemToken.ItemType.VALUE):
                # "Name I. Surname"
                pit = PersonIdentityToken._new2495(pits[0].begin_token, pits[2].end_token, FioTemplateType.NAMEISURNAME)
                PersonIdentityToken.manage_firstname(pit, pits[0], None)
                PersonIdentityToken.manage_lastname(pit, pits[2], None)
                PersonIdentityToken.__manage_middlename(pit, pits[1], None)
                pit.coef = (2)
            elif (pits[0].typ == PersonItemToken.ItemType.VALUE and pits[1].typ == PersonItemToken.ItemType.VALUE and pits[2].typ == PersonItemToken.ItemType.VALUE):
                # "Surname Name Patronymic" — accept only when the role assignment is plausible
                ok = False
                if (pits[0].firstname is None and pits[1].middlename is None and ((pits[1].firstname is not None or pits[2].middlename is not None))):
                    ok = True
                elif (pits[0].firstname is not None and ((pits[0].firstname.is_lastname_has_std_tail or pits[0].firstname.is_in_dictionary))):
                    ok = True
                if (ok):
                    pit = PersonIdentityToken._new2495(pits[0].begin_token, pits[2].end_token, FioTemplateType.SURNAMENAMESECNAME)
                    PersonIdentityToken.manage_firstname(pit, pits[1], None)
                    PersonIdentityToken.manage_lastname(pit, pits[0], None)
                    PersonIdentityToken.__manage_middlename(pit, pits[2], None)
                    pit.coef = (2)
        elif (len(pits) == 2 and pits[0].typ == PersonItemToken.ItemType.VALUE and pits[1].typ == PersonItemToken.ItemType.VALUE):
            # two values: decide which one is the first name (i ^ 1 is the other index)
            nam = None
            sur = None
            for i in range(2):
                if (((pits[i].firstname is not None and pits[i].firstname.is_in_dictionary)) or ((pits[i ^ 1].lastname is not None and ((pits[i ^ 1].lastname.is_in_dictionary or pits[i ^ 1].lastname.is_lastname_has_std_tail))))):
                    nam = pits[i]
                    sur = pits[i ^ 1]
                    break
            if (nam is not None):
                pit = PersonIdentityToken._new2495(pits[0].begin_token, pits[1].end_token, (FioTemplateType.NAMESURNAME if nam == pits[0] else FioTemplateType.SURNAMENAME))
                PersonIdentityToken.manage_firstname(pit, nam, None)
                PersonIdentityToken.manage_lastname(pit, sur, None)
                pit.coef = (2)
        if (pit is None):
            return None
        res = list()
        res.append(pit)
        return res
    @staticmethod
    def try_attach(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', first_tok : 'Token', king : bool, is_attr_before : bool) -> typing.List['PersonIdentityToken']:
        """Produce every plausible FIO interpretation around pits[ind],
        score each one, and return the candidates sorted by descending coef.

        The template type of the nearest preceding PersonReferent (if any)
        biases the scoring: matching templates get a bonus at the end.
        """
        res = list()
        # ty = template of the closest preceding person on the same line, if any
        ty = FioTemplateType.UNDEFINED
        if (first_tok is not None):
            t = first_tok.previous
            while t is not None:
                pf = Utils.asObjectOrNull(t.get_referent(), PersonReferent)
                if (pf is not None):
                    ty = pf._m_person_identity_typ
                    break
                if (t.is_newline_before):
                    break
                if (t.chars.is_letter and not t.is_and):
                    break
                t = t.previous
        # a "global" (whole-span) match short-circuits everything else
        pit = PersonIdentityToken.__try_attach_global(pits, ind, inf)
        if ((pit) is not None):
            res.append(pit)
            return res
        pit = PersonIdentityToken.__try_attach_surnameii(pits, ind, inf)
        if ((pit) is not None):
            res.append(pit)
        pit = PersonIdentityToken.__try_attachiisurname(pits, ind, inf)
        if ((pit) is not None):
            res.append(pit)
        pit = PersonIdentityToken.__try_attach_asian(pits, ind, inf, 3, ty == FioTemplateType.ASIANNAME)
        if ((pit) is not None):
            res.append(pit)
        else:
            # not a 3-part asian name: try the western templates
            pit = PersonIdentityToken.__try_attach_name_surname(pits, ind, inf, ty == FioTemplateType.NAMESURNAME, is_attr_before)
            if ((pit) is not None):
                res.append(pit)
            pit1 = PersonIdentityToken.__try_attach_surname_name(pits, ind, inf, ty == FioTemplateType.SURNAMENAME)
            if ((pit1) is not None):
                res.append(pit1)
                # name+surname narrowly beating surname+name gets the latter penalized
                if (pit is not None and (pit.coef + (1)) >= pit1.coef and ty != FioTemplateType.SURNAMENAME):
                    pit1.coef -= (0.5)
            pit = PersonIdentityToken.__try_attach_name_secname_surname(pits, ind, inf, ty == FioTemplateType.NAMESECNAMESURNAME)
            if ((pit) is not None):
                res.append(pit)
            pit = PersonIdentityToken.__try_attach_surname_name_secname(pits, ind, inf, ty == FioTemplateType.SURNAMENAMESECNAME, False)
            if ((pit) is not None):
                res.append(pit)
            pit = PersonIdentityToken.__try_attach_asian(pits, ind, inf, 2, ty == FioTemplateType.ASIANNAME)
            if ((pit) is not None):
                res.append(pit)
            if (king):
                pit = PersonIdentityToken.__try_attach_name_secname(pits, ind, inf, ty == FioTemplateType.NAMESECNAME)
                if ((pit) is not None):
                    res.append(pit)
                # NOTE(review): this loop dereferences pit.coef even when the call
                # above returned None — potential AttributeError; confirm against
                # the C# original whether it belongs inside the if-block
                for r in res:
                    if (r.typ == FioTemplateType.NAMESURNAME):
                        r.coef = (pit.coef - (1))
                pit = PersonIdentityToken.__try_attach_king(pits, ind, inf, ty == FioTemplateType.KING or king)
                if ((pit) is not None):
                    res.append(pit)
        # propagate contextual gender/case onto candidates that left them open
        if (inf.gender == MorphGender.MASCULINE or inf.gender == MorphGender.FEMINIE):
            for p in res:
                if (p.morph.gender == MorphGender.UNDEFINED or (p.morph.gender) == (((MorphGender.FEMINIE) | (MorphGender.MASCULINE)))):
                    p.morph.gender = inf.gender
                    if (p.morph.case_.is_undefined):
                        p.morph.case_ = inf.case_
        for r in res:
            # penalty for line breaks inside the span
            tt = r.begin_token
            while tt != r.end_token:
                if (tt.is_newline_after):
                    r.coef -= (1)
                tt = tt.next0_
            # bonus when a verb precedes and a sentence boundary follows
            ttt = r.begin_token.previous
            if (ttt is not None and ttt.morph.class0_ == MorphClass.VERB):
                tte = r.end_token.next0_
                if (tte is None or tte.is_char('.') or tte.is_newline_before):
                    pass
                else:
                    continue
                r.coef += (1)
            if (r.coef >= 0 and ind == 0 and r.end_token == pits[len(pits) - 1].end_token):
                r.coef += PersonIdentityToken.__calc_coef_after(pits[len(pits) - 1].end_token.next0_)
        # reward candidates whose template matches the preceding person's template
        if (ty != FioTemplateType.UNDEFINED and ind == 0):
            for r in res:
                if (r.typ == ty):
                    r.coef += (1.5)
                elif (((r.typ == FioTemplateType.SURNAMENAME and ty == FioTemplateType.SURNAMENAMESECNAME)) or ((r.typ == FioTemplateType.SURNAMENAMESECNAME and ty == FioTemplateType.SURNAMENAME))):
                    r.coef += (0.5)
        PersonIdentityToken.sort(res)
        return res
    @staticmethod
    def manage_lastname(res : 'PersonIdentityToken', pit : 'PersonItemToken', inf : 'MorphBaseInfo') -> None:
        """Fill res.lastname from *pit* and tune res.coef by surname plausibility.

        When pit carries no surname analysis, the raw token value is used and
        the coefficient is adjusted from dictionary membership; otherwise the
        morphological surname variants are used and extra bonuses/penalties
        (standard endings, hyphenation, contradicting name/patronymic info)
        are applied.
        """
        if (pit.lastname is None):
            # no surname analysis — take the token text as-is
            res.lastname = PersonMorphCollection()
            PersonIdentityToken.__set_value(res.lastname, pit.begin_token, inf)
            if (pit.is_in_dictionary):
                res.coef -= 1
            tt = Utils.asObjectOrNull(pit.begin_token, TextToken)
            # NOTE(review): `not tt.chars.is_latin_letter` appears twice in this
            # condition — redundant but harmless
            if ((tt is not None and not tt.chars.is_latin_letter and tt.chars.is_capital_upper) and tt.length_char > 2 and not tt.chars.is_latin_letter):
                # unknown capitalized word: bonus only when no wordform is in the dictionary
                ok = True
                for wf in tt.morph.items:
                    if ((wf).is_in_dictionary):
                        ok = False
                        break
                if (ok):
                    res.coef += (1)
        else:
            res.coef += 1
            if (not PersonIdentityToken.__is_accords(pit.lastname, inf)):
                res.coef -= 1
            res.lastname = PersonMorphCollection()
            PersonIdentityToken.__set_value2(res.lastname, pit.lastname, inf)
            if (pit.lastname.term is not None):
                # keep the raw (unnormalized) term as an extra variant in nominative contexts
                if (res.morph.case_.is_undefined or res.morph.case_.is_nominative):
                    if (not pit.lastname.is_in_dictionary and not pit.lastname.term in res.lastname.values):
                        if (inf.case_.is_nominative or inf.case_.is_undefined):
                            if (pit.lastname.morph.class0_.is_adjective and inf.gender == MorphGender.FEMINIE):
                                pass
                            else:
                                res.lastname.add(pit.lastname.term, None, pit.morph.gender, False)
            if (pit.is_in_dictionary):
                res.coef -= 1
                # ...unless the dictionary/ontology itself knows it as a surname
                if (pit.lastname.is_in_dictionary or pit.lastname.is_in_ontology):
                    res.coef += 1
            if (pit.lastname.is_lastname_has_hiphen):
                res.coef += (1)
            # evidence that the token is actually a name/patronymic hurts the surname reading
            if (pit.middlename is not None and pit.middlename.morph.gender == MorphGender.FEMINIE):
                res.coef -= 1
            if (pit.firstname is not None and not pit.chars.is_latin_letter):
                res.coef -= 1
            if (isinstance(pit.begin_token, ReferentToken)):
                res.coef -= 1
    @staticmethod
    def manage_firstname(res : 'PersonIdentityToken', pit : 'PersonItemToken', inf : 'MorphBaseInfo') -> None:
        """Fill res.firstname from *pit* and tune res.coef by first-name
        plausibility: dictionary-known first names get a bonus; evidence that
        the token is really a surname or patronymic is penalized.
        """
        if (pit.firstname is None):
            # token was not analyzed as a first name — use its raw value
            if (pit.lastname is not None):
                res.coef -= 1
            res.firstname = PersonMorphCollection()
            PersonIdentityToken.__set_value(res.firstname, pit.begin_token, inf)
            if (pit.is_in_dictionary):
                res.coef -= 1
        else:
            res.coef += 1
            if (not PersonIdentityToken.__is_accords(pit.firstname, inf)):
                res.coef -= 1
            res.firstname = PersonMorphCollection()
            PersonIdentityToken.__set_value2(res.firstname, pit.firstname, inf)
            if (pit.is_in_dictionary and not pit.firstname.is_in_dictionary):
                res.coef -= 1
            if (pit.middlename is not None and pit.middlename != pit.firstname):
                res.coef -= 1
            if (pit.lastname is not None and ((pit.lastname.is_in_dictionary or pit.lastname.is_in_ontology))):
                res.coef -= 1
        # a token that is itself a nested referent is a poor first-name candidate
        if (isinstance(pit.begin_token, ReferentToken)):
            res.coef -= (2)
@staticmethod
def __manage_middlename(res : 'PersonIdentityToken', pit : 'PersonItemToken', inf : 'MorphBaseInfo') -> None:
mm = PersonMorphCollection()
res.middlename = (mm)
if (pit.middlename is None):
PersonIdentityToken.__set_value(mm, pit.begin_token, inf)
else:
res.coef += 1
if (not PersonIdentityToken.__is_accords(pit.middlename, inf)):
res.coef -= 1
PersonIdentityToken.__set_value2(mm, pit.middlename, inf)
    @staticmethod
    def __try_attach_single_surname(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo') -> 'PersonIdentityToken':
        """Build a candidate from a lone surname at pits[ind].

        A bonus applies when it is the only item; otherwise neighbours that
        themselves look like name parts (initials, known first names,
        non-dictionary words) reduce the coefficient.
        """
        if (ind >= len(pits) or pits[ind].lastname is None):
            return None
        res = PersonIdentityToken(pits[ind].begin_token, pits[ind].end_token)
        if (ind == 0 and len(pits) == 1):
            res.coef += 1
        else:
            if (ind > 0 and ((not pits[ind - 1].is_in_dictionary or pits[ind - 1].typ == PersonItemToken.ItemType.INITIAL or pits[ind - 1].firstname is not None))):
                res.coef -= 1
            if (((ind + 1) < len(pits)) and ((not pits[ind + 1].is_in_dictionary or pits[ind + 1].typ == PersonItemToken.ItemType.INITIAL or pits[ind + 1].firstname is not None))):
                res.coef -= 1
        res.morph = PersonIdentityToken.__accord_morph(inf, pits[ind].lastname, None, None, pits[ind].end_token.next0_)
        PersonIdentityToken.manage_lastname(res, pits[ind], inf)
        return res
    @staticmethod
    def __try_attach_name_surname(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', prev_has_this_typ : bool=False, is_attr_before : bool=False) -> 'PersonIdentityToken':
        """Score the "FirstName Surname" template at pits[ind], pits[ind+1].

        Returns a candidate whose coef aggregates many heuristics (morphology
        accord, dictionary membership, capitalization, punctuation after the
        span, corpus statistics, brackets, latin context), or None when the
        template is structurally impossible.
        """
        if ((ind + 1) >= len(pits) or pits[ind + 1].typ != PersonItemToken.ItemType.VALUE or pits[ind].typ != PersonItemToken.ItemType.VALUE):
            return None
        if (pits[ind + 1].lastname is None):
            # second item is not a known surname: allow only with strong context
            if (not prev_has_this_typ):
                if (pits[ind].chars.is_latin_letter):
                    pass
                else:
                    if (pits[ind].firstname is None or pits[ind + 1].middlename is not None):
                        return None
                    if (pits[ind + 1].is_newline_after):
                        pass
                    elif (pits[ind + 1].end_token.next0_ is not None and pits[ind + 1].end_token.next0_.is_char_of(",.)")):
                        pass
                    else:
                        return None
        if (pits[ind].is_newline_after or pits[ind].is_hiphen_after):
            return None
        if (pits[ind + 1].middlename is not None and pits[ind + 1].middlename.is_in_dictionary and pits[ind + 1].middlename.morph.gender == MorphGender.FEMINIE):
            return None
        if (PersonIdentityToken.__is_both_surnames(pits[ind], pits[ind + 1])):
            return None
        res = PersonIdentityToken._new2495(pits[ind].begin_token, pits[ind + 1].end_token, FioTemplateType.NAMESURNAME)
        # starting further into the sequence is inherently less likely
        res.coef -= (ind)
        res.morph = PersonIdentityToken.__accord_morph(inf, pits[ind + 1].lastname, pits[ind].firstname, None, pits[ind + 1].end_token.next0_)
        if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
            if (pits[ind + 1].lastname is not None and not pits[ind + 1].lastname.morph.case_.is_undefined):
                if ((pits[ind].lastname is not None and pits[ind].lastname.is_lastname_has_std_tail and pits[ind + 1].firstname is not None) and pits[ind + 1].firstname.is_in_dictionary):
                    # the roles could plausibly be swapped (surname first) — penalize
                    res.coef -= (1)
                else:
                    res.coef += (1)
        inf = (res.morph)
        PersonIdentityToken.manage_firstname(res, pits[ind], inf)
        PersonIdentityToken.manage_lastname(res, pits[ind + 1], inf)
        if (pits[ind].firstname is not None and (isinstance(pits[ind + 1].begin_token, ReferentToken))):
            res.coef += 1
        if (pits[ind].begin_token.get_morph_class_in_dictionary().is_verb):
            if (pits[ind].begin_token.chars.is_capital_upper and not MiscHelper.can_be_start_of_sentence(pits[ind].begin_token)):
                pass
            else:
                res.coef -= (1)
        # trailing punctuation right after the pair supports the reading
        if (pits[ind].firstname is not None and ((pits[ind + 1].is_newline_after or ((pits[ind + 1].end_token.next0_ is not None and ((pits[ind + 1].end_token.next0_.is_char_of(",."))))))) and not pits[ind + 1].is_newline_before):
            if (pits[ind + 1].firstname is None and pits[ind + 1].middlename is None):
                res.coef += 1
            elif (pits[ind + 1].chars.is_latin_letter and (ind + 2) == len(pits)):
                res.coef += 1
        if (pits[ind + 1].middlename is not None):
            # second item could be a patronymic — consult corpus statistics
            info = pits[ind].kit.statistics.get_word_info(pits[ind + 1].begin_token)
            if (info is not None and info.not_capital_before_count > 0):
                pass
            else:
                res.coef -= (1 + ind)
                if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
                    if (pits[ind + 1].lastname is not None and ((pits[ind + 1].lastname.is_in_dictionary or pits[ind + 1].lastname.is_in_ontology))):
                        pass
                    else:
                        for v in pits[ind + 1].middlename.vars0_:
                            if ((((v.gender) & (res.morph.gender))) != (MorphGender.UNDEFINED)):
                                res.coef -= (1)
                                break
        # mixed capitalization between the two items is suspicious
        if (pits[ind].chars != pits[ind + 1].chars):
            if (pits[ind].chars.is_capital_upper and pits[ind + 1].chars.is_all_upper):
                pass
            elif (pits[ind].chars.is_all_upper and pits[ind + 1].chars.is_capital_upper and pits[ind].firstname is None):
                res.coef -= (10)
            else:
                res.coef -= (1)
        if (pits[ind].firstname is None or not pits[ind].firstname.is_in_dictionary or pits[ind].chars.is_all_upper):
            res.coef -= (1)
        elif (pits[ind].chars.is_all_upper):
            res.coef -= (0.5)
        if (pits[ind].is_in_dictionary):
            if (pits[ind + 1].is_in_dictionary):
                res.coef -= (2)
                if (pits[ind + 1].is_newline_after):
                    res.coef += 1
                elif (pits[ind + 1].end_token.next0_ is not None and pits[ind + 1].end_token.next0_.is_char_of(".,:")):
                    res.coef += 1
            if (pits[ind].is_in_dictionary and pits[ind].firstname is None):
                res.coef -= 1
        elif (pits[ind].firstname is None or not pits[ind].firstname.is_in_dictionary):
            if (inf.case_.is_undefined):
                res.coef -= (1)
            else:
                for mi in pits[ind].begin_token.morph.items:
                    if (not ((mi.case_) & inf.case_).is_undefined):
                        if ((isinstance(mi, MorphWordForm)) and (mi).is_in_dictionary):
                            res.coef -= (1)
                            break
        if (not pits[ind].chars.is_latin_letter):
            # a noun phrase spanning both items suggests ordinary text, not a name
            npt = NounPhraseHelper.try_parse(pits[ind].begin_token, NounPhraseParseAttr.NO, 0)
            if (npt is not None and npt.end_char >= pits[ind + 1].begin_char):
                if (pits[ind].begin_token.get_morph_class_in_dictionary().is_adjective):
                    res.coef -= (2)
                elif (pits[ind + 1].begin_token.get_morph_class_in_dictionary().is_noun):
                    res.coef -= (2)
        PersonIdentityToken.__correct_coef_after_lastname(res, pits, ind + 2)
        if (ind > 0 and res.coef > 0 and pits[ind].is_hiphen_before):
            # hyphenated double first name: merge the previous item into firstname
            b1 = pits[ind].kit.statistics.get_bigramm_info(pits[ind - 1].begin_token, pits[ind].begin_token)
            if (b1 is not None and b1.second_count == b1.pair_count):
                res0 = PersonIdentityToken._new2495(pits[ind].begin_token, pits[ind + 1].end_token, FioTemplateType.NAMESURNAME)
                PersonIdentityToken.manage_firstname(res0, pits[ind - 1], inf)
                res.firstname = PersonMorphCollection.add_prefix(res0.firstname, res.firstname)
                res.coef += 1
                res.begin_token = pits[ind - 1].begin_token
        if (BracketHelper.can_be_start_of_sequence(res.begin_token.previous, False, False) and BracketHelper.can_be_end_of_sequence(res.end_token.next0_, False, None, False)):
            res.coef -= (2)
        # NOTE(review): the statistics below use pits[0]/pits[1] regardless of
        # ind — verify against the C# original whether pits[ind] was intended
        bi = pits[0].begin_token.kit.statistics.get_initial_info(pits[ind].value, pits[ind + 1].begin_token)
        if (bi is not None and bi.pair_count > 0):
            res.coef += (2)
        if ((not pits[0].is_in_dictionary and pits[1].lastname is not None and pits[1].lastname.is_lastname_has_std_tail) and not pits[1].is_in_dictionary):
            res.coef += 0.5
        if (res.firstname is not None and pits[ind].begin_token.is_value("СЛАВА", None)):
            res.coef -= (3)
        elif (PersonIdentityToken.check_latin_after(res) is not None):
            res.coef += (2)
        if (pits[0].firstname is None or ((pits[0].firstname is not None and not pits[0].firstname.is_in_dictionary))):
            if (pits[0].begin_token.get_morph_class_in_dictionary().is_proper_geo and pits[1].lastname is not None and pits[1].lastname.is_in_ontology):
                res.coef -= (2)
        if (ind == 0 and len(pits) == 2 and pits[0].chars.is_latin_letter):
            if (pits[0].firstname is not None):
                if (not is_attr_before and (isinstance(pits[0].begin_token.previous, TextToken)) and pits[0].begin_token.previous.chars.is_capital_upper):
                    res.coef -= (1)
                else:
                    res.coef += (1)
            if (pits[0].chars.is_all_upper and pits[1].chars.is_capital_upper):
                res.coef = (0)
        return res
    @staticmethod
    def __try_attach_name_secname_surname(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', prev_has_this_typ : bool=False) -> 'PersonIdentityToken':
        """Score the "FirstName Patronymic Surname" (or "FirstName I. Surname")
        template at pits[ind..ind+2]. Corpus bigram/initial statistics are used
        to accept the middle item when it is not a recognized patronymic.
        """
        if ((ind + 2) >= len(pits) or pits[ind].typ != PersonItemToken.ItemType.VALUE or pits[ind + 2].typ != PersonItemToken.ItemType.VALUE):
            return None
        if (pits[ind].is_newline_after):
            # allow a line break only for a fully recognized 3-part name
            if ((len(pits) == 3 and pits[0].firstname is not None and pits[1].middlename is not None) and pits[2].lastname is not None):
                pass
            else:
                return None
        if (pits[ind + 2].lastname is None and not prev_has_this_typ and not pits[ind].morph.language.is_en):
            return None
        ok = False
        need_test_name_surname = False
        add_coef = 0
        if (pits[ind + 1].typ == PersonItemToken.ItemType.INITIAL):
            ok = True
        elif (pits[ind + 1].typ == PersonItemToken.ItemType.VALUE and pits[ind + 1].middlename is not None):
            ok = True
        elif (pits[ind + 1].typ == PersonItemToken.ItemType.VALUE and pits[ind + 2].firstname is None):
            # middle item unrecognized: decide from bigram statistics
            b1 = pits[0].kit.statistics.get_bigramm_info(pits[ind + 1].begin_token, pits[ind + 2].begin_token)
            b2 = pits[0].kit.statistics.get_bigramm_info(pits[ind].begin_token, pits[ind + 2].begin_token)
            if (b1 is not None):
                if (b1.pair_count == b1.first_count and b1.pair_count == b1.second_count):
                    ok = True
                    b3 = pits[0].kit.statistics.get_bigramm_info(pits[ind].begin_token, pits[ind + 1].begin_token)
                    if (b3 is not None):
                        if (b3.second_count > b3.pair_count):
                            ok = False
                        elif (b3.second_count == b3.pair_count and pits[ind + 2].is_hiphen_before):
                            ok = False
                elif (b2 is not None and (b2.pair_count + b1.pair_count) == b1.second_count):
                    ok = True
            elif ((ind + 3) == len(pits) and pits[ind + 2].lastname is not None and not pits[ind + 2].is_in_dictionary):
                ok = True
            if (not ok):
                b1 = pits[0].kit.statistics.get_initial_info(pits[ind].value, pits[ind + 2].begin_token)
                if (b1 is not None and b1.pair_count > 0):
                    ok = True
                    add_coef = 2
            if (not ok):
                # last resort: the third item behaves like a surname in the corpus
                wi = pits[0].kit.statistics.get_word_info(pits[ind + 2].end_token)
                if (wi is not None and wi.lower_count == 0):
                    if (wi.male_verbs_after_count > 0 or wi.female_verbs_after_count > 0):
                        ok = True
                        add_coef = 2
                        need_test_name_surname = True
        if (pits[ind + 1].firstname is not None and pits[ind + 1].middlename is None):
            if (pits[ind].firstname is None and pits[ind].value is not None and pits[ind].is_in_dictionary):
                ok = False
            if (pits[ind + 1].lastname is not None and ((pits[ind + 1].lastname.is_in_dictionary or pits[ind + 1].lastname.is_in_ontology))):
                ok = False
        if (not ok):
            # latin 3-part names are accepted on first/last name evidence alone
            if ((ind == 0 and len(pits) == 3 and pits[0].chars.is_latin_letter) and pits[1].chars.is_latin_letter and pits[2].chars.is_latin_letter):
                if (pits[0].firstname is not None and pits[2].lastname is not None):
                    ok = True
            if (not ok):
                return None
        if (PersonIdentityToken.__is_both_surnames(pits[ind], pits[ind + 2])):
            return None
        # at least one item must be an initial, unknown, or a proper-name form
        ok = False
        i = ind
        while i < (ind + 3):
            if (pits[i].typ == PersonItemToken.ItemType.INITIAL):
                ok = True
            elif (not pits[i].is_in_dictionary):
                cla = pits[i].begin_token.get_morph_class_in_dictionary()
                if (cla.is_proper_name or cla.is_proper_surname or cla.is_proper_secname):
                    ok = True
                elif (cla.is_undefined):
                    ok = True
            i += 1
        if (not ok):
            return None
        res = PersonIdentityToken(pits[ind].begin_token, pits[ind + 2].end_token)
        res.typ = (FioTemplateType.NAMEISURNAME if pits[ind + 1].typ == PersonItemToken.ItemType.INITIAL else FioTemplateType.NAMESECNAMESURNAME)
        res.coef -= (ind)
        res.morph = PersonIdentityToken.__accord_morph(inf, pits[ind + 2].lastname, pits[ind].firstname, pits[ind + 1].middlename, pits[ind + 2].end_token.next0_)
        if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
            res.coef += (1)
        inf = (res.morph)
        PersonIdentityToken.manage_firstname(res, pits[ind], inf)
        PersonIdentityToken.manage_lastname(res, pits[ind + 2], inf)
        if (pits[ind + 1].middlename is not None and len(pits[ind + 1].middlename.vars0_) > 0):
            res.coef += 1
            res.middlename = (pits[ind + 1].middlename.vars0_[0].value)
            if (len(pits[ind + 1].middlename.vars0_) > 1):
                res.middlename = (PersonMorphCollection())
                PersonIdentityToken.__set_value2(Utils.asObjectOrNull(res.middlename, PersonMorphCollection), pits[ind + 1].middlename, inf)
            if (pits[ind + 2].lastname is not None):
                if (pits[ind + 2].lastname.is_in_dictionary or pits[ind + 2].lastname.is_lastname_has_std_tail or pits[ind + 2].lastname.is_has_std_postfix):
                    res.coef += 1
        elif (pits[ind + 1].typ == PersonItemToken.ItemType.INITIAL):
            res.middlename = (pits[ind + 1].value)
            res.coef += 1
            if (pits[ind + 2].lastname is not None):
                pass
            else:
                # third item is no surname: a following noun phrase kills the reading
                npt = NounPhraseHelper.try_parse(pits[ind + 2].begin_token, Utils.valToEnum((NounPhraseParseAttr.PARSEPREPOSITION) | (NounPhraseParseAttr.PARSEPRONOUNS) | (NounPhraseParseAttr.PARSEADVERBS), NounPhraseParseAttr), 0)
                if (npt is not None and npt.end_char > pits[ind + 2].end_char):
                    res.coef -= (2)
                elif (pits[ind + 1].firstname is not None and pits[ind + 2].middlename is not None and len(pits) == 3):
                    res.coef -= (2)
        else:
            PersonIdentityToken.__manage_middlename(res, pits[ind + 1], inf)
            res.coef += (0.5)
        if (pits[ind].chars != pits[ind + 2].chars):
            res.coef -= (1)
            if (pits[ind].chars.is_all_upper):
                res.coef -= (1)
        elif (pits[ind + 1].typ != PersonItemToken.ItemType.INITIAL and pits[ind].chars != pits[ind + 1].chars):
            res.coef -= (1)
        PersonIdentityToken.__correct_coef_after_lastname(res, pits, ind + 3)
        res.coef += (add_coef)
        if (pits[ind].is_in_dictionary and pits[ind + 1].is_in_dictionary and pits[ind + 2].is_in_dictionary):
            res.coef -= 1
        return res
    @staticmethod
    def __try_attach_name_secname(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', prev_has_this_typ : bool=False) -> 'PersonIdentityToken':
        """Score the "FirstName Patronymic" template (no surname) — used only
        in "king" mode. Requires exactly two items starting at index 0, both
        plain values with recognized first name and patronymic.
        NOTE(review): prev_has_this_typ is accepted but unused, and the
        gender-based res.coef += 1 is a dead store — res.coef is
        unconditionally set to 2 before returning.
        """
        if ((ind != 0 or (ind + 2) != len(pits) or pits[ind].typ != PersonItemToken.ItemType.VALUE) or pits[ind + 1].typ != PersonItemToken.ItemType.VALUE):
            return None
        if (pits[ind].is_newline_after):
            return None
        if (pits[ind].firstname is None or pits[ind + 1].middlename is None):
            return None
        res = PersonIdentityToken(pits[ind].begin_token, pits[ind + 1].end_token)
        res.typ = FioTemplateType.NAMESECNAME
        res.morph = PersonIdentityToken.__accord_morph(inf, None, pits[ind].firstname, pits[ind + 1].middlename, pits[ind + 1].end_token.next0_)
        if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
            res.coef += (1)
        inf = (res.morph)
        PersonIdentityToken.manage_firstname(res, pits[ind], inf)
        PersonIdentityToken.__manage_middlename(res, pits[ind + 1], inf)
        # fixed confidence for this template (overwrites earlier adjustments)
        res.coef = (2)
        return res
    @staticmethod
    def __correct_coef_after_lastname(res : 'PersonIdentityToken', pits : typing.List['PersonItemToken'], ind : int) -> None:
        """Adjust res.coef using the context after a template ending in a
        surname; *ind* is the index of the first item NOT consumed by the
        template. May also absorb a hyphenated second surname into res.
        """
        if (not pits[ind - 1].is_newline_after):
            # a person-attribute keyword right after the surname is suspicious
            pat = PersonAttrToken.try_attach(pits[ind - 1].begin_token, None, PersonAttrToken.PersonAttrAttachAttrs.ONLYKEYWORD)
            if (pat is not None):
                res.coef -= (1)
        if (ind >= len(pits)):
            # template consumed everything: judge by what follows in the text
            if (PersonIdentityToken.check_latin_after(res) is not None):
                res.coef += (2)
            te = pits[ind - 1].end_token
            stat = te.kit.statistics.get_word_info(te)
            if (stat is not None):
                if (stat.has_before_person_attr):
                    res.coef += 1
            te = pits[ind - 1].end_token.next0_
            if (te is None):
                return
            if (PersonHelper.is_person_say_or_attr_after(te)):
                res.coef += 1
            if (res.chars.is_latin_letter and res.typ == FioTemplateType.NAMESURNAME):
                res.coef += (2)
            if (not te.chars.is_letter and not te.chars.is_all_lower):
                return
            wi = te.kit.statistics.get_word_info(te)
            if (wi is not None):
                # next word behaving like a common (lowercase) word hurts;
                # behaving like a verb after a name helps
                if (wi.lower_count > 0):
                    res.coef -= 1
                elif ((wi.female_verbs_after_count + wi.male_verbs_after_count) > 0):
                    res.coef += 1
            return
        if (ind == 0):
            return
        if (pits[ind].typ == PersonItemToken.ItemType.VALUE and ((pits[ind].firstname is None or ind == (len(pits) - 1)))):
            # possible hyphenated/compound second surname right after the template
            b1 = pits[0].kit.statistics.get_bigramm_info(pits[ind - 1].begin_token, pits[ind].begin_token)
            if ((b1 is not None and b1.first_count == b1.pair_count and b1.second_count == b1.pair_count) and b1.pair_count > 0):
                ok = False
                if (b1.pair_count > 1 and pits[ind].whitespaces_before_count == 1):
                    ok = True
                elif (pits[ind].is_hiphen_before and pits[ind].lastname is not None):
                    ok = True
                if (ok):
                    res1 = PersonIdentityToken(pits[ind].begin_token, pits[ind].end_token)
                    PersonIdentityToken.manage_lastname(res1, pits[ind], res.morph)
                    res.lastname = PersonMorphCollection.add_prefix(res.lastname, res1.lastname)
                    res.end_token = pits[ind].end_token
                    res.coef += 1
                    # skip the absorbed item before the trailing-whitespace checks
                    ind += 1
                    if (ind >= len(pits)):
                        return
        if (pits[ind - 1].whitespaces_before_count > pits[ind - 1].whitespaces_after_count):
            res.coef -= (1)
        elif (pits[ind - 1].whitespaces_before_count == pits[ind - 1].whitespaces_after_count):
            if (pits[ind].lastname is not None or pits[ind].firstname is not None):
                if (not pits[ind].is_in_dictionary):
                    res.coef -= (1)
@staticmethod
def __correct_coef_for_lastname(pit : 'PersonIdentityToken', it : 'PersonItemToken') -> None:
if (it.begin_token != it.end_token):
return
tt = Utils.asObjectOrNull(it.begin_token, TextToken)
if (tt is None):
return
in_dic = False
has_std = False
for wf in tt.morph.items:
if (wf.class0_.is_proper_surname):
pass
elif ((wf).is_in_dictionary):
in_dic = True
if (it.lastname is not None):
has_std = it.lastname.is_lastname_has_std_tail
if (not has_std and in_dic):
pit.coef -= 1.5
    @staticmethod
    def __try_attach_surname_name(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', prev_has_this_typ : bool=False) -> 'PersonIdentityToken':
        """Score the "Surname FirstName" template at pits[ind], pits[ind+1].

        Mirrors __try_attach_name_surname but with the surname first; returns
        None when the shape is impossible or the surname token is really a
        pronoun.
        """
        if ((ind + 1) >= len(pits) or pits[ind + 1].typ != PersonItemToken.ItemType.VALUE or pits[ind].typ != PersonItemToken.ItemType.VALUE):
            return None
        if (pits[ind].lastname is None and not prev_has_this_typ):
            return None
        if (PersonIdentityToken.__is_both_surnames(pits[ind], pits[ind + 1])):
            return None
        res = PersonIdentityToken._new2495(pits[ind].begin_token, pits[ind + 1].end_token, FioTemplateType.SURNAMENAME)
        res.coef -= (ind)
        if (pits[ind].is_newline_after):
            res.coef -= 1
        if (pits[ind].whitespaces_after_count > 15):
            res.coef -= 1
        res.morph = PersonIdentityToken.__accord_morph(inf, pits[ind].lastname, pits[ind + 1].firstname, None, pits[ind + 1].end_token.next0_)
        if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
            if (pits[ind].lastname is not None and not pits[ind].lastname.morph.case_.is_undefined):
                res.coef += (1)
        inf = (res.morph)
        PersonIdentityToken.manage_lastname(res, pits[ind], inf)
        PersonIdentityToken.manage_firstname(res, pits[ind + 1], inf)
        PersonIdentityToken.__correct_coef_for_lastname(res, pits[ind])
        if (pits[ind].chars != pits[ind + 1].chars):
            res.coef -= (1)
        if (pits[ind + 1].firstname is None or not pits[ind + 1].firstname.is_in_dictionary or pits[ind + 1].chars.is_all_upper):
            res.coef -= (1)
        elif (pits[ind].chars.is_all_upper):
            res.coef -= (0.5)
        if (pits[ind + 1].is_in_dictionary and ((pits[ind + 1].firstname is None or not pits[ind + 1].firstname.is_in_dictionary))):
            res.coef -= (1)
        PersonIdentityToken.__correct_coef_after_name(res, pits, ind + 2)
        # noun phrase continuing past the first-name token suggests ordinary text
        npt = NounPhraseHelper.try_parse(pits[ind + 1].end_token, NounPhraseParseAttr.NO, 0)
        if (npt is not None and npt.end_token != pits[ind + 1].end_token):
            res.coef -= (1)
        if (ind == 0):
            PersonIdentityToken.__correct_coefsns(res, pits, ind + 2)
            # NOTE(review): next0_ is dereferenced without a None check here —
            # confirm the surname can never be the final token at this point
            if (pits[ind].end_token.next0_.is_hiphen):
                res.coef -= (2)
        if (BracketHelper.can_be_start_of_sequence(res.begin_token.previous, False, False) and BracketHelper.can_be_end_of_sequence(res.end_token.next0_, False, None, False)):
            res.coef -= (2)
        if (pits[ind].is_in_dictionary):
            mc = pits[ind].begin_token.get_morph_class_in_dictionary()
            if (mc.is_pronoun or mc.is_personal_pronoun):
                return None
        if (((len(pits) == 2 and ind == 0 and pits[0].chars.is_all_upper) and pits[1].chars.is_capital_upper and not pits[1].is_in_dictionary) and (res.coef < 0)):
            res.coef = (0)
        return res
    @staticmethod
    def __correct_coefsns(res : 'PersonIdentityToken', pits : typing.List['PersonItemToken'], ind_after : int) -> None:
        """Extra corrections for the Surname+Name template when it starts at
        index 0 and more items follow (*ind_after* is the first unconsumed
        index, i.e. 2): uses corpus character-level statistics and competing
        morphology accords to lower the coefficient of unlikely readings.
        """
        if (ind_after >= len(pits)):
            return
        if (pits[0].lastname is None or not pits[0].lastname.is_lastname_has_std_tail):
            # compare how often pits[1] co-occurs with its neighbours in the corpus
            stat = pits[0].kit.statistics.get_word_info(pits[1].begin_token)
            stata = pits[0].kit.statistics.get_word_info(pits[2].begin_token)
            statb = pits[0].kit.statistics.get_word_info(pits[0].begin_token)
            if (stat is not None and stata is not None and statb is not None):
                if (stat.like_chars_after_words is not None and stat.like_chars_before_words is not None):
                    coua = 0
                    coub = 0
                    # wrapper objects emulate C# out-parameters of TryGetValue
                    wrapcoua2502 = RefOutArgWrapper(0)
                    Utils.tryGetValue(stat.like_chars_after_words, stata, wrapcoua2502)
                    coua = wrapcoua2502.value
                    wrapcoub2501 = RefOutArgWrapper(0)
                    Utils.tryGetValue(stat.like_chars_before_words, statb, wrapcoub2501)
                    coub = wrapcoub2501.value
                    if (coua == stat.total_count and (coub < stat.total_count)):
                        res.coef -= (2)
                        return
        if (pits[1].firstname is None):
            return
        middle = None
        if (ind_after > 2 and pits[2].middlename is not None):
            middle = pits[2].middlename
        # penalize when the current split does not accord morphologically,
        # or when an alternative surname at ind_after accords better
        inf = MorphBaseInfo()
        mi1 = PersonIdentityToken.__accord_morph(inf, pits[0].lastname, pits[1].firstname, middle, None)
        if (mi1.case_.is_undefined):
            res.coef -= (1)
        if (pits[ind_after].lastname is None or not pits[ind_after].lastname.is_lastname_has_std_tail):
            return
        mi2 = PersonIdentityToken.__accord_morph(inf, pits[ind_after].lastname, pits[1].firstname, middle, pits[ind_after].end_token.next0_)
        if (not mi2.case_.is_undefined):
            res.coef -= (1)
    @staticmethod
    def __try_attach_surname_name_secname(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', prev_has_this_typ : bool=False, always : bool=False) -> 'PersonIdentityToken':
        """Try to build a SURNAMENAMESECNAME identity (Surname Firstname
        Middlename) from three consecutive VALUE items starting at *ind*.

        prev_has_this_typ relaxes checks when the preceding context already
        used this template; always skips the both-surnames rejection tests.
        Returns a PersonIdentityToken with a heuristic confidence in .coef,
        or None when the pattern does not apply.
        """
        if ((ind + 2) >= len(pits) or pits[ind + 1].typ != PersonItemToken.ItemType.VALUE or pits[ind].typ != PersonItemToken.ItemType.VALUE):
            return None
        if (pits[ind].lastname is None and not prev_has_this_typ):
            if (ind > 0):
                return None
            # first item is no surname candidate: allow only a 3-item sequence
            # that is followed by a person-attribute keyword (e.g. a post)
            if (len(pits) == 3 and not always):
                tt1 = pits[2].end_token.next0_
                if (tt1 is not None and tt1.is_comma):
                    tt1 = tt1.next0_
                if (tt1 is not None and not tt1.is_newline_before and PersonAttrToken.try_attach(tt1, None, PersonAttrToken.PersonAttrAttachAttrs.ONLYKEYWORD) is not None):
                    pass
                else:
                    return None
        if (not always):
            # reject readings where two of the items both look like surnames
            if (PersonIdentityToken.__is_both_surnames(pits[ind], pits[ind + 2])):
                return None
            if (PersonIdentityToken.__is_both_surnames(pits[ind], pits[ind + 1])):
                if (len(pits) == 3 and ind == 0 and pits[2].middlename is not None):
                    pass
                else:
                    return None
        res = PersonIdentityToken._new2495(pits[ind].begin_token, pits[ind + 2].end_token, FioTemplateType.SURNAMENAMESECNAME)
        if (pits[ind + 2].middlename is None):
            # third item is not a middlename candidate; accept only in a few contexts
            if ((ind + 2) == (len(pits) - 1) and prev_has_this_typ):
                res.coef += (1)
            elif (pits[ind + 1].firstname is not None and pits[ind + 2].firstname is not None):
                pass
            elif (not always):
                return None
        res.coef -= (ind)
        # newline / wide-gap penalties (unless the whole triple is one line)
        if (pits[ind].is_newline_after):
            if (pits[ind].is_newline_before and pits[ind + 2].is_newline_after):
                pass
            else:
                res.coef -= 1
                if (pits[ind].whitespaces_after_count > 15):
                    res.coef -= 1
        if (pits[ind + 1].is_newline_after):
            if (pits[ind].is_newline_before and pits[ind + 2].is_newline_after):
                pass
            else:
                res.coef -= 1
                if (pits[ind + 1].whitespaces_after_count > 15):
                    res.coef -= 1
        # morphological accord across the three items (plus a following verb, if any)
        res.morph = PersonIdentityToken.__accord_morph(inf, pits[ind].lastname, pits[ind + 1].firstname, pits[ind + 2].middlename, pits[ind + 2].end_token.next0_)
        if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
            res.coef += 1.5
            inf = (res.morph)
        PersonIdentityToken.manage_lastname(res, pits[ind], inf)
        PersonIdentityToken.__correct_coef_for_lastname(res, pits[ind])
        PersonIdentityToken.manage_firstname(res, pits[ind + 1], inf)
        if (pits[ind + 2].middlename is not None and len(pits[ind + 2].middlename.vars0_) > 0):
            res.coef += 1
            res.middlename = (pits[ind + 2].middlename.vars0_[0].value)
            if (len(pits[ind + 2].middlename.vars0_) > 1):
                res.middlename = (PersonMorphCollection())
                PersonIdentityToken.__set_value2(Utils.asObjectOrNull(res.middlename, PersonMorphCollection), pits[ind + 2].middlename, inf)
            if (pits[ind + 1].firstname is not None and len(pits) == 3 and not pits[ind].is_in_dictionary):
                res.coef += 1
        else:
            PersonIdentityToken.__manage_middlename(res, pits[ind + 2], inf)
        # capitalization consistency checks
        if (pits[ind].chars != pits[ind + 1].chars or pits[ind].chars != pits[ind + 2].chars):
            res.coef -= (1)
            if (pits[ind].chars.is_all_upper and pits[ind + 1].chars.is_capital_upper and pits[ind + 2].chars.is_capital_upper):
                res.coef += (2)
        tt = Utils.asObjectOrNull(pits[ind].begin_token, TextToken)
        if (tt is not None):
            # salutation words before a name suggest Firstname-first order instead
            if (tt.is_value("УВАЖАЕМЫЙ", None) or tt.is_value("ДОРОГОЙ", None)):
                res.coef -= (2)
        PersonIdentityToken.__correct_coef_after_name(res, pits, ind + 3)
        if (ind == 0):
            PersonIdentityToken.__correct_coefsns(res, pits, ind + 3)
        if (pits[ind].is_in_dictionary and pits[ind + 1].is_in_dictionary and pits[ind + 2].is_in_dictionary):
            res.coef -= 1
        return res
@staticmethod
def __correct_coef_after_name(res : 'PersonIdentityToken', pits : typing.List['PersonItemToken'], ind : int) -> None:
if (ind >= len(pits)):
return
if (ind == 0):
return
if (pits[ind - 1].whitespaces_before_count > pits[ind - 1].whitespaces_after_count):
res.coef -= (1)
elif (pits[ind - 1].whitespaces_before_count == pits[ind - 1].whitespaces_after_count):
if (pits[ind].lastname is not None or pits[ind].firstname is not None or pits[ind].middlename is not None):
res.coef -= (1)
t = pits[ind - 1].end_token.next0_
if (t is not None and t.next0_ is not None and t.next0_.is_char(',')):
t = t.next0_
if (t is not None):
if (PersonAttrToken.try_attach(t, None, PersonAttrToken.PersonAttrAttachAttrs.ONLYKEYWORD) is not None):
res.coef += (1)
@staticmethod
def __calc_coef_after(tt : 'Token') -> float:
if (tt is not None and tt.is_comma):
tt = tt.next0_
attr = PersonAttrToken.try_attach(tt, None, PersonAttrToken.PersonAttrAttachAttrs.ONLYKEYWORD)
if (attr is not None and attr.age is not None):
return 3
if (tt is not None and tt.get_referent() is not None and tt.get_referent().type_name == "DATE"):
co = 1
if (tt.next0_ is not None and tt.next0_.is_value("Р", None)):
co += (2)
return co
return 0
    @staticmethod
    def __try_attach_surnameii(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo') -> 'PersonIdentityToken':
        """Try to build a Surname+Initial(s) identity (templates SURNAMEI /
        SURNAMEII) from the item at *ind* followed by one or two INITIAL items.

        Returns None when the shape does not match; otherwise a token whose
        .coef reflects spacing, dictionary and left-context heuristics.
        """
        if ((ind + 1) >= len(pits) or pits[ind + 1].typ != PersonItemToken.ItemType.INITIAL or pits[ind].typ == PersonItemToken.ItemType.INITIAL):
            return None
        if (pits[ind].is_newline_after):
            return None
        if (pits[ind].lastname is None):
            return None
        res = PersonIdentityToken._new2495(pits[ind].begin_token, pits[ind + 1].end_token, FioTemplateType.SURNAMEI)
        res.coef -= (ind)
        PersonIdentityToken.manage_lastname(res, pits[ind], inf)
        # a Chinese surname item is acceptable as-is; otherwise penalize words that
        # are primarily known as first names without a standard surname tail
        if (pits[ind].is_asian_item(False) and pits[ind].lastname is not None and pits[ind].lastname.is_china_surname):
            pass
        elif (pits[ind].firstname is not None and pits[ind].firstname.is_in_dictionary):
            if (pits[ind].lastname is None or not pits[ind].lastname.is_lastname_has_std_tail):
                if ((ind == 0 and len(pits) == 3 and not pits[1].is_newline_after) and not pits[2].is_whitespace_after):
                    pass
                else:
                    res.coef -= (2)
        res.morph = (pits[ind].morph if pits[ind].lastname is None else pits[ind].lastname.morph)
        if (res.lastname.gender != MorphGender.UNDEFINED):
            res.morph.gender = res.lastname.gender
        if (pits[ind].whitespaces_after_count < 2):
            res.coef += (0.5)
        # the initial becomes the (abbreviated) first name
        res.firstname = PersonMorphCollection()
        res.firstname.add(pits[ind + 1].value, None, MorphGender.UNDEFINED, False)
        i1 = ind + 2
        if ((i1 < len(pits)) and pits[i1].typ == PersonItemToken.ItemType.INITIAL):
            # a second initial upgrades the template to SURNAMEII
            res.typ = FioTemplateType.SURNAMEII
            res.end_token = pits[i1].end_token
            res.middlename = (pits[i1].value)
            if (pits[i1].whitespaces_before_count < 2):
                res.coef += (0.5)
            i1 += 1
        if (i1 >= len(pits)):
            if (pits[0].lastname is not None and ((pits[0].lastname.is_in_dictionary or pits[0].lastname.is_in_ontology)) and pits[0].firstname is None):
                res.coef += 1
            return res
        # spacing relative to the item after the initials
        if (pits[ind].whitespaces_after_count > pits[i1].whitespaces_before_count):
            res.coef -= 1
        elif (pits[ind].whitespaces_after_count == pits[i1].whitespaces_before_count and pits[i1].lastname is not None):
            if ((i1 + 3) == len(pits) and pits[i1 + 1].typ == PersonItemToken.ItemType.INITIAL and pits[i1 + 2].typ == PersonItemToken.ItemType.INITIAL):
                pass
            else:
                if (pits[i1].is_in_dictionary and pits[i1].begin_token.get_morph_class_in_dictionary().is_noun):
                    pass
                else:
                    res.coef -= 1
        # bonus when nothing textual (other than person referents) precedes on this line
        ok = True
        tt = pits[ind].begin_token.previous
        while tt is not None:
            if (tt.is_newline_before):
                break
            elif (tt.get_referent() is not None and not ((isinstance(tt.get_referent(), PersonReferent)))):
                ok = False
                break
            elif ((isinstance(tt, TextToken)) and tt.chars.is_letter):
                ok = False
                break
            tt = tt.previous
        if (ok):
            res.coef += 1
        return res
    @staticmethod
    def __try_attachiisurname(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo') -> 'PersonIdentityToken':
        """Try to build an Initial(s)+Surname identity (templates ISURNAME /
        IISURNAME): one or two INITIAL items at *ind* followed by a VALUE item.

        Returns None when the shape does not match; otherwise a token whose
        .coef reflects left context, dictionary membership and spacing.
        """
        if ((ind + 1) >= len(pits) or pits[ind].typ != PersonItemToken.ItemType.INITIAL):
            return None
        if (ind > 0):
            # do not start in the middle of an initials run
            if (pits[ind - 1].typ == PersonItemToken.ItemType.INITIAL):
                return None
        if (pits[ind].is_newline_after):
            return None
        res = PersonIdentityToken._new2495(pits[ind].begin_token, pits[ind + 1].end_token, FioTemplateType.ISURNAME)
        res.coef -= (ind)
        res.firstname = PersonMorphCollection()
        res.firstname.add(pits[ind].value, None, MorphGender.UNDEFINED, False)
        i1 = ind + 1
        if (pits[i1].typ == PersonItemToken.ItemType.INITIAL):
            # a second initial upgrades the template to IISURNAME
            res.typ = FioTemplateType.IISURNAME
            res.middlename = (pits[i1].value)
            if (pits[i1].whitespaces_before_count < 2):
                res.coef += (0.5)
            i1 += 1
        if (i1 >= len(pits) or pits[i1].typ != PersonItemToken.ItemType.VALUE):
            return None
        if (pits[i1].is_newline_before):
            return None
        res.end_token = pits[i1].end_token
        # previous item (or a freshly-parsed one to the left) is used for context scoring
        prev = None
        if (not pits[ind].is_newline_before):
            if (ind > 0):
                prev = pits[ind - 1]
            else:
                prev = PersonItemToken.try_attach(pits[ind].begin_token.previous, None, (PersonItemToken.ParseAttr.CANBELATIN if pits[i1].chars.is_latin_letter else PersonItemToken.ParseAttr.NO), None)
                if (prev is not None):
                    # an attribute word (post/role) before the initials supports the reading
                    if (PersonAttrToken.try_attach_word(prev.begin_token) is not None):
                        prev = (None)
                        res.coef += 1
        PersonIdentityToken.manage_lastname(res, pits[i1], inf)
        if (pits[i1].lastname is not None and pits[i1].lastname.is_in_ontology):
            res.coef += 1
        if (pits[i1].firstname is not None and pits[i1].firstname.is_in_dictionary):
            # the "surname" is actually a known first name: penalize
            if (pits[i1].lastname is None or ((not pits[i1].lastname.is_lastname_has_std_tail and not pits[i1].lastname.is_in_ontology))):
                res.coef -= (2)
        if (prev is not None):
            mc = prev.begin_token.get_morph_class_in_dictionary()
            if (mc.is_preposition or mc.is_adverb or mc.is_verb):
                res.coef += (ind)
            if (pits[i1].lastname is not None):
                if (pits[i1].lastname.is_in_dictionary or pits[i1].lastname.is_in_ontology):
                    res.coef += (1)
            if (prev.lastname is not None and ((prev.lastname.is_lastname_has_std_tail or prev.lastname.is_in_dictionary))):
                res.coef -= (1)
        res.morph = (pits[i1].morph if pits[i1].lastname is None else pits[i1].lastname.morph)
        if (res.lastname.gender != MorphGender.UNDEFINED):
            res.morph.gender = res.lastname.gender
        if (pits[i1].whitespaces_before_count < 2):
            if (not pits[ind].is_newline_before and (pits[ind].whitespaces_before_count < 2) and prev is not None):
                pass
            else:
                res.coef += (0.5)
        if (prev is None):
            # no left context: line boundaries or trailing punctuation support the reading
            if (pits[ind].is_newline_before and pits[i1].is_newline_after):
                res.coef += (1)
            elif (pits[i1].end_token.next0_ is not None and ((pits[i1].end_token.next0_.is_char_of(";,.") or pits[i1].end_token.next0_.morph.class0_.is_conjunction))):
                res.coef += (1)
            return res
        if (prev.whitespaces_after_count < pits[i1].whitespaces_before_count):
            res.coef -= 1
        elif (prev.whitespaces_after_count == pits[i1].whitespaces_before_count and prev.lastname is not None):
            res.coef -= 1
        return res
    @staticmethod
    def __try_attach_king(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', prev_has_this_typ : bool=False) -> 'PersonIdentityToken':
        """Try to build a KING-template identity: a first name optionally
        followed by an ordinal/Roman numeral (e.g. a monarch's regnal number).

        Only applies at ind == 0.  Without a number, the pattern is accepted
        only when prev_has_this_typ indicates the surrounding context already
        used the KING template.  Returns None when not applicable.
        """
        if (ind > 0 or ind >= len(pits)):
            return None
        if (pits[0].firstname is None or pits[0].is_newline_after):
            return None
        # "ТОМ" ("volume N") would be a frequent false positive
        if (pits[0].begin_token.is_value("ТОМ", None)):
            return None
        i = 0
        if (len(pits) > 1 and ((pits[1].firstname is not None or pits[1].middlename is not None))):
            i += 1
        if (pits[i].is_newline_after):
            return None
        if (pits[i].end_token.whitespaces_after_count > 2):
            return None
        num = 0
        roman = False
        ok = False
        t = pits[i].end_token.next0_
        if (isinstance(t, NumberToken)):
            if (t.chars.is_all_lower or (t).int_value is None):
                return None
            num = (t).int_value
            # only adjective-like (ordinal) numbers qualify as regnal numbers
            if (not t.morph.class0_.is_adjective):
                return None
        else:
            if (((i + 2) < len(pits)) and pits[i + 1].typ == PersonItemToken.ItemType.INITIAL):
                return None
            nt = NumberHelper.try_parse_roman(t)
            if (nt is not None and nt.int_value is not None):
                num = nt.int_value
                roman = True
                t = nt.end_token
        if (num < 1):
            # no regnal number: accept only on prior-template evidence
            if (pits[0].firstname is not None and prev_has_this_typ):
                if (len(pits) == 1):
                    ok = True
                elif (len(pits) == 2 and pits[0].end_token.next0_.is_hiphen):
                    ok = True
            if (not ok):
                return None
        res = PersonIdentityToken._new2495(pits[0].begin_token, pits[0].end_token, FioTemplateType.KING)
        res.morph = PersonIdentityToken.__accord_morph(inf, None, pits[0].firstname, ((Utils.ifNotNull(pits[1].middlename, pits[1].firstname)) if len(pits) == 2 else None), pits[(1 if len(pits) == 2 else 0)].end_token.next0_)
        if (res.morph.gender == MorphGender.MASCULINE or res.morph.gender == MorphGender.FEMINIE):
            inf = (res.morph)
        # a definite gender is required unless a Roman numeral was found
        if (inf.gender != MorphGender.FEMINIE and inf.gender != MorphGender.MASCULINE and not roman):
            return None
        PersonIdentityToken.manage_firstname(res, pits[0], inf)
        if (len(pits) > 1):
            PersonIdentityToken.__manage_middlename(res, pits[1], inf)
            res.end_token = pits[1].end_token
        if (num > 0):
            # the regnal number is stored via the lastname collection
            res.lastname = PersonMorphCollection()
            res.lastname.number = num
            res.end_token = t
        res.coef = ((3 if num > 0 else 2))
        return res
    @staticmethod
    def __try_attach_asian(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo', cou : int, prev_has_this_typ : bool=False) -> 'PersonIdentityToken':
        """Try to build an Asian-style identity (templates ASIANNAME /
        ASIANSURNAMENAME) from *cou* (2 or 3) syllable items at ind == 0.

        When the first item is a known Chinese surname the template becomes
        ASIANSURNAMENAME; otherwise all items must look like Asian syllables.
        Returns None when the shape does not match.
        """
        if (ind > 0 or ind >= len(pits) or ((len(pits) != cou and len(pits) != (cou * 2)))):
            return None
        if (pits[0].lastname is not None and pits[0].lastname.is_china_surname and pits[0].chars.is_capital_upper):
            if (cou == 3):
                if (not pits[1].is_asian_item(False)):
                    return None
                if (not pits[2].is_asian_item(True)):
                    return None
            elif (cou == 2):
                if (pits[1].typ != PersonItemToken.ItemType.VALUE):
                    return None
        elif (cou == 3):
            if (not pits[0].is_asian_item(False)):
                return None
            if (not pits[1].is_asian_item(False)):
                return None
            if (not pits[2].is_asian_item(True)):
                return None
        else:
            if (not pits[0].is_asian_item(False)):
                return None
            if (not pits[1].is_asian_item(True)):
                return None
        # from here on, cou is the index of the LAST item of the name
        cou -= 1
        is_chine_sur = pits[0].lastname is not None and pits[0].lastname.is_china_surname
        res = PersonIdentityToken._new2495(pits[0].begin_token, pits[cou].end_token, FioTemplateType.ASIANNAME)
        if (pits[cou].lastname is not None):
            res.morph = PersonIdentityToken.__accord_morph(inf, pits[cou].lastname, None, None, pits[cou].end_token.next0_)
            if (not res.morph.case_.is_undefined):
                inf = (res.morph)
        if (is_chine_sur):
            # Chinese surname first: the remaining syllables form the given name
            res.typ = FioTemplateType.ASIANSURNAMENAME
            res.coef = (2)
            if (pits[1].is_asian_item(True)):
                res.coef += (1)
            PersonIdentityToken.manage_lastname(res, pits[0], inf)
            tr = PersonReferent._del_surname_end(pits[0].value)
            if (tr != pits[0].value):
                res.lastname.add(tr, None, MorphGender.MASCULINE, False)
            res.firstname = PersonMorphCollection()
            # register both space- and hyphen-joined given-name variants
            pref = (pits[1].value if cou == 2 else "")
            if (pits[cou].is_asian_item(False)):
                res.firstname.add(pref + pits[cou].value, None, MorphGender.MASCULINE, False)
                res.firstname.add(pref + pits[cou].value, None, MorphGender.FEMINIE, False)
                if (len(pref) > 0):
                    res.firstname.add(pref + "-" + pits[cou].value, None, MorphGender.MASCULINE, False)
                    res.firstname.add(pref + "-" + pits[cou].value, None, MorphGender.FEMINIE, False)
            else:
                v = PersonReferent._del_surname_end(pits[cou].value)
                res.firstname.add(pref + v, None, MorphGender.MASCULINE, False)
                if (len(pref) > 0):
                    res.firstname.add(pref + "-" + v, None, MorphGender.MASCULINE, False)
                ss = pits[cou].end_token.get_normal_case_text(MorphClass.NOUN, False, MorphGender.UNDEFINED, False)
                if (ss != v and len(ss) <= len(v)):
                    res.firstname.add(pref + ss, None, MorphGender.MASCULINE, False)
                    if (len(pref) > 0):
                        res.firstname.add(pref + "-" + ss, None, MorphGender.MASCULINE, False)
                inf.gender = MorphGender.MASCULINE
        else:
            # generic Asian name: the last syllable acts as the surname slot
            if (inf.gender == MorphGender.MASCULINE):
                PersonIdentityToken.manage_lastname(res, pits[cou], inf)
            else:
                res.lastname = PersonMorphCollection()
            if (pits[cou].is_asian_item(False)):
                res.lastname.add(pits[cou].value, None, MorphGender.MASCULINE, False)
                res.lastname.add(pits[cou].value, None, MorphGender.FEMINIE, False)
            else:
                v = PersonReferent._del_surname_end(pits[cou].value)
                res.lastname.add(v, None, MorphGender.MASCULINE, False)
                ss = pits[cou].end_token.get_normal_case_text(MorphClass.NOUN, False, MorphGender.UNDEFINED, False)
                if (ss != v and len(ss) <= len(v)):
                    res.lastname.add(ss, None, MorphGender.MASCULINE, False)
                inf.gender = MorphGender.MASCULINE
            if (cou == 2):
                res.coef = (2)
                if ((res.whitespaces_after_count < 2) and len(pits) > 3):
                    res.coef -= 1
                # the leading syllables are prefixed onto every lastname variant
                res.lastname.add_prefix_str("{0} {1} ".format(pits[0].value, pits[1].value))
            else:
                res.coef = (1)
                res.lastname.add_prefix_str(pits[0].value + " ")
        # penalties for syllables that are ordinary dictionary function words
        i = 0
        while i < len(pits):
            if (pits[i].is_in_dictionary):
                mc = pits[i].begin_token.get_morph_class_in_dictionary()
                if ((mc.is_conjunction or mc.is_pronoun or mc.is_preposition) or mc.is_personal_pronoun):
                    res.coef -= 0.5
            i += 1
        # repeated syllables are suspicious
        if (pits[0].value == pits[1].value):
            res.coef -= 0.5
        if (cou == 2):
            if (pits[0].value == pits[2].value):
                res.coef -= 0.5
            if (pits[1].value == pits[2].value):
                res.coef -= 0.5
        if (not pits[cou].is_whitespace_after):
            t = pits[cou].end_token.next0_
            if (t is not None and t.is_hiphen):
                res.coef -= 0.5
        # a bracketed sequence is likely not a person name
        if (BracketHelper.can_be_end_of_sequence(t, False, None, False)):
            res.coef -= 0.5
        if (BracketHelper.can_be_start_of_sequence(pits[0].begin_token.previous, False, False)):
            res.coef -= 0.5
        return res
    @staticmethod
    def try_attach_identity(pits : typing.List['PersonItemToken'], inf : 'MorphBaseInfo') -> 'PersonIdentityToken':
        """Build an identity token from an opaque name string: either a single
        REFERENT item or 2-3 uniform VALUE items treated as one indivisible name.

        Both the surface text and a per-word normalized form are stored in the
        lastname collection; .coef accumulates penalties for dictionary words,
        known name parts and suspicious spacing/capitalization.
        Returns None when the items do not form such a sequence.
        """
        if (len(pits) == 1):
            if (pits[0].typ != PersonItemToken.ItemType.REFERENT):
                return None
        else:
            if (len(pits) != 2 and len(pits) != 3):
                return None
            for p in pits:
                if (p.typ != PersonItemToken.ItemType.VALUE):
                    return None
                # all items must share the same capitalization
                if (p.chars != pits[0].chars):
                    return None
        begin = Utils.asObjectOrNull(pits[0].begin_token, TextToken)
        end = Utils.asObjectOrNull(pits[len(pits) - 1].end_token, TextToken)
        if (begin is None or end is None):
            return None
        res = PersonIdentityToken(begin, end)
        res.lastname = PersonMorphCollection()
        s = MiscHelper.get_text_value(begin, end, GetTextAttr.NO)
        if (len(s) > 100):
            return None
        # build a normalized variant: each word replaced by its shortest normal form
        tmp = io.StringIO()
        t = begin
        first_pass3285 = True
        # generated while-loop emulating a C# for-loop over [begin..end]
        while True:
            if first_pass3285: first_pass3285 = False
            else: t = t.next0_
            if (not (t is not None and t.previous != end)): break
            tt = Utils.asObjectOrNull(t, TextToken)
            if (tt is None):
                continue
            if (tt.is_hiphen):
                print('-', end="", file=tmp)
                continue
            if (tmp.tell() > 0):
                if (Utils.getCharAtStringIO(tmp, tmp.tell() - 1) != '-'):
                    print(' ', end="", file=tmp)
            if (tt.length_char < 3):
                print(tt.term, end="", file=tmp)
                continue
            sss = tt.term
            for wff in tt.morph.items:
                wf = Utils.asObjectOrNull(wff, MorphWordForm)
                if (wf is not None and wf.normal_case is not None and (len(wf.normal_case) < len(sss))):
                    sss = wf.normal_case
            print(sss, end="", file=tmp)
        ss = Utils.toStringStringIO(tmp)
        # in nominative context prefer the surface form first, otherwise the normalized one
        if (inf.case_.is_nominative):
            res.lastname.add(s, None, MorphGender.UNDEFINED, False)
            if (s != ss):
                res.lastname.add(ss, None, MorphGender.UNDEFINED, False)
        else:
            if (s != ss):
                res.lastname.add(ss, None, MorphGender.UNDEFINED, False)
            res.lastname.add(s, None, MorphGender.UNDEFINED, False)
        for p in pits:
            if (p != pits[0]):
                if (p.is_newline_before):
                    res.coef -= (1)
                elif (p.whitespaces_before_count > 1):
                    res.coef -= (0.5)
            res.coef += (0.5)
            # longer words that are plain dictionary words or known name parts
            # argue against an opaque-identity reading
            if (p.length_char > 4):
                if (p.is_in_dictionary):
                    res.coef -= (1.5)
                if (p.lastname is not None and ((p.lastname.is_in_dictionary or p.lastname.is_in_ontology))):
                    res.coef -= (1)
                if (p.firstname is not None and p.firstname.is_in_dictionary):
                    res.coef -= (1)
                if (p.middlename is not None):
                    res.coef -= (1)
                if (p.chars.is_all_upper):
                    res.coef -= (0.5)
            elif (p.chars.is_all_upper):
                res.coef -= (1)
        if (len(pits) == 2 and pits[1].lastname is not None and ((pits[1].lastname.is_lastname_has_std_tail or pits[1].lastname.is_in_dictionary))):
            res.coef -= 0.5
        return res
    @staticmethod
    def __try_attach_global(pits : typing.List['PersonItemToken'], ind : int, inf : 'MorphBaseInfo') -> 'PersonIdentityToken':
        """Recognize a small hard-coded set of globally known persons:
        "Аун Сан Су Чжи" (Aung San Suu Kyi), "Иван Грозный", "Юрий Долгорукий",
        and firstname + epithet pairs ("Великий", "Святой", "Преподобный",
        "Блаженный") in either order.

        On a match, returns a token with a pre-built ontology_person and the
        maximal confidence coef = 10; otherwise None.  Only applies at ind == 0.
        NOTE(review): the gender checks differ slightly between the
        epithet-first and epithet-second branches — preserved as-is.
        """
        if (ind > 0 or pits[0].typ != PersonItemToken.ItemType.VALUE):
            return None
        if ((len(pits) == 4 and pits[0].value == "АУН" and pits[1].value == "САН") and pits[2].value == "СУ" and pits[3].value == "ЧЖИ"):
            res = PersonIdentityToken(pits[0].begin_token, pits[3].end_token)
            res.ontology_person = PersonReferent()
            res.ontology_person.add_slot(PersonReferent.ATTR_IDENTITY, "АУН САН СУ ЧЖИ", False, 0)
            res.ontology_person.is_female = True
            res.coef = (10)
            return res
        # firstname followed by a nickname/epithet
        if (len(pits) == 2 and pits[0].firstname is not None and pits[0].firstname.is_in_dictionary):
            if (pits[0].begin_token.is_value("ИВАН", None) and pits[1].begin_token.is_value("ГРОЗНЫЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                res.ontology_person.add_slot(PersonReferent.ATTR_FIRSTNAME, "ИВАН", False, 0)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, "ГРОЗНЫЙ", False, 0)
                res.ontology_person.is_male = True
                res.coef = (10)
                return res
            if (pits[0].begin_token.is_value("ЮРИЙ", None) and pits[1].begin_token.is_value("ДОЛГОРУКИЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                res.ontology_person.add_slot(PersonReferent.ATTR_FIRSTNAME, "ЮРИЙ", False, 0)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, "ДОЛГОРУКИЙ", False, 0)
                res.ontology_person.is_male = True
                res.coef = (10)
                return res
            if (pits[1].begin_token.is_value("ВЕЛИКИЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                # gender from the first name, else from the epithet's morphology
                if (pits[0].firstname.morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[0].firstname.morph.gender == MorphGender.MASCULINE or (((pits[1].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[0], pits[1].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("ВЕЛИКИЙ" if res.ontology_person.is_male else "ВЕЛИКАЯ"), False, 0)
                res.coef = (10)
                return res
            if (pits[1].begin_token.is_value("СВЯТОЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                if (pits[0].firstname.morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[0].firstname.morph.gender == MorphGender.MASCULINE or (((pits[1].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[0], pits[1].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("СВЯТОЙ" if res.ontology_person.is_male else "СВЯТАЯ"), False, 0)
                res.coef = (10)
                return res
            if (pits[1].begin_token.is_value("ПРЕПОДОБНЫЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                if (pits[0].firstname.morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[0].firstname.morph.gender == MorphGender.MASCULINE or (((pits[1].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[0], pits[1].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("ПРЕПОДОБНЫЙ" if res.ontology_person.is_male else "ПРЕПОДОБНАЯ"), False, 0)
                res.coef = (10)
                return res
            if (pits[1].begin_token.is_value("БЛАЖЕННЫЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                if (pits[0].firstname.morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[0].firstname.morph.gender == MorphGender.MASCULINE or (((pits[1].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[0], pits[1].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("БЛАЖЕННЫЙ" if res.ontology_person.is_male else "БЛАЖЕННАЯ"), False, 0)
                res.coef = (10)
                return res
        # epithet preceding the firstname
        if (len(pits) == 2 and pits[1].firstname is not None and pits[1].firstname.is_in_dictionary):
            if (pits[0].begin_token.is_value("СВЯТОЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                if (pits[1].firstname.morph.gender == MorphGender.FEMINIE or pits[0].morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[1].firstname.morph.gender == MorphGender.MASCULINE or (((pits[0].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[1], pits[0].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("СВЯТОЙ" if res.ontology_person.is_male else "СВЯТАЯ"), False, 0)
                res.coef = (10)
                return res
            if (pits[0].begin_token.is_value("ПРЕПОДОБНЫЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                if (pits[1].firstname.morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[1].firstname.morph.gender == MorphGender.MASCULINE or (((pits[0].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[1], pits[0].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("ПРЕПОДОБНЫЙ" if res.ontology_person.is_male else "ПРЕПОДОБНАЯ"), False, 0)
                res.coef = (10)
                return res
            if (pits[0].begin_token.is_value("БЛАЖЕННЫЙ", None)):
                res = PersonIdentityToken(pits[0].begin_token, pits[1].end_token)
                res.ontology_person = PersonReferent()
                if (pits[1].firstname.morph.gender == MorphGender.FEMINIE):
                    res.ontology_person.is_female = True
                elif (pits[1].firstname.morph.gender == MorphGender.MASCULINE or (((pits[0].morph.gender) & (MorphGender.MASCULINE))) != (MorphGender.UNDEFINED)):
                    res.ontology_person.is_male = True
                else:
                    return None
                PersonIdentityToken.manage_firstname(res, pits[1], pits[0].morph)
                res.ontology_person._add_fio_identity(None, res.firstname, None)
                res.ontology_person.add_slot(PersonReferent.ATTR_NICKNAME, ("БЛАЖЕННЫЙ" if res.ontology_person.is_male else "БЛАЖЕННАЯ"), False, 0)
                res.coef = (10)
                return res
        return None
    @staticmethod
    def __accord_morph(inf : 'MorphBaseInfo', p1 : 'MorphPersonItem', p2 : 'MorphPersonItem', p3 : 'MorphPersonItem', next0__ : 'Token') -> 'MorphCollection':
        """Compute the morphological readings (gender + case) on which the
        given surname (p1), firstname (p2) and middlename (p3) items agree,
        optionally constrained by *inf* and disambiguated by a following
        singular past-tense-style verb (next0__) whose gender is definite.

        Returns a MorphCollection with one item per surviving gender.
        """
        res = MorphCollection()
        pp = list()
        if (p1 is not None):
            pp.append(p1)
        if (p2 is not None):
            pp.append(p2)
        if (p3 is not None):
            pp.append(p3)
        if (len(pp) == 0):
            return res
        # drop the external constraint when it contradicts a dictionary-backed item
        if (inf is not None and p1 is not None and ((p1.is_lastname_has_std_tail or p1.is_in_dictionary))):
            if (((inf.case_) & p1.morph.case_).is_undefined):
                inf = (None)
        if (inf is not None and p2 is not None and p2.is_in_dictionary):
            if (((inf.case_) & p2.morph.case_).is_undefined):
                inf = (None)
        # try each gender separately and intersect the case sets of all items
        for i in range(2):
            g = (MorphGender.MASCULINE if i == 0 else MorphGender.FEMINIE)
            if (inf is not None and inf.gender != MorphGender.UNDEFINED and (((inf.gender) & (g))) == (MorphGender.UNDEFINED)):
                continue
            cas = MorphCase.ALL_CASES
            for p in pp:
                ca = MorphCase()
                for v in p.vars0_:
                    if (v.gender != MorphGender.UNDEFINED):
                        if ((((v.gender) & (g))) == (MorphGender.UNDEFINED)):
                            continue
                    if (inf is not None and not inf.case_.is_undefined and not v.case_.is_undefined):
                        if (((inf.case_) & v.case_).is_undefined):
                            continue
                    if (not v.case_.is_undefined):
                        ca |= v.case_
                    else:
                        ca = MorphCase.ALL_CASES
                cas &= ca
            if (not cas.is_undefined):
                if (inf is not None and not inf.case_.is_undefined and not ((inf.case_) & cas).is_undefined):
                    cas &= inf.case_
                res.add_item(MorphBaseInfo._new2508(g, cas))
        # a following gendered singular verb can pick out a unique nominative reading
        verb_gend = MorphGender.UNDEFINED
        if ((next0__ is not None and (isinstance(next0__, TextToken)) and next0__.chars.is_all_lower) and next0__.morph.class0_ == MorphClass.VERB and next0__.morph.number == MorphNumber.SINGULAR):
            if (next0__.morph.gender == MorphGender.FEMINIE or next0__.morph.gender == MorphGender.MASCULINE):
                verb_gend = next0__.morph.gender
                # unless a matching nominative noun phrase follows the verb (its subject)
                npt = NounPhraseHelper.try_parse(next0__.next0_, NounPhraseParseAttr.NO, 0)
                if ((npt is not None and npt.morph.case_.is_nominative and npt.morph.gender == verb_gend) and npt.morph.number == MorphNumber.SINGULAR):
                    verb_gend = MorphGender.UNDEFINED
        if (verb_gend != MorphGender.UNDEFINED and res.items_count > 1):
            cou = 0
            for it in res.items:
                if (it.case_.is_nominative and it.gender == verb_gend):
                    cou += 1
            if (cou == 1):
                # keep only the reading agreeing with the verb
                for i in range(res.items_count - 1, -1, -1):
                    if (not res.get_indexer_item(i).case_.is_nominative or res.get_indexer_item(i).gender != verb_gend):
                        res.remove_item(i)
        return res
@staticmethod
def __is_accords(mt : 'MorphPersonItem', inf : 'MorphBaseInfo') -> bool:
if (inf is None):
return True
if (len(mt.vars0_) == 0):
return True
for wf in mt.vars0_:
ok = True
if (not inf.case_.is_undefined and not wf.case_.is_undefined):
if (((wf.case_) & inf.case_).is_undefined):
ok = False
if (inf.gender != MorphGender.UNDEFINED and wf.gender != MorphGender.UNDEFINED):
if ((((inf.gender) & (wf.gender))) == (MorphGender.UNDEFINED)):
ok = False
if (ok):
return True
return False
    @staticmethod
    def __is_both_surnames(p1 : 'PersonItemToken', p2 : 'PersonItemToken') -> bool:
        """Return True when both items look like surnames (dictionary, ontology
        or standard-tail evidence), neither carries first/middle-name evidence,
        and their last letters differ (so they cannot be the same inflected word).
        """
        if (p1 is None or p2 is None):
            return False
        if (p1.lastname is None or p2.lastname is None):
            return False
        if (not p1.lastname.is_in_dictionary and not p1.lastname.is_in_ontology and not p1.lastname.is_lastname_has_std_tail):
            return False
        # NOTE(review): this line mixes p1.firstname with p2.middlename while the
        # parallel check below uses p2 for both — possibly a porting artifact
        # (p1.middlename intended); preserved as-is, confirm against the C# origin.
        if (p1.firstname is not None or p2.middlename is not None):
            return False
        if (not p2.lastname.is_in_dictionary and not p2.lastname.is_in_ontology and not p2.lastname.is_lastname_has_std_tail):
            return False
        if (p2.firstname is not None or p2.middlename is not None):
            return False
        if (not ((isinstance(p1.end_token, TextToken))) or not ((isinstance(p2.end_token, TextToken)))):
            return False
        v1 = (p1.end_token).term
        v2 = (p2.end_token).term
        # identical final letters suggest the same grammatical form, not two surnames
        if (v1[len(v1) - 1] == v2[len(v2) - 1]):
            return False
        return True
@staticmethod
def __get_value(mt : 'MorphPersonItem', inf : 'MorphBaseInfo') -> str:
for wf in mt.vars0_:
if (inf is not None):
if (not inf.case_.is_undefined and not wf.case_.is_undefined):
if (((wf.case_) & inf.case_).is_undefined):
continue
if (inf.gender != MorphGender.UNDEFINED and wf.gender != MorphGender.UNDEFINED):
if ((((inf.gender) & (wf.gender))) == (MorphGender.UNDEFINED)):
continue
return wf.value
return mt.term
@staticmethod
def __set_value2(res : 'PersonMorphCollection', mt : 'MorphPersonItem', inf : 'MorphBaseInfo') -> None:
ok = False
for wf in mt.vars0_:
if (inf is not None):
if (not inf.case_.is_undefined and not wf.case_.is_undefined):
if (((wf.case_) & inf.case_).is_undefined):
continue
if (inf.gender != MorphGender.UNDEFINED and wf.gender != MorphGender.UNDEFINED):
if ((((inf.gender) & (wf.gender))) == (MorphGender.UNDEFINED)):
continue
ok = True
res.add(wf.value, wf.short_value, wf.gender, False)
if (len(res.values) == 0):
if ((inf is not None and not inf.case_.is_undefined and len(mt.vars0_) > 0) and mt.is_lastname_has_std_tail):
for wf in mt.vars0_:
res.add(wf.value, wf.short_value, wf.gender, False)
res.add(mt.term, None, inf.gender, False)
    @staticmethod
    def __set_value(res : 'PersonMorphCollection', t : 'Token', inf : 'MorphBaseInfo') -> None:
        """Collect into ``res`` the normalized morphological readings of a
        single text token ``t`` that are compatible with ``inf``; always adds
        the raw term as a final fallback entry."""
        tt = Utils.asObjectOrNull(t, TextToken)
        # A one-token MetaToken is unwrapped to its underlying text token.
        if (tt is None and (isinstance(t, MetaToken)) and (t).begin_token == (t).end_token):
            tt = (Utils.asObjectOrNull((t).begin_token, TextToken))
        if (tt is None):
            return
        for wf in tt.morph.items:
            # Verb readings are never usable as personal-name forms.
            if (wf.class0_.is_verb):
                continue
            # "к.ф." is a morphology attribute tag (short-form marker) — skip such readings.
            if (wf.contains_attr("к.ф.", None)):
                continue
            # Gender compatibility: bitmasks must overlap when both are defined.
            if (inf is not None and inf.gender != MorphGender.UNDEFINED and wf.gender != MorphGender.UNDEFINED):
                if ((((wf.gender) & (inf.gender))) == (MorphGender.UNDEFINED)):
                    continue
            # Case compatibility: bitmasks must overlap when both are defined.
            if (inf is not None and not inf.case_.is_undefined and not wf.case_.is_undefined):
                if (((wf.case_) & inf.case_).is_undefined):
                    continue
            # Latin-script tokens keep their literal spelling; Cyrillic ones use the normal case form.
            str0_ = (tt.term if t.chars.is_latin_letter else (wf).normal_case)
            res.add(str0_, None, wf.gender, False)
        res.add(tt.term, None, (MorphGender.UNDEFINED if inf is None else inf.gender), False)
    @staticmethod
    def correctxfml(pli0 : typing.List['PersonIdentityToken'], pli1 : typing.List['PersonIdentityToken'], attrs : typing.List['PersonAttrToken']) -> bool:
        """Decide between two competing name-order templates for the same span:
        a surname-first reading from ``pli0`` versus a surname-last reading
        from ``pli1``. When the surname-last reading looks preferable its
        coefficient is bumped just above the other and True is returned."""
        p0 = None
        p1 = None
        # Prefer the full three-part templates first...
        for p in pli0:
            if (p.typ == FioTemplateType.SURNAMENAMESECNAME):
                p0 = p
                break
        for p in pli1:
            if (p.typ == FioTemplateType.NAMESECNAMESURNAME):
                p1 = p
                break
        # ...then fall back to the two-part templates.
        if (p0 is None or p1 is None):
            for p in pli0:
                if (p.typ == FioTemplateType.SURNAMENAME):
                    p0 = p
                    break
            for p in pli1:
                if (p.typ == FioTemplateType.NAMESURNAME):
                    p1 = p
                    break
        if (p0 is None or p1 is None):
            return False
        # The alternative must not already be scored higher.
        if (p1.coef > p0.coef):
            return False
        # A line break inside the candidate span disqualifies it.
        tt = p1.begin_token
        while tt != p1.end_token:
            if (tt.is_newline_after):
                return False
            tt = tt.next0_
        # If yet another person item follows on the same line, keep the original reading.
        if (not p1.end_token.is_newline_after):
            if (PersonItemToken.try_attach(p1.end_token.next0_, None, PersonItemToken.ParseAttr.NO, None) is not None):
                return False
        if (p0.lastname is None or p1.lastname is None):
            return False
        # Favor p1 when only its surname has a standard surname suffix.
        if (p1.lastname.has_lastname_standard_tail):
            if (not p0.lastname.has_lastname_standard_tail):
                p1.coef = (p0.coef + (0.1))
                return True
        if (attrs is None or len(attrs) == 0):
            if (not p1.lastname.has_lastname_standard_tail and p0.lastname.has_lastname_standard_tail):
                return False
            t = p1.end_token.next0_
            if (t is not None and not t.chars.is_capital_upper and not t.chars.is_all_upper):
                # A multi-word noun phrase starting at the candidate surname
                # suggests it is ordinary text, not a surname.
                npt = NounPhraseHelper.try_parse(p1.end_token, NounPhraseParseAttr.NO, 0)
                if (npt is not None and npt.end_token != npt.begin_token):
                    return False
                cl1 = p0.begin_token.get_morph_class_in_dictionary()
                cl2 = p1.end_token.get_morph_class_in_dictionary()
                # A dictionary noun at the end (but not at the start) argues against p1.
                if (cl2.is_noun and not cl1.is_noun):
                    return False
                p1.coef = (p0.coef + (0.1))
                return True
        return False
    @staticmethod
    def check_latin_after(pit : 'PersonIdentityToken') -> 'PersonIdentityToken':
        """If ``pit`` is immediately followed by a parenthesized Latin-script
        spelling of the same person (2 or 3 items), build and return a new
        PersonIdentityToken covering the parenthesized part; otherwise None.
        At least one of the Latin items must match a known Latin variant of
        the original name."""
        if (pit is None):
            return None
        t = pit.end_token.next0_
        # Must be followed by an opening parenthesis.
        if (t is None or not t.is_char('(')):
            return None
        t = t.next0_
        # Parse two (optionally three) Latin person items inside the parentheses.
        p1 = PersonItemToken.try_attach_latin(t)
        if (p1 is None):
            return None
        p2 = PersonItemToken.try_attach_latin(p1.end_token.next0_)
        if (p2 is None):
            return None
        if (p2.end_token.next0_ is None):
            return None
        p3 = None
        et = p2.end_token.next0_
        if (p2.end_token.next0_.is_char(')')):
            pass
        else:
            # A third item is allowed, but then it must be followed by ')'.
            p3 = PersonItemToken.try_attach_latin(et)
            if (p3 is None):
                return None
            et = p3.end_token.next0_
            if (et is None or not et.is_char(')')):
                return None
        sur = None
        nam = None
        sec = None
        if (pit.typ == FioTemplateType.NAMESURNAME and pit.firstname is not None and pit.lastname is not None):
            # Two-part original (name + surname): count matching Latin variants.
            eq = 0
            if (p1.typ == PersonItemToken.ItemType.VALUE):
                if (pit.firstname.check_latin_variant(p1.value)):
                    eq += 1
                nam = p1
            if (p2.typ == PersonItemToken.ItemType.VALUE and p3 is None):
                sur = p2
                if (pit.lastname.check_latin_variant(p2.value)):
                    eq += 1
            elif (p2.typ == PersonItemToken.ItemType.INITIAL and p3 is not None):
                # Pattern "Name X. Surname": the middle item is an initial.
                if (pit.lastname.check_latin_variant(p3.value)):
                    eq += 1
                sur = p3
            if (eq == 0):
                return None
        elif ((pit.typ == FioTemplateType.NAMESECNAMESURNAME and pit.firstname is not None and pit.middlename is not None) and pit.lastname is not None and p3 is not None):
            # Three-part original (name + patronymic + surname).
            eq = 0
            if (p1.typ == PersonItemToken.ItemType.VALUE):
                if (pit.firstname.check_latin_variant(p1.value)):
                    eq += 1
                nam = p1
            if (p2.typ == PersonItemToken.ItemType.VALUE):
                sec = p2
                if (isinstance(pit.middlename, PersonMorphCollection)):
                    if ((pit.middlename).check_latin_variant(p2.value)):
                        eq += 1
            if (p3.typ == PersonItemToken.ItemType.VALUE):
                sur = p3
                if (pit.lastname.check_latin_variant(p3.value)):
                    eq += 1
            if (eq == 0):
                return None
        if (nam is None or sur is None):
            return None
        # Build the resulting token spanning the parenthesized content.
        res = PersonIdentityToken._new2495(t, et, pit.typ)
        res.lastname = PersonMorphCollection()
        res.lastname.add(sur.value, None, MorphGender.UNDEFINED, False)
        res.firstname = PersonMorphCollection()
        res.firstname.add(nam.value, None, MorphGender.UNDEFINED, False)
        if (sec is not None):
            res.middlename = (PersonMorphCollection())
            (res.middlename).add(sec.value, None, MorphGender.UNDEFINED, False)
        return res
@staticmethod
def _new2495(_arg1 : 'Token', _arg2 : 'Token', _arg3 : 'FioTemplateType') -> 'PersonIdentityToken':
res = PersonIdentityToken(_arg1, _arg2)
res.typ = _arg3
return res | [
"alex@alexkuk.ru"
] | alex@alexkuk.ru |
51b96d33a052a040280d116a4ef0520fd9628657 | 48a522b031d45193985ba71e313e8560d9b191f1 | /baekjoon/python/26562.py | 4dd8cc5efe23ccc1c2c30ecf7fec7171d06e1bcd | [] | no_license | dydwnsekd/coding_test | beabda0d0aeec3256e513e9e0d23b43debff7fb3 | 4b2b4878408558239bae7146bb4f37888cd5b556 | refs/heads/master | 2023-09-04T12:37:03.540461 | 2023-09-03T15:58:33 | 2023-09-03T15:58:33 | 162,253,096 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import sys
# Value of each bill, keyed by the person pictured on it.
money_dict = {"Franklin": 100, "Grant": 50, "Jackson": 20, "Hamilton": 10, "Lincoln": 5, "Washington": 1}
# First input line: number of test cases.
cases = int(sys.stdin.readline())
for _ in range(cases):
    total_money = 0
    # Each test case is one line of space-separated bill names.
    wallet = list(sys.stdin.readline().split())
    for w in wallet:
        total_money += money_dict[w]
    print(f"${total_money}")
| [
"dydwnsekd123@gmail.com"
] | dydwnsekd123@gmail.com |
d2a79e2201a7b60db1dd4df3cd2de184d10141d6 | 5f845ebbc2c9b40eea702833c91928ae90ae7ee5 | /algorithms/manasa-and-stones.py | 935737c7d38b736bd795f52b6dfb7b60373024ed | [
"MIT"
] | permissive | imgeekabhi/HackerRank | 7a1917fee5af01976aebb9c82aa1045a36487016 | 7fe4a308abad85ce446a28328324be480672e6fc | refs/heads/master | 2022-12-28T19:13:49.098090 | 2020-10-11T09:29:08 | 2020-10-11T09:29:08 | 300,023,395 | 1 | 0 | MIT | 2020-09-30T18:48:12 | 2020-09-30T18:48:11 | null | UTF-8 | Python | false | false | 379 | py | #!/bin/python3
import sys
def stones(n, a, b):
    """Return the sorted distinct possible values of the last stone when
    each of the n-1 consecutive differences is either a or b."""
    lo, step = min(a, b), abs(a - b)
    # Take k large steps and (n-1-k) small ones, for every k in 0..n-1.
    return sorted({(n - 1) * lo + k * step for k in range(n)})
# Driver for HackerRank "Manasa and Stones": read T test cases, each
# giving the stone count n and the two possible step sizes a and b.
if __name__ == "__main__":
    T = int(input().strip())
    for a0 in range(T):
        n = int(input().strip())
        a = int(input().strip())
        b = int(input().strip())
        result = stones(n, a, b)
        print (" ".join(map(str, result)))
| [
"sergey.n.nemov@gmail.com"
] | sergey.n.nemov@gmail.com |
dce8e6165e091fab69df93ff3e8300d17303dba0 | da0fa8d4fdee6f8d5b52723d45e46f9cc0e55866 | /publ/cli.py | b3b3247b69dd6d2c3fb39bdc65a4e36c66e47ade | [
"MIT"
] | permissive | PlaidWeb/Publ | 3f62ceb490a29a639314dc792552d06d511012f4 | 2cc227ec975529a89eec105f63e4102b62eeddbe | refs/heads/main | 2023-08-18T08:41:48.478369 | 2023-08-08T17:38:17 | 2023-08-08T17:38:17 | 127,061,540 | 33 | 5 | MIT | 2023-09-01T20:30:18 | 2018-03-28T00:30:42 | Python | UTF-8 | Python | false | false | 5,831 | py | """ CLI utilities for Publ """
# pylint:disable=too-many-arguments
import itertools
import logging
import os.path
import re
import time
import arrow
import click
import slugify
from flask.cli import AppGroup, with_appcontext
from pony import orm
from . import queries
from .config import config
# Module-level logger and the `publ` click sub-command group that the
# commands below attach themselves to via @publ_cli.command(...).
LOGGER = logging.getLogger(__name__)
publ_cli = AppGroup('publ', short_help="Publ-specific commands")  # pylint:disable=invalid-name
@publ_cli.command('reindex', short_help="Reindex the content store")
@click.option('--quietly', '-q', 'quietly', is_flag=True, help="Quietly")
@click.option('--fresh', '-f', 'fresh', is_flag=True, help="Start with a fresh database")
@with_appcontext
def reindex_command(quietly, fresh):
    """ Forces a reindex of the content store.
    This is particularly useful to ensure that all content has been indexed
    before performing another action, such as sending out notifications.
    """
    # Imported lazily so the CLI module loads without pulling in the indexer.
    from . import index, model
    if fresh:
        # Drop and recreate the index database before scanning.
        model.reset()
    spinner = itertools.cycle('|/-\\')
    # Kick off the (asynchronous) scan, then poll until it finishes,
    # redrawing a one-line spinner with the remaining queue size.
    index.scan_index(config.content_folder, False)
    while index.in_progress():
        if not quietly:
            qlen = index.queue_size() or ''
            print(f"\rIndexing... {next(spinner)} {qlen} ", end='', flush=True)
        time.sleep(0.1)
    if not quietly:
        print("Done")
@publ_cli.command('token', short_help="Generate a bearer token")
@click.argument('identity')
@click.option('--scope', '-s', help="The token's permission scope")
@click.option('--lifetime', '-l', help="The token's lifetime (in seconds)", default=3600)
@with_appcontext
def token_command(identity, scope, lifetime):
    """ Generates a bearer token for use with external applications. """
    # Imported lazily; prints the signed token for the given identity.
    from . import tokens
    print(tokens.get_token(identity, int(lifetime), scope))
@publ_cli.command('normalize', short_help="Normalize entry filenames")
@click.argument('category', nargs=-1)
@click.option('--recurse', '-r', 'recurse', is_flag=True,
              help="Include subdirectories")
@click.option('--all', '-a', 'all_entries', is_flag=True,
              help="Apply to all entries, not just reachable ones")
@click.option('--dry-run', '-n', 'dry_run', is_flag=True,
              help="Show, but don't apply, changes")
@click.option('--format', '-f', 'format_str',
              help="Filename format to use",
              default="{date} {sid} {title}")
@click.option('--verbose', '-v', 'verbose', is_flag=True,
              help="Show detailed actions")
@with_appcontext
@orm.db_session
def normalize_command(category, recurse, dry_run, format_str, verbose, all_entries):
    """ Normalizes the filenames of content files based on a standardized format.
    This will only normalize entries which are already in the content index.
    If no categories are specified, it defaults to the root category. To include
    the root category in a list of other categories, use an empty string parameter,
    e.g.:
    flask publ normalize '' blog
    Available tokens for --format/-f:
    {date}      The entry's publish date, in YYYYMMDD format
    {time}      The entry's publish time, in HHMMSS format
    {id}        The entry's ID
    {status}    The entry's publish status
    {sid}       If the entry is reachable, the ID, otherwise the status
    {title}     The entry's title, normalized to filename-safe characters
    {slug}      The entry's slug text
    {type}      The entry's type
    """
    # pylint:disable=too-many-locals
    from .model import PublishStatus
    # All candidate entries per the category/recurse/all flags.
    entries = queries.build_query({
        'category': category or '',
        'recurse': recurse,
        '_future': True,
        '_all': all_entries,
    })
    # Slugifier that appends a counter when a generated name collides.
    fname_slugify = slugify.UniqueSlugify(max_length=100, safe_chars='-.', separator=' ')
    for entry in entries:
        path = os.path.dirname(entry.file_path)
        basename, ext = os.path.splitext(os.path.basename(entry.file_path))
        status = PublishStatus(entry.status)
        eid = entry.id
        if status == PublishStatus.DRAFT:
            # Draft entries don't get a stable entry ID
            eid = status.name
        # {sid}: ID for reachable entries, otherwise the status name.
        sid = entry.id if status in (PublishStatus.PUBLISHED,
                                     PublishStatus.HIDDEN,
                                     PublishStatus.SCHEDULED) else status.name
        date = arrow.get(entry.local_date)
        # Expand the --format template, then collapse repeated spaces.
        dest_basename = format_str.format(
            date=date.format('YYYYMMDD'),
            time=date.format('HHmmss'),
            id=eid,
            status=status.name,
            sid=sid,
            title=entry.title,
            slug=entry.slug_text,
            type=entry.entry_type).strip()
        dest_basename = re.sub(r' +', ' ', dest_basename)
        if dest_basename != basename:
            while True:
                # UniqueSlugify will bump the suffix until it doesn't collide
                dest_path = os.path.join(path, fname_slugify(dest_basename) + ext)
                if not os.path.exists(dest_path):
                    break
            if verbose:
                print(f'{entry.file_path} -> {dest_path}')
            if not os.path.isfile(entry.file_path):
                LOGGER.warning('File %s does not exist; is the index up-to-date?', entry.file_path)
            elif os.path.exists(dest_path):
                LOGGER.warning('File %s already exists', dest_path)
            elif not dry_run:
                try:
                    os.rename(entry.file_path, dest_path)
                except OSError:
                    LOGGER.exception('Error moving %s to %s', entry.file_path, dest_path)
                # NOTE(review): the index record is updated and committed even
                # when the rename above raised — confirm this is intended.
                entry.file_path = dest_path
                orm.commit()
def setup(app):
    """ Register the CLI commands with the command parser """
    # Attaches the whole `publ` sub-command group to the Flask app's CLI.
    app.cli.add_command(publ_cli)
| [
"fluffy@beesbuzz.biz"
] | fluffy@beesbuzz.biz |
bf62c010e605a1512decc17c0d3cec6837ac690e | eaf83bd07e03cb3d9934c31c5cda60040546ff3d | /tensorflow/contrib/learn/python/learn/tests/dataframe/test_column.py | ae4f36cceb91e3931f5ae2edbe607de0fed9d156 | [
"Apache-2.0"
] | permissive | anguillanneuf/tensorflow | 508d2ddcd59a0c2ba8e138bdfa0af0fecb5d8640 | 7d58c6856c4e0e3095d4a50cc23a0ff036338949 | refs/heads/master | 2021-01-22T08:32:43.794741 | 2016-06-06T15:10:07 | 2016-06-06T15:10:07 | 59,699,880 | 0 | 0 | null | 2016-05-25T21:45:20 | 2016-05-25T21:45:20 | null | UTF-8 | Python | false | false | 2,240 | py | """Tests of the Column class."""
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
class TransformedColumnTest(tf.test.TestCase):
  """Test of `TransformedColumn`."""

  def test_repr(self):
    # repr should show the transform, its params, inputs, and output name.
    col = learn.TransformedColumn(
        [mocks.MockColumn("foobar", [])],
        mocks.MockTwoOutputTransform("thb", "nth", "snt"), "qux")
    # note params are sorted by name
    expected = ("MockTransform({'param_one': 'thb', 'param_three': 'snt', "
                "'param_two': 'nth'})"
                "(foobar)[qux]")
    self.assertEqual(expected, repr(col))

  def test_build_no_output(self):
    # Constructing with output_name=None must be rejected.
    def create_no_output_column():
      return learn.TransformedColumn(
          [mocks.MockColumn("foobar", [])],
          mocks.MockZeroOutputTransform("thb", "nth"), None)
    self.assertRaises(ValueError, create_no_output_column)

  def test_build_single_output(self):
    # build() returns the single fake tensor produced by the mock transform.
    col = learn.TransformedColumn(
        [mocks.MockColumn("foobar", [])],
        mocks.MockOneOutputTransform("thb", "nth"), "out1")
    result = col.build()
    expected = "Fake Tensor 1"
    self.assertEqual(expected, result)

  def test_build_multiple_output(self):
    # With two outputs, selecting "out2" yields the second fake tensor.
    col = learn.TransformedColumn(
        [mocks.MockColumn("foobar", [])],
        mocks.MockTwoOutputTransform("thb", "nth", "snt"), "out2")
    result = col.build()
    expected = "Fake Tensor 2"
    self.assertEqual(expected, result)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
62ebcc91889677c8b30ffd65ed29c5829b2a6e1f | b4f211423f51c7c3bfbc39c868aaa15c5899b1fa | /226. Invert Binary Tree.py | 3d93f299ed5775a88c748b37f1e0eae10e2d34b6 | [] | no_license | yaoyu2001/LeetCode_Practice_Python | 705971d17fb91cba9114854886ee113206d23f59 | a09bd0105c0ac9e76e9b4ef1946faa2fb8797660 | refs/heads/master | 2020-11-30T08:57:22.022268 | 2020-09-19T00:01:23 | 2020-09-19T00:01:23 | 230,361,026 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Definition for a binary tree node.
class TreeNode:
    """A single node of a binary tree."""

    def __init__(self, x):
        # Payload value; both child links start out empty.
        self.val = x
        self.left = self.right = None
class Solution:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Mirror the tree rooted at ``root`` in place and return it."""
        if not root:
            return None
        # Invert both subtrees (right first, matching the original order),
        # then swap them via tuple assignment.
        root.left, root.right = (
            self.invertTree(root.right),
            self.invertTree(root.left),
        )
        return root
"yongchangyao2001@gmail.com"
] | yongchangyao2001@gmail.com |
9d8ab899ac69be877244f609a08e6ff88f26dcf1 | 79e1a5ad019b261034bc6338e894679d3f5d54d9 | /Regular Expression Matching.py | 7bef362e86cab1c1dbffc2ee974d0dc8261d8b1e | [
"MIT"
] | permissive | ngdeva99/Fulcrum | c615f457ec34c563199cc1dab243ecc62e23ad0b | 3a5c69005bbaf2a5aebe13d1907f13790210fb32 | refs/heads/master | 2022-12-15T19:35:46.508701 | 2020-09-09T06:47:48 | 2020-09-09T06:48:08 | 294,027,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | class Solution:
def isMatch(self, s: str, p: str) -> bool:
dp = [[False for _ in range(len(p)+1)] for _ in range(len(s)+1)]
dp[0][0] = True
#noT ALWAYS COLMNS ARE FALSE BECAUSE SOMETIMES THEY CAN match with the empty string for cases like a*b*
patterns with "" string.
#Deals with patterns like a* or a*b* or a*b*c*
for j in range(1,len(dp[0])):
if p[j-1] == "*":
dp[0][j] = dp[0][j-2]
for i in range(1,len(dp)):
for j in range(1,len(dp[0])):
if s[i-1]==p[j-1] or p[j-1]==".":
dp[i][j] = dp[i-1][j-1]
elif p[j-1]=="*":
dp[i][j] = dp[i][j-2]
if s[i-1]==p[j-2] or p[j-2]==".":
dp[i][j] |= dp[i-1][j]
else:
dp[i][j] = False
print(dp)
return dp[-1][-1]
| [
"31466229+ngdeva99@users.noreply.github.com"
] | 31466229+ngdeva99@users.noreply.github.com |
8fb708dfdd6f0ed3f00e5449bc4839c03c543bdc | da0a7446122a44887fa2c4f391e9630ae033daa2 | /python/ray/data/preprocessors/custom_stateful.py | 801f4d6f092df4bfcd672f0d8835e4814150bb1a | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | whiledoing/ray | d8d9ba09b7545e8fd00cca5cfad451278e61fffd | 9272bcbbcae1630c5bb2db08a8279f0401ce6f92 | refs/heads/master | 2023-03-06T16:23:18.006757 | 2022-07-22T02:06:47 | 2022-07-22T02:06:47 | 252,420,044 | 0 | 0 | Apache-2.0 | 2023-03-04T08:57:20 | 2020-04-02T10:07:23 | Python | UTF-8 | Python | false | false | 3,665 | py | from typing import Callable, TYPE_CHECKING, Dict
from ray.data.preprocessor import Preprocessor
from ray.data import Dataset
if TYPE_CHECKING:
import pandas
class CustomStatefulPreprocessor(Preprocessor):
    """Implements a user-defined stateful preprocessor that fits on a Dataset.
    This is meant to be generic and can be used to perform arbitrary stateful
    preprocessing that cannot already be done through existing preprocessors.
    Logic must be defined to perform fitting on a Ray Dataset and transforming
    pandas DataFrames.
    Example:
    .. code-block:: python
        import pandas as pd
        import ray.data
        from pandas import DataFrame
        from ray.data.preprocessors import CustomStatefulPreprocessor
        from ray.data import Dataset
        from ray.data.aggregate import Max
        items = [
            {"A": 1, "B": 10},
            {"A": 2, "B": 20},
            {"A": 3, "B": 30},
        ]
        ds = ray.data.from_items(items)
        def get_max_a(ds: Dataset):
            # Calculate max value for column A.
            max_a = ds.aggregate(Max("A"))
            # {'max(A)': 3}
            return max_a
        def subtract_max_a_from_a_and_add_max_a_to_b(df: DataFrame, stats: dict):
            # Subtract max A value from column A and subtract it from B.
            max_a = stats["max(A)"]
            df["A"] = df["A"] - max_a
            df["B"] = df["B"] + max_a
            return df
        preprocessor = CustomStatefulPreprocessor(
            get_max_a,
            subtract_max_a_from_a_and_add_max_a_to_b
        )
        preprocessor.fit(ds)
        transformed_ds = preprocessor.transform(ds)
        expected_items = [
            {"A": -2, "B": 13},
            {"A": -1, "B": 23},
            {"A": 0, "B": 33},
        ]
        expected_ds = ray.data.from_items(expected_items)
        assert transformed_ds.take(3) == expected_ds.take(3)
        batch = pd.DataFrame(
            {
                "A": [5, 6],
                "B": [10, 10]
            }
        )
        transformed_batch = preprocessor.transform_batch(batch)
        expected_batch = pd.DataFrame(
            {
                "A": [2, 3],
                "B": [13, 13],
            }
        )
        assert transformed_batch.equals(expected_batch)
    Args:
        fit_fn: A user defined function that computes state information about
            a :class:`ray.data.Dataset` and returns it in a :class:`dict`.
        transform_fn: A user defined function that takes in a
            :class:`pandas.DataFrame` and the :class:`dict` computed from
            ``fit_fn``, and returns a transformed :class:`pandas.DataFrame`.
    """
    # Tells the Preprocessor base class that fit() must run before transform().
    _is_fittable = True
    def __init__(
        self,
        fit_fn: Callable[[Dataset], Dict],
        transform_fn: Callable[["pandas.DataFrame", Dict], "pandas.DataFrame"],
    ):
        # Store the user callbacks; fitting state is created later in _fit().
        self.fit_fn = fit_fn
        self.transform_fn = transform_fn
    def _fit(self, dataset: Dataset) -> "Preprocessor":
        """Run the user's fit function and stash its result as fitted state."""
        self.stats_ = self.fit_fn(dataset)
        return self
    def _transform_pandas(self, df: "pandas.DataFrame") -> "pandas.DataFrame":
        """Apply the user's transform with the previously fitted stats."""
        return self.transform_fn(df, self.stats_)
    def __repr__(self):
        # Prefer callables' __name__ but fall back to str() for e.g. partials.
        fit_fn_name = getattr(self.fit_fn, "__name__", str(self.fit_fn))
        transform_fn_name = getattr(
            self.transform_fn, "__name__", str(self.transform_fn)
        )
        # stats_ does not exist before fitting, hence the defaulted getattr.
        stats = getattr(self, "stats_", None)
        return (
            f"CustomStatefulPreprocessor("
            f"fit_fn={fit_fn_name}, "
            f"transform_fn={transform_fn_name}, "
            f"stats={stats})"
        )
| [
"noreply@github.com"
] | whiledoing.noreply@github.com |
fe78a5baff324bb3d049a9f7ad54a9d47eca3a4f | ead3ef1aa0d7633f9ba7cf8f4454bac7ca74f540 | /sources/RaMaK/RaMaK.py | 381cb566858cbed3ef3839cd8c1da73b4151cebc | [] | no_license | sffej/Sefaria-Data | e5c64ba436098de4294d441b0d686ad9850c9d07 | 73a0ae6ab80cdac477fd446ac69edf42b6e37919 | refs/heads/master | 2021-01-13T02:53:57.395433 | 2016-12-21T09:17:29 | 2016-12-21T09:17:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,286 | py | # -*- coding: utf-8 -*-
__author__ = 'stevenkaplan'
from XML_to_JaggedArray import XML_to_JaggedArray
import sys
sys.path.append('../')
from functions import *
sys.path.append('../../../')
from sefaria.model import *
'''Every node whose first element is a title is the node's title. Then remove these titles possibly.
Every other title has structural significance if it has a bold tag as a child
Titles can structure text
Footnotes
Also consider how to decipher JA_array or allowed_tags automatically
'''
def parse(text_arr):
    """Normalize markup in a list of text segments, in place.

    Replaces the source document's <bold>/<italic> XML-style tags with the
    <b>/<i> tags used downstream. The list is mutated in place and also
    returned for convenience.
    """
    # isinstance (rather than `type(...) is list`) also accepts list subclasses.
    assert isinstance(text_arr, list)
    for index, text in enumerate(text_arr):
        # Chain the replacements on the loop variable instead of re-indexing
        # the list for every call, as the original did.
        text_arr[index] = (text.replace("<bold>", "<b>")
                               .replace("</bold>", "</b>")
                               .replace("<italic>", "<i>")
                               .replace("</italic>", "</i>"))
    return text_arr
def create_schema():
    """Build the Sefaria index schema for "Or Neerav" (Hebrew: אור נערב)
    and post it via post_index().

    NOTE(review): relies on the module-level global ``title``, which is only
    assigned inside the ``__main__`` block — calling this standalone raises
    NameError.
    """
    # Root node of the whole book.
    book = SchemaNode()
    book.key = "ramak"
    book.add_title(u"אור נערב", "he", primary=True)
    book.add_title("Or Neerav", "en", primary=True)
    # Introduction: a flat list of paragraphs.
    intro = JaggedArrayNode()
    intro.add_title("Introduction", "en", primary=True)
    intro.add_title(u"הקדמה", "he", primary=True)
    intro.depth = 1
    intro.sectionNames = ["Paragraph"]
    intro.addressTypes = ["Integer"]
    intro.key = "intro"
    book.append(intro)
    # Parts I-VI each get a "Subject" node plus a default chapter/paragraph body.
    arr = ["I", "II", "III", "IV", "V", "VI"]
    for i in range(6):
        pt = SchemaNode()
        pt.key = "pt"+str(i)+"schema"
        pt.add_title("PART " + arr[i], "en", primary=True)
        pt.add_title(u"חלק "+numToHeb(1+i), "he", primary=True)
        subject = JaggedArrayNode()
        subject.add_title("Subject", "en", primary=True)
        subject.add_title(u"נושא", "he", primary=True)
        subject.key = "subject"
        subject.depth = 1
        subject.sectionNames = ["Paragraph"]
        subject.addressTypes = ["Integer"]
        default = JaggedArrayNode()
        default.depth = 2
        default.default = True
        default.sectionNames = ["Chapter", "Paragraph"]
        default.addressTypes = ["Integer", "Integer"]
        default.key = "default"
        pt.append(subject)
        pt.append(default)
        book.append(pt)
    # Part VII is a single flat paragraph list (no chapters).
    pt7 = JaggedArrayNode()
    pt7.add_title("PART VII", "en", primary=True)
    pt7.add_title(u"חלק ז", "he", primary=True)
    pt7.depth = 1
    pt7.sectionNames = ["Paragraph"]
    pt7.addressTypes = ["Integer"]
    pt7.key = "pt7"
    book.append(pt7)
    # Appendix mirrors the per-part structure (subject + default body).
    appendix = SchemaNode()
    appendix.add_title("Appendix The Introductory Material", "en", primary=True)
    appendix.add_title(u"נספח: הקדמות", "he", primary=True)
    appendix.key = "appendix"
    subject = JaggedArrayNode()
    subject.add_title("Subject", "en", primary=True)
    subject.add_title(u"נושא", "he", primary=True)
    subject.key = "subject"
    subject.depth = 1
    subject.sectionNames = ["Paragraph"]
    subject.addressTypes = ["Integer"]
    default = JaggedArrayNode()
    default.depth = 2
    default.default = True
    default.sectionNames = ["Chapter", "Paragraph"]
    default.addressTypes = ["Integer", "Integer"]
    default.key = "default"
    appendix.append(subject)
    appendix.append(default)
    # One footnote node per major section; depth matches the section's depth.
    footnotes_array = ["Introduction", "PART I", "PART II", "PART III", "PART IV", "PART V", "PART VI", "PART VII", "Appendix The Introductory Material"]
    footnotes_heb = [u"הקדמה", u"חלק א", u"חלק ב", u"חלק ג", u"חלק ד", u"חלק ה", u"חלק ו", u"חלק ז", u"נספח"]
    footnotes = SchemaNode()
    footnotes.key = "footnotes"
    footnotes.add_title("Footnotes", "en", primary=True)
    footnotes.add_title(u"הערות", "he", primary=True)
    for i in range(len(footnotes_array)):
        node = JaggedArrayNode()
        if footnotes_array[i] == "Introduction" or footnotes_array[i] == "PART VII":
            node.depth = 1
            node.sectionNames = ["Paragraph"]
            node.addressTypes = ["Integer"]
        else:
            node.depth = 2
            node.sectionNames = ["Chapter", "Paragraph"]
            node.addressTypes = ["Integer", "Integer"]
        node.key = footnotes_array[i]
        node.add_title(footnotes_array[i], "en", primary=True)
        node.add_title(footnotes_heb[i], "he", primary=True)
        footnotes.append(node)
    book.append(appendix)
    book.append(footnotes)
    # Validate the assembled tree, then post the index record.
    book.validate()
    index = {
        "title": title,
        "categories": ["Kabbalah"],
        "schema": book.serialize()
    }
    post_index(index)
if __name__ == "__main__":
    # NOTE(review): version metadata below is a placeholder ("hi") — fill in
    # real versionTitle/versionSource values before posting.
    post_info = {}
    post_info["versionTitle"] = "hi"
    post_info["versionSource"] = "hi"
    post_info["language"] = "en"
    # XML tags the converter is allowed to process.
    allowed_tags = ["book", "intro", "part", "appendix", "chapter", "p", "ftnote", "title"]
    structural_tags = ["title"] #this is not all tags with structural significance, but just
    #the ones we must explicitly mention, because it has no children,
    #we want what comes after it until the next instance of it to be its children anyway
    allowed_attributes = ["id"]
    file_name = "../sources/DC labs/Robinson_MosesCordoveroIntroductionToKabbalah.xml"
    # ``title`` is also read by create_schema() below (module global).
    title = "Or Neerav"
    ramak = XML_to_JaggedArray(title, file_name, allowed_tags, allowed_attributes, post_info, parse)
    create_schema()
    ramak.run()
| [
"skaplan@brandeis.edu"
] | skaplan@brandeis.edu |
acf150c4647da83352303ffcee565e243f7cd2c4 | fe22e8ffdb1b2f1e11becc027e71a7a512fe56eb | /misc/analysis_step2/calculateBTaggingEffs.py | e0d52d9a1afe7d8fabf519c6d6d0b3cb4089ff58 | [] | no_license | HEP-KBFI/stpol | 3cdb5dc125bb0394f4531abfdfe9629b0c8d0fa4 | 962837a3341dd26391025b9a07a9c1c93084bf64 | refs/heads/master | 2020-06-03T16:15:14.743807 | 2015-08-05T09:00:28 | 2015-08-05T09:00:28 | 5,716,481 | 0 | 1 | null | 2015-03-04T08:23:28 | 2012-09-07T12:27:30 | Python | UTF-8 | Python | false | false | 7,138 | py | from anfw import *
import pdb
import math
import json
# Event-selection cut used throughout this script (alternative selections
# are kept commented out for reference). Presumably the muon channel plus
# kinematic/jet-quality cuts with at least one light jet — defined in anfw.
#cut = Cuts.mu + Cuts.MT + Cuts.mlnu + Cuts.jetRMS + Cuts.jetPt + Cuts.jets_1LJ + Cuts.etaLJ + Cuts.recoFState #Cut("1plusLJ", "_lightJetCount>=1")
cut = Cuts.mu + Cuts.MT + Cuts.mlnu + Cuts.jetRMS + Cuts.etaLJ + Cuts.recoFState + Cuts.jetPt + Cut("1plusLJ", "_lightJetCount>=1")
#cut = Cuts.mu + Cuts.MT
print cut
def effUnc(eff, count):
    # Binomial uncertainty on an efficiency measured from `count` trials:
    # sqrt(eff * (1 - eff) / N).
    variance = eff * (1.0 - eff) / count
    return math.sqrt(variance)
# Output ROOT file for the b-discriminator histograms; recreated on every run.
of = ROOT.TFile("bTaggingEffs.root", "RECREATE")
def calcBTaggingEff(channel):
    """Measure b-tagging efficiencies per true jet flavour (b/c/light) for one
    MC channel. Fills per-flavour b-discriminator histograms into the global
    ROOT file ``of``, prints a summary (Python 2 print statements), and
    returns a dict of counts, efficiencies and their statistical errors.
    Reads the globals ``channels``, ``cut`` and ``of``."""
    print "B-tagging effs for channel {0}".format(channel)
    of.cd()
    hTrueB_bDiscr = ROOT.TH1F("hTrueB_BDiscr_{0}".format(channel), "true b-jet b-discriminator distribution", 1000, -100, 40)
    hTrueC_bDiscr = ROOT.TH1F("hTrueC_BDiscr_{0}".format(channel), "true c-jet b-discriminator distribution", 1000, -100, 40)
    hTrueL_bDiscr = ROOT.TH1F("hTrueL_BDiscr_{0}".format(channel), "true l-jet b-discriminator distribution", 1000, -100, 40)
    ROOT.gROOT.cd()
    #cut = Cuts.finalMu
    # Select the events passing ``cut`` into a TEventList named "elist".
    channels[channel].tree.Draw(">>elist", cut.cutStr)
    elist = ROOT.gROOT.Get("elist")
    print "Number of events in selection: %d" % elist.GetN()
    lepCount = {-1:0, 0: 0, 1:0, 2:0, 3:0}
    sumBTaggedB = 0
    sumTrueB = 0
    sumBTaggedC = 0
    sumTrueC = 0
    sumBTaggedL = 0
    sumTrueL = 0
    nFailed = 0
    tree = channels[channel].tree
    for i in range(elist.GetN()):
        tree.GetEntry(elist.GetEntry(i))
        # Skip events with unset (-1) truth-matching counters.
        if (tree._btaggedTrueBJetCount == -1 or tree._trueBJetCount == -1 or
            tree._btaggedTrueCJetCount == -1 or tree._trueCJetCount == -1 or
            tree._btaggedTrueLJetCount == -1 or tree._trueLJetCount == -1
            ):
            nFailed += 1
            #print "Warning: anomalous event"
            continue
        nJets = tree._lightJetCount + tree._bJetCount
        # NOTE(review): this inner loop reuses the name ``i`` from the outer
        # loop; harmless here (the outer range iterator is unaffected) but a
        # readability hazard.
        for i in range(min(2, nJets)):
            partonFlavour = getattr(tree, "_goodJets_{0}_partonFlavour".format(i))
            bDiscriminator = getattr(tree, "_goodJets_{0}_bDiscriminator".format(i))
            # PDG |id| 5 = b quark, 4 = c quark, anything else counted as light.
            if abs(partonFlavour)==5:
                hTrueB_bDiscr.Fill(bDiscriminator)
            elif abs(partonFlavour)==4:
                hTrueC_bDiscr.Fill(bDiscriminator)
            else:
                hTrueL_bDiscr.Fill(bDiscriminator)
        lepCount[tree._genLeptonsTCount] += 1
        sumBTaggedB += tree._btaggedTrueBJetCount
        sumTrueB += tree._trueBJetCount
        sumBTaggedC += tree._btaggedTrueCJetCount
        sumTrueC += tree._trueCJetCount
        sumBTaggedL += tree._btaggedTrueLJetCount
        sumTrueL += tree._trueLJetCount
    print ("jet counts (tagged | all): B: %d | %d" % (sumBTaggedB, sumTrueB)) + ("; C: %d | %d" % (sumBTaggedC, sumTrueC)) + ("; L: %d | %d" % (sumBTaggedL, sumTrueL))
    #print "Generated lepton counts: {0}".format(str(lepCount))
    # Efficiency = tagged / all true jets of that flavour.
    eff_b = float(sumBTaggedB)/float(sumTrueB)
    eff_c = float(sumBTaggedC)/float(sumTrueC)
    eff_l = float(sumBTaggedL)/float(sumTrueL)
    sigma_eff_b = effUnc(eff_b, sumTrueB)
    sigma_eff_c = effUnc(eff_c, sumTrueC)
    sigma_eff_l = effUnc(eff_l, sumTrueL)
    print "nFailed = {0}".format(nFailed)
    def printEff(eff, sigma, flavour):
        print "eff_{3} = {0:.2E} (\sigma {1:.2E}) ({2:.1%})".format(eff, sigma, sigma/eff, flavour)
    printEff(eff_b, sigma_eff_b, "b")
    printEff(eff_c, sigma_eff_c, "c")
    printEff(eff_l, sigma_eff_l, "l")
    print 80*"-"
    # Flush the histograms created above into the output file.
    of.Write()
    return {
        "count_events": elist.GetN(),
        "eff_b": 100.0*eff_b, "eff_c": 100.0*eff_c, "eff_l": 100.0*eff_l,
        "sigma_eff_b": 100*sigma_eff_b, "sigma_eff_c": 100*sigma_eff_c, "sigma_eff_l": 100*sigma_eff_l,
        "rel_sigma_eff_b": 100.0*sigma_eff_b/eff_b, "rel_sigma_eff_c": 100.0*sigma_eff_c/eff_c, "rel_sigma_eff_l": 100.0*sigma_eff_l/eff_l,
        "count_b_total": sumTrueB, "count_b_tagged": sumBTaggedB,
        "count_c_total": sumTrueC, "count_c_tagged": sumBTaggedC,
        "count_l_total": sumTrueL, "count_l_tagged": sumBTaggedL
    }
# Run the measurement for the three MC channels.
effs = dict()
effs["T_t"] = calcBTaggingEff("T_t")
effs["WJets"] = calcBTaggingEff("WJets")
effs["TTbar"] = calcBTaggingEff("TTbar")
# Flatten the results into key_channel entries.
# NOTE(review): ``out`` is built but never written anywhere — the JSON dump
# below uses ``effs`` instead; confirm which one was intended.
out = dict()
out["bTaggingEffs"] = dict()
for (chan, eff) in effs.items():
    for (k, v) in eff.items():
        out["bTaggingEffs"]["{0}_{1}".format(k, chan)] = v
of.Close()
ofile = open("bTaggingEffs.json", "w+")
ofile.write(json.dumps(effs))
ofile.close()
from Cheetah.Template import Template
# Cheetah template producing a LaTeX tabular of the measured efficiencies.
# The var-start token is remapped to '@' so that LaTeX's '$' stays literal;
# most data rows are commented out (leading '%') in the generated LaTeX.
temp = r"""
#compiler-settings
cheetahVarStartToken = @
#end compiler-settings
\begin{tabular}{ |l|c|c|c|c|c| }
\hline
MC sample & MC events in sel. & flavour & total & b-tagged & $\epsilon$ & stat. unc. \\
\hline
\multirow{3}{*}{single top, t-channel, top} & \multirow{3}{*}{@effs['T_t']['count_events']} & b & @effs['T_t']['count_b_total'] & @effs['T_t']['count_b_tagged'] & #echo '%.2f' % @effs['T_t']['eff_b']# \pm #echo '%.1f' % @effs['T_t']['rel_sigma_eff_b']#\% \\\cline{3-6}
% & & c & @effs['T_t']['count_c_total'] & @effs['T_t']['count_c_tagged'] & #echo '%.3E' % @effs['T_t']['eff_c']# & #echo '%.3E' % @effs['T_t']['sigma_eff_c']# \\\cline{3-7}
% & & l & @effs['T_t']['count_l_total'] & @effs['T_t']['count_l_tagged'] & #echo '%.3E' % @effs['T_t']['eff_l']# & #echo '%.3E' % @effs['T_t']['sigma_eff_l']# \\\cline{3-7}
% \hline
% \multirow{3}{*}{$t\bar{t}$} & \multirow{3}{*}{@effs['TTbar']['count_events']} & b & @effs['TTbar']['count_b_total'] & @effs['TTbar']['count_b_tagged'] & #echo '%.3E' % @effs['TTbar']['eff_b']# & #echo '%.3E' % @effs['TTbar']['sigma_eff_b']# \\\cline{3-7}
% & & c & @effs['TTbar']['count_c_total'] & @effs['TTbar']['count_c_tagged'] & #echo '%.3E' % @effs['TTbar']['eff_c']# & #echo '%.3E' % @effs['TTbar']['sigma_eff_c']# \\\cline{3-7}
% & & l & @effs['TTbar']['count_l_total'] & @effs['TTbar']['count_l_tagged'] & #echo '%.3E' % @effs['TTbar']['eff_l']# & #echo '%.3E' % @effs['TTbar']['sigma_eff_l']# \\\cline{3-7}
% \hline
% \multirow{3}{*}{W+Jets} & \multirow{3}{*}{@effs['WJets']['count_events']} & b & @effs['WJets']['count_b_total'] & @effs['WJets']['count_b_tagged'] & #echo '%.3E' % @effs['WJets']['eff_b']# & #echo '%.3E' % @effs['WJets']['sigma_eff_b']# \\\cline{3-7}
% & & c & @effs['WJets']['count_c_total'] & @effs['WJets']['count_c_tagged'] & #echo '%.3E' % @effs['WJets']['eff_c']# & #echo '%.3E' % @effs['WJets']['sigma_eff_c']# \\\cline{3-7}
% & & l & @effs['WJets']['count_l_total'] & @effs['WJets']['count_l_tagged'] & #echo '%.3E' % @effs['WJets']['eff_l']# & #echo '%.3E' % @effs['WJets']['sigma_eff_l']# \\\cline{3-7}
\hline
\end{tabular}
"""
print Template(temp, searchList=[{"effs": effs}]) | [
"joosep.pata@gmail.com"
] | joosep.pata@gmail.com |
4dcc4ff446eff2fef093f48a7aea4875ff8d2d3a | 6c543074f1d764af9701e5b55db9ab0220c1df93 | /03_循环/venv/bin/pip3.6 | 6dfe12f619a30cc702d37070c1f52c36606b8e5c | [] | no_license | allenlgy/Django-project | 127e984e13f71d20e01df68ad42d00b977ac0105 | 9c4b9e6c67481a5f3cef58ea47e9fd62058036d8 | refs/heads/master | 2020-06-23T01:03:03.170674 | 2019-09-04T06:11:40 | 2019-09-04T06:11:40 | 198,453,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | 6 | #!/home/linguiyi/Desktop/03_循环/venv/bin/python -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.6'
# Auto-generated setuptools console-script wrapper: resolves the 'pip3.6'
# entry point of the pinned pip distribution and runs it.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools script suffixes ("-script.py", ".exe", ...) from
    # argv[0] so pip reports a clean program name in its help/errors.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.6')()
)
| [
"1006547624@qq.com"
] | 1006547624@qq.com |
1f5a6da61f4297f4c0f993fcff3e63528df697d2 | 61f2172dfbb81aa3ad46b2063ad6baced0c94b5c | /juicer/models/inline_response2007.py | 6353cdbb447feea555aa18c46832da56cfb855a6 | [] | no_license | rochacbruno-archive/pulp3-juicer | 0005517a8b1e840b2c08ca492318f5e4b460edf3 | da9b636720281169d343a6190d6615a81f631b0b | refs/heads/master | 2020-04-29T01:24:42.736203 | 2019-03-15T02:09:43 | 2019-03-15T02:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | # coding: utf-8
"""
Pulp3 API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from juicer.models.update_record import UpdateRecord # noqa: F401,E501
class InlineResponse2007(object):
    """Paginated response holding one page of UpdateRecord results.

    Originally produced by the swagger code generator; the public surface
    (attribute names, validation, dict/str conversions) is kept compatible
    with the generated version.  ``count`` and ``results`` are required,
    the ``next``/``previous`` page links are optional.
    """
    # Attribute name -> swagger-declared type.
    swagger_types = {
        'count': 'int',
        'next': 'str',
        'previous': 'str',
        'results': 'list[UpdateRecord]'
    }
    # Attribute name -> key used in the JSON definition.
    attribute_map = {
        'count': 'count',
        'next': 'next',
        'previous': 'previous',
        'results': 'results'
    }

    def __init__(self, count=None, next=None, previous=None, results=None):  # noqa: E501
        """Create the model; a missing `count` or `results` is rejected."""
        self._count = None
        self._next = None
        self._previous = None
        self._results = None
        self.discriminator = None
        # Required: the property setter raises ValueError on None.
        self.count = count
        # Optional pagination links are only stored when provided.
        if next is not None:
            self.next = next
        if previous is not None:
            self.previous = previous
        # Required: the property setter raises ValueError on None.
        self.results = results

    @property
    def count(self):
        """The required `count` of this InlineResponse2007."""
        return self._count

    @count.setter
    def count(self, count):
        if count is None:
            raise ValueError("Invalid value for `count`, must not be `None`")  # noqa: E501
        self._count = count

    @property
    def next(self):
        """The optional `next` page link of this InlineResponse2007."""
        return self._next

    @next.setter
    def next(self, next):
        self._next = next

    @property
    def previous(self):
        """The optional `previous` page link of this InlineResponse2007."""
        return self._previous

    @previous.setter
    def previous(self, previous):
        self._previous = previous

    @property
    def results(self):
        """The required list of `results` of this InlineResponse2007."""
        return self._results

    @results.setter
    def results(self, results):
        if results is None:
            raise ValueError("Invalid value for `results`, must not be `None`")  # noqa: E501
        self._results = results

    def to_dict(self):
        """Return the model properties as a plain dict."""
        def _convert(value):
            # Recursively turn nested swagger models into dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v
                        for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, 'to_dict') else v
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when type and full state match."""
        return (isinstance(other, InlineResponse2007)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"rochacbruno@gmail.com"
] | rochacbruno@gmail.com |
7082139b388aa7fe762248acdc6c4af2c6679758 | baf3996414315ffb60470c40c7ad797bf4e6897f | /10_back_dev/paradigms/functional/py_scripts/21. Handling errors in lambda expressions.py | 36cd909a7c4d14bb020ce8a13c5dc78044b7d955 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 788 | py |
# coding: utf-8
# ## Let's reconsider our lambda
# In[3]:
l_add_str = lambda s: sum([int(i) for i in s.split('+')])
# ## A Maybe-like decorator
#
# The Maybe monad is not very Pythonic. But we can do something similar using a decorator.
# In[5]:
def maybe(fnc):
    """Decorator that makes *fnc* error-transparent, Maybe-monad style.

    The wrapped function returns any Exception found among its positional
    arguments unchanged (propagating an earlier failure), and converts an
    exception raised by *fnc* itself into a return value instead of letting
    it bubble up.
    """
    def wrapper(*call_args):
        previous_failure = next(
            (arg for arg in call_args if isinstance(arg, Exception)), None)
        if previous_failure is not None:
            return previous_failure
        try:
            result = fnc(*call_args)
        except Exception as err:
            return err
        return result
    return wrapper
safe_add_str = maybe(lambda s: sum([int(i) for i in s.split('+')]))
print(safe_add_str(1+2))
# ## Exceptions are fine!
#
# Even though `Exception`s are not entirely compatible with a functional programming style, they are still a very good way to deal with errors!
| [
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
3ae428c5c3d9934484c31bd9f055b975f15db166 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/ChRapO/A.py | 74c4807722da1da40ad33a1b289805c6a93fd531 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | import sys
# Code Jam-style solver (Python 2 syntax: the print statement below).
# For each test case it reads a string s and an integer L and appears to
# count the substrings of s containing at least one run of L consecutive
# consonants (any character outside `non`) -- TODO confirm against the
# original problem statement.
cases = sys.stdin.readline()
non = ['a','e','i','o','u']
for case in range(0,int(cases)):
    s,L = [v for v in sys.stdin.readline().split()]
    L = int(L)
    res = 0
    ind = 0
    sum = 0   # length of the current consonant run (shadows the builtin)
    K = []    # indices where a consonant run of length >= L ends
    for c in s:
        if not c in non: sum += 1
        else: sum = 0
        if sum >= L:
            K.append(ind)
        ind += 1
    next = 0  # pointer into K (shadows the builtin)
    for i in range(0, len(s) - L + 1):
        # Advance to the first qualifying run-end at or after i + L - 1.
        while next<len(K)-1 and i+L-1>K[next]: next+=1
        if next<len(K) and K[next]-i>=L - 1:
            # Every substring starting at i and ending at K[next] or later
            # contains that run, so all of them are counted at once.
            res += len(s) - K[next]
print "Case #%d: %d" % (case+1, res)
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
7665c3d92f0286971a166f11effa1d65de7fb0bc | 2dad8b725583afd64e2f381acb6a299350a069c4 | /winback/wsgi.py | 26e399e353bb46b9c222b8c1e08797b7183f46fc | [] | no_license | s4-hub/winback | 39b0b354690201a7906ce77f46c1172ddcb21110 | abfb22b6ed5d523b93ea5cdb982ac3066a63ab7c | refs/heads/master | 2020-12-22T12:27:54.416189 | 2020-02-11T10:50:30 | 2020-02-11T10:50:30 | 233,515,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for winback project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default the settings module before the WSGI application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'winback.settings')
application = get_wsgi_application()
| [
"syafii.newbie@gmail.com"
] | syafii.newbie@gmail.com |
7b43a51da0f5244e8620e566618e73db3071883d | 78cc1e7a9703769cfce430bbc4ac38c874a59d47 | /backend/home/migrations/0003_message.py | 6c1a3eebe5afc3461fa8aa5be519fb3834b1d7e7 | [] | no_license | crowdbotics-apps/thefuture-21697 | 9790196ed22bb7a859ea6d8aefa1a916e998208d | e0552b0cec2b826933e7d01a73c8b434195e4f61 | refs/heads/master | 2022-12-29T18:28:04.481853 | 2020-10-19T05:35:21 | 2020-10-19T05:35:21 | 305,272,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | # Generated by Django 2.2.16 on 2020-10-19 05:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: creates the home.Message
    # model with an optional link to dating.Inbox and a required link to
    # dating.Match.  Applied migrations should not be hand-edited.
    dependencies = [
        ('dating', '0001_initial'),
        ('home', '0002_load_initial_data'),
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                # blank/null: a Message can exist without an inbox.
                ('inbox', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_inbox', to='dating.Inbox')),
                ('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_match', to='dating.Match')),
            ],
        ),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
19bac62d51ee3986732db67f4d6bab2ebfcd2f2a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04000/s866107546.py | 76d4bb9f3ddc109f45e5d41132929ecf15b57adf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | import sys,queue,math,copy,itertools,bisect,collections,heapq
def main():
    # For each of the (H-2)*(W-2) possible 3x3 sub-grids of an H x W board,
    # count how many of the N given cells it covers, then print, for
    # j = 0..9, the number of sub-grids covering exactly j of the cells.
    LI = lambda : [int(x) for x in sys.stdin.readline().split()]
    H,W,N = LI()
    d = collections.Counter()  # 3x3 center (row, col) -> cells it covers
    for _ in range(N):
        x,y = LI()
        for dx in range(-1,2):
            for dy in range(-1,2):
                # (nx, ny) is a candidate 3x3 center whose window covers
                # (x, y); valid centers lie in 2..H-1 / 2..W-1 (1-indexed).
                nx = x+dx
                ny = y+dy
                if 1 < nx < H and 1 < ny < W:
                    d[(nx,ny)] += 1
    ans = [0] * 10
    for x,c in collections.Counter(d.values()).items():
        ans[x] += c
    # Centers never touched by any given cell cover 0 of them.
    ans[0] = (H-2) * (W-2) - sum(ans[1:])
    print(*ans,sep='\n')
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d2e720f226dc3dec7b687773cd1b7a6e364ed126 | f7a3406afa8682523dc4684ddb2363ad029cbe62 | /myenv/final/talent_schedule_generator.py | 749fbd83bd4079f1e461d8343ef154e113e6278b | [] | no_license | katherineb28/work-team | 2f8b6b8c7d78dd15c775301c961316dd75b76640 | 3d04d9404e91ab795270de73d44318ccfdf88be1 | refs/heads/master | 2022-12-10T14:41:40.213317 | 2018-12-31T00:38:51 | 2018-12-31T00:38:51 | 163,583,574 | 0 | 0 | null | 2022-12-08T00:46:31 | 2018-12-30T11:52:33 | Python | UTF-8 | Python | false | false | 17,650 | py | # Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a shift scheduling problem and solves it."""
from __future__ import print_function, division, absolute_import
import argparse
from ortools.sat.python import cp_model
from google.protobuf import text_format
from collections import defaultdict
from cs50 import SQL
import application
db = SQL("sqlite:///database.db")
"""Get the variables for the calculation(employee count, requests etc.) """
total = 0
total = db.execute("SELECT COUNT(role) FROM employees WHERE role = :role", role = "Talent")
total = total[0]['COUNT(role)']
# Request: (employee, shift, day, weight)
requests = []
night_feature = []
for e in range(1,28):
night_feature.append(tuple([9,3,e,-1]))
night_feature.append(tuple([10,3,e,-1]))
# A negative weight indicates that the employee desire this assignment.
summary = db.execute("SELECT requestee,day,req FROM requests")
for index in range(len(summary)):
new_list=[]
employee_id = None
shift = None
if summary[index]['requestee'] == "Kate":
employee_id = 0
elif summary[index]['requestee'] == "Victor" :
employee_id = 1
elif summary[index]['requestee'] == "Jorge":
employee_id = 2
elif summary[index]['requestee'] == "Regina":
employee_id = 3
elif summary[index]['requestee'] == "Alvara":
employee_id = 4
elif summary[index]['requestee'] == "Borja":
employee_id = 5
elif summary[index]['requestee'] == "Ana":
employee_id = 6
elif summary[index]['requestee'] == "Cris":
employee_id = 7
elif summary[index]['requestee'] == "Lora":
employee_id = 8
elif summary[index]['requestee'] == "Natalia":
employee_id = 9
elif summary[index]['requestee'] == "Carles":
employee_id = 10
if employee_id or employee_id == 0:
new_list.append(employee_id)
if summary[index]['req'] == "Off":
shift = 0
elif summary[index]['req'] == "Morning":
shift = 1
elif summary[index]['req'] == "Evening":
shift = 2
elif summary[index]['req'] == "Night":
shift = 3
new_list.append(shift)
day = summary[index]['day']-1
new_list.append(day)
new_tuple = tuple(new_list)
requests.append(new_tuple)
print(requests)
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--output_proto',
default="",
help='Output file to write the cp_model'
'proto to.')
PARSER.add_argument('--params', default="", help='Sat solver parameters.')
def negated_bounded_span(works, start, length):
    """Build the clause forbidding an isolated all-true span of `length`.

    The variables works[start:start + length] are negated; the immediate
    neighbours (when they exist) are included un-negated, so the resulting
    disjunction is false exactly when the span is assigned all-true and is
    bounded by false variables or by the ends of `works`.

    Args:
      works: a list of Boolean variables.
      start: first index of the span.
      length: number of variables in the span.

    Returns:
      a list of literals forming the clause described above.
    """
    span = []
    if start > 0:
        # Left neighbour bounds the span.
        span.append(works[start - 1])
    span.extend(works[start + k].Not() for k in range(length))
    if start + length < len(works):
        # Right neighbour bounds the span.
        span.append(works[start + length])
    return span
def add_soft_sequence_constraint(model, works, hard_min, soft_min, min_cost,
                                 soft_max, hard_max, max_cost, prefix):
    """Sequence constraint on true variables with soft and hard bounds.
    This constraint looks at every maximal contiguous sequence of variables
    assigned to true. It forbids sequences of length < hard_min or > hard_max.
    Then it creates penalty terms if the length is < soft_min or > soft_max.
    Args:
      model: the sequence constraint is built on this model.
      works: a list of Boolean variables.
      hard_min: any sequence of true variables must have a length of at least
        hard_min.
      soft_min: any sequence should have a length of at least soft_min, or a
        linear penalty on the delta will be added to the objective.
      min_cost: the coefficient of the linear penalty if the length is less than
        soft_min.
      soft_max: any sequence should have a length of at most soft_max, or a linear
        penalty on the delta will be added to the objective.
      hard_max: any sequence of true variables must have a length of at most
        hard_max.
      max_cost: the coefficient of the linear penalty if the length is more than
        soft_max.
      prefix: a base name for penalty literals.
    Returns:
      a tuple (variables_list, coefficient_list) containing the different
      penalties created by the sequence constraint.
    """
    # The two returned lists are kept in lockstep: cost_coefficients[i] is
    # the penalty paid when cost_literals[i] is true.
    cost_literals = []
    cost_coefficients = []
    # Forbid sequences that are too short.
    for length in range(1, hard_min):
        for start in range(len(works) - length - 1):
            model.AddBoolOr(negated_bounded_span(works, start, length))
    # Penalize sequences that are below the soft limit.
    if min_cost > 0:
        for length in range(hard_min, soft_min):
            for start in range(len(works) - length - 1):
                span = negated_bounded_span(works, start, length)
                name = ': under_span(start=%i, length=%i)' % (start, length)
                # The added literal lets the solver satisfy the clause by
                # "paying" the penalty instead of avoiding the short span.
                lit = model.NewBoolVar(prefix + name)
                span.append(lit)
                model.AddBoolOr(span)
                cost_literals.append(lit)
                # We filter exactly the sequence with a short length.
                # The penalty is proportional to the delta with soft_min.
                cost_coefficients.append(min_cost * (soft_min - length))
    # Penalize sequences that are above the soft limit.
    if max_cost > 0:
        for length in range(soft_max + 1, hard_max + 1):
            for start in range(len(works) - length - 1):
                span = negated_bounded_span(works, start, length)
                name = ': over_span(start=%i, length=%i)' % (start, length)
                lit = model.NewBoolVar(prefix + name)
                span.append(lit)
                model.AddBoolOr(span)
                cost_literals.append(lit)
                # Cost paid is max_cost * excess length.
                cost_coefficients.append(max_cost * (length - soft_max))
    # Just forbid any sequence of true variables with length hard_max + 1
    for start in range(len(works) - hard_max - 1):
        model.AddBoolOr(
            [works[i].Not() for i in range(start, start + hard_max + 1)])
    return cost_literals, cost_coefficients
def add_soft_sum_constraint(model, works, hard_min, soft_min, min_cost,
                            soft_max, hard_max, max_cost, prefix):
    """Sum constraint with soft and hard bounds.
    This constraint counts the variables assigned to true from works.
    It forbids sum < hard_min or > hard_max.
    Then it creates penalty terms if the sum is < soft_min or > soft_max.
    Args:
      model: the sequence constraint is built on this model.
      works: a list of Boolean variables.
      hard_min: any sequence of true variables must have a sum of at least
        hard_min.
      soft_min: any sequence should have a sum of at least soft_min, or a linear
        penalty on the delta will be added to the objective.
      min_cost: the coefficient of the linear penalty if the sum is less than
        soft_min.
      soft_max: any sequence should have a sum of at most soft_max, or a linear
        penalty on the delta will be added to the objective.
      hard_max: any sequence of true variables must have a sum of at most
        hard_max.
      max_cost: the coefficient of the linear penalty if the sum is more than
        soft_max.
      prefix: a base name for penalty variables.
    Returns:
      a tuple (variables_list, coefficient_list) containing the different
      penalties created by the sequence constraint.
    """
    cost_variables = []
    cost_coefficients = []
    # The hard bounds are encoded in the variable's domain itself.
    sum_var = model.NewIntVar(hard_min, hard_max, '')
    # This adds the hard constraints on the sum.
    model.Add(sum_var == sum(works))
    # Penalize sums below the soft_min target.
    if soft_min > hard_min and min_cost > 0:
        delta = model.NewIntVar(-len(works), len(works), '')
        model.Add(delta == soft_min - sum_var)
        # TODO(user): Compare efficiency with only excess >= soft_min - sum_var.
        # NOTE(review): the 0..7 excess bound assumes a 7-day window
        # (len(works) <= 7) -- confirm before reusing on longer horizons.
        excess = model.NewIntVar(0, 7, prefix + ': under_sum')
        model.AddMaxEquality(excess, [delta, 0])
        cost_variables.append(excess)
        cost_coefficients.append(min_cost)
    # Penalize sums above the soft_max target.
    if soft_max < hard_max and max_cost > 0:
        # NOTE(review): the -7..7 delta bounds likewise assume len(works) <= 7.
        delta = model.NewIntVar(-7, 7, '')
        model.Add(delta == sum_var - soft_max)
        excess = model.NewIntVar(0, 7, prefix + ': over_sum')
        model.AddMaxEquality(excess, [delta, 0])
        cost_variables.append(excess)
        cost_coefficients.append(max_cost)
    return cost_variables, cost_coefficients
def solve_shift_scheduling(params, output_proto):
    """Builds and solves the talent shift-scheduling problem with CP-SAT.

    Args:
      params: sat solver parameters as a text-format protobuf string
        ('' keeps the defaults).
      output_proto: optional path; when non-empty, the CpModel proto is
        written there before solving.

    Returns:
      defaultdict mapping employee index -> list of one shift label per day
      ('OFF', '7', '15' or '23'); left empty when no feasible solution is
      found within the time limit.
    """
    # Data
    final_talent_roster = defaultdict(list)
    num_employees = total
    num_weeks = 4
    shifts = ['OFF', '7', '15', '23']

    # Shift constraints on continuous sequences:
    #     (shift, hard_min, soft_min, min_penalty,
    #             soft_max, hard_max, max_penalty)
    shift_constraints = [
        # Exactly two consecutive days of rest (hard_min == hard_max == 2).
        (0, 2, 2, 0, 2, 2, 0),
        # Between 2 and 3 consecutive days of night shifts; 1 and 4 are
        # possible but penalized.
        (3, 1, 2, 20, 3, 4, 5),
    ]

    # Weekly sum constraints on shift days:
    #     (shift, hard_min, soft_min, min_penalty,
    #             soft_max, hard_max, max_penalty)
    weekly_sum_constraints = [
        # Constraints on rests per week.
        (0, 1, 2, 7, 2, 3, 4),
    ]

    # Penalized transitions:
    #     (previous_shift, next_shift, penalty (0 means forbidden))
    penalized_transitions = [
        # Night to morning is forbidden.
        (3, 1, 0),
        # Night to afternoon is forbidden.
        (3, 2, 0)
    ]

    # Daily demands for work shifts (morning, afternoon, night) for each day
    # of the week, starting on Monday.
    weekly_cover_demands = [
        (3, 2, 1),  # Monday
        (2, 2, 1),  # Tuesday
        (2, 2, 1),  # Wednesday
        (2, 2, 2),  # Thursday
        (2, 3, 2),  # Friday
        (2, 3, 2),  # Saturday
        (2, 3, 2),  # Sunday
    ]

    # Penalty for exceeding the cover constraint per shift type.
    excess_cover_penalties = (2, 2, 5)

    num_days = num_weeks * 7
    num_shifts = len(shifts)

    model = cp_model.CpModel()

    # work[e, s, d] is true iff employee e works shift s on day d.
    work = {}
    for e in range(num_employees):
        for s in range(num_shifts):
            for d in range(num_days):
                work[e, s, d] = model.NewBoolVar('work%i_%i_%i' % (e, s, d))

    # Linear terms of the objective in a minimization context.
    obj_int_vars = []
    obj_int_coeffs = []
    obj_bool_vars = []
    obj_bool_coeffs = []

    # Exactly one shift per day.
    for e in range(num_employees):
        for d in range(num_days):
            model.Add(sum(work[e, s, d] for s in range(num_shifts)) == 1)

    # Employee requests, a.k.a. fixed assignments.
    for e, s, d in requests:
        model.Add(work[e, s, d] == 1)

    # Night manager feature: the negative weights from night_feature reward
    # the corresponding (employee, shift, day) assignments in the objective.
    for e, s, d, w in night_feature:
        obj_bool_vars.append(work[e, s, d])
        obj_bool_coeffs.append(w)

    # Shift (consecutive-sequence) constraints.
    for ct in shift_constraints:
        shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
        for e in range(num_employees):
            works = [work[e, shift, d] for d in range(num_days)]
            variables, coeffs = add_soft_sequence_constraint(
                model, works, hard_min, soft_min, min_cost, soft_max, hard_max,
                max_cost, 'shift_constraint(employee %i, shift %i)' % (e,
                                                                       shift))
            obj_bool_vars.extend(variables)
            obj_bool_coeffs.extend(coeffs)

    # Weekly sum constraints.
    for ct in weekly_sum_constraints:
        shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
        for e in range(num_employees):
            for w in range(num_weeks):
                works = [work[e, shift, d + w * 7] for d in range(7)]
                variables, coeffs = add_soft_sum_constraint(
                    model, works, hard_min, soft_min, min_cost, soft_max,
                    hard_max, max_cost,
                    'weekly_sum_constraint(employee %i, shift %i, week %i)' %
                    (e, shift, w))
                obj_int_vars.extend(variables)
                obj_int_coeffs.extend(coeffs)

    # Penalized transitions between consecutive days.
    for previous_shift, next_shift, cost in penalized_transitions:
        for e in range(num_employees):
            for d in range(num_days - 1):
                # Clause: not(prev on d) or not(next on d+1) [or pay cost].
                transition = [
                    work[e, previous_shift, d].Not(),
                    work[e, next_shift, d + 1].Not()
                ]
                if cost == 0:
                    model.AddBoolOr(transition)
                else:
                    trans_var = model.NewBoolVar(
                        'transition (employee=%i, day=%i)' % (e, d))
                    transition.append(trans_var)
                    model.AddBoolOr(transition)
                    obj_bool_vars.append(trans_var)
                    obj_bool_coeffs.append(cost)

    # Cover constraints: enforce the minimum demand per working shift and
    # penalize (but allow) overstaffing.
    for s in range(1, num_shifts):  # skip the Off shift
        for w in range(num_weeks):
            for d in range(7):
                works = [work[e, s, w * 7 + d] for e in range(num_employees)]
                min_demand = weekly_cover_demands[d][s - 1]
                worked = model.NewIntVar(min_demand, num_employees, '')
                model.Add(worked == sum(works))
                over_penalty = excess_cover_penalties[s - 1]
                if over_penalty > 0:
                    name = 'excess_demand(shift=%i, week=%i, day=%i)' % (s, w,
                                                                         d)
                    excess = model.NewIntVar(0, num_employees - min_demand,
                                             name)
                    model.Add(excess == worked - min_demand)
                    obj_int_vars.append(excess)
                    obj_int_coeffs.append(over_penalty)

    # Objective: minimize the weighted sum of all penalty terms.
    model.Minimize(
        sum(obj_bool_vars[i] * obj_bool_coeffs[i]
            for i in range(len(obj_bool_vars)))
        + sum(obj_int_vars[i] * obj_int_coeffs[i]
              for i in range(len(obj_int_vars))))

    if output_proto:
        print('Writing proto to %s' % output_proto)
        with open(output_proto, 'w') as text_file:
            text_file.write(str(model))

    # Solve the model.
    solver = cp_model.CpSolver()
    solver.parameters.max_time_in_seconds = 20.0
    if params:
        text_format.Merge(params, solver.parameters)
    solution_printer = cp_model.ObjectiveSolutionPrinter()
    status = solver.SolveWithSolutionCallback(model, solution_printer)

    # Print the solution and collect the roster.
    if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
        print()
        for e in range(num_employees):
            schedule = ''
            for d in range(num_days):
                for s in range(num_shifts):
                    if solver.BooleanValue(work[e, s, d]):
                        schedule += shifts[s] + ' '
                        final_talent_roster[e].append(shifts[s])
            print('worker %s: %s' % (e, schedule))
        print()
        print('Penalties:')
        for i, var in enumerate(obj_bool_vars):
            if solver.BooleanValue(var):
                penalty = obj_bool_coeffs[i]
                if penalty > 0:
                    print(' %s violated, penalty=%i' % (var.Name(), penalty))
                else:
                    print(' %s fulfilled, gain=%i' % (var.Name(), -penalty))
        for i, var in enumerate(obj_int_vars):
            if solver.Value(var) > 0:
                print(' %s violated by %i, linear penalty=%i' %
                      (var.Name(), solver.Value(var), obj_int_coeffs[i]))
        print()
        print('Statistics')
        print(' - status : %s' % solver.StatusName(status))
        print(' - conflicts : %i' % solver.NumConflicts())
        print(' - branches : %i' % solver.NumBranches())
        # CpSolver.WallTime() reports seconds; the old label wrongly said ms.
        print(' - wall time : %f s' % solver.WallTime())
    print(final_talent_roster)
    return final_talent_roster
def main(args):
    """Entry point: run the solver with the parsed CLI arguments and
    return the resulting roster mapping."""
    return solve_shift_scheduling(args.params, args.output_proto)
if __name__ == '__main__':
main(PARSER.parse_args())
| [
"kate.baranova@outlook.com"
] | kate.baranova@outlook.com |
629b3d6a0feeebefff9e7835994f4fea2c8d79c7 | 065956c29148d6b9a52cc97f1766f2d3e9e3ad83 | /pandas/venv/Scripts/f2py.py | b74390c16600d0f3554f89e7ca0bcab017816327 | [] | no_license | HBU/PythonLearning | 17b09ad32ea0100b18f01ad489b3daa81f70594a | 18303752c44ed90beefb13725690124031381f35 | refs/heads/master | 2022-11-24T05:17:01.968082 | 2019-06-06T05:01:08 | 2019-06-06T05:01:08 | 123,361,590 | 3 | 5 | null | 2022-11-18T22:18:49 | 2018-03-01T00:53:39 | Python | UTF-8 | Python | false | false | 797 | py | #!C:\GitHub\PythonLearning\pandas\venv\Scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
# Pick the f2py backend from a "--<mode>" flag on the command line and
# strip that flag from sys.argv before handing control to the backend.
# If no flag is given, the loop falls through leaving mode at its last
# value ("2e-numpy"), which is therefore the default.
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
# Presumably signals numpy/scipy not to pull in scipy during the f2py run
# -- TODO confirm against the numpy.f2py implementation.
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
main()
| [
"8584751@qq.com"
] | 8584751@qq.com |
e953bb5cba9a5f137ea0bb01359006d3dca25399 | 4090d8b4e8e9e28d620d222651c73a12a753be36 | /cases/urls.py | b90ee81d849bdb23377c093189d6fb2c10d48c89 | [] | no_license | isaev4lex/220studio | 91aa08f9d10ff55e98effe2542e26799efb6e2f2 | 6188403eeed7ee590b21da15c67af9e6f06ab06b | refs/heads/main | 2023-08-20T07:14:18.203593 | 2021-10-31T07:24:19 | 2021-10-31T07:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.cases, name='cases'),
path('<slug:category_slug>/', views.cases, name='cases_slug'),
]
| [
"FWorld21@protonmail.com"
] | FWorld21@protonmail.com |
612ca64cff2aaf1672b0799554822c9afa445102 | 6740ee4590bd30513c7e5fe185db42ca5c8fd565 | /lib/html_highlight.py | 2b27e1d9c4e0b63c3e6cf56d72e9ba1a4b6265aa | [
"MIT"
] | permissive | emmetio/sublime-text-plugin | 5a10c12e2d8c8a15b0be3b29c44e913bbfba4526 | df81e99d9655fe3ad0d7187051369a5324c6f139 | refs/heads/master | 2023-08-01T12:38:02.914935 | 2023-01-18T23:58:25 | 2023-01-18T23:58:25 | 205,387,294 | 267 | 38 | MIT | 2021-06-14T06:11:38 | 2019-08-30T13:25:18 | Python | UTF-8 | Python | false | false | 1,605 | py | import re
import html
from ..emmet.html_matcher import scan, get_attributes, ElementType
re_tag_end = re.compile(r'\s*\/?>$')
def highlight(code: str) -> str:
    """Return *code* with its HTML tags wrapped in highlighting <span>s.

    Text between tags is HTML-escaped verbatim; open/close tags, tag names,
    attribute names and attribute values each get their own CSS class
    (styled by :func:`styles`).
    """
    chunks = []
    offset = 0  # was a 1-element list workaround; `nonlocal` is cleaner

    def cb(name: str, elem_type: int, start: int, end: int):
        nonlocal offset
        # Emit (escaped) plain text preceding this tag.
        if offset != start:
            chunks.append(escape(code[offset:start]))
        offset = end
        if elem_type == ElementType.Close:
            chunks.append('<span class="tag close"></<span class="tag-name">%s</span>></span>' % name)
        else:
            chunks.append('<span class="tag open"><<span class="tag-name">%s</span>' % name)
            for attr in get_attributes(code, start, end, name):
                chunks.append(' <span class="attr">')
                chunks.append('<span class="attr-name">%s</span>' % attr.name)
                if attr.value is not None:
                    chunks.append('=<span class="attr-value">%s</span>' % attr.value)
                chunks.append('</span>')
            # Preserve the tag's own terminator ("/>" or ">").
            tag_end = re_tag_end.search(code[start:end])
            if tag_end:
                chunks.append(escape(tag_end.group(0)))
            chunks.append('</span>')

    scan(code, cb)
    chunks.append(escape(code[offset:]))  # trailing text after the last tag
    return ''.join(chunks)
def styles():
    """Return the CSS rules matching the span classes emitted by highlight()."""
    return """
    .dark .tag { color: #77c7b4; }
    .dark .attr-name { color: #8fd260; }
    .dark .attr-value { color: #ff6e61; }
    .light .tag { color: #0046aa; }
    .light .attr-name { color: #017ab7; }
    .light .attr-value { color: #017ab7; }
    """
def escape(code: str):
    """HTML-escape *code*, leaving quote characters untouched."""
    return html.escape(code, quote=False)
| [
"serge.che@gmail.com"
] | serge.che@gmail.com |
68066c1b7cc8196473a609c02be7e95140327953 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/3346b78a8a872286a245d1e77ef4718fc5e6be1a-<has_zoneinfo_database>-bug.py | 64bd17668d26971a8bc27e797781cbcb4d0f7c4d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | @cached_property
def has_zoneinfo_database(self):
with self.connection.cursor() as cursor:
cursor.execute('SELECT 1 FROM mysql.time_zone LIMIT 1')
return (cursor.fetchone() is not None) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
afa3e2142f3568deab7b6eba89c0bd6107a71827 | c7c0aff3a367e21aeb8470c74607f882266a4bdc | /pystagram/settings.py | 982ac51073b3e076d2c92020039bea5bc90684a7 | [] | no_license | hanquf1/s5-pystagram | 30c58a97ad909120ad5dcb72c7517106aff66fe0 | a893e3a4aa8c6d7f0bde4a734f5ae9e602678692 | refs/heads/master | 2021-01-21T20:06:46.221481 | 2016-03-12T08:16:17 | 2016-03-12T08:16:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | """
Django settings for pystagram project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Django settings for the pystagram project (Django 1.9 layout).
#
# Background:
#   https://docs.djangoproject.com/en/1.9/topics/settings/
#   https://docs.djangoproject.com/en/1.9/ref/settings/

import os

# Absolute path of the project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f!fz*)t_l!xeio^fg@&w*gjb$&3-@pqx!d@-hp#g0c*qt^)y*g'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Any Host header is accepted -- a development convenience only.
ALLOWED_HOSTS = ['*']

# Django's contrib stack plus the local 'photos' application.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'photos',
]

# Request/response middleware (pre-1.10 MIDDLEWARE_CLASSES style).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'pystagram.urls'

# Template engine: a project-level templates/ directory plus per-app dirs.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'pystagram.wsgi.application'

# Database: a single SQLite file in the project root.
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
    {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static assets: served under STATIC_URL, collected into STATIC_ROOT.
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/staticfile_url/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'my_static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'collected_statics')

# User uploads (photos) live under MEDIA_ROOT, served at MEDIA_URL.
MEDIA_URL = '/uploads/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload_files')

# Logging: verbose records to access.log, simple records to the console;
# the 'django' logger fans out to both at INFO level and above.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
        },
        'simple': {
            'format': '%(levelname)s %(message)s',
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': 'access.log',
            'formatter': 'verbose',
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'simple',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file', 'console'],
            'level': 'INFO',
        },
    },
}
| [
"kay@hannal.net"
] | kay@hannal.net |
941346ac5ae82a0bd2402d7543bd22bcb7f01648 | bd435e3ff491d13c3cb1ffcf34771ac1c80f7859 | /code/base/identity_operator.py | 9f341d5aa53d3e532e9a7e70ff8a938e3f91f132 | [] | no_license | luningcowboy/PythonTutorial | 8f4b6d16e0fad99a226540a6f12639ccdff402ff | 9024efe8ed22aca0a1271a2c1c388d3ffe1e6690 | refs/heads/master | 2021-06-16T23:03:22.153473 | 2020-04-09T13:52:12 | 2020-04-09T13:52:12 | 187,571,993 | 0 | 0 | null | 2021-03-25T23:02:36 | 2019-05-20T05:16:13 | Python | UTF-8 | Python | false | false | 440 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
a = 20
b = 20
if (a is b):
print 'a, b 有相同的标识'
else:
print 'a, b 有不同的标识'
if (a is not b):
print 'a, b 有不同的标识'
else:
print 'a, b 有相同的标识'
b = 30
if (a is b):
print 'a, b 有相同的标识'
else:
print 'a, b 有不同的标识'
if (a is not b):
print 'a, b 有不同的标识'
else:
print 'a, b 有相同的标识'
| [
"luningcowboy@gmail.com"
] | luningcowboy@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.