blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7548fe3f47a10d5a78a43b667b7b2a0f726ef768 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003_7/Gather2_W_fixGood_C_change/train/pyr_1s/L6/step09_1side_L6.py | 89ebd751954369c671fb1720c3654f0e2e924585 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,525 | py | #############################################################################################################################################################################################################
from step08_c_use_G_generate_I_w_M_to_Wx_Wy_Wz_focus_to_Cx_Cy_focus_combine import I_w_M_to_W_to_C
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W_to_C
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
color_jit = Color_jit(do_ratio=0.6)
use_gen_op_p20 = I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) ) ### 我目前的 multi_model 的 I_to_Wxyz_to_Cxy_general 是 全部都回傳 Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M, Cy_pre_w_M, 所以不管 wi/woDIV, Separate 全設 True 就對了
use_train_step_p20 = Train_step_I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit ) ### 我目前的 multi_model 的 I_to_Wxyz_to_Cxy_general 是 全部都回傳 Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M, Cy_pre_w_M, 所以不管 wi/woDIV, Separate 全設 True 就對了
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7.W_w_M_to_C_pyr.pyr_1s.L6.step09_1side_L6 import *
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7.I_w_M_to_W_pyr.pyr_3s.L5.step09_3side_L5 import ch032_pyramid_1side_6__2side_5__3side_2 as I_w_M_to_W_Tcrop255_p20_3s_L5_good
import time
start_time = time.time()
###############################################################################################################################################################################################
#########################################################################################
ch032_pyramid_1side_1_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
ch032_pyramid_1side_2_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
ch032_pyramid_1side_3_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
ch032_pyramid_1side_4_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
ch032_pyramid_1side_5_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
ch032_pyramid_1side_6_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
ch032_pyramid_1side_7_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_1side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
    import numpy as np

    # Report how long it took to build all of the KModel_builder objects above.
    print("build_model cost time:", time.time() - start_time)
    # Dummy input: one 512x512 single-channel image batch.
    data = np.zeros(shape=(1, 512, 512, 1))

    # Smoke-test one of the combined I->W->C builders defined above.
    use_model = ch032_pyramid_1side_4_and_1s6_2s6
    use_model = use_model.build()
    # NOTE(review): generator appears to accept the image plus a Mask keyword
    # argument -- confirm against the multi_flow_unet signature.
    result = use_model.generator(data, Mask=data)
    print(result[0].shape)

    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(use_model.generator)
    use_model.generator.summary()
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
fcaf80bdda5cd0b6b0ce65c9fb6246ac8ad6e44f | dbc0202e6a002fae4d86a0b16649af0cc1577b4e | /app/newsh.py | 489d0f8eff1e569c83b5d60dd3031a701929a349 | [] | no_license | huangchao20/CheckPackage | c8613efd844e7ac20cfaf090590b341d1241702f | c2ddddd2578a2b15044573261520f29b3f110450 | refs/heads/master | 2020-04-16T21:56:48.858233 | 2019-01-23T00:39:16 | 2019-01-23T00:39:16 | 165,946,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import os
import re
def createNewSh(str1):
    """Entry point for assembling a new .sh script from *str1* (a file path).

    NOTE(review): currently a stub -- it only logs its argument and checks
    that the path points to an existing file; the actual build step has not
    been written yet.
    """
    print("开始组建sh脚本")
    print("str1=[%s]" % str1)
    # Placeholder: nothing is done yet even when the file exists.
    if os.path.isfile(str1):
        pass
def openfile(dpath, filename):
    """Patch the ``install`` command into a shell script in *dpath*.

    For a ``.sh`` file: the original is renamed to ``22222222222.sh`` as a
    working copy, then rewritten line by line into *filename*.  A line
    containing the marker ``satrtflag`` (sic), or the first line containing
    ``install ``, is replaced by a freshly built install command of the form
    ``install <last path component> <filename>``; all other lines are copied
    unchanged.  For any other extension the file is simply overwritten (from
    offset 0) with the text ``test massage``.

    Side effects: changes the process working directory to *dpath* and
    leaves the renamed working copy behind.

    :param dpath: directory containing the script (its last ``\\``-separated
        component is used as the install target name)
    :param filename: name of the script file inside *dpath*
    :return: None
    """
    if os.path.splitext(filename)[1] == ".sh":
        start_marker = "satrtflag"   # (sic) sentinel line in the template
        install = "install "
        os.chdir(dpath)
        backup_name = "22222222222.sh"
        os.rename(filename, backup_name)
        # The install line is identical for every input line, so build it
        # once instead of once per iteration.
        # e.g. 'install XQ-2018-791 TR_45871_X_20181210.sh'
        install_line = install + dpath.split("\\").pop() + " " + filename + "\n"
        replace_pending = True  # only the first plain "install" line is replaced
        with open(backup_name, "r") as src:
            with open(filename, 'w+') as dst:
                for line in src:
                    if start_marker in line:
                        print(install_line)
                        dst.write(install_line)
                    elif install in line and replace_pending:
                        print(install_line)
                        dst.write(install_line)
                        replace_pending = False
                    else:
                        dst.write(line)
    else:
        os.chdir(dpath)
        with open(filename, 'r+') as f:
            # Overwrites the first 12 bytes of the existing file.
            f.write('test massage')
if __name__ == '__main__':
dpath = "F:\\黄小宝的宝\\测试目录"
filename = "1111111111.sh" | [
"842713855@qq.com"
] | 842713855@qq.com |
a460369a6cb9776a2850ebfd39f7e10664457c89 | e5d5fa28999bcc6c642bb42dda93afd38e272b81 | /LeetCode/28. Find the Index of the First Occurrence in a String/solve4.py | 6bdbe92c17e5dbadf39ec79dae682ed9224d6700 | [] | no_license | chiahsun/problem_solving | cd3105969983d16d3d5d416d4a0d5797d4b58e91 | 559fafa92dd5516058bdcea82a438eadf5aa1ede | refs/heads/master | 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | class Solution:
def strStr(self, haystack: str, needle: str) -> int:
N, prev, pos = len(needle), -1, -1
fallback = [-1] * N
for i in range(1, N):
while prev >= 0 and needle[i] != needle[prev+1]:
prev = fallback[prev]
if needle[i] == needle[prev+1]:
prev += 1
fallback[i] = prev
for i, c in enumerate(haystack):
while pos >= 0 and needle[pos+1] != c:
pos = fallback[pos]
if pos >= 0 or needle[0] == c:
pos += 1
if pos == N-1:
return i-N+1
return -1
| [
"chiahsun0814@gmail.com"
] | chiahsun0814@gmail.com |
b6930afe6a1a7a309ead3c407c3baa734f28d9c1 | 951500339f5887590fbf83a900dc633887402580 | /examples/06-classes.py | 38f77b461d46ceb82464e6b518e669ca42ae5a97 | [] | no_license | Tanwuld/skillbox-chat | abfd4b6a888c84ee6274ace42e295508594887ef | 4218bebe40167549bee5e6ee45f5b9623f84ef03 | refs/heads/master | 2020-09-29T04:47:45.834886 | 2019-12-12T15:59:37 | 2019-12-12T15:59:37 | 226,955,034 | 1 | 0 | null | 2019-12-12T15:59:39 | 2019-12-09T19:52:57 | Python | UTF-8 | Python | false | false | 1,253 | py | # Created by Artem Manchenkov
# artyom@manchenkoff.me
#
# Copyright © 2019
#
# Объектно-ориентированное программирование, использование классов и объектов
#
# Простой класс с переменными
# Simplest form: a class that only declares its fields, without initialising them.
class Person:
    # Class-level annotations: they describe the attributes but create no
    # values -- each attribute must be assigned on the instance before use.
    first_name: str
    last_name: str
    age: int
# Instantiate the class and populate its annotated attributes one at a time.
person1 = Person()
person1.first_name = 'John'
person1.last_name = 'Doe'
person1.age = 43

print(person1.first_name)
# Простой класс с конструктором
class Person:
    first_name: str
    last_name: str
    age: int

    # The constructor now takes the attribute values up front; `age` is
    # optional and defaults to 0.
    def __init__(self, first_name: str, last_name: str, age: int = 0):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
# All attributes are now filled in by a single constructor call.
person1 = Person('John', 'Doe', 43)
print(person1.first_name)
# Класс с методами
class Person:
    """A person described by first/last name and an optional age."""

    first_name: str
    last_name: str
    age: int

    def __init__(self, first_name: str, last_name: str, age: int = 0):
        """Store the given name parts and age (age defaults to 0)."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def info(self):
        """Print a one-line, human-readable summary of this person."""
        summary = f"Person: {self.first_name} {self.last_name}, age: {self.age}"
        print(summary)
# Build a person and let the instance print its own summary.
person1 = Person('John', 'Doe', 43)
person1.info()
| [
"artyom@manchenkoff.me"
] | artyom@manchenkoff.me |
ef3b7fb16a5b900b2a8336fd5516cad4bdbbb9dd | c2163c653ba589ea610733250230ab4b57ab7d6a | /doc/uses/EPSCs-and-IPSCs/smooth histogram method/04.py | 6d67cefc7771ee5cfdad63b64fa73f9b4ad96701 | [
"MIT"
] | permissive | harshadsbs/SWHLab | 6ff57f816f252da888f5827e9ea677e696e69038 | a86c3c65323cec809a4bd4f81919644927094bf5 | refs/heads/master | 2021-05-20T12:24:55.229806 | 2018-12-14T03:18:38 | 2018-12-14T03:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | """
MOST OF THIS CODE IS NOT USED
ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def kernel_gaussian(size=100, sigma=None):
    """Return a 1-D Gaussian kernel of length *size*, normalised to sum to 1.

    When *sigma* is omitted it defaults to one tenth of the kernel size;
    otherwise it is truncated to an integer. The kernel is centred at
    size / 2.
    """
    if sigma is None:
        sigma = size / 10
    else:
        sigma = int(sigma)
    offsets = np.arange(size) - size / 2
    points = np.exp(-np.power(offsets, 2) / (2 * np.power(sigma, 2)))
    return points / sum(points)
def analyzeSweep(abf):
    """Analyze the current sweep of *abf* and summarise its current histogram.

    Builds a smoothed, peak-normalised histogram of the baseline-subtracted
    sweep (first 0.5 s discarded) and splits it around its peak.

    :param abf: swhlab ABF object positioned on a sweep (must provide
        sweepYsmartbase() and pointsPerSec)
    :return: [centerVal, EPSC, IPSC] where centerVal is the histogram bin
        value at the peak, and EPSC/IPSC are the summed lower/upper halves
        of the centred smoothed histogram.
    """
    Y = abf.sweepYsmartbase()
    # BUGFIX: slice index must be an int -- `abf.pointsPerSec * .5` is a
    # float, and float slice indices raise TypeError under modern numpy.
    Y = Y[int(abf.pointsPerSec * .5):]

    # create a 1 Kbin histogram with bins centered around 3x the SD from the mean
    AV, SD = np.average(Y), np.std(Y)
    B1, B2 = AV - SD * 3, AV + SD * 3
    nBins = 1000
    hist, bin_edges = np.histogram(Y, density=False, bins=nBins, range=(B1, B2))
    # BUGFIX: use floor division so the kernel size is an int, not a float.
    histSmooth = np.convolve(hist, kernel_gaussian(nBins // 5), mode='same')
    histSmooth = histSmooth / max(histSmooth)  # normalize to a peak of 1

    centerI = np.where(histSmooth == max(histSmooth))[0][0]  # calculate center
    histSmooth = np.roll(histSmooth, int(nBins / 2 - centerI))  # roll data so center is in middle
    centerVal = bin_edges[centerI]

    # Lower half ~ downward (EPSC-like) deflections, upper half ~ upward
    # (IPSC-like) deflections of the centred distribution.
    EPSC = np.sum(histSmooth[:int(len(histSmooth) / 2)])
    IPSC = np.sum(histSmooth[int(len(histSmooth) / 2):])
    return [centerVal, EPSC, IPSC]
if __name__=="__main__":
    # Demo driver: analyze every sweep of one ABF file and plot the results.
    abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
    abf=swhlab.ABF(abfFile)
    abf.kernel=abf.kernel_gaussian(sizeMS=500) # needed for smart base
    # Per-sweep results: time (min), histogram center shift, EPSC/IPSC power.
    Xs,centerVals,EPSCs,IPSCs=[],[],[],[]
    for sweep in abf.setsweeps():
        print("analyzing sweep",sweep)
        centerVal,EPSC,IPSC=analyzeSweep(abf)
        Xs.append(abf.sweepStart/60.0) # sweep start time in minutes
        centerVals.append(centerVal)
        EPSCs.append(EPSC)
        IPSCs.append(IPSC)

    # Top panel: excitatory vs inhibitory power over the experiment.
    plt.figure(figsize=(10,10))
    plt.subplot(211)
    plt.grid()
    plt.plot(Xs,EPSCs,'r',alpha=.8,lw=2,label="excitation")
    plt.plot(Xs,IPSCs,'b',alpha=.8,lw=2,label="inhibition")
    plt.ylabel("power (sum norm half)")
    plt.xlabel("experiment time (min)")
    plt.margins(0,.1)

    # Bottom panel: drift of the histogram center relative to baseline.
    plt.subplot(212)
    plt.grid()
    plt.plot(Xs,centerVals,'g',alpha=.8,lw=2)
    plt.ylabel("shift WRT baseline")
    plt.xlabel("experiment time (min)")
    plt.axhline(0,color='k',ls='--')
    plt.margins(0,.1)

    plt.show()
    print("DONE")
| [
"swharden@gmail.com"
] | swharden@gmail.com |
90be05ad622bd48ee39185b7ebe344684ce90150 | 2ca5a1fe5608eb8298d7e142ecca98fd0fa4566b | /venv/lib/python3.7/site-packages/Crypto/Cipher/_mode_siv.py | 077848ce6408996822dc5d8c426e00882ce0abf9 | [
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"MIT"
] | permissive | basemanbase/Animal-Health | 75bd2e4dc4eb29b6466125b197a77a70e14d91a4 | d9741aafd54126f05ba43e6f4ad6517755797c76 | refs/heads/master | 2023-05-30T17:53:19.905594 | 2022-01-30T05:52:53 | 2022-01-30T05:52:53 | 228,593,914 | 0 | 1 | MIT | 2023-05-01T20:37:31 | 2019-12-17T10:41:05 | Python | UTF-8 | Python | false | false | 13,742 | py | # ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Synthetic Initialization Vector (SIV) mode.
"""
__all__ = ['SivMode']
from binascii import hexlify
from Crypto.Util.py3compat import byte_string, bord, unhexlify, b
from Crypto.Util.number import long_to_bytes, bytes_to_long
from Crypto.Protocol.KDF import _S2V
from Crypto.Hash import BLAKE2s
from Crypto.Random import get_random_bytes
class SivMode(object):
    """Synthetic Initialization Vector (SIV).

    This is an Authenticated Encryption with Associated Data (`AEAD`_) mode.
    It provides both confidentiality and authenticity.

    The header of the message may be left in the clear, if needed, and it will
    still be subject to authentication. The decryption step tells the receiver
    if the message comes from a source that really knows the secret key.
    Additionally, decryption detects if any part of the message - including the
    header - has been modified or corrupted.

    Unlike other AEAD modes such as CCM, EAX or GCM, accidental reuse of a
    nonce is not catastrophic for the confidentiality of the message. The only
    effect is that an attacker can tell when the same plaintext (and same
    associated data) is protected with the same key.

    The length of the MAC is fixed to the block size of the underlying cipher.
    The key size is twice the length of the key of the underlying cipher.

    This mode is only available for AES ciphers.

    +--------------------+---------------+-------------------+
    | Cipher             | SIV MAC size  | SIV key length    |
    |                    | (bytes)       | (bytes)           |
    +====================+===============+===================+
    |    AES-128         |      16       |        32         |
    +--------------------+---------------+-------------------+
    |    AES-192         |      16       |        48         |
    +--------------------+---------------+-------------------+
    |    AES-256         |      16       |        64         |
    +--------------------+---------------+-------------------+

    See `RFC5297`_ and the `original paper`__.

    .. _RFC5297: https://tools.ietf.org/html/rfc5297
    .. _AEAD: http://blog.cryptographyengineering.com/2012/05/how-to-choose-authenticated-encryption.html
    .. __: http://www.cs.ucdavis.edu/~rogaway/papers/keywrap.pdf

    :undocumented: __init__
    """

    def __init__(self, factory, key, nonce, kwargs):
        self.block_size = factory.block_size
        """The block size of the underlying cipher, in bytes."""

        self._factory = factory
        self._nonce = nonce
        self._cipher_params = kwargs

        # SIV keys are double-length: one half feeds the S2V PRF (MAC),
        # the other half keys the CTR encryption cipher.
        if len(key) not in (32, 48, 64):
            raise ValueError("Incorrect key length (%d bytes)" % len(key))

        if nonce is not None:
            if not byte_string(nonce):
                raise TypeError("When provided, the nonce must be a byte string")

            if len(nonce) == 0:
                raise ValueError("When provided, the nonce must be non-empty")

            self.nonce = nonce
            """Public attribute is only available in case of non-deterministic
            encryption."""

        subkey_size = len(key) // 2

        self._mac_tag = None  # Cache for MAC tag

        # First half of the key drives the S2V construction (the MAC/KDF),
        # second half is reserved for the CTR cipher built later.
        self._kdf = _S2V(key[:subkey_size],
                         ciphermod=factory,
                         cipher_params=self._cipher_params)
        self._subkey_cipher = key[subkey_size:]

        # Purely for the purpose of verifying that cipher_params are OK
        factory.new(key[:subkey_size], factory.MODE_ECB, **kwargs)

        # Allowed transitions after initialization
        self._next = [self.update, self.encrypt, self.decrypt,
                      self.digest, self.verify]

    def _create_ctr_cipher(self, mac_tag):
        """Create a new CTR cipher from the MAC in SIV mode"""

        tag_int = bytes_to_long(mac_tag)
        # The XOR-with-masked-self clears bit 31 and bit 63 of the tag
        # (rightmost bit = bit 0), i.e. the top bit of each of the two
        # low 32-bit words, as RFC 5297 requires before the tag is used
        # as the initial CTR counter block.
        return self._factory.new(
                        self._subkey_cipher,
                        self._factory.MODE_CTR,
                        initial_value=tag_int ^ (tag_int & 0x8000000080000000),
                        nonce=b(""),
                        **self._cipher_params)

    def update(self, component):
        """Protect one associated data component

        For SIV, the associated data is a sequence (*vector*) of non-empty
        byte strings (*components*).

        This method consumes the next component. It must be called
        once for each of the components that constitute the associated data.

        Note that the components have clear boundaries, so that:

            >>> cipher.update(b"builtin")
            >>> cipher.update(b"securely")

        is not equivalent to:

            >>> cipher.update(b"built")
            >>> cipher.update(b"insecurely")

        If there is no associated data, this method must not be called.

        :Parameters:
          component : byte string
            The next associated data component. It must not be empty.
        """

        if self.update not in self._next:
            raise TypeError("update() can only be called"
                            " immediately after initialization")

        self._next = [self.update, self.encrypt, self.decrypt,
                      self.digest, self.verify]

        # Each component becomes one input string of the S2V vector.
        return self._kdf.update(component)

    def encrypt(self, plaintext):
        """Encrypt data with the key and the parameters set at initialization.

        A cipher object is stateful: once you have encrypted a message
        you cannot encrypt (or decrypt) another message using the same
        object.

        This method can be called only **once**.

        You cannot reuse an object for encrypting
        or decrypting other data with the same key.

        This function does not add any padding to the plaintext.

        :Parameters:
          plaintext : byte string
            The piece of data to encrypt.
            It can be of any length, but it cannot be empty.

        :Return:
            the encrypted data, as a byte string.
            It is as long as *plaintext*.
        """

        if self.encrypt not in self._next:
            raise TypeError("encrypt() can only be called after"
                            " initialization or an update()")

        self._next = [self.digest]

        # The nonce (when present) is authenticated as the last-but-one
        # component; the plaintext is always the final S2V component.
        if self._nonce:
            self._kdf.update(self.nonce)

        self._kdf.update(plaintext)
        self._mac_tag = self._kdf.derive()

        # In SIV the MAC tag doubles as the (synthetic) IV of the CTR cipher.
        cipher = self._create_ctr_cipher(self._mac_tag)

        return cipher.encrypt(plaintext)

    def decrypt(self, ciphertext):
        """Decrypt data with the key and the parameters set at initialization.

        For SIV, decryption and verification must take place at the same
        point. This method shall not be used.

        Use `decrypt_and_verify` instead.
        """

        raise TypeError("decrypt() not allowed for SIV mode."
                        " Use decrypt_and_verify() instead.")

    def digest(self):
        """Compute the *binary* MAC tag.

        The caller invokes this function at the very end.

        This method returns the MAC that shall be sent to the receiver,
        together with the ciphertext.

        :Return: the MAC, as a byte string.
        """

        if self.digest not in self._next:
            raise TypeError("digest() cannot be called when decrypting"
                            " or validating a message")
        self._next = [self.digest]
        if self._mac_tag is None:
            self._mac_tag = self._kdf.derive()
        return self._mac_tag

    def hexdigest(self):
        """Compute the *printable* MAC tag.

        This method is like `digest`.

        :Return: the MAC, as a hexadecimal string.
        """

        return "".join(["%02x" % bord(x) for x in self.digest()])

    def verify(self, received_mac_tag):
        """Validate the *binary* MAC tag.

        The caller invokes this function at the very end.

        This method checks if the decrypted message is indeed valid
        (that is, if the key is correct) and it has not been
        tampered with while in transit.

        :Parameters:
          received_mac_tag : byte string
            This is the *binary* MAC, as received from the sender.
        :Raises ValueError:
            if the MAC does not match. The message has been tampered with
            or the key is incorrect.
        """

        if self.verify not in self._next:
            raise TypeError("verify() cannot be called"
                            " when encrypting a message")
        self._next = [self.verify]

        if self._mac_tag is None:
            self._mac_tag = self._kdf.derive()

        # Compare keyed hashes of the two tags (fresh random key each call)
        # instead of the tags themselves, so the comparison leaks no timing
        # information about the expected tag.
        secret = get_random_bytes(16)

        mac1 = BLAKE2s.new(digest_bits=160, key=secret, data=self._mac_tag)
        mac2 = BLAKE2s.new(digest_bits=160, key=secret, data=received_mac_tag)

        if mac1.digest() != mac2.digest():
            raise ValueError("MAC check failed")

    def hexverify(self, hex_mac_tag):
        """Validate the *printable* MAC tag.

        This method is like `verify`.

        :Parameters:
          hex_mac_tag : string
            This is the *printable* MAC, as received from the sender.
        :Raises ValueError:
            if the MAC does not match. The message has been tampered with
            or the key is incorrect.
        """

        self.verify(unhexlify(hex_mac_tag))

    def encrypt_and_digest(self, plaintext):
        """Perform encrypt() and digest() in one step.

        :Parameters:
          plaintext : byte string
            The piece of data to encrypt.
        :Return:
            a tuple with two byte strings:

            - the encrypted data
            - the MAC
        """

        return self.encrypt(plaintext), self.digest()

    def decrypt_and_verify(self, ciphertext, mac_tag):
        """Perform decryption and verification in one step.

        A cipher object is stateful: once you have decrypted a message
        you cannot decrypt (or encrypt) another message with the same
        object.

        You cannot reuse an object for encrypting
        or decrypting other data with the same key.

        This function does not remove any padding from the plaintext.

        :Parameters:
          ciphertext : byte string
            The piece of data to decrypt.
            It can be of any length.
          mac_tag : byte string
            This is the *binary* MAC, as received from the sender.

        :Return: the decrypted data (byte string).
        :Raises ValueError:
            if the MAC does not match. The message has been tampered with
            or the key is incorrect.
        """

        if self.decrypt not in self._next:
            raise TypeError("decrypt() can only be called"
                            " after initialization or an update()")
        self._next = [self.verify]

        # Take the MAC and start the cipher for decryption
        self._cipher = self._create_ctr_cipher(mac_tag)

        plaintext = self._cipher.decrypt(ciphertext)

        # Recompute S2V over (associated data, nonce, plaintext) so verify()
        # can compare the derived tag against the received one.
        if self._nonce:
            self._kdf.update(self.nonce)
        if plaintext:
            self._kdf.update(plaintext)
        self.verify(mac_tag)

        return plaintext
def _create_siv_cipher(factory, **kwargs):
    """Create a new block cipher, configured in
    Synthetic Initialization Vector (SIV) mode.

    :Parameters:
      factory : object
        A symmetric cipher module from `Crypto.Cipher`
        (like `Crypto.Cipher.AES`).

    :Keywords:
      key : byte string
        The secret key to use in the symmetric cipher.
        It must be 32, 48 or 64 bytes long.
        If AES is the chosen cipher, the variants *AES-128*,
        *AES-192* and or *AES-256* will be used internally.

      nonce : byte string
        For deterministic encryption, it is not present.

        Otherwise, it is a value that must never be reused
        for encrypting message under this key.

        There are no restrictions on its length,
        but it is recommended to use at least 16 bytes.
    """

    # `key` is mandatory; report its absence as a TypeError like a missing
    # positional argument would be.
    try:
        key = kwargs.pop("key")
    except KeyError as e:
        raise TypeError("Missing parameter: " + str(e))

    nonce = kwargs.pop("nonce", None)  # absent -> deterministic (nonce-less) SIV

    return SivMode(factory, key, nonce, kwargs)
| [
"ogunbaseemmanuel@yahoo.com"
] | ogunbaseemmanuel@yahoo.com |
9e46dea8c726e6b556c22797629ff6ce5462f2a9 | 9aaa39f200ee6a14d7d432ef6a3ee9795163ebed | /Algorithm/Python/507. Perfect Number.py | ec25990fcf6a4ba827dcadc6c1d1b4da2a527a0f | [] | no_license | WuLC/LeetCode | 47e1c351852d86c64595a083e7818ecde4131cb3 | ee79d3437cf47b26a4bca0ec798dc54d7b623453 | refs/heads/master | 2023-07-07T18:29:29.110931 | 2023-07-02T04:31:00 | 2023-07-02T04:31:00 | 54,354,616 | 29 | 16 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2017-03-31 23:48:21
# @Last modified by: WuLC
# @Last Modified time: 2017-03-31 23:49:23
# @Email: liangchaowu5@gmail.com
# naive solution
class Solution(object):
    def checkPerfectNumber(self, num):
        """
        Return True if *num* equals the sum of its proper positive divisors.

        Fixes over the original: `xrange` is Python-2-only (NameError on
        Python 3); `num/i` is true division on Python 3, turning the sum
        into a float; and when num is a perfect square its square root was
        counted twice.

        :type num: int
        :rtype: bool
        """
        if num <= 1:
            return False
        total = 1  # 1 divides every num > 1
        # Enumerate divisor pairs (i, num // i) up to sqrt(num).
        for i in range(2, int(math.sqrt(num)) + 1):
            if num % i == 0:
                total += i
                partner = num // i
                if partner != i:  # do not double-count a square root
                    total += partner
        return total == num
"liangchaowu5@gmail.com"
] | liangchaowu5@gmail.com |
fb18b89e34d8324bd64c7a65ddc675258ea78b59 | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/input/ch6_code/src/Stepik.6.3.ExerciseBreak.CountUniqueReversalsFor100SyntenyBlocks.py | ebdc61515fc02e393e9c4f69178b127ff28b5644 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # Poorly worded question. Here's my crack at rewording it: What's the maximum number of unique reversals possible on a
# permutation of length 100? So for example, assume you have a permutation of length 2: [+A, +B]...
#
# reverse 1: [+A, +B] to [-A ,+B]
# reverse 2: [+A, +B] to [+A, -B]
# reverse range 1-2: [+A, +B] to [-B, -A]
#
# That's it. Any permutation of length 2 will have a max of 3 unique reversals possible.
#
# Now apply the logic to a permutation of length 100.
total = 0
block_count = 100
# A unique reversal is a choice of endpoints (end, start) with
# 1 <= end <= start <= block_count, so each value of `start` contributes
# exactly `start` new reversals (this collapses the original inner loop).
for start in range(1, block_count + 1):
    total += start
print(f'{total}')
| [
"offbynull@gmail.com"
] | offbynull@gmail.com |
def SetInList(Set, L):
    """Return True when every element of *Set* is also present in *L*."""
    return all(member in L for member in Set)
| [
"nbgao@126.com"
] | nbgao@126.com |
a1232a7431c67b8eab70fd33da37c300d8418e45 | 000002c39ac5c00f1f70d7667d772d3acbe95680 | /batchtest.py | 6b500f8323c871c4a0d69f28526dc462ca91a247 | [] | no_license | ag8/shapes | ab0dcfa1d46c412faf63c421edec9a0165eb5090 | c6b38eca3a50b8a31ab7ccec11158e4a99fb628b | refs/heads/master | 2020-12-03T06:36:40.435743 | 2017-07-07T01:23:23 | 2017-07-07T01:23:23 | 95,675,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from __future__ import print_function
import tensorflow as tf
# NOTE(review): TensorFlow 1.x graph-mode code (tf.Session,
# tf.RandomShuffleQueue, tf.train.batch) -- will not run under TF 2.x.

# Eight parallel "feature"/"label" string pairs used as toy queue input.
f = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"]
l = ["l1", "l2", "l3", "l4", "l5", "l6", "l7", "l8"]

fv = tf.constant(f)
lv = tf.constant(l)

# Shuffle queue: capacity 10, min_after_dequeue 0, scalar string pairs.
rsq = tf.RandomShuffleQueue(10, 0, [tf.string, tf.string], shapes=[[],[]])
do_enqueues = rsq.enqueue_many([fv, lv])

gotf, gotl = rsq.dequeue()

print("Getting batch")
# NOTE(review): the batch op (iB, lB) is built but never evaluated below --
# only the raw single-element dequeue is run.
iB, lB = tf.train.batch([gotf, gotl], batch_size=6, num_threads=4, capacity=2 * 3, dynamic_pad=True)
print("Got batch")

with tf.Session() as sess:
    tf.global_variables_initializer().run(session=sess)
    tf.train.start_queue_runners(sess=sess)
    sess.run(do_enqueues)
    for i in xrange(4):  # NOTE(review): xrange is Python 2 only
        one_f, one_l = sess.run([gotf, gotl])
        # NOTE(review): under Python 3 sess.run returns bytes, so
        # bytes + str here would raise TypeError -- confirm Python version.
        one_l = one_l + '3434'
        print("F: ", one_f, "L: ", one_l)
"andrew2000g@gmail.com"
] | andrew2000g@gmail.com |
f7eadefbc3b67fe920ef3ab321a31c5a0f3b62e9 | 67e817ca139ca039bd9eee5b1b789e5510119e83 | /Tree/[662]Maximum Width of Binary Tree.py | de96b2b77ecccee00510b6deba357ba2222af7b4 | [] | no_license | dstch/my_leetcode | 0dc41e7a2526c2d85b6b9b6602ac53f7a6ba9273 | 48a8c77e81cd49a75278551048028c492ec62994 | refs/heads/master | 2021-07-25T21:30:41.705258 | 2021-06-06T08:58:29 | 2021-06-06T08:58:29 | 164,360,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | # Given a binary tree, write a function to get the maximum width of the given tr
# ee. The maximum width of a tree is the maximum width among all levels.
#
# The width of one level is defined as the length between the end-nodes (the le
# ftmost and right most non-null nodes in the level, where the null nodes between
# the end-nodes are also counted into the length calculation.
#
# It is guaranteed that the answer will in the range of 32-bit signed integer.
#
#
# Example 1:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# Output: 4
# Explanation: The maximum width existing in the third level with the length 4 (
# 5,3,null,9).
#
#
# Example 2:
#
#
# Input:
#
# 1
# /
# 3
# / \
# 5 3
#
# Output: 2
# Explanation: The maximum width existing in the third level with the length 2 (
# 5,3).
#
#
# Example 3:
#
#
# Input:
#
# 1
# / \
# 3 2
# /
# 5
#
# Output: 2
# Explanation: The maximum width existing in the second level with the length 2
# (3,2).
#
#
# Example 4:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \
# 5 9
# / \
# 6 7
# Output: 8
# Explanation:The maximum width existing in the fourth level with the length 8 (
# 6,null,null,null,null,null,null,7).
#
#
#
# Constraints:
#
#
# The given binary tree will have between 1 and 3000 nodes.
#
# Related Topics Tree
# 👍 2131 👎 380
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Return the maximum width over all levels of the binary tree.

        Each node gets a heap-style position index (root = 1, left child
        2*i, right child 2*i + 1); the width of a level is then
        last_index - first_index + 1, counting the null slots in between.

        A preorder DFS reaches the leftmost node of every level first, so
        it is enough to remember only that first index per level and
        compare every later node's index against it. This replaces the
        original per-level list of all indices (O(n) extra memory) with
        one integer per level (O(height)).
        """
        first_index = []  # first_index[d] = index of leftmost node at depth d
        best = 0

        def dfs(node, depth, index):
            nonlocal best
            if not node:
                return
            if depth == len(first_index):
                # First (i.e. leftmost) node encountered at this depth.
                first_index.append(index)
            best = max(best, index - first_index[depth] + 1)
            dfs(node.left, depth + 1, index * 2)
            dfs(node.right, depth + 1, index * 2 + 1)

        dfs(root, 0, 1)
        return best
# leetcode submit region end(Prohibit modification and deletion)
| [
"dstch@163.com"
] | dstch@163.com |
d849d50e164d18d2d8753644d39bb684e7d01269 | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-3.7.0/bin/weewx/reportengine.py | f1cca8b3494f730e49e66d9d45e49f43aca3c450 | [
"GPL-3.0-only",
"GPL-1.0-or-later",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 33,547 | py | #
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Engine for generating reports"""
# System imports:
import datetime
import ftplib
import glob
import os.path
import shutil
import socket
import sys
import syslog
import threading
import time
import traceback
# 3rd party imports:
import configobj
# Weewx imports:
import weeutil.weeutil
from weeutil.weeutil import to_bool
import weewx.manager
# spans of valid values for each CRON like field
MINUTES = (0, 59)
HOURS = (0, 23)
DOM = (1, 31)
MONTHS = (1, 12)
DOW = (0, 6)

# valid day names for DOW field
DAY_NAMES = ('sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat')

# valid month names for month field
MONTH_NAMES = ('jan', 'feb', 'mar', 'apr', 'may', 'jun',
               'jul', 'aug', 'sep', 'oct', 'nov', 'dec')

# map month names to month number. Materialized with list() so the map can
# be iterated more than once on Python 3, where zip() returns a one-shot
# iterator (on Python 2 zip() already returns a list, so this is a no-op).
MONTH_NAME_MAP = list(zip(('jan', 'feb', 'mar', 'apr',
                           'may', 'jun', 'jul', 'aug',
                           'sep', 'oct', 'nov', 'dec'), range(1, 13)))

# map day names to day number
DAY_NAME_MAP = list(zip(('sun', 'mon', 'tue', 'wed',
                         'thu', 'fri', 'sat'), range(7)))

# map CRON like nicknames to equivalent CRON like line
NICKNAME_MAP = {
    "@yearly": "0 0 1 1 *",
    "@annually": "0 0 1 1 *",
    # Historical misspelling; kept so any existing configs using it
    # continue to work.
    "@anually": "0 0 1 1 *",
    "@monthly": "0 0 1 * *",
    "@weekly": "0 0 * * 0",
    "@daily": "0 0 * * *",
    "@hourly": "0 * * * *"
}

# list of valid spans for CRON like fields
SPANS = (MINUTES, HOURS, DOM, MONTHS, DOW)

# list of valid names for CRON like fields
NAMES = ((), (), (), MONTH_NAMES, DAY_NAMES)

# list of name maps for CRON like fields
MAPS = ((), (), (), MONTH_NAME_MAP, DAY_NAME_MAP)
# =============================================================================
# Class StdReportEngine
# =============================================================================
class StdReportEngine(threading.Thread):
    """Reporting engine for weewx.

    This engine runs zero or more reports. Each report uses a skin. A skin
    has its own configuration file specifying things such as which 'generators'
    should be run, which templates are to be used, what units are to be used,
    etc..

    A 'generator' is a class inheriting from class ReportGenerator, that
    produces the parts of the report, such as image plots, HTML files.

    StdReportEngine inherits from threading.Thread, so it will be run in a
    separate thread.

    See below for examples of generators.
    """

    def __init__(self, config_dict, stn_info, record=None, gen_ts=None, first_run=True):
        """Initializer for the report engine.

        config_dict: The configuration dictionary.

        stn_info: An instance of weewx.station.StationInfo, with static
        station information.

        record: The current archive record [Optional; default is None]

        gen_ts: The timestamp for which the output is to be current
        [Optional; default is the last time in the database]

        first_run: True if this is the first time the report engine has been
        run. If this is the case, then any 'one time' events should be done.
        """
        threading.Thread.__init__(self, name="ReportThread")

        self.config_dict = config_dict
        self.stn_info = stn_info
        self.record = record
        self.gen_ts = gen_ts
        self.first_run = first_run

    def run(self):
        """This is where the actual work gets done.

        Runs through the list of reports. For each enabled report, loads its
        skin configuration, merges in overrides from weewx.conf, checks the
        optional report_timing setting, then instantiates and runs each of
        the report's generators in turn. Generator failures are logged and
        skipped so one bad report cannot take down the others."""

        if self.gen_ts:
            syslog.syslog(syslog.LOG_DEBUG,
                          "reportengine: Running reports for time %s" %
                          weeutil.weeutil.timestamp_to_string(self.gen_ts))
        else:
            syslog.syslog(syslog.LOG_DEBUG, "reportengine: "
                          "Running reports for latest time in the database.")

        # Iterate over each requested report
        for report in self.config_dict['StdReport'].sections:

            # See if this report is disabled
            enabled = to_bool(self.config_dict['StdReport'][report].get('enable', True))
            if not enabled:
                syslog.syslog(syslog.LOG_DEBUG,
                              "reportengine: Skipping report %s" % report)
                continue

            syslog.syslog(syslog.LOG_DEBUG,
                          "reportengine: Running report %s" % report)

            # Figure out where the configuration file is for the skin used for
            # this report:
            skin_config_path = os.path.join(
                self.config_dict['WEEWX_ROOT'],
                self.config_dict['StdReport']['SKIN_ROOT'],
                self.config_dict['StdReport'][report].get('skin', 'Standard'),
                'skin.conf')

            # Retrieve the configuration dictionary for the skin. Wrap it in
            # a try block in case we fail. NB: "except X as e" (not the
            # Python-2-only "except X, e") works on Python 2.6+ and 3.
            try:
                skin_dict = configobj.ConfigObj(skin_config_path, file_error=True)
                syslog.syslog(
                    syslog.LOG_DEBUG,
                    "reportengine: Found configuration file %s for report %s" %
                    (skin_config_path, report))
            except IOError as e:
                syslog.syslog(
                    syslog.LOG_ERR, "reportengine: "
                    "Cannot read skin configuration file %s for report %s: %s"
                    % (skin_config_path, report, e))
                syslog.syslog(syslog.LOG_ERR, " **** Report ignored")
                continue
            except SyntaxError as e:
                syslog.syslog(
                    syslog.LOG_ERR, "reportengine: "
                    "Failed to read skin configuration file %s for report %s: %s"
                    % (skin_config_path, report, e))
                syslog.syslog(syslog.LOG_ERR, " **** Report ignored")
                continue

            # Add the default database binding:
            skin_dict.setdefault('data_binding', 'wx_binding')

            # Default to logging to whatever is specified at the root level
            # of weewx.conf, or true if nothing specified:
            skin_dict.setdefault('log_success',
                                 self.config_dict.get('log_success', True))
            skin_dict.setdefault('log_failure',
                                 self.config_dict.get('log_failure', True))

            # Inject any overrides the user may have specified in the
            # weewx.conf configuration file for all reports:
            for scalar in self.config_dict['StdReport'].scalars:
                skin_dict[scalar] = self.config_dict['StdReport'][scalar]

            # Now inject any overrides for this specific report:
            skin_dict.merge(self.config_dict['StdReport'][report])

            # Finally, add the report name:
            skin_dict['REPORT_NAME'] = report

            # Default action is to run the report. Only reason to not run it is
            # if we have a valid report report_timing and it did not trigger.
            if self.record is not None:
                # StdReport called us not wee_reports so look for a report_timing
                # entry if we have one.
                timing_line = skin_dict.get('report_timing', None)
                # The report_timing entry might have one or more comma separated
                # values which ConfigObj would interpret as a list. If so then
                # reconstruct our report_timing entry.
                if hasattr(timing_line, '__iter__'):
                    timing_line = ','.join(timing_line)
                if timing_line:
                    # Get a ReportTiming object.
                    timing = ReportTiming(timing_line)
                    if timing.is_valid:
                        # Get timestamp and interval so we can check if the
                        # report timing is triggered.
                        _ts = self.record['dateTime']
                        _interval = self.record['interval'] * 60
                        # Is our report timing triggered? timing.is_triggered
                        # returns True if triggered, False if not triggered
                        # and None if an invalid report timing line.
                        if timing.is_triggered(_ts, _ts - _interval) is False:
                            # report timing was valid but not triggered so do
                            # not run the report.
                            syslog.syslog(syslog.LOG_DEBUG, "reportengine: Report %s skipped due to report_timing setting" %
                                          (report, ))
                            continue
                    else:
                        syslog.syslog(syslog.LOG_DEBUG, "reportengine: Invalid report_timing setting for report '%s', running report anyway" % report)
                        syslog.syslog(syslog.LOG_DEBUG, " **** %s" % timing.validation_error)

            for generator in weeutil.weeutil.option_as_list(skin_dict['Generators'].get('generator_list')):
                try:
                    # Instantiate an instance of the class.
                    obj = weeutil.weeutil._get_object(generator)(
                        self.config_dict,
                        skin_dict,
                        self.gen_ts,
                        self.first_run,
                        self.stn_info,
                        self.record)
                except Exception as e:
                    syslog.syslog(
                        syslog.LOG_CRIT, "reportengine: "
                        "Unable to instantiate generator %s" % generator)
                    syslog.syslog(syslog.LOG_CRIT, " **** %s" % e)
                    weeutil.weeutil.log_traceback(" **** ")
                    syslog.syslog(syslog.LOG_CRIT, " **** Generator ignored")
                    traceback.print_exc()
                    continue

                try:
                    # Call its start() method
                    obj.start()

                except Exception as e:
                    # Caught unrecoverable error. Log it, continue on to the
                    # next generator.
                    syslog.syslog(
                        syslog.LOG_CRIT, "reportengine: "
                        "Caught unrecoverable exception in generator %s"
                        % generator)
                    syslog.syslog(syslog.LOG_CRIT, " **** %s" % str(e))
                    weeutil.weeutil.log_traceback(" **** ")
                    syslog.syslog(syslog.LOG_CRIT, " **** Generator terminated")
                    traceback.print_exc()
                    continue

                finally:
                    obj.finalize()
# =============================================================================
# Class ReportGenerator
# =============================================================================
class ReportGenerator(object):
    """Abstract base class for all report generators.

    The report engine constructs a generator, invokes start(), and always
    calls finalize() afterwards to release database resources. Concrete
    generators put their work in run().
    """

    def __init__(self, config_dict, skin_dict, gen_ts, first_run, stn_info, record):
        """Save the engine-supplied context and open a database binder.

        Subclasses read these attributes directly."""
        self.config_dict = config_dict
        self.skin_dict = skin_dict
        self.gen_ts = gen_ts
        self.first_run = first_run
        self.stn_info = stn_info
        self.record = record
        # Binder for database access; closed again in finalize().
        self.db_binder = weewx.manager.DBBinder(self.config_dict)

    def start(self):
        """Entry point used by the engine; simply delegates to run()."""
        self.run()

    def run(self):
        """Perform the actual report generation. Overridden by subclasses."""
        pass

    def finalize(self):
        """Release the database connections opened for this generator."""
        self.db_binder.close()
# =============================================================================
# Class FtpGenerator
# =============================================================================
class FtpGenerator(ReportGenerator):
    """Class for managing the "FTP generator".

    This will ftp everything in the public_html subdirectory to a webserver."""

    def run(self):
        import weeutil.ftpupload

        # determine how much logging is desired
        log_success = to_bool(self.skin_dict.get('log_success', True))

        t1 = time.time()
        # A skin-specific HTML_ROOT overrides the global one.
        if 'HTML_ROOT' in self.skin_dict:
            local_root = os.path.join(self.config_dict['WEEWX_ROOT'],
                                      self.skin_dict['HTML_ROOT'])
        else:
            local_root = os.path.join(self.config_dict['WEEWX_ROOT'],
                                      self.config_dict['StdReport']['HTML_ROOT'])

        try:
            ftp_data = weeutil.ftpupload.FtpUpload(
                server=self.skin_dict['server'],
                user=self.skin_dict['user'],
                password=self.skin_dict['password'],
                local_root=local_root,
                remote_root=self.skin_dict['path'],
                port=int(self.skin_dict.get('port', 21)),
                name=self.skin_dict['REPORT_NAME'],
                passive=to_bool(self.skin_dict.get('passive', True)),
                max_tries=int(self.skin_dict.get('max_tries', 3)),
                secure=to_bool(self.skin_dict.get('secure_ftp', False)),
                debug=int(self.skin_dict.get('debug', 0)))
        except Exception:
            # A missing option (e.g. no 'server') raises here; treat it as
            # "FTP not configured" and skip quietly.
            syslog.syslog(syslog.LOG_DEBUG,
                          "ftpgenerator: FTP upload not requested. Skipped.")
            return

        try:
            n = ftp_data.run()
        except (socket.timeout, socket.gaierror, ftplib.all_errors, IOError) as e:
            # "except (...) as e" replaces the Python-2-only
            # "except (...), e" form; valid on Python 2.6+ and Python 3.
            (cl, unused_ob, unused_tr) = sys.exc_info()
            syslog.syslog(syslog.LOG_ERR, "ftpgenerator: "
                          "Caught exception %s: %s" % (cl, e))
            weeutil.weeutil.log_traceback(" **** ")
            return

        t2 = time.time()
        if log_success:
            syslog.syslog(syslog.LOG_INFO,
                          "ftpgenerator: ftp'd %d files in %0.2f seconds" %
                          (n, (t2 - t1)))
# =============================================================================
# Class RsynchGenerator
# =============================================================================
class RsyncGenerator(ReportGenerator):
    """Class for managing the "rsync generator".

    This will rsync everything in the public_html subdirectory to a server."""

    def run(self):
        import weeutil.rsyncupload
        # We don't try to collect performance statistics about rsync, because
        # rsync will report them for us. Check the debug log messages.
        try:
            # A skin-specific HTML_ROOT overrides the global one.
            if 'HTML_ROOT' in self.skin_dict:
                html_root = self.skin_dict['HTML_ROOT']
            else:
                html_root = self.config_dict['StdReport']['HTML_ROOT']
            rsync_data = weeutil.rsyncupload.RsyncUpload(
                local_root=os.path.join(self.config_dict['WEEWX_ROOT'], html_root),
                remote_root=self.skin_dict['path'],
                server=self.skin_dict['server'],
                user=self.skin_dict.get('user'),
                port=self.skin_dict.get('port'),
                ssh_options=self.skin_dict.get('ssh_options'),
                compress=to_bool(self.skin_dict.get('compress', False)),
                delete=to_bool(self.skin_dict.get('delete', False)),
                log_success=to_bool(self.skin_dict.get('log_success', True)))
        except Exception:
            # A missing option (e.g. no 'server' or 'path') raises here;
            # treat it as "rsync not configured" and skip quietly.
            syslog.syslog(syslog.LOG_DEBUG,
                          "rsyncgenerator: rsync upload not requested. Skipped.")
            return

        try:
            rsync_data.run()
        except IOError as e:
            # "except IOError as e" replaces the Python-2-only
            # "except IOError, e" form; valid on Python 2.6+ and Python 3.
            (cl, unused_ob, unused_tr) = sys.exc_info()
            syslog.syslog(syslog.LOG_ERR, "rsyncgenerator: "
                          "Caught exception %s: %s" % (cl, e))
# =============================================================================
# Class CopyGenerator
# =============================================================================
class CopyGenerator(ReportGenerator):
    """Class for managing the 'copy generator.'

    This will copy files from the skin subdirectory to the public_html
    subdirectory."""

    def run(self):
        """Copy the skin's static files into the HTML output directory."""
        generator_options = self.skin_dict['CopyGenerator']

        # Honor the generator's own logging preference.
        want_log = to_bool(generator_options.get('log_success', True))

        # Assemble the file patterns to copy: 'copy_once' entries are only
        # picked up on the engine's first invocation, 'copy_always' entries
        # on every invocation. Either option may be absent.
        patterns = []
        if self.first_run and 'copy_once' in generator_options:
            patterns += weeutil.weeutil.option_as_list(generator_options['copy_once'])
        if 'copy_always' in generator_options:
            patterns += weeutil.weeutil.option_as_list(generator_options['copy_always'])

        # All patterns are interpreted relative to the skin directory.
        os.chdir(os.path.join(self.config_dict['WEEWX_ROOT'],
                              self.skin_dict['SKIN_ROOT'],
                              self.skin_dict['skin']))

        # Destination root for everything we copy.
        destination_root = os.path.join(self.config_dict['WEEWX_ROOT'],
                                        self.skin_dict['HTML_ROOT'])

        total_copied = 0
        for pattern in patterns:
            # Patterns may contain wildcards; expand each one.
            for source_file in glob.glob(pattern):
                # Preserve any relative subdirectory in the destination.
                target_dir = os.path.join(destination_root,
                                          os.path.dirname(source_file))
                try:
                    os.makedirs(target_dir)
                except OSError:
                    # Directory already exists; nothing to do.
                    pass
                # shutil.copy does not carry over the modification time, so
                # the copy looks like a new file to downstream uploaders
                # (e.g. the FTP generator).
                shutil.copy(source_file, target_dir)
                total_copied += 1

        if want_log:
            syslog.syslog(syslog.LOG_INFO, "copygenerator: "
                          "copied %d files to %s" % (total_copied, destination_root))
# ===============================================================================
# Class ReportTiming
# ===============================================================================
class ReportTiming(object):
    """Class for processing a CRON like line and determining whether it should
    be fired for a given time.

    The following CRON like capabilities are supported:

    - There are two ways to specify the day the line is fired, DOM and DOW. A
      match on either all other fields and either DOM or DOW will cause the
      line to be fired.
    - first-last, *. Matches all possible values for the field concerned.
    - step, /x. Matches every xth minute/hour/day etc. May be bounded by a list
      or range.
    - range, lo-hi. Matches all values from lo to hi inclusive. Ranges using
      month and day names are not supported.
    - lists, x,y,z. Matches those items in the list. List items may be a range.
      Lists using month and day names are not supported.
    - month names. Months may be specified by number 1..12 or first 3 (case
      insensitive) letters of the English month name jan..dec.
    - weekday names. Weekday names may be specified by number 0..7
      (0,7 = Sunday) or first 3 (case insensitive) letters of the English
      weekday names sun..sat.
    - nicknames. Following nicknames are supported:

      @yearly   : Run once a year,  ie "0 0 1 1 *"
      @annually : Run once a year,  ie "0 0 1 1 *"
      @monthly  : Run once a month, ie "0 0 1 * *"
      @weekly   : Run once a week,  ie "0 0 * * 0"
      @daily    : Run once a day,   ie "0 0 * * *"
      @hourly   : Run once an hour, ie "0 * * * *"

    Useful ReportTiming class attributes:

    is_valid:         Whether passed line is a valid line or not.
    validation_error: Error message if passed line is an invalid line.
    raw_line:         Raw line data passed to ReportTiming.
    line:             5 item list representing the 5 date/time fields after the
                      raw line has been processed and dom/dow named parameters
                      replaced with numeric equivalents.
    """

    def __init__(self, line):
        """Initialises a ReportTiming object.

        Processes raw line to produce 5 field line suitable for further
        processing.

        line: The raw line to be processed.
        """

        # initialise some properties
        self.is_valid = None
        self.validation_error = None
        self.raw_line = line.strip()
        # do some basic checking of the line for unsupported characters
        for unsupported_char in ('%', '#', 'L', 'W'):
            if unsupported_char in line:
                self.is_valid = False
                self.validation_error = "Unsupported character '%s' in '%s'." % (unsupported_char,
                                                                                 line)
                return
        # Special time definition 'nicknames' are supported which replace
        # the line elements with pre-determined values. These nicknames start
        # with the @ character. Check for any of these nicknames and substitute
        # the corresponding line. (.items() works on both Python 2 and 3,
        # unlike the Python-2-only .iteritems().)
        for nickname, nn_line in NICKNAME_MAP.items():
            if line == nickname:
                line = nn_line
                break
        fields = line.split(None, 5)
        if len(fields) < 5:
            # Not enough fields
            self.is_valid = False
            self.validation_error = "Insufficient fields found in '%s'" % line
            return
        elif len(fields) == 5:
            fields.append(None)
        # Extract individual line elements
        minutes, hours, dom, months, dow, _extra = fields
        # Save individual fields
        self.line = [minutes, hours, dom, months, dow]
        # Is DOM restricted ie is DOM not '*'
        self.dom_restrict = self.line[2] != '*'
        # Is DOW restricted ie is DOW not '*'
        self.dow_restrict = self.line[4] != '*'
        # Decode the line and generate a set of possible values for each field
        (self.is_valid, self.validation_error) = self.decode()

    def decode(self):
        """Decode each field and store the sets of valid values.

        Set of valid values is stored in self.decode. Self.decode can only be
        considered valid if self.is_valid is True. Returns a 2-way tuple
        (True|False, ERROR MESSAGE). First item is True is the line is valid
        otherwise False. ERROR MESSAGE is None if the line is valid otherwise a
        string containing a short error message.

        NOTE: the assignment below rebinds the name 'decode' on the instance,
        shadowing this method. That is safe because decode() is only ever
        called once, from __init__; do not call it a second time on the same
        instance.
        """

        # set a list to hold our decoded ranges
        self.decode = []
        try:
            # step through each field and its associated range, names and maps
            for field, span, names, mapp in zip(self.line, SPANS, NAMES, MAPS):
                field_set = self.parse_field(field, span, names, mapp)
                self.decode.append(field_set)
            # if we are this far then our line is valid so return True and no
            # error message
            return (True, None)
        except ValueError as e:
            # we picked up a ValueError in self.parse_field() so return False
            # and the error message. str() the exception so validation_error
            # is the string the docstring promises.
            return (False, str(e))

    def parse_field(self, field, span, names, mapp, is_rorl=False):
        """Return the set of valid values for a field.

        Parses and validates a field and if the field is valid returns a set
        containing all of the possible field values. Called recursively to
        parse sub-fields (eg lists of ranges). If a field is invalid a
        ValueError is raised.

        field:   String containing the raw field to be parsed.
        span:    Tuple representing the lower and upper numeric values the
                 field may take. Format is (lower, upper).
        names:   Tuple containing all valid named values for the field. For
                 numeric only fields the tuple is empty.
        mapp:    Tuple of 2 way tuples mapping named values to numeric
                 equivalents. Format is ((name1, numeric1), ..
                 (namex, numericx)). For numeric only fields the tuple is empty.
        is_rorl: Is field part of a range or list. Either True or False.
        """

        field = field.strip()
        if field == '*':  # first-last
            # simply return a set of all poss values
            return set(range(span[0], span[1] + 1))
        elif field.isdigit():  # just a number
            # If its a DOW then replace any 7s with 0
            _field = field.replace('7', '0') if span == DOW else field
            # its valid if its within our span
            if span[0] <= int(_field) <= span[1]:
                # it's valid so return the field itself as a set
                return set((int(_field), ))
            else:
                # invalid field value so raise ValueError
                raise ValueError("Invalid field value '%s' in '%s'" % (field,
                                                                       self.raw_line))
        elif field.lower() in names:  # an abbreviated name
            # abbreviated names are only valid if not used in a range or list
            if not is_rorl:
                # Replace the named value with its number. Work on the
                # lower-cased field so that mixed-case names (eg 'Jan',
                # 'SUN') are substituted correctly; the membership test
                # above is already case insensitive.
                _field = field.lower()
                for _name, _ord in mapp:
                    _field = _field.replace(_name, str(_ord))
                # its valid if its within our span
                if span[0] <= int(_field) <= span[1]:
                    # it's valid so return the field itself as a set
                    return set((int(_field), ))
                else:
                    # invalid field value so raise ValueError
                    raise ValueError("Invalid field value '%s' in '%s'" % (field,
                                                                           self.raw_line))
            else:
                # invalid use of abbreviated name so raise ValueError
                raise ValueError("Invalid use of abbreviated name '%s' in '%s'" % (field,
                                                                                   self.raw_line))
        elif ',' in field:  # we have a list
            # get the first list item and the rest of the list
            _first, _rest = field.split(',', 1)
            # get _first as a set using a recursive call
            _first_set = self.parse_field(_first, span, names, mapp, True)
            # get _rest as a set using a recursive call
            _rest_set = self.parse_field(_rest, span, names, mapp, True)
            # return the union of the _first and _rest sets
            return _first_set | _rest_set
        elif '/' in field:  # a step
            # get the value and the step
            _val, _step = field.split('/', 1)
            # step is valid if it is numeric
            if _step.isdigit():
                # get _val as a set using a recursive call
                _val_set = self.parse_field(_val, span, names, mapp, True)
                # get the set of all possible values using _step
                _lowest = min(_val_set)
                _step_set = set([x for x in _val_set if ((x - _lowest) % int(_step) == 0)])
                # return the intersection of the _val and _step sets
                return _val_set & _step_set
            else:
                # invalid step so raise ValueError
                raise ValueError("Invalid step value '%s' in '%s'" % (field,
                                                                      self.raw_line))
        elif '-' in field:  # we have a range
            # get the lo and hi values of the range
            lo, hi = field.split('-', 1)
            # if lo is numeric and in the span range then the range is valid if
            # hi is valid
            if lo.isdigit() and span[0] <= int(lo) <= span[1]:
                # if hi is numeric and in the span range and greater than or
                # equal to lo then the range is valid
                if hi.isdigit() and int(hi) >= int(lo) and span[0] <= int(hi) <= span[1]:
                    # valid range so return a set of the range
                    return set(range(int(lo), int(hi) + 1))
                else:
                    # something is wrong, we have an invalid field
                    raise ValueError("Invalid range specification '%s' in '%s'" % (field,
                                                                                   self.raw_line))
            else:
                # something is wrong with lo, we have an invalid field
                raise ValueError("Invalid range specification '%s' in '%s'" % (field,
                                                                               self.raw_line))
        else:
            # we have something I don't know how to parse so raise a ValueError
            raise ValueError("Invalid field '%s' in '%s'" % (field,
                                                             self.raw_line))

    def is_triggered(self, ts_hi, ts_lo=None):
        """Determine if CRON like line is to be triggered.

        Return True if line is triggered between timestamps ts_lo and ts_hi
        (exclusive on ts_lo inclusive on ts_hi), False if it is not
        triggered or None if the line is invalid or ts_hi is not valid.
        If ts_lo is not specified check for triggering on ts_hi only.

        ts_hi: Timestamp of latest time to be checked for triggering.
        ts_lo: Timestamp used for earliest time in range of times to be
               checked for triggering. May be omitted in which case only
               ts_hi is checked.
        """

        if self.is_valid and ts_hi is not None:
            # setup ts range to iterate over
            if ts_lo is None:
                _range = [int(ts_hi)]
            else:
                # CRON like line has a 1 min resolution so step backwards every
                # 60 sec.
                _range = range(int(ts_hi), int(ts_lo), -60)
            # Iterate through each ts in our range. All we need is one ts that
            # triggers the line.
            for _ts in _range:
                # convert ts to timetuple and extract required data
                trigger_dt = datetime.datetime.fromtimestamp(_ts)
                trigger_tt = trigger_dt.timetuple()
                month, dow, day, hour, minute = (trigger_tt.tm_mon,
                                                 (trigger_tt.tm_wday + 1) % 7,
                                                 trigger_tt.tm_mday,
                                                 trigger_tt.tm_hour,
                                                 trigger_tt.tm_min)
                # construct a tuple so we can iterate over and process each
                # field
                element_tuple = zip((minute, hour, day, month, dow),
                                    self.line,
                                    SPANS,
                                    self.decode)
                # Iterate over each field and check if it will prevent
                # triggering. Remember, we only need a match on either DOM or
                # DOW but all other fields must match.
                dom_match = False
                dom_restricted_match = False
                for period, _field, field_span, decode in element_tuple:
                    if period in decode:
                        # we have a match
                        if field_span == DOM:
                            # we have a match on DOM but we need to know if it
                            # was a match on a restricted DOM field
                            dom_match = True
                            dom_restricted_match = self.dom_restrict
                        elif field_span == DOW and not(dom_restricted_match or self.dow_restrict or dom_match):
                            break
                        continue
                    elif field_span == DOW and dom_restricted_match or field_span == DOM:
                        # No match but consider it a match if this field is DOW
                        # and we already have a DOM match. Also, if we didn't
                        # match on DOM then continue as we might match on DOW.
                        continue
                    else:
                        # The field will prevent the line from triggerring for
                        # this ts so we break and move to the next ts.
                        break
                else:
                    # If we arrived here then all fields match and the line
                    # would be triggered on this ts so return True.
                    return True
            # If we are here it is because we broke out of all inner for loops
            # and the line was not triggered so return False.
            return False
        else:
            # Our line is not valid or we do not have a timestamp to use,
            # return None
            return None
| [
"tom@tom.org"
] | tom@tom.org |
aaac2a8d988a604a4d19efaa994359ce8a18e87f | aaa6ae528d66e711f41699d6b6ee79fa059be4f8 | /satchmo/shipping/modules/tieredweightzone/migrations/0002_auto_20190417_1857.py | 693f7226b1d816c86b07f97d97552a1cbf02d836 | [
"BSD-2-Clause"
] | permissive | ToeKnee/jelly-roll | c23e1eac1c2983ede4259bd047578c404a8c72e0 | c2814749c547349ff63415bdc81f53eb1215c7c0 | refs/heads/master | 2020-05-21T22:34:00.399719 | 2020-02-03T20:20:02 | 2020-02-03T20:20:02 | 33,657,967 | 0 | 1 | null | 2015-07-21T20:36:13 | 2015-04-09T08:37:28 | Python | UTF-8 | Python | false | false | 1,617 | py | # Generated by Django 2.1.7 on 2019-04-17 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tieredweightzone', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='carrier',
name='delivery',
field=models.CharField(default='', max_length=200, verbose_name='Delivery Days'),
preserve_default=False,
),
migrations.AddField(
model_name='carrier',
name='description',
field=models.CharField(default='', max_length=200, verbose_name='Description'),
preserve_default=False,
),
migrations.AddField(
model_name='carrier',
name='method',
field=models.CharField(default='', help_text='i.e. US Mail', max_length=200, verbose_name='Method'),
preserve_default=False,
),
migrations.AddField(
model_name='carrier',
name='name',
field=models.CharField(default='', max_length=50, verbose_name='Carrier'),
preserve_default=False,
),
migrations.AddField(
model_name='zone',
name='description',
field=models.CharField(default='', max_length=200, verbose_name='Description'),
preserve_default=False,
),
migrations.AddField(
model_name='zone',
name='name',
field=models.CharField(default='', max_length=50, verbose_name='Zone'),
preserve_default=False,
),
]
| [
"tony@ynottony.net"
] | tony@ynottony.net |
a9c83352baf1af5398777f9338af863c8f4e6112 | bc25016fdae676eb7b000e59b8e823da6fefe157 | /servo/stm32uart.py | d0758b97afd2735e62bd0b36b9621b37d3a6cf0f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mmind/servo-hdctools | b275d98e8c71b46cfc326aad774ce83b29b55d58 | c7d50190837497dafc45f6efe18bf01d6e70cfd2 | refs/heads/master | 2020-06-24T20:41:19.110569 | 2016-11-28T13:04:07 | 2016-11-28T13:04:07 | 74,622,430 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,724 | py | # Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow creation of uart/console interface via stm32 usb endpoint."""
import errno
import exceptions
import logging
import os
import pty
import select
import sys
import termios
import threading
import time
import tty
import usb
import stm32usb
import uart
class SuartError(Exception):
  """Error raised by the Suart stm32 UART interface.

  Attributes:
    msg: human-readable description of the failure.
    value: numeric status code; 0 when no status applies.
  """

  def __init__(self, msg, value=0):
    """Record the message and status code, then init the base Exception.

    Args:
      msg: string, detailed description of the error.
      value: integer, non-zero status code of the error. Default=0.
    """
    self.msg = msg
    self.value = value
    super(SuartError, self).__init__(msg, value)
class Suart(uart.Uart):
"""Provide interface to stm32 serial usb endpoint."""
def __init__(self, vendor=0x18d1, product=0x501a, interface=0,
serialname=None, ftdi_context=None):
"""Suart contstructor.
Initializes stm32 USB stream interface.
Args:
vendor: usb vendor id of stm32 device
product: usb product id of stm32 device
interface: interface number of stm32 device to use
serialname: n/a. Defaults to None.
ftdi_context: n/a. Defaults to None.
Raises:
SuartError: If init fails
"""
super(Suart, self).__init__()
self._logger = logging.getLogger('Suart')
self._logger.debug('')
self._logger.debug('Suart opening %04x:%04x, intf %d, sn: %s' % (
vendor, product, interface, serialname))
self._susb = stm32usb.Susb(vendor=vendor, product=product,
interface=interface, serialname=serialname, logger=self._logger)
self._logger.debug("Set up stm32 uart")
def __del__(self):
"""Suart destructor."""
self._logger.debug('')
def run_rx_thread(self):
self._logger.debug('rx thread started on %s' % self.get_pty())
ep = select.epoll()
ep.register(self._ptym, select.EPOLLHUP)
while True:
events = ep.poll(0)
# Check if the pty is connected to anything, or hungup.
if not events:
try:
r = self._susb._read_ep.read(64, self._susb.TIMEOUT_MS)
if r:
os.write(self._ptym, r)
except Exception as e:
# If we miss some characters on pty disconnect, that's fine.
# ep.read() also throws USBError on timeout, which we discard.
if type(e) not in [exceptions.OSError, usb.core.USBError]:
self._logger.debug("rx %s: %s" % (self.get_pty(), e))
else:
time.sleep(.1)
def run_tx_thread(self):
self._logger.debug("tx thread started on %s" % self.get_pty())
ep = select.epoll()
ep.register(self._ptym, select.EPOLLHUP)
while True:
events = ep.poll(0)
# Check if the pty is connected to anything, or hungup.
if not events:
try:
r = os.read(self._ptym, 64)
if r:
self._susb._write_ep.write(r, self._susb.TIMEOUT_MS)
except Exception as e:
self._logger.debug("tx %s: %s" % (self.get_pty(), e))
else:
time.sleep(.1)
def run(self):
"""Creates pthreads to poll stm32 & PTY for data.
"""
self._logger.debug('')
m, s = os.openpty()
self._ptyname = os.ttyname(s)
self._logger.debug("PTY name: %s" % self._ptyname)
self._ptym = m
self._ptys = s
os.fchmod(s, 0o660)
# Change the owner and group of the PTY to the user who started servod.
try:
uid = int(os.environ.get('SUDO_UID', -1))
except TypeError:
uid = -1
try:
gid = int(os.environ.get('SUDO_GID', -1))
except TypeError:
gid = -1
os.fchown(s, uid, gid)
tty.setraw(self._ptym, termios.TCSADRAIN)
# Generate a HUP flag on pty slave fd.
os.fdopen(s).close()
self._logger.debug('stm32 uart pty is %s' % self.get_pty())
self._rx_thread = threading.Thread(target=self.run_rx_thread, args=[])
self._rx_thread.daemon = True
self._rx_thread.start()
self._tx_thread = threading.Thread(target=self.run_tx_thread, args=[])
self._tx_thread.daemon = True
self._tx_thread.start()
self._logger.debug('stm32 rx and tx threads started.')
def get_uart_props(self):
    """Report the (fixed) line settings of this UART bridge.

    The stm32 console link always runs at 115200 baud, 8 data bits, no
    parity, one stop bit; the values returned here are constants.

    Returns:
      dict where:
        baudrate: integer baud rate of the uart
        bits: integer data bits, 5|6|7|8
        parity: integer 0-2 (0: none, 1: odd, 2: even)
        sbits: integer stop bits, 0|1|2 (0: 1 bit, 1: 1.5 bits, 2: 2 bits)
    """
    self._logger.debug('')
    line_props = {'baudrate': 115200, 'bits': 8}
    line_props['parity'] = 0
    line_props['sbits'] = 1
    return line_props
def set_uart_props(self, line_props):
    """Validate requested UART line settings against the fixed configuration.

    Suart cannot reconfigure its line, so every requested property must
    already equal the current (default 115200,8n1) value reported by
    get_uart_props().

    Args:
      line_props: dict of requested settings; keys/values as documented
        in get_uart_props().

    Returns:
      True when all requested properties match the current configuration.

    Raises:
      SuartError: if any requested property differs from the current value.
    """
    self._logger.debug('')
    current = self.get_uart_props()
    for name, requested in line_props.items():
        have = current[name]
        if requested != have:
            raise SuartError("Line property %s cannot be set from %s to %s" % (
                name, have, requested))
    return True
def get_pty(self):
    """Path of the pty device backing this UART bridge.

    Returns:
      String path (e.g. '/dev/pts/4') of the pty slave created by run().
    """
    self._logger.debug('')
    pty_path = self._ptyname
    return pty_path
def test():
    """Manual smoke test: bring up a Suart bridge and idle forever."""
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s'
    loglevel = logging.INFO
    if True:
        # Debug toggle left on for interactive use.
        loglevel = logging.DEBUG
        log_fmt += ' - %(filename)s:%(lineno)d:%(funcName)s'
    log_fmt += ' - %(message)s'
    logging.basicConfig(level=loglevel, format=log_fmt)
    logger = logging.getLogger(os.path.basename(sys.argv[0]))
    logger.info('Start')
    sobj = Suart()
    sobj.run()
    logging.info('%s' % sobj.get_pty())
    # run() is a thread so just busy wait to mimic server
    while True:
        # ours sleeps to eleven!
        time.sleep(11)
if __name__ == '__main__':
    # Allow running this module directly for manual testing; exit quietly
    # on Ctrl-C instead of dumping a KeyboardInterrupt traceback.
    try:
        test()
    except KeyboardInterrupt:
        sys.exit(0)
| [
"chrome-bot@chromium.org"
] | chrome-bot@chromium.org |
374ab19d532b0e4f7999ecbd18bdc4861166f1e4 | 229924db99b28bc8ae88e8379252e0422119c3c6 | /logistic_sgd.py | 4192da4ece52edf5523269c09bf6ef7c9e341d3d | [] | no_license | ineedaspo1/CNN | 3ae4a9213762df627e1217ec526df1777651884b | 60cdf54947db7d3149cbae8a915c43ae53e81faa | refs/heads/master | 2021-06-01T03:43:25.853717 | 2016-07-18T00:29:26 | 2016-07-18T00:29:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,390 | py | """
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructedtext en'
import pickle
import gzip
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from sklearn import datasets
import sklearn
class LogisticRegression(object):
    """Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W`
    and bias vector :math:`b`. Classification is done by projecting data
    points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie
        """
        # start-snippet-1
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        # NOTE(review): zero init is fine here because plain logistic
        # regression is convex; it would not be for hidden layers.
        self.W = theano.shared(
            value=numpy.zeros(
                (n_in, n_out),
                dtype=theano.config.floatX
            ),
            name='W',
            borrow=True
        )
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(
            value=numpy.zeros(
                (n_out,),
                dtype=theano.config.floatX
            ),
            name='b',
            borrow=True
        )

        # symbolic expression for computing the matrix of class-membership
        # probabilities
        # Where:
        # W is a matrix where column-k represent the separation hyperplane for
        # class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represent the free parameter of
        # hyperplane-k
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # symbolic description of how to compute prediction as class whose
        # probability is maximal
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1

        # parameters of the model (used by callers to build gradient updates)
        self.params = [self.W, self.b]

        # keep track of model input
        self.input = input

    def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.

        .. math::

            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
                \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label

        Note: we use the mean instead of the sum so that
              the learning rate is less dependent on the batch size
        """
        # start-snippet-2
        # y.shape[0] is (symbolically) the number of rows in y, i.e.,
        # number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain
        # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
        # Log-Probabilities (call it LP) with one row per example and
        # one column per class LP[T.arange(y.shape[0]),y] is a vector
        # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
        # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
        # the mean (across minibatch examples) of the elements in v,
        # i.e., the mean log-likelihood across the minibatch.
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
        # end-snippet-2

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
def load_data(n_train=200, n_valid=100, n_test=100):
    """Fetch the Olivetti faces dataset and wrap it in Theano shared variables.

    The shuffled face images are split into consecutive train / validation /
    test slices.  The defaults (200/100/100) reproduce the original
    hard-coded split of the 400 available images, so existing callers are
    unaffected.

    :param n_train: number of examples in the training set.
    :param n_valid: number of examples in the validation set.
    :param n_test: number of examples in the test set.
    :return: list of three (x, y) pairs -- (train, valid, test) -- where x
        is a Theano shared matrix whose rows are examples and y is a shared
        vector of integer labels (cast to int32) of the same length.
    """
    # NOTE(review): fetch_olivetti_faces(shuffle=True) shuffles without a
    # fixed random_state, so the split differs between runs — confirm
    # whether reproducibility matters to callers.
    face = sklearn.datasets.fetch_olivetti_faces(shuffle=True)
    train_end = n_train
    valid_end = n_train + n_valid
    test_end = valid_end + n_test
    train_set = (face.data[0:train_end, ], face.target[0:train_end, ])
    valid_set = (face.data[train_end:valid_end, ], face.target[train_end:valid_end, ])
    test_set = (face.data[valid_end:test_end, ], face.target[valid_end:test_end, ])

    # train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix) in which each row
    # corresponds to an example. target is a numpy.ndarray of 1 dimension
    # (a vector) with the same length as the number of rows in the input;
    # it gives the label of the example with the same index in the input.

    def shared_dataset(data_xy, borrow=True):
        """ Function that loads the dataset into shared variables

        The reason we store our dataset in shared variables is to allow
        Theano to copy it into the GPU memory (when code is run on GPU).
        Since copying data into the GPU is slow, copying a minibatch everytime
        is needed (the default behaviour if the data is not in a shared
        variable) would lead to a large decrease in performance.
        """
        data_x, data_y = data_xy
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        # When storing data on the GPU it has to be stored as floats,
        # therefore the labels are stored as ``floatX`` as well. But during
        # computations they are needed as ints (labels are used as indices),
        # so return an int32 cast of ``shared_y`` instead of the raw
        # shared variable.
        return shared_x, T.cast(shared_y, 'int32')

    test_set_x, test_set_y = shared_dataset(test_set)
    valid_set_x, valid_set_y = shared_dataset(valid_set)
    train_set_x, train_set_y = shared_dataset(train_set)

    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=100,
                           batch_size=20):
    """
    Demonstrate stochastic gradient descent optimization of a log-linear
    model.

    Despite the historical name, this is demonstrated on the Olivetti
    faces dataset (64x64 = 4096 inputs, 40 classes) loaded by load_data(),
    not on MNIST.  Trains with minibatch SGD and early stopping, and
    pickles the best classifier to 'workfile.pkl'.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type batch_size: int
    :param batch_size: number of examples per minibatch
    """
    datasets = load_data()

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = int(train_set_x.get_value(borrow=True).shape[0] / batch_size)
    print(n_train_batches)
    n_valid_batches = int(valid_set_x.get_value(borrow=True).shape[0] / batch_size)
    n_test_batches = int(test_set_x.get_value(borrow=True).shape[0] / batch_size)

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('... building the model')

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # generate symbolic variables for input (x and y represent a
    # minibatch)
    x = T.matrix('x')  # data, presented as rasterized images
    y = T.ivector('y')  # labels, presented as 1D vector of [int] labels

    # construct the logistic regression class
    # Each Olivetti face image has size 64*64 = 4096; there are 40 subjects.
    classifier = LogisticRegression(input=x, n_in=4096, n_out=40)

    # the cost we minimize during training is the negative log likelihood of
    # the model in symbolic format
    cost = classifier.negative_log_likelihood(y)

    # compiling a Theano function that computes the mistakes that are made by
    # the model on a minibatch
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # compute the gradient of cost with respect to theta = (W,b)
    g_W = T.grad(cost=cost, wrt=classifier.W)
    g_b = T.grad(cost=cost, wrt=classifier.b)

    # start-snippet-3
    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs.
    updates = [(classifier.W, classifier.W - learning_rate * g_W),
               (classifier.b, classifier.b - learning_rate * g_b)]

    # compiling a Theano function `train_model` that returns the cost, but in
    # the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-3

    ###############
    # TRAIN MODEL #
    ###############
    print('... training the model')
    # early-stopping parameters
    patience = 400  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    # NOTE(review): patience / 2 is a float on Python 3, making
    # validation_frequency a float; the modulo test below still works but
    # an integer (patience // 2) would be cleaner — confirm before changing.
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatche before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = timeit.default_timer()

    done_looping = False
    epoch = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):

            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i)
                                     for i in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print((
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                ))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                            improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss

                    # test it on the test set
                    test_losses = [test_model(i)
                                   for i in range(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print((
                        (
                            '     epoch %i, minibatch %i/%i, test error of'
                            ' best model %f %%'
                        ) %
                        (
                            epoch,
                            minibatch_index + 1,
                            n_train_batches,
                            test_score * 100.
                        )
                    ))

                    # snapshot the best model seen so far
                    with open('workfile.pkl', 'wb') as f:
                        pickle.dump(classifier, f)

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print((
        (
            'Optimization complete with best validation score of %f %%,'
            'with test performance %f %%'
        )
        % (best_validation_loss * 100., test_score * 100.)
    ))
    print('The code run for %d epochs, with %f epochs/sec' % (
        epoch, 1. * epoch / (end_time - start_time)))
    print(('The code for file ' +
           ' ran for %.1fs' % ((end_time - start_time))), file=sys.stderr)
| [
"volpato30@gmail.com"
] | volpato30@gmail.com |
2cdf189fecef26456d0e4f7217c73f9b92a27121 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/networkcloud/azure-mgmt-networkcloud/azure/mgmt/networkcloud/operations/_cloud_services_networks_operations.py | b61b0103213a19dd43fef2433dc9879e2b3810ca | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 52,841 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request that lists cloud services networks in a subscription.

    Auto-generated (AutoRest); substitutes the subscription into the ARM
    route template and attaches the api-version query parameter.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-07-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.NetworkCloud/cloudServicesNetworks"
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request that lists cloud services networks in a resource group.

    Auto-generated (AutoRest); validates the resource group name length
    (1-90 chars) via the serializer before formatting the ARM route.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-07-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
    resource_group_name: str, cloud_services_network_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single cloud services network resource.

    Auto-generated (AutoRest); the network name must match the ARM naming
    pattern enforced by the serializer below.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-07-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "cloudServicesNetworkName": _SERIALIZER.url(
            "cloud_services_network_name",
            cloud_services_network_name,
            "str",
            pattern=r"^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
        ),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str, cloud_services_network_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or updates a cloud services network.

    Auto-generated (AutoRest); unlike the GET builders this also forwards
    an optional Content-Type header for the request body.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-07-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "cloudServicesNetworkName": _SERIALIZER.url(
            "cloud_services_network_name",
            cloud_services_network_name,
            "str",
            pattern=r"^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
        ),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str, cloud_services_network_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for a cloud services network resource.

    Auto-generated (AutoRest); same URL/validation rules as build_get_request.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-07-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "cloudServicesNetworkName": _SERIALIZER.url(
            "cloud_services_network_name",
            cloud_services_network_name,
            "str",
            pattern=r"^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
        ),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
    resource_group_name: str, cloud_services_network_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that applies a partial update to a cloud services network.

    Auto-generated (AutoRest); forwards an optional Content-Type header for
    the patch body, same validation rules as the other builders.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-07-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "cloudServicesNetworkName": _SERIALIZER.url(
            "cloud_services_network_name",
            cloud_services_network_name,
            "str",
            pattern=r"^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
        ),
    }

    _url: str = _url.format(**path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
class CloudServicesNetworksOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.networkcloud.NetworkCloudMgmtClient`'s
:attr:`cloud_services_networks` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
    """Internal plumbing wired up by the generated client.

    Positional args arrive in the fixed order (client, config, serializer,
    deserializer); each falls back to the same-named keyword argument.
    """
    input_args = list(args)
    self._client = input_args.pop(0) if input_args else kwargs.pop("client")
    self._config = input_args.pop(0) if input_args else kwargs.pop("config")
    self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
    self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> Iterable["_models.CloudServicesNetwork"]:
    """List cloud services networks in the subscription.

    Get a list of cloud services networks in the provided subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CloudServicesNetwork or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
    cls: ClsType[_models.CloudServicesNetworkList] = kwargs.pop("cls", None)

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    # Builds either the first-page request (from the route template) or a
    # follow-up request from the server-provided nextLink.
    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_by_subscription_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_subscription.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    # Deserializes one page into (next_link, iterator-of-items).
    def extract_data(pipeline_response):
        deserialized = self._deserialize("CloudServicesNetworkList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)  # type: ignore
        return deserialized.next_link or None, iter(list_of_elem)

    # Fetches one page; called lazily by ItemPaged as the caller iterates.
    def get_next(next_link=None):
        request = prepare_request(next_link)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list_by_subscription.metadata = {
    "url": "/subscriptions/{subscriptionId}/providers/Microsoft.NetworkCloud/cloudServicesNetworks"
}
@distributed_trace
def list_by_resource_group(
    self, resource_group_name: str, **kwargs: Any
) -> Iterable["_models.CloudServicesNetwork"]:
    """List cloud services networks in the resource group.

    Get a list of cloud services networks in the provided resource group.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
     Required.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CloudServicesNetwork or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
    cls: ClsType[_models.CloudServicesNetworkList] = kwargs.pop("cls", None)

    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    # Builds either the first-page request (from the route template) or a
    # follow-up request from the server-provided nextLink.
    def prepare_request(next_link=None):
        if not next_link:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_resource_group.metadata["url"],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)

        else:
            # make call to next link with the client's api-version
            _parsed_next_link = urllib.parse.urlparse(next_link)
            _next_request_params = case_insensitive_dict(
                {
                    key: [urllib.parse.quote(v) for v in value]
                    for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                }
            )
            _next_request_params["api-version"] = self._config.api_version
            request = HttpRequest(
                "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    # Deserializes one page into (next_link, iterator-of-items).
    def extract_data(pipeline_response):
        deserialized = self._deserialize("CloudServicesNetworkList", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)  # type: ignore
        return deserialized.next_link or None, iter(list_of_elem)

    # Fetches one page; called lazily by ItemPaged as the caller iterates.
    def get_next(next_link=None):
        request = prepare_request(next_link)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(get_next, extract_data)

list_by_resource_group.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks"
}
@distributed_trace
def get(
self, resource_group_name: str, cloud_services_network_name: str, **kwargs: Any
) -> _models.CloudServicesNetwork:
"""Retrieve the cloud services network.
Get properties of the provided cloud services network.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param cloud_services_network_name: The name of the cloud services network. Required.
:type cloud_services_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CloudServicesNetwork or the result of cls(response)
:rtype: ~azure.mgmt.networkcloud.models.CloudServicesNetwork
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.CloudServicesNetwork] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
cloud_services_network_name=cloud_services_network_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
}
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_parameters: Union[_models.CloudServicesNetwork, IO],
        **kwargs: Any
    ) -> _models.CloudServicesNetwork:
        """Issue the initial PUT of the create-or-update long-running operation.

        Called by :meth:`begin_create_or_update`. Returns the service's immediate
        response: 200 means the resource state is already final, 201 means the
        operation continues asynchronously (the Azure-AsyncOperation header is
        surfaced for the poller).
        """
        # Baseline status-code -> exception mapping; callers may extend or
        # override it via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call overrides fall back to the client-wide configuration.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.CloudServicesNetwork] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw byte streams are forwarded untouched; model objects are
        # serialized to their JSON wire format.
        if isinstance(cloud_services_network_parameters, (IOBase, bytes)):
            _content = cloud_services_network_parameters
        else:
            _json = self._serialize.body(cloud_services_network_parameters, "CloudServicesNetwork")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            cloud_services_network_name=cloud_services_network_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        _stream = False
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        response_headers = {}
        if response.status_code == 200:
            deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)

        if response.status_code == 201:
            # Async acceptance: expose the operation-status URL for the LRO poller.
            response_headers["Azure-AsyncOperation"] = self._deserialize(
                "str", response.headers.get("Azure-AsyncOperation")
            )

            deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)  # type: ignore

        return deserialized  # type: ignore

    _create_or_update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
    }
    # Typing-only overload (no runtime body): request body given as a
    # CloudServicesNetwork model, serialized to JSON.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_parameters: _models.CloudServicesNetwork,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.CloudServicesNetwork]:
        """Create or update the cloud services network.

        Create a new cloud services network or update the properties of the existing cloud services
        network.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :param cloud_services_network_parameters: The request body. Required.
        :type cloud_services_network_parameters: ~azure.mgmt.networkcloud.models.CloudServicesNetwork
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either CloudServicesNetwork or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing-only overload (no runtime body): request body given as a raw
    # IO stream, sent as-is.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.CloudServicesNetwork]:
        """Create or update the cloud services network.

        Create a new cloud services network or update the properties of the existing cloud services
        network.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :param cloud_services_network_parameters: The request body. Required.
        :type cloud_services_network_parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either CloudServicesNetwork or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_parameters: Union[_models.CloudServicesNetwork, IO],
        **kwargs: Any
    ) -> LROPoller[_models.CloudServicesNetwork]:
        """Create or update the cloud services network.

        Create a new cloud services network or update the properties of the existing cloud services
        network.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :param cloud_services_network_parameters: The request body. Is either a CloudServicesNetwork
         type or a IO type. Required.
        :type cloud_services_network_parameters: ~azure.mgmt.networkcloud.models.CloudServicesNetwork
         or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either CloudServicesNetwork or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.CloudServicesNetwork] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                cloud_services_network_name=cloud_services_network_name,
                cloud_services_network_parameters=cloud_services_network_parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # capture the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the final model (or the
            # caller's custom ``cls`` result).
            deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling driven by the
        # Azure-AsyncOperation header; False -> return immediately; anything
        # else is treated as a caller-supplied PollingMethod.
        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
    }
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, cloud_services_network_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
cloud_services_network_name=cloud_services_network_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
}
    @distributed_trace
    def begin_delete(
        self, resource_group_name: str, cloud_services_network_name: str, **kwargs: Any
    ) -> LROPoller[None]:
        """Delete the cloud services network.

        Delete the provided cloud services network.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                cloud_services_network_name=cloud_services_network_name,
                api_version=api_version,
                cls=lambda x, y, z: x,  # capture the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # DELETE produces no body to deserialize; only invoke a custom cls hook.
            if cls:
                return cls(pipeline_response, None, {})

        # Deletion is polled via the Location header (see the 202 handling in
        # _delete_initial), hence "final-state-via": "location".
        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
    }
def _update_initial(
self,
resource_group_name: str,
cloud_services_network_name: str,
cloud_services_network_update_parameters: Optional[
Union[_models.CloudServicesNetworkPatchParameters, IO]
] = None,
**kwargs: Any
) -> _models.CloudServicesNetwork:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CloudServicesNetwork] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(cloud_services_network_update_parameters, (IOBase, bytes)):
_content = cloud_services_network_update_parameters
else:
if cloud_services_network_update_parameters is not None:
_json = self._serialize.body(
cloud_services_network_update_parameters, "CloudServicesNetworkPatchParameters"
)
else:
_json = None
request = build_update_request(
resource_group_name=resource_group_name,
cloud_services_network_name=cloud_services_network_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)
if response.status_code == 202:
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
}
    # Typing-only overload (no runtime body): patch body given as a
    # CloudServicesNetworkPatchParameters model, serialized to JSON.
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_update_parameters: Optional[_models.CloudServicesNetworkPatchParameters] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.CloudServicesNetwork]:
        """Patch the cloud services network.

        Update properties of the provided cloud services network, or update the tags associated with
        it. Properties and tag updates can be done independently.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :param cloud_services_network_update_parameters: The request body. Default value is None.
        :type cloud_services_network_update_parameters:
         ~azure.mgmt.networkcloud.models.CloudServicesNetworkPatchParameters
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either CloudServicesNetwork or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typing-only overload (no runtime body): patch body given as a raw IO
    # stream, sent as-is.
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_update_parameters: Optional[IO] = None,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.CloudServicesNetwork]:
        """Patch the cloud services network.

        Update properties of the provided cloud services network, or update the tags associated with
        it. Properties and tag updates can be done independently.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :param cloud_services_network_update_parameters: The request body. Default value is None.
        :type cloud_services_network_update_parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either CloudServicesNetwork or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        cloud_services_network_name: str,
        cloud_services_network_update_parameters: Optional[
            Union[_models.CloudServicesNetworkPatchParameters, IO]
        ] = None,
        **kwargs: Any
    ) -> LROPoller[_models.CloudServicesNetwork]:
        """Patch the cloud services network.

        Update properties of the provided cloud services network, or update the tags associated with
        it. Properties and tag updates can be done independently.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param cloud_services_network_name: The name of the cloud services network. Required.
        :type cloud_services_network_name: str
        :param cloud_services_network_update_parameters: The request body. Is either a
         CloudServicesNetworkPatchParameters type or a IO type. Default value is None.
        :type cloud_services_network_update_parameters:
         ~azure.mgmt.networkcloud.models.CloudServicesNetworkPatchParameters or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either CloudServicesNetwork or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.networkcloud.models.CloudServicesNetwork]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.CloudServicesNetwork] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                cloud_services_network_name=cloud_services_network_name,
                cloud_services_network_update_parameters=cloud_services_network_update_parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # capture the raw PipelineResponse for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the final model (or the
            # caller's custom ``cls`` result).
            deserialized = self._deserialize("CloudServicesNetwork", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling driven by the
        # Azure-AsyncOperation header; False -> return immediately; anything
        # else is treated as a caller-supplied PollingMethod.
        if polling is True:
            polling_method: PollingMethod = cast(
                PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
            )
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/cloudServicesNetworks/{cloudServicesNetworkName}"
    }
| [
"noreply@github.com"
] | Azure.noreply@github.com |
5252b25fafef0707d008c0d81a0299eea6cd5383 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/python/kernel_tests/where_op_test.py | 17575da6f1bf2c226a67419b4bc8156f70f6dedc | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 5,579 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None):
with self.test_session(use_gpu=True):
ans = array_ops.where(x)
self.assertEqual([None, x.ndim], ans.get_shape().as_list())
if expected_err_re is None:
tf_ans = ans.eval()
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
def testWrongNumbers(self):
with self.test_session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.where([False, True], [1, 2], None)
with self.assertRaises(ValueError):
array_ops.where([False, True], None, [1, 2])
def testBasicVec(self):
x = np.asarray([True, False])
truth = np.asarray([[0]], dtype=np.int64)
self._testWhere(x, truth)
x = np.asarray([False, True, False])
truth = np.asarray([[1]], dtype=np.int64)
self._testWhere(x, truth)
x = np.asarray([False, False, True, False, True])
truth = np.asarray([[2], [4]], dtype=np.int64)
self._testWhere(x, truth)
def testRandomVec(self):
x = np.random.rand(1000000) > 0.5
truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
self._testWhere(x, truth)
def testBasicMat(self):
x = np.asarray([[True, False], [True, False]])
# Ensure RowMajor mode
truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
self._testWhere(x, truth)
def testBasic3Tensor(self):
x = np.asarray([[[True, False], [True, False]],
[[False, True], [False, True]],
[[False, False], [False, True]]])
# Ensure RowMajor mode
truth = np.asarray(
[[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth)
def _testRandom(self, dtype, expected_err_re=None):
shape = [127, 33, 53]
x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
x = (np.random.randn(*shape) > 0).astype(dtype)
truth = np.where(np.abs(x) > 0) # Tuples of indices by axis.
truth = np.vstack(truth).T # Convert to [num_true, indices].
self._testWhere(x, truth, expected_err_re)
def testRandomBool(self):
self._testRandom(np.bool)
def testRandomInt32(self):
self._testRandom(np.int32)
def testRandomInt64(self):
self._testRandom(np.int64)
def testRandomFloat(self):
self._testRandom(np.float32)
def testRandomDouble(self):
self._testRandom(np.float64)
def testRandomComplex64(self):
self._testRandom(np.complex64)
def testRandomComplex128(self):
self._testRandom(np.complex128)
def testRandomUint8(self):
self._testRandom(np.uint8)
def testRandomInt8(self):
self._testRandom(np.int8)
def testRandomInt16(self):
self._testRandom(np.int16)
def testThreeArgument(self):
    """Three-argument form: tf.where(cond, x, y) must match np.where selection."""
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.test_session(use_gpu=True):
        tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
    """Throughput benchmark for tf.where over a grid of sizes and densities."""

    def benchmarkWhere(self):
        # m*n is the mask size, p the expected fraction of True entries.
        for (m, n, p, use_gpu) in itertools.product(
                [10],
                [10, 100, 1000, 10000, 100000, 1000000],
                [0.01, 0.5, 0.99],
                [False, True]):
            name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
            device = "/%s:0" % ("gpu" if use_gpu else "cpu")
            with ops.Graph().as_default():
                with ops.device(device):
                    # Boolean mask with density ~p, held in a variable so the
                    # benchmarked op reads stable input.
                    x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
                    v = resource_variable_ops.ResourceVariable(x)
                    op = array_ops.where(v)
                with session.Session() as sess:
                    v.initializer.run()
                    r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
                    gb_processed_input = m * n / 1.0e9
                    # approximate size of output: m*n*p int64s for each axis.
                    gb_processed_output = 2 * 8 * m * n * p / 1.0e9
                    gb_processed = gb_processed_input + gb_processed_output
                    throughput = gb_processed / r["wall_time"]
                    print("Benchmark: %s \t wall_time: %0.03g s \t "
                          "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
                    sys.stdout.flush()
if __name__ == "__main__":
test.main()
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
9cb4ecfd020a49a4c42e9f0db47935b6e84e0704 | 13a4df75e81ee4330a197340a300ec0755247a93 | /Strings/4.balanceStrings.py | 7e038243f1e6a517488ccb561510ce15d9197e87 | [] | no_license | ltfafei/py_Leetcode_study | d22955380bf9f134bc9cb215fea73ec4f9ea94cf | 0fd1bca56a621001cf9093f60941c4bfed4c79a5 | refs/heads/master | 2023-07-13T18:15:59.098314 | 2021-08-30T15:11:17 | 2021-08-30T15:11:17 | 363,597,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | #!/usr/bin/python
# Env: python3
# Rewrite by afei_0and1
'''
4.1、平衡字符串
现在输入一个只包含L和R的字符串,并其中L与R的个数是相等的。符合这种输入条件的字符串称之为”平衡字符串“。
要求通过编程对输入的平衡字符串进行分割,尽可能多的分割出平衡字符串子串,并将可以得到的子串数量返回。
例如:输入:RLLLRRRL,将返回结果:3,其可以分割成:RL、LLRR和RL;输入:LLLRRR将返回结果:1,因为其只能分割
出LLLRRR。
'''
def balanceStrings(string):
    """Greedily split `string` into as many balanced (equal numbers of 'L'
    and 'R') substrings as possible and return how many pieces that yields.
    """
    pieces = 0   # number of balanced substrings cut so far
    balance = 0  # (#L - #R) seen since the last cut
    for ch in string:
        if ch == "L":
            balance += 1
        if ch == "R":
            balance -= 1
        # A zero balance means the chunk since the previous cut is balanced,
        # so cut here (checked after every character, matching the original).
        if balance == 0:
            pieces += 1
    return pieces
print(balanceStrings("RLLLRRRL"))
'''
Output result:
3
'''
'''
4.2、分割回文字符串
要求输入一个字符串,将此字符串分割成一些子串,使得每个子串都是回文字符串(单字符的字符串也属于
回文字符串)。要求通过编程将所有的分割结果返回。例如:输入字符串“abb”,返回
[
["a", "b", "b"], ["a", "bb"]
]这个二维列表作为答案(列表中元素位置可以变动)。
'''
def isPlalind(string):
    """Return True when `string` reads the same forwards and backwards."""
    return string == "".join(reversed(string))
# start: index where the next palindromic piece begins; string: the input;
# l: palindromic pieces accumulated along the current recursion branch;
# res: output list collecting every complete partition.
def cut_plalindString(start, string, l, res):
    """Recursively enumerate every partition of string[start:] into palindromes."""
    if start > len(string) - 1:
        # The whole string is consumed: the accumulated pieces form one answer.
        res.append(list(l))
        return
    # Try every possible end position for the piece starting at `start`.
    for index in range(start+1, len(string)+1):
        strings = string[start:index]
        if isPlalind(strings):
            cut_plalindString(index, string, l+[string[start:index]], res)
def func(string):
    """Return all palindromic partitions of `string` as a list of lists."""
    res = []
    cut_plalindString(0, string, [], res)
    return res
print(func("abb"))
'''
Output result:
[['a', 'b', 'b'], ['a', 'bb']]
''' | [
"m18479685120@163.com"
] | m18479685120@163.com |
854703fda0ce649f68979599bf1e07ee0f3ca0ee | 40125ea7386e269bbae2425a318a3fd2e8571cb3 | /src/ie/urls_library.py | d191538ba4416aa1650b93c462b5b0788bef5722 | [
"MIT"
] | permissive | compressore/moc | bb160a308562e6e57fe4300a8d8a6ee00a59e785 | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | refs/heads/master | 2023-02-11T18:13:44.096427 | 2021-01-06T11:08:02 | 2021-01-06T11:08:02 | 327,283,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """ Project-specific URL Configuration
See urls.py for more information
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from core.admin import admin_site
# Name of the project that should be mounted at the site root.
current_site = "library"

urlpatterns = [
    path("admin/", admin_site.urls),
    path("tinymce/", include("tinymce.urls")),
]

# Mount every project listed in settings.PROJECT_LIST; the current site is
# served from the root URL while the other projects keep their configured
# URL prefix.
for key, value in settings.PROJECT_LIST.items():
    if key == current_site:
        # This makes the current site be mounted on the root directory
        get_path = ""
    else:
        get_path = value["url"]
path(get_path, include(key+".urls")),
]
| [
"paul@penguinprotocols.com"
] | paul@penguinprotocols.com |
aca32633a7afe077270b8ec6cb5ecd7dd189ccc3 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/sacremoses/__init__.py | 48cc9af42c69614012407cbaee0083bb18bc67f6 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4b560216eecc87542d164c228258494730d7eeb75b4bddbd9bf242ff0b88cfb8
size 196
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
c664d8e4031d11d62a0818bb29281830bbbd6ece | a6194c0c20dc09a115f54ebd6a02fbbb55e206e8 | /dqn_agent/q_network.py | 3970d8bc020f04f36e19d8113c5b9155449e1612 | [] | no_license | Rowing0914/tf_agent_investigation | c1149c1b7371c070ef8513e7bf0fd63a48d33cee | dbce3862abf12a21115e67e6391314f8d866b658 | refs/heads/master | 2020-08-13T13:53:58.249071 | 2019-10-14T07:56:40 | 2019-10-14T07:56:40 | 214,979,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | from __future__ import absolute_import, division, print_function
import gin, tensorflow as tf
from tf_agents.networks import encoding_network, network
def validate_specs(action_spec, observation_spec):
    """Validate that `action_spec` describes a single scalar (or length-1) action.

    Args:
      action_spec: (possibly nested) action spec; must flatten to one spec
        whose shape is () or (1,).
      observation_spec: unused, kept for signature compatibility.

    Raises:
      ValueError: if the spec contains more than one action or an
        unsupported shape.
    """
    del observation_spec  # Unused.
    flat_action_spec = tf.nest.flatten(action_spec)
    if len(flat_action_spec) > 1:
        raise ValueError("Network only supports action_specs with a single action.")
    if flat_action_spec[0].shape not in [(), (1,)]:
        # Fixed the unbalanced parenthesis in the original message.
        raise ValueError("Network only supports action_specs with shape in [(), (1,)].")
@gin.configurable
class QNetwork(network.Network):
    """Feed-forward Q-network: an EncodingNetwork trunk followed by a dense
    layer emitting one Q-value per discrete action."""

    def __init__(self,
                 input_tensor_spec,
                 action_spec,
                 preprocessing_layers=None,
                 preprocessing_combiner=None,
                 conv_layer_prarams=None,
                 fc_layer_params=(75, 40),
                 dropout_layer_params=None,
                 activation_fn=tf.keras.activations.relu,
                 kernel_initializer=None,
                 batch_squash=True,
                 dtype=tf.float32,
                 name="QNetwork"):
        # NOTE(review): `conv_layer_prarams` is a typo for conv_layer_params;
        # renaming it would break keyword callers, so it is kept as-is.
        validate_specs(action_spec, input_tensor_spec)
        action_spec = tf.nest.flatten(action_spec)[0]
        # Number of discrete actions in the inclusive range [minimum, maximum].
        num_actions = action_spec.maximum - action_spec.minimum + 1
        encoder_input_tensor_spec = input_tensor_spec
        encoder = encoding_network.EncodingNetwork(
            encoder_input_tensor_spec,
            preprocessing_layers=preprocessing_layers,
            preprocessing_combiner=preprocessing_combiner,
            conv_layer_params=conv_layer_prarams,
            fc_layer_params=fc_layer_params,
            dropout_layer_params=dropout_layer_params,
            activation_fn=activation_fn,
            kernel_initializer=kernel_initializer,
            batch_squash=batch_squash,
            dtype=dtype)
        # NOTE(review): minval == maxval == 0.03 below, so the initializer is
        # effectively constant; this was probably meant to be minval=-0.03 —
        # confirm before changing.
        q_value_layer = tf.keras.layers.Dense(
            num_actions,
            activation=None,
            kernel_initializer=tf.compat.v1.initializers.random_uniform(
                minval=0.03, maxval=0.03),
            bias_initializer=tf.compat.v1.initializers.constant(-0.2),
            dtype=dtype)
        super(QNetwork, self).__init__(input_tensor_spec=input_tensor_spec,
                                       state_spec=(),
                                       name=name)
        self._encoder = encoder
        self._q_value_layer = q_value_layer

    def call(self, observation, step_type=None, network_state=()):
        """Return (q_values, network_state) for a batch of observations."""
        state, network_state = self._encoder(
            observation, step_type=step_type, network_state=network_state)
return self._q_value_layer(state), network_state | [
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
e85ecaabccae9cff03474e2aadde53d65568a73b | c5dab033cbdfa9c4b9b4d1912ed09fe0fe34ec40 | /duc.py | f15913a7051674f8a4ee9641497dc2f7bca08417 | [] | no_license | bmwant/duc | 73019442b28e61af2903d61f1ceb204a0cc3ff34 | ccd7cb4175ec00ccf49b77f38b0a7e84e2a1e759 | refs/heads/master | 2020-12-31T05:25:45.022694 | 2015-05-28T09:32:49 | 2015-05-28T09:32:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,930 | py | # -*- coding: utf-8 -*-
__author__ = 'Most Wanted'
import logging
from collections import UserDict, UserList
from cerberus import Validator
logging.basicConfig(level=logging.DEBUG)
class DottedList(UserList):
    """
    Dotted list allows to use dot-notation when accessing list items by index: e.g.
    arr = ['a', 'b', 'c']
    arr[0] -> 'a'
    arr.e1 -> 'b'
    arr.e2 = 'd'
    arr[2] -> 'd'
    """

    def __init__(self, arr=None):
        # Recursively wrap nested containers so dot-access works at any depth.
        if arr is not None and isinstance(arr, list):
            super().__init__()
            for elem in arr:
                if isinstance(elem, list):
                    self.data.append(DottedList(elem))
                elif isinstance(elem, dict):
                    self.data.append(DottedDict(elem))
                else:
                    self.data.append(elem)
        else:
            super().__init__(arr)

    @staticmethod
    def _index_from_key(key):
        """Return the integer N for keys of the form 'eN', else None."""
        if key[:1] == 'e':
            try:
                return int(key[1:])
            except ValueError:
                pass
        return None

    def __getattr__(self, key):
        index = self._index_from_key(key)
        if index is not None:
            return self.data[index]
        # Bug fix: the original dropped the `return` here, so a successful
        # fallback lookup would have yielded None instead of the attribute.
        return super().__getattribute__(key)

    def __setattr__(self, key, value):
        index = self._index_from_key(key)
        if index is not None:
            # Wrap containers for consistency with __init__ (the original
            # handled lists here but silently stored raw dicts).
            if isinstance(value, list):
                self.data[index] = DottedList(value)
            elif isinstance(value, dict):
                self.data[index] = DottedDict(value)
            else:
                self.data[index] = value
        else:
            super().__setattr__(key, value)
class DottedDict(UserDict):
    """
    Dotted dictionary provides accessing dictionary items by dot-style attribute accessing
    """

    def __init__(self, data=None):
        # Recursively wrap nested containers so dot-access works at any depth.
        if data is not None and isinstance(data, dict):
            super().__init__()
            for key, value in data.items():
                if isinstance(value, dict):
                    self.data[key] = DottedDict(value)
                elif isinstance(value, list):
                    self.data[key] = DottedList(value)
                else:
                    self.data[key] = value
        else:
            super().__init__(data)

    def __getattr__(self, key):
        try:
            return self.__dict__['data'][key]
        except KeyError:
            pass
        # Bug fix: the original dropped the `return` here, so an attribute
        # found by the fallback lookup would have come back as None.
        return super().__getattribute__(key)

    def __setitem__(self, name, value):
        # Keep nested containers dot-accessible on assignment as well.
        if isinstance(value, dict):
            self.__dict__['data'][name] = DottedDict(value)
        elif isinstance(value, list):
            self.__dict__['data'][name] = DottedList(value)
        else:
            self.__dict__['data'][name] = value

    def __setattr__(self, key, value):
        # Before `data` exists (i.e. during UserDict.__init__) fall back to
        # normal attribute assignment; afterwards attributes become items.
        if 'data' in self.__dict__:
            self.__setitem__(key, value)
        else:
            super().__setattr__(key, value)

    def __delattr__(self, item):
        # Deleting an attribute removes the matching item when present.
        if item in self.data:
            del self.data[item]
        else:
            super().__delattr__(item)
class Duc(object):
    """
    Trans Duc er: accepts some data, validates it if needed, applies the
    transformations described by the schema and exposes the transformed data
    (``result``) plus the subset selected for storage (``out``).
    """

    def __init__(self, transduce_schema, data=None):
        """Validate the schema layout and remember the input data.

        :param transduce_schema: dict mapping field name to an optional
            ``{'transform': {...}, 'validator': {...}}`` description.
        :param data: optional initial data mapping.
        :raises ValueError: if the schema does not match the expected layout.
        """
        if not self._check_schema(transduce_schema):
            raise ValueError('Invalid format of schema. Check documentation.')
        self._schema = transduce_schema

        self._data = DottedDict()
        if data is not None:
            self._data.update(data)
        self._result = None        # DottedDict produced by transduce()
        self._errors = None        # cerberus validation errors, if any
        self._transduced = False   # True once transduce() succeeded
        self._out = set()          # field names exposed through `out`

    @staticmethod
    def _check_schema(schema):
        """Return True when `schema` only uses the supported keys/layout."""
        if not isinstance(schema, dict):
            return False
        allowed_params = ('transform', 'validator')
        allowed_transform = ('name', 'type', 'apply', 'out')
        for value in schema.values():
            if not isinstance(value, dict):
                return False
            for param in value.keys():
                if param not in allowed_params:
                    return False
            if 'transform' in value:
                operations = value['transform']
                if not isinstance(operations, dict):
                    return False
                for operation in operations:
                    if operation not in allowed_transform:
                        return False
            if 'validator' in value and not isinstance(value['validator'], dict):
                return False
        return True

    def validate(self, data=None):
        """Run cerberus validation using the per-field 'validator' sections.

        :param data: data to validate; defaults to the stored input data.
        :return: True on success; on failure the errors are kept on
            ``self.errors`` and False is returned.
        """
        validation_schema = {}
        for field, value in self._schema.items():
            if value is not None and 'validator' in value:
                validation_schema[field] = value['validator']
        validator = Validator(validation_schema, allow_unknown=True)
        if data is None:
            result = validator.validate(self._data)
        else:
            result = validator.validate(data)
        if not result:
            self._errors = validator.errors
        return result

    def transduce(self, data=None, append_missed=False, append_out=False):
        """
        Transform `data` according to the schema (validation is optional and
        separate — unvalidated data can be transduced too).

        :param data: Data to transform based on initial schema
        :param append_missed: append items not listed in schema to resulting data
        :param append_out: append items not listed in schema to output
        :return: True if transforming succeeded
        """
        self._transduced = True
        result = DottedDict()
        if data is None:
            data = self._data
        for field, value in self._schema.items():
            if field not in data:
                raise ValueError('Input data does not corresponds to schema provided. No such key: %s' % field)
            to_transform = True
            if value:
                if 'transform' in value and value['transform']:
                    transform_data = value['transform']
                    # Optional rename of the output field.
                    name = field
                    if 'name' in transform_data and isinstance(transform_data['name'], str):
                        name = transform_data['name']
                    # Optional type cast.
                    if 'type' in transform_data:
                        caster = self._get_cast(transform_data['type'])
                        to_cast = data[field]
                        try:
                            result[name] = caster(to_cast)
                        except ValueError:
                            self._transduced = False
                            raise ValueError('Cannot cast {}: {} with built-in {}'.format(name, to_cast, caster))
                    else:
                        result[name] = data[field]
                    # Optional post-processing callable.
                    if 'apply' in transform_data and hasattr(transform_data['apply'], '__call__'):
                        result[name] = transform_data['apply'](result[name])
                    # out=False excludes the field from the `out` property.
                    if 'out' in transform_data and transform_data['out'] == False:
                        continue
                    logging.info('Adding %s to out-data' % name)
                    self._out.add(name)
                else:
                    self._out.add(field)
                    to_transform = False
            else:
                self._out.add(field)
                to_transform = False
            if not to_transform:
                logging.debug('No need to transform %s' % field)
                result[field] = data[field]

        # Append elements that are not listed in schema
        if append_missed:
            for field, value in data.items():
                try:
                    new_field_name = self._schema[field]['transform']['name']
                except KeyError:
                    new_field_name = field
                if new_field_name not in result:
                    result[new_field_name] = value

        # Append elements that are not listed to the ouput
        if append_out:
            for field, value in data.items():
                if field not in self._schema:
                    self._out.add(field)

        if self._transduced:
            self._result = result
        else:
            self._result = None
        return self._transduced

    @staticmethod
    def cast_to_num(number):
        """Parse `number` as int, falling back to float."""
        try:
            result = int(number)
        except ValueError:
            try:
                result = float(number)
            except ValueError:
                raise ValueError('Cannot parse a number from %s value' % number)
        return result

    @staticmethod
    def cast_to_datetime(date):
        """Parse `date` with dateutil.

        Bug fix: the original built the ValueError without raising it and then
        returned a possibly-unbound `result` (UnboundLocalError on bad input).
        """
        from dateutil.parser import parse
        try:
            return parse(date)
        except ValueError:
            raise ValueError('Cannot parse a date from %s value' % date)

    def _get_cast(self, cast_key: str) -> type:
        """Map a schema type name to the callable that performs the cast."""
        # (Removed the dead, never-referenced `not_impl` helper.)
        casters = {
            'string': str,
            'integer': int,
            'float': float,
            'boolean': bool,
            'dict': dict,
            'list': list,
            'set': set,
            'datetime': self.cast_to_datetime,
            'number': self.cast_to_num
        }
        if cast_key in casters:
            return casters[cast_key]
        raise KeyError('No such caster to transduce your data: [%s]' % cast_key)

    @property
    def out(self):
        """Subset of transformed/input data selected for storage."""
        out = {}
        for elem in self._out:
            if elem in self._result:
                out[elem] = self._result[elem]
            else:
                out[elem] = self._data[elem]
        return out

    @property
    def result(self):
        """Transformed data produced by the last successful transduce()."""
        return self._result
@property
def errors(self):
return self._errors | [
"bmwant@gmail.com"
] | bmwant@gmail.com |
4196b3328fb2ae090303eab64b5720ae7299b3ce | 7235051c8def972f3403bf10155e246c9c291c58 | /angola_erp/angola_erpnext/doctype/inss/inss.py | 77cf56a0d8e98888cad5dfe0c5a2ec943b1f7544 | [
"MIT"
] | permissive | proenterprise/angola_erp | 8e79500ce7bcf499fc344948958ae8e8ab12f897 | 1c171362b132e567390cf702e6ebd72577297cdf | refs/heads/master | 2020-06-03T08:51:34.467041 | 2019-06-07T01:35:54 | 2019-06-07T01:35:54 | 191,514,859 | 1 | 0 | NOASSERTION | 2019-06-12T06:53:41 | 2019-06-12T06:53:41 | null | UTF-8 | Python | false | false | 255 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Helio de Jesus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class INSS(Document):
    """Doctype controller for INSS records; all behaviour is inherited from
    frappe's Document — no custom server-side logic yet."""
    pass
| [
"hcesar@gmail.com"
] | hcesar@gmail.com |
49e463ea9d2c04cee39afcce3719c6f9c650dad7 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/netapp/volumes/replications/reverse.py | 8cd797fd2d4a34131e35fbbbbdc0477a66f1efbb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 3,224 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reverse a Cloud NetApp Volume Replication's direction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.netapp.volumes.replications import client as replications_client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.netapp import flags
from googlecloudsdk.command_lib.netapp.volumes.replications import flags as replications_flags
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Reverse(base.Command):
    """Reverse a Cloud NetApp Volume Replication's direction."""

    _RELEASE_TRACK = base.ReleaseTrack.GA

    detailed_help = {
        'DESCRIPTION': """\
        Reverse a Cloud NetApp Volume Replication.
        """,
        'EXAMPLES': """\
        The following command reverses a Replication named NAME using the required arguments:

            $ {command} NAME --location=us-central1 --volume=vol1

        To reverse a Replication named NAME asynchronously, run the following command:

            $ {command} NAME --location=us-central1 --volume=vol1 --async
        """,
    }

    @staticmethod
    def Args(parser):
        """Register the replication resource, --volume and --async flags."""
        concept_parsers.ConceptParser(
            [
                flags.GetReplicationPresentationSpec(
                    'The Replication to reverse direction.'
                )
            ]
        ).AddToParser(parser)
        replications_flags.AddReplicationVolumeArg(parser, reverse_op=True)
        flags.AddResourceAsyncFlag(parser)

    def Run(self, args):
        """Reverse a Cloud NetApp Volume Replication's direction in the current project."""
        replication_ref = args.CONCEPTS.replication.Parse()
        # --volume is required here even though the flag itself is optional.
        if args.CONCEPTS.volume.Parse() is None:
            raise exceptions.RequiredArgumentException(
                '--volume', 'Requires a volume to reverse replication of'
            )
        client = replications_client.ReplicationsClient(self._RELEASE_TRACK)
        result = client.ReverseReplication(
            replication_ref, args.async_)
        if args.async_:
            # Tell the user how to poll the long-running operation.
            command = 'gcloud {} netapp volumes replications list'.format(
                self.ReleaseTrack().prefix
            )
            log.status.Print(
                'Check the status of the reversed replication by listing all'
                ' replications:\n  $ {} '.format(command)
            )
        return result


@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ReverseBeta(Reverse):
    """Reverse a Cloud NetApp Volume Replication's direction."""

    # Same behaviour as GA; only the API release track differs.
    _RELEASE_TRACK = base.ReleaseTrack.BETA
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
8c8158918e991fc5c7b0286a8c393cab6a256e10 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_algos/rf/pyunit_custom_metrics_pubdev_5088.py | 2a91952de3807564ef0b33cc1b40b49fbe8907f1 | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,637 | py | import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from tests.pyunit_utils import CustomMaeFunc, CustomRmseFunc,\
assert_correct_custom_metric, regression_model, multinomial_model, binomial_model
from h2o.estimators.random_forest import H2ORandomForestEstimator
# Custom model metrics fixtures: upload the metric function to the H2O
# cluster and return the resulting reference.
def custom_mae_mm():
    """Upload the custom MAE metric function."""
    return h2o.upload_custom_metric(CustomMaeFunc, func_name="mae", func_file="mm_mae.py")

def custom_rmse_mm():
    """Upload the custom RMSE metric function."""
    return h2o.upload_custom_metric(CustomRmseFunc, func_name="rmse", func_file="mm_rmse.py")
# Test that the custom model metric is computed
# and compare it with the implicitly computed metric of the same name.
def test_custom_metric_computation_regression():
    (model, f_test) = regression_model(H2ORandomForestEstimator, custom_mae_mm())
    assert_correct_custom_metric(model, f_test, "mae", "Regression on prostate")

def test_custom_metric_computation_binomial():
    (model, f_test) = binomial_model(H2ORandomForestEstimator, custom_rmse_mm())
    assert_correct_custom_metric(model, f_test, "rmse", "Binomial on prostate")

def test_custom_metric_computation_multinomial():
    (model, f_test) = multinomial_model(H2ORandomForestEstimator, custom_rmse_mm())
    assert_correct_custom_metric(model, f_test, "rmse", "Multinomial on iris")
# Tests to invoke in this suite
__TESTS__ = [
    test_custom_metric_computation_binomial,
    test_custom_metric_computation_regression,
    test_custom_metric_computation_multinomial
]

if __name__ == "__main__":
    # Standalone execution: wrap each test with the pyunit harness.
    for func in __TESTS__:
        pyunit_utils.standalone_test(func)
else:
    # Imported by the suite runner: execute the tests directly.
    for func in __TESTS__:
        func()
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
385ce40f026c78916f14eddf848c66a606974671 | cf737d6f8bb3d2db9eb5756fd11cecb612f5694b | /pype/tools/pyblish_pype/view.py | 477303eae879a3ac1cad666432f1b270643cf2cb | [
"MIT"
] | permissive | jrsndl/pype | 7e04b9eaeadb339008a70dbab74864d5705a07ea | 47ef4b64f297186c6d929a8f56ecfb93dd0f44e8 | refs/heads/master | 2023-08-16T09:20:30.963027 | 2020-09-15T15:59:53 | 2020-09-15T15:59:53 | 297,372,053 | 1 | 0 | MIT | 2020-09-21T14:53:08 | 2020-09-21T14:53:08 | null | UTF-8 | Python | false | false | 11,656 | py | from Qt import QtCore, QtWidgets
from . import model
from .constants import Roles, EXPANDER_WIDTH
# Imported when used
widgets = None


def _import_widgets():
    """Import the widgets module on first use (deferred, presumably to avoid
    a circular import at module load time — TODO confirm)."""
    global widgets
    if widgets is None:
        from . import widgets
class ArtistView(QtWidgets.QListView):
    """Flat list view of items with toggle/perspective hot-zones."""

    # An item is requesting to be toggled, with optional forced-state
    # (None = flip, True = check, False = uncheck).
    toggled = QtCore.Signal(QtCore.QModelIndex, object)
    show_perspective = QtCore.Signal(QtCore.QModelIndex)

    def __init__(self, parent=None):
        super(ArtistView, self).__init__(parent)

        self.horizontalScrollBar().hide()
        self.viewport().setAttribute(QtCore.Qt.WA_Hover, True)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.setResizeMode(QtWidgets.QListView.Adjust)
        self.setVerticalScrollMode(QtWidgets.QListView.ScrollPerPixel)

    def event(self, event):
        """Keyboard handling: Space toggles, Backspace unchecks, Return checks."""
        if not event.type() == QtCore.QEvent.KeyPress:
            return super(ArtistView, self).event(event)

        elif event.key() == QtCore.Qt.Key_Space:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, None)
            return True

        elif event.key() == QtCore.Qt.Key_Backspace:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, False)
            return True

        elif event.key() == QtCore.Qt.Key_Return:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, True)
            return True

        return super(ArtistView, self).event(event)

    def focusOutEvent(self, event):
        # Clear the selection so stale highlights don't linger.
        self.selectionModel().clear()

    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            indexes = self.selectionModel().selectedIndexes()
            # Left edge (< 20 px) is the toggle hot-zone.
            if len(indexes) <= 1 and event.pos().x() < 20:
                for index in indexes:
                    self.toggled.emit(index, None)
            # Right edge (last 40 px) opens the perspective for the item.
            if len(indexes) == 1 and event.pos().x() > self.width() - 40:
                for index in indexes:
                    self.show_perspective.emit(index)

        return super(ArtistView, self).mouseReleaseEvent(event)
class OverviewView(QtWidgets.QTreeView):
    """Base tree view for the overview tab; shared keyboard/selection logic."""

    # An item is requesting to be toggled, with optional forced-state
    # (None = flip, True = check, False = uncheck).
    toggled = QtCore.Signal(QtCore.QModelIndex, object)
    show_perspective = QtCore.Signal(QtCore.QModelIndex)

    def __init__(self, parent=None):
        super(OverviewView, self).__init__(parent)

        self.horizontalScrollBar().hide()
        self.viewport().setAttribute(QtCore.Qt.WA_Hover, True)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.setItemsExpandable(True)
        self.setVerticalScrollMode(QtWidgets.QTreeView.ScrollPerPixel)
        self.setHeaderHidden(True)
        self.setRootIsDecorated(False)
        self.setIndentation(0)

    def event(self, event):
        """Keyboard handling: Space toggles, Backspace unchecks, Return checks."""
        if not event.type() == QtCore.QEvent.KeyPress:
            return super(OverviewView, self).event(event)

        elif event.key() == QtCore.Qt.Key_Space:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, None)
            return True

        elif event.key() == QtCore.Qt.Key_Backspace:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, False)
            return True

        elif event.key() == QtCore.Qt.Key_Return:
            for index in self.selectionModel().selectedIndexes():
                self.toggled.emit(index, True)
            return True

        return super(OverviewView, self).event(event)

    def focusOutEvent(self, event):
        # Clear the selection when focus leaves the view.
        self.selectionModel().clear()

    def mouseReleaseEvent(self, event):
        if event.button() in (QtCore.Qt.LeftButton, QtCore.Qt.RightButton):
            # Deselect all group labels
            indexes = self.selectionModel().selectedIndexes()
            for index in indexes:
                if index.data(Roles.TypeRole) == model.GroupType:
                    self.selectionModel().select(
                        index, QtCore.QItemSelectionModel.Deselect
                    )

        return super(OverviewView, self).mouseReleaseEvent(event)
class PluginView(OverviewView):
    """Overview tree listing plug-ins; group rows expand/collapse on click."""

    def __init__(self, *args, **kwargs):
        super(PluginView, self).__init__(*args, **kwargs)
        self.clicked.connect(self.item_expand)

    def item_expand(self, index):
        # Clicking a group header toggles its expansion state.
        if index.data(Roles.TypeRole) == model.GroupType:
            if self.isExpanded(index):
                self.collapse(index)
            else:
                self.expand(index)

    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            indexes = self.selectionModel().selectedIndexes()
            if len(indexes) == 1:
                index = indexes[0]
                pos_index = self.indexAt(event.pos())
                # If instance or Plugin and is selected
                if (
                    index == pos_index
                    and index.data(Roles.TypeRole) == model.PluginType
                ):
                    # Left edge toggles the plug-in; right edge opens the
                    # perspective view for it.
                    if event.pos().x() < 20:
                        self.toggled.emit(index, None)
                    elif event.pos().x() > self.width() - 20:
                        self.show_perspective.emit(index)

        return super(PluginView, self).mouseReleaseEvent(event)
class InstanceView(OverviewView):
    """Overview tree of instances; group rows toggle all of their children."""

    def __init__(self, parent=None):
        super(InstanceView, self).__init__(parent)
        self.viewport().setMouseTracking(True)

    def mouseMoveEvent(self, event):
        # Repaint hovered group rows so their hover state stays current.
        index = self.indexAt(event.pos())
        if index.data(Roles.TypeRole) == model.GroupType:
            self.update(index)
        super(InstanceView, self).mouseMoveEvent(event)

    def item_expand(self, index, expand=None):
        """Expand/collapse `index`; `expand=None` means toggle."""
        if expand is None:
            expand = not self.isExpanded(index)

        if expand:
            self.expand(index)
        else:
            self.collapse(index)

    def group_toggle(self, index):
        """Toggle all enabled children of a group: uncheck them if any are
        checked, otherwise check them all."""
        model = index.model()
        chilren_indexes_checked = []
        chilren_indexes_unchecked = []
        for idx in range(model.rowCount(index)):
            child_index = model.index(idx, 0, index)
            if not child_index.data(Roles.IsEnabledRole):
                continue
            if child_index.data(QtCore.Qt.CheckStateRole):
                chilren_indexes_checked.append(child_index)
            else:
                chilren_indexes_unchecked.append(child_index)

        if chilren_indexes_checked:
            to_change_indexes = chilren_indexes_checked
            new_state = False
        else:
            to_change_indexes = chilren_indexes_unchecked
            new_state = True

        # NOTE: `index` is rebound below, shadowing the group index parameter.
        for index in to_change_indexes:
            model.setData(index, new_state, QtCore.Qt.CheckStateRole)
            self.toggled.emit(index, new_state)

    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            indexes = self.selectionModel().selectedIndexes()
            if len(indexes) == 1:
                index = indexes[0]
                pos_index = self.indexAt(event.pos())
                if index == pos_index:
                    # If instance or Plugin
                    if index.data(Roles.TypeRole) == model.InstanceType:
                        # Left edge toggles; right edge opens perspective.
                        if event.pos().x() < 20:
                            self.toggled.emit(index, None)
                        elif event.pos().x() > self.width() - 20:
                            self.show_perspective.emit(index)
                    else:
                        # Group row: the expander zone toggles expansion; the
                        # rest toggles all children and force-expands the group.
                        if event.pos().x() < EXPANDER_WIDTH:
                            self.item_expand(index)
                        else:
                            self.group_toggle(index)
                            self.item_expand(index, True)

        return super(InstanceView, self).mouseReleaseEvent(event)
class TerminalView(QtWidgets.QTreeView):
    """Tree of terminal/log records; label rows expand into detail widgets."""

    def __init__(self, parent=None):
        super(TerminalView, self).__init__(parent)
        self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.setAutoScroll(False)
        self.setHeaderHidden(True)
        self.setIndentation(0)
        self.setVerticalScrollMode(QtWidgets.QTreeView.ScrollPerPixel)
        self.verticalScrollBar().setSingleStep(10)
        self.setRootIsDecorated(False)

        self.clicked.connect(self.item_expand)

        # Widgets module is imported lazily on first view construction.
        _import_widgets()

    def event(self, event):
        """Keyboard: Space toggles expansion, Backspace collapses, Return expands."""
        if not event.type() == QtCore.QEvent.KeyPress:
            return super(TerminalView, self).event(event)

        elif event.key() == QtCore.Qt.Key_Space:
            for index in self.selectionModel().selectedIndexes():
                if self.isExpanded(index):
                    self.collapse(index)
                else:
                    self.expand(index)

        elif event.key() == QtCore.Qt.Key_Backspace:
            for index in self.selectionModel().selectedIndexes():
                self.collapse(index)

        elif event.key() == QtCore.Qt.Key_Return:
            for index in self.selectionModel().selectedIndexes():
                self.expand(index)

        return super(TerminalView, self).event(event)

    def focusOutEvent(self, event):
        # Clear the selection when focus leaves the view.
        self.selectionModel().clear()

    def item_expand(self, index):
        # Only label rows expand/collapse on click.
        if index.data(Roles.TypeRole) == model.TerminalLabelType:
            if self.isExpanded(index):
                self.collapse(index)
            else:
                self.expand(index)
            self.model().layoutChanged.emit()
            self.updateGeometry()

    def rowsInserted(self, parent, start, end):
        """Automatically scroll to bottom on each new item added."""
        super(TerminalView, self).rowsInserted(parent, start, end)
        self.updateGeometry()
        self.scrollToBottom()

    def expand(self, index):
        """Wrapper to set widget for expanded index."""
        model = index.model()
        row_count = model.rowCount(index)
        is_new = False
        for child_idx in range(row_count):
            child_index = model.index(child_idx, index.column(), index)
            widget = self.indexWidget(child_index)
            # Create the detail widget once per child row, on first expand.
            if widget is None:
                is_new = True
                msg = child_index.data(QtCore.Qt.DisplayRole)
                widget = widgets.TerminalDetail(msg)
                self.setIndexWidget(child_index, widget)
        super(TerminalView, self).expand(index)
        if is_new:
            self.updateGeometries()

    def resizeEvent(self, event):
        super(self.__class__, self).resizeEvent(event)
        # Row heights depend on the view width, so force a relayout.
        self.model().layoutChanged.emit()

    def sizeHint(self):
        """Report a height covering all top-level rows plus expanded children."""
        size = super(TerminalView, self).sizeHint()
        height = (
            self.contentsMargins().top()
            + self.contentsMargins().bottom()
        )
        for idx_i in range(self.model().rowCount()):
            index = self.model().index(idx_i, 0)
            height += self.rowHeight(index)
            if self.isExpanded(index):
                for idx_j in range(index.model().rowCount(index)):
                    child_index = index.child(idx_j, 0)
                    height += self.rowHeight(child_index)
        size.setHeight(height)
        return size
| [
"jakub.trllo@gmail.com"
] | jakub.trllo@gmail.com |
0cb7704f335aa1089c00787ef0e0221b9dfc20c3 | b316c1d1e57ca197b0b24625b5ceede12905a979 | /tango_with_django_project/tango_with_django_project/urls.py | 67adf6c79e5fb3c4505ed3c1b74f796dcb90e178 | [] | no_license | JayWelborn/Rango | 962ed888e33c591074c80cbf07f77edca2d4d821 | 41c832c9bc791f910b948fe9026cd41fc12cf129 | refs/heads/master | 2021-01-22T22:16:35.134729 | 2017-09-22T19:05:02 | 2017-09-22T19:05:02 | 92,766,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | """tango_with_django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
from rango import views
urlpatterns = [
    # Delegate everything under /rango/ to the rango app's own URLconf.
    url(r'^rango/', include('rango.urls')),
    url(r'^admin/', admin.site.urls),
    # The custom registration view must appear BEFORE the generic
    # registration URLconf below so it wins the URL match.
    url(r'^accounts/register/$',
        views.MyRegistrationView.as_view(),
        name='registration_register'),
    url(r'^accounts/', include('registration.backends.simple.urls')),
    # Site root.
    url(r'^$', views.index, name='index')
]

# Serve user-uploaded media through Django itself only while DEBUG is on;
# in production the front-end web server should serve MEDIA_ROOT instead.
if settings.DEBUG:
    urlpatterns += static(
        settings.MEDIA_URL,
        document_root=settings.MEDIA_ROOT
    )
| [
"jesse.welborn@gmail.com"
] | jesse.welborn@gmail.com |
19e82b23b4f97c9a1c1bd88fe98d27f73b01476b | 3faf4b9fb76145b2326446bc6bc190a5712b3b62 | /Algorithms/0695 Max Area of Island.py | cdc720cba3b793eb17af5a9da0ce13bc619edfd6 | [] | no_license | cravo123/LeetCode | b93c18f3e4ca01ea55f4fdebceca76ccf664e55e | 4c1288c99f78823c7c3bac0ceedd532e64af1258 | refs/heads/master | 2021-07-12T11:10:26.987657 | 2020-06-02T12:24:29 | 2020-06-02T12:24:29 | 152,670,206 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # Solution 1, DFS
class Solution:
    """LeetCode 695 (Max Area of Island) solved by recursive flood fill."""

    def dfs(self, i, j, grid, m, n):
        """Flood-fill the island containing (i, j) and return its area.

        Visited land cells are zeroed in place so each is counted once.
        """
        grid[i][j] = 0
        total = 1
        for r, c in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if 0 <= r < m and 0 <= c < n and grid[r][c] == 1:
                total += self.dfs(r, c, grid, m, n)
        return total

    def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
        """Return the area of the largest 4-connected island of 1s."""
        m = len(grid)
        n = len(grid[0]) if grid else 0
        best = 0
        for r in range(m):
            for c in range(n):
                if grid[r][c] == 1:
                    best = max(best, self.dfs(r, c, grid, m, n))
        return best
# Solution 1.1, another DFS implementation
class Solution:
    """LeetCode 695 variant: the DFS does its bounds check on entry."""

    def dfs(self, i, j, A, m, n):
        """Return the island area rooted at (i, j); 0 for water or out-of-range."""
        outside = i < 0 or i >= m or j < 0 or j >= n
        if outside or A[i][j] == 0:
            return 0
        A[i][j] = 0  # sink the cell so it is never recounted
        return 1 + (self.dfs(i - 1, j, A, m, n)
                    + self.dfs(i + 1, j, A, m, n)
                    + self.dfs(i, j - 1, A, m, n)
                    + self.dfs(i, j + 1, A, m, n))

    def maxAreaOfIsland(self, grid: 'List[List[int]]') -> 'int':
        """Return the largest island area; every cell seeds a DFS."""
        m = len(grid)
        n = len(grid[0]) if grid else 0
        return max(
            (self.dfs(i, j, grid, m, n) for i in range(m) for j in range(n)),
            default=0,
        )
"cc3630@columbia.edu"
] | cc3630@columbia.edu |
c3f9cd2060490f4c7f83b91a605c42694ee81a49 | ee92057a8ebc91ba90d8055a9bece25d24211499 | /kattis/maximum-points-you-can-obtain-from-cards/maximum-points-you-can-obtain-from-cards.py | 5705a97eb87d0bfc290539344a5fe68a41a69367 | [] | no_license | KendrickAng/competitive-programming | ce0a4f44f592f295c2f8cd7e854139f18fb8853a | f9768a2020f801b8e4787cc853398b8258a0bf09 | refs/heads/master | 2022-05-29T07:21:32.607089 | 2022-04-24T16:35:14 | 2022-04-24T16:35:14 | 254,402,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | class Solution:
def maxScore(self, cardPoints: List[int], k: int) -> int:
# calculate prefix sums
n = len(cardPoints)
prefix = [0]
for i in range(n):
tmp = prefix[-1] + cardPoints[i]
prefix.append(tmp)
#print(prefix)
# move fixed sliding window
left = k
right = n
ans = -1
while left >= 0:
leftSum = prefix[left]
rightSum = prefix[n] - prefix[right]
points = leftSum + rightSum
#print(f"{leftSum} {rightSum} {points}")
ans = max(ans, points)
left -= 1
right -= 1
return ans
"""
1 2 3 4 5 6 1, k = 3
1 6 5
9 7 7 9 7 7 9, k = 7
take all
idea: fixed sliding window, length k
1. calculate prefix sums
2. move fixed window (size n - k) from rightmost to leftmost
""" | [
"kendrick.wh@outlook.com"
] | kendrick.wh@outlook.com |
37b64b825968043ad9deba9d9b7b60f106080bfb | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coderByte_20200518233550.py | ef8c2488afe45ffa8446a7d96de9b95d1352174d | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py |
def QuestionsMarks(str):
    """Return 'true' or 'false' for the CoderByte "Questions Marks" task.

    'true' when the string contains at least one pair of consecutive
    digits summing to 10 AND every such pair has exactly three '?'
    characters between them; otherwise 'false'.

    The parameter keeps its original name ``str`` (it shadows the builtin,
    but renaming it would break keyword callers).

    The previous draft referenced undefined names (``s``, ``others``) and
    crashed; this is the working implementation.
    """
    prev_digit = None   # last digit seen so far
    questions = 0       # number of '?' seen since that digit
    found_pair = False  # saw at least one digit pair summing to 10
    for ch in str:
        if ch.isdigit():
            digit = int(ch)
            if prev_digit is not None and prev_digit + digit == 10:
                if questions != 3:
                    return 'false'
                found_pair = True
            prev_digit = digit
            questions = 0
        elif ch == '?':
            questions += 1
    return 'true' if found_pair else 'false'


# keep this function call here
QuestionsMarks("acc?7??sss?3rr1??????5")
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
ddaa9fc9222adb6cea73a896b2bad36c63421164 | 17993dcca87d490bc9841437309f309a5592ab38 | /Codes/linear_regression/exercise/face_solution.py | 3ee098e36b1d89f939fdf469515fb53525213a62 | [] | no_license | dreamlikexin/machine_learning | bc86ea15ef8552ad1be78a5bc65fb74a2cdb274e | 850e87025270847210b6ad188d2da181983a72c7 | refs/heads/master | 2022-01-16T09:51:20.538340 | 2019-06-19T16:27:26 | 2019-06-19T16:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Predict the lower half of a face from its upper half with a linear model,
# then display one test face: ground truth vs. prediction.
data = fetch_olivetti_faces()
data = data.images.reshape((len(data.images), -1))
n_pixels = data.shape[1]
# Upper half of each flattened face is the input, lower half the target.
X = data[:, :(n_pixels + 1) // 2]
y = data[:, n_pixels // 2:]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

image_shape = (64, 64)  # Olivetti faces are 64x64 grayscale images
sample_idx = 5  # renamed from `id`, which shadowed the builtin of that name
true_face = np.hstack((X_test[sample_idx], y_test[sample_idx]))
pred_face = np.hstack((X_test[sample_idx], y_pred[sample_idx]))
plt.figure(0)
plt.imshow(true_face.reshape(image_shape), interpolation="nearest")
plt.figure(1)
plt.imshow(pred_face.reshape(image_shape), interpolation="nearest")
plt.show()
"wanglei@wanglei-mbp.local"
] | wanglei@wanglei-mbp.local |
1a7a8fdd1634fdda99eea85a2692323fb7209a0f | a411a55762de11dc2c9d913ff33d2f1477ac02cf | /lte/gateway/python/integ_tests/s1aptests/test_ipv6_non_nat_dp_ul_tcp.py | 43041ea8822b77bd9e137145804ac43a2c93d85f | [
"BSD-3-Clause"
] | permissive | magma/magma | 0dc48c1513d9968bd05fb7589f302c192b7c0f94 | 0e1d895dfe625681229e181fbc2dbad83e13c5cb | refs/heads/master | 2023-09-04T09:31:56.140395 | 2023-08-29T13:54:49 | 2023-08-29T13:54:49 | 170,803,235 | 1,219 | 525 | NOASSERTION | 2023-09-07T17:45:42 | 2019-02-15T04:46:24 | C++ | UTF-8 | Python | false | false | 4,053 | py | """
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import time
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
from s1ap_utils import MagmadUtil
class TestIpv6NonNatDpUlTcp(unittest.TestCase):
    """Integration Test: TestAttachDetachNonNatDpUlTcp

    Runs a single-UE IPv6 attach / uplink-TCP / detach cycle with the
    gateway's NAT disabled for IPv6.  The S1AP message sequence below is
    order-critical: attach -> EMM Information -> Router Advertisement ->
    traffic -> detach.
    """

    def __init__(self, method_name: str) -> None:
        """Initialize unittest class"""
        super().__init__(methodName=method_name)
        # MagmadUtil(None): argument appears to select default gateway
        # access parameters -- NOTE(review): confirm against MagmadUtil.
        self.magma_utils = MagmadUtil(None)

    def setUp(self):
        """Initialize before test case execution"""
        # Whole test runs in non-NAT mode for IPv6 traffic.
        self.magma_utils.disable_nat(ip_version=6)
        self._s1ap_wrapper = s1ap_wrapper.TestWrapper(ip_version=6)

    def tearDown(self):
        """Cleanup after test case execution"""
        self._s1ap_wrapper.cleanup()
        # Restore the default (NAT enabled) configuration for later tests.
        self.magma_utils.enable_nat(ip_version=6)

    def test_ipv6_non_nat_dp_ul_tcp(self):
        """Basic attach/detach and UL TCP ipv6 data test with a single UE

        Steps:
          1. Configure one UE and an IPv6-only "magma" APN.
          2. Attach end-to-end (requesting IPv4v6, pdn_type=2).
          3. Wait for EMM Information, then the Router Advertisement that
             carries the UE's IPv6 address.
          4. Run a short uplink TCP traffic test.
          5. Detach normally.
        """
        num_ues = 1
        magma_apn = {
            "apn_name": "magma",  # APN-name
            "qci": 9,  # qci
            "priority": 15,  # priority
            "pre_cap": 1,  # preemption-capability
            "pre_vul": 0,  # preemption-vulnerability
            "mbr_ul": 200000000,  # MBR UL
            "mbr_dl": 100000000,  # MBR DL
            "pdn_type": 1,  # PDN Type 0-IPv4,1-IPv6,2-IPv4v6
        }
        wait_for_s1 = True
        ue_ips = ["fdee::"]  # IPv6 prefix assigned to the UE
        apn_list = [magma_apn]
        self._s1ap_wrapper.configUEDevice(num_ues, [], ue_ips)
        req = self._s1ap_wrapper.ue_req
        ue_id = req.ue_id
        print(
            "************************* Running End to End attach for ",
            "UE id ",
            req.ue_id,
        )
        # Provision the APN against this UE's IMSI in the subscriber DB.
        self._s1ap_wrapper.configAPN(
            "IMSI" + "".join([str(j) for j in req.imsi]),
            apn_list,
            default=False,
        )

        # Now actually complete the attach
        self._s1ap_wrapper.s1_util.attach(
            ue_id,
            s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
            s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
            s1ap_types.ueAttachAccept_t,
            pdn_type=2,
        )

        # Wait on EMM Information from MME
        self._s1ap_wrapper._s1_util.receive_emm_info()

        # Receive Router Advertisement message (carries the IPv6 address)
        apn = "magma"
        response = self._s1ap_wrapper.s1_util.get_response()
        assert response.msg_type == s1ap_types.tfwCmd.UE_ROUTER_ADV_IND.value
        router_adv = response.cast(s1ap_types.ueRouterAdv_t)
        print(
            "********** Received Router Advertisement for APN-%s"
            " bearer id-%d" % (apn, router_adv.bearerId),
        )
        # ipv6Addr is a fixed-size char buffer; strip NUL padding.
        ipv6_addr = "".join([chr(i) for i in router_adv.ipv6Addr]).rstrip(
            "\x00",
        )
        print("********** UE IPv6 address: ", ipv6_addr)
        # Raises ValueError if the received address is malformed.
        ipaddress.ip_address(ipv6_addr)
        self._s1ap_wrapper.s1_util.update_ipv6_address(ue_id, ipv6_addr)
        # Give the data path time to converge before sending traffic.
        print("Sleeping for 5 secs")
        time.sleep(5)
        print(
            "************************* Running UE uplink (TCP) for UE id ",
            req.ue_id,
        )
        with self._s1ap_wrapper.configUplinkTest(req, duration=1) as test:
            test.verify()
        print(
            "************************* Running UE detach for UE id ",
            req.ue_id,
        )
        # Now detach the UE
        self._s1ap_wrapper.s1_util.detach(
            req.ue_id,
            s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
            wait_for_s1,
        )
unittest.main()
| [
"noreply@github.com"
] | magma.noreply@github.com |
f26cfb2eec843676ac2c9085e502fcee2b1f71ce | 43842089122512e6b303ebd05fc00bb98066a5b2 | /dynamic_programming/213_house_robber_ii.py | 2b579f56defc49ef1f5790ac7012f6ace3142db0 | [] | no_license | mistrydarshan99/Leetcode-3 | a40e14e62dd400ddb6fa824667533b5ee44d5f45 | bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6 | refs/heads/master | 2022-04-16T11:26:56.028084 | 2020-02-28T23:04:06 | 2020-02-28T23:04:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py |
"""
213. House Robber II
Medium
627
18
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed. All houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.
Example 1:
Input: [2,3,2]
Output: 3
Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2),
because they are adjacent houses.
Example 2:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
"""
def robbery(houses):
    """Maximum loot for House Robber II (houses arranged in a circle).

    input houses: list[int]
    output total: int

    Since the first and last house are adjacent on the circle, the answer
    is the better of two *linear* sub-problems: skip the first house
    (houses[1:]) or skip the last house (houses[:-1]).
    """
    if len(houses) <= 3:
        # On a circle of at most three houses only one can be robbed.
        return max(houses or [0])

    def best_linear(row):
        """Best take for a straight row: dp[i] = row[i] + max(dp[i-2], dp[i-3])."""
        padded = [0] + row
        dp = padded[:3]
        best = 0
        for i in range(3, len(padded)):
            dp.append(padded[i] + max(dp[i - 2], dp[i - 3]))
            best = max(best, dp[i])
        return best

    return max(best_linear(houses[1:]), best_linear(houses[:-1]))
# Quick demo: expected output is 103 (rob houses worth 3 and 100).
houses = [1, 3, 1, 3, 100]
print(robbery(houses))
"maplesuger@hotmail.com"
] | maplesuger@hotmail.com |
c2c3436aac89ebe1bc9f7f979a2d3dcdc72f552e | c7c63d04ca4a3db48ccc2d78b11ee092175df043 | /numpyro/contrib/control_flow/scan.py | 35ffb1dd785705fc2c8afee7cfd5f742f298d995 | [
"Apache-2.0"
] | permissive | zeta1999/numpyro | e17413805db2e70d48e1ada4b029b02cb065f539 | 6823c495f62774172936fb437bb17210fd0179e0 | refs/heads/master | 2023-04-11T14:39:41.356900 | 2021-04-14T03:24:36 | 2021-04-14T03:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,831 | py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import OrderedDict
from functools import partial
from jax import (
device_put,
lax,
random,
tree_flatten,
tree_map,
tree_multimap,
tree_unflatten,
)
import jax.numpy as jnp
from jax.tree_util import register_pytree_node_class
from numpyro import handlers
from numpyro.primitives import _PYRO_STACK, Messenger, apply_stack
from numpyro.util import not_jax_tracer
@register_pytree_node_class
class PytreeTrace:
    """Wrap a numpyro trace dict so it can cross JAX transform boundaries.

    Only ``sample`` and ``deterministic`` sites are kept.  Their
    JAX-traceable entries (``fn``, ``args``, ``value``, ``intermediates``)
    become pytree leaves; every other entry (except ``stop``, which is an
    artifact of tracing under a block handler) rides along as static
    auxiliary data and is merged back on unflatten.
    """

    _LEAF_KEYS = ("fn", "args", "value", "intermediates")

    def __init__(self, trace):
        self.trace = trace

    def tree_flatten(self):
        leaves, static = {}, {}
        for name, site in self.trace.items():
            if site["type"] not in ("sample", "deterministic"):
                continue
            leaves[name] = {
                key: site[key] for key in site if key in self._LEAF_KEYS
            }
            static[name] = {
                key: site[key]
                for key in site
                if key not in self._LEAF_KEYS and key != "stop"
            }
            static[name]["_control_flow_done"] = True
        # JAX's flatten/unflatten does not preserve dict key order, so the
        # original site order is recorded explicitly.
        return (leaves,), (static, list(leaves))

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        static, site_names = aux_data
        (leaves,) = children
        trace = {}
        for name in site_names:
            site = leaves[name]
            site.update(static[name])
            trace[name] = site
        return cls(trace)
def _subs_wrapper(subs_map, i, length, site):
    """Resolve the substituted value for ``site`` at scan step ``i``.

    ``subs_map`` is either a dict mapping site names to (possibly
    time-stacked) values, or a callable (e.g. an init strategy) applied to
    the site.  Returns ``None`` (implicitly) when no substitution applies,
    a per-step value when one does, and raises ``RuntimeError`` on shape
    mismatches.
    """
    value = None
    if isinstance(subs_map, dict) and site["name"] in subs_map:
        value = subs_map[site["name"]]
    elif callable(subs_map):
        # Seed the callable with the site's rng_key (when present) so that
        # strategies that sample internally are deterministic per step.
        rng_key = site["kwargs"].get("rng_key")
        subs_map = (
            handlers.seed(subs_map, rng_seed=rng_key)
            if rng_key is not None
            else subs_map
        )
        value = subs_map(site)

    if value is not None:
        value_ndim = jnp.ndim(value)
        sample_shape = site["kwargs"]["sample_shape"]
        fn_ndim = len(sample_shape + site["fn"].shape())
        if value_ndim == fn_ndim:
            # this branch happens when substitute_fn is init_strategy,
            # where we apply init_strategy to each element in the scanned series
            return value
        elif value_ndim == fn_ndim + 1:
            # this branch happens when we substitute a series of values:
            # the extra leading dimension is the scan's time axis
            shape = jnp.shape(value)
            if shape[0] == length:
                return value[i]
            elif shape[0] < length:
                rng_key = site["kwargs"]["rng_key"]
                assert rng_key is not None
                # we use the substituted values if i < shape[0]
                # and generate a new sample otherwise
                # (``i`` is traced, so this must be a lax.cond, not Python if)
                return lax.cond(
                    i < shape[0],
                    (value, i),
                    lambda val: val[0][val[1]],
                    rng_key,
                    lambda val: site["fn"](rng_key=val, sample_shape=sample_shape),
                )
            else:
                raise RuntimeError(
                    f"Substituted value for site {site['name']} "
                    "requires length less than or equal to scan length."
                    f" Expected length <= {length}, but got {shape[0]}."
                )
        else:
            raise RuntimeError(
                f"Something goes wrong. Expected ndim = {fn_ndim} or {fn_ndim+1},"
                f" but got {value_ndim}. This might happen when you use nested scan,"
                " which is currently not supported. Please report the issue to us!"
            )
class _promote_fn_shapes(Messenger):
    """Messenger that left-pads a sample site's ``fn`` with size-1 batch dims.

    Example: with ``fn.batch_shape == (3,)`` and ``value.shape ==
    (2, 3) + fn.event_shape``, the distribution is reshaped so that
    ``fn.batch_shape == (1, 3)`` and broadcasts against the value.
    """

    def postprocess_message(self, msg):
        if msg["type"] != "sample" or msg["value"] is None:
            return
        fn = msg["fn"]
        value_batch_ndims = jnp.ndim(msg["value"]) - fn.event_dim
        missing = value_batch_ndims - len(fn.batch_shape)
        if missing > 0:
            pad = (1,) * missing

            def _pad_leaf(leaf):
                return jnp.reshape(leaf, pad + jnp.shape(leaf))

            msg["fn"] = tree_map(_pad_leaf, fn)
def _promote_scanned_value_shapes(value, fn):
# a helper function to promote shapes of `value`
# + msg: fn.batch_shape = (T, 2, 3), value.shape = (T, 3,) + fn.event_shape
# process_message(msg): promote value so that value.shape = (T, 1, 3) + fn.event_shape
value_batch_ndims = jnp.ndim(value) - fn.event_dim
fn_batch_ndim = len(fn.batch_shape)
if fn_batch_ndim > value_batch_ndims:
prepend_shapes = (1,) * (fn_batch_ndim - value_batch_ndims)
return jnp.reshape(
value, jnp.shape(value)[:1] + prepend_shapes + jnp.shape(value)[1:]
)
else:
return value
def scan_enum(
    f,
    init,
    xs,
    length,
    reverse,
    rng_key=None,
    substitute_stack=None,
    history=1,
    first_available_dim=None,
):
    """Enumeration-aware implementation of :func:`scan`.

    The first ``min(2 * history - 1, length)`` steps are unrolled eagerly
    under a ``markov`` context (so the Sarkka-Bilmes-style product over the
    Markov dependency structure can be set up), and the remaining steps run
    under :func:`jax.lax.scan`.  Returns the same
    ``(i, rng_key, carry), (PytreeTrace, ys)`` pair as ``scan_wrapper``.

    NOTE(review): ``substitute_stack`` defaults to ``None`` but is iterated
    unconditionally inside ``body_fn``; callers (``scan_wrapper``) always
    pass a list -- confirm before calling this directly.
    """
    from numpyro.contrib.funsor import (
        config_enumerate,
        enum,
        markov,
        trace as packed_trace,
    )

    # amount number of steps to unroll
    history = min(history, length)
    unroll_steps = min(2 * history - 1, length)
    if reverse:
        x0 = tree_map(lambda x: x[-unroll_steps:][::-1], xs)
        xs_ = tree_map(lambda x: x[:-unroll_steps], xs)
    else:
        x0 = tree_map(lambda x: x[:unroll_steps], xs)
        xs_ = tree_map(lambda x: x[unroll_steps:], xs)

    # carry shapes recorded per markov phase; used at the end to restore
    # the sequential-semantics shape of the final carry
    carry_shapes = []

    def body_fn(wrapped_carry, x, prefix=None):
        i, rng_key, carry = wrapped_carry
        # NB: this local `init` (are we still in the unrolled phase?)
        # shadows the outer `init` carry argument.
        init = True if (not_jax_tracer(i) and i in range(unroll_steps)) else False
        rng_key, subkey = random.split(rng_key) if rng_key is not None else (None, None)

        # we need to tell unconstrained messenger in potential energy computation
        # that only the item at time `i` is needed when transforming
        fn = handlers.infer_config(f, config_fn=lambda msg: {"_scan_current_index": i})

        seeded_fn = handlers.seed(fn, subkey) if subkey is not None else fn
        for subs_type, subs_map in substitute_stack:
            subs_fn = partial(_subs_wrapper, subs_map, i, length)
            if subs_type == "condition":
                seeded_fn = handlers.condition(seeded_fn, condition_fn=subs_fn)
            elif subs_type == "substitute":
                seeded_fn = handlers.substitute(seeded_fn, substitute_fn=subs_fn)

        if init:
            # prefix the site names to match the pattern of the
            # Sarkka-Bilmes product used for the unrolled steps
            with handlers.scope(prefix="_PREV_" * (unroll_steps - i), divider=""):
                new_carry, y = config_enumerate(seeded_fn)(carry, x)
            trace = {}
        else:
            # Like scan_wrapper, we collect the trace of scan's transition function
            # `seeded_fn` here. To put time dimension to the correct position, we need to
            # promote shapes to make `fn` and `value`
            # at each site have the same batch dims (e.g. if `fn.batch_shape = (2, 3)`,
            # and value's batch_shape is (3,), then we promote shape of
            # value so that its batch shape is (1, 3)).
            # Here we will promote `fn` shape first. `value` shape will be promoted after scanned.
            # We don't promote `value` shape here because we need to store carry shape
            # at this step. If we reshape the `value` here, output carry might get wrong shape.
            with _promote_fn_shapes(), packed_trace() as trace:
                new_carry, y = config_enumerate(seeded_fn)(carry, x)

            # store shape of new_carry at a global variable
            if len(carry_shapes) < (history + 1):
                carry_shapes.append([jnp.shape(x) for x in tree_flatten(new_carry)[0]])
            # make new_carry have the same shape as carry
            # FIXME: is this rigorous?
            new_carry = tree_multimap(
                lambda a, b: jnp.reshape(a, jnp.shape(b)), new_carry, carry
            )
        return (i + 1, rng_key, new_carry), (PytreeTrace(trace), y)

    # hide the unrolled "_PREV_*" sites from outer handlers
    with handlers.block(
        hide_fn=lambda site: not site["name"].startswith("_PREV_")
    ), enum(first_available_dim=first_available_dim):
        wrapped_carry = (0, rng_key, init)
        y0s = []
        # We run unroll_steps + 1 where the last step is used for rolling with `lax.scan`
        for i in markov(range(unroll_steps + 1), history=history):
            if i < unroll_steps:
                wrapped_carry, (_, y0) = body_fn(
                    wrapped_carry, tree_map(lambda z: z[i], x0)
                )
                if i > 0:
                    # reshape y1, y2,... to have the same shape as y0
                    y0 = tree_multimap(
                        lambda z0, z: jnp.reshape(z, jnp.shape(z0)), y0s[0], y0
                    )
                y0s.append(y0)
                # shapes of the first `history - 1` steps are not useful to interpret the last carry
                # shape so we don't need to record them here
                if (i >= history - 1) and (len(carry_shapes) < history + 1):
                    # NOTE(review): this appends a *generator*, unlike the
                    # list comprehension used inside body_fn -- confirm
                    # whether a list was intended here.
                    carry_shapes.append(
                        jnp.shape(x) for x in tree_flatten(wrapped_carry[-1])[0]
                    )
            else:
                # this is the last rolling step
                y0s = tree_multimap(lambda *z: jnp.stack(z, axis=0), *y0s)
                # return early if length = unroll_steps
                if length == unroll_steps:
                    return wrapped_carry, (PytreeTrace({}), y0s)
                wrapped_carry = device_put(wrapped_carry)
                wrapped_carry, (pytree_trace, ys) = lax.scan(
                    body_fn, wrapped_carry, xs_, length - unroll_steps, reverse
                )

    first_var = None
    for name, site in pytree_trace.trace.items():
        # currently, we only record sample or deterministic in the trace
        # we don't need to adjust `dim_to_name` for deterministic site
        if site["type"] not in ("sample",):
            continue
        # add `time` dimension, the name will be '_time_{first variable in the trace}'
        if first_var is None:
            first_var = name

        # we haven't promote shapes of values yet during `lax.scan`, so we do it here
        site["value"] = _promote_scanned_value_shapes(site["value"], site["fn"])

        # XXX: site['infer']['dim_to_name'] is not enough to determine leftmost dimension because
        # we don't record 1-size dimensions in this field
        time_dim = -min(
            len(site["fn"].batch_shape), jnp.ndim(site["value"]) - site["fn"].event_dim
        )
        site["infer"]["dim_to_name"][time_dim] = "_time_{}".format(first_var)

    # similar to carry, we need to reshape due to shape alternating in markov
    ys = tree_multimap(
        lambda z0, z: jnp.reshape(z, z.shape[:1] + jnp.shape(z0)[1:]), y0s, ys
    )
    # then join with y0s
    ys = tree_multimap(lambda z0, z: jnp.concatenate([z0, z], axis=0), y0s, ys)
    # we also need to reshape `carry` to match sequential behavior
    i = (length + 1) % (history + 1)
    t, rng_key, carry = wrapped_carry
    carry_shape = carry_shapes[i]
    flatten_carry, treedef = tree_flatten(carry)
    flatten_carry = [
        jnp.reshape(x, t1_shape) for x, t1_shape in zip(flatten_carry, carry_shape)
    ]
    carry = tree_unflatten(treedef, flatten_carry)
    wrapped_carry = (t, rng_key, carry)
    return wrapped_carry, (pytree_trace, ys)
def scan_wrapper(
    f,
    init,
    xs,
    length,
    reverse,
    rng_key=None,
    substitute_stack=None,
    enum=False,
    history=1,
    first_available_dim=None,
):
    """Trace-collecting wrapper around :func:`jax.lax.scan`.

    Runs the transition function ``f`` step by step under numpyro effect
    handlers, recording each step's trace in a :class:`PytreeTrace` so the
    caller (:func:`scan`) can replay the collected sites afterwards.

    :param callable f: transition function ``(carry, x) -> (carry, y)``.
    :param init: initial carry value.
    :param xs: pytree scanned over its leading axis.
    :param length: number of steps; inferred from ``xs`` when ``None``.
    :param bool reverse: scan from the end of ``xs`` when True.
    :param rng_key: optional PRNG key, split once per step to seed ``f``.
    :param substitute_stack: sequence of ``(kind, map)`` pairs with kind
        ``"condition"`` or ``"substitute"``, applied to every step.
        Defaults to an empty list.  (The default used to be the mutable
        literal ``[]``; it is now ``None`` to avoid the shared
        mutable-default-argument pitfall -- behavior is unchanged, since
        the stack was only ever iterated.)
    :param bool enum: delegate to :func:`scan_enum` for discrete-latent
        enumeration support (when ``history > 0``).
    :param int history: Markov history size forwarded to :func:`scan_enum`.
    :param first_available_dim: leftmost enumeration dim for :func:`scan_enum`.
    :return: ``(i, rng_key, carry), (PytreeTrace, ys)``.
    """
    if substitute_stack is None:
        substitute_stack = []
    if length is None:
        length = tree_flatten(xs)[0][0].shape[0]

    if enum and history > 0:
        return scan_enum(
            f,
            init,
            xs,
            length,
            reverse,
            rng_key,
            substitute_stack,
            history,
            first_available_dim,
        )

    def body_fn(wrapped_carry, x):
        i, rng_key, carry = wrapped_carry
        rng_key, subkey = random.split(rng_key) if rng_key is not None else (None, None)

        with handlers.block():
            # we need to tell unconstrained messenger in potential energy computation
            # that only the item at time `i` is needed when transforming
            fn = handlers.infer_config(
                f, config_fn=lambda msg: {"_scan_current_index": i}
            )
            seeded_fn = handlers.seed(fn, subkey) if subkey is not None else fn
            for subs_type, subs_map in substitute_stack:
                subs_fn = partial(_subs_wrapper, subs_map, i, length)
                if subs_type == "condition":
                    seeded_fn = handlers.condition(seeded_fn, condition_fn=subs_fn)
                elif subs_type == "substitute":
                    seeded_fn = handlers.substitute(seeded_fn, substitute_fn=subs_fn)

            # collect this step's trace so scan() can replay the sites
            with handlers.trace() as trace:
                carry, y = seeded_fn(carry, x)

        return (i + 1, rng_key, carry), (PytreeTrace(trace), y)

    wrapped_carry = device_put((0, rng_key, init))
    return lax.scan(body_fn, wrapped_carry, xs, length=length, reverse=reverse)
def scan(f, init, xs, length=None, reverse=False, history=1):
    """
    This primitive scans a function over the leading array axes of
    `xs` while carrying along state. See :func:`jax.lax.scan` for more
    information.

    **Usage**:

    .. doctest::

       >>> import numpy as np
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.contrib.control_flow import scan
       >>>
       >>> def gaussian_hmm(y=None, T=10):
       ...     def transition(x_prev, y_curr):
       ...         x_curr = numpyro.sample('x', dist.Normal(x_prev, 1))
       ...         y_curr = numpyro.sample('y', dist.Normal(x_curr, 1), obs=y_curr)
       ...         return x_curr, (x_curr, y_curr)
       ...
       ...     x0 = numpyro.sample('x_0', dist.Normal(0, 1))
       ...     _, (x, y) = scan(transition, x0, y, length=T)
       ...     return (x, y)
       >>>
       >>> # here we do some quick tests
       >>> with numpyro.handlers.seed(rng_seed=0):
       ...     x, y = gaussian_hmm(np.arange(10.))
       >>> assert x.shape == (10,) and y.shape == (10,)
       >>> assert np.all(y == np.arange(10))
       >>>
       >>> with numpyro.handlers.seed(rng_seed=0):  # generative
       ...     x, y = gaussian_hmm()
       >>> assert x.shape == (10,) and y.shape == (10,)

    .. warning:: This is an experimental utility function that allows users to use
        JAX control flow with NumPyro's effect handlers. Currently, `sample` and
        `deterministic` sites within the scan body `f` are supported. If you notice
        that any effect handlers or distributions are unsupported, please file an issue.

    .. note:: It is ambiguous to align `scan` dimension inside a `plate` context.
        So the following pattern won't be supported

        .. code-block:: python

            with numpyro.plate('N', 10):
                last, ys = scan(f, init, xs)

        All `plate` statements should be put inside `f`. For example, the corresponding
        working code is

        .. code-block:: python

            def g(*args, **kwargs):
                with numpyro.plate('N', 10):
                    return f(*arg, **kwargs)

            last, ys = scan(g, init, xs)

    .. note:: Nested scan is currently not supported.

    .. note:: We can scan over discrete latent variables in `f`. The joint density is
        evaluated using parallel-scan (reference [1]) over time dimension, which
        reduces parallel complexity to `O(log(length))`.

        A :class:`~numpyro.handlers.trace` of `scan` with discrete latent
        variables will contain the following sites:

            + init sites: those sites belong to the first `history` traces of `f`.
              Sites at the `i`-th trace will have name prefixed with
              `'_PREV_' * (2 * history - 1 - i)`.
            + scanned sites: those sites collect the values of the remaining scan
              loop over `f`. An addition time dimension `_time_foo` will be
              added to those sites, where `foo` is the name of the first site
              appeared in `f`.

        Not all transition functions `f` are supported. All of the restrictions from
        Pyro's enumeration tutorial [2] still apply here. In addition, there should
        not have any site outside of `scan` depend on the first output of `scan`
        (the last carry value).

    ** References **

    1. *Temporal Parallelization of Bayesian Smoothers*,
       Simo Sarkka, Angel F. Garcia-Fernandez
       (https://arxiv.org/abs/1905.13002)

    2. *Inference with Discrete Latent Variables*
       (http://pyro.ai/examples/enumeration.html#Dependencies-among-plates)

    :param callable f: a function to be scanned.
    :param init: the initial carrying state
    :param xs: the values over which we scan along the leading axis. This can
        be any JAX pytree (e.g. list/dict of arrays).
    :param length: optional value specifying the length of `xs`
        but can be used when `xs` is an empty pytree (e.g. None)
    :param bool reverse: optional boolean specifying whether to run the scan iteration
        forward (the default) or in reverse
    :param int history: The number of previous contexts visible from the current context.
        Defaults to 1. If zero, this is similar to :class:`numpyro.plate`.
    :return: output of scan, quoted from :func:`jax.lax.scan` docs:
        "pair of type (c, [b]) where the first element represents the final loop
        carry value and the second element represents the stacked outputs of the
        second output of f when scanned over the leading axis of the inputs".
    """
    # if there are no active Messengers, we just run and return it as expected:
    if not _PYRO_STACK:
        (length, rng_key, carry), (pytree_trace, ys) = scan_wrapper(
            f, init, xs, length=length, reverse=reverse
        )
    else:
        # Otherwise, we initialize a message...
        initial_msg = {
            "type": "control_flow",
            "fn": scan_wrapper,
            "args": (f, init, xs, length, reverse),
            "kwargs": {"rng_key": None, "substitute_stack": [], "history": history},
            "value": None,
        }

        # ...and use apply_stack to send it to the Messengers
        msg = apply_stack(initial_msg)
        (length, rng_key, carry), (pytree_trace, ys) = msg["value"]

        # Replay the sites collected inside the scan through the handler
        # stack, so outer handlers (trace, replay, ...) see them.
        if not msg["kwargs"].get("enum", False):
            for msg in pytree_trace.trace.values():
                apply_stack(msg)
        else:
            from numpyro.contrib.funsor import to_funsor
            from numpyro.contrib.funsor.enum_messenger import LocalNamedMessenger

            for msg in pytree_trace.trace.values():
                with LocalNamedMessenger():
                    # register the site's named dimensions (including the
                    # _time_* dim added by scan_enum) before replaying
                    dim_to_name = msg["infer"].get("dim_to_name")
                    to_funsor(
                        msg["value"],
                        dim_to_name=OrderedDict(
                            [(k, dim_to_name[k]) for k in sorted(dim_to_name)]
                        ),
                    )
                    apply_stack(msg)

    return carry, ys
| [
"noreply@github.com"
] | zeta1999.noreply@github.com |
24e705f289cc6429a70f3dce754c533a3befe52e | 991d0b40a9ddb5ea6a72e3c018a74c6135792909 | /freenodejobs/admin/views.py | 9d6fb0fbcfa91721fbec85b8e0219cf6646f0888 | [
"MIT"
] | permissive | freenode/freenodejobs | 9afe699713efb915f5008c1ee2299a25604ab351 | 235388c88ac6f984f36cd20074542e21369bcc8b | refs/heads/master | 2021-06-11T05:09:14.255759 | 2019-10-22T13:46:44 | 2019-10-22T13:46:44 | 128,451,423 | 4 | 5 | MIT | 2021-03-18T20:31:54 | 2018-04-06T19:16:05 | JavaScript | UTF-8 | Python | false | false | 2,306 | py | from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from freenodejobs.jobs.enums import StateEnum
from freenodejobs.utils.paginator import AutoPaginator
from freenodejobs.utils.decorators import staff_required
from freenodejobs.jobs.models import Job
from .forms import ApproveForm, RejectForm, RemoveForm
@staff_required
def view(request, state_slug=''):
    """List jobs in a given moderation state.

    Unknown (or empty) state slugs redirect to the
    "waiting for approval" listing.
    """
    slug_key = state_slug.upper()
    if slug_key not in StateEnum.__members__:
        default_slug = StateEnum.WAITING_FOR_APPROVAL.name.lower()
        return redirect('admin:view', default_slug)
    state = StateEnum[slug_key]

    paginator = AutoPaginator(request, Job.objects.filter(state=state), 20)
    context = {
        'page': paginator.current_page(),
        'state': state,
        'by_state': Job.objects.by_state(),
    }
    return render(request, 'admin/view.html', context)
def _moderate_job(request, slug, form_class, template_name, success_message):
    """Shared GET/POST handler for the approve/reject/remove views.

    The three moderation actions were byte-for-byte copies of each other
    apart from the form class, template and flash message; factoring them
    through this helper keeps them consistent.

    On GET (or an invalid POST) renders ``template_name`` with the job and
    an unbound/bound form; on a valid POST applies the form with the
    acting user, flashes ``success_message`` and redirects to the list.
    """
    job = get_object_or_404(Job, slug=slug)

    if request.method == 'POST':
        form = form_class(job, request.POST)

        if form.is_valid():
            form.save(request.user)
            messages.success(request, success_message)
            return redirect('admin:view')
    else:
        form = form_class(job)

    return render(request, template_name, {
        'job': job,
        'form': form,
    })


@staff_required
def approve(request, slug):
    """Approve a submitted job, or show the approval form on GET."""
    return _moderate_job(
        request, slug, ApproveForm, 'admin/approve.html', "Job was approved.",
    )


@staff_required
def reject(request, slug):
    """Reject a submitted job, or show the rejection form on GET."""
    return _moderate_job(
        request, slug, RejectForm, 'admin/reject.html', "Job was rejected.",
    )


@staff_required
def remove(request, slug):
    """Remove a published job, or show the removal form on GET."""
    return _moderate_job(
        request, slug, RemoveForm, 'admin/remove.html', "Job was removed.",
    )
| [
"chris@chris-lamb.co.uk"
] | chris@chris-lamb.co.uk |
86b92b8f212a1e0ccacfc949fefe1da3ffb5b062 | 6dd5bfe305bfc8d7fccf1f9bd6b3ec2250fc574c | /tensorflow_probability/python/bijectors/cholesky_outer_product_test.py | 12b437fe8c7502373507a9937c4af664181bef2f | [
"Apache-2.0"
] | permissive | snehil03july/probability | dd38cf7abba01b6702362476150d67092ce754b2 | 5f576230f1e261a823e20a49c442ff38c8f381d3 | refs/heads/master | 2020-03-28T10:24:46.378464 | 2018-09-08T22:34:22 | 2018-09-08T22:34:25 | 148,106,347 | 1 | 0 | Apache-2.0 | 2018-09-10T06:01:26 | 2018-09-10T06:01:26 | null | UTF-8 | Python | false | false | 6,045 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python import bijectors as tfb
class CholeskyOuterProductBijectorTest(tf.test.TestCase):
  """Tests the correctness of the Y = X @ X.T transformation."""
  # NOTE(review): these tests use TF1-era graph-mode APIs
  # (self.test_session, tf.placeholder, Session.run); they will not run
  # under TF2 eager execution without tf.compat.v1 -- confirm target TF.
  def testBijectorMatrix(self):
    # Round-trips forward/inverse on two 2x2 lower-triangular inputs and
    # checks the log-det Jacobians against hand-derived differentials.
    with self.test_session():
      bijector = tfb.CholeskyOuterProduct(validate_args=True)
      self.assertEqual("cholesky_outer_product", bijector.name)
      x = [[[1., 0], [2, 1]], [[np.sqrt(2.), 0], [np.sqrt(8.), 1]]]
      y = np.matmul(x, np.transpose(x, axes=(0, 2, 1)))
      # Fairly easy to compute differentials since we have 2x2.
      dx_dy = [[[2. * 1, 0, 0],
                [2, 1, 0],
                [0, 2 * 2, 2 * 1]],
               [[2 * np.sqrt(2.), 0, 0],
                [np.sqrt(8.), np.sqrt(2.), 0],
                [0, 2 * np.sqrt(8.), 2 * 1]]]
      # Inverse log-det = -sum(log(diag of the forward Jacobian)).
      ildj = -np.sum(
          np.log(np.asarray(dx_dy).diagonal(
              offset=0, axis1=1, axis2=2)),
          axis=1)
      self.assertAllEqual((2, 2, 2), bijector.forward(x).get_shape())
      self.assertAllEqual((2, 2, 2), bijector.inverse(y).get_shape())
      self.assertAllClose(y, self.evaluate(bijector.forward(x)))
      self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
      self.assertAllClose(
          ildj,
          self.evaluate(
              bijector.inverse_log_det_jacobian(
                  y, event_ndims=2)), atol=0., rtol=1e-7)
      # Forward and inverse log-dets must be negatives of each other.
      self.assertAllClose(
          self.evaluate(-bijector.inverse_log_det_jacobian(
              y, event_ndims=2)),
          self.evaluate(bijector.forward_log_det_jacobian(
              x, event_ndims=2)),
          atol=0.,
          rtol=1e-7)
  def testNoBatchStaticJacobian(self):
    x = np.eye(2)
    bijector = tfb.CholeskyOuterProduct()
    # The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
    self.assertAllClose(
        np.log(4),
        self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=2)))
  def testNoBatchDynamicJacobian(self):
    # Same check as above but with a shape-unknown placeholder input.
    x = np.eye(2)
    bijector = tfb.CholeskyOuterProduct()
    x_pl = tf.placeholder(tf.float32)
    with self.test_session():
      log_det_jacobian = bijector.forward_log_det_jacobian(x_pl, event_ndims=2)
      # The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
      self.assertAllClose(
          np.log(4),
          log_det_jacobian.eval({x_pl: x}))
  def testNoBatchStatic(self):
    # Single (unbatched) matrix with statically known shapes.
    x = np.array([[1., 0], [2, 1]])  # np.linalg.cholesky(y)
    y = np.array([[1., 2], [2, 5]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      y_actual = tfb.CholeskyOuterProduct().forward(x=x)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual])
      self.assertAllEqual([2, 2], y_actual.get_shape())
      self.assertAllEqual([2, 2], x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
  def testNoBatchDeferred(self):
    # As testNoBatchStatic, but shapes are deferred via placeholders.
    x = np.array([[1., 0], [2, 1]])  # np.linalg.cholesky(y)
    y = np.array([[1., 2], [2, 5]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      x_pl = tf.placeholder(tf.float32)
      y_pl = tf.placeholder(tf.float32)
      y_actual = tfb.CholeskyOuterProduct().forward(x=x_pl)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y_pl)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual],
                                        feed_dict={x_pl: x, y_pl: y})
      # Shapes are unknown at graph-construction time.
      self.assertEqual(None, y_actual.get_shape())
      self.assertEqual(None, x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
  def testBatchStatic(self):
    # Batch of two matrices, statically known shapes.
    x = np.array([[[1., 0],
                   [2, 1]],
                  [[3., 0],
                   [1, 2]]])  # np.linalg.cholesky(y)
    y = np.array([[[1., 2],
                   [2, 5]],
                  [[9., 3],
                   [3, 5]]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      y_actual = tfb.CholeskyOuterProduct().forward(x=x)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual])
      self.assertEqual([2, 2, 2], y_actual.get_shape())
      self.assertEqual([2, 2, 2], x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
  def testBatchDeferred(self):
    # Batch of two matrices with deferred (placeholder) shapes.
    x = np.array([[[1., 0],
                   [2, 1]],
                  [[3., 0],
                   [1, 2]]])  # np.linalg.cholesky(y)
    y = np.array([[[1., 2],
                   [2, 5]],
                  [[9., 3],
                   [3, 5]]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      x_pl = tf.placeholder(tf.float32)
      y_pl = tf.placeholder(tf.float32)
      y_actual = tfb.CholeskyOuterProduct().forward(x=x_pl)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y_pl)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual],
                                        feed_dict={x_pl: x, y_pl: y})
      self.assertEqual(None, y_actual.get_shape())
      self.assertEqual(None, x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
4989526cf06ccac21cea11cc7c61c481bd7c4e7a | 74bc48ba64859a63855d204f1efd31eca47a223f | /Planet/3000.Prav_vgg16_01.py | b0e04eacff9cf7bbf863c935f3498c951b3fa0f1 | [] | no_license | PraveenAdepu/kaggle_competitions | 4c53d71af12a615d5ee5f34e5857cbd0fac7bc3c | ed0111bcecbe5be4529a2a5be2ce4c6912729770 | refs/heads/master | 2020-09-02T15:29:51.885013 | 2020-04-09T01:50:55 | 2020-04-09T01:50:55 | 219,248,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,500 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 20:02:03 2017
@author: SriPrav
"""
import numpy as np
np.random.seed(2017)
import os
import glob
import cv2
import datetime
import pandas as pd
import time
import warnings
warnings.filterwarnings("ignore")
from sklearn.cross_validation import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten, Activation
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping,Callback, ModelCheckpoint
from keras.utils import np_utils
from sklearn.metrics import log_loss
from keras.layers.normalization import BatchNormalization
from keras import __version__ as keras_version
from keras.layers import Input, merge, Reshape
from keras.models import Model
from keras import backend as K
from Models.scale_layer import Scale
from sklearn.metrics import fbeta_score
# Machine-specific experiment paths.
# NOTE(review): hard-coded Windows paths; consider config/CLI arguments.
inDir = 'C:/Users/SriPrav/Documents/R/27Planet'
vgg16_weights = 'C:/Users/SriPrav/Documents/R' + '/imagenet_models/vgg16_weights.h5'
MODEL_WEIGHTS_FILE = inDir + '/vgg16_CCN01_weights.h5'
train_file = inDir + "/input/train_images.csv"
test_file = inDir + "/input/test_images.csv"
test_additional_file = inDir + "/input/test_additional_images.csv"
# Load image-list CSVs; the shapes in the trailing comments were observed
# on a previous run of this script.
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
test_additional_df = pd.read_csv(test_additional_file)
print(train_df.shape) # (40479, 4)
print(test_df.shape) # (40669, 2)
print(test_additional_df.shape) # (20522, 2)
# Concatenate the two test splits so predictions cover both at once.
test_all = pd.concat([test_df,test_additional_df])
print(test_all.shape) # (61191, 2)
def vgg16_model(img_rows, img_cols, channel=3, num_classes=None):
    """VGG 16 Model for Keras
    Model Schema is based on
    https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
    ImageNet Pretrained Weights
    https://drive.google.com/file/d/0Bz7KyqmuGsilT0J5dmRCM0ROVHc/view?usp=sharing
    Parameters:
      img_rows, img_cols - resolution of inputs
      channel - 1 for grayscale, 3 for color
      num_classes - number of categories for our classification task
    """
    # Input is channels-first: (channel, rows, cols) -- requires Keras
    # image_dim_ordering/'channels_first' ('th') configuration.
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(channel, img_rows, img_cols)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    # Add Fully Connected Layer
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))
    # Loads ImageNet pre-trained data
    # Weight file path comes from the module-level `vgg16_weights` global.
    model.load_weights(vgg16_weights)
    # Truncate and replace softmax layer for transfer learning
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    # NOTE(review): the new head uses softmax, but the caller trains with
    # binary_crossentropy for 17 multi-label classes -- sigmoid is the
    # usual choice for multi-label; confirm this is intentional.
    model.add(Dense(num_classes, activation='softmax'))
    # Uncomment below to set the first 10 layers to non-trainable (weights will not be updated)
    #for layer in model.layers[:10]:
    #    layer.trainable = False
    # Learning rate is changed to 0.001
    return model
# Input geometry and training verbosity.
ROWS = 224
COLUMNS = 224
CHANNELS = 3
VERBOSEFLAG = 2
# Pre-extracted image tensors/targets saved by an earlier preprocessing step.
train_data_224_3 = np.load(inDir +"/input/train_data_224_3.npy")
train_target_224_3 = np.load(inDir +"/input/train_target_224_3.npy")
train_id_224_3 = np.load(inDir +"/input/train_id_224_3.npy")
test_data_224_3 = np.load(inDir +"/input/test_data_224_3.npy")
test_id_224_3 = np.load(inDir +"/input/test_id_224_3.npy")
train_data_224_3 = train_data_224_3.astype('float32')
#train_data_224_3 = train_data_224_3 / 255
## check mean pixel value
# Per-channel mean subtraction; these are the Caffe/VGG ImageNet channel
# means. NOTE(review): assumes channels are in BGR order -- confirm how
# the .npy arrays were produced.
mean_pixel = [103.939, 116.779, 123.68]
for c in range(3):
    train_data_224_3[:, c, :, :] = train_data_224_3[:, c, :, :] - mean_pixel[c]
# train_data /= 255
test_data_224_3 = test_data_224_3.astype('float32')
#test_data_224_3 = test_data_224_3 / 255
for c in range(3):
    test_data_224_3[:, c, :, :] = test_data_224_3[:, c, :, :] - mean_pixel[c]
# Training hyper-parameters (nb_epoch is the Keras 1.x keyword name).
batch_size = 16
nb_epoch = 25
random_state = 2017
def train_nn(i):
    # Train one cross-validation fold of the VGG16 model, then write
    # out-of-fold validation predictions and test predictions to CSV.
    # NOTE(review): depends on module-level globals (train_df,
    # train_data_224_3, train_target_224_3, test_data_224_3, test_all,
    # batch_size, nb_epoch, MODEL_WEIGHTS_FILE); every fold reuses the
    # same checkpoint file, so folds must not run concurrently.
    trainindex = train_df[train_df['CVindices'] != i].index.tolist()
    valindex = train_df[train_df['CVindices'] == i].index.tolist()
    X_val_df = train_df.iloc[valindex,:]
    X_build , X_valid = train_data_224_3[trainindex,:], train_data_224_3[valindex,:]
    y_build , y_valid = train_target_224_3[trainindex,:], train_target_224_3[valindex,:]
    print('Split train: ', len(X_build), len(y_build))
    print('Split valid: ', len(X_valid), len(y_valid))
    model = vgg16_model(ROWS, COLUMNS, CHANNELS, num_classes=17)
    # callbacks = [
    #     EarlyStopping(monitor='val_loss', patience=3, verbose=VERBOSEFLAG),
    #     ]
    # Keep only the best-val_loss weights on disk.
    callbacks = [ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_loss', save_best_only=True, verbose=1)]
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])#'categorical_crossentropy'
    # model.compile(loss='categorical_crossentropy', optimizer="adadelta", \
    #               metrics=["accuracy"])
    model.fit(X_build, y_build, batch_size=batch_size, nb_epoch=nb_epoch,
              shuffle=True, verbose=VERBOSEFLAG, validation_data=(X_valid, y_valid),
              callbacks=callbacks
              )
    # Restore the best checkpoint before predicting.
    model.load_weights(MODEL_WEIGHTS_FILE)
    pred_cv = model.predict(X_valid, verbose=1)
    # F2 score at a fixed 0.2 probability threshold (competition metric).
    print('F2 Score : ',fbeta_score(y_valid, np.array(pred_cv) > 0.2, beta=2, average='samples'))
    pred_cv = pd.DataFrame(pred_cv)
    pred_cv.head()
    pred_cv.columns = ["slash_burn","clear","blooming","primary","cloudy","conventional_mine","water","haze","cultivation","partly_cloudy","artisinal_mine","habitation","bare_ground","blow_down","agriculture","road","selective_logging"]
    pred_cv["image_name"] = X_val_df.image_name.values
    sub_valfile = inDir + '/submissions/Prav.vgg16_01.fold' + str(i) + '.csv'
    pred_cv = pred_cv[["image_name","slash_burn","clear","blooming","primary","cloudy","conventional_mine","water","haze","cultivation","partly_cloudy","artisinal_mine","habitation","bare_ground","blow_down","agriculture","road","selective_logging"]]
    pred_cv.to_csv(sub_valfile, index=False)
    # Predict on the combined (test + additional test) image set.
    pred_test = model.predict(test_data_224_3,verbose=1)
    pred_test = pd.DataFrame(pred_test)
    pred_test.columns = ["slash_burn","clear","blooming","primary","cloudy","conventional_mine","water","haze","cultivation","partly_cloudy","artisinal_mine","habitation","bare_ground","blow_down","agriculture","road","selective_logging"]
    pred_test["image_name"] = test_all.image_name.values
    pred_test = pred_test[["image_name","slash_burn","clear","blooming","primary","cloudy","conventional_mine","water","haze","cultivation","partly_cloudy","artisinal_mine","habitation","bare_ground","blow_down","agriculture","road","selective_logging"]]
    sub_file = inDir + '/submissions/Prav.vgg16_01.fold' + str(i) + '-test'+'.csv'
    pred_test.to_csv(sub_file, index=False)
# NOTE(review): fold index 10 -- presumably a hold-out value of the
# CVindices column; confirm against how CVindices was assigned.
i = 10
train_nn(i)
| [
"padepu@wesfarmers.com.au"
] | padepu@wesfarmers.com.au |
59225ea2848e31f2eec61a545e3c036792b4f7a2 | 6db515644769c94166d2023b05c1f5ea57d3df51 | /blog/migrations/0001_initial.py | 090c4757842dddd9a2cb5656d58f7707b24683fb | [] | no_license | toastding/footprint | fe7da2340e826438d1cb17d18a5b1bdf2018d8a0 | e9af8163706efdce8e5732e9dfaedd6ecb2fb445 | refs/heads/master | 2022-11-17T22:38:40.539558 | 2020-07-12T04:43:37 | 2020-07-12T04:43:37 | 278,774,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | # Generated by Django 3.0.8 on 2020-07-11 08:56
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.8: initial schema for the blog app
    # (Blog, BlogComment, BlogAuthor). Avoid hand-editing generated
    # migrations; create a follow-up migration for schema changes.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(help_text='Enter your blog text here.', max_length=2000)),
                ('post_date', models.DateField(default=datetime.date.today)),
            ],
            options={
                'ordering': ['-post_date'],
            },
        ),
        migrations.CreateModel(
            name='BlogComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(help_text='Enter comment about blog here..', max_length=1000)),
                ('post_date', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Blog')),
            ],
            options={
                'ordering': ['post_date'],
            },
        ),
        migrations.CreateModel(
            name='BlogAuthor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(help_text='Enter your bio details here.', max_length=400)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['user', 'bio'],
            },
        ),
        migrations.AddField(
            model_name='blog',
            name='author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='blog.BlogAuthor'),
        ),
    ]
| [
"ding02211995@gmail.com"
] | ding02211995@gmail.com |
9f08f99d478449a90db4b6dd7d4a9e87595f22d8 | 1316cd6763e784811c769c1de577235c921af0de | /Widgets/Striptool/setup.py | 3766198b8ae55d1005c727f74a195f038871b27f | [] | no_license | VELA-CLARA-software/Software | a6fb6b848584e5893fd6939a447d23134ce636cc | 2e2a88ac0b2b03a495c868d2e11e6481e05097c3 | refs/heads/master | 2023-02-05T07:40:58.260798 | 2023-01-27T09:39:09 | 2023-01-27T09:39:09 | 69,860,536 | 7 | 3 | null | 2021-04-07T14:17:07 | 2016-10-03T10:20:46 | Mathematica | UTF-8 | Python | false | false | 312 | py | from distutils.core import setup
# Package metadata for the Striptool PyQt4 strip-chart widget.
# NOTE(review): distutils.core.setup does not honour install_requires
# (that keyword is a setuptools feature) -- confirm the intended tool.
setup(
    name='Striptool',
    version='1.1',
    packages=[''],
    url='',
    license='',
    author='jkj62',
    author_email='james.jones@stfc.ac.uk',
    description='Strip tool widget for PyQt4',
    install_requires=['pyqtgraph>=0.10.0','numpy','peakutils>=1.0.3'],
)
| [
"james.jones@stfc.ac.uk"
] | james.jones@stfc.ac.uk |
a508e4bb0d77eab214532f5da0b2c7c0c380210a | c068f19f14749c7e29a5450871da81d0e4b57348 | /inasilentway/collection.py | d0443b44fd449af559bde16b95b272f8985e3139 | [] | no_license | davidmiller/inasilentway | 42b94adea60143a70e7fa82ecd8504fa6a1142f1 | c88ccd1cc0f79aa167f8a2ff082a20a043d64556 | refs/heads/master | 2022-12-10T11:47:51.052379 | 2021-01-22T12:41:33 | 2021-01-22T12:41:33 | 161,032,907 | 0 | 0 | null | 2022-12-08T01:29:25 | 2018-12-09T11:38:22 | HTML | UTF-8 | Python | false | false | 5,074 | py | # """
# Collection utilities
# """
# import pickle
# import time
# import ffs
# import discogs_client as discogs
# from inasilentway import utils
# HERE = ffs.Path.here()
# CACHE_PATH = HERE / '../data/collection.pickle'
# def get_collection():
# if CACHE_PATH:
# with open(CACHE_PATH.abspath, 'rb') as fh:
# return pickle.load(fh)
# else:
# print("No local record cache larry :(")
# def save_record_from_discogs_data(record):
# """
# Given a Discogs record instance, save it to our database
# """
# from inasilentway import models
# artists = []
# for artist in record.release.artists:
# art, _ = models.Artist.objects.get_or_create(discogs_id=artist.id)
# art.name = artist.name
# try:
# art.images = artist.images
# art.url = artist.url
# art.profile = artist.profile
# art.urls = artist.urls
# except discogs.exceptions.HTTPError:
# pass # 404 on the artist images happens sometimes apparently?
# art.save()
# artists.append(art)
# label, _ = models.Label.objects.get_or_create(
# discogs_id=record.release.labels[0].id)
# label.name = record.release.labels[0].name
# label.save()
# rec, _ = models.Record.objects.get_or_create(discogs_id=record.release.id)
# for artist in artists:
# rec.artist.add(artist)
# for genre in record.release.genres:
# g, _ = models.Genre.objects.get_or_create(name=genre)
# rec.genres.add(g)
# if record.release.styles:
# for style in record.release.styles:
# s, _ = models.Style.objects.get_or_create(name=style)
# rec.styles.add(s)
# api = discogs.Client(
# 'inasilentway',
# user_token='PYffmSZeeqHUYaMWMEjwGhqfSWtOBFcPOggoixmD'
# )
# api_release = api.release(rec.discogs_id)
# rec.thumb = api_release.thumb
# rec.label = label
# rec.title = record.release.title
# rec.year = record.release.year
# rec.images = record.release.images
# rec.country = record.release.country
# rec.notes = record.release.notes
# rec.formats = '{}, {}'.format(
# record.release.formats[0]['name'],
# ' '.join(record.release.formats[0]['descriptions'])
# )
# rec.url = record.release.url
# rec.status = record.release.status
# rec.save()
# # Tracks don't have an ID so kill them all
# models.Track.objects.filter(record=rec).delete()
# for track in record.release.tracklist:
# models.Track(
# record=rec,
# duration=track.duration,
# position=track.position,
# title=track.title
# ).save()
# return rec
# """
# Commandline entrypoints from ./shhh (python -m inasilentway )
# """
# def download(args):
# """
# Commandline entrypoint to download the collection if we
# have less records in it than in our local cache
# TODO: Convert to Django?
# """
# records = get_collection()
# if records is None:
# records = []
# api = discogs.Client('Inasilentway')
# me = api.user('thatdavidmiller')
# if len(records) != me.num_collection:
# print('fetching data...')
# records = [r for r in me.collection_folders[0].releases]
# print('fetched record data')
# print('{} records'.format(len(records)))
# with open(CACHE_PATH.abspath, 'wb') as fh:
# pickle.dump(records, fh)
# print('written record data to local cache')
# def load_django(args):
# """
# Commandline entrypoint to load our collection into Django
# """
# utils.setup_django()
# download(None)
# collection = get_collection()
# ADDED = 0
# def add_record(record):
# rec = save_record_from_discogs_data(record)
# print('Added {}'.format(rec.title))
# from inasilentway import models
# for record in collection:
# print('Looking at {}'.format(record.release.title))
# # TODO: Undo this when we've figured out how to not freak out
# # the discogs API limits
# if models.Record.objects.filter(discogs_id=record.release.id).exists():
# print(
# 'Found {} in local database, skipping'.format(
# record.release.title
# )
# )
# continue
# try:
# add_record(record)
# ADDED += 1
# if ADDED == 100:
# break
# except discogs.exceptions.HTTPError:
# print(
# "Got a quick requests warning, sleep for a bit and retry once"
# )
# time.sleep(60)
# add_record(record)
# ADDED += 1
# if ADDED == 100:
# break
# # Prevent HTTPError: 429: You are making requests too quickly.
# time.sleep(2)
# print('Count: {}'.format(models.Record.objects.count()))
| [
"david@deadpansincerity.com"
] | david@deadpansincerity.com |
f4dc7d92176b117ca06704933db0d564697b3c06 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/pandas/core/reshape/pivot.py | 15602b58b926514bef78fdad232eb4e3806945f4 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,217 | py | import numpy as np
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_scalar
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.core.common as com
from pandas.core.frame import _shared_docs
from pandas.core.groupby import Grouper
from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
from pandas.core.reshape.concat import concat
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
# Note: We need to make sure `frame` is imported before `pivot`, otherwise
# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot_table"], indents=1)
def pivot_table(
    data,
    values=None,
    index=None,
    columns=None,
    aggfunc="mean",
    fill_value=None,
    margins=False,
    dropna=True,
    margins_name="All",
    observed=False,
):
    # Normalize the row/column grouping specs into lists of keys.
    index = _convert_by(index)
    columns = _convert_by(columns)
    # A list of aggfuncs: build one pivot table per function and place
    # them side by side, keyed by each function's name.
    if isinstance(aggfunc, list):
        pieces = []
        keys = []
        for func in aggfunc:
            table = pivot_table(
                data,
                values=values,
                index=index,
                columns=columns,
                fill_value=fill_value,
                aggfunc=func,
                margins=margins,
                dropna=dropna,
                margins_name=margins_name,
                observed=observed,
            )
            pieces.append(table)
            keys.append(getattr(func, "__name__", func))
        return concat(pieces, keys=keys, axis=1)
    keys = index + columns
    # Explicit values: validate and trim `data` down to the columns
    # actually used for grouping/aggregation.
    values_passed = values is not None
    if values_passed:
        if is_list_like(values):
            values_multi = True
            values = list(values)
        else:
            values_multi = False
            values = [values]
        # GH14938 Make sure value labels are in data
        for i in values:
            if i not in data:
                raise KeyError(i)
        to_filter = []
        for x in keys + values:
            if isinstance(x, Grouper):
                x = x.key
            try:
                if x in data:
                    to_filter.append(x)
            except TypeError:
                # unhashable key (e.g. an array grouper) -- keep the frame whole
                pass
        if len(to_filter) < len(data.columns):
            data = data[to_filter]
    else:
        # No values given: aggregate every column that is not a grouping key.
        values = data.columns
        for key in keys:
            try:
                values = values.drop(key)
            except (TypeError, ValueError, KeyError):
                pass
        values = list(values)
    grouped = data.groupby(keys, observed=observed)
    agged = grouped.agg(aggfunc)
    if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
        agged = agged.dropna(how="all")
    # gh-21133
    # we want to down cast if
    # the original values are ints
    # as we grouped with a NaN value
    # and then dropped, coercing to floats
    for v in values:
        if (
            v in data
            and is_integer_dtype(data[v])
            and v in agged
            and not is_integer_dtype(agged[v])
        ):
            agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
    table = agged
    # Move the `columns` keys from the grouped row index into the column axis.
    if table.index.nlevels > 1:
        # Related GH #17123
        # If index_names are integers, determine whether the integers refer
        # to the level position or name.
        index_names = agged.index.names[: len(index)]
        to_unstack = []
        for i in range(len(index), len(keys)):
            name = agged.index.names[i]
            if name is None or name in index_names:
                to_unstack.append(i)
            else:
                to_unstack.append(name)
        table = agged.unstack(to_unstack)
    # dropna=False: reindex onto the full cartesian product of level
    # values so empty group combinations appear as NaN rows/columns.
    if not dropna:
        from pandas import MultiIndex
        if table.index.nlevels > 1:
            m = MultiIndex.from_arrays(
                cartesian_product(table.index.levels), names=table.index.names
            )
            table = table.reindex(m, axis=0)
        if table.columns.nlevels > 1:
            m = MultiIndex.from_arrays(
                cartesian_product(table.columns.levels), names=table.columns.names
            )
            table = table.reindex(m, axis=1)
    if isinstance(table, ABCDataFrame):
        table = table.sort_index(axis=1)
    if fill_value is not None:
        table = table.fillna(value=fill_value, downcast="infer")
    # Append margin (subtotal) row/column if requested; margins are
    # computed from rows with no missing values when dropna=True.
    if margins:
        if dropna:
            data = data[data.notna().all(axis=1)]
        table = _add_margins(
            table,
            data,
            values,
            rows=index,
            cols=columns,
            aggfunc=aggfunc,
            observed=dropna,
            margins_name=margins_name,
            fill_value=fill_value,
        )
    # discard the top level
    if (
        values_passed
        and not values_multi
        and not table.empty
        and (table.columns.nlevels > 1)
    ):
        table = table[values[0]]
    if len(index) == 0 and len(columns) > 0:
        table = table.T
    # GH 15193 Make sure empty columns are removed if dropna=True
    if isinstance(table, ABCDataFrame) and dropna:
        table = table.dropna(how="all", axis=1)
    return table
def _add_margins(
    table,
    data,
    values,
    rows,
    cols,
    aggfunc,
    observed=None,
    margins_name="All",
    fill_value=None,
):
    """
    Append the margin (subtotal) row and column, labelled ``margins_name``,
    to an already-pivoted ``table``.

    Raises ValueError if ``margins_name`` is not a string or collides with
    an existing index/column label.
    """
    if not isinstance(margins_name, str):
        raise ValueError("margins_name argument must be a string")
    msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
    for level in table.index.names:
        if margins_name in table.index.get_level_values(level):
            raise ValueError(msg)
    # Overall aggregate of each value column; feeds the bottom-right cell(s).
    grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)
    # could be passed a Series object with no 'columns'
    if hasattr(table, "columns"):
        for level in table.columns.names[1:]:
            if margins_name in table.columns.get_level_values(level):
                raise ValueError(msg)
    # Margin-row label: pad with empty strings to match a MultiIndex depth.
    if len(rows) > 1:
        key = (margins_name,) + ("",) * (len(rows) - 1)
    else:
        key = margins_name
    if not values and isinstance(table, ABCSeries):
        # If there are no values and the table is a series, then there is only
        # one column in the data. Compute grand margin and return it.
        return table.append(Series({key: grand_margin[margins_name]}))
    # The helpers may return a finished table directly (non-tuple) when no
    # further assembly is needed.
    if values:
        marginal_result_set = _generate_marginal_results(
            table,
            data,
            values,
            rows,
            cols,
            aggfunc,
            observed,
            grand_margin,
            margins_name,
        )
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    else:
        marginal_result_set = _generate_marginal_results_without_values(
            table, data, rows, cols, aggfunc, observed, margins_name
        )
        if not isinstance(marginal_result_set, tuple):
            return marginal_result_set
        result, margin_keys, row_margin = marginal_result_set
    row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
    # populate grand margin
    for k in margin_keys:
        if isinstance(k, str):
            row_margin[k] = grand_margin[k]
        else:
            row_margin[k] = grand_margin[k[0]]
    from pandas import DataFrame
    margin_dummy = DataFrame(row_margin, columns=[key]).T
    row_names = result.index.names
    try:
        # Cast the margin row to each result dtype so appending it does not
        # silently upcast integer columns.
        for dtype in set(result.dtypes):
            cols = result.select_dtypes([dtype]).columns
            margin_dummy[cols] = margin_dummy[cols].astype(dtype)
        result = result.append(margin_dummy)
    except TypeError:
        # we cannot reshape, so coerce the axis
        result.index = result.index._to_safe_for_reshape()
        result = result.append(margin_dummy)
    result.index.names = row_names
    return result
def _compute_grand_margin(data, values, aggfunc, margins_name="All"):
if values:
grand_margin = {}
for k, v in data[values].items():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], str):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {margins_name: aggfunc(data.index)}
def _generate_marginal_results(
    table, data, values, rows, cols, aggfunc, observed, grand_margin, margins_name="All"
):
    """
    Build the per-column-group margin columns and the margin row for a
    pivot table that has explicit ``values``.

    Returns ``(result, margin_keys, row_margin)``, or a finished table
    directly when ``cols`` is non-empty but ``rows`` is empty.
    """
    if len(cols) > 0:
        # need to "interleave" the margins
        table_pieces = []
        margin_keys = []
        def _all_key(key):
            # Margin column label under value-column `key`, padded to the
            # column MultiIndex depth.
            return (key, margins_name) + ("",) * (len(cols) - 1)
        if len(rows) > 0:
            # Subtotal of each value column over the row keys only.
            margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc)
            cat_axis = 1
            # Insert one margin column into each top-level column group.
            for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
                all_key = _all_key(key)
                # we are going to mutate this, so need to copy!
                piece = piece.copy()
                try:
                    piece[all_key] = margin[key]
                except TypeError:
                    # we cannot reshape, so coerce the axis
                    piece.set_axis(
                        piece._get_axis(cat_axis)._to_safe_for_reshape(),
                        axis=cat_axis,
                        inplace=True,
                    )
                    piece[all_key] = margin[key]
                table_pieces.append(piece)
                margin_keys.append(all_key)
        else:
            # No row keys: interleave grand-margin scalars along axis 0.
            margin = grand_margin
            cat_axis = 0
            for key, piece in table.groupby(level=0, axis=cat_axis, observed=observed):
                all_key = _all_key(key)
                table_pieces.append(piece)
                table_pieces.append(Series(margin[key], index=[all_key]))
                margin_keys.append(all_key)
        result = concat(table_pieces, axis=cat_axis)
        if len(rows) == 0:
            # Nothing further to assemble -- caller uses this table as-is.
            return result
    else:
        result = table
        margin_keys = table.columns
    if len(cols) > 0:
        # Margin row: aggregate over the column keys, then stack so its
        # index lines up with the result's columns.
        row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc)
        row_margin = row_margin.stack()
        # slight hack
        new_order = [len(cols)] + list(range(len(cols)))
        row_margin.index = row_margin.index.reorder_levels(new_order)
    else:
        row_margin = Series(np.nan, index=result.columns)
    return result, margin_keys, row_margin
def _generate_marginal_results_without_values(
    table, data, rows, cols, aggfunc, observed, margins_name="All"
):
    """
    Margin computation for a pivot table with no explicit ``values``.

    Returns ``(result, margin_keys, row_margin)`` when ``cols`` is empty;
    when ``cols`` is non-empty it mutates ``table`` in place (adding the
    margin column) and returns it directly -- the caller detects the
    non-tuple return and short-circuits.
    """
    if len(cols) > 0:
        # need to "interleave" the margins
        margin_keys = []
        def _all_key():
            if len(cols) == 1:
                return margins_name
            return (margins_name,) + ("",) * (len(cols) - 1)
        if len(rows) > 0:
            margin = data[rows].groupby(rows, observed=observed).apply(aggfunc)
            all_key = _all_key()
            table[all_key] = margin
            result = table
            margin_keys.append(all_key)
        else:
            margin = data.groupby(level=0, axis=0, observed=observed).apply(aggfunc)
            all_key = _all_key()
            table[all_key] = margin
            result = table
            margin_keys.append(all_key)
        return result
    else:
        result = table
        margin_keys = table.columns
    # Only reachable when cols is empty, so the True branch below is dead
    # code; kept as-is to preserve behavior.
    if len(cols):
        row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc)
    else:
        row_margin = Series(np.nan, index=result.columns)
    return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (
is_scalar(by)
or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper))
or hasattr(by, "__call__")
):
by = [by]
else:
by = list(by)
return by
@Substitution("\ndata : DataFrame")
@Appender(_shared_docs["pivot"], indents=1)
def pivot(data, index=None, columns=None, values=None):
    # No values: push index/columns into the frame's index and unstack,
    # carrying every remaining column through the pivot.
    if values is None:
        cols = [columns] if index is None else [index, columns]
        append = index is None
        indexed = data.set_index(cols, append=append)
    else:
        # Build the (index, columns) MultiIndex explicitly so only the
        # requested values column(s) survive the pivot.
        if index is None:
            index = data.index
        else:
            index = data[index]
        index = MultiIndex.from_arrays([index, data[columns]])
        if is_list_like(values) and not isinstance(values, tuple):
            # Exclude tuple because it is seen as a single column name
            indexed = data._constructor(
                data[values].values, index=index, columns=values
            )
        else:
            # Single values column -> a Series (DataFrame after unstack).
            indexed = data._constructor_sliced(data[values].values, index=index)
    # Unstacking the columns level yields the pivoted result.
    return indexed.unstack(columns)
def crosstab(
    index,
    columns,
    values=None,
    rownames=None,
    colnames=None,
    aggfunc=None,
    margins=False,
    margins_name="All",
    dropna=True,
    normalize=False,
):
    """
    Compute a simple cross tabulation of two (or more) factors. By default
    computes a frequency table of the factors unless an array of values and an
    aggregation function are passed.

    Parameters
    ----------
    index : array-like, Series, or list of arrays/Series
        Values to group by in the rows.
    columns : array-like, Series, or list of arrays/Series
        Values to group by in the columns.
    values : array-like, optional
        Array of values to aggregate according to the factors.
        Requires `aggfunc` be specified.
    rownames : sequence, default None
        If passed, must match number of row arrays passed.
    colnames : sequence, default None
        If passed, must match number of column arrays passed.
    aggfunc : function, optional
        If specified, requires `values` be specified as well.
    margins : bool, default False
        Add row/column margins (subtotals).
    margins_name : str, default 'All'
        Name of the row/column that will contain the totals
        when margins is True.

        .. versionadded:: 0.21.0
    dropna : bool, default True
        Do not include columns whose entries are all NaN.
    normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
        Normalize by dividing all values by the sum of values.

        - If passed 'all' or `True`, will normalize over all values.
        - If passed 'index' will normalize over each row.
        - If passed 'columns' will normalize over each column.
        - If margins is `True`, will also normalize margin values.

        .. versionadded:: 0.18.1

    Returns
    -------
    DataFrame
        Cross tabulation of the data.

    See Also
    --------
    DataFrame.pivot : Reshape data based on column values.
    pivot_table : Create a pivot table as a DataFrame.

    Notes
    -----
    Any Series passed will have their name attributes used unless row or column
    names for the cross-tabulation are specified.

    Any input passed containing Categorical data will have **all** of its
    categories included in the cross-tabulation, even if the actual data does
    not contain any instances of a particular category.

    In the event that there aren't overlapping indexes an empty DataFrame will
    be returned.

    Examples
    --------
    >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
    ...               "bar", "bar", "foo", "foo", "foo"], dtype=object)
    >>> b = np.array(["one", "one", "one", "two", "one", "one",
    ...               "one", "two", "two", "two", "one"], dtype=object)
    >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
    ...               "shiny", "dull", "shiny", "shiny", "shiny"],
    ...              dtype=object)
    >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
    b   one        two
    c   dull shiny dull shiny
    a
    bar    1     2    1     0
    foo    2     2    1     2

    Here 'c' and 'f' are not represented in the data and will not be
    shown in the output because dropna is True by default. Set
    dropna=False to preserve categories with no data.

    >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
    >>> pd.crosstab(foo, bar)
    col_0  d  e
    row_0
    a      1  0
    b      0  1
    >>> pd.crosstab(foo, bar, dropna=False)
    col_0  d  e  f
    row_0
    a      1  0  0
    b      0  1  0
    c      0  0  0
    """
    # Wrap scalars/arrays in lists so zip() below works uniformly.
    index = com.maybe_make_list(index)
    columns = com.maybe_make_list(columns)
    # Resolve display names, falling back to row_N / col_N placeholders.
    rownames = _get_names(index, rownames, prefix="row")
    colnames = _get_names(columns, colnames, prefix="col")
    # Combined axis of every input (intersect=True: keep shared labels only).
    common_idx = _get_objs_combined_axis(index + columns, intersect=True, sort=False)
    data = {}
    data.update(zip(rownames, index))
    data.update(zip(colnames, columns))
    # values and aggfunc come as a pair: both or neither.
    if values is None and aggfunc is not None:
        raise ValueError("aggfunc cannot be used without values.")
    if values is not None and aggfunc is None:
        raise ValueError("values cannot be used without an aggfunc.")
    # Local import -- presumably to avoid a circular import; confirm
    # before hoisting to module level.
    from pandas import DataFrame
    df = DataFrame(data, index=common_idx)
    # "__dummy__" is a synthetic values column: a constant for counting
    # when no values are given, otherwise the caller's values for aggfunc.
    if values is None:
        df["__dummy__"] = 0
        kwargs = {"aggfunc": len, "fill_value": 0}
    else:
        df["__dummy__"] = values
        kwargs = {"aggfunc": aggfunc}
    table = df.pivot_table(
        "__dummy__",
        index=rownames,
        columns=colnames,
        margins=margins,
        margins_name=margins_name,
        dropna=dropna,
        **kwargs
    )
    # Post-process
    if normalize is not False:
        table = _normalize(
            table, normalize=normalize, margins=margins, margins_name=margins_name
        )
    return table
def _normalize(table, normalize, margins, margins_name="All"):
    """Normalize a crosstab result over all cells, rows, or columns.

    When ``margins`` is True the margin row/column is stripped off, the
    core table is normalized recursively, and the (separately normalized)
    margins are re-attached afterwards.
    """
    # Map the axis numbers 0/1 onto their string spellings.
    if not isinstance(normalize, (bool, str)):
        axis_subs = {0: "index", 1: "columns"}
        try:
            normalize = axis_subs[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")

    if margins is False:
        # Actual normalizations, keyed by the normalize argument.
        normalizers = {
            "all": lambda x: x / x.sum(axis=1).sum(axis=0),
            "columns": lambda x: x / x.sum(),
            "index": lambda x: x.div(x.sum(axis=1), axis=0),
        }
        normalizers[True] = normalizers["all"]
        try:
            f = normalizers[normalize]
        except KeyError:
            raise ValueError("Not a valid normalize argument")
        return f(table).fillna(0)

    if margins is not True:
        raise ValueError("Not a valid margins argument")

    # keep index and column of pivoted table
    table_index = table.index
    table_columns = table.columns

    # The margin label must sit in the last row and the last column
    # (MultiIndex-aware for the row side), otherwise the table was not
    # produced with these margins.
    if (margins_name not in table.iloc[-1, :].name) | (
        margins_name != table.iloc[:, -1].name
    ):
        raise ValueError("{} not in pivoted DataFrame".format(margins_name))

    column_margin = table.iloc[:-1, -1]
    index_margin = table.iloc[-1, :-1]

    # Normalize the core table (everything but the margins) recursively.
    core = table.iloc[:-1, :-1]
    core = _normalize(core, normalize=normalize, margins=False)

    # Fix the margins themselves and glue them back on.
    if normalize == "columns":
        column_margin = column_margin / column_margin.sum()
        core = concat([core, column_margin], axis=1).fillna(0)
        core.columns = table_columns
    elif normalize == "index":
        index_margin = index_margin / index_margin.sum()
        core = core.append(index_margin).fillna(0)
        core.index = table_index
    elif normalize == "all" or normalize is True:
        column_margin = column_margin / column_margin.sum()
        index_margin = index_margin / index_margin.sum()
        index_margin.loc[margins_name] = 1
        core = concat([core, column_margin], axis=1)
        core = core.append(index_margin).fillna(0)
        core.index = table_index
        core.columns = table_columns
    else:
        raise ValueError("Not a valid normalize argument")
    return core
def _get_names(arrs, names, prefix="row"):
    """Resolve display names for the grouping arrays of a crosstab.

    When ``names`` is None, Series names are used where available and
    positional ``"{prefix}_{i}"`` labels fill the gaps; otherwise the
    supplied sequence is length-checked against ``arrs`` and returned
    as a list.
    """
    if names is None:
        resolved = []
        for pos, arr in enumerate(arrs):
            if isinstance(arr, ABCSeries) and arr.name is not None:
                resolved.append(arr.name)
            else:
                resolved.append("{prefix}_{i}".format(prefix=prefix, i=pos))
        return resolved

    if len(names) != len(arrs):
        raise AssertionError("arrays and names must have the same length")
    # Preserve identity when already a list; otherwise materialize one.
    return names if isinstance(names, list) else list(names)
| [
"msaineti@icloud.com"
] | msaineti@icloud.com |
8571d468f570736fbf2eb36541b40bad5f1b0fee | 1599f9a44e5ec492c019036195a769fa3ed9e04b | /tests/test_api/test_v1/test_views/test_index.py | 652ea7f07f022c95414ee40b62085c394159254c | [] | no_license | Mikaelia/AirBnB_clone_v3 | 63b70a2c8874e9d6d6e60ab3c6b2d1511136303a | f628759076ccdd2e5aecf61c8d079505b4d17412 | refs/heads/master | 2020-03-26T22:31:17.315057 | 2018-10-09T01:10:30 | 2018-10-09T01:10:30 | 145,465,466 | 0 | 0 | null | 2018-08-29T19:19:21 | 2018-08-20T20:09:48 | Python | UTF-8 | Python | false | false | 1,334 | py | #!/usr/bin/python3
"""
Unit Test for api v1 Flask App
"""
import inspect
import pep8
import web_flask
import unittest
from os import stat
import api
module = api.v1.views.index
class TestIndexDocs(unittest.TestCase):
    """Checks docs, PEP8 compliance and permissions of the index view."""

    # Collected once at class-definition time from the imported module.
    all_funcs = inspect.getmembers(module, inspect.isfunction)

    def test_doc_file(self):
        """... documentation for the file"""
        actual = module.__doc__
        self.assertIsNotNone(actual)

    def test_all_function_docs(self):
        """... tests for ALL DOCS for all functions"""
        all_functions = TestIndexDocs.all_funcs
        for function in all_functions:
            self.assertIsNotNone(function[1].__doc__)

    def test_pep8(self):
        """... tests if file conforms to PEP8 Style"""
        pep8style = pep8.StyleGuide(quiet=True)
        errors = pep8style.check_files(['api/v1/views/index.py'])
        self.assertEqual(errors.total_errors, 0, errors.messages)

    def test_file_is_executable(self):
        """... tests if file has owner-execute permission"""
        file_stat = stat('api/v1/views/index.py')
        # 0o100 is S_IXUSR (owner execute).  The previous check,
        # ``int(str(oct(mode))[5:-2]) >= 5``, only compared the owner
        # digit and wrongly accepted non-executable modes such as 0o644.
        self.assertTrue(file_stat.st_mode & 0o100)
if __name__ == '__main__':
    # MAIN TESTS.  ``unittest.main`` was previously referenced without
    # calling it, so running this file executed no tests at all.
    unittest.main()
| [
"328@holbertonschool.com"
] | 328@holbertonschool.com |
f7a3f1a1e14e46defed815dd909775f9fd84d89e | 0d5de943909877c01b485d8a918d8bef0cf9e196 | /plugins/RemoveTriggerArea/PluginRemoveTriggerArea.py | db28cd82e00fc81726f38db0b6d1c935c1c47617 | [
"MIT"
] | permissive | baverman/scribes-goodies | 31e2017d81f04cc01e9738e96ceb19f872a3d280 | f6ebfe62e5103d5337929648109b4e610950bced | refs/heads/master | 2021-01-21T10:13:08.397980 | 2013-09-25T16:33:05 | 2013-09-25T16:33:05 | 854,207 | 2 | 1 | null | 2013-09-25T16:33:05 | 2010-08-22T03:12:39 | Python | UTF-8 | Python | false | false | 802 | py | from scribes.helpers import Trigger, TriggerManager
import subprocess
from gettext import gettext as _
# Plugin metadata -- presumably consumed by the Scribes plugin loader
# (this module is imported by Scribes); confirm against the host editor.
name = "Remove trigger area plugin"
authors = ["Anton Bobrov <bobrov@vl.ru>"]
version = 0.1
# Load the plugin automatically when the editor starts.
autoload = True
# Name of the class below that the loader instantiates.
class_name = "TriggerAreaPlugin"
short_description = "Removes trigger area"
long_description = "Removes trigger area"
# Keyboard trigger (Ctrl+Alt+M) bound to the "show-full-view" action.
trigger = Trigger("show-full-view", "<ctrl><alt>m",
    _("Show editor's fullview"), _("Miscellaneous Operations"))
class TriggerAreaPlugin(object):
    """Connects the module-level trigger and shows the editor's full view."""
    def __init__(self, editor):
        self.editor = editor
        # Register all @trigger-decorated methods of this instance.
        self.triggers = TriggerManager(editor)
        self.triggers.connect_triggers(self)
    @trigger
    def activate(self, sender):
        # Switch the editor to full view.  Returns False -- presumably so
        # further signal handlers still run; confirm against Scribes API.
        self.editor.show_full_view()
        return False
    # Lifecycle hooks required by the plugin interface; nothing to do here.
    def load(self): pass
    def unload(self): pass
| [
"bobrov@vl.ru"
] | bobrov@vl.ru |
ba725d409c9ca8e433e32f76419bbea9c92d6199 | 6cad5c613306789b9bd6387c2e7af02515b1c0ad | /django_document/inheritance/migrations/0002_auto_20171011_0700.py | 0ba7650807e9ff33a1802862fd91abed05693195 | [] | no_license | Isaccchoi/django_document_project | ead5eb7b2e932ae5401d5a3cdb3672d3dfd8f9f5 | 980f25c98f99994e6148af16ed82ae4f12d50870 | refs/heads/master | 2021-05-08T06:12:51.261138 | 2017-10-13T05:14:58 | 2017-10-13T05:14:58 | 106,355,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-11 07:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the School model and a nullable FK to it from Student/Teacher.

    Auto-generated by Django; do not restructure by hand -- Django
    compares migration state against this file.
    """
    dependencies = [
        ('inheritance', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='School',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        # blank/null FKs: existing Student/Teacher rows stay valid.
        migrations.AddField(
            model_name='student',
            name='school',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inheritance.School'),
        ),
        migrations.AddField(
            model_name='teacher',
            name='school',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inheritance.School'),
        ),
    ]
| [
"isaccchoi@naver.com"
] | isaccchoi@naver.com |
720138848aebe7a03865a14abc5960de284e5505 | bf683eb4a6080cf67669de90d1afdad53fccb738 | /Lib/site-packages/phonenumbers/data/region_KZ.py | 62d0715f66f5f56e0453cb8e060ae5324352b8dc | [
"MIT"
] | permissive | mspgeek/Client_Portal | cd513308840aa4203554ebc1160f17f0dd4b17cf | 0267168bb90e8e9c85aecdd715972b9622b82384 | refs/heads/master | 2023-03-07T21:33:22.767108 | 2020-04-08T01:43:19 | 2020-04-08T01:43:19 | 253,946,635 | 6 | 0 | MIT | 2022-12-31T07:01:43 | 2020-04-08T00:43:07 | HTML | UTF-8 | Python | false | false | 1,863 | py | """Auto-generated file, do not edit by hand. KZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Numbering-plan metadata for Kazakhstan (country code 7, intl prefix 810).
# NOTE(review): this file is auto-generated ("do not edit by hand") --
# regenerate from upstream metadata instead of editing patterns below.
PHONE_METADATA_KZ = PhoneMetadata(id='KZ', country_code=7, international_prefix='810',
    general_desc=PhoneNumberDesc(national_number_pattern='33622\\d{5}|(?:7\\d|80)\\d{8}', possible_length=(10,), possible_length_local_only=(5, 6)),
    fixed_line=PhoneNumberDesc(national_number_pattern='(?:33622|7(?:1(?:0(?:[23]\\d|4[0-3]|59|63)|1(?:[23]\\d|4[0-79]|59)|2(?:[23]\\d|59)|3(?:2\\d|3[0-79]|4[0-35-9]|59)|4(?:[24]\\d|3[013-9]|5[1-9])|5(?:2\\d|3[1-9]|4[0-7]|59)|6(?:[2-4]\\d|5[19]|61)|72\\d|8(?:[27]\\d|3[1-46-9]|4[0-5]))|2(?:1(?:[23]\\d|4[46-9]|5[3469])|2(?:2\\d|3[0679]|46|5[12679])|3(?:[2-4]\\d|5[139])|4(?:2\\d|3[1-35-9]|59)|5(?:[23]\\d|4[0-246-8]|59|61)|6(?:2\\d|3[1-9]|4[0-4]|59)|7(?:[2379]\\d|40|5[279])|8(?:[23]\\d|4[0-3]|59)|9(?:2\\d|3[124578]|59))))\\d{5}', example_number='7123456789', possible_length=(10,), possible_length_local_only=(5, 6)),
    mobile=PhoneNumberDesc(national_number_pattern='7(?:0[0-2578]|47|6[02-4]|7[15-8]|85)\\d{7}', example_number='7710009998', possible_length=(10,)),
    toll_free=PhoneNumberDesc(national_number_pattern='800\\d{7}', example_number='8001234567', possible_length=(10,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='809\\d{7}', example_number='8091234567', possible_length=(10,)),
    personal_number=PhoneNumberDesc(national_number_pattern='808\\d{7}', example_number='8081234567', possible_length=(10,)),
    voip=PhoneNumberDesc(national_number_pattern='751\\d{7}', example_number='7511234567', possible_length=(10,)),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='751\\d{7}', possible_length=(10,)),
    preferred_international_prefix='8~10',
    national_prefix='8',
    national_prefix_for_parsing='8',
    leading_digits='33|7')
| [
"kspooner13@yahoo.com"
] | kspooner13@yahoo.com |
958cbde7c2aceabaaef1f3083dec0f27a6c8c624 | b39d72ba5de9d4683041e6b4413f8483c817f821 | /GeneVisualization/ass1/Lib/site-packages/itk/itkMetaConverterBasePython.py | a87a60552c7b70b51183b5adedd5f9c8389b008e | [] | no_license | ssalmaan/DataVisualization | d93a0afe1290e4ea46c3be5718d503c71a6f99a7 | eff072f11337f124681ce08742e1a092033680cc | refs/heads/master | 2021-03-13T05:40:23.679095 | 2020-03-11T21:37:45 | 2020-03-11T21:37:45 | 246,642,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,469 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# --- SWIG runtime shims (generated by SWIG 3.0.8) -----------------------
# Bind the compiled extension module: imp-based discovery on 2.6+/3.x,
# plain import on older interpreters.
if version_info >= (3, 0, 0):
    new_instancemethod = lambda func, inst, cls: _itkMetaConverterBasePython.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod
if version_info >= (2, 6, 0):
    def swig_import_helper():
        # Locate and load the C extension sitting next to this wrapper.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_itkMetaConverterBasePython', [dirname(__file__)])
        except ImportError:
            # Fall back to a normal sys.path import.
            import _itkMetaConverterBasePython
            return _itkMetaConverterBasePython
        if fp is not None:
            try:
                _mod = imp.load_module('_itkMetaConverterBasePython', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _itkMetaConverterBasePython = swig_import_helper()
    del swig_import_helper
else:
    import _itkMetaConverterBasePython
del version_info
# Alias the builtin ``property`` (legacy guard for pre-2.2 interpreters).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG attribute setter: route writes through registered set methods.

    ``thisown`` and ``this`` are handled specially; other names go through
    ``class_type.__swig_setmethods__``.  Unknown names are only stored
    when ``static`` is falsy, otherwise AttributeError is raised.
    """
    if name == "thisown":
        return self.this.own(value)
    if name == "this" and type(value).__name__ == 'SwigPyObject':
        self.__dict__[name] = value
        return
    setter = class_type.__swig_setmethods__.get(name, None)
    if setter:
        return setter(self, value)
    if static:
        raise AttributeError("You cannot add attributes to %s" % self)
    object.__setattr__(self, name, value)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown attributes are allowed and stored.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    """SWIG attribute getter: dispatch reads via registered get methods."""
    if name == "thisown":
        return self.this.own()
    getter = class_type.__swig_getmethods__.get(name, None)
    if getter:
        return getter(self)
    if static:
        raise AttributeError(name)
    # Kept as in the generated code (``object`` defines no __getattr__,
    # so this also ends in AttributeError at runtime).
    return object.__getattr__(self, name)
def _swig_getattr(self, class_type, name):
    # Non-static variant used as the proxy classes' __getattr__ hook.
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    """Build the ``<module.Class; proxy of ...>`` repr for SWIG proxies."""
    try:
        proxy_part = "proxy of " + self.this.__repr__()
    except Exception:
        # No usable ``this`` pointer on the instance yet.
        proxy_part = ""
    cls = self.__class__
    return "<%s.%s; %s >" % (cls.__module__, cls.__name__, proxy_part)
# Legacy old/new-style class shim: _newclass is 1 when ``object`` exists.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    """Wrap *set* so only existing attributes (or ``this``) may be assigned."""
    def set_attr(self, name, value):
        if name == "thisown":
            return self.this.own(value)
        if name == "this" or hasattr(self, name):
            set(self, name, value)
            return
        raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import itkSpatialObjectBasePython
import itkCovariantVectorPython
import itkFixedArrayPython
import pyBasePython
import vnl_vector_refPython
import vnl_vectorPython
import vnl_matrixPython
import stdcomplexPython
import itkVectorPython
import itkImageRegionPython
import itkSizePython
import ITKCommonBasePython
import itkIndexPython
import itkOffsetPython
import itkSpatialObjectPropertyPython
import itkRGBAPixelPython
import itkBoundingBoxPython
import itkVectorContainerPython
import itkMatrixPython
import vnl_matrix_fixedPython
import itkPointPython
import itkContinuousIndexPython
import itkMapContainerPython
import itkAffineTransformPython
import itkMatrixOffsetTransformBasePython
import itkArray2DPython
import itkOptimizerParametersPython
import itkArrayPython
import itkVariableLengthVectorPython
import itkDiffusionTensor3DPython
import itkSymmetricSecondRankTensorPython
import itkTransformBasePython
# SWIG-generated proxy: all methods forward to the compiled ITK module.
class itkMetaConverterBase2(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkMetaConverterBase2 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    def ReadMeta(self, name: 'char const *') -> "itkSpatialObject2_Pointer":
        """ReadMeta(itkMetaConverterBase2 self, char const * name) -> itkSpatialObject2_Pointer"""
        return _itkMetaConverterBasePython.itkMetaConverterBase2_ReadMeta(self, name)
    def WriteMeta(self, spatialObject: 'itkSpatialObject2', name: 'char const *') -> "bool":
        """WriteMeta(itkMetaConverterBase2 self, itkSpatialObject2 spatialObject, char const * name) -> bool"""
        return _itkMetaConverterBasePython.itkMetaConverterBase2_WriteMeta(self, spatialObject, name)
    def MetaObjectToSpatialObject(self, mo: 'MetaObject const *') -> "itkSpatialObject2_Pointer":
        """MetaObjectToSpatialObject(itkMetaConverterBase2 self, MetaObject const * mo) -> itkSpatialObject2_Pointer"""
        return _itkMetaConverterBasePython.itkMetaConverterBase2_MetaObjectToSpatialObject(self, mo)
    def SpatialObjectToMetaObject(self, spatialObject: 'itkSpatialObject2') -> "MetaObject *":
        """SpatialObjectToMetaObject(itkMetaConverterBase2 self, itkSpatialObject2 spatialObject) -> MetaObject *"""
        return _itkMetaConverterBasePython.itkMetaConverterBase2_SpatialObjectToMetaObject(self, spatialObject)
    def SetWriteImagesInSeparateFile(self, _arg: 'bool const') -> "void":
        """SetWriteImagesInSeparateFile(itkMetaConverterBase2 self, bool const _arg)"""
        return _itkMetaConverterBasePython.itkMetaConverterBase2_SetWriteImagesInSeparateFile(self, _arg)
    def GetWriteImagesInSeparateFile(self) -> "bool":
        """GetWriteImagesInSeparateFile(itkMetaConverterBase2 self) -> bool"""
        return _itkMetaConverterBasePython.itkMetaConverterBase2_GetWriteImagesInSeparateFile(self)

# SWIG boilerplate: rebind each method as an instancemethod of the proxy
# and register the proxy class with the runtime.
itkMetaConverterBase2.ReadMeta = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase2_ReadMeta, None, itkMetaConverterBase2)
itkMetaConverterBase2.WriteMeta = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase2_WriteMeta, None, itkMetaConverterBase2)
itkMetaConverterBase2.MetaObjectToSpatialObject = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase2_MetaObjectToSpatialObject, None, itkMetaConverterBase2)
itkMetaConverterBase2.SpatialObjectToMetaObject = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase2_SpatialObjectToMetaObject, None, itkMetaConverterBase2)
itkMetaConverterBase2.SetWriteImagesInSeparateFile = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase2_SetWriteImagesInSeparateFile, None, itkMetaConverterBase2)
itkMetaConverterBase2.GetWriteImagesInSeparateFile = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase2_GetWriteImagesInSeparateFile, None, itkMetaConverterBase2)
itkMetaConverterBase2_swigregister = _itkMetaConverterBasePython.itkMetaConverterBase2_swigregister
itkMetaConverterBase2_swigregister(itkMetaConverterBase2)
# 3D variant of the converter proxy (same structure as the 2D one above).
class itkMetaConverterBase3(ITKCommonBasePython.itkObject):
    """Proxy of C++ itkMetaConverterBase3 class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    def ReadMeta(self, name: 'char const *') -> "itkSpatialObject3_Pointer":
        """ReadMeta(itkMetaConverterBase3 self, char const * name) -> itkSpatialObject3_Pointer"""
        return _itkMetaConverterBasePython.itkMetaConverterBase3_ReadMeta(self, name)
    def WriteMeta(self, spatialObject: 'itkSpatialObject3', name: 'char const *') -> "bool":
        """WriteMeta(itkMetaConverterBase3 self, itkSpatialObject3 spatialObject, char const * name) -> bool"""
        return _itkMetaConverterBasePython.itkMetaConverterBase3_WriteMeta(self, spatialObject, name)
    def MetaObjectToSpatialObject(self, mo: 'MetaObject const *') -> "itkSpatialObject3_Pointer":
        """MetaObjectToSpatialObject(itkMetaConverterBase3 self, MetaObject const * mo) -> itkSpatialObject3_Pointer"""
        return _itkMetaConverterBasePython.itkMetaConverterBase3_MetaObjectToSpatialObject(self, mo)
    def SpatialObjectToMetaObject(self, spatialObject: 'itkSpatialObject3') -> "MetaObject *":
        """SpatialObjectToMetaObject(itkMetaConverterBase3 self, itkSpatialObject3 spatialObject) -> MetaObject *"""
        return _itkMetaConverterBasePython.itkMetaConverterBase3_SpatialObjectToMetaObject(self, spatialObject)
    def SetWriteImagesInSeparateFile(self, _arg: 'bool const') -> "void":
        """SetWriteImagesInSeparateFile(itkMetaConverterBase3 self, bool const _arg)"""
        return _itkMetaConverterBasePython.itkMetaConverterBase3_SetWriteImagesInSeparateFile(self, _arg)
    def GetWriteImagesInSeparateFile(self) -> "bool":
        """GetWriteImagesInSeparateFile(itkMetaConverterBase3 self) -> bool"""
        return _itkMetaConverterBasePython.itkMetaConverterBase3_GetWriteImagesInSeparateFile(self)

# SWIG boilerplate: instancemethod rebinding and runtime registration.
itkMetaConverterBase3.ReadMeta = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase3_ReadMeta, None, itkMetaConverterBase3)
itkMetaConverterBase3.WriteMeta = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase3_WriteMeta, None, itkMetaConverterBase3)
itkMetaConverterBase3.MetaObjectToSpatialObject = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase3_MetaObjectToSpatialObject, None, itkMetaConverterBase3)
itkMetaConverterBase3.SpatialObjectToMetaObject = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase3_SpatialObjectToMetaObject, None, itkMetaConverterBase3)
itkMetaConverterBase3.SetWriteImagesInSeparateFile = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase3_SetWriteImagesInSeparateFile, None, itkMetaConverterBase3)
itkMetaConverterBase3.GetWriteImagesInSeparateFile = new_instancemethod(_itkMetaConverterBasePython.itkMetaConverterBase3_GetWriteImagesInSeparateFile, None, itkMetaConverterBase3)
itkMetaConverterBase3_swigregister = _itkMetaConverterBasePython.itkMetaConverterBase3_swigregister
itkMetaConverterBase3_swigregister(itkMetaConverterBase3)
| [
"44883043+ssalmaan@users.noreply.github.com"
] | 44883043+ssalmaan@users.noreply.github.com |
b76703edb54f9d342ff986b4ad2451ffd03e6498 | f281c9ecd48aedd30469cfbd556bc3319cd8419d | /sendmail/src/main.py | 0b5456e505640bb912ccd4735ffc2480b6fa88dd | [] | no_license | youerning/blog | 5d5edeb4f836d233a4119796f38fc4e33531714e | 59c3704cf5a77bba70a48a5d09db9b165ea59d4b | refs/heads/master | 2023-08-31T04:08:16.461923 | 2023-08-27T01:28:39 | 2023-08-27T01:28:39 | 114,074,235 | 183 | 105 | null | 2023-05-05T02:36:52 | 2017-12-13T04:35:00 | HTML | UTF-8 | Python | false | false | 6,488 | py | # -*- coding: UTF-8 -*-
# @author youerning
# @email 673125641@qq.com
import sys
import base64
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from collections import defaultdict
from io import BytesIO
from os import path
# 第三方库
from jinja2 import Template
from PIL import Image
# Account and addressing configuration needed to send mail.
# The <...> values are placeholders to fill in before use.
mail_to = "<收件人邮箱地址>"
smtp_host = "<邮件服务器>"
smtp_username = "<用户名>"
smtp_password = "<密码>"
subject = "演示邮件"
from_ = "邮件机器人"
# Separator used when joining multiple recipient addresses.
COMMASPACE = ","
EMAIL_TEMPLATE = """<html>
<head>
<style type="text/css">
table
{
border-collapse: collapse;
margin: 0 auto;
text-align: center;
}
table td, table th
{
border: 1px solid #cad9ea;
color: #666;
height: 30px;
}
table thead th
{
background-color: #CCE8EB;
width: 100px;
}
table tr:nth-child(odd)
{
background: #fff;
}
table tr:nth-child(even)
{
background: #F5FAFA;
}
</style>
</head>
<body>
<p>一共有以下{{record_size}}条数据</p>
<table width="90%" class="table">
<thead>
<tr>
{% for label in labels %}
<th>{{label}}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for item in items %}
<tr>
{% for value in item %}
<td>{{value}}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
</html>"""
EMAIL_IMAGE_TEMPLATE = """<html>
<head>
<title>Page Title</title>
</head>
<body>
<h3>这是一张图片</h3>
<p><img src="cid:{{image_name}}" height="112" width="200" ></p>
</body>
</html>
"""
EMAIL_ONLINE_IMAGE_TEMPLATE = """<html>
<head>
<title>Page Title</title>
</head>
<body>
<h3>这是一张图片</h3>
<p><img src="cid:{{image_name}}" ></p>
</body>
</html>
"""
def create_image_eamil_contant(fp):
    """Build a multipart/related message embedding the local image at *fp*.

    The image is attached with a Content-ID so the HTML body can render
    it inline via ``cid:``.  Exits the process when *fp* does not exist.
    """
    if not path.exists(fp):
        sys.exit("要发送的本地图片不存在")
    cid = "demo"
    root = MIMEMultipart("related")
    with open(fp, "rb") as image_file:
        inline_image = MIMEImage(image_file.read())
        # The angle brackets around the Content-ID are mandatory.
        inline_image.add_header("Content-ID", "<%s>" % cid)
        root.attach(inline_image)
    # Render the HTML body that references the embedded image.
    html_body = Template(EMAIL_IMAGE_TEMPLATE).render(image_name=cid)
    alternative = MIMEMultipart("alternative")
    alternative.attach(MIMEText(html_body, "html", "utf-8"))
    root.attach(alternative)
    return root
def create_online_image_content():
    """Build a multipart/related message from a base64-encoded image file.

    The decoded image is tiled vertically five times, re-encoded as PNG
    and embedded inline via a Content-ID reference.  Exits the process
    when the source file is missing.
    """
    from PIL import Image
    source_path = "demo_base64.txt"
    if not path.exists(source_path):
        sys.exit("要发送的base64编码的图片不存在")
    cid = "demo"
    root = MIMEMultipart("related")
    with open(source_path, "rb") as encoded_file:
        img_data = base64.b64decode(encoded_file.read())
    # ``Image.open`` needs a file-like object, so wrap the raw bytes.
    tile = Image.open(BytesIO(img_data))
    tile_width, tile_height = tile.size
    repeat_times = 5
    # Stack the decoded image vertically ``repeat_times`` times.
    composed = Image.new(tile.mode, (tile_width, tile_height * repeat_times))
    for row in range(repeat_times):
        composed.paste(tile, box=(0, row * tile_height))
    # MIMEImage wants encoded bytes, so serialize the composition as PNG
    # (a format must be given because the buffer has no file name).
    png_buffer = BytesIO()
    composed.save(png_buffer, "png")
    inline_image = MIMEImage(png_buffer.getvalue())
    # The angle brackets around the Content-ID are mandatory.
    inline_image.add_header("Content-ID", "<%s>" % cid)
    root.attach(inline_image)
    # Render the HTML body that references the embedded image.
    html_body = Template(EMAIL_ONLINE_IMAGE_TEMPLATE).render(image_name=cid)
    alternative = MIMEMultipart("alternative")
    alternative.attach(MIMEText(html_body, "html", "utf-8"))
    root.attach(alternative)
    return root
def create_html_content():
    """Render the demo table template into an HTML MIMEText payload."""
    record_size = 10
    label_size = 5
    labels = ["label-%s" % col for col in range(label_size)]
    # Every row carries the same placeholder cells.
    items = [
        ["item-%s" % col for col in range(label_size)]
        for _ in range(record_size)
    ]
    html = Template(EMAIL_TEMPLATE).render(
        record_size=record_size, items=items, labels=labels
    )
    return MIMEText(html, "html", "utf-8")
def send_email(msg, mail_to, smtp_host, smtp_username, smtp_password, subject, from_):
    """Send *msg* through the given SMTP account.

    Parameters
    ----------
    msg : email.message.Message
        Fully built message; Subject/From/To headers are filled in here.
    mail_to : str or list of str
        One recipient address or a list of them.
    smtp_host, smtp_username, smtp_password : str
        SMTP server and login credentials.
    subject, from_ : str
        Subject line and display name for the From header.

    Returns
    -------
    bool
        True when the mail was handed to the server, False on any error.
    """
    msg["Subject"] = Header(subject, "utf-8")
    msg["From"] = Header(from_, "utf-8")
    if not isinstance(mail_to, list):
        mail_to = [mail_to]
    msg["To"] = COMMASPACE.join(mail_to)
    client = None
    try:
        print("准备连接smtp邮件服务器: %s" % smtp_host)
        client = smtplib.SMTP(smtp_host)
        print("连接成功")
        client.login(smtp_username, smtp_password)
        print("登录成功")
        print("通过邮箱[%s]发送邮件给 %s" % (smtp_username, COMMASPACE.join(mail_to)))
        client.sendmail(smtp_username, mail_to, msg.as_string())
        print("发送成功...")
        return True
    except Exception:
        print("发送邮件失败")
        return False
    finally:
        # ``client`` stays None when the connection itself failed; the old
        # unconditional ``client.quit()`` raised NameError here and masked
        # the real error.
        if client is not None:
            client.quit()
def send_local_image_email():
    # Demo: send an email embedding the local image file demo.jpg.
    msg = create_image_eamil_contant("demo.jpg")
    send_email(msg,mail_to, smtp_host, smtp_username, smtp_password, subject, from_)
def send_online_image_email():
    # Demo: send an email embedding the base64-sourced composed image.
    msg = create_online_image_content()
    send_email(msg,mail_to, smtp_host, smtp_username, smtp_password, subject, from_)
def send_html_content():
    # Demo: send an email containing the rendered HTML table.
    msg = create_html_content()
    send_email(msg,mail_to, smtp_host, smtp_username, smtp_password, subject, from_)
def main():
    # Placeholder entry point; the demo functions are called directly
    # from the __main__ guard instead.
    pass
if __name__ == "__main__":
    # Alternative demos, left disabled; only the HTML-table demo runs.
    # send_local_image_email()
    # send_online_image_email()
send_html_content() | [
"673125641@qq.com"
] | 673125641@qq.com |
822ab7eaf58e30573d40ece680172b2eed27c257 | c3a3cf534fb77a1963c172836aeff70ef1875e4b | /fender/trunk/src/vmware/vcenter.py | fd8798eeb4fb2dd09118af32c487231d2bbe1182 | [
"CC0-1.0"
] | permissive | raychorn/svn_hp-projects | dd281eb06b299e770a31bcce3d1da1769a36883d | d5547906354e2759a93b8030632128e8c4bf3880 | refs/heads/main | 2023-01-08T07:31:30.042263 | 2020-10-14T21:56:57 | 2020-10-14T21:56:57 | 304,144,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,071 | py | #####################################################
#
# vcenter.py
#
# Copyright 2011 Hewlett-Packard Development Company, L.P.
#
# Hewlett-Packard and the Hewlett-Packard logo are trademarks of
# Hewlett-Packard Development Company, L.P. in the U.S. and/or other countries.
#
# Confidential computer software. Valid license from Hewlett-Packard required
# for possession, use or copying. Consistent with FAR 12.211 and 12.212,
# Commercial Computer Software, Computer Software Documentation, and Technical
# Data for Commercial Items are licensed to the U.S. Government under
# vendor's standard commercial license.
#
# Author:
# Mohammed M. Islam
#
# Description:
# Classes for accessing the VMWare Web Services
#
#####################################################
import suds
from suds.client import Client
from suds.sax.parser import Parser
from suds.umx.basic import Basic as bumx
import httplib, httplib2, urllib, urllib2
from util import catalog
from util.resource import resource
from vmware import vmwobj
_PREFIX='ns0'
import logging
log = logging.getLogger(__name__)
from uuid import UUID
import base64
from suds.sax.element import Element, Attribute
from suds.plugin import MessagePlugin
from suds.sudsobject import Object
from threading import Thread, Lock
import time
import sys
import os
def KeyAnyValue(key, value, xsitype):
    """Build the XML element for a VMware WSDL ``KeyAnyValue`` pair.

    SUDS cannot marshal a KeyAnyValue on its own because the ``value``
    member is typed as ``xsd:any``, so the element -- including the
    explicit ``xsi:type`` attribute -- is assembled by hand here.
    """
    type_attr = Attribute('type', xsitype)
    type_attr.prefix = 'xsi'
    value_el = Element('value').setText(str(value))
    value_el.append(type_attr)
    arguments = Element('arguments')
    arguments.append(Element('key').setText(key))
    arguments.append(value_el)
    return arguments
class AddAttr(MessagePlugin):
    """suds message plugin that stamps ``attrname=attrvalue`` onto every
    outgoing XML node whose tag equals ``nodename``."""
    def __init__(self, nodename, attrname, attrvalue):
        self.nodename = nodename
        self.attrname = attrname
        self.attrvalue = attrvalue
    def addAttributeForValue(self, node):
        # Visitor applied to each node during the envelope walk below.
        if node.name == self.nodename:
            node.set(self.attrname, self.attrvalue)
    def marshalled(self, context):
        # suds hook invoked after the SOAP envelope has been marshalled.
        context.envelope.walk(self.addAttributeForValue)
class ManagedObjectRef(suds.sudsobject.Property):
    # The suds SOAP library cannot handle XML elements whose names start
    # with "_", and a VMware ManagedObjectReference carries a "_type"
    # field -- this custom Property subclass carries it explicitly.
    """Custom class to replace the suds generated class, which lacks _type."""
    def __init__(self, _type, value):
        # _type: VMware managed object type, e.g. 'HostSystem'.
        # value: managed object id, e.g. 'host-123'.
        suds.sudsobject.Property.__init__(self, value)
        self._type = _type
class vCenter:
    """Client wrapper around the VMware vCenter SOAP (vSphere Web Services) API.

    Maintains a login session, cached host/cluster inventories, and an
    optional background thread that waits for vCenter events.
    """
    # Moref of the well-known ServiceInstance API entry point.
    service_reference = ManagedObjectRef('ServiceInstance', 'ServiceInstance')
    # Property sets requested from the PropertyCollector for each
    # managed-object type (minimal vs. detailed variants).
    prop_set_host_minimal=['name', 'hardware.systemInfo.uuid']
    prop_set_host_details=['overallStatus', 'hardware', 'name', 'config.ipmi', 'config.network', 'config.product',
    'config.storageDevice.hostBusAdapter', 'config.storageDevice.multipathInfo.lun', 'vm']
    prop_set_vm = ['name' , 'config.uuid', 'config.hardware', 'guest']
    prop_set_dvportgroup = ['name', 'configStatus', 'config', 'host']
    prop_set_cluster = ['name', 'configStatus', 'host']
    prop_set_datastore = ['name', 'info', 'host']
    prop_set_dvswitch = ['name', 'configStatus', 'config']
    prop_set_guest = ['guestOperationsManager', 'authManager', 'fileManager', 'processManager']
    def __init__(self, host, wsdl=None, decoder=None, eventing_enable=True):
        """Prepare (but do not connect) a vCenter client.

        host            -- vCenter hostname/IP; SDK endpoint is https://host/sdk
        wsdl            -- optional WSDL URL; defaults to the bundled vSphere 4.1 WSDL
        decoder         -- optional credential decoder; base64 is used when absent
        eventing_enable -- start the event-listener thread during setup()
        """
        self.initializing = True
        self.host = host
        self.url = 'https://%s/sdk' % host
        if not wsdl:
            wsdl = 'file://' + resource('vmware/wsdl41/vimService.wsdl')
        self.wsdl = wsdl
        # Cached inventory, refreshed by update(); guarded by the locks below.
        self.hosts = []
        self.clusters = []
        self.listupdatetime = 0
        # session / sc are None until login() succeeds.
        self.session = None
        self.sc = None
        self.decoder = decoder
        self.host_discovery_done = False
        self.eventing_enable = eventing_enable
        self.last_event_key = 0
        # Per-moref event journals populated by process_events().
        self.standby_log = {}
        self.reboot_log = {}
        self.vm_renamed_log = {}
        self.ds_renamed_log = {}
        self.destroyed_log = {}
        self.reconfigured_log = {}
        self.running = False
        log.debug("vCenter __init__ host: %s", host)
        # One lock per independently-cached/serialised operation.
        self.host_update_lock = Lock()
        self.cluster_update_lock = Lock()
        self.retrieve_properties_lock = Lock()
        self.retrieve_list_lock = Lock()
        self.vcenterupdatelock = Lock()
        self.get_vms_for_host_lock = Lock()
        self.get_ds_for_host_lock = Lock()
    def decode(self, value):
        # Decode a stored credential: delegate to the injected decoder when
        # one was supplied, otherwise treat the value as base64.
        return self.decoder.decode(value) if self.decoder else base64.decodestring(value)
def get_api_versions(self):
"Queries the VCenter for it's current version and list of prior versions in vim25"
log.debug('Getting VCenter Version')
try :
http = httplib2.Http()
http.disable_ssl_certificate_validation = True
rawdata = http.request('https://%s/sdk/vimServiceVersions.xml' %(self.host), method="GET")
xml = Parser().parse(string=rawdata[1]).root()
data = bumx().process(xml)
ver = []
for ns in data.namespace:
if hasattr(ns, 'priorVersions'):
ver += ns.priorVersions.version
self.prior_versions = ver
except Exception as e :
log.exception("Failed to parse VCenter version: ", str(e))
    def setup(self):
        """Parse the WSDL, build the SOAP client and (optionally) start eventing.

        Expensive: parsing the vSphere WSDL takes several seconds.
        """
        self.client = Client(self.wsdl)
        self.client.set_options(location=self.url)
        # 10-hour timeout: WaitForUpdates is a long-poll call.
        self.client.set_options(timeout=36000)
        self.vim = self.client.service
        self.entire_datacenter_trav_spec = self.build_trav_spec()
        self.initializing = False
        if self.eventing_enable :
            self.eventSetup()
    def eventSetup(self) :
        '''Setup and start the thread to wait for vCenter events'''
        self.thread = Thread(name='vCenter.events:%s' % self.host, target=self.wait_for_events)
        self.thread.daemon = True
        self.thread.start()
    def eventfilter(self):
        """Build the PropertyFilterSpec used to watch for vCenter events.

        Creates an EventHistoryCollector restricted to the standby / reboot /
        rename / reconfigure / datastore event types this module cares about,
        and returns a filter spec that observes the collector's 'latestPage'.
        """
        evfilter = self.factory('EventFilterSpec')
        evfilter.alarm = None # Looks like optional params need to be set to None so that they are not included in the SOAP request
        evfilter.scheduledTask = None
        evfilter.eventTypeId += [
        'vim.event.EnteredStandbyModeEvent',
        'vim.event.DrsEnteredStandbyModeEvent',
        'vim.event.ExitedStandbyModeEvent',
        'vim.event.DrsExitedStandbyModeEvent',
        'vim.event.VmGuestRebootEvent',
        'vim.event.VmReconfiguredEvent',
        'vim.event.VmRenamedEvent',
        'vim.event.DatastoreDestroyedEvent',
        'vim.event.DatastoreRenamedEvent'
        ]
        evcollector = self.vim.CreateCollectorForEvents(self.sc.eventManager, evfilter)
        self.vim.SetCollectorPageSize(evcollector, 100)
        # Watch only the collector's latestPage property.
        ps = self.factory('PropertySpec')
        ps.all = False
        ps.type = evcollector._type
        ps.pathSet = [ 'latestPage' ]
        obj = self.factory('ObjectSpec')
        obj.obj = evcollector
        obj.skip = False
        pf = self.factory('PropertyFilterSpec')
        pf.propSet = [ps]
        pf.objectSet = [obj]
        return pf
    def wait_for_events(self) :
        '''Wait for events from vCenter.

        Runs as the body of the daemon thread started by eventSetup(): creates
        a property filter for the event collector, then long-polls
        WaitForUpdates in a loop, handing each update to process_update().
        On any error the filter and version token are reset and the loop
        retries after a back-off.
        '''
        filter = None
        version = None
        self.running = True
        while self.running :
            log.debug("Waiting for vCenter Events")
            if not self.connected():
                log.debug("vCenter %s not connected skipping wait for events", self.host)
                time.sleep(30)
                continue
            if not filter:
                try:
                    log.debug("Building Event Filter")
                    ef = self.eventfilter()
                    log.debug("Calling CreateFilter")
                    filter = self.vim.CreateFilter(self.sc.propertyCollector, ef, False)
                    log.debug("Created new event filter: %s", filter)
                except :
                    log.exception('Error creating event filter')
                    self.comm_error()
                    time.sleep(30)
                    continue
            try:
                # Long-poll; version tracks the last update we have seen.
                log.debug("WaitForUpdates: version=%s", version)
                updates = self.vim.WaitForUpdates(self.sc.propertyCollector, version)
            #except urllib2.URLError:
            #    log.error('urllib2.URLError in self.vim.WaitForUpdates')
            #    continue
            except Exception:
                self.comm_error()
                log.exception('Exception waiting for updates')
                filter = None
                version = None
                continue
            if updates :
                version = updates.version
                for update in updates.filterSet:
                    for obj in update.objectSet:
                        self.process_update(obj)
        self.running = False
    def process_events(self, content):
        '''
        Process events from vCenter.

        Walks the event list attached to the update, skipping events already
        seen (by key), and records standby / reboot / rename / reconfigure /
        datastore events into the corresponding per-moref *_log dictionaries.
        '''
        try:
            evlist = content.changeSet[0].val.Event
            # Events arrive newest-first; reverse to process chronologically.
            evlist.reverse()
        except AttributeError:
            # If the event list is empty, content.changeSet[0].val will
            # be a Text node with a value of ""
            evlist = []
        for event in evlist:
            # De-duplicate: event keys are monotonically increasing.
            if event.key <= self.last_event_key:
                continue
            self.last_event_key = event.key
            event_name = event.__class__.__name__
            log.debug('Received vCenter Event %s', event_name)
            # Look up the morefs in the event:
            # example:
            #
            # (EventEx){
            #     host = (HostEventArgument) {
            #         name = "foobar"
            #         host = (ManagedObjectReference) {
            #             value = "host-1234"
            #             _type = "HostSystem"
            #         }
            #     }
            # }
            #
            # So, the moref is event.thing.thing.
            #Look for events related to hosts could be any of ('datacenter', 'computeResource', 'host', 'vm', 'ds', 'net', 'dvs')
            r = getattr(event, 'host', None)
            if r:
                r = getattr(r, 'host', None)
            if r:
                moref = '%s:%s' % (r._type, r.value)
                log.debug("Received Event %s for host %s", event_name, moref)
                host = self.get_host(moref)
                if not host :
                    log.error('Error processing event %s for host %s: Host not found', event_name, moref)
                    continue
                if 'EnteredStandbyMode' in event_name :
                    self.standby_log.setdefault(moref, {})['entered_standby_mode'] = event.createdTime
                    self.standby_log.setdefault(moref, {})['exited_standby_mode'] = None
                    log.debug("Event Host %s entered standby mode at %s", host.name, event.createdTime)
                elif 'ExitedStandbyMode' in event_name :
                    self.standby_log.setdefault(moref, {})['exited_standby_mode'] = event.createdTime
                    log.debug("Event Host %s exited standby mode at %s", host.name, event.createdTime)
            #Look for events related to vm could be any of ('datacenter', 'computeResource', 'host', 'vm', 'ds', 'net', 'dvs')
            # NOTE(review): the print statements below look like debug
            # leftovers; consider replacing them with log.debug calls.
            print "Looking for EVENTS related to vms..."
            vmevent_details = {}
            ev_details = []
            vmevent = {}
            s = getattr(event, 'vm', None)
            if s:
                s = getattr(s, 'vm', None)
            if s:
                moref = '%s:%s' % (s._type, s.value)
                log.debug("Received Event %s for vm %s", event_name, moref)
                log.info("Received Event %s for vm %s", event_name, moref)
                vm = self.get_vm(moref)
                if not vm :
                    log.error('Error processing event %s for vm %s: VM not found', event_name, moref)
                    continue
                if 'VmGuestReboot' in event_name :
                    self.reboot_log.setdefault(moref, {})['vm_guest_reboot'] = event.createdTime
                    log.debug("Event VM %s guest reboot at %s", vm.name, event.createdTime)
                    log.info("Event VM %s guest reboot at %s", vm.name, event.createdTime)
                    #print event.fullFormattedMessage
                    vmevent['message'] = event.fullFormattedMessage
                    vmevent['user'] = event.userName
                elif 'VmRenamed' in event_name :
                    self.vm_renamed_log.setdefault(moref, {})['vm_renamed'] = event.createdTime
                    log.debug("Event VM %s vm renamed at %s", vm.name, event.createdTime)
                    log.info("Event VM %s vm renamed at %s", vm.name, event.createdTime)
                    #print event.fullFormattedMessage
                    vmevent['message'] = event.fullFormattedMessage
                    vmevent['user'] = event.userName
                elif 'VmReconfigured' in event_name :
                    self.reconfigured_log.setdefault(moref, {})['vm_reconfigured'] = event.createdTime
                    log.debug("Event VM %s vm reconfigured at %s", vm.name, event.createdTime)
                    log.info("Event VM %s vm reconfigured at %s", vm.name, event.createdTime)
                    #print event.fullFormattedMessage
                    vmevent['message'] = event.fullFormattedMessage
                    vmevent['user'] = event.userName
                    vmevent['configSpec'] = event.configSpec
                vmevent['id'] = event.chainId
                #vmevent['tag'] = event.changeTag
                vmevent['resource'] = event.computeResource
                vmevent['dc'] = event.datacenter
                #vmevent['dvs'] = event.dvs
                vmevent['host'] = event.host
                vmevent['key'] = event.key
                #vmevent['net'] = event.net
                #vmevent['ds'] = event.ds
                ev_details.append(vmevent)
                vmevent_details['details'] = ev_details
                print (str(vmevent_details))
                #return vmevent_details
            #Look for events related to ds could be any of ('datacenter', 'computeResource', 'host', 'vm', 'ds', 'net', 'dvs')
            print "Looking for EVENTS related to datastores..."
            dsevent_details = {}
            ev_details = []
            dsevent = {}
            # NOTE(review): datastore events normally expose the moref as
            # event.ds.datastore -- confirm 'ds' is the right outer attribute
            # for the DatastoreRenamed/Destroyed events subscribed above.
            t = getattr(event, 'ds', None)
            if t:
                t = getattr(t, 'datastore', None)
            if t:
                moref = '%s:%s' % (t._type, t.value)
                log.debug("Received Event %s for ds %s", event_name, moref)
                log.info("Received Event %s for ds %s", event_name, moref)
                ds = self.get_ds(moref)
                if not ds :
                    log.error('Error processing event %s for ds %s: DS not found', event_name, moref)
                    continue
                if 'DatastoreRenamed' in event_name :
                    self.ds_renamed_log.setdefault(moref, {})['datastore_renamed'] = event.createdTime
                    log.debug("Event DS %s datastore renamed at %s", ds.name, event.createdTime)
                    log.info("Event DS %s datastore renamed at %s", ds.name, event.createdTime)
                    dsevent['message'] = event.fullFormattedMessage
                    dsevent['user'] = event.userName
                elif 'DatastoreDestroyed' in event_name :
                    self.destroyed_log.setdefault(moref, {})['datastore_destroyed'] = event.createdTime
                    log.debug("Event DS %s datastore destroyed at %s", ds.name, event.createdTime)
                    log.info("Event DS %s datastore destroyed at %s", ds.name, event.createdTime)
                    dsevent['message'] = event.fullFormattedMessage
                    dsevent['user'] = event.userName
                dsevent['id'] = event.chainId
                #dsevent['tag'] = event.changeTag
                #dsevent['resource'] = event.computeResource
                dsevent['dc'] = event.datacenter
                #dsevent['dvs'] = event.dvs
                #dsevent['host'] = event.host
                dsevent['key'] = event.key
                #dsevent['net'] = event.net
                #dsevent['vm'] = event.vm
                ev_details.append(dsevent)
                dsevent_details['details'] = ev_details
                print (str(dsevent_details))
def process_update(self, content):
'''
Process updates from VCenter
'''
log.debug("Processing vCenter Updates type: %s", content.obj._type)
if content.obj._type == 'EventHistoryCollector':
return self.process_events(content)
def factory(self, name, prefix=_PREFIX):
'''
Create and instance of an object named in the WSDL
@type name: string
@param name: Name of the object to create.
@type prefix: string
@param prefix: The namespace in which the object resides.
@rtype: object
@return: The newly created object
'''
name = "%s:%s" % (prefix, name)
return self.client.factory.create(name)
    def login(self, username, password) :
        """Authenticate against the vCenter with (encoded) credentials.

        The credentials are stored so connect() can re-login after a
        session drop.  Errors are logged and cleared via comm_error().
        """
        self.username = username
        self.password = password
        log.debug("logging in to vCenter %s", self.host)
        try:
            self.sc = self.vim.RetrieveServiceContent(self.service_reference)
            self.session = self.vim.Login(self.sc.sessionManager, self.decode(username), self.decode(password))
        except:
            log.exception('Error in login')
            self.comm_error()
    def connect(self):
        """(Re-)establish the session using credentials saved by login()."""
        log.debug('In connect...')
        try:
            if not self.sc:
                log.debug('Logging on to the vCenter...')
                self.sc = self.vim.RetrieveServiceContent(self.service_reference)
                self.session = self.vim.Login(self.sc.sessionManager, self.decode(self.username), self.decode(self.password))
        except Exception, e:
            log.exception("Error connecting to vCenter %s", self.host)
            self.comm_error(e)
def connected(self):
return self.session != None
    def keep_alive(self):
        """Periodic liveness hook: reconnect if needed, then refresh caches."""
        log.debug('In keep_alive...')
        try:
            if not self.connected():
                log.debug('Re-connecting session')
                self.comm_error()
                self.connect()
            if self.connected():
                log.debug('keep_alive() calling update')
                self.update()
            #ext = self.vim.FindExtension(self.sc.extensionManager, 'com.hp.ic4vc')
            #log.debug('Extension found: %s', str(ext['key']))
        except Exception, e:
            log.exception('Exception connecting to vCenter %s in keep_alive', self.host)
            self.comm_error(e)
    def get_about(self):
        # ServiceContent.about: version/build info for the connected vCenter.
        return self.sc.about
    def comm_error(self, e=None):
        # Reset session state after a communication failure so the next
        # connect() performs a full re-login with a clean cookie jar.
        self.session = None
        self.sc = None
        self.client.options.transport.cookiejar.clear()
def trav_spec(self, name, _type, path, skip=False, selectset=[]):
t = self.factory('TraversalSpec')
t.name, t.type, t.path, t.skip = name, _type, path, skip
for name in selectset:
ss = self.factory('SelectionSpec')
ss.name = name
t.selectSet.append(ss)
return t
def prop_spec(self, _type, pathset, _all=False):
p = self.factory('PropertySpec')
p.all, p.type, p.pathSet = _all, _type, pathset
return p
    def build_trav_spec(self):
        """Build the traversal-spec graph covering the entire datacenter.

        The returned list of TraversalSpecs lets the PropertyCollector walk
        folders -> datacenters -> compute resources / resource pools ->
        hosts -> VMs, plus network folders and datastores.
        """
        rp_vm = self.trav_spec('rpToVm', 'ResourcePool', 'vm')
        rp_rp = self.trav_spec('rpToRp', 'ResourcePool', 'resourcePool', selectset=['rpToVm', 'rpToRp'])
        cr_rp = self.trav_spec('crToRp', 'ComputeResource', 'resourcePool', selectset=['rpToVm', 'rpToRp'])
        cr_host = self.trav_spec('crToH', 'ComputeResource', 'host')
        cc_host = self.trav_spec('ccToH', 'ClusterComputeResource', 'host')
        dc_hf = self.trav_spec('dcToHf', 'Datacenter', 'hostFolder', selectset=['visitFolders'])
        dc_vmf = self.trav_spec('dcToVmf', 'Datacenter', 'vmFolder', selectset=['visitFolders'])
        dc_nwf = self.trav_spec('dcToNwf', 'Datacenter', 'networkFolder', selectset=['visitFolders'])
        dc_ds = self.trav_spec('dcToDs', 'Datacenter', 'datastore', selectset=['visitFolders'])
        host_vm = self.trav_spec('HToVm', 'HostSystem', 'vm', selectset=['visitFolders'])
        # The recursive folder walker ties all the specs together.
        vf = self.trav_spec('visitFolders', 'Folder', 'childEntity', selectset=[
        'visitFolders',
        'dcToHf',
        'dcToVmf',
        'dcToNwf',
        'dcToDs',
        'crToH',
        'ccToH',
        'crToRp',
        'HToVm',
        'rpToVm',
        ])
        return [vf,
        dc_vmf,
        dc_hf,
        dc_nwf,
        dc_ds,
        cr_host,
        cc_host,
        cr_rp,
        rp_rp,
        host_vm,
        rp_vm
        ]
    def propfilter(self, ManagedEntityType, ManagedEntityProporties, startType=None, startValue=None):
        '''
        Create a property filter for collecting all the ManagedEntityType in the vcenter.

        When startType/startValue are given the filter starts at that single
        managed object; otherwise it walks the whole inventory from the root
        folder using the pre-built traversal spec.
        '''
        obj_spec = self.factory('ObjectSpec')
        obj_spec.skip = False
        if not startType:
            obj_spec.obj = self.sc.rootFolder
            ts = self.entire_datacenter_trav_spec
            obj_spec.selectSet = ts
        else:
            obj_spec.obj = ManagedObjectRef(startType, startValue)
        prop_filter_spec = self.factory('PropertyFilterSpec')
        prop_filter_spec.objectSet = [obj_spec]
        prop_set = []
        prop_set.append(self.prop_spec(ManagedEntityType, ManagedEntityProporties))
        prop_filter_spec.propSet = prop_set
        return prop_filter_spec
    def retrieve_list(self, ManagedEntityType, ManagedEntityProporties):
        """Retrieve the given properties for every entity of the given type.

        Serialised by retrieve_list_lock; returns the raw ObjectContent list
        ([] on error, after resetting the session via comm_error()).
        """
        with self.retrieve_list_lock:
            content = []
            pf = self.propfilter(ManagedEntityType, ManagedEntityProporties)
            try:
                content = self.vim.RetrieveProperties(self.sc.propertyCollector, [pf])
            except:
                self.comm_error()
                log.exception('Exception in retrieve_list')
            return content
    def retrieve_properties_incontext(self, session, ManagedEntityType, ManagedEntityProporties, startObjType, startObjValue):
        """Retrieve properties using an existing vmware_soap_session cookie.

        Clones the SOAP client (to avoid re-parsing the WSDL) and injects the
        caller-supplied session cookie into its transport.
        """
        handler = urllib2.HTTPCookieProcessor()
        # NOTE(review): self.mkcookie is not defined anywhere in this class;
        # calling this method will raise AttributeError -- confirm where
        # mkcookie is meant to come from.
        handler.cookiejar.set_cookie(self.mkcookie('vmware_soap_session', session))
        # Now, create a SOAP client based off a previously created SOAP client
        # (ie: we don't want to parse the WSDL again), and set the transport
        thisclient = self.client.clone()
        thisclient.set_options(location=self.url)
        thisclient.options.transport.urlopener = urllib2.build_opener(handler)
        # NOTE(review): 'RetriveProperties' is misspelled -- likely
        # RetrieveServiceContent was intended here (sc is used as the
        # collector argument below); confirm before relying on this path.
        sc = thisclient.service.RetriveProperties(self.service_reference)
        pf = self.propfilter(ManagedEntityType, ManagedEntityProporties, startObjType, startObjValue)
        content = thisclient.service.RetrieveProperties(sc, [pf])
        obj = vmwobj.factory(content[0].obj)
        obj.setprop(content[0].propSet)
        try:
            # Convert the little-endian SMBIOS UUID to canonical byte order.
            obj.hardware.systemInfo.uuid = str(UUID(bytes=UUID(obj.hardware.systemInfo.uuid).bytes_le))
        except:
            log.error('Error processing vcenter UUID')
        return obj
    def retrieve_properties(self, ManagedEntityType, ManagedEntityProporties, startObjType=None, startObjValue=None):
        """Retrieve properties for one entity (or all, when no start object).

        Returns a vmwobj wrapper with the properties applied, or None on
        error / empty result.  Serialised by retrieve_properties_lock.
        """
        with self.retrieve_properties_lock:
            content = None
            pf = self.propfilter(ManagedEntityType, ManagedEntityProporties, startObjType, startObjValue)
            try:
                content = self.vim.RetrieveProperties(self.sc.propertyCollector, [pf])
            except:
                log.exception('Error in retrieve_properties')
                self.comm_error()
            if content:
                try:
                    obj = vmwobj.factory(content[0].obj)
                    obj.setprop(content[0].propSet)
                    return obj
                except:
                    log.exception("propSet failed for %s:%s", startObjType, startObjValue)
                    return None
            return None
    def print_host(self, ):
        # Debug helper: dump the cached host list to stdout.
        for host in self.hosts:
            print '--', host._type, host.value, host.name, host.uuid
    def retrieve_host_list(self):
        # Minimal property set for all hosts (raw ObjectContent list).
        #prop_set_minimal=['name', 'hardware.systemInfo.uuid', 'config.ipmi']
        content = self.retrieve_list(vmwobj.host.mo_type, vmwobj.host.pset_minimal)
        return content
    def retreive_cluster_list(self):
        # NOTE(review): method name misspells 'retrieve'; kept as-is because
        # update_cluster_list() and external callers use this exact name.
        content = self.retrieve_list(vmwobj.cluster.mo_type, vmwobj.cluster.pset_detail)
        return content
    def retrieve_datacenter_list(self):
        # Detailed property set for all datacenters (raw ObjectContent list).
        content = self.retrieve_list(vmwobj.datacenter.mo_type, vmwobj.datacenter.pset_detail)
        return content
    def retrieve_dc_host_folder(self, dc_val):
        """Return the hostFolder property of the datacenter with id dc_val,
        or None on error."""
        content = None
        pf = self.propfilter('Datacenter', ['hostFolder'], 'Datacenter', dc_val)
        try:
            content = self.vim.RetrieveProperties(self.sc.propertyCollector, [pf])
        except:
            log.exception('Error in retrieve_dc_host_folder')
            self.comm_error()
        if content:
            return content[0].propSet[0].val
        return None
    def get_ds_for_host(self, moref):
        """Return detailed datastore objects for every datastore attached to
        the host identified by moref ('HostSystem:host-NNN')."""
        with self.get_ds_for_host_lock:
            host = self.get_host(moref)
            dslist = []
            if hasattr(host, 'datastore') and hasattr(host.datastore, 'ManagedObjectReference') and isinstance(host.datastore.ManagedObjectReference, list):
                for ds_moref in host.datastore.ManagedObjectReference:
                    if vmwobj.ds.mo_type and ds_moref.value:
                        content = self.retrieve_properties(vmwobj.ds.mo_type, vmwobj.ds.pset_detail, vmwobj.ds.mo_type, ds_moref.value)
                        if content:
                            dslist.append(content)
            return dslist
    def create_virtual_disk(self, moref):
        """Add a thin-provisioned virtual disk to the first VM of the host.

        Collects the host's VMs and datastores, then reconfigures the FIRST
        VM (vmlist[0]) with a new 3 GB thin disk on the FIRST datastore.
        NOTE(review): looks like test/utility code -- the target VM, size
        and datastore are hard-coded, and 'controllerKey' is unbound if the
        VM has no device labelled 'SCSI controller 0'.
        """
        diskCount = 0
        with self.get_vms_for_host_lock:
            host = self.get_host(moref)
            vmlist = []
            vmmoreflist = []
            if hasattr(host, 'vm') and hasattr(host.vm, 'ManagedObjectReference') and isinstance(host.vm.ManagedObjectReference, list):
                for vm_moref in host.vm.ManagedObjectReference:
                    if vmwobj.vm.mo_type and vm_moref.value:
                        content = self.retrieve_properties(vmwobj.vm.mo_type, vmwobj.vm.pset_detail, vmwobj.vm.mo_type, vm_moref.value)
                        if content:
                            vmmoreflist.append(vm_moref)
                            vmlist.append(content)
            # Find the SCSI controller key and count existing disks (the new
            # disk's unitNumber must not collide with existing ones).
            for device in vmlist[0].config.hardware.device :
                if device.deviceInfo.label == 'SCSI controller 0' :
                    controllerKey = device.key
                elif device.__class__.__name__ == 'VirtualDisk' :
                    diskCount = diskCount + 1
        with self.get_ds_for_host_lock:
            host = self.get_host(moref)
            dslist = []
            dsmoreflist = []
            if hasattr(host, 'datastore') and hasattr(host.datastore, 'ManagedObjectReference') and isinstance(host.datastore.ManagedObjectReference, list):
                for ds_moref in host.datastore.ManagedObjectReference:
                    if vmwobj.ds.mo_type and ds_moref.value:
                        content = self.retrieve_properties(vmwobj.ds.mo_type, vmwobj.ds.pset_detail, vmwobj.ds.mo_type, ds_moref.value)
                        if content:
                            dsmoreflist.append(ds_moref)
                            dslist.append(content)
        # Build the device-change spec for a 3 GB thin-provisioned disk.
        backing = self.factory('VirtualDiskFlatVer2BackingInfo')
        vdisk = self.factory('VirtualDisk')
        vdevConfig = self.factory('VirtualDeviceConfigSpec')
        vmConfig = self.factory('VirtualMachineConfigSpec')
        backing.thinProvisioned = True
        backing.datastore = dsmoreflist[0]
        backing.diskMode = 'persistent'
        backing.fileName = '[' + dslist[0].name + '] ' + vmlist[0].name + '/' + 'test_vdisk' + str(diskCount) + '.vmdk'
        vdisk.backing = backing
        vdisk.capacityInKB = 3 * 1048576
        vdisk.controllerKey = controllerKey
        vdisk.key = -1
        vdisk.unitNumber = diskCount
        vdevConfig.device = vdisk
        vdevConfig.fileOperation = 'create'
        vdevConfig.operation = 'add'
        specs = []
        specs.append(vdevConfig)
        vmConfig.deviceChange = specs
        task = self.vim.ReconfigVM_Task(vmmoreflist[0], vmConfig)
    def file_transfer_to_guest(self, moref, user_vmmoref, username, password, isession, src_filepath, dst_filepath, overwrite) :
        """Upload a file into a guest OS via the vSphere guest-operations API.

        Initiates a file transfer (InitiateFileTransferToGuest) and performs
        an HTTP PUT of src_filepath to the returned URL.
        NOTE(review): looks like test/utility code -- it falls back to
        hard-coded credentials/paths and to vmmoreflist[6]/vmlist[6] when
        arguments are missing, and uses print for progress output.
        """
        print "ENTERING FILE TRANSFER TO GUEST"
        #retrieve managed objects required for calling the vim statement
        with self.get_vms_for_host_lock:
            hosts = self.get_hosts()
            #print(str(hosts))
            sc = self.sc
            vmlist = []
            vmmoreflist = []
            gom = []
            gom_moref = []
            for host in hosts:
                if hasattr(host, 'vm') and hasattr(host.vm, 'ManagedObjectReference') and isinstance(host.vm.ManagedObjectReference, list):
                    for vm_moref in host.vm.ManagedObjectReference:
                        if vmwobj.vm.mo_type and vm_moref.value:
                            content = self.retrieve_properties(vmwobj.vm.mo_type, vmwobj.vm.pset_detail, vmwobj.vm.mo_type, vm_moref.value)
                            if content:
                                vmlist.append(content)
                                vmmoreflist.append(vm_moref)
            #print(str(vmmoreflist))
            #print(str(vmlist))
        if user_vmmoref:
            vmmoref = user_vmmoref
        else:
            print "This operation requires a VM Managed Object Reference."
            vmmoref = vmmoreflist[6]
        guest_auth = self.factory('NamePasswordAuthentication')
        if username and password:
            print "Acquiring user credentials..."
            guest_auth.username = username
            guest_auth.password = password
        else:
            print "'username' and 'password' values are required."
            guest_auth.username = "root"
            guest_auth.password = "rootpwd"
        if isession:
            print "Acquiring isession value..."
            guest_auth.interactiveSession = isession
        else:
            print "This will NOT be an interactive session"
            guest_auth.interactiveSession = False
        if src_filepath and dst_filepath:
            filepath = src_filepath
            guest_filepath = dst_filepath
        else:
            print "'source filepath', and 'destination filepath' values are required."
            filepath = "C:\Test.zip"
            guest_filepath = "/tmp/Test.zip"
        # POSIX vs. Windows attributes depend on the guest OS family.
        vm = vmlist[6]
        if vm.guest.guestFamily == 'linuxGuest' :
            file_attribs = self.factory('GuestPosixFileAttributes')
        else :
            file_attribs = self.factory('GuestWindowsFileAttributes')
        if hasattr(sc, 'guestOperationsManager'):
            if vmwobj.guest_operations_manager.mo_type and sc.guestOperationsManager.value:
                content = self.retrieve_properties(vmwobj.guest_operations_manager.mo_type, vmwobj.guest_operations_manager.pset_detail,
                vmwobj.guest_operations_manager.mo_type, sc.guestOperationsManager.value)
                if content:
                    gom = content
                    gom_moref = sc.guestOperationsManager
        fileManager = gom.fileManager
        filesize = os.path.getsize(filepath)
        try:
            with open(filepath, 'rb') as f: pass
            data = open(filepath, 'rb')
        except IOError as e:
            print "Cannot locate the file " + filepath
        if overwrite:
            writeover = overwrite
        else:
            writeover = True
        try:
            print "Initiating file transfer to guest..."
            put_url = self.vim.InitiateFileTransferToGuest(fileManager, vmmoref, guest_auth, guest_filepath, file_attribs, filesize, writeover)
            if put_url:
                print "Done initiating file transfer to guest. URL is..."
                print(str(put_url))
        except:
            log.exception('Error initiating file transfer to guest.')
        try:
            # Upload the file contents with an HTTP PUT to the ticketed URL.
            print "Uploading " + str(filepath) + " to guest..."
            opener = urllib2.build_opener(urllib2.HTTPSHandler)
            request = urllib2.Request(put_url)
            request.add_data(data)
            request.add_header('content-length', filesize)
            request.get_method = lambda: 'PUT'
            url = opener.open(request)
            print "Successfully uploaded " + str(filepath) + " to guest!"
        except:
            log.exception('Error uploading file to guest.')
    def get_hds_for_hcm(self, moref):
        """Query the host's datastore system (HostDatastoreSystem).

        NOTE(review): looks like exploratory/test code -- it probes VMFS
        creation options with a hard-coded volume name and leaves the actual
        CreateVmfsDatastore call commented out.
        """
        print ("entering hds")
        host = self.get_host(moref)
        if hasattr(host, 'configManager') and hasattr(host.configManager, 'datastoreSystem'):
            content = self.retrieve_properties(vmwobj.hds.mo_type, vmwobj.hds.pset_detail, vmwobj.hds.mo_type, host.configManager.datastoreSystem.value)
            print (str(host.configManager.datastoreSystem))
            host_scsi_disks = []
            host_scsi_disks.append(self.vim.QueryAvailableDisksForVmfs(host.configManager.datastoreSystem))
            print(str(host_scsi_disks))
            for host_scsi_disk in host_scsi_disks:
                print(str(host_scsi_disk[0].devicePath))
                vmfs_datastore_options = self.vim.QueryVmfsDatastoreCreateOptions(host.configManager.datastoreSystem, host_scsi_disk[0].devicePath)
                print(str(vmfs_datastore_options))
            vmfs_spec = self.factory('VmfsDatastoreCreateSpec')
            vmfs_spec.vmfs.majorVersion = 5
            vmfs_spec.vmfs.volumeName = "test"
            for lun in host.config.storageDevice.scsiLun :
                vmfs_spec.vmfs.extent.partition = 1
                vmfs_spec.vmfs.extent.diskName = lun.canonicalName
            #print(str(vmfs_spec))
            #service = self.vim.CreateVmfsDatastore(host.configManager.datastoreSystem, vmfs_datastore_options[0].spec)
            #print (str(service))
            if content:
                return content
def get_dvpg(self):
dvpgs = []
content = self.retrieve_list(vmwobj.dvpg.mo_type, vmwobj.dvpg.pset_detail)
for dvpg in content :
obj = vmwobj.factory(dvpg.obj)
obj.setprop(dvpg.propSet)
dvpgs.append(obj)
return dvpgs
def get_vdvsw(self):
vdvsws = []
content = self.retrieve_list(vmwobj.vdvsw.mo_type, vmwobj.vdvsw.pset_detail)
for s in content :
obj = vmwobj.factory(s.obj)
obj.setprop(s.propSet)
vdvsws.append(obj)
return vdvsws
def get_vm(self, moref):
vm = None
content = self.retrieve_list(vmwobj.vm.mo_type, vmwobj.vm.pset_detail)
for vm in content :
obj = vmwobj.factory(vm.obj)
obj.setprop(vm.propSet)
vm = obj
return vm
def get_ds(self, moref):
ds = None
content = self.retrieve_list(vmwobj.ds.mo_type, vmwobj.ds.pset_detail)
for ds in content :
obj = vmwobj.factory(ds.obj)
obj.setprop(ds.propSet)
ds = obj
return ds
    def get_vms_for_host(self, moref):
        """Return detailed VM objects for every VM registered on the host
        identified by moref ('HostSystem:host-NNN')."""
        with self.get_vms_for_host_lock:
            host = self.get_host(moref)
            vmlist = []
            if hasattr(host, 'vm') and hasattr(host.vm, 'ManagedObjectReference') and isinstance(host.vm.ManagedObjectReference, list):
                for vm_moref in host.vm.ManagedObjectReference:
                    if vmwobj.vm.mo_type and vm_moref.value:
                        content = self.retrieve_properties(vmwobj.vm.mo_type, vmwobj.vm.pset_detail, vmwobj.vm.mo_type, vm_moref.value)
                        if content:
                            vmlist.append(content)
            return vmlist
def get_host_list(self):
hostlist = []
hosts = self.retrieve_host_list()
for host in hosts:
h = vmwobj.factory(host.obj)
h.setprop(host.propSet)
hostlist.append(h)
return hostlist
    def update_host_list(self):
        # Refresh the cached host list; fetch outside the lock, swap inside.
        temphostlist = self.get_host_list()
        with self.host_update_lock:
            self.hosts = temphostlist
    def generate_host_list(self):
        # Initial discovery: fetch while holding the lock and mark done.
        with self.host_update_lock:
            self.hosts = self.get_host_list()
        self.host_discovery_done = True
def get_cluster_list(self):
clusters = self.retreive_cluster_list()
clusterlist = []
for cluster in clusters:
c = vmwobj.factory(cluster.obj)
c.setprop(cluster.propSet)
clusterlist.append(c)
return clusterlist
    def update_cluster_list(self):
        # Refresh the cached cluster list; fetch outside the lock, swap inside.
        tempclusterlist = self.get_cluster_list()
        with self.cluster_update_lock:
            self.clusters = tempclusterlist
    def update(self):
        """Refresh the cached host and cluster lists (at most once a minute),
        reconnecting first if the session dropped."""
        log.debug('Updating...')
        if not self.connected():
            log.debug('Re-connecting session...')
            self.comm_error()
            self.connect()
        if self.connected():
            log.debug('Connected, getting vcenterupdatelock...')
            with self.vcenterupdatelock:
                # Throttle: skip the refresh if the last one was < 60s ago.
                listupdate_et = time.time() - self.listupdatetime
                if (listupdate_et) > 60:
                    self.update_host_list()
                    self.update_cluster_list()
            log.debug('updated, vcenterupdatelock released...')
        else:
            log.error('Unable to connect to vCenter: %s', self.host)
    def generate_cluster_list(self):
        # Initial discovery of clusters, done under the lock.
        with self.cluster_update_lock:
            self.clusters = self.get_cluster_list()
    def get_host_details(self, moref, force=False):
        """Return the detailed property set for one host, using the catalog
        cache unless force is True (fresh results are re-cached)."""
        content = catalog.lookup(moref)
        if not content or force:
            try:
                # moref is 'HostSystem:host-NNN'; the API wants only the id part.
                content = self.retrieve_properties(vmwobj.host.mo_type, vmwobj.host.pset_detail, vmwobj.host.mo_type, moref.split(':')[-1])
                catalog.insert(content, moref)
            except Exception as e:
                log.exception("error in vcenter %s", str(e))
                self.comm_error(e)
        return content
def has_host(self, host):
with self.host_update_lock:
for h in self.hosts:
if h.moref() == host:
return True
return False
def has_obj(self, obj):
items = []
if obj.startswith('HostSystem'):
with self.host_update_lock:
items = self.hosts
elif obj.startswith('ClusterComputeResource'):
with self.cluster_update_lock:
items = self.clusters
for item in items :
if item.moref() == obj:
return True
return False
    def get_obj(self, obj):
        """Return the cached host or cluster matching the given moref
        ('HostSystem:...' or 'ClusterComputeResource:...'), or None."""
        items = []
        if obj.startswith('HostSystem'):
            with self.host_update_lock:
                items = self.hosts
        elif obj.startswith('ClusterComputeResource'):
            with self.cluster_update_lock:
                items = self.clusters
        for item in items:
            if item.moref() == obj:
                return item
        return None
def get_hosts(self):
thislist = []
with self.host_update_lock:
thislist = self.hosts
return thislist
def get_host(self, moref):
with self.host_update_lock:
for h in self.hosts:
if h.moref() == moref:
return h
return None
def arglist(self, kvpairs):
args = []
for arg in kvpairs:
obj = Object()
obj.key = arg['key']
obj.value = arg['value']
args.append(obj)
return args
    def log_event(self, mob, event):
        """Post a custom EventEx to the vCenter event log on behalf of the
        managed object mob (HostSystem, VirtualMachine or DataCenter)."""
        ev = self.factory('EventEx')
        if mob._type == 'HostSystem':
            ev.host = self.factory('HostEventArgument')
            ev.host.host = ManagedObjectRef(mob._type, mob.value)
            ev.host.name = mob.value
        elif mob._type == 'VirtualMachine':
            ev.vm = self.factory('VmEventArgument')
            ev.vm.vm = ManagedObjectRef(mob._type, mob.value)
            ev.vm.name = mob.value
        elif mob._type == 'DataCenter':
            ev.datacenter = self.factory('DataCenterEventArgument')
            ev.datacenter.datacenter = ManagedObjectRef(mob._type, mob.value)
            ev.datacenter.name = mob.value
        else:
            log.debug("failed log_event(%s, %s)", mob, event.message())
        #ev.message = message
        ev.userName = "HP Insight Management"
        ev.eventTypeId = event.event_type_id()
        ev.createdTime = self.vim.CurrentTime(self.service_reference)
        ev.arguments = self.arglist(event.get_args())
        # The arguments' <value> elements need an explicit xsi:type for the
        # server to accept them; AddAttr stamps it during marshalling.
        self.client.set_options(plugins=[AddAttr('value', 'xsi:type', 'xsd:string')])
        try:
            self.vim.PostEvent(self.sc.eventManager, ev)
        except:
            self.comm_error()
            log.exception('Error in log event')
        # Remove the plugin so other calls are not affected.
        self.client.set_options(plugins=[])
# Manual test driver: connects to a vCenter given on the command line and
# times WSDL parsing, login and host discovery.
if __name__ == '__main__':
    def time_function_call(f, *args):
        # Return (elapsed_seconds, return_value) for one call of f.
        import time
        t0 = time.time()
        val = f(*args)
        tdiff = time.time() - t0
        return tdiff, val
    import time
    from util import cmdline_opt
    target, username, password = cmdline_opt.vcenter()
    t0 = time.time()
    vc = vCenter(target)
    tdiff = time.time() - t0
    print "vCenter object initialization time: " + str(tdiff)
    et, ret = time_function_call(vc.setup)
    print "Parsing wsdl time: " + str( et )
    et, ret = time_function_call(vc.login, username, password )
    print "vCenter login time: " + str(et)
#    t0 = time.time()
#    content = vc.retrieve_host_list()
#    tdiff = time.time() - t0
#    print "retrieve host list time: " + str(tdiff)
#    print content
    # Property sets used by the (mostly commented-out) experiments below.
    prop_set_minimal=['name', 'hardware.systemInfo.uuid']
    prop_set_used_in_brek=['overallStatus', 'hardware', 'name', 'config.ipmi', 'config.network', 'config.product',
    'config.storageDevice.hostBusAdapter', 'config.storageDevice.multipathInfo.lun',
    'vm']
    prop_set_details=['overallStatus', 'hardware', 'name', 'config','vm']
    prop_set_datacenter = ['name', 'configuration']
    prop_set_vm = ['name' , 'config.uuid', 'config.hardware', 'guest' ]
    prop_set_dvportgroup = ['name', 'configStatus', 'config', 'host']
    prop_set_cluster = ['name', 'configStatus', 'host']
    prop_set_datastore = ['name', 'info', 'host']
    prop_set_dvswitch = ['name', 'configStatus', 'config']
#    t0 = time.time()
#    content = vc.retrieve_list('HostSystem', prop_set_minimal)
#    tdiff = time.time() - t0
#    print "retrieve host list time: " + str(tdiff)
#    print content
    #vc.generate_host_list()
    et, hosts = time_function_call(vc.generate_host_list)
    print "getting host minimal: " + str(et)
    vc.print_host()
    #print hosts
#    et, hosts = time_function_call(vc.retrieve_list, 'HostSystem', prop_set_minimal)
#    print "getting host minimal: " + str(et)
#    print hosts
#
#    #et, hosts = time_function_call(vc.retrieve_properties, 'HostSystem', prop_set_used_in_brek, 'HostSystem', 'host-146')
#    #print "getting prop_set_used_in_brek: " + str(et)
#
#    et, dvports = time_function_call(vc.retrieve_list, 'DistributedVirtualPortgroup', prop_set_dvportgroup)
#    print "getting prop_set_dvportgroup: " + str(et)
#
#    et, clusters = time_function_call(vc.retrieve_list, 'ClusterComputeResource', prop_set_cluster)
#    print "getting prop_set_cluster: " + str(et)
#    et, datastores = time_function_call(vc.retrieve_list, 'Datastore', prop_set_datastore)
#    print "getting prop_set_datastore: " + str(et)
#    et, dvswitches = time_function_call(vc.retrieve_list, 'VmwareDistributedVirtualSwitch', prop_set_dvswitch)
#    print "getting prop_set_dvswitch: " + str(et)
    #et, vms_for_host = time_function_call(vc.retrieve_properties, 'VirtualMachine', prop_set_vm, 'VirtualMachine', 'vm-148')
    #print "getting vm list for host: " + str(et)
    #print vms_for_host
#    et, content = time_function_call(vc.retrieve_vm_list_for_host, 'HostSystem', 'host-149')
#    print "getting vm list for host: " + str(et)
#    print content
#    et, content = time_function_call(vc.retrieve_vm_list_for_host, 'HostSystem', 'host-144')
#    print "getting vm list for host: " + str(et)
#    print content
#    et, content = time_function_call(vc.retrieve_vm_list_for_host, 'HostSystem', 'host-141')
#    print "getting vm list for host: " + str(et)
#    print content
#    t0 = time.time()
#    content = vc.retrieve_properties('HostSystem', prop_set_minimal, 'HostSystem', 'host-146')
#    tdiff = time.time() - t0
#    print prop_set_minimal
#    print "**********************retrieve host prop_set_minimal time: " + str(tdiff)
#    print content
#
#    t0 = time.time()
#    content = vc.retrieve_properties('HostSystem', prop_set_used_in_brek, 'HostSystem', 'host-146')
#    tdiff = time.time() - t0
#    print prop_set_used_in_brek
#    print "**********************retrieve host prop_set_used_in_brek time: " + str(tdiff)
#    print content
#
#    t0 = time.time()
#    content = vc.retrieve_properties('HostSystem', prop_set_details, 'HostSystem', 'host-146')
#    tdiff = time.time() - t0
#    print prop_set_details
#    print "**********************retrieve host prop_set_details time: " + str(tdiff)
#    print content
"raychorn@gmail.com"
] | raychorn@gmail.com |
7b59d635ceceda572349f0002f30d490b644440d | 8f8d9428c68aa6bb1e6d131c505c217403979955 | /Python/0053. 螺旋三角.py | 442375ec79cdb6f9c5f503f591d033fb29160d4a | [] | no_license | yang4978/Huawei-OJ | fb3799c0f807b853fcfd4574b809fed5740fc6ea | ea3cccb2b070545574fadd64aecd38f73804361d | refs/heads/main | 2023-04-17T22:04:44.526675 | 2021-04-30T03:40:26 | 2021-04-30T03:40:26 | 337,046,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | # If you need to import additional packages or classes, please import here.
def func():
# please define the python3 input here.
# For example: a,b = map(int, input().strip().split())
# please finish the function body here.
# please define the python3 output here. For example: print().
while True:
try:
n = int(input())
arr = [[0]*n for _ in range(n)]
cnt = 1
for i in range(n):
if i%3 == 0:
for j in range(i//3,n-1-i//3*2):
arr[i//3][j] = cnt
cnt += 1
elif i%3 == 1:
for j in range(i//3,n-i//3*2):
arr[j][n-j-1-i//3] = cnt
cnt += 1
else:
for j in range(n-i//3*2-2,i//3,-1):
arr[j][i//3] = cnt
cnt += 1
if n%3 == 1:
arr[n//3][n//3] = (1+n)*n//2
for l in arr:
for i in range(n):
if i == 0:
print(l[i],end='')
elif l[i]:
print('',l[i],end='')
print('')
except EOFError:
break
if __name__ == "__main__":
func()
| [
"noreply@github.com"
] | yang4978.noreply@github.com |
30a24d477498b4413500a53839689643274b89e7 | 4fc73cbe9e79974cde44d674b7c810edc9b07cd2 | /puyopuyoall/feverclass.py | ebb79ed88aef6c45b799c740ea0ede9ca7610cc8 | [] | no_license | yuufujikata2/Games | 36fbcdfbba976cc6b1850fd5f950bf269f81248d | abc2177023653247ebe1abb9cab172db31d038dc | refs/heads/master | 2020-07-30T14:07:54.466545 | 2019-09-23T03:39:56 | 2019-09-23T03:39:56 | 210,257,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py |
class Fever():
def __init__(self):
self.haichi=[[0 for i in range(8)] for j in range(15)]
self.fegauge=0
self.fetime=15
self.fehantei=None
self.ferensasuu=5
def feverin(self,field,ferensatane):
self.haichi=[[s for s in m] for m in field.haichi]
self.fehantei=True
field.haichi=[[s for s in m] for m in ferensatane.rensahaichi(self.ferensasuu)]
self.fegauge=0
def fevertuduki(self,field,ferensatane):
field.haichi=[[s for s in m] for m in ferensatane.rensahaichi(self.ferensasuu)]
def feverout(self,field):
field.haichi=[[s for s in m] for m in self.haichi]
self.fehantei=False
self.fetime=15
| [
"yuu.1201.soccer.f@gmail.com"
] | yuu.1201.soccer.f@gmail.com |
ede2789c8c52c03ef68198b4070f979461ae3c2b | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_Wxyz_focus_Z_ok/pyr_Tcrop255_pad20_jit15/Sob_k15_s001_EroM_Mae_s001/pyr_4s/L6/step09_4side_L6.py | d7d9613e38ece92e30feabda063d9ae25ca3206d | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111,554 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_w_M_to_Wx_Wy_Wz_combine import I_w_M_to_W
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_w_M_to_W
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
import Exps_7_v3.Basic_Pyramid_1ch_model_for_import.pyr_4s.L6.step09_4side_L6 as pyr_1ch_model
# Generator op / train-step op shared by every combined model builder below.
# Both tight-crop around the mask (pad_size=20) and resize to 255x255; only the
# train step applies random jitter (jit_scale=15) while the generate op uses
# jit_scale=0 -- presumably so evaluation output stays deterministic (confirm intent).
use_gen_op = I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )
use_train_step = Train_step_I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15) )
import time
start_time = time.time()  # wall-clock reference; elapsed time is reported at the end of the script
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
# Pyramid configs for 1side counts 1..4.  Every config is a symmetric 13-entry
# vector: entry 0/12 is always 4, entries at levels >= the 1side count are 0, and
# each inner entry counts how many of the four side settings reach past that
# level.  The values are identical to the original hand-written literals; they
# appear to encode per-level connection counts for the pyramid model -- confirm
# against the pyramid builder module.
def _pyr_vec(s1, s2, s3, s4):
    """Return the symmetric 13-entry pyramid vector for side counts (s1, s2, s3, s4)."""
    half = [sum(side > level for side in (s1, s2, s3, s4)) for level in range(7)]
    return half + half[5::-1]  # mirror the first 6 entries to form a palindrome

# "1" 3 6 10 15 21 28 36 45 55
# side1 OK 1
pyramid_1side_1__2side_1__3side_1_4side_1 = _pyr_vec(1, 1, 1, 1)
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
pyramid_1side_2__2side_1__3side_1_4side_1 = _pyr_vec(2, 1, 1, 1)
pyramid_1side_2__2side_2__3side_1_4side_1 = _pyr_vec(2, 2, 1, 1)
pyramid_1side_2__2side_2__3side_2_4side_1 = _pyr_vec(2, 2, 2, 1)
pyramid_1side_2__2side_2__3side_2_4side_2 = _pyr_vec(2, 2, 2, 2)
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
pyramid_1side_3__2side_1__3side_1_4side_1 = _pyr_vec(3, 1, 1, 1)
pyramid_1side_3__2side_2__3side_1_4side_1 = _pyr_vec(3, 2, 1, 1)
pyramid_1side_3__2side_2__3side_2_4side_1 = _pyr_vec(3, 2, 2, 1)
pyramid_1side_3__2side_3__3side_1_4side_1 = _pyr_vec(3, 3, 1, 1)
pyramid_1side_3__2side_3__3side_2_4side_1 = _pyr_vec(3, 3, 2, 1)
pyramid_1side_3__2side_3__3side_3_4side_1 = _pyr_vec(3, 3, 3, 1)
pyramid_1side_3__2side_2__3side_2_4side_2 = _pyr_vec(3, 2, 2, 2)
pyramid_1side_3__2side_3__3side_2_4side_2 = _pyr_vec(3, 3, 2, 2)
pyramid_1side_3__2side_3__3side_3_4side_2 = _pyr_vec(3, 3, 3, 2)
pyramid_1side_3__2side_3__3side_3_4side_3 = _pyr_vec(3, 3, 3, 3)
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
pyramid_1side_4__2side_1__3side_1_4side_1 = _pyr_vec(4, 1, 1, 1)
pyramid_1side_4__2side_2__3side_1_4side_1 = _pyr_vec(4, 2, 1, 1)
pyramid_1side_4__2side_2__3side_2_4side_1 = _pyr_vec(4, 2, 2, 1)
pyramid_1side_4__2side_3__3side_1_4side_1 = _pyr_vec(4, 3, 1, 1)
pyramid_1side_4__2side_3__3side_2_4side_1 = _pyr_vec(4, 3, 2, 1)
pyramid_1side_4__2side_3__3side_3_4side_1 = _pyr_vec(4, 3, 3, 1)
pyramid_1side_4__2side_4__3side_1_4side_1 = _pyr_vec(4, 4, 1, 1)
pyramid_1side_4__2side_4__3side_2_4side_1 = _pyr_vec(4, 4, 2, 1)
pyramid_1side_4__2side_4__3side_3_4side_1 = _pyr_vec(4, 4, 3, 1)
pyramid_1side_4__2side_4__3side_4_4side_1 = _pyr_vec(4, 4, 4, 1)
pyramid_1side_4__2side_2__3side_2_4side_2 = _pyr_vec(4, 2, 2, 2)
pyramid_1side_4__2side_3__3side_2_4side_2 = _pyr_vec(4, 3, 2, 2)
pyramid_1side_4__2side_3__3side_3_4side_2 = _pyr_vec(4, 3, 3, 2)
pyramid_1side_4__2side_4__3side_2_4side_2 = _pyr_vec(4, 4, 2, 2)
pyramid_1side_4__2side_4__3side_3_4side_2 = _pyr_vec(4, 4, 3, 2)
pyramid_1side_4__2side_4__3side_4_4side_2 = _pyr_vec(4, 4, 4, 2)
pyramid_1side_4__2side_3__3side_3_4side_3 = _pyr_vec(4, 3, 3, 3)
pyramid_1side_4__2side_4__3side_3_4side_3 = _pyr_vec(4, 4, 3, 3)
pyramid_1side_4__2side_4__3side_4_4side_3 = _pyr_vec(4, 4, 4, 3)
pyramid_1side_4__2side_4__3side_4_4side_4 = _pyr_vec(4, 4, 4, 4)
# 1 3 6 10 "15" 21 28 36 45 55
# side5 OK 35
# Each config is a symmetric 13-entry vector (entry 0/12 always 4, entries at
# levels >= 5 are 0); the inner values count how many side settings reach past
# each level.  BUGFIX: the 2side_3__3side_1 row originally ended with
# "..., 1, 1, 2, 3, 4]" -- its right half was copied from the 3side_2 row and
# broke the left/right symmetry that every other config in this file obeys.
# The right half now mirrors the left half [4, 2, 2, 1, 1].
pyramid_1side_5__2side_1__3side_1_4side_1 = [4, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 4]
pyramid_1side_5__2side_2__3side_1_4side_1 = [4, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 4]
pyramid_1side_5__2side_2__3side_2_4side_1 = [4, 3, 1, 1, 1, 0, 0, 0, 1, 1, 1, 3, 4]
pyramid_1side_5__2side_3__3side_1_4side_1 = [4, 2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 4]  # was [..., 1, 1, 2, 3, 4] (asymmetric)
pyramid_1side_5__2side_3__3side_2_4side_1 = [4, 3, 2, 1, 1, 0, 0, 0, 1, 1, 2, 3, 4]
pyramid_1side_5__2side_3__3side_3_4side_1 = [4, 3, 3, 1, 1, 0, 0, 0, 1, 1, 3, 3, 4]
pyramid_1side_5__2side_4__3side_1_4side_1 = [4, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 4]
pyramid_1side_5__2side_4__3side_2_4side_1 = [4, 3, 2, 2, 1, 0, 0, 0, 1, 2, 2, 3, 4]
pyramid_1side_5__2side_4__3side_3_4side_1 = [4, 3, 3, 2, 1, 0, 0, 0, 1, 2, 3, 3, 4]
pyramid_1side_5__2side_4__3side_4_4side_1 = [4, 3, 3, 3, 1, 0, 0, 0, 1, 3, 3, 3, 4]
pyramid_1side_5__2side_5__3side_1_4side_1 = [4, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2, 4]
pyramid_1side_5__2side_5__3side_2_4side_1 = [4, 3, 2, 2, 2, 0, 0, 0, 2, 2, 2, 3, 4]
pyramid_1side_5__2side_5__3side_3_4side_1 = [4, 3, 3, 2, 2, 0, 0, 0, 2, 2, 3, 3, 4]
pyramid_1side_5__2side_5__3side_4_4side_1 = [4, 3, 3, 3, 2, 0, 0, 0, 2, 3, 3, 3, 4]
pyramid_1side_5__2side_5__3side_5_4side_1 = [4, 3, 3, 3, 3, 0, 0, 0, 3, 3, 3, 3, 4]
pyramid_1side_5__2side_2__3side_2_4side_2 = [4, 4, 1, 1, 1, 0, 0, 0, 1, 1, 1, 4, 4]
pyramid_1side_5__2side_3__3side_2_4side_2 = [4, 4, 2, 1, 1, 0, 0, 0, 1, 1, 2, 4, 4]
pyramid_1side_5__2side_3__3side_3_4side_2 = [4, 4, 3, 1, 1, 0, 0, 0, 1, 1, 3, 4, 4]
pyramid_1side_5__2side_4__3side_2_4side_2 = [4, 4, 2, 2, 1, 0, 0, 0, 1, 2, 2, 4, 4]
pyramid_1side_5__2side_4__3side_3_4side_2 = [4, 4, 3, 2, 1, 0, 0, 0, 1, 2, 3, 4, 4]
pyramid_1side_5__2side_4__3side_4_4side_2 = [4, 4, 3, 3, 1, 0, 0, 0, 1, 3, 3, 4, 4]
pyramid_1side_5__2side_5__3side_2_4side_2 = [4, 4, 2, 2, 2, 0, 0, 0, 2, 2, 2, 4, 4]
pyramid_1side_5__2side_5__3side_3_4side_2 = [4, 4, 3, 2, 2, 0, 0, 0, 2, 2, 3, 4, 4]
pyramid_1side_5__2side_5__3side_4_4side_2 = [4, 4, 3, 3, 2, 0, 0, 0, 2, 3, 3, 4, 4]
pyramid_1side_5__2side_5__3side_5_4side_2 = [4, 4, 3, 3, 3, 0, 0, 0, 3, 3, 3, 4, 4]
pyramid_1side_5__2side_3__3side_3_4side_3 = [4, 4, 4, 1, 1, 0, 0, 0, 1, 1, 4, 4, 4]
pyramid_1side_5__2side_4__3side_3_4side_3 = [4, 4, 4, 2, 1, 0, 0, 0, 1, 2, 4, 4, 4]
pyramid_1side_5__2side_4__3side_4_4side_3 = [4, 4, 4, 3, 1, 0, 0, 0, 1, 3, 4, 4, 4]
pyramid_1side_5__2side_5__3side_3_4side_3 = [4, 4, 4, 2, 2, 0, 0, 0, 2, 2, 4, 4, 4]
pyramid_1side_5__2side_5__3side_4_4side_3 = [4, 4, 4, 3, 2, 0, 0, 0, 2, 3, 4, 4, 4]
pyramid_1side_5__2side_5__3side_5_4side_3 = [4, 4, 4, 3, 3, 0, 0, 0, 3, 3, 4, 4, 4]
pyramid_1side_5__2side_4__3side_4_4side_4 = [4, 4, 4, 4, 1, 0, 0, 0, 1, 4, 4, 4, 4]
pyramid_1side_5__2side_5__3side_4_4side_4 = [4, 4, 4, 4, 2, 0, 0, 0, 2, 4, 4, 4, 4]
pyramid_1side_5__2side_5__3side_5_4side_4 = [4, 4, 4, 4, 3, 0, 0, 0, 3, 4, 4, 4, 4]
pyramid_1side_5__2side_5__3side_5_4side_5 = [4, 4, 4, 4, 4, 0, 0, 0, 4, 4, 4, 4, 4]
# Pyramid configs for 1side counts 6 and 7, expressed through the same closed
# form as the smaller groups: a symmetric 13-entry vector whose inner entries
# count how many of the four side settings reach past each level (entries at
# levels >= the 1side count are 0).  Values are identical to the original
# hand-written literals.
def _pyr_vec(s1, s2, s3, s4):
    """Return the symmetric 13-entry pyramid vector for side counts (s1, s2, s3, s4)."""
    half = [sum(side > level for side in (s1, s2, s3, s4)) for level in range(7)]
    return half + half[5::-1]  # mirror the first 6 entries to form a palindrome

# 1 3 6 10 15 "21" 28 36 45 55
# side6 OK 56
pyramid_1side_6__2side_1__3side_1_4side_1 = _pyr_vec(6, 1, 1, 1)
pyramid_1side_6__2side_2__3side_1_4side_1 = _pyr_vec(6, 2, 1, 1)
pyramid_1side_6__2side_2__3side_2_4side_1 = _pyr_vec(6, 2, 2, 1)
pyramid_1side_6__2side_3__3side_1_4side_1 = _pyr_vec(6, 3, 1, 1)
pyramid_1side_6__2side_3__3side_2_4side_1 = _pyr_vec(6, 3, 2, 1)
pyramid_1side_6__2side_3__3side_3_4side_1 = _pyr_vec(6, 3, 3, 1)
pyramid_1side_6__2side_4__3side_1_4side_1 = _pyr_vec(6, 4, 1, 1)
pyramid_1side_6__2side_4__3side_2_4side_1 = _pyr_vec(6, 4, 2, 1)
pyramid_1side_6__2side_4__3side_3_4side_1 = _pyr_vec(6, 4, 3, 1)
pyramid_1side_6__2side_4__3side_4_4side_1 = _pyr_vec(6, 4, 4, 1)
pyramid_1side_6__2side_5__3side_1_4side_1 = _pyr_vec(6, 5, 1, 1)
pyramid_1side_6__2side_5__3side_2_4side_1 = _pyr_vec(6, 5, 2, 1)
pyramid_1side_6__2side_5__3side_3_4side_1 = _pyr_vec(6, 5, 3, 1)
pyramid_1side_6__2side_5__3side_4_4side_1 = _pyr_vec(6, 5, 4, 1)
pyramid_1side_6__2side_5__3side_5_4side_1 = _pyr_vec(6, 5, 5, 1)
pyramid_1side_6__2side_6__3side_1_4side_1 = _pyr_vec(6, 6, 1, 1)
pyramid_1side_6__2side_6__3side_2_4side_1 = _pyr_vec(6, 6, 2, 1)
pyramid_1side_6__2side_6__3side_3_4side_1 = _pyr_vec(6, 6, 3, 1)
pyramid_1side_6__2side_6__3side_4_4side_1 = _pyr_vec(6, 6, 4, 1)
pyramid_1side_6__2side_6__3side_5_4side_1 = _pyr_vec(6, 6, 5, 1)
pyramid_1side_6__2side_6__3side_6_4side_1 = _pyr_vec(6, 6, 6, 1)
pyramid_1side_6__2side_2__3side_2_4side_2 = _pyr_vec(6, 2, 2, 2)
pyramid_1side_6__2side_3__3side_2_4side_2 = _pyr_vec(6, 3, 2, 2)
pyramid_1side_6__2side_3__3side_3_4side_2 = _pyr_vec(6, 3, 3, 2)
pyramid_1side_6__2side_4__3side_2_4side_2 = _pyr_vec(6, 4, 2, 2)
pyramid_1side_6__2side_4__3side_3_4side_2 = _pyr_vec(6, 4, 3, 2)
pyramid_1side_6__2side_4__3side_4_4side_2 = _pyr_vec(6, 4, 4, 2)
pyramid_1side_6__2side_5__3side_2_4side_2 = _pyr_vec(6, 5, 2, 2)
pyramid_1side_6__2side_5__3side_3_4side_2 = _pyr_vec(6, 5, 3, 2)
pyramid_1side_6__2side_5__3side_4_4side_2 = _pyr_vec(6, 5, 4, 2)
pyramid_1side_6__2side_5__3side_5_4side_2 = _pyr_vec(6, 5, 5, 2)
pyramid_1side_6__2side_6__3side_2_4side_2 = _pyr_vec(6, 6, 2, 2)
pyramid_1side_6__2side_6__3side_3_4side_2 = _pyr_vec(6, 6, 3, 2)
pyramid_1side_6__2side_6__3side_4_4side_2 = _pyr_vec(6, 6, 4, 2)
pyramid_1side_6__2side_6__3side_5_4side_2 = _pyr_vec(6, 6, 5, 2)
pyramid_1side_6__2side_6__3side_6_4side_2 = _pyr_vec(6, 6, 6, 2)
pyramid_1side_6__2side_3__3side_3_4side_3 = _pyr_vec(6, 3, 3, 3)
pyramid_1side_6__2side_4__3side_3_4side_3 = _pyr_vec(6, 4, 3, 3)
pyramid_1side_6__2side_4__3side_4_4side_3 = _pyr_vec(6, 4, 4, 3)
pyramid_1side_6__2side_5__3side_3_4side_3 = _pyr_vec(6, 5, 3, 3)
pyramid_1side_6__2side_5__3side_4_4side_3 = _pyr_vec(6, 5, 4, 3)
pyramid_1side_6__2side_5__3side_5_4side_3 = _pyr_vec(6, 5, 5, 3)
pyramid_1side_6__2side_6__3side_3_4side_3 = _pyr_vec(6, 6, 3, 3)
pyramid_1side_6__2side_6__3side_4_4side_3 = _pyr_vec(6, 6, 4, 3)
pyramid_1side_6__2side_6__3side_5_4side_3 = _pyr_vec(6, 6, 5, 3)
pyramid_1side_6__2side_6__3side_6_4side_3 = _pyr_vec(6, 6, 6, 3)
pyramid_1side_6__2side_4__3side_4_4side_4 = _pyr_vec(6, 4, 4, 4)
pyramid_1side_6__2side_5__3side_4_4side_4 = _pyr_vec(6, 5, 4, 4)
pyramid_1side_6__2side_5__3side_5_4side_4 = _pyr_vec(6, 5, 5, 4)
pyramid_1side_6__2side_6__3side_4_4side_4 = _pyr_vec(6, 6, 4, 4)
pyramid_1side_6__2side_6__3side_5_4side_4 = _pyr_vec(6, 6, 5, 4)
pyramid_1side_6__2side_6__3side_6_4side_4 = _pyr_vec(6, 6, 6, 4)
pyramid_1side_6__2side_5__3side_5_4side_5 = _pyr_vec(6, 5, 5, 5)
pyramid_1side_6__2side_6__3side_5_4side_5 = _pyr_vec(6, 6, 5, 5)
pyramid_1side_6__2side_6__3side_6_4side_5 = _pyr_vec(6, 6, 6, 5)
pyramid_1side_6__2side_6__3side_6_4side_6 = _pyr_vec(6, 6, 6, 6)
# 1 3 6 10 15 21 "28" 36 45 55
# side7 OK 84
pyramid_1side_7__2side_1__3side_1_4side_1 = _pyr_vec(7, 1, 1, 1)
pyramid_1side_7__2side_2__3side_1_4side_1 = _pyr_vec(7, 2, 1, 1)
pyramid_1side_7__2side_2__3side_2_4side_1 = _pyr_vec(7, 2, 2, 1)
pyramid_1side_7__2side_3__3side_1_4side_1 = _pyr_vec(7, 3, 1, 1)
pyramid_1side_7__2side_3__3side_2_4side_1 = _pyr_vec(7, 3, 2, 1)
pyramid_1side_7__2side_3__3side_3_4side_1 = _pyr_vec(7, 3, 3, 1)
pyramid_1side_7__2side_4__3side_1_4side_1 = _pyr_vec(7, 4, 1, 1)
pyramid_1side_7__2side_4__3side_2_4side_1 = _pyr_vec(7, 4, 2, 1)
pyramid_1side_7__2side_4__3side_3_4side_1 = _pyr_vec(7, 4, 3, 1)
pyramid_1side_7__2side_4__3side_4_4side_1 = _pyr_vec(7, 4, 4, 1)
pyramid_1side_7__2side_5__3side_1_4side_1 = _pyr_vec(7, 5, 1, 1)
pyramid_1side_7__2side_5__3side_2_4side_1 = _pyr_vec(7, 5, 2, 1)
pyramid_1side_7__2side_5__3side_3_4side_1 = _pyr_vec(7, 5, 3, 1)
pyramid_1side_7__2side_5__3side_4_4side_1 = _pyr_vec(7, 5, 4, 1)
pyramid_1side_7__2side_5__3side_5_4side_1 = _pyr_vec(7, 5, 5, 1)
pyramid_1side_7__2side_6__3side_1_4side_1 = _pyr_vec(7, 6, 1, 1)
pyramid_1side_7__2side_6__3side_2_4side_1 = _pyr_vec(7, 6, 2, 1)
pyramid_1side_7__2side_6__3side_3_4side_1 = _pyr_vec(7, 6, 3, 1)
pyramid_1side_7__2side_6__3side_4_4side_1 = _pyr_vec(7, 6, 4, 1)
pyramid_1side_7__2side_6__3side_5_4side_1 = _pyr_vec(7, 6, 5, 1)
pyramid_1side_7__2side_6__3side_6_4side_1 = _pyr_vec(7, 6, 6, 1)
pyramid_1side_7__2side_7__3side_1_4side_1 = _pyr_vec(7, 7, 1, 1)
pyramid_1side_7__2side_7__3side_2_4side_1 = _pyr_vec(7, 7, 2, 1)
pyramid_1side_7__2side_7__3side_3_4side_1 = _pyr_vec(7, 7, 3, 1)
pyramid_1side_7__2side_7__3side_4_4side_1 = _pyr_vec(7, 7, 4, 1)
pyramid_1side_7__2side_7__3side_5_4side_1 = _pyr_vec(7, 7, 5, 1)
pyramid_1side_7__2side_7__3side_6_4side_1 = _pyr_vec(7, 7, 6, 1)
pyramid_1side_7__2side_7__3side_7_4side_1 = _pyr_vec(7, 7, 7, 1)
pyramid_1side_7__2side_2__3side_2_4side_2 = _pyr_vec(7, 2, 2, 2)
pyramid_1side_7__2side_3__3side_2_4side_2 = _pyr_vec(7, 3, 2, 2)
pyramid_1side_7__2side_3__3side_3_4side_2 = _pyr_vec(7, 3, 3, 2)
pyramid_1side_7__2side_4__3side_2_4side_2 = _pyr_vec(7, 4, 2, 2)
pyramid_1side_7__2side_4__3side_3_4side_2 = _pyr_vec(7, 4, 3, 2)
pyramid_1side_7__2side_4__3side_4_4side_2 = _pyr_vec(7, 4, 4, 2)
pyramid_1side_7__2side_5__3side_2_4side_2 = _pyr_vec(7, 5, 2, 2)
pyramid_1side_7__2side_5__3side_3_4side_2 = _pyr_vec(7, 5, 3, 2)
pyramid_1side_7__2side_5__3side_4_4side_2 = _pyr_vec(7, 5, 4, 2)
pyramid_1side_7__2side_5__3side_5_4side_2 = _pyr_vec(7, 5, 5, 2)
pyramid_1side_7__2side_6__3side_2_4side_2 = _pyr_vec(7, 6, 2, 2)
pyramid_1side_7__2side_6__3side_3_4side_2 = _pyr_vec(7, 6, 3, 2)
pyramid_1side_7__2side_6__3side_4_4side_2 = _pyr_vec(7, 6, 4, 2)
pyramid_1side_7__2side_6__3side_5_4side_2 = _pyr_vec(7, 6, 5, 2)
pyramid_1side_7__2side_6__3side_6_4side_2 = _pyr_vec(7, 6, 6, 2)
pyramid_1side_7__2side_7__3side_2_4side_2 = _pyr_vec(7, 7, 2, 2)
pyramid_1side_7__2side_7__3side_3_4side_2 = _pyr_vec(7, 7, 3, 2)
pyramid_1side_7__2side_7__3side_4_4side_2 = _pyr_vec(7, 7, 4, 2)
pyramid_1side_7__2side_7__3side_5_4side_2 = _pyr_vec(7, 7, 5, 2)
pyramid_1side_7__2side_7__3side_6_4side_2 = _pyr_vec(7, 7, 6, 2)
pyramid_1side_7__2side_7__3side_7_4side_2 = _pyr_vec(7, 7, 7, 2)
pyramid_1side_7__2side_3__3side_3_4side_3 = _pyr_vec(7, 3, 3, 3)
pyramid_1side_7__2side_4__3side_3_4side_3 = _pyr_vec(7, 4, 3, 3)
pyramid_1side_7__2side_4__3side_4_4side_3 = _pyr_vec(7, 4, 4, 3)
pyramid_1side_7__2side_5__3side_3_4side_3 = _pyr_vec(7, 5, 3, 3)
pyramid_1side_7__2side_5__3side_4_4side_3 = _pyr_vec(7, 5, 4, 3)
pyramid_1side_7__2side_5__3side_5_4side_3 = _pyr_vec(7, 5, 5, 3)
pyramid_1side_7__2side_6__3side_3_4side_3 = _pyr_vec(7, 6, 3, 3)
pyramid_1side_7__2side_6__3side_4_4side_3 = _pyr_vec(7, 6, 4, 3)
pyramid_1side_7__2side_6__3side_5_4side_3 = _pyr_vec(7, 6, 5, 3)
pyramid_1side_7__2side_6__3side_6_4side_3 = _pyr_vec(7, 6, 6, 3)
pyramid_1side_7__2side_7__3side_3_4side_3 = _pyr_vec(7, 7, 3, 3)
pyramid_1side_7__2side_7__3side_4_4side_3 = _pyr_vec(7, 7, 4, 3)
pyramid_1side_7__2side_7__3side_5_4side_3 = _pyr_vec(7, 7, 5, 3)
pyramid_1side_7__2side_7__3side_6_4side_3 = _pyr_vec(7, 7, 6, 3)
pyramid_1side_7__2side_7__3side_7_4side_3 = _pyr_vec(7, 7, 7, 3)
pyramid_1side_7__2side_4__3side_4_4side_4 = _pyr_vec(7, 4, 4, 4)
pyramid_1side_7__2side_5__3side_4_4side_4 = _pyr_vec(7, 5, 4, 4)
pyramid_1side_7__2side_5__3side_5_4side_4 = _pyr_vec(7, 5, 5, 4)
pyramid_1side_7__2side_6__3side_4_4side_4 = _pyr_vec(7, 6, 4, 4)
pyramid_1side_7__2side_6__3side_5_4side_4 = _pyr_vec(7, 6, 5, 4)
pyramid_1side_7__2side_6__3side_6_4side_4 = _pyr_vec(7, 6, 6, 4)
pyramid_1side_7__2side_7__3side_4_4side_4 = _pyr_vec(7, 7, 4, 4)
pyramid_1side_7__2side_7__3side_5_4side_4 = _pyr_vec(7, 7, 5, 4)
pyramid_1side_7__2side_7__3side_6_4side_4 = _pyr_vec(7, 7, 6, 4)
pyramid_1side_7__2side_7__3side_7_4side_4 = _pyr_vec(7, 7, 7, 4)
pyramid_1side_7__2side_5__3side_5_4side_5 = _pyr_vec(7, 5, 5, 5)
pyramid_1side_7__2side_6__3side_5_4side_5 = _pyr_vec(7, 6, 5, 5)
pyramid_1side_7__2side_6__3side_6_4side_5 = _pyr_vec(7, 6, 6, 5)
pyramid_1side_7__2side_7__3side_5_4side_5 = _pyr_vec(7, 7, 5, 5)
pyramid_1side_7__2side_7__3side_6_4side_5 = _pyr_vec(7, 7, 6, 5)
pyramid_1side_7__2side_7__3side_7_4side_5 = _pyr_vec(7, 7, 7, 5)
pyramid_1side_7__2side_6__3side_6_4side_6 = _pyr_vec(7, 6, 6, 6)
pyramid_1side_7__2side_7__3side_6_4side_6 = _pyr_vec(7, 7, 6, 6)
pyramid_1side_7__2side_7__3side_7_4side_6 = _pyr_vec(7, 7, 7, 6)
pyramid_1side_7__2side_7__3side_7_4side_7 = _pyr_vec(7, 7, 7, 7)
#########################################################################################
# "1" 3 6 10 15 21 28 36 45 55
# side1 OK 1
ch032_pyramid_1side_1__2side_1__3side_1_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_1__2side_1__3side_1_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_1__2side_1__3side_1_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_1__2side_1__3side_1_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
ch032_pyramid_1side_2__2side_1__3side_1_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_2__2side_1__3side_1_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_2__2side_1__3side_1_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_2__2side_1__3side_1_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_2__2side_2__3side_1_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_1_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_1_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_1_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_2_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_2_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_2_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_2__2side_2__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
ch032_pyramid_1side_3__2side_1__3side_1_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_1__3side_1_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_1__3side_1_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_1__3side_1_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_2__3side_1_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_1_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_1_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_1_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_2_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_2_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_2_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3__3side_1_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_1_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_1_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_1_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_2_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_2_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_2_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_1, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_1, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_1) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_2__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_3__2side_3__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
# 1side_4 section: every (2side, 3side, 4side) combination with
# 4side <= 3side <= 2side <= 4 — 20 models in total (10 + 6 + 3 + 1),
# matching the "side4 OK 20" marker above.  Generated in a loop instead of
# 20 copy-pasted lines; the module-level names are identical to the
# hand-written originals (bound via globals()), iterated in the same order
# (4side outermost, then 2side, then 3side).
for _s4 in range(1, 4 + 1):              # 4side depth
    for _s2 in range(_s4, 4 + 1):        # 2side depth (>= 4side)
        for _s3 in range(_s4, _s2 + 1):  # 3side depth (between 4side and 2side)
            _name = "ch032_pyramid_1side_4__2side_%d__3side_%d_4side_%d" % (_s2, _s3, _s4)
            _sub = getattr(pyr_1ch_model, _name)  # same sub-builder feeds Wx, Wy and Wz
            globals()[_name] = (
                KModel_builder()
                .set_model_name(MODEL_NAME.flow_unet2)
                .set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=_sub, I_to_Wy=_sub, I_to_Wz=_sub)
                .set_gen_op(use_gen_op)
                .set_train_step(use_train_step))
del _s2, _s3, _s4, _name, _sub  # keep the module namespace clean
# 1 3 6 10 "15" 21 28 36 45 55
# side5 OK 35
# 1side_5 section: every (2side, 3side, 4side) combination with
# 4side <= 3side <= 2side <= 5 — 35 models in total (15 + 10 + 6 + 3 + 1),
# matching the "side5 OK 35" marker above.  Generated in a loop instead of
# 35 copy-pasted lines; the module-level names are identical to the
# hand-written originals (bound via globals()), iterated in the same order
# (4side outermost, then 2side, then 3side).
for _s4 in range(1, 5 + 1):              # 4side depth
    for _s2 in range(_s4, 5 + 1):        # 2side depth (>= 4side)
        for _s3 in range(_s4, _s2 + 1):  # 3side depth (between 4side and 2side)
            _name = "ch032_pyramid_1side_5__2side_%d__3side_%d_4side_%d" % (_s2, _s3, _s4)
            _sub = getattr(pyr_1ch_model, _name)  # same sub-builder feeds Wx, Wy and Wz
            globals()[_name] = (
                KModel_builder()
                .set_model_name(MODEL_NAME.flow_unet2)
                .set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=_sub, I_to_Wy=_sub, I_to_Wz=_sub)
                .set_gen_op(use_gen_op)
                .set_train_step(use_train_step))
del _s2, _s3, _s4, _name, _sub  # keep the module namespace clean
# 1 3 6 10 15 "21" 28 36 45 55
# side6 OK 56
# 1side_6 section, 4side_1 subgroup: every (2side, 3side) combination with
# 3side <= 2side <= 6 and 4side fixed at 1 — 21 models (1+2+3+4+5+6).
# Generated in a loop instead of 21 copy-pasted lines; the module-level
# names are identical to the hand-written originals (bound via globals()),
# iterated in the same order (2side outer, 3side inner).
for _s2 in range(1, 6 + 1):          # 2side depth
    for _s3 in range(1, _s2 + 1):    # 3side depth (<= 2side)
        _name = "ch032_pyramid_1side_6__2side_%d__3side_%d_4side_1" % (_s2, _s3)
        _sub = getattr(pyr_1ch_model, _name)  # same sub-builder feeds Wx, Wy and Wz
        globals()[_name] = (
            KModel_builder()
            .set_model_name(MODEL_NAME.flow_unet2)
            .set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=_sub, I_to_Wy=_sub, I_to_Wz=_sub)
            .set_gen_op(use_gen_op)
            .set_train_step(use_train_step))
del _s2, _s3, _name, _sub  # keep the module namespace clean
ch032_pyramid_1side_6__2side_2__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_2__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_2__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_2__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_3__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_3__3side_3_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_3_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_3_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_3_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4__3side_3_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_3_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_3_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_3_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4__3side_4_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_3_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_3_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_3_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_3_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_4_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_5_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_2_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_2_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_2_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_2_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_3_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_3_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_3_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_3_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_4_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_5_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_6_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_3__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_3__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_5_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_5_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_6_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_4__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_5_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_5_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_6_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5__3side_5_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_5__3side_5_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_5_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_5_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_6_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6__3side_6_4side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_6, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_6, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_6__2side_6__3side_6_4side_6) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
# Running combination counts per level (triangular numbers): 1 3 6 10 15 21 "28" 36 45 55
# side7 series OK — presumably 84 variants in total (28 is the side-7 two-level count)
def _build_I_to_Wxyz_model(model_name):
    """Build one I -> (Wx, Wy, Wz) flow_unet2 model.

    The single-channel pyramid builder registered on ``pyr_1ch_model`` under
    ``model_name`` is reused for all three W components (x, y, z), exactly as
    the original hand-expanded lines did.

    model_name: attribute name on ``pyr_1ch_model``; also the module-level
        variable name this model will be published under.
    Returns the fully configured ``KModel_builder`` instance.
    """
    one_ch = getattr(pyr_1ch_model, model_name)
    return (KModel_builder()
            .set_model_name(MODEL_NAME.flow_unet2)
            .set_multi_model_builders(op_type="I_to_Wx_Wy_Wz",
                                      I_to_Wx=one_ch, I_to_Wy=one_ch, I_to_Wz=one_ch)
            .set_gen_op( use_gen_op )
            .set_train_step( use_train_step ))

# (2side, 3side, 4side) variants for the 1side_7 family covered by this span.
# The 4side_1 sub-series is complete here (all 1 <= 3side <= 2side <= 7, 28
# entries); the 4side_2 sub-series is truncated — it continues past this span,
# so the list is explicit rather than range-generated.
_side7_combos = [
    (1, 1, 1),
    (2, 1, 1), (2, 2, 1),
    (3, 1, 1), (3, 2, 1), (3, 3, 1),
    (4, 1, 1), (4, 2, 1), (4, 3, 1), (4, 4, 1),
    (5, 1, 1), (5, 2, 1), (5, 3, 1), (5, 4, 1), (5, 5, 1),
    (6, 1, 1), (6, 2, 1), (6, 3, 1), (6, 4, 1), (6, 5, 1), (6, 6, 1),
    (7, 1, 1), (7, 2, 1), (7, 3, 1), (7, 4, 1), (7, 5, 1), (7, 6, 1), (7, 7, 1),
    (2, 2, 2),
    (3, 2, 2), (3, 3, 2),
    (4, 2, 2), (4, 3, 2), (4, 4, 2),
    (5, 2, 2), (5, 3, 2), (5, 4, 2), (5, 5, 2),
    (6, 2, 2), (6, 3, 2), (6, 4, 2), (6, 5, 2), (6, 6, 2),
    (7, 2, 2), (7, 3, 2),
]
# Publish each model under the same module-level name the hand-written lines
# used, so ``from ... import ch032_pyramid_1side_7__...`` keeps working.
for _s2, _s3, _s4 in _side7_combos:
    _name = "ch032_pyramid_1side_7__2side_%d__3side_%d_4side_%d" % (_s2, _s3, _s4)
    globals()[_name] = _build_I_to_Wxyz_model(_name)
ch032_pyramid_1side_7__2side_7__3side_4_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_5_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_6_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_7_4side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_2, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_2, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_2) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_3__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_3__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_3__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_3__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_4__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_4__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5__3side_5_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_5_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_6_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_3_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_3_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_3_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_3_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_4_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_5_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_6_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_7_4side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_3, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_3, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_3) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_4__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_4__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5__3side_5_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_5_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_6_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_4_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_4_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_5_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_6_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_7_4side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_4, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_4, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_4) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5__3side_5_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_5__3side_5_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_5_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_5_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_6_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_5_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_5_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_6_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_7_4side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_5, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_5, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_5) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6__3side_6_4side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_6, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_6, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_6__3side_6_4side_6) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_6_4side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_6, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_6, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_6_4side_6) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_7_4side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_6, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_6, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_6) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7__3side_7_4side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_multi_model_builders(op_type="I_to_Wx_Wy_Wz", I_to_Wx=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_7, I_to_Wy=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_7, I_to_Wz=pyr_1ch_model.ch032_pyramid_1side_7__2side_7__3side_7_4side_7) .set_gen_op( use_gen_op ).set_train_step( use_train_step )
#########################################################################################
###############################################################################################################################################################################################
if __name__ == "__main__":
    # Smoke test: build one variant, push a zero tensor through its generator,
    # then dump the weights and architecture summary.
    import numpy as np

    print("build_model cost time:", time.time() - start_time)

    dummy_input = np.zeros(shape=(1, 512, 512, 1))
    model = ch032_pyramid_1side_1__2side_1__3side_1_4side_1.build()
    prediction = model.generator(dummy_input)
    print(prediction.shape)

    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(model.generator)
    model.generator.summary()
    print(model.model_describe)
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
81e404ed98ec96915a3628c44c71a508ebd0e5d3 | a25e2aa102ffe9c2d9b553252a1882fe5a9d7ec9 | /SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/formatting/tests/test_rule.py | ad4cc25a93850facfe3ae3cf6730e51174e6aa40 | [
"MIT"
] | permissive | MobileAnalytics/iPython-Framework | f96ebc776e763e6b4e60fb6ec26bb71e02cf6409 | da0e598308c067cd5c5290a6364b3ffaf2d2418f | refs/heads/master | 2020-03-22T06:49:29.022949 | 2018-07-04T04:22:17 | 2018-07-04T04:22:17 | 139,660,631 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,212 | py | # coding=utf8
from __future__ import absolute_import
# copyright 2010-2015 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.styles import Color
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def FormatObject():
    """Import and expose the class under test."""
    from ..rule import FormatObject
    return FormatObject
class TestFormatObject:
    """Round-trip tests for the ``cfvo`` (conditional-format value object) element."""

    def test_create(self, FormatObject):
        """Parsing a cfvo element populates type/val and leaves gte unset."""
        node = fromstring("""<cfvo type="num" val="3"/>""")
        fo = FormatObject.from_tree(node)
        assert fo.type == "num"
        assert fo.val == 3
        assert fo.gte is None

    def test_serialise(self, FormatObject):
        """Serialising writes type and value back out as attributes."""
        fo = FormatObject(type="percent", val=4)
        out = tostring(fo.to_tree())
        expected = """<cfvo type="percent" val="4"/>"""
        diff = compare_xml(out, expected)
        assert diff is None, diff

    @pytest.mark.parametrize("typ, value, expected",
                             [
                                 ('num', '5', 5.0),
                                 ('percent', '70', 70),
                                 ('max', 10, 10),
                                 ('min', '4.2', 4.2),
                                 ('formula', "=A2*4", "=A2*4"),
                                 ('percentile', 10, 10),
                                 ('formula', None, None),
                             ])
    def test_value_types(self, FormatObject, typ, value, expected):
        """Values are coerced according to the declared cfvo type."""
        fo = FormatObject(type=typ, val=value)
        assert fo.val == expected
@pytest.fixture
def ColorScale():
    """Import and expose the class under test."""
    from ..rule import ColorScale
    return ColorScale
@pytest.fixture
def ColorScaleRule():
    """Import and expose the convenience rule factory under test."""
    from ..rule import ColorScaleRule
    return ColorScaleRule
class TestColorScale:
    """Tests for two- and three-color scale conditional formats."""

    def test_create(self, ColorScale):
        """A colorScale element yields matching lists of thresholds and colors."""
        src = """
        <colorScale xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
            <cfvo type="min"/>
            <cfvo type="max"/>
            <color rgb="FFFF7128"/>
            <color rgb="FFFFEF9C"/>
        </colorScale>
        """
        tree = fromstring(src)
        scale = ColorScale.from_tree(tree)
        assert len(scale.cfvo) == 2
        assert len(scale.color) == 2

    def test_serialise(self, ColorScale, FormatObject):
        """Thresholds and colors are written out in declaration order."""
        fo1 = FormatObject(type="min", val="0")
        fo2 = FormatObject(type="percent", val="50")
        fo3 = FormatObject(type="max", val="0")
        col1 = Color(rgb="FFFF0000")
        col2 = Color(rgb="FFFFFF00")
        col3 = Color(rgb="FF00B050")
        scale = ColorScale(cfvo=[fo1, fo2, fo3], color=[col1, col2, col3])
        out = tostring(scale.to_tree())
        expected = """
        <colorScale>
            <cfvo type="min" val="0"/>
            <cfvo type="percent" val="50"/>
            <cfvo type="max" val="0"/>
            <color rgb="FFFF0000"/>
            <color rgb="FFFFFF00"/>
            <color rgb="FF00B050"/>
        </colorScale>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff

    def test_two_colors(self, ColorScaleRule):
        """The factory builds a min->max two-color rule."""
        rule = ColorScaleRule(start_type='min', start_value=None,
                              start_color='FFAA0000', end_type='max', end_value=None,
                              end_color='FF00AA00')
        out = tostring(rule.to_tree())
        expected = """
        <cfRule priority="0" type="colorScale">
            <colorScale>
                <cfvo type="min"/>
                <cfvo type="max"/>
                <color rgb="FFAA0000"/>
                <color rgb="FF00AA00"/>
            </colorScale>
        </cfRule>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff

    def test_three_colors(self, ColorScaleRule):
        """The factory builds a three-color rule with a mid point."""
        rule = ColorScaleRule(start_type='percentile', start_value=10,
                              start_color='FFAA0000', mid_type='percentile', mid_value=50,
                              mid_color='FF0000AA', end_type='percentile', end_value=90,
                              end_color='FF00AA00')
        out = tostring(rule.to_tree())
        expected = """
        <cfRule priority="0" type="colorScale">
            <colorScale>
                <cfvo type="percentile" val="10"></cfvo>
                <cfvo type="percentile" val="50"></cfvo>
                <cfvo type="percentile" val="90"></cfvo>
                <color rgb="FFAA0000"></color>
                <color rgb="FF0000AA"></color>
                <color rgb="FF00AA00"></color>
            </colorScale>
        </cfRule>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff
@pytest.fixture
def DataBar():
    """Import and expose the class under test."""
    from ..rule import DataBar
    return DataBar
class TestDataBar:
    """Tests for the dataBar conditional format."""

    def test_create(self, DataBar):
        """Parsing keeps both thresholds and the single bar color."""
        src = """
        <dataBar xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
            <cfvo type="min"/>
            <cfvo type="max"/>
            <color rgb="FF638EC6"/>
        </dataBar>
        """
        tree = fromstring(src)
        bar = DataBar.from_tree(tree)
        assert len(bar.cfvo) == 2
        assert bar.color.value == "FF638EC6"

    def test_serialise(self, DataBar, FormatObject):
        """Lengths, thresholds, color and showValue all round-trip."""
        fo1 = FormatObject(type="min", val="0")
        fo2 = FormatObject(type="percent", val="50")
        bar = DataBar(minLength=4, maxLength=10, cfvo=[fo1, fo2], color="FF2266", showValue=True)
        out = tostring(bar.to_tree())
        expected = """
        <dataBar maxLength="10" minLength="4" showValue="1">
            <cfvo type="min" val="0"></cfvo>
            <cfvo type="percent" val="50"></cfvo>
            <color rgb="00FF2266"></color>
        </dataBar>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff
@pytest.fixture
def IconSet():
    """Import and expose the class under test."""
    from ..rule import IconSet
    return IconSet
class TestIconSet:
    """Tests for the iconSet conditional format."""

    def test_create(self, IconSet):
        """Parsing keeps the icon style name and all five thresholds."""
        src = """
        <iconSet iconSet="5Rating" xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
            <cfvo type="percent" val="0"/>
            <cfvo type="percentile" val="20"/>
            <cfvo type="percentile" val="40"/>
            <cfvo type="percentile" val="60"/>
            <cfvo type="percentile" val="80"/>
        </iconSet>
        """
        tree = fromstring(src)
        icons = IconSet.from_tree(tree)
        assert icons.iconSet == "5Rating"
        assert len(icons.cfvo) == 5

    def test_serialise(self, IconSet, FormatObject):
        """Style name, reverse/showValue flags and thresholds round-trip."""
        fo1 = FormatObject(type="num", val="2")
        fo2 = FormatObject(type="num", val="4")
        fo3 = FormatObject(type="num", val="6")
        fo4 = FormatObject(type="percent", val="0")
        icons = IconSet(cfvo=[fo1, fo2, fo3, fo4], iconSet="4ArrowsGray", reverse=True, showValue=False)
        out = tostring(icons.to_tree())
        expected = """
        <iconSet iconSet="4ArrowsGray" showValue="0" reverse="1">
            <cfvo type="num" val="2"/>
            <cfvo type="num" val="4"/>
            <cfvo type="num" val="6"/>
            <cfvo type="percent" val="0"/>
        </iconSet>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff
@pytest.fixture
def Rule():
    """Import and expose the class under test."""
    from ..rule import Rule
    return Rule
class TestRule:
    """Tests for the generic cfRule element."""

    def test_create(self, Rule, datadir):
        """Every cfRule in the sample worksheet parses and keeps its formulae."""
        datadir.chdir()
        with open("worksheet.xml") as src:
            tree = fromstring(src.read())
        parsed = []
        for el in tree.findall("{%s}conditionalFormatting/{%s}cfRule" % (SHEET_MAIN_NS, SHEET_MAIN_NS)):
            parsed.append(Rule.from_tree(el))
        assert len(parsed) == 30
        assert parsed[17].formula == ['2', '7']
        assert parsed[-1].formula == ["AD1>3", ]

    def test_serialise(self, Rule):
        """Attributes and formula children are written in schema order."""
        rule = Rule(type="cellIs", dxfId="26", priority="13", operator="between")
        rule.formula = ["2", "7"]
        out = tostring(rule.to_tree())
        expected = """
        <cfRule type="cellIs" dxfId="26" priority="13" operator="between">
            <formula>2</formula>
            <formula>7</formula>
        </cfRule>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff

    def test_non_ascii_formula(self, Rule):
        """Non-ASCII characters in formulae survive serialisation."""
        rule = Rule(type="cellIs", priority=10, formula=[b"D\xc3\xbcsseldorf".decode("utf-8")])
        out = tostring(rule.to_tree())
        expected = """
        <cfRule priority="10" type="cellIs">
            <formula>Düsseldorf</formula>
        </cfRule>
        """
        diff = compare_xml(out, expected)
        assert diff is None, diff
def test_formula_rule():
    """FormulaRule maps onto an 'expression' cfRule with an empty dxf."""
    from ..rule import FormulaRule
    from openpyxl.styles.differential import DifferentialStyle

    rule = FormulaRule(formula=['ISBLANK(C1)'], stopIfTrue=True)
    assert dict(rule) == {'priority': '0', 'stopIfTrue': '1', 'type': 'expression'}
    assert rule.formula == ['ISBLANK(C1)']
    assert rule.dxf == DifferentialStyle()
def test_cellis_rule():
    """CellIsRule expands the operator and carries the fill into the dxf."""
    from ..rule import CellIsRule
    from openpyxl.styles import PatternFill

    red_fill = PatternFill(start_color='FFEE1111', end_color='FFEE1111',
                           fill_type='solid')
    rule = CellIsRule(operator='<', formula=['C$1'], stopIfTrue=True, fill=red_fill)
    assert dict(rule) == {'operator': 'lessThan', 'priority': '0', 'type': 'cellIs', 'stopIfTrue': '1'}
    assert rule.formula == ['C$1']
    assert rule.dxf.fill == red_fill
@pytest.mark.parametrize("value, expansion",
                         [
                             ('<=', 'lessThanOrEqual'),
                             ('>', 'greaterThan'),
                             ('!=', 'notEqual'),
                             ('=', 'equal'),
                             ('>=', 'greaterThanOrEqual'),
                             ('==', 'equal'),
                             ('<', 'lessThan'),
                         ])
def test_operator_expansion(value, expansion):
    """Shorthand comparison operators expand to their schema names; full names pass through."""
    from ..rule import CellIsRule

    short_form = CellIsRule(operator=value, formula=[])
    long_form = CellIsRule(operator=expansion, formula=[])
    assert short_form.operator == expansion
    assert long_form.operator == expansion
def test_iconset_rule():
    """IconSetRule builds one cfvo per supplied threshold value."""
    from ..rule import IconSetRule

    rule = IconSetRule('5Arrows', 'percent', [10, 20, 30, 40, 50])
    out = tostring(rule.to_tree())
    expected = """
    <cfRule priority="0" type="iconSet">
        <iconSet iconSet="5Arrows">
            <cfvo type="percent" val="10"/>
            <cfvo type="percent" val="20"/>
            <cfvo type="percent" val="30"/>
            <cfvo type="percent" val="40"/>
            <cfvo type="percent" val="50"/>
        </iconSet>
    </cfRule>
    """
    diff = compare_xml(out, expected)
    assert diff is None, diff
def test_databar_rule():
    """DataBarRule builds a dataBar cfRule with both thresholds and the bar color."""
    from ..rule import DataBarRule

    rule = DataBarRule(start_type='percentile', start_value=10,
                       end_type='percentile', end_value='90', color="FF638EC6")
    out = tostring(rule.to_tree())
    expected = """
    <cfRule type="dataBar" priority="0">
        <dataBar>
            <cfvo type="percentile" val="10"/>
            <cfvo type="percentile" val="90"/>
            <color rgb="FF638EC6"/>
        </dataBar>
    </cfRule>
    """
    diff = compare_xml(out, expected)
    assert diff is None, diff
| [
"909889261@qq.com"
] | 909889261@qq.com |
a67a7e3853351430549f23b2546bf23d0b9996aa | d6c84c8d2568fdbf898f0f477f0f666ad5769f53 | /polybot/planning.py | 64baebd4e075375fdba645e5bae56a431858ddad | [
"Apache-2.0"
] | permissive | cwang344/polybot-web-service | ae533535e834da10da3b4ddff1a458f89d9dbb89 | 0aebb082829e0a8b4ac4a6d15c5da3f4458ab143 | refs/heads/master | 2023-07-18T07:03:05.577043 | 2021-08-20T14:47:51 | 2021-08-20T14:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,617 | py | """Definition for the class that runs the optimization routine.
We describe the policy for starting new runs by implementing a
`Colmena <http://colmena.rtfd.org/>`_ Thinker class.
"""
import random
from pathlib import Path
from typing import Dict, Callable, Union
import requests
from colmena.redis.queue import ClientQueues, TaskServerQueues
from colmena.task_server import ParslTaskServer
from colmena.thinker import BaseThinker, result_processor
from colmena.models import Result
from parsl import Config, ThreadPoolExecutor
from pydantic import BaseModel, Field, AnyHttpUrl
from polybot.models import SampleTemplate
from polybot.robot import send_new_sample
class OptimizationProblem(BaseModel):
    """Define the optimization problem and any settings for the planning algorithm."""

    # Define the search space
    search_template_path: Union[AnyHttpUrl, Path] = Field(
        ..., description="Path to the sample template. Defines the input variables and the search space"
                         " for the optimization. Can be either a path on the local filesystem or a HTTP URL")

    # Options for the planning algorithm
    planner_options: Dict = Field(default_factory=dict, description='Any options for the planning algorithm')

    # Define the optimization metric
    # TODO (wardlt): Should we dare cross into multi-objective optimization in this document or leave it up to
    #  the implementation of the Planner?
    output: str = Field(..., description="Output variable. Name of values within the `processed_outputs` dictionary")
    maximize: bool = Field(True, description="Whether to maximize (or minimize) the target function")

    @property
    def search_template(self) -> SampleTemplate:
        """Template that defines the sample search space.

        Note: this property re-reads the template on every access; for a URL
        path that means a fresh HTTP request each time.
        """
        if isinstance(self.search_template_path, str):
            # AnyHttpUrl validates to a ``str`` subclass, so a string value means a remote template
            reply = requests.get(self.search_template_path)
            return SampleTemplate.parse_obj(reply.json())
        else:
            return SampleTemplate.parse_file(self.search_template_path)

    class Config:
        # BUG FIX: pydantic's option is spelled ``extra``; the previous key
        # ``extras`` was silently ignored, so unknown fields were never rejected.
        extra = 'forbid'
class BasePlanner(BaseThinker):
    """Base class for planning algorithms based on the `Colmena BaseThinker
    <https://colmena.readthedocs.io/en/latest/how-to.html#creating-a-thinker-application>`_ class.

    Subclasses should provide the optimization specification to the initializer of this class
    so that it is available as the `opt_spec` attribute. Additional options to the planner
    should be set using keyword arguments to the initializer, so that we can define them in the
    :class:`OptimizationProblem` JSON document.

    There are no requirements on how you implement the planning algorithm, but you may at least want an agent
    waiting for results with the "robot" topic. For example,

    .. code:: python

        @result_processor(topic='robot')
        def robot_result_handler(self, _: Result):
            output = self.opt_spec.get_sample_template()
            send_new_sample(output)
    """

    def __init__(self, queues: ClientQueues, opt_spec: OptimizationProblem, daemon: bool = False):
        super().__init__(queues, daemon=daemon)
        # Keep the problem definition around for subclasses to consult
        self.opt_spec = opt_spec
class RandomPlanner(BasePlanner):
    """Planner that proposes a uniformly-random point from the search space
    each time a sample finishes on the robot."""

    @result_processor(topic='robot')
    def robot_result_handler(self, _: Result):
        """Queue up a fresh, randomly-chosen task once the previous one completes

        Args:
            _: Completed result; its contents are currently ignored.
        """
        # Start from a blank sample, then pick one allowed value,
        # independently, for every input variable
        sample = self.opt_spec.search_template.create_new_sample()
        for name, choices in self.opt_spec.search_template.list_acceptable_input_values().items():
            sample.inputs[name] = random.choice(choices)

        # Hand the new sample off to the robot
        send_new_sample(sample)
        return
def _execute(f: Callable):
"""Debug function"""
return f()
def build_thread_pool_executor(queues: TaskServerQueues) -> ParslTaskServer:
    """Build a task server that runs a single task on one local thread.

    Intended primarily for testing: the server exposes exactly one method,
    "execute," which receives a Callable and executes it remotely.

    Args:
        queues: Queues to use to communicate
    Returns:
        A configured task server
    """
    # One Parsl thread-pool worker is enough for the single debug method
    local_config = Config(executors=[ThreadPoolExecutor(max_threads=1)])
    server = ParslTaskServer(queues=queues, methods=[_execute], config=local_config)
    return server
| [
"ward.logan.t@gmail.com"
] | ward.logan.t@gmail.com |
0c5ed03200eea5fa07824fedb9ecfa87e7fe52e8 | a563a95e0d5b46158ca10d6edb3ca5d127cdc11f | /tccli/services/iotexplorer/v20190423/help.py | dff03e15e9ea50a3d1125332751988261fcb441f | [
"Apache-2.0"
] | permissive | SAIKARTHIGEYAN1512/tencentcloud-cli | e93221e0a7c70f392f79cda743a86d4ebbc9a222 | d129f1b3a943504af93d3d31bd0ac62f9d56e056 | refs/heads/master | 2020-08-29T09:20:23.790112 | 2019-10-25T09:30:39 | 2019-10-25T09:30:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,925 | py | # -*- coding: utf-8 -*-
DESC = "iotexplorer-2019-04-23"
INFO = {
"ModifyStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "ProductName",
"desc": "产品名称"
},
{
"name": "ProductDesc",
"desc": "产品描述"
},
{
"name": "ModuleId",
"desc": "模型ID"
}
],
"desc": "提供修改产品的名称和描述等信息的能力"
},
"DeleteStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
}
],
"desc": "提供删除某个项目下产品的能力"
},
"DescribeStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
}
],
"desc": "提供查看茶品详细信息的能力,包括产品的ID、数据协议、认证类型等重要参数"
},
"DescribeDeviceData": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
}
],
"desc": "根据设备产品ID、设备名称,获取设备上报的属性数据。"
},
"CreateStudioProduct": {
"params": [
{
"name": "ProductName",
"desc": "产品名称"
},
{
"name": "CategoryId",
"desc": "产品分组模板ID"
},
{
"name": "ProductType",
"desc": "产品类型"
},
{
"name": "EncryptionType",
"desc": "加密类型"
},
{
"name": "NetType",
"desc": "连接类型"
},
{
"name": "DataProtocol",
"desc": "数据协议"
},
{
"name": "ProductDesc",
"desc": "产品描述"
},
{
"name": "ProjectId",
"desc": "产品的项目ID"
}
],
"desc": "为用户提供新建产品的能力,用于管理用户的设备"
},
"DescribeDevice": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名"
}
],
"desc": "用于查看某个设备的详细信息"
},
"SearchStudioProduct": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
},
{
"name": "ProductName",
"desc": "产品名称"
},
{
"name": "Limit",
"desc": "列表Limit"
},
{
"name": "Offset",
"desc": "列表Offset"
},
{
"name": "DevStatus",
"desc": "产品Status"
}
],
"desc": "提供根据产品名称查找产品的能力"
},
"GetProjectList": {
"params": [
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "个数限制"
}
],
"desc": "提供查询用户所创建的项目列表查询功能。"
},
"DescribeDeviceDataHistory": {
"params": [
{
"name": "MinTime",
"desc": "区间开始时间"
},
{
"name": "MaxTime",
"desc": "区间结束时间"
},
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
},
{
"name": "FieldName",
"desc": "属性字段名称"
},
{
"name": "Limit",
"desc": "返回条数"
},
{
"name": "Context",
"desc": "检索上下文"
}
],
"desc": "获取设备在指定时间范围内上报的历史数据。"
},
"DeleteDevice": {
"params": [
{
"name": "ProductId",
"desc": "产品ID。"
},
{
"name": "DeviceName",
"desc": "设备名称。"
}
],
"desc": "删除设备"
},
"ModifyProject": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
},
{
"name": "ProjectName",
"desc": "项目名称"
},
{
"name": "ProjectDesc",
"desc": "项目描述"
}
],
"desc": "修改项目"
},
"CreateDevice": {
"params": [
{
"name": "ProductId",
"desc": "产品ID。"
},
{
"name": "DeviceName",
"desc": "设备名称。"
}
],
"desc": "创建设备"
},
"ListEventHistory": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
},
{
"name": "Type",
"desc": "搜索的事件类型"
},
{
"name": "StartTime",
"desc": "起始时间, 为0 表示 当前时间 - 24h"
},
{
"name": "EndTime",
"desc": "结束时间, 为0 表示当前时间"
},
{
"name": "Context",
"desc": "搜索上下文, 用作查询游标"
},
{
"name": "Size",
"desc": "单次获取的历史数据项目的最大数量"
}
],
"desc": "获取设备的历史事件"
},
"GetDeviceList": {
"params": [
{
"name": "ProductId",
"desc": "需要查看设备列表的产品 ID"
},
{
"name": "Offset",
"desc": "分页偏移"
},
{
"name": "Limit",
"desc": "分页的大小,数值范围 10-100"
}
],
"desc": "用于查询某个产品下的设备列表"
},
"ReleaseStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DevStatus",
"desc": "产品DevStatus"
}
],
"desc": "产品开发完成并测试通过后,通过发布产品将产品设置为发布状态"
},
"ModifyModelDefinition": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "ModelSchema",
"desc": "数据模板定义"
}
],
"desc": "提供修改产品的数据模板的能力"
},
"DescribeProject": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
}
],
"desc": "查询项目详情"
},
"DescribeModelDefinition": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
}
],
"desc": "查询产品配置的数据模板信息"
},
"CreateProject": {
"params": [
{
"name": "ProjectName",
"desc": "项目名称"
},
{
"name": "ProjectDesc",
"desc": "项目描述"
}
],
"desc": "为用户提供新建项目的能力,用于集中管理产品和应用。"
},
"DeleteProject": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
}
],
"desc": "提供删除某个项目的能力"
},
"ControlDeviceData": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
},
{
"name": "Data",
"desc": "属性数据"
},
{
"name": "Method",
"desc": "请求类型"
},
{
"name": "DeviceId",
"desc": "设备ID,该字段有值将代替 ProductId/DeviceName"
}
],
"desc": "根据设备产品ID、设备名称,设置控制设备的属性数据。"
},
"GetStudioProductList": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
},
{
"name": "DevStatus",
"desc": "产品DevStatus"
},
{
"name": "Offset",
"desc": "Offset"
},
{
"name": "Limit",
"desc": "Limit"
}
],
"desc": "提供查询某个项目下所有产品信息的能力。"
}
} | [
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
84c6951f02125c18e80e44560c2b69348c7b7a14 | 45a0434de7cb5aaf51f372a9ea39c2e62528e8d7 | /decoder_hier_fsoftmax_v1.py | bbba0b4556ec3440adaff9eb6a5e5f96ff1e0890 | [] | no_license | hongtaowutj/Seq2Seq-Keyphrase-Generation | 44b5b24f3af7a85c24fc5ef231c53c1dac7e48ff | 6f2d08222b108b543b7628b32e98480f2e3a32b0 | refs/heads/master | 2020-03-27T10:43:09.941194 | 2018-07-23T07:21:35 | 2018-07-23T07:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,457 | py | import os
import sys
sys.path.append(os.getcwd())
import numpy as np
from datetime import datetime
import time
import tensorflow as tf
from keras.models import Model
import keras.backend as K
from keras.models import load_model
from keras.utils import to_categorical
from utils.data_connector import DataConnector
from utils.true_keyphrases import TrueKeyphrases
from utils.decoding_fullsoftmax import DecodingSoftmax
from models.hier_seq2seq import HierarchyFullSoftmax
def decoder(params):
    """Generate keyphrases for the test set via beam-search decoding.

    ``params`` is a dict bundling all file-system paths (data, preprocessed,
    model, result and decode directories, output file name, trained weights)
    together with the model hyper-parameters (sequence lengths, embedding /
    RNN sizes, vocabulary size).
    """
    # --- unpack path settings ---
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    model_path = params['model_path']
    result_path = params['result_path']
    decode_path = params['decode_path']
    file_name = params['file_name']
    weights = params['weights']
    # --- unpack model hyper-parameters ---
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    max_sents = params['max_sents']
    embedding_dim = params['embedding_dim']
    birnn_dim = params['birnn_dim']
    rnn_dim = params['rnn_dim']
    vocab_size = params['vocab_size']
    '''
    Reading vocabulary dictionaries
    '''
    # index->word and word->index lookup tables pickled during preprocessing
    indices_words_connector = DataConnector(preprocessed_v2, 'all_indices_words_sent_fsoftmax.pkl', data=None)
    indices_words_connector.read_pickle()
    indices_words = indices_words_connector.read_file
    words_indices_connector = DataConnector(preprocessed_v2, 'all_words_indices_sent_fsoftmax.pkl', data=None)
    words_indices_connector.read_pickle()
    words_indices = words_indices_connector.read_file
    # ground-truth keyphrase tokens for the test split
    y_test_true_connector = DataConnector(data_path, 'test_sent_output_tokens.npy', data=None)
    y_test_true_connector.read_numpys()
    y_test_true = y_test_true_connector.read_file
    # non-paired data set
    X_test_connector = DataConnector(preprocessed_data, 'X_test_pad_sent_fsoftmax.npy', data=None)
    X_test_connector.read_numpys()
    X_test = X_test_connector.read_file
    '''
    Decoder model for inference stage
    Return: generated keyphrases
    '''
    full_softmax = HierarchyFullSoftmax(encoder_length=encoder_length, decoder_length=decoder_length, max_sents=max_sents, embedding_dim=embedding_dim, birnn_dim=birnn_dim, rnn_dim=rnn_dim, vocab_size=vocab_size, filepath=result_path, filename=file_name, batch_train_iter=None, batch_val_iter=None, batch_size=None, steps_epoch=None, val_steps=None, epochs=None)
    # skeleton of model architecture
    full_softmax.train_hier_seq2seq()
    encoder_model = full_softmax.encoder_model
    # load the trained weights, then split out the decoder for stepwise decoding
    predict_softmax_model = full_softmax.predict_seq2seq(weights)
    decoder_model = full_softmax.create_decoder_model()
    # transform tokenized y_true (ground truth of keyphrases) into full sentences / keyphrases
    keyphrases_transform = TrueKeyphrases(y_test_true)
    keyphrases_transform.get_true_keyphrases()
    keyphrases_transform.get_stat_keyphrases()
    y_true = keyphrases_transform.y_true
    # corpus statistics used below to size the beam
    max_kp_num = keyphrases_transform.max_kp_num
    mean_kp_num = keyphrases_transform.mean_kp_num
    std_kp_num = keyphrases_transform.std_kp_num
    print("Maximum number of key phrases per document in corpus: %s" %max_kp_num)
    sys.stdout.flush()
    print("Average number of key phrases per document in corpus: %s" %mean_kp_num)
    sys.stdout.flush()
    print("Standard Deviation of number of key phrases per document in corpus: %s" %std_kp_num)
    sys.stdout.flush()
    # round up function for computing beam width
    def roundup(x):
        # round x up to the next multiple of 5 (unchanged if already a multiple)
        return x if x % 5 == 0 else x + 5 - x % 5
    # beam width = mean + 3 standard deviations, rounded up to a multiple of 5
    beam_width = int(roundup(mean_kp_num + (3 * std_kp_num)))
    print("\nBeam width: %s\n" %beam_width)
    sys.stdout.flush()
    num_hypotheses = beam_width
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()
    print("Start decoding...")
    sys.stdout.flush()
    inference_mode = DecodingSoftmax(encoder_model=encoder_model, decoder_model=decoder_model, indices_words=indices_words, words_indices=words_indices, enc_in_seq=None, decoder_length=decoder_length, rnn_dim=rnn_dim, beam_width=beam_width, num_hypotheses=num_hypotheses, filepath=decode_path, filename=file_name)
    t0_1 = time.time()
    print("Start beam decoding...")
    sys.stdout.flush()
    # NOTE(review): only the first 500 test examples are decoded — presumably to
    # bound runtime; confirm before using this output for full-corpus evaluation.
    beam_keyphrases = inference_mode.beam_decoder(X_test[:500])
    # persist generated keyphrases as beam_kp-<file_name>.npy in decode_path
    beam_decode_connector = DataConnector(decode_path, 'beam_kp-%s.npy'%(file_name), beam_keyphrases)
    beam_decode_connector.save_numpys()
    t1_1 = time.time()
    print("Beam decoding is done in %.3fsec" % (t1_1 - t0_1))
    sys.stdout.flush()
| [
"i.nimah@tue.nl"
] | i.nimah@tue.nl |
b03e63e83a8119e43592f779267c3e1089933a42 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /vaufKtjX3gKoq9PeS_17.py | 188c5344f01609d7777059c7e888e3ce3952cdbd | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
def ohms_law(v, r, i):
    """Solve V = I * R for whichever quantity is falsy (zero/None).

    Exactly one of the three arguments should be the unknown; the other two
    must be truthy. Returns the missing value rounded to 2 decimal places,
    or the string 'Invalid' when the inputs do not identify a single unknown.
    """
    if not v and r and i:
        return round(r * i, 2)   # V = I * R
    if not r and v and i:
        return round(v / i, 2)   # R = V / I
    if not i and v and r:
        return round(v / r, 2)   # I = V / R
    return 'Invalid'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
bba7fcd21ff8aad07745a06ba5cf7bec4ede0975 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1601.py | 4a308e37f8e46ee749375c4c4f01d664c3823c76 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | #!/usr/bin/env python
import sys
def main(argv=None):
    # Google Code Jam "Magic Trick": the volunteer picks a card, the magician
    # shows two 4x4 arrangements and asks which row contains the card each
    # time. The card must be the unique value common to both chosen rows.
    if argv is None:
        argv = sys.argv
    T = int(sys.stdin.readline())  # number of test cases
    for t in xrange(T):
        # first arrangement: chosen row index (1-based), then 4 rows of 4 cards;
        # only the chosen row is kept
        vrow = int(sys.stdin.readline())
        cards1 = []
        for i in xrange(4):
            row = sys.stdin.readline()
            if i == vrow - 1:
                cards1 = map(int, row.split(" "))
        # second arrangement, same format
        vrow = int(sys.stdin.readline())
        cards2 = []
        for i in xrange(4):
            row = sys.stdin.readline()
            if i == vrow - 1:
                cards2 = map(int, row.split(" "))
        # Python 2: map() returns lists, so + concatenates the two chosen rows
        cards = cards1 + cards2
        uniqueCount = len(set(cards))
        outcome = ""
        if uniqueCount == 8:
            # the rows share no card: the volunteer's answers were inconsistent
            outcome = "Volunteer cheated!"
        elif uniqueCount <= 6:
            # two or more shared cards: the trick cannot identify a single one
            outcome = "Bad magician!"
        else: # We're good!
            # exactly one duplicate; sorting puts the pair on adjacent indices
            cards.sort()
            i = 0
            while outcome == "":
                if cards[i] == cards[i + 1]:
                    outcome = str(cards[i])
                i += 1
        print "Case #%d: %s" % (t + 1, outcome)

if __name__ == "__main__":
    sys.exit(main())
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c4fd6c0e6b2e2d046e2e8e8768f03f52b6748480 | bb7d5f000de07cc2c458a64fd275f3b14701dea2 | /examples/Lottery.py | 3c002d7320f7fe471e8334c122caa8f4f5162c79 | [
"MIT"
] | permissive | timmy61109/Introduction-to-Programming-Using-Python | 201d532a5c041ed045939c10909de0426a6e8be7 | bcbfd8d66173f5adfa1553103a692c02500e7896 | refs/heads/master | 2022-12-12T22:07:56.647918 | 2022-11-23T19:13:48 | 2022-11-23T19:18:46 | 210,872,428 | 0 | 6 | MIT | 2021-11-21T12:58:47 | 2019-09-25T14:58:14 | Python | UTF-8 | Python | false | false | 811 | py | import random
# Generate a lottery
lottery = random.randint(0, 99)
# Prompt the user to enter a guess
guess = eval(input("Enter your lottery pick (two digits): "))
# Get digits from lottery
lotteryDigit1 = lottery // 10
lotteryDigit2 = lottery % 10
# Get digits from guess
guessDigit1 = guess // 10
guessDigit2 = guess % 10
print("The lottery number is", lottery)
# Check the guess
if guess == lottery:
print("Exact match: you win $10,000")
elif (guessDigit2 == lotteryDigit1 and \
guessDigit1 == lotteryDigit2):
print("Match all digits: you win $3,000")
elif (guessDigit1 == lotteryDigit1
or guessDigit1 == lotteryDigit2
or guessDigit2 == lotteryDigit1
or guessDigit2 == lotteryDigit2):
print("Match one digit: you win $1,000")
else:
print("Sorry, no match")
| [
"38396747+timmy61109@users.noreply.github.com"
] | 38396747+timmy61109@users.noreply.github.com |
02f7ab0bee68cd454347bcc87f684b94c7b503e2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2111/60832/263214.py | 493b6cd70cdd6be8f9b3626e503bb4888b869bfb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | n = int(input())
x = 2
i = 1
while i < n:
num = x
while num % 5 == 0:
num = num // 5
while num % 3 == 0:
num //= 3
while num % 2 == 0:
num //= 2
if num == 1:
i += 1
x += 1
print(x - 1) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
348dc2478aef4fefed9ef1a05d40b69aab88803f | 853c6a09af16fd4dd8a53efa8bde631e63315b59 | /BOJ BF/sum_subsequence.py | e1c9ee7e1ffa0b4065c89119d04a4ff13f4ba0ac | [] | no_license | Areum0921/Abox | 92840897b53e9bbab35c0e0aae5a017ab19a0500 | f4739c0c0835054afeca82484769e71fb8de47c8 | refs/heads/master | 2021-12-13T11:16:33.583366 | 2021-10-10T08:09:50 | 2021-10-10T08:09:50 | 176,221,995 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # 콤비네이션 안쓰고 리스트 안에 있는 숫자들을 조합하여 더할 수 있는 모든 경우의 값
# set 안쓰고 중복을 방지하고싶으면 False 배열을 만들고, 결과값 인덱스를 True 바꾸는 식으로 사용.
N=int(input())
s=list(map(int,input().split(" ")))
sum_list=[]
def dfs(x,y):
if(x==N):
sum_list.append(y)
return
dfs(x+1,y)
dfs(x+1,y+s[x])
dfs(0,0)
# 결과물 sum_list.sort()하면 None으로 나옴
# sorted(sum_list)하면 정상적으로 나옴
print(sorted(set(sum_list))) | [
"a90907@gmail.com"
] | a90907@gmail.com |
fe0604f50c4cfe8fa89dbadfb87823935a8e5e5f | 3591ab22e1cc0fc1362f909017a8aa5c2b53bd92 | /FundNoticeSpiders/BoYangInvestNotice.py | e5c41b22ec6938b39b38abc013bcd7137d30f716 | [] | no_license | Wnltc/ggscrapy | ef7e9559ce6140e7147f539778e25fc7f6cbee4c | bf929112e14b875a583803fe92980fe67129bdac | refs/heads/master | 2023-03-15T22:00:45.377540 | 2018-06-06T02:19:14 | 2018-06-06T02:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib.parse import urljoin
from scrapy.utils.response import get_base_url
from FundNoticeSpiders import GGFundNoticeItem
from FundNoticeSpiders import GGFundNoticeSpider
class BoYangInvestNoticeSpider(GGFundNoticeSpider):
    """Scrapy spider collecting fund notices from the BoYang Invest website."""
    name = 'FundNotice_BoYangInvestNotice'
    sitename = '博洋投资'
    entry = 'http://www.byfgroup.com/'
    # Seed request: first page of the news listing, page number carried in 'ext'
    ips = [{
        'url': 'http://www.byfgroup.com/news.jsp',
        'ref': 'http://www.byfgroup.com/',
        'ext': {'page': '1'}
    }]

    def parse_item(self, response):
        """Yield one GGFundNoticeItem per notice on the page, then enqueue the next page."""
        ext = response.meta['ext']
        page = int(ext['page'])
        # The "last page" link encodes the total page count as pageDirect(N)
        total_page = response.xpath('//a[text()="最后一页"]/@href').re_first(r'pageDirect\((\d+)\)')
        if total_page is None or total_page == '':
            total_page = 0
        else:
            total_page = int(total_page)
        # Notice titles and their dates live in parallel table cells
        notices = response.xpath('//td[@class="newxxlist"]')
        years = response.xpath('//td[@class="newxxdate"]/text()').re(r'(\d+-\d+-\d+)')
        for row in notices:
            url = row.xpath('./a/@href').extract_first()
            url = urljoin(get_base_url(response), url)
            title = row.xpath('./a//text()').extract_first()
            # pop(0) pairs each notice with its date in document order
            publish_time = years.pop(0)
            publish_time = datetime.strptime(publish_time, '%Y-%m-%d')
            item = GGFundNoticeItem()
            item['sitename'] = self.sitename
            item['channel'] = self.channel
            item['url_entry'] = self.entry
            item['url'] = url
            item['title'] = title
            item['publish_time'] = publish_time
            yield item
        # Paginate by appending the next POST request to the crawl queue
        if page < total_page:
            self.ips.append({
                'url': 'http://www.byfgroup.com/news.jsp',
                'form': {'page': str(page+1)},
                'ref': response.url,
                'ext': {'page': str(page+1)}
            })
| [
"songxh@go-goal.com"
] | songxh@go-goal.com |
68a35022c085f4199fb83aa56e770d6c7eae54af | 3c119c145a00fbfc3b2d8841a3b9280fa4bc1da8 | /commons/utils/es_.py | 63a4ef747e86519b04ef4e015eb307990362ff25 | [] | no_license | disenQF/GoodsServer | 5cd54786d8c9b3444ffad38057fc62ebade87d3a | de7207b8fba0e60315ae7458978e210b325f305e | refs/heads/master | 2022-12-11T04:37:12.975547 | 2019-04-01T02:21:15 | 2019-04-01T02:21:15 | 177,748,032 | 3 | 1 | null | 2022-12-08T04:55:20 | 2019-03-26T08:45:59 | JavaScript | UTF-8 | Python | false | false | 2,861 | py | import pymysql
import requests
from pymysql.cursors import DictCursor
from FoodsAdminServer.settings import DATABASES
# 哪些数据添加到搜索引擎
sql = """
select c.name as cate_name,
f.id, f.name,f.price,f.image
from t_category c
join t_foods f on (c.id=f.category_id)
"""
HOST = 'localhost' # ES 的服务器地址
PORT = 9200 # ES RESTful 端口
URL_ = 'http://%s:%d' %(HOST, PORT)
def init_index(index_name):
    """Create the Elasticsearch index ``index_name`` (5 shards, 1 replica) unless it exists."""
    index_url = URL_+ '/%s' % index_name
    # GET on the index URL: a 404 status in the body means the index is absent
    existing = requests.get(index_url).json()
    if existing.get('status') != 404:
        print('---- 索引已存在----')
        return
    # PUT creates the index with the requested shard/replica layout
    settings_body = {
        'settings': {
            "number_of_shards": 5,
            "number_of_replicas": 1
        }
    }
    created = requests.request('PUT', index_url, json=settings_body).json()
    if created.get('acknowledged'):
        print('%s 索引创建成功' % index_name)
    else:
        print('%s 索引创建失败' % index_name)
def init_docs(index_name, type_name):
    """Load every food row from MySQL and index it as a document in Elasticsearch.

    :param index_name: target ES index
    :param type_name: target ES document type
    """
    # BUG FIX: work on a copy — the original code mutated the global Django
    # DATABASES setting in place (del 'ENGINE' / pop 'NAME'), so any second
    # call (or other consumer of the setting) would hit a KeyError.
    config = dict(DATABASES.get('default'))
    del config['ENGINE']
    # pymysql expects 'db' instead of Django's 'NAME'
    config['DB'] = config.pop('NAME')
    config = {key.lower(): value for key, value in config.items()}
    db = pymysql.Connect(**config)
    try:
        with db.cursor(cursor=DictCursor) as c:
            c.execute(sql)
            for row_data in c.fetchall():
                # Index each row under its own primary key
                url = URL_+ '/%s/%s/%s' % (index_name, type_name, row_data['id'])
                resp = requests.post(url, json=row_data)
                resp_result = resp.json()
                if resp_result.get('created'):
                    print('----添加 %s-> %s 成功---' % (row_data['cate_name'], row_data['name']))
                else:
                    print('----添加 %s-> %s 失败---' % (row_data['cate_name'], row_data['name']))
    finally:
        # FIX: close the connection instead of leaking it
        db.close()
def update_doc(index_name, type_name, item):
    """Overwrite the document whose id is ``item['id']`` with ``item`` (HTTP PUT)."""
    doc_url = '%s/%s/%s/%s' % (URL_, index_name, type_name, item['id'])
    requests.request('PUT', doc_url, json=item)
def delete_doc(index_name, type_name, id_):
    """Delete the document ``id_`` from the given index/type (HTTP DELETE)."""
    url = URL_ + '/%s/%s/%s' % (index_name, type_name, id_)
    # FIX: removed the leftover debug print('-->', url) that polluted stdout.
    requests.request('DELETE', url)
def add_doc(index_name, type_name, item):
    """Index one food document under its own id and report the outcome on stdout.

    :param index_name: target ES index
    :param type_name: target ES document type
    :param item: dict with keys 'cate_name', 'id', 'name', 'price', 'image'
    """
    doc_url = URL_ + '/%s/%s/%s' % (index_name, type_name, item['id'])
    outcome = requests.post(doc_url, json=item).json()
    # Same success/failure messages as before, selected up front
    template = '----添加 %s-> %s 成功---' if outcome.get('created') else '----添加 %s-> %s 失败---'
    print(template % (item['cate_name'], item['name']))
if __name__ == '__main__':
    # Manual bootstrap: create the index, then load all MySQL rows into it.
    init_index('foods_site')
    init_docs('foods_site', 'foods')
"610039018@qq.com"
] | 610039018@qq.com |
eccfa6654ec3fb088a7177d281c098151f918970 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2491/58758/260329.py | 584afe2be11bf0d64307db2c872bcfd9c6c35ace | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | nums1 = eval(input())
nums2 = eval(input())
nums1.sort()
nums2.sort()
ans = []
for i in nums1:
try:
ind = nums2.index(i)
nums2.remove(i)
ans.append(i)
except Exception:
continue
print(ans)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
38cd18e7d9ad3e43327fd7219a1a4dd1428fc53e | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /src/sentry/integrations/custom_scm/repository.py | 2667d60c50a84c2264e0af4de028c4fb0a2556f5 | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 1,797 | py | from django.http import Http404
from rest_framework.request import Request
from rest_framework.response import Response
from sentry.api.serializers import serialize
from sentry.models import Integration, Repository
from sentry.plugins.providers import IntegrationRepositoryProvider
class CustomSCMRepositoryProvider(IntegrationRepositoryProvider):
    """Repository provider that attaches pre-existing repos to a Custom SCM integration."""
    name = "CustomSCM"
    repo_provider = "custom_scm"

    def repository_external_slug(self, repo):
        # Custom SCM repos have no external id, so the name doubles as the slug
        return repo.name

    def dispatch(self, request: Request, organization, **kwargs):
        """
        Adding a repository to the Custom SCM integration is
        just two steps:
            1. Change the provider from `null` to 'integrations:custom_scm'
            2. Add the integration_id that is passed from the request

        We set the `identifier` to be the repo's id in our db
        when we call `get_repositories`. Normally this is the id or
        identifier in the other service (i.e. the GH repo id)
        """
        repo_id = request.data.get("identifier")
        integration_id = request.data.get("installation")

        try:
            # double check the repository_id passed is not
            # for an already 'claimed' repository
            repo = Repository.objects.get(
                organization_id=organization.id,
                id=repo_id,
                integration_id__isnull=True,
                provider__isnull=True,
            )
            integration = Integration.objects.get(organizations=organization, id=integration_id)
        except (Repository.DoesNotExist, Integration.DoesNotExist):
            # 404 whether the repo is missing, already claimed, or the
            # integration does not belong to this organization
            raise Http404

        # Claim the repository for this integration and return it serialized
        repo.provider = self.id
        repo.integration_id = integration.id
        repo.save()

        return Response(serialize(repo, request.user), status=201)
| [
"noreply@github.com"
] | nagyist.noreply@github.com |
7115f457ba5937098412a7e1070fbe84366470d4 | 87ba55b289f5bf0451e03384ceb0531ddc7016eb | /setup.py | d2206a7e07f97300c5228c15c96ece87a2991229 | [
"Apache-2.0"
] | permissive | ck196/TensorFlowASR | 5e8e57c6f62947e97d968bb9153784394bd5846b | 16c81282f08fc31b08156bb179d59eea3daaf120 | refs/heads/main | 2023-03-28T07:34:45.623636 | 2021-05-22T13:18:08 | 2021-05-22T13:18:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
# Read the long description and the pinned requirements from the repo root.
# FIX: pass encoding="utf-8" explicitly — without it, open() falls back to the
# platform locale encoding and can raise UnicodeDecodeError (e.g. on Windows).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

with open("requirements.txt", "r", encoding="utf-8") as fr:
    requirements = fr.read().splitlines()

setuptools.setup(
    name="TensorFlowASR",
    version="1.0.1",
    author="Huy Le Nguyen",
    author_email="nlhuy.cs.16@gmail.com",
    description="Almost State-of-the-art Automatic Speech Recognition using Tensorflow 2",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/TensorSpeech/TensorFlowASR",
    packages=setuptools.find_packages(include=["tensorflow_asr*"]),
    install_requires=requirements,
    # Optional per-TensorFlow-version dependency pins
    extras_require={
        "tf2.3": ["tensorflow==2.3.2", "tensorflow-text==2.3.0", "tensorflow-io==0.16.0"],
        "tf2.3-gpu": ["tensorflow-gpu==2.3.2", "tensorflow-text==2.3.0", "tensorflow-io==0.16.0"],
        "tf2.4": ["tensorflow>=2.4", "tensorflow-text==2.4.3", "tensorflow-io==0.17.0"],
        "tf2.4-gpu": ["tensorflow-gpu>=2.4", "tensorflow-text==2.4.3", "tensorflow-io==0.17.0"]
    },
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Intended Audience :: Science/Research",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    python_requires='>=3.6',
)
| [
"nlhuy.cs.16@gmail.com"
] | nlhuy.cs.16@gmail.com |
ceb964674032bd1ac4980a9a892671fa4f5d22d1 | 57a054c03419607bd0dad1c50b0692430a9ace40 | /home/migrations/0002_load_initial_data.py | f58c19e4fde560d2607585826423571fef1e099b | [] | no_license | crowdbotics-apps/project-1-25765 | a4c1d1cfff2378a7633cb1429303008a9301a8fa | 148acaf70e78f03c9428a73a5c4e1c60deb13da4 | refs/heads/master | 2023-04-10T13:40:54.952083 | 2021-04-18T19:49:15 | 2021-04-18T19:49:15 | 359,238,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the single CustomText row shown on the app's home screen."""
    CustomText = apps.get_model("home", "CustomText")
    CustomText.objects.create(title="Project_1")
def create_homepage(apps, schema_editor):
    """Seed the HomePage body with the default Crowdbotics welcome markup."""
    HomePage = apps.get_model("home", "HomePage")
    welcome_markup = """
    <h1 class="display-4 text-center">Project_1</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=welcome_markup)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record at the deployed domain."""
    Site = apps.get_model("sites", "Site")
    deployed_domain = "project-1-25765.botics.co"
    defaults = {"name": "Project_1"}
    # Only set the domain when one is configured (kept for template parity)
    if deployed_domain:
        defaults["domain"] = deployed_domain
    Site.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration seeding the CustomText, HomePage and Site records."""

    # Requires the initial home models and the unique-domain sites migration
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
a235e57a2ee471ee699cc093909e1875ca74704b | cc9c720f8831b4feeb94930a92792430a40ceb7c | /richcontext/scholapi/__init__.py | c032438cd46d5216c27f2330da14701e6fdf78df | [
"CC0-1.0"
] | permissive | kaydoh/RCApi | 55f9e24d36c5151cdef13252e6c0421e839c0580 | c1f3cf96d1b176cd616830e6a4563585280c12e9 | refs/heads/master | 2023-02-16T18:27:05.899301 | 2021-01-11T18:35:23 | 2021-01-11T18:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | from .scholapi import ScholInfraAPI
| [
"ceteri@gmail.com"
] | ceteri@gmail.com |
06e0b1986d2f094be6d854335ade4123fbe7b438 | d1c67f2031d657902acef4411877d75b992eab91 | /swagger_client/models/dynatrace_app_mon_integration.py | ca1117554501b7498f36d5208bbec16407bd3784 | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,415 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DynatraceAppMonIntegration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'ignore_responders_from_payload': 'bool',
'ignore_tags_from_payload': 'bool',
'ignore_extra_properties_from_payload': 'bool',
'responders': 'list[Recipient]',
'priority': 'str',
'custom_priority': 'str',
'tags': 'list[str]',
'extra_properties': 'dict(str, str)',
'assigned_team': 'TeamMeta',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_read_access': 'bool',
'allow_write_access': 'bool',
'allow_delete_access': 'bool',
'alert_filter': 'AlertFilter',
'forwarding_enabled': 'bool',
'forwarding_action_mappings': 'list[ActionMapping]',
'callback_type': 'str',
'updates_action_mappings': 'list[ActionMapping]',
'updates_enabled': 'bool',
'bidirectional_callback_type': 'str',
'username': 'str',
'password': 'str',
'url': 'str',
'profile_name': 'str'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'ignore_responders_from_payload': 'ignoreRespondersFromPayload',
'ignore_tags_from_payload': 'ignoreTagsFromPayload',
'ignore_extra_properties_from_payload': 'ignoreExtraPropertiesFromPayload',
'responders': 'responders',
'priority': 'priority',
'custom_priority': 'customPriority',
'tags': 'tags',
'extra_properties': 'extraProperties',
'assigned_team': 'assignedTeam',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_read_access': 'allowReadAccess',
'allow_write_access': 'allowWriteAccess',
'allow_delete_access': 'allowDeleteAccess',
'alert_filter': 'alertFilter',
'forwarding_enabled': 'forwardingEnabled',
'forwarding_action_mappings': 'forwardingActionMappings',
'callback_type': 'callback-type',
'updates_action_mappings': 'updatesActionMappings',
'updates_enabled': 'updatesEnabled',
'bidirectional_callback_type': 'bidirectional-callback-type',
'username': 'username',
'password': 'password',
'url': 'url',
'profile_name': 'profileName'
}
    def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, ignore_responders_from_payload=None, ignore_tags_from_payload=None, ignore_extra_properties_from_payload=None, responders=None, priority=None, custom_priority=None, tags=None, extra_properties=None, assigned_team=None, feature_type=None, allow_configuration_access=None, allow_read_access=None, allow_write_access=None, allow_delete_access=None, alert_filter=None, forwarding_enabled=None, forwarding_action_mappings=None, callback_type=None, updates_action_mappings=None, updates_enabled=None, bidirectional_callback_type=None, username=None, password=None, url=None, profile_name=None):  # noqa: E501
        """DynatraceAppMonIntegration - a model defined in Swagger

        Every backing attribute starts as None; only keyword arguments that
        are not None are assigned through their property setters below, so
        any field left unset stays None.
        """  # noqa: E501
        self._suppress_notifications = None
        self._ignore_teams_from_payload = None
        self._ignore_recipients_from_payload = None
        self._recipients = None
        self._is_advanced = None
        self._ignore_responders_from_payload = None
        self._ignore_tags_from_payload = None
        self._ignore_extra_properties_from_payload = None
        self._responders = None
        self._priority = None
        self._custom_priority = None
        self._tags = None
        self._extra_properties = None
        self._assigned_team = None
        self._feature_type = None
        self._allow_configuration_access = None
        self._allow_read_access = None
        self._allow_write_access = None
        self._allow_delete_access = None
        self._alert_filter = None
        self._forwarding_enabled = None
        self._forwarding_action_mappings = None
        self._callback_type = None
        self._updates_action_mappings = None
        self._updates_enabled = None
        self._bidirectional_callback_type = None
        self._username = None
        self._password = None
        self._url = None
        self._profile_name = None
        self.discriminator = None
        # Assign via the property setters so validated fields (feature_type,
        # callback_type, bidirectional_callback_type) are checked on init.
        if suppress_notifications is not None:
            self.suppress_notifications = suppress_notifications
        if ignore_teams_from_payload is not None:
            self.ignore_teams_from_payload = ignore_teams_from_payload
        if ignore_recipients_from_payload is not None:
            self.ignore_recipients_from_payload = ignore_recipients_from_payload
        if recipients is not None:
            self.recipients = recipients
        if is_advanced is not None:
            self.is_advanced = is_advanced
        if ignore_responders_from_payload is not None:
            self.ignore_responders_from_payload = ignore_responders_from_payload
        if ignore_tags_from_payload is not None:
            self.ignore_tags_from_payload = ignore_tags_from_payload
        if ignore_extra_properties_from_payload is not None:
            self.ignore_extra_properties_from_payload = ignore_extra_properties_from_payload
        if responders is not None:
            self.responders = responders
        if priority is not None:
            self.priority = priority
        if custom_priority is not None:
            self.custom_priority = custom_priority
        if tags is not None:
            self.tags = tags
        if extra_properties is not None:
            self.extra_properties = extra_properties
        if assigned_team is not None:
            self.assigned_team = assigned_team
        if feature_type is not None:
            self.feature_type = feature_type
        if allow_configuration_access is not None:
            self.allow_configuration_access = allow_configuration_access
        if allow_read_access is not None:
            self.allow_read_access = allow_read_access
        if allow_write_access is not None:
            self.allow_write_access = allow_write_access
        if allow_delete_access is not None:
            self.allow_delete_access = allow_delete_access
        if alert_filter is not None:
            self.alert_filter = alert_filter
        if forwarding_enabled is not None:
            self.forwarding_enabled = forwarding_enabled
        if forwarding_action_mappings is not None:
            self.forwarding_action_mappings = forwarding_action_mappings
        if callback_type is not None:
            self.callback_type = callback_type
        if updates_action_mappings is not None:
            self.updates_action_mappings = updates_action_mappings
        if updates_enabled is not None:
            self.updates_enabled = updates_enabled
        if bidirectional_callback_type is not None:
            self.bidirectional_callback_type = bidirectional_callback_type
        if username is not None:
            self.username = username
        if password is not None:
            self.password = password
        if url is not None:
            self.url = url
        if profile_name is not None:
            self.profile_name = profile_name
    @property
    def suppress_notifications(self):
        """Gets the suppress_notifications of this DynatraceAppMonIntegration. # noqa: E501
        If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The suppress_notifications of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._suppress_notifications
    @suppress_notifications.setter
    def suppress_notifications(self, suppress_notifications):
        """Sets the suppress_notifications of this DynatraceAppMonIntegration.
        If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
        :param suppress_notifications: The suppress_notifications of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._suppress_notifications = suppress_notifications
    @property
    def ignore_teams_from_payload(self):
        """Gets the ignore_teams_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The ignore_teams_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._ignore_teams_from_payload
    @ignore_teams_from_payload.setter
    def ignore_teams_from_payload(self, ignore_teams_from_payload):
        """Sets the ignore_teams_from_payload of this DynatraceAppMonIntegration.
        If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
        :param ignore_teams_from_payload: The ignore_teams_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._ignore_teams_from_payload = ignore_teams_from_payload
    @property
    def ignore_recipients_from_payload(self):
        """Gets the ignore_recipients_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The ignore_recipients_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._ignore_recipients_from_payload
    @ignore_recipients_from_payload.setter
    def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
        """Sets the ignore_recipients_from_payload of this DynatraceAppMonIntegration.
        If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
        :param ignore_recipients_from_payload: The ignore_recipients_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._ignore_recipients_from_payload = ignore_recipients_from_payload
    @property
    def recipients(self):
        """Gets the recipients of this DynatraceAppMonIntegration. # noqa: E501
        Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The recipients of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: list[Recipient]
        """
        return self._recipients
    @recipients.setter
    def recipients(self, recipients):
        """Sets the recipients of this DynatraceAppMonIntegration.
        Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
        :param recipients: The recipients of this DynatraceAppMonIntegration. # noqa: E501
        :type: list[Recipient]
        """
        self._recipients = recipients
    @property
    def is_advanced(self):
        """Gets the is_advanced of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The is_advanced of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._is_advanced
    @is_advanced.setter
    def is_advanced(self, is_advanced):
        """Sets the is_advanced of this DynatraceAppMonIntegration.
        :param is_advanced: The is_advanced of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._is_advanced = is_advanced
    @property
    def ignore_responders_from_payload(self):
        """Gets the ignore_responders_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The ignore_responders_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._ignore_responders_from_payload
    @ignore_responders_from_payload.setter
    def ignore_responders_from_payload(self, ignore_responders_from_payload):
        """Sets the ignore_responders_from_payload of this DynatraceAppMonIntegration.
        :param ignore_responders_from_payload: The ignore_responders_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._ignore_responders_from_payload = ignore_responders_from_payload
    @property
    def ignore_tags_from_payload(self):
        """Gets the ignore_tags_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The ignore_tags_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._ignore_tags_from_payload
    @ignore_tags_from_payload.setter
    def ignore_tags_from_payload(self, ignore_tags_from_payload):
        """Sets the ignore_tags_from_payload of this DynatraceAppMonIntegration.
        :param ignore_tags_from_payload: The ignore_tags_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._ignore_tags_from_payload = ignore_tags_from_payload
    @property
    def ignore_extra_properties_from_payload(self):
        """Gets the ignore_extra_properties_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The ignore_extra_properties_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._ignore_extra_properties_from_payload
    @ignore_extra_properties_from_payload.setter
    def ignore_extra_properties_from_payload(self, ignore_extra_properties_from_payload):
        """Sets the ignore_extra_properties_from_payload of this DynatraceAppMonIntegration.
        :param ignore_extra_properties_from_payload: The ignore_extra_properties_from_payload of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._ignore_extra_properties_from_payload = ignore_extra_properties_from_payload
    @property
    def responders(self):
        """Gets the responders of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The responders of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: list[Recipient]
        """
        return self._responders
    @responders.setter
    def responders(self, responders):
        """Sets the responders of this DynatraceAppMonIntegration.
        :param responders: The responders of this DynatraceAppMonIntegration. # noqa: E501
        :type: list[Recipient]
        """
        self._responders = responders
    @property
    def priority(self):
        """Gets the priority of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The priority of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._priority
    @priority.setter
    def priority(self, priority):
        """Sets the priority of this DynatraceAppMonIntegration.
        :param priority: The priority of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        """
        self._priority = priority
    @property
    def custom_priority(self):
        """Gets the custom_priority of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The custom_priority of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._custom_priority
    @custom_priority.setter
    def custom_priority(self, custom_priority):
        """Sets the custom_priority of this DynatraceAppMonIntegration.
        :param custom_priority: The custom_priority of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        """
        self._custom_priority = custom_priority
    @property
    def tags(self):
        """Gets the tags of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The tags of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: list[str]
        """
        return self._tags
    @tags.setter
    def tags(self, tags):
        """Sets the tags of this DynatraceAppMonIntegration.
        :param tags: The tags of this DynatraceAppMonIntegration. # noqa: E501
        :type: list[str]
        """
        self._tags = tags
    @property
    def extra_properties(self):
        """Gets the extra_properties of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The extra_properties of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: dict(str, str)
        """
        return self._extra_properties
    @extra_properties.setter
    def extra_properties(self, extra_properties):
        """Sets the extra_properties of this DynatraceAppMonIntegration.
        :param extra_properties: The extra_properties of this DynatraceAppMonIntegration. # noqa: E501
        :type: dict(str, str)
        """
        self._extra_properties = extra_properties
    @property
    def assigned_team(self):
        """Gets the assigned_team of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The assigned_team of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: TeamMeta
        """
        return self._assigned_team
    @assigned_team.setter
    def assigned_team(self, assigned_team):
        """Sets the assigned_team of this DynatraceAppMonIntegration.
        :param assigned_team: The assigned_team of this DynatraceAppMonIntegration. # noqa: E501
        :type: TeamMeta
        """
        self._assigned_team = assigned_team
    @property
    def feature_type(self):
        """Gets the feature_type of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The feature_type of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._feature_type
    @feature_type.setter
    def feature_type(self, feature_type):
        """Sets the feature_type of this DynatraceAppMonIntegration.
        :param feature_type: The feature_type of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        :raises ValueError: if the value is not one of the allowed values.
        """
        allowed_values = ["email-based", "token-based"]  # noqa: E501
        # NOTE: the membership test also rejects None, so assigning None
        # explicitly raises ValueError (__init__ only assigns non-None values).
        if feature_type not in allowed_values:
            raise ValueError(
                "Invalid value for `feature_type` ({0}), must be one of {1}"  # noqa: E501
                .format(feature_type, allowed_values)
            )
        self._feature_type = feature_type
    @property
    def allow_configuration_access(self):
        """Gets the allow_configuration_access of this DynatraceAppMonIntegration. # noqa: E501
        This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The allow_configuration_access of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._allow_configuration_access
    @allow_configuration_access.setter
    def allow_configuration_access(self, allow_configuration_access):
        """Sets the allow_configuration_access of this DynatraceAppMonIntegration.
        This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
        :param allow_configuration_access: The allow_configuration_access of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._allow_configuration_access = allow_configuration_access
    @property
    def allow_read_access(self):
        """Gets the allow_read_access of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The allow_read_access of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._allow_read_access
    @allow_read_access.setter
    def allow_read_access(self, allow_read_access):
        """Sets the allow_read_access of this DynatraceAppMonIntegration.
        :param allow_read_access: The allow_read_access of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._allow_read_access = allow_read_access
    @property
    def allow_write_access(self):
        """Gets the allow_write_access of this DynatraceAppMonIntegration. # noqa: E501
        This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The allow_write_access of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._allow_write_access
    @allow_write_access.setter
    def allow_write_access(self, allow_write_access):
        """Sets the allow_write_access of this DynatraceAppMonIntegration.
        This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
        :param allow_write_access: The allow_write_access of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._allow_write_access = allow_write_access
    @property
    def allow_delete_access(self):
        """Gets the allow_delete_access of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The allow_delete_access of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._allow_delete_access
    @allow_delete_access.setter
    def allow_delete_access(self, allow_delete_access):
        """Sets the allow_delete_access of this DynatraceAppMonIntegration.
        :param allow_delete_access: The allow_delete_access of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._allow_delete_access = allow_delete_access
    @property
    def alert_filter(self):
        """Gets the alert_filter of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The alert_filter of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: AlertFilter
        """
        return self._alert_filter
    @alert_filter.setter
    def alert_filter(self, alert_filter):
        """Sets the alert_filter of this DynatraceAppMonIntegration.
        :param alert_filter: The alert_filter of this DynatraceAppMonIntegration. # noqa: E501
        :type: AlertFilter
        """
        self._alert_filter = alert_filter
    @property
    def forwarding_enabled(self):
        """Gets the forwarding_enabled of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The forwarding_enabled of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._forwarding_enabled
    @forwarding_enabled.setter
    def forwarding_enabled(self, forwarding_enabled):
        """Sets the forwarding_enabled of this DynatraceAppMonIntegration.
        :param forwarding_enabled: The forwarding_enabled of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._forwarding_enabled = forwarding_enabled
    @property
    def forwarding_action_mappings(self):
        """Gets the forwarding_action_mappings of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The forwarding_action_mappings of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: list[ActionMapping]
        """
        return self._forwarding_action_mappings
    @forwarding_action_mappings.setter
    def forwarding_action_mappings(self, forwarding_action_mappings):
        """Sets the forwarding_action_mappings of this DynatraceAppMonIntegration.
        :param forwarding_action_mappings: The forwarding_action_mappings of this DynatraceAppMonIntegration. # noqa: E501
        :type: list[ActionMapping]
        """
        self._forwarding_action_mappings = forwarding_action_mappings
    @property
    def callback_type(self):
        """Gets the callback_type of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The callback_type of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._callback_type
    @callback_type.setter
    def callback_type(self, callback_type):
        """Sets the callback_type of this DynatraceAppMonIntegration.
        :param callback_type: The callback_type of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        :raises ValueError: if the value is not one of the allowed values.
        """
        allowed_values = ["amazon-sns-callback", "base-webhook-callback", "bidirectional-callback-new", "bmc-remedy-on-demand-callback", "oec-callback"]  # noqa: E501
        # NOTE: rejects None as well; only non-None values reach this setter
        # from __init__.
        if callback_type not in allowed_values:
            raise ValueError(
                "Invalid value for `callback_type` ({0}), must be one of {1}"  # noqa: E501
                .format(callback_type, allowed_values)
            )
        self._callback_type = callback_type
    @property
    def updates_action_mappings(self):
        """Gets the updates_action_mappings of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The updates_action_mappings of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: list[ActionMapping]
        """
        return self._updates_action_mappings
    @updates_action_mappings.setter
    def updates_action_mappings(self, updates_action_mappings):
        """Sets the updates_action_mappings of this DynatraceAppMonIntegration.
        :param updates_action_mappings: The updates_action_mappings of this DynatraceAppMonIntegration. # noqa: E501
        :type: list[ActionMapping]
        """
        self._updates_action_mappings = updates_action_mappings
    @property
    def updates_enabled(self):
        """Gets the updates_enabled of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The updates_enabled of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: bool
        """
        return self._updates_enabled
    @updates_enabled.setter
    def updates_enabled(self, updates_enabled):
        """Sets the updates_enabled of this DynatraceAppMonIntegration.
        :param updates_enabled: The updates_enabled of this DynatraceAppMonIntegration. # noqa: E501
        :type: bool
        """
        self._updates_enabled = updates_enabled
    @property
    def bidirectional_callback_type(self):
        """Gets the bidirectional_callback_type of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The bidirectional_callback_type of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._bidirectional_callback_type
    @bidirectional_callback_type.setter
    def bidirectional_callback_type(self, bidirectional_callback_type):
        """Sets the bidirectional_callback_type of this DynatraceAppMonIntegration.
        :param bidirectional_callback_type: The bidirectional_callback_type of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        :raises ValueError: if the value is not one of the allowed values.
        """
        allowed_values = ["bmc-foot-prints-v11-callback", "bmc-foot-prints-v12-callback", "bmc-remedy-callback", "cherwell-callback", "circonus-callback", "connect-wise-manage-callback", "connect-wise-manage-v2-callback", "datadog-callback", "dynatrace-app-mon-callback", "freshdesk-callback", "freshservice-callback", "hp-service-manager-callback", "jira-callback", "jira-service-desk-callback", "kayako-callback", "libre-nms-callback", "logic-monitor-callback", "magentrix-callback", "ms-teams-callback", "ms-teams-v2-callback", "op5-callback", "ops-genie-callback", "prtg-callback", "rollbar-callback", "sales-force-service-cloud-callback", "service-now-v2-callback", "service-now-v3-callback", "solarwinds-msp-ncentral-callback", "splunk-callback", "splunk-itsi-callback", "status-page-io-callback", "sumo-logic-callback", "zendesk-callback"]  # noqa: E501
        # NOTE: rejects None as well; only non-None values reach this setter
        # from __init__.
        if bidirectional_callback_type not in allowed_values:
            raise ValueError(
                "Invalid value for `bidirectional_callback_type` ({0}), must be one of {1}"  # noqa: E501
                .format(bidirectional_callback_type, allowed_values)
            )
        self._bidirectional_callback_type = bidirectional_callback_type
    @property
    def username(self):
        """Gets the username of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The username of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._username
    @username.setter
    def username(self, username):
        """Sets the username of this DynatraceAppMonIntegration.
        :param username: The username of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        """
        self._username = username
    @property
    def password(self):
        """Gets the password of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The password of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._password
    @password.setter
    def password(self, password):
        """Sets the password of this DynatraceAppMonIntegration.
        :param password: The password of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        """
        self._password = password
    @property
    def url(self):
        """Gets the url of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The url of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._url
    @url.setter
    def url(self, url):
        """Sets the url of this DynatraceAppMonIntegration.
        :param url: The url of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        """
        self._url = url
    @property
    def profile_name(self):
        """Gets the profile_name of this DynatraceAppMonIntegration. # noqa: E501
        Value is None until the field has been explicitly set.
        :return: The profile_name of this DynatraceAppMonIntegration. # noqa: E501
        :rtype: str
        """
        return self._profile_name
    @profile_name.setter
    def profile_name(self, profile_name):
        """Sets the profile_name of this DynatraceAppMonIntegration.
        :param profile_name: The profile_name of this DynatraceAppMonIntegration. # noqa: E501
        :type: str
        """
        self._profile_name = profile_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DynatraceAppMonIntegration, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DynatraceAppMonIntegration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"john@oram.ca"
] | john@oram.ca |
fc1dd3e2e53ac9889619341981ad72b9420d9047 | ffe68fc28402e30e1feeb32c972702e3c45da354 | /saybapco/run.py | a89c40df190b04e6f9790d7e5745e0954b1fb72c | [] | no_license | mcnigno/saybapco | 5ad8254d35af02b5e7b517d234487d1b18741904 | 28207c1ce09498808ef9f3525129b7e47f2cf731 | refs/heads/master | 2022-12-11T20:31:47.405351 | 2018-04-29T20:01:26 | 2018-04-29T20:01:26 | 131,268,488 | 1 | 0 | null | 2022-12-08T02:03:51 | 2018-04-27T08:27:33 | JavaScript | UTF-8 | Python | false | false | 69 | py | from app import app
app.run(host='0.0.0.0', port=5001, debug=True)
| [
"mcnigno@gmail.com"
] | mcnigno@gmail.com |
9517eb82fff730a2ecd4af39230afef7a9dc6d0d | ac61c80a2d9e95838ab6fdd8d41c9328e64a740e | /org/api/f.py | 029341f2c46e61303c8ed4fa0ebd2d22350ca324 | [] | no_license | zdYng/ients_down | ac45f8f1016e8876b8e282d9055975b2c19291db | a85be63365ab7f54a3629498e19cd0c619e67887 | refs/heads/master | 2020-03-09T04:24:37.824832 | 2018-03-19T00:56:02 | 2018-03-19T00:56:02 | 128,586,973 | 1 | 0 | null | 2018-04-08T02:07:16 | 2018-04-08T02:07:16 | null | UTF-8 | Python | false | false | 13,845 | py | # 模块的功能
from django.shortcuts import render, redirect, HttpResponse
from datetime import datetime, timezone, timedelta
from store import models as Store_models
from org import models as org_models
from general import models as general_models
from org import views
import general.functions
import org
import uuid
import json
# Registry mapping an action name to the handler function registered for it.
_registered_actions = {}
# Decorator factory: register the decorated function under *name* and
# return it unchanged.
def actions(name):
    def decorator(func):
        _registered_actions[name] = func
        return func
    return decorator
class UUIDEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, uuid.UUID):
return str(obj)
return json.JSONEncoder.default(self, obj)
# 构造显示组织机构所需数据
def show_node(Head_id):
'''
构造显示组织机构需要的数据
:param Head_id:
:return:
'''
temp = {
'id': Head_id,
'name': org_models.Department.objects.filter(id=Head_id).first().Name,
'pid': None,
'childrens': []
}
def tree_traversal(temp):
val = org_models.Department_Con.objects.filter(Head_id=temp['id']).values('Leef_id')
# 循环所有的Leef
for row in val:
arg = {
'id': row['Leef_id'],
'name': org_models.Department.objects.filter(id=row['Leef_id']).first().Name,
'pid': temp['id'],
'childrens': []
}
# 将Leef添加到Head的childrens里
temp['childrens'].append(arg)
# 判断该Leef是否存在childrens
if org_models.Department_Con.objects.filter(Head_id=row['Leef_id']):
# 进行递归
val = org_models.Department_Con.objects.filter(Head_id=row['Leef_id']).values('Leef_id')
tree_traversal(arg)
tree_traversal(temp)
res = {'data': []}
res['data'].append(temp)
return res
# Org-structure (department link) configuration actions.
@actions('ZG-F-01-01')
def add_department_con(ret, content, *args, **kwargs):
    '''
    Create an org-structure (department) link.
    :param ret: response dict, filled in by views.action_add
    :param content: request payload
    :return: ret
    '''
    return views.action_add(ret, 'Department_Con', content)
@actions('ZG-F-01-02')
def delete_department_con(ret, content, *args, **kwargs):
    '''
    Delete an org-structure (department) link.
    :param ret: response dict, filled in by views.action_delete
    :param content: request payload
    :return: ret
    '''
    return views.action_delete(ret, 'Department_Con', content)
@actions('ZG-F-01-03')
def put_department_con(ret, content, *args, **kwargs):
    '''
    Modify an org-structure (department) link.
    :param ret: response dict, filled in by views.action_put
    :param content: request payload
    :return: ret
    '''
    return views.action_put(ret, 'Department_Con', content)
@actions('ZG-F-01-04')
def get_department_con(ret, content, *args, **kwargs):
    '''
    Return the data needed to display the org structure rooted at the
    department given by content['Department_id'] (built by show_node).
    :param ret: response dict; 'content'/'status'/'message' are filled in
    :param content: request payload containing 'Department_id'
    :return: ret
    '''
    Department_id = content.get('Department_id', None)
    try:
        ret['content'] = show_node(Department_id)
        ret['status'] = True
    except Exception as e:
        # Boundary handler: report the failure in the response rather than
        # letting the exception propagate.
        ret['status'] = False
        ret['message'] = str(e)
    return ret
# Company-permission configuration actions.
@actions('ZG-F-02-01')
def add_authority_company(ret, content, *args, **kwargs):
    '''
    Add a company permission.
    :param ret: response dict, filled in by views.action_add
    :param content: request payload
    :return: ret
    '''
    return views.action_add(ret, 'AuthorityCompany', content)
@actions('ZG-F-02-02')
def delete_authority_company(ret, content, *args, **kwargs):
    '''
    Delete a company permission.
    :param ret: response dict, filled in by views.action_delete
    :param content: request payload
    :return: ret
    '''
    return views.action_delete(ret, 'AuthorityCompany', content)
@actions('ZG-F-02-03')
def put_authority_company(ret, content, *args, **kwargs):
    '''
    Modify a company permission.
    :param ret: response dict, filled in by views.action_put
    :param content: request payload
    :return: ret
    '''
    return views.action_put(ret, 'AuthorityCompany', content)
@actions('ZG-F-02-04')
def get_authority_company(ret, content, *args, **kwargs):
    '''
    Query company permissions.
    :param ret: response dict, filled in by views.action_get
    :param content: request payload
    :return: ret
    '''
    return views.action_get(ret, 'AuthorityCompany', content)
# Role-permission configuration actions.
@actions('ZG-F-03-01')
def add_authority_role(ret, content, *args, **kwargs):
    '''
    Add a role permission.
    :param ret: response dict, filled in by views.action_add
    :param content: request payload
    :return: ret
    '''
    return views.action_add(ret, 'AuthorityRole', content)
@actions('ZG-F-03-02')
def delete_authority_role(ret, content, *args, **kwargs):
    '''
    Delete a role permission.
    :param ret: response dict, filled in by views.action_delete
    :param content: request payload
    :return: ret
    '''
    return views.action_delete(ret, 'AuthorityRole', content)
@actions('ZG-F-03-03')
def put_authority_role(ret, content, *args, **kwargs):
    '''
    Modify a role permission.
    :param ret: response dict, filled in by views.action_put
    :param content: request payload
    :return: ret
    '''
    return views.action_put(ret, 'AuthorityRole', content)
@actions('ZG-F-03-04')
def get_authority_role(ret, content, *args, **kwargs):
    '''
    Query role permissions.
    :param ret: response dict, filled in by views.action_get
    :param content: request payload
    :return: ret
    '''
    return views.action_get(ret, 'AuthorityRole', content)
# User-permission configuration actions.
@actions('ZG-F-04-01')
def add_authority_user(ret, content, *args, **kwargs):
    '''
    Add a user permission.
    :param ret: response dict, filled in by views.action_add
    :param content: request payload
    :return: ret
    '''
    return views.action_add(ret, 'AuthorityUser', content)
@actions('ZG-F-04-02')
def delete_authority_user(ret, content, *args, **kwargs):
    '''
    Delete a user permission.
    :param ret: response dict, filled in by views.action_delete
    :param content: request payload
    :return: ret
    '''
    return views.action_delete(ret, 'AuthorityUser', content)
@actions('ZG-F-04-03')
def put_authority_user(ret, content, *args, **kwargs):
    '''
    Modify a user permission.
    :param ret: response dict, filled in by views.action_put
    :param content: request payload
    :return: ret
    '''
    return views.action_put(ret, 'AuthorityUser', content)
@actions('ZG-F-04-04')
def get_authority_user(ret, content, *args, **kwargs):
    '''
    Query user permissions.
    :param ret: response dict, filled in by views.action_get
    :param content: request payload
    :return: ret
    '''
    return views.action_get(ret, 'AuthorityUser', content)
# Department-permission configuration actions.
@actions('ZG-F-05-01')
def add_authority_department(ret, content, *args, **kwargs):
    '''
    Add a department permission.
    :param ret: response dict, filled in by views.action_add
    :param content: request payload
    :return: ret
    '''
    return views.action_add(ret, 'AuthorityDepartment', content)
@actions('ZG-F-05-02')
def delete_authority_department(ret, content, *args, **kwargs):
    '''
    Delete a department permission.
    :param ret: response dict, filled in by views.action_delete
    :param content: request payload
    :return: ret
    '''
    return views.action_delete(ret, 'AuthorityDepartment', content)
@actions('ZG-F-05-03')
def put_authority_department(ret, content, *args, **kwargs):
    '''
    Modify a department permission.
    :param ret: response dict, filled in by views.action_put
    :param content: request payload
    :return: ret
    '''
    return views.action_put(ret, 'AuthorityDepartment', content)
@actions('ZG-F-05-04')
def get_authority_department(ret, content, *args, **kwargs):
    '''
    Query department permissions.
    :param ret: response dict, filled in by views.action_get
    :param content: request payload
    :return: ret
    '''
    return views.action_get(ret, 'AuthorityDepartment', content)
# Department management (Department CRUD).
@actions('ZG-F-06-01')
def add_department(ret, content, *args, **kwargs):
    """Create a department (action ZG-F-06-01)."""
    target_model = 'Department'
    return views.action_add(ret, target_model, content)


@actions('ZG-F-06-02')
def delete_department(ret, content, *args, **kwargs):
    """Delete a department (action ZG-F-06-02)."""
    target_model = 'Department'
    return views.action_delete(ret, target_model, content)


@actions('ZG-F-06-03')
def put_department(ret, content, *args, **kwargs):
    """Update a department (action ZG-F-06-03)."""
    target_model = 'Department'
    return views.action_put(ret, target_model, content)


@actions('ZG-F-06-04')
def get_department(ret, content, *args, **kwargs):
    """Fetch departments (action ZG-F-06-04)."""
    target_model = 'Department'
    return views.action_get(ret, target_model, content)
# Account management inside a department.
@actions('ZG-F-07-01')
def department_user_add(ret, content, *args, **kwargs):
    """Attach an account to a department (action ZG-F-07-01)."""
    link_model = 'User_Con_Department'
    return views.action_add(ret, link_model, content)
@actions('ZG-F-07-02')
def department_user_delete(ret, content, *args, **kwargs):
    """Detach an account from a department (action ZG-F-07-02).

    Removes both link rows: the account's role assignment
    (``User_Con_Role``) first, then the department membership
    (``User_Con_Department``).
    """
    dep_link = {'uuid': content.pop('User_Con_Department_id')}
    role_link = {'uuid': content.pop('User_Con_Role_id')}
    views.action_delete(ret, 'User_Con_Role', role_link)
    return views.action_delete(ret, 'User_Con_Department', dep_link)
@actions('ZG-F-07-04')
def department_user_get(ret, content, *args, **kwargs):
    """List a department's accounts together with each account's role
    (action ZG-F-07-04).

    BUG FIX: this handler was originally also named ``department_user_add``,
    silently shadowing the ZG-F-07-01 handler at module level.  It has been
    renamed; registration by action code ('ZG-F-07-04') is unchanged, so the
    dispatch table in ``ZG_F_Json`` behaves exactly as before.
    """
    try:
        # Rows come back as (id, User_id, username) tuples from the generic view.
        user_rows = views.action_get(ret, 'User_Con_Department', content)['content']['value']
        objs = []
        for row in list(user_rows):
            role = org_models.User_Con_Role.objects.filter(User_id=row[1]).values('Role__Name', 'id')
            if role:
                objs.append((row[0], row[1], row[2], role[0]['id'], role[0]['Role__Name']))
            else:
                # Account has no role assignment yet.
                objs.append((row[0], row[1], row[2], None, None))
        ret['status'] = True
        ret['message'] = '{0}{1}'.format('获取', 'Department_Con_User')
        ret['content']['title'] = ['id', 'User_id', '用户名', 'User_Con_Role_id', '岗位']
        ret['content']['value'] = list(objs)
    except Exception as e:
        ret['status'] = False
        ret['message'] = str(e)
    return ret
# Role (post) management (Role CRUD).
@actions('ZG-F-08-01')
def add_role(ret, content, *args, **kwargs):
    """Create a role (action ZG-F-08-01)."""
    target_model = 'Role'
    return views.action_add(ret, target_model, content)


@actions('ZG-F-08-02')
def delete_role(ret, content, *args, **kwargs):
    """Delete a role (action ZG-F-08-02)."""
    target_model = 'Role'
    return views.action_delete(ret, target_model, content)


@actions('ZG-F-08-03')
def put_role(ret, content, *args, **kwargs):
    """Update a role (action ZG-F-08-03)."""
    target_model = 'Role'
    return views.action_put(ret, target_model, content)


@actions('ZG-F-08-04')
def get_role(ret, content, *args, **kwargs):
    """Fetch roles (action ZG-F-08-04)."""
    target_model = 'Role'
    return views.action_get(ret, target_model, content)
def modify_content(action, content, Company_id):
    """Inject company/department scoping keys into ``content`` based on the
    action code, so non-superusers only touch their own company's data.

    Returns the (mutated) ``content`` dict.
    """
    if action.startswith('ZG-F-01'):
        if action == 'ZG-F-01-04':
            # Scope to the company's first (root) department.
            first_dep = org_models.Department.objects.filter(
                Company__id=Company_id,
                is_First=True
            ).first()
            content['Department_id'] = first_dep.id
    elif action.startswith(('ZG-F-06', 'ZG-F-08')) or action == 'ZG-F-04-04':
        content['Company_id'] = Company_id
    elif action == 'ZG-F-03-04':
        content['Role__Company_id'] = Company_id
    elif action == 'ZG-F-05-04':
        content['Department__Company_id'] = Company_id
    return content
def ZG_F_Json(request):
    """AJAX dispatcher for the ZG-F (organisation management) action family.

    Expects a POST whose JSON body carries ``action`` (a 'ZG-F-*' code) and
    ``content`` (payload for the handler registered under that code via the
    ``@actions`` decorator).  Responds with a JSON envelope
    ``{'status', 'message', 'errors', 'content'}``.
    NOTE(review): GET requests (and any other method) fall through and
    return None -- presumably this view is only ever POSTed to; confirm.
    """
    if request.method == 'GET':
        pass
    if request.method == 'POST':
        ret = {'status': None, 'message': None, 'errors': None, 'content': {}}
        val = json.loads(request.body.decode('utf8'))
        content = val.get('content', None)
        action = val.get('action', None)
        # Permission check for this endpoint.
        if general.functions.general_has_authority(request, '/org/ZG_F/'):
            # Account_Type '1' skips company scoping; other accounts get
            # their session's company id injected into the payload.
            # NOTE(review): semantics of type '1' not visible here -- confirm.
            if request.session['Account_Type'] == '1':
                pass
            else:
                Company_id = request.session['Company_id']
                content = modify_content(action,content,Company_id)
            try:
                # Dispatch to the handler registered for this action code.
                f = _registered_actions[action]
            except KeyError as e:
                # Unknown action code.
                ret['status'] = False
                ret['message'] = str(e)
            else:
                ret = f(ret, content)
        else:
            try:
                # Message NO='005': permission-denied notice text.
                # NOTE(review): confirm the meaning of code '005'.
                ret['message'] = general_models.Message.objects.filter(NO='005').first().Content
            except Exception as e:
                ret['message'] = str(e)
        return HttpResponse(json.dumps(ret, cls=UUIDEncoder), content_type='application/json')
| [
"mcdull9393@gmail.com"
] | mcdull9393@gmail.com |
fe372976873228e0ed042f92dc498e7f69260681 | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script869.py | 6c72307996165323f1b6da8a8eb5e4ccbe7c4420 | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py |
# coding: utf-8
# # Intro
# **This is Lesson 7 in the [Deep Learning](https://www.kaggle.com/learn/machine-learning) track**
#
# The models you've built so far have relied on pre-trained models. But they aren't the ideal solution for many use cases. In this lesson, you will learn how to build totally new models.
#
# # Lesson
#
# In[1]:
# Embed the lesson video; as the last expression of the notebook cell its
# rich HTML repr is what gets rendered.
from IPython.display import YouTubeVideo
YouTubeVideo('YbNE3zhtsoo', width=800, height=450)
# # Sample Code
# In[ ]:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout
# NOTE(review): the original notebook read train.csv into an unused variable
# ``a`` and called ``a.drop('label', axis=1)`` without keeping the result;
# both statements were dead code (and a wasted full-file read) and have been
# removed.  The data is loaded once, below, into ``raw_data``.
img_rows, img_cols = 28, 28  # MNIST images are 28x28 pixels
num_classes = 10             # digits 0-9


def data_prep(raw):
    """Split a Kaggle digit-recognizer dataframe into model-ready arrays.

    Parameters
    ----------
    raw : pandas.DataFrame
        First column is the integer ``label``; the remaining 784 columns are
        pixel intensities in 0-255.

    Returns
    -------
    tuple
        ``(out_x, out_y)`` where ``out_x`` has shape
        ``(n, img_rows, img_cols, 1)`` scaled to [0, 1] and ``out_y`` is the
        one-hot encoded label matrix with ``num_classes`` columns.
    """
    out_y = keras.utils.to_categorical(raw.label, num_classes)
    num_images = raw.shape[0]
    x_as_array = raw.values[:, 1:]  # drop the label column, keep the pixels
    x_shaped_array = x_as_array.reshape(num_images, img_rows, img_cols, 1)
    out_x = x_shaped_array / 255  # normalize to [0, 1]
    return out_x, out_y
train_size = 30000  # NOTE(review): unused below -- presumably a leftover; confirm before deleting
train_file = "../input/digit-recognizer/train.csv"
raw_data = pd.read_csv(train_file)
# Build features / one-hot labels with the helper defined above.
x, y = data_prep(raw_data)
# Small sequential CNN: two 3x3 conv layers, then a dense classifier head.
model = Sequential()
model.add(Conv2D(20, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(img_rows, img_cols, 1)))
model.add(Conv2D(20, kernel_size=(3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
# Train for 2 epochs, holding out 20% of the rows for validation.
model.fit(x, y,
          batch_size=128,
          epochs=2,
          validation_split = 0.2)
# # Your Turn
# You are ready to [build your own model](https://www.kaggle.com/dansbecker/exercise-modeling-from-scratch).
#
# # Keep Going
# [Move on](https://www.kaggle.com/dansbecker/dropout-and-strides-for-larger-models) to learn two techniques that will make your models run faster and more robust to overfitting.
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
b91f4fe7546cb810a246edc25d61c27d766888e2 | c1a5a5779fa3cebee65d23d0216549f09fdffda5 | /508saver.py | f3f0e4d8410b7357f37db8805df69b203d2f6961 | [] | no_license | greenmac/python-morvan-tensorflow | b0821825f8857d969d4d60437334f8fbb2ca18aa | 0abd63e74b3f5a54f82337fb8deaf4edecef294f | refs/heads/master | 2020-04-11T06:20:28.315996 | 2019-01-08T13:57:27 | 2019-01-08T13:57:27 | 161,578,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # https://morvanzhou.github.io/tutorials/machine-learning/tensorflow/5-06-save/
# https://github.com/MorvanZhou/tutorials/blob/master/tensorflowTUT/tf19_saver.py
import tensorflow as tf
import numpy as np
# //////////
# ## Save to file
# ## remember to define the same dtype and shape when restore
# W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
# b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
# init = tf.initialize_all_variables()
# saver = tf.train.Saver()
# with tf.Session() as sess:
# sess.run(init)
# save_path = saver.save(sess, "my_net/save_net.ckpt")
# print("Savee to path:", save_path)
# //////////
## restore variable
## redefine the same shape and same type for your variables
# (the np.arange values are placeholders; saver.restore overwrites them
#  with the checkpointed values, matched by variable ``name``)
W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name='weights')
b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name='biases')
## not need init step
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "my_net/save_net.ckpt")
    print("weights:", sess.run(W))
    print("biases:", sess.run(b))
"alwaysmac@msn.com"
] | alwaysmac@msn.com |
930a3906ee90eb771f1a686e1d0ba722cc78cf13 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_135/ch88_2020_05_06_11_49_20_934805.py | e6bba1cfd6fb8a4323e72c322207bfb9927fdc3b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | class Retangulo:
def __init__(self, inferior_esquerdo, superior_direito):
self.inferior_esquerdo = inferior_esquerdo
self.superior_direito = superior_direito
def calcula_perimetro(self):
lado_x = superior_direito.x - inferior_esquerdo.x
lado_y = superior_direito.y - inferior_esquerdo.y
perimetro = lado_x * 2 + lado_y * 2
return perimetro
def calcula_area(self):
lado_x = superior_direito.x - inferior_esquerdo.x
lado_y = superior_direito.y - inferior_esquerdo.y
area = lado_x * lado_y
return area | [
"you@example.com"
] | you@example.com |
e9a6a86f1f9c97aeebc0645f9143517e6480c3a1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/1367.py | 0cf8ef40104ecb429f0cbaab2ce61a8013ecb146 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | # Google Code jam Problem A Speaking in Tongues
# Apr. 13, 2012
# Python 3.2.3
import sys
import string
def ReadRules(d):
    """Populate ``d`` with the Googlerese -> English character map.

    The mapping is derived by aligning the sample ciphertext with its known
    plaintext (plus the three hand-appended pairs a<->y, o<->e, z<->q that
    complete the alphabet).  Space maps to space and newline maps to the
    empty string so whole input lines can be translated directly.
    Returns ``d`` for caller convenience.
    """
    encrypted = 'ejp mysljylc kd kxveddknmc re jsicpdrysi rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd de kr kd eoya kw aej tysr re ujdr lkgc jv y e q z'
    original = 'our language is impossible to understand there are twenty six factorial possibilities so it is okay if you want to just give up a o z q'
    for cipher_word, plain_word in zip(encrypted.split(), original.split()):
        for cipher_ch, plain_ch in zip(cipher_word, plain_word):
            # setdefault keeps the first-seen mapping, like the original
            # "if a not in d" guard.
            d.setdefault(cipher_ch, plain_ch)
    d[' '] = ' '
    d['\n'] = ''
    return d
def main(inFileName):
    """Solve Code Jam 2012 Qualification A ("Speaking in Tongues").

    Reads the case count from the first line of ``inFileName``, translates
    each following line through the Googlerese mapping, and prints one
    ``Case #N: <translation>`` line per case.

    Improvements over the original: the input file is managed with ``with``
    (it was never closed), and each answer is built with ``str.join``
    instead of quadratic ``+=`` concatenation.
    """
    d = ReadRules({})
    with open(inFileName) as inFile:
        numberOfCases = int(inFile.readline())
        for caseNumber in range(numberOfCases):
            line = inFile.readline()
            # '\n' maps to '', so the trailing newline disappears naturally.
            answer = ''.join(d[ch] for ch in line)
            print('Case #' + str(caseNumber + 1) + ': ' + answer)


if __name__ == '__main__':
    main(sys.argv[1])
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a26ee3c1141a3c077ccbfe142df161d7aaff5cbe | 98acc759f4023cc907fc601d18e531101ce23f1b | /jsonschema/__main__.py | fb260ae145d7518f8e8fa16ed13cc2c6173e7404 | [
"MIT"
] | permissive | python-jsonschema/jsonschema | e2465e85dda618f8003cf69e15ee95e4e947405d | 40733195ec975da5cd86c98522959f9162b338b0 | refs/heads/main | 2023-08-17T06:32:47.748711 | 2023-08-09T15:51:57 | 2023-08-09T15:51:57 | 3,072,629 | 671 | 84 | MIT | 2023-09-12T00:11:27 | 2011-12-30T03:37:43 | Python | UTF-8 | Python | false | false | 115 | py | """
The jsonschema CLI is now deprecated in favor of check-jsonschema.
"""
from jsonschema.cli import main
main()
| [
"Julian@GrayVines.com"
] | Julian@GrayVines.com |
09df8a06e99dac1e29ef731782777407116b0ea2 | 9463b87c683fdada077cacca542998e19557f1e5 | /其他教学参考/随书源码/CH1.2-P015“百鸡百钱”问题.py | 8b5e3d725cc8267233b4d85feabb7731ed0b0688 | [] | no_license | hongm32/2020design | fa3d9e06b0e91be30784f3ad78bf65cbcfb3550b | 34b6a3c6a2740f049134eada1fbab6cacc154b0d | refs/heads/master | 2023-07-25T10:34:23.708781 | 2021-09-05T01:08:28 | 2021-09-05T01:08:28 | 303,857,648 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # “百鸡百钱”问题
# 是一个有名的数学问题,出自《张丘建算经》。其内容是:
# 公鸡5文钱1只,
# 母鸡3文钱1只,
# 小鸡3只1文钱,
# 用100文钱买100只鸡,其中公鸡、母鸡和小鸡都必须有,
# 问公鸡、母鸡和小鸡各多少只?
money = 100 # 一共100文钱
num = 100 # 一共100只鸡
cock_price = 5 # 公鸡价格5文
hen_price = 3 # 母鸡价格3文
chick_price = 3 # 3只小鸡1文
for cock_num in range(1, money // cock_price + 1): # 公鸡只数可能为1-20
for hen_num in range(1, money // hen_price + 1): # 母鸡只数可能为1-33
chick_num = num - cock_num - hen_num # 小鸡数量
money1 = cock_num * cock_price + hen_num * hen_price + chick_num / chick_price
if money1 == money:
print("公鸡:{: >2} 母鸡:{: >2} 小鸡:{}".format(cock_num, hen_num, chick_num))
input("运行完毕,请按回车键退出...")
| [
"XHM,1024cj"
] | XHM,1024cj |
54fd5dc3bc286cccf4c6d9ba9a7ddc091dcb1d07 | 4b0c57dddf8bd98c021e0967b5d94563d15372e1 | /run_MatrixElement/test/crabConfigFiles/crab_STopT_T_JESUp_cfg.py | c094b1e645d7a28aef753031438be4d92f8c137b | [] | no_license | aperloff/TAMUWW | fea6ed0066f3f2cef4d44c525ee843c6234460ba | c18e4b7822076bf74ee919509a6bd1f3cf780e11 | refs/heads/master | 2021-01-21T14:12:34.813887 | 2018-07-23T04:59:40 | 2018-07-23T04:59:40 | 10,922,954 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'run_MatrixElement_STopT_T_JESUp'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'PrivateMC'
config.JobType.scriptExe = 'JobScript.sh'
config.JobType.scriptArgs = ["numberOfEvents=100","SampleName=STopT_T_JESUp"]
config.JobType.psetName = 'emptyPSet_STopT_T_JESUp_cfg.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.inputFiles = ['FrameworkJobReport.xml','../../../../bin/slc6_amd64_gcc472/run_MatrixElement','../data/cteq5l.tbl', '../data/cteq6l.tbl', '../data/TF_TTbarMG_B_00_24.txt', '../data/TF_TTbarMG_G_00_24.txt', '../data/TF_TTbarMG_UDS_00_24.txt']
config.JobType.outputFiles = ['STopT_T_JESUp.root']
config.section_("Data")
config.Data.userInputFiles = ['root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/STopT_T_JESUp.root']
config.Data.primaryDataset = 'STopT_T_JESUp'
config.Data.splitting = 'EventBased'
config.Data.unitsPerJob = 1
NJOBS = 1661 # This is not a configuration parameter, but an auxiliary variable that we use in the next line.
config.Data.totalUnits = config.Data.unitsPerJob * NJOBS
config.Data.publication = False
config.Data.outLFNDirBase = '/store/user/aperloff/MatrixElement/Summer12ME8TeV/MEResults/rootOutput/'
config.Data.ignoreLocality = True
config.section_("Site")
config.Site.storageSite = 'T3_US_FNALLPC'
| [
"aperloff@physics.tamu.edu"
] | aperloff@physics.tamu.edu |
556b24d6265074e5934dfa15c924c83202d587df | ff5d50f40629e50794a1fd4774a9a1a8ce3a2ecd | /controles/migrations/0001_initial.py | 5a23d36747b966a639429842817c16424b62b0fd | [] | no_license | thiagorocha06/mairimed | 0d9de3db03ff073de431c0d40e16b3c1c5b1d3fe | 6705e36b52410823c04b41db58e8f0b6b3f30b85 | refs/heads/master | 2022-12-13T07:37:49.619189 | 2018-12-30T16:03:49 | 2018-12-30T16:03:49 | 109,196,690 | 0 | 0 | null | 2022-12-08T02:23:39 | 2017-11-01T23:56:20 | Python | UTF-8 | Python | false | false | 2,743 | py | # Generated by Django 2.0.7 on 2018-10-18 21:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the four vital-sign log
    models (Glicemia, Peso, Pressao, Temperatura), each timestamped and
    owned by a user.  Do not hand-edit the operations; schema changes
    belong in a new migration.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Glicemia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('glicemia', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Peso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('peso', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Pressao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sistolica', models.IntegerField(blank=True, null=True)),
                ('diastolica', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Temperatura',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): 'tempetura' looks like a typo for 'temperatura';
                # it presumably matches the model field -- fixing it requires a
                # follow-up RenameField migration, not an edit here.
                ('tempetura', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"thiagorocha06@gmail.com"
] | thiagorocha06@gmail.com |
16528ea339ae9b698b0ceb7e36bc37dfd763c35a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_293/ch147_2020_04_26_03_40_37_343598.py | ffa76dab8a358a07959763021cc31259573a94f1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def mais_frequente(lista):
dic = {}
mais_freq = 0
for e in lista:
if e not in dic:
dic[e] = 1
else:
dic[e] += 1
if mais_freq < dic[e]:
mais_freq = dic[e]
a = e
return a | [
"you@example.com"
] | you@example.com |
22b207801f06c41467931b863d2751b9314ccccd | 6ed01f4503fc9de234a561c945adff7cf4b1c81b | /ncar_lib/lib/frameworks.py | d00ac877a2c94b3b7ffa1bceab14ddcb9f7de3be | [] | no_license | ostwald/python-lib | b851943c913a68424a05ce3c7b42878ff9519f68 | 9acd97ffaa2f57b3e9e632e1b75016549beb29e5 | refs/heads/master | 2021-10-28T06:33:34.156095 | 2021-10-21T23:54:49 | 2021-10-21T23:54:49 | 69,060,616 | 0 | 1 | null | 2018-06-21T16:05:30 | 2016-09-23T21:04:46 | Roff | UTF-8 | Python | false | false | 6,986 | py | """
classes for reading library_dc and webcat XML records
"""
import os, sys, re, codecs
from JloXml import XmlRecord, XmlUtils
import globals
class NCARRec (XmlRecord):
    """
    Base record for NCAR metadata formats (Python 2 code).

    assumes a flat metadata structure (all fields are children of docRoot)
    """
    # Subclasses must define these; field_list gives canonical field order.
    field_list = None
    id_field = None
    description_field = None
    xpath_delimiter = "/"
    def __init__ (self, path=None, xml=None):
        XmlRecord.__init__ (self, path, xml)
        # Pre-bind every known field name as an attribute (None by default) ...
        for attr in self.field_list:
            setattr (self, attr, None)
        # ... then overwrite with the values actually present in the document.
        for element in self.getElements(self.doc):
            setattr (self, element.tagName, self.getText(element))
            # NOTE(review): leftover debug print? It fires for every element
            # of every record -- confirm whether it should be removed.
            print 'set %s to %s' % (element.tagName, self.getText(element))
    def getFieldValue (self, field):
        # Return the (stripped) text of the first occurrence of ``field``.
        path = "%s/%s" % (self.rootElementName, field)
        value = self.getTextAtPath (path)
        if not value is None:
            value = value.strip()
        return value
    def getFieldValues (self, field):
        # Return the stripped, non-empty text of every occurrence of ``field``.
        path = "%s/%s" % (self.rootElementName, field)
        nodes = self.selectNodes (self.dom, path)
        values = []
        for node in nodes:
            value = self.getText (node)
            if value is None:
                continue
            value = value.strip()
            if value:
                values.append (value)
        return values
    def getFieldElements (self, field):
        # Return the DOM elements for ``field`` (unparsed).
        path = "%s/%s" % (self.rootElementName, field)
        return self.selectNodes (self.dom, path)
    def numFieldValues (self, field):
        # Count occurrences of ``field`` (elements, not non-empty values).
        path = "%s/%s" % (self.rootElementName, field)
        nodes = self.selectNodes (self.dom, path)
        return len(nodes)
    def addFieldValue (self, field, value):
        """
        do not add a value if this field already has it
        strip value before adding
        """
        # NOTE(review): the element is created before the duplicate check, so
        # a duplicate value still leaves an empty element behind -- confirm
        # whether that is intended.
        path = "%s/%s" % (self.rootElementName, field)
        element = self.addElement (self.doc, field)
        if not value is None:
            value = value.strip()
        if not value in self.getFieldValues (field):
            self.setText (element, value)
    def setFieldValue (self, field, value):
        """
        if there are existing values, this will change the first only
        """
        path = "%s/%s" % (self.rootElementName, field)
        if not value is None:
            value = value.strip()
        element = self.selectSingleNode (self.dom, path)
        if not element:
            element = self.addElement (self.doc, field)
        self.setText (element, value)
    def removeField (self, field):
        # Delete every occurrence of ``field``.
        path = "%s/%s" % (self.rootElementName, field)
        nodes = self.selectNodes (self.dom, path)
        for node in nodes:
            self.deleteElement (node)
    def setFieldValues (self, field, values):
        # Replace all existing occurrences of ``field`` with ``values``.
        self.removeField (field)
        self.addFieldValues (field, values)
    def addFieldValues (self, field, values):
        for value in values:
            self.addFieldValue (field, value)
    def orderFields (self):
        """ based on converter.Converter: sort top-level elements into the
        canonical order given by field_list """
        elements = self.doc.childNodes
        # print "-------------"
        mycmp = lambda x, y:cmp (self.field_list.index(x.tagName),
                                 self.field_list.index(y.tagName))
        if elements:
            elements.sort(mycmp)
    # Convenience accessors for the subclass-declared well-known fields.
    def getId (self):
        return self.getFieldValue (self.id_field)
    def setId (self, value):
        self.setFieldValue (self.id_field, value)
    def getDescription (self):
        return self.getFieldValue (self.description_field)
    def setDescription (self, value):
        self.setFieldValue (self.description_field, value)
class WebcatRec (NCARRec):
    """NCARRec subclass for 'webcat' records (Python 2 code).

    ``issue_delimiter`` splits a combined "NCAR... - title" string into its
    issue and title parts (':' is used by manuscripts, '-' by the rest).
    """
    rootElementName = "record"
    ## issue_delimiter = re.compile ("(?P<issue>NCAR.+?) [:-] (?P<title>[a-zA-Z].+)") # - for all but manuscripts, which use :
    issue_delimiter = re.compile ("(?P<issue>NCAR.+?)[\s]+[:-][\s]+(?P<title>.+)") # - for all but manuscripts, which use :
    field_list = globals.webcat_fields
    id_field = "recordID"
    accessionNum_field = "accessionNum"
    description_field = "description"
    def __init__ (self, path=None, xml=None):
        NCARRec.__init__ (self, path, xml)
    def getAccessionNum (self):
        # BUG FIX: the original referenced the bare name ``accessionNum_field``
        # (a class attribute), which raised NameError on every call.
        return self.getFieldValue (self.accessionNum_field)
    def getPublishers (self):
        return self.getFieldValues ("publisher")
    def getScientificDivisions (self):
        return self.getFieldValues ("scientificDivision")
class LibraryDCRec_v1_0 (NCARRec):
    """
    NCARRec subclass for library_dc v1.0 records (Python 2 code).

    made obsolete (~2/09) when framework changed to contain a single namespace!
    we are always writing a new rec, not reading an existing one ...
    xsi:schemaLocation="http://www.dlsciences.org/frameworks/library_dc
    http://www.dlsciences.org/frameworks/library_dc/1.0/schemas/library_dc.xsd"
    """
    rootElementName = "library_dc:record"
    schemaUri = "http://www.dlsciences.org/frameworks/library_dc/1.0/schemas/library_dc.xsd"
    nameSpaceUri = "http://www.dlsciences.org/frameworks/library_dc"
    dcNameSpaceUri = "http://purl.org/dc/elements/1.1/"
    field_list = globals.library_dc_fields
    id_field = "library_dc:recordID"
    url_field = "library_dc:URL"
    description_field = "dc:description"
    altTitle_field = "library_dc:altTitle"
    instName_field = "library_dc:instName"
    instDivision_field = "library_dc:instDivision"
    def __init__ (self, path=None):
        # Read an existing record from disk, or build a fresh skeleton.
        if path:
            XmlRecord.__init__ (self, path=path)
        else:
            self.makeRecord ()
    def makeRecord (self):
        # Build a minimal record and declare the three namespaces plus the
        # xsi:schemaLocation on the root element.
        xml = "<%s xmlns:library_dc='%s' />" % (self.rootElementName, self.nameSpaceUri)
        NCARRec.__init__ (self, xml=xml)
        self.doc.setAttribute ("xmlns:library_dc", self.nameSpaceUri)
        self.doc.setAttribute ("xmlns:dc", self.dcNameSpaceUri)
        self.doc.setAttribute ("xmlns:"+self.schema_instance_namespace, \
            self.SCHEMA_INSTANCE_URI)
        self.setSchemaLocation (self.schemaUri, self.nameSpaceUri)
    # Convenience accessors for the library_dc-specific fields.
    def getUrl (self):
        return self.getFieldValue (self.url_field)
    def setUrl (self, value):
        self.setFieldValue (self.url_field, value)
    def getAltTitle (self):
        return self.getFieldValue (self.altTitle_field)
    def setAltTitle (self, value):
        self.setFieldValue (self.altTitle_field, value)
    def getInstName (self):
        return self.getFieldValue (self.instName_field)
    def setInstName (self, value):
        self.setFieldValue (self.instName_field, value)
    def getInstDivisions (self):
        return self.getFieldValues (self.instDivision_field)
    def setInstDivisions (self, value):
        self.setFieldValues (self.instDivision_field, value)
    def getTitle (self):
        return self.getFieldValue ("dc:title")
    def getIssue (self):
        return self.getFieldValue ("library_dc:issue")
    def setIssue (self, val):
        return self.setFieldValue ("library_dc:issue", val)
    def getContributors (self):
        return self.getFieldValues ("dc:contributor")
    def getCreators (self):
        # NOTE(review): queries "dc:creators" (plural) while the others use
        # singular element names -- confirm the element name is right.
        return self.getFieldValues ("dc:creators")
def LibraryDCRecTester ():
    # Smoke test (Python 2): exercises set/get, duplicate-safe adds,
    # counting, and field removal; results are printed, not asserted.
    rec = LibraryDCRec_v1_0 ()
    rec.setFieldValue ("library_dc:URL", "http://fooberry/index.html")
    print "URL: %s" % rec.getFieldValue ("library_dc:URL")
    rec.setUrl ("imachanged")
    print "URL: %s" % rec.getUrl()
    rec.addFieldValues ("dc:subject", ['sub1', 'sub2'])
    print rec
    rec.addFieldValues ("dc:subject", ['sub3', 'sub4'])
    print rec
    print "number of dc:subject fields: %d" % rec.numFieldValues ("dc:subject")
    print "number of dc:Xsubject fields: %d" % rec.numFieldValues ("dc:Xsubject")
    rec.removeField ("dc:subject")
    print rec
if __name__ == "__main__":
    LibraryDCRecTester ()
| [
"ostwald@ucar.edu"
] | ostwald@ucar.edu |
7d2b7099645047f346ca3482c84e6f3449782ee8 | 767e864a1b1a2722b4952fb5034a776064b2ef64 | /sentry_youtrack/youtrack.py | 392e297142606b6935d89406888fa5f14f6bb367 | [] | no_license | pombredanne/sentry-youtrack | 18403a9c218e65bc044cfa6244f1fe63fd298638 | 1d1b11aeaf63299c8b1aa83a814d708c23d9cb8a | refs/heads/master | 2021-01-22T09:09:55.134392 | 2013-11-04T22:50:08 | 2013-11-04T22:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | import requests
from BeautifulSoup import BeautifulStoneSoup
class YouTrackError(Exception):
    """Raised when a YouTrack REST response contains an <error> element."""
    pass
class YouTrackClient(object):
    """Thin client for the (legacy) YouTrack REST API.

    Authenticates either with username/password (a login request that
    yields a session cookie) or directly with an ``api_key`` that is sent
    as the principal cookie.  Responses are parsed with BeautifulStoneSoup.
    """

    LOGIN_URL = '/rest/user/login'
    PROJECT_URL = '/rest/admin/project/<project_id>'
    PROJECTS_URL = '/rest/project/all'
    CREATE_URL = '/rest/issue'
    ISSUES_URL = '/rest/issue/byproject/<project_id>'
    COMMAND_URL = '/rest/issue/<issue>/execute'
    CUSTOM_FIELD_VALUES = '/rest/admin/customfield/<param_name>/<param_value>'
    USER_URL = '/rest/admin/user/<user>'
    # Cookie under which YouTrack stores the authenticated principal.
    API_KEY_COOKIE_NAME = 'jetbrains.charisma.main.security.PRINCIPAL'

    def __init__(self, url, username=None, password=None, api_key=None):
        self.url = url.rstrip('/') if url else ''
        if api_key is None:
            # Credentials flow: obtain the session cookie via /rest/user/login.
            self._login(username, password)
        else:
            # API-key flow: present the key as the principal cookie directly.
            self.cookies = {self.API_KEY_COOKIE_NAME: api_key}
            self.api_key = api_key

    def _login(self, username, password):
        """POST credentials and capture the session cookies / api key."""
        credentials = {
            'login': username,
            'password': password
        }
        url = self.url + self.LOGIN_URL
        self._request(url, data=credentials, method='post')
        self.cookies = self.response.cookies
        self.api_key = self.cookies.get(self.API_KEY_COOKIE_NAME)

    def _request(self, url, data=None, params=None, method='get'):
        """Issue a GET/POST, raise on HTTP errors, return parsed soup.

        Side effect: stores the raw response on ``self.response``.
        """
        if method not in ['get', 'post']:
            raise AttributeError("Invalid method %s" % method)
        kwargs = {
            'url': url,
            'data': data,
            'params': params
        }
        # During _login no cookies exist yet, hence the hasattr guard.
        if hasattr(self, 'cookies'):
            kwargs['cookies'] = self.cookies
        if method == 'get':
            self.response = requests.get(**kwargs)
        elif method == 'post':
            self.response = requests.post(**kwargs)
        self.response.raise_for_status()
        return BeautifulStoneSoup(self.response.text)

    def _get_enumeration(self, soap):
        """Turn an <enumeration> response into a list of strings; raise
        YouTrackError if the server reported an <error>."""
        if soap.find('error'):
            raise YouTrackError(soap.find('error').string)
        return [item.text for item in soap.enumeration]

    def get_project_name(self, project_id):
        url = self.url + self.PROJECT_URL.replace('<project_id>', project_id)
        soap = self._request(url, method='get')
        return soap.project['name']

    def get_user(self, user):
        url = self.url + self.USER_URL.replace('<user>', user)
        soap = self._request(url, method='get')
        return soap.user

    def get_projects(self):
        url = self.url + self.PROJECTS_URL
        soap = self._request(url, method='get')
        return soap.projects

    def get_priorities(self):
        values = self.get_custom_field_values('bundle', 'Priorities')
        return self._get_enumeration(values)

    def get_issue_types(self):
        values = self.get_custom_field_values('bundle', 'Types')
        return self._get_enumeration(values)

    def get_custom_field_values(self, name, value):
        # NOTE(review): bypasses _request (no raise_for_status, no
        # self.response update) -- presumably intentional; confirm.
        url = self.url + (self.CUSTOM_FIELD_VALUES
                          .replace("<param_name>", name)
                          .replace('<param_value>', value))
        response = requests.get(url, cookies=self.cookies)
        return BeautifulStoneSoup(response.text)

    def get_project_issues(self, project_id, query=None, offset=0, limit=15):
        url = self.url + self.ISSUES_URL.replace('<project_id>', project_id)
        params = {'max': limit, 'after': offset, 'filter': query}
        soap = self._request(url, method='get', params=params)
        return soap.issues

    def create_issue(self, data):
        url = self.url + self.CREATE_URL
        soap = self._request(url, data=data, method='post')
        return soap.issue

    def execute_command(self, issue, command):
        """Run a YouTrack command string against an issue (no return value)."""
        url = self.url + self.COMMAND_URL.replace('<issue>', issue)
        data = {'command': command}
        self._request(url, data=data, method='post')

    def add_tags(self, issue, tags):
        # One command per tag.
        for tag in tags:
            cmd = u'add tag %s' % tag
            self.execute_command(issue, cmd)
| [
"adam@bogdal.pl"
] | adam@bogdal.pl |
debadb1000b285c673c11ca3b02952361c6269a6 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_init-10.py | d1da68d96efe52d99bed5413918cba60abbfd04d | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | class A(object):
$TypedVar = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
B()
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
ad0979e6aca66c863e3842d3e0935bfb6eda761d | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/dice-roll-simulation/395113484.py | 4bebf6dda3ade473f97b66109b045114c3d4405d | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # title: dice-roll-simulation
# detail: https://leetcode.com/submissions/detail/395113484/
# datetime: Sun Sep 13 22:57:31 2020
# runtime: 120 ms
# memory: 14 MB
class Solution:
    def dieSimulator(self, n: int, rollMax: List[int]) -> int:
        """Count die-roll sequences of length n where face j never appears
        more than rollMax[j] times consecutively (LeetCode 1223), mod 1e9+7.
        """
        MOD = 10 ** 9 + 7
        # dp is a sliding window over the last max(rollMax)+1 lengths;
        # dp[-1][j] = number of valid sequences of the current length that
        # end with face j.  Length-1 rows are seeded with all ones.
        dp =collections.deque([[1] * 6 for i in range(max(rollMax) + 1)])
        for i in range(2, n + 1):
            dp2 = [0] * 6
            for j in range(6):
                if i - rollMax[j] <= 0:
                    # Run limit can't be violated yet: extend any sequence.
                    dp2[j] = sum(dp[-1]) % MOD
                elif i - rollMax[j] == 1:
                    # Only the all-j sequence (exactly one) would overrun.
                    dp2[j] = (sum(dp[-1]) - 1) % MOD
                else:
                    # Subtract sequences whose last rollMax[j] rolls are all j:
                    # those are the ones that ended in face j exactly
                    # rollMax[j] rolls ago minus their non-j predecessors.
                    p = dp[-rollMax[j] - 1]
                    dp2[j] = (sum(dp[-1]) - sum(p) + p[j]) % MOD
            # Slide the window forward one length.
            dp.popleft()
            dp.append(dp2)
        return sum(dp[-1]) % MOD
| [
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
714411dc03e2aaedad3968d900b044a60fada680 | 189e14e07571b4d5720f01db73faaaef26ee9712 | /dj/dj/settings.py | e0d57bfd76c00098690ab693bede6126bd0d0bff | [] | no_license | italomaia/mylittlework | d7e936fa3f24f40ea6a95e7ab01a10208036f439 | f6d0aacec46f8c2adb429626fff0532e8939b8b8 | refs/heads/master | 2021-01-19T19:13:48.605376 | 2017-04-10T02:01:10 | 2017-04-10T02:01:10 | 86,644,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | """
Django settings for dj project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a checked-in key is acceptable for local development only;
# any deployed instance should load SECRET_KEY from the environment.
SECRET_KEY = 'qug)assem8iz7&z=ayzkh4w((riz*l!s!1%09gz32#&0=0z=bo'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Host/domain names this site may serve; must be populated before
# deploying with DEBUG = False or Django will refuse all requests.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static_files"),
]
| [
"italo.maia@gmail.com"
] | italo.maia@gmail.com |
0be35c239f203a478a140f9a2e835f4a4f7ae5d7 | d19c695ab6a3f470412cd90a84c028d555bf7f03 | /Usernames/un.py | 1acf677bee02b702e127c2e5ff7805397db3d215 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | XanderRiga/reddit | 696a38fd0ca3862484645604cf7f0a859235519e | 5ce75bda6a8ccc57d3c01342f502a6d32ce9cb54 | refs/heads/master | 2020-12-06T17:22:18.105205 | 2015-12-21T22:49:52 | 2015-12-21T22:49:52 | 48,395,160 | 0 | 0 | null | 2015-12-21T21:31:07 | 2015-12-21T21:31:06 | null | UTF-8 | Python | false | false | 22,402 | py | #/u/GoldenSights
import datetime
import praw
import random
import requests
import sqlite3
import string
import sys
import time
import traceback
sql = sqlite3.connect('C:/git/reddit/usernames/un.db')
cur = sql.cursor()
cur.execute('''
CREATE TABLE IF NOT EXISTS users(
idint INT,
idstr TEXT,
created INT,
human TEXT,
name TEXT,
link_karma INT,
comment_karma INT,
total_karma INT,
available INT,
lastscan INT)
''')
cur.execute('CREATE INDEX IF NOT EXISTS userindex ON users(idint)')
cur.execute('CREATE INDEX IF NOT EXISTS nameindex ON users(lowername)')
sql.commit()
# 0 - idint
# 1 - idstr
# 2 - created
# 3 - human
# 4 - name
# 5 - link karma
# 6 - comment karma
# 7 - total karma
# 8 - available
# 9 - lastscan
SQL_COLUMNCOUNT = 11
SQL_IDINT = 0
SQL_IDSTR = 1
SQL_CREATED = 2
SQL_HUMAN = 3
SQL_NAME = 4
SQL_LINK_KARMA = 5
SQL_COMMENT_KARMA = 6
SQL_TOTAL_KARMA = 7
SQL_AVAILABLE = 8
SQL_LASTSCAN = 9
SQL_LOWERNAME = 10
USERAGENT = '''
/u/GoldenSights Usernames data collection:
Gathering the creation dates of user accounts for visualization.
More at https://github.com/voussoir/reddit/tree/master/Usernames
'''.replace('\n', ' ')
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
try:
import bot
#USERAGENT = bot.aG
APP_ID = bot.oG_id
APP_SECRET = bot.oG_secret
APP_URI = bot.oG_uri
APP_REFRESH = bot.oG_scopes['all']
except ImportError:
pass
print('Logging in.')
# http://redd.it/3cm1p8
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
AVAILABILITY = {True:'available', False:'unavailable', 'available':1, 'unavailable':0}
HEADER_FULL = ' ID CREATED NAME LINK COMMENT TOTAL LAST SCANNED'
HEADER_BRIEF = ' LAST SCANNED | NAME'
MEMBERFORMAT_FULL = '%s %s %s %s %s (%s) | %s'
MEMBERFORMAT_BRIEF = '%s | %s'
MIN_LASTSCAN_DIFF = 86400 * 365
# Don't rescan a name if we scanned it this many days ago
VALID_CHARS = string.ascii_letters + string.digits + '_-'
def allpossiblefromset(characters, length=None, minlength=None, maxlength=None):
    '''
    Given an iterable of characters, return a generator that creates every
    permutation of length `length`.
    If `minlength` and `maxlength` are both provided, all values of intermediate
    lengths will be generated.

    Duplicate characters are ignored; results use the sorted, de-duplicated
    alphabet.

    Raises ValueError (on first iteration) if no length information is given.
    '''
    if not (minlength is None or maxlength is None):
        # Recurse once per requested length, shortest first.
        for x in range(minlength, maxlength+1):
            for item in allpossiblefromset(characters, x):
                yield item
    elif length is None:
        raise ValueError('`length` must be provided if you arent using the min/max')
    else:
        # BUG FIX: de-duplicate BEFORE sizing the search space. Previously
        # `endingpoint` used the raw input length, so duplicated characters
        # produced too many results (some longer than `length`).
        characters = ''.join(sorted(set(characters)))
        endingpoint = len(characters) ** length
        for permutation in range(endingpoint):
            # Reuse base36encode as a generic "int -> digits" converter
            # over the custom alphabet.
            permutation = base36encode(permutation, alphabet=characters)
            l = len(permutation)
            if l < length:
                # Left-pad with the alphabet's "zero" digit.
                permutation = (characters[0] * (length-l)) + permutation
            yield permutation
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Convert an integer to its string form in the given alphabet
    (base36 by default); negative inputs get a leading '-'."""
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    prefix = ''
    if number < 0:
        prefix, number = '-', -number
    base = len(alphabet)
    if number < base:
        # Single-digit fast path (also covers zero).
        return prefix + alphabet[number]
    digits = []
    while number:
        number, rem = divmod(number, base)
        digits.append(alphabet[rem])
    # Digits were produced least-significant first.
    return prefix + ''.join(reversed(digits))
def base36decode(number):
    """Parse a base36 string back into an int (inverse of base36encode)."""
    return int(number, 36)
def b36(i):
    # Polymorphic base36 shortcut: int -> base36 string, str -> int.
    # NOTE(review): any other type (including bool, which type() == int
    # rejects) silently returns None — callers pass only int/str.
    if type(i) == int:
        return base36encode(i)
    if type(i) == str:
        return base36decode(i)
def check_old(available=None, threshold=86400):
    '''
    Update names in ascending order of their last scan
    available = False : do not include available names
                None : do include available names
                True : only include available names
    threshold = how long ago the lastscan must be, in seconds
                (default: one day).
    '''
    now = getnow()
    # Convert "age in seconds" into an absolute cutoff timestamp.
    threshold = now - threshold
    assert available in (False, None, True)
    if available == False:
        query = 'SELECT name FROM users WHERE available = 0 AND lastscan < ? ORDER BY lastscan ASC'
    elif available == None:
        query = 'SELECT name FROM users WHERE lastscan < ? ORDER BY lastscan ASC'
    elif available == True:
        query = 'SELECT name FROM users WHERE available = 1 AND lastscan < ? ORDER BY lastscan ASC'
    cur.execute(query, [threshold])
    availables = cur.fetchall()
    for item in availables:
        # Each item is a one-element row tuple; process() iterates it and
        # receives the bare username string.
        process(item, quiet=True, noskip=True)
def commapadding(s, spacer, spaced, left=True, forcestring=False):
    """Comma-format *s* as a number, then pad it with *spacer* to a total
    width of *spaced* characters.

    s           : number or string; non-numeric strings skip the comma
                  step and are only padded.
    spacer      : pad string (normally a single character).
    spaced      : target width; inputs already that wide are returned unpadded.
    left        : pad on the left (right-align) when True, else on the right.
    forcestring : skip the comma-formatting step entirely.
    """
    if not forcestring:
        # BUG FIX: was a bare `except: pass`, which swallowed every error
        # (even KeyboardInterrupt); catch only the conversion failures.
        try:
            s = '{0:,}'.format(int(s))
        except (ValueError, TypeError):
            # Not something int() understands (e.g. 'abc'); pad as-is.
            pass
    # Ensure padding works even for non-str inputs that skipped formatting
    # (previously len() crashed on e.g. forcestring=True with an int).
    s = str(s)
    pad = spacer * (spaced - len(s))
    return pad + s if left else s + pad
def count(validonly=False):
    """Number of rows in `users`; with validonly=True, only accounts that
    were scanned (idint set) and are taken (available == 0)."""
    if validonly:
        query = 'SELECT COUNT(*) FROM users WHERE idint IS NOT NULL AND available == 0'
    else:
        query = 'SELECT COUNT(*) FROM users'
    cur.execute(query)
    return cur.fetchone()[0]
def everyoneyouveeverinteractedwith(username):
    '''
    Not because I should have, but because I was able to.

    Crawl outward from `username`, processing every author whose
    submissions this user commented on, breadth-first-ish, forever-ish.
    '''
    # Caps keep the working sets bounded; `done` acts as a rolling cache.
    max_todo_length = 2 ** 10
    max_done_length = 2 ** 13
    def find_interactions(username2):
        # Process one user and queue the authors they interacted with.
        if username2 in done:
            return
        process(username2, quiet=True)
        done.add(username2)
        while len(done) >= max_done_length:
            # set.pop() discards an arbitrary element to bound memory.
            done.pop()
        if len(todo) >= max_todo_length-100:
            return
        user = r.get_redditor(username2)
        comments = user.get_comments(limit=max_todo_length)
        for comment in comments:
            # link_author is the author of the submission commented on.
            author = comment.link_author.lower()
            if len(todo) > max_todo_length:
                break
            if author == '[deleted]':
                continue
            if author in done:
                continue
            if author in todo:
                continue
            print('%s interacted with %s' % (username2, author))
            todo.add(author)
    todo = set()
    done = set()
    todo.add(username)
    l = 1
    while l > 0:
        find_interactions(todo.pop())
        l = len(todo)
        # \r keeps the progress line in place on the console.
        print('Have %d names\r' % l, end='')
        sys.stdout.flush()
def execit(*args, **kwargs):
    '''
    Allows another module to do stuff here using local names instead of qual names.
    '''
    # SECURITY: executes arbitrary code in this module's context.
    # Only ever call with trusted, developer-supplied strings.
    exec(*args, **kwargs)
def fetchgenerator():
    """Yield rows from the current `cur` query until it is exhausted, so
    callers can iterate results instead of writing fetchone loops."""
    # iter(callable, sentinel) calls fetchone() until it returns a value
    # equal to None — i.e. until the result set runs out.
    yield from iter(cur.fetchone, None)
def fetchwriter(outfile, spacer1=' ', spacer2=None, brief=False):
    '''
    Write items from the current sql query to the specified file.
    If two spacers are provided, it will flip-flop between them
    on alternating lines to help readability.
    brief = use the short memberformat_brief layout instead of the
            full one.
    '''
    flipflop = True
    for item in fetchgenerator():
        spacer = spacer1 if flipflop else spacer2
        if brief:
            item = memberformat_brief(item, spacer)
        else:
            item = memberformat_full(item, spacer)
        print(item, file=outfile)
        if spacer2 is not None:
            # Alternate spacers only when a second one was supplied.
            flipflop = not flipflop
def find(name, doreturn=False):
    """Look up a username in the database and print its details, or
    return the raw row when doreturn=True.

    Returns None when the name is unknown or after printing."""
    row = getentry(name=name)
    if not row:
        return None
    if doreturn:
        return row
    print_message(row)
    return None
def get_from_hot(sr, limit=None, submissions=True, comments=False, returnnames=False):
    """Shortcut for get_from_listing, using /hot."""
    return get_from_listing(sr, limit, praw.objects.Subreddit.get_hot,
        submissions, comments, returnnames)
def get_from_listing(sr, limit, listfunction, submissions=True, comments=True, returnnames=False):
    '''
    Get submission listings using one of PRAW's get methods
    and process those usernames
    `listfunction` would be praw.objects.Subreddit.get_new for example

    submissions : collect authors of submissions from `listfunction`
    comments    : collect authors from the subreddit's comment stream
    returnnames : return the set of names instead of process()ing them
    '''
    subreddit = r.get_subreddit(sr, fetch=sr != 'all')
    if limit is None:
        limit = 1000
    authors = set()
    if submissions is True:
        print('/r/%s, %d submissions' % (subreddit.display_name, limit))
        # FIX: call the unbound listing method directly instead of the old
        # `subreddit.lf = listfunction` trick, which needlessly mutated the
        # praw object just to make the same call.
        for item in listfunction(subreddit, limit=limit):
            if item.author is not None:
                authors.add(item.author.name)
    if comments is True:
        print('/r/%s, %d comments' % (subreddit.display_name, limit))
        for item in subreddit.get_comments(limit=limit):
            if item.author is not None:
                authors.add(item.author.name)
    if returnnames is True:
        return authors
    try:
        process(authors)
    except KeyboardInterrupt:
        # Persist whatever process() managed to insert before re-raising.
        sql.commit()
        raise
def get_from_new(sr, limit=None, submissions=True, comments=True, returnnames=False):
    """Shortcut for get_from_listing, using /new."""
    return get_from_listing(sr, limit, praw.objects.Subreddit.get_new,
        submissions, comments, returnnames)
def get_from_top(sr, limit=None, submissions=True, comments=False, returnnames=False):
    """Shortcut for get_from_listing, using /top?t=all."""
    return get_from_listing(sr, limit, praw.objects.Subreddit.get_top_from_all,
        submissions, comments, returnnames)
def getentry(**kwargs):
    """Fetch one user row by exactly one of idint=, idstr= or name=.

    Name lookups are case-insensitive (via the lowername column).
    Returns the matching row tuple, None for an unrecognized keyword,
    or None when no row matches."""
    if len(kwargs) != 1:
        raise Exception("Only 1 argument please")
    (key, value), = kwargs.items()
    if key == 'name':
        query = 'SELECT * FROM users WHERE lowername=?'
        value = value.lower()
    elif key == 'idint':
        query = 'SELECT * FROM users WHERE idint=?'
    elif key == 'idstr':
        query = 'SELECT * FROM users WHERE idstr=?'
    else:
        return None
    cur.execute(query, [value])
    return cur.fetchone()
def getnow(timestamp=True):
    """Current UTC time: a POSIX timestamp by default, or the aware
    datetime object when timestamp=False."""
    moment = datetime.datetime.now(datetime.timezone.utc)
    return moment.timestamp() if timestamp else moment
def human(timestamp):
    """Render a unix timestamp as e.g. 'Jan 01 1970 00:00:00 UTC'."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return moment.strftime("%b %d %Y %H:%M:%S UTC")
def idlenew(subreddit='all', sleepy=15):
    '''
    Infinitely grab the /new queue and process names, ignoring any
    exceptions. Great for processing while AFK.

    sleepy = seconds to wait between passes.
    '''
    while True:
        try:
            get_from_new(subreddit, 100)
        except KeyboardInterrupt:
            # Ctrl-C is the only way out.
            raise
        except:
            # Deliberately broad: network hiccups and API errors must not
            # stop an unattended run; log the traceback and keep going.
            traceback.print_exc()
        time.sleep(sleepy)
def load_textfile(filename):
    '''
    Returns the file's lines as a list, with surrounding whitespace
    stripped from each line.
    See also `save_textfile`.
    '''
    # FIX: `with` guarantees the handle is closed even if reading raises
    # (the old open()/close() pair leaked on exceptions).
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def memberformat_brief(data, spacer='.'):
    '''
    Shorter version of memberformat which I'm using for the "available" list.

    `spacer` is unused here; it is accepted only so fetchwriter can call
    both formatters with the same arguments.
    '''
    name = data[SQL_NAME]
    lastscan = data[SQL_LASTSCAN]
    lastscan = human(lastscan)
    out = MEMBERFORMAT_BRIEF % (lastscan, name)
    return out
def memberformat_full(data, spacer='.'):
    '''
    Given a data list, create a string that will
    become a single row in one of the show files.

    Every column is padded to a fixed width with `spacer` so rows line up.
    '''
    idstr = data[SQL_IDSTR]
    # forcestring: ids are base36 strings, never comma-format them.
    idstr = commapadding(idstr, spacer, 5, forcestring=True)
    # Usernames are maximum of 20 chars
    name = data[SQL_NAME]
    name += spacer*(20 - len(name))
    thuman = data[SQL_HUMAN]
    if thuman is None:
        # Unscanned rows get a blank creation-date column of equal width.
        thuman = ' '*24
    link_karma = data[SQL_LINK_KARMA]
    comment_karma = data[SQL_COMMENT_KARMA]
    total_karma = data[SQL_TOTAL_KARMA]
    if link_karma is None:
        # Karma columns are all-or-nothing; show literal 'None' padded.
        link_karma = commapadding('None', spacer, 9)
        comment_karma = commapadding('None', spacer, 9)
        total_karma = commapadding('None', spacer, 10)
    else:
        link_karma = commapadding(link_karma, spacer, 9)
        comment_karma = commapadding(comment_karma, spacer, 9)
        total_karma = commapadding(total_karma, spacer, 10)
    lastscan = data[SQL_LASTSCAN]
    lastscan = human(lastscan)
    out = MEMBERFORMAT_FULL % (
        idstr,
        thuman,
        name,
        link_karma,
        comment_karma,
        total_karma,
        lastscan)
    return out
def popgenerator(x):
    """Yield items popped off the end of *x* until it is empty.

    This destroys the input list in-place (last element first)."""
    while x:
        yield x.pop()
def process(users, quiet=False, knownid='', noskip=False):
    '''
    Fetch the /u/ page for a user or list of users
    users : A list of strings, each representing a username. Since reddit
            usernames must be 3 - 20 characters and only contain
            alphanumeric + "_-", any improper strings will be removed.
    quiet : Silences the "x old" report at the end
    knownid : If you're processing a user which does not exist, but you know
              what their user ID was supposed to be, this will at least allow
              you to flesh out the database entry a little better.
    noskip : Do not skip usernames which are already in the database.
    '''
    # Count of rows that already existed (smartinsert returned False).
    olds = 0
    if isinstance(users, list):
        users = list(set(users))
    if isinstance(users, str):
        users = [users]
    # I don't want to import types.GeneratorType just for one isinstance
    # knownid only makes sense for a single user, so drop it otherwise.
    if type(users).__name__ == 'generator' or len(users) > 1:
        knownid=''
    # userify_list yields Redditor objects, or [name, availability] lists
    # for names that do not exist.
    users = userify_list(users, noskip=noskip, quiet=quiet)
    current = 0
    for user in users:
        current += 1
        data = [None] * SQL_COLUMNCOUNT
        data[SQL_LASTSCAN] = int(getnow())
        if isinstance(user, list):
            # This happens when we receive NotFound. [name, availability]
            if knownid != '':
                data[SQL_IDINT] = b36(knownid)
                data[SQL_IDSTR] = knownid
            data[SQL_NAME] = user[0]
            data[SQL_AVAILABLE] = AVAILABILITY[user[1]]
        else:
            # We have a Redditor object.
            h = human(user.created_utc)
            data[SQL_IDINT] = b36(user.id)
            data[SQL_IDSTR] = user.id
            data[SQL_CREATED] = user.created_utc
            data[SQL_HUMAN] = h
            data[SQL_NAME] = user.name
            data[SQL_LINK_KARMA] = user.link_karma
            data[SQL_COMMENT_KARMA] = user.comment_karma
            data[SQL_TOTAL_KARMA] = user.comment_karma + user.link_karma
            data[SQL_AVAILABLE] = 0
        data[SQL_LOWERNAME] = data[SQL_NAME].lower()
        # The zero-padded counter is just a progress prefix for printing.
        x = smartinsert(data, '%04d' % current)
        if x is False:
            olds += 1
    if quiet is False:
        print('%d old' % olds)
p = process
def processid(idnum, ranger=1):
    '''
    Do an author_fullname search in an attempt to find a user by their ID.
    This is not reliable if the user has no public submissions.

    idnum  : a t2_xxx fullname or bare base36 account id.
    ranger : how many consecutive ids (starting at idnum) to try.
    '''
    # Strip any 't2_' prefix, keeping only the base36 id.
    idnum = idnum.split('_')[-1]
    base = b36(idnum)
    for x in range(ranger):
        idnum = x + base
        exists = getentry(idint=idnum)
        if exists is not None:
            print('Skipping %s : %s' % (b36(idnum), exists[SQL_NAME]))
            continue
        idnum = 't2_' + b36(idnum)
        idnum = idnum.lower()
        print('%s - ' % idnum, end='')
        sys.stdout.flush()
        search = list(r.search('author_fullname:%s' % idnum))
        if len(search) > 0:
            item = search[0].author.name
            # idnum[3:] drops the 't2_' prefix again for storage.
            process(item, quiet=True, knownid=idnum[3:])
        else:
            print('No idea.')
pid = processid
def print_message(data, printprefix=''):
    """Print a one-line console summary of a user row (SQL_* column layout).

    Scanned accounts show id / creation date / name / karma; rows that were
    never scanned only report whether the name is available."""
    if data[SQL_HUMAN] is not None:
        # Row was fully scanned: show the account details.
        print('%s %s : %s : %s : %d : %d' % (
            printprefix,
            data[SQL_IDSTR].rjust(5, ' '),
            data[SQL_HUMAN],
            data[SQL_NAME],
            data[SQL_LINK_KARMA],
            data[SQL_COMMENT_KARMA]))
    else:
        # BUG FIX: was `is 1`, an identity comparison with an int literal
        # (SyntaxWarning on Python 3.8+, relies on CPython interning and
        # breaks for equivalent values like 1.0); compare with == instead.
        statement = 'available' if data[SQL_AVAILABLE] == 1 else 'unavailable'
        statement = statement.rjust(32, ' ')
        print('%s %s : %s' % (printprefix, statement, data[SQL_NAME]))
def save_textfile(filename, lines):
    '''
    Write items of list as lines in file (one per line, newline-terminated).
    See also `load_textfile`.
    '''
    # FIX: `with` guarantees the handle is flushed and closed on every
    # exit path (the old open()/close() pair leaked on exceptions).
    with open(filename, 'w') as f:
        for x in lines:
            print(x, file=f)
def show():
    '''
    Create a bunch of text files that nobody will read

    Dumps the users table into several differently-sorted report files
    under show\\ (Windows-style paths) and updates the account count in
    README.md.
    '''
    file_time = open('show\\time.txt', 'w')
    file_name = open('show\\name.txt', 'w')
    file_karma_total = open('show\\karma_total.txt', 'w')
    #file_karma_link = open('show\\karma_link.txt', 'w')
    #file_karma_comment = open('show\\karma_comment.txt', 'w')
    file_available = open('show\\available.txt', 'w')
    # NOTE(review): file_stats is opened but never written or closed here.
    file_stats = open('show\\stats.txt', 'w')
    file_readme = open('README.md', 'r')
    totalitems = count(validonly=False)
    validitems = count(validonly=True)
    print(totalitems, validitems)
    print('Updating readme')
    readmelines = file_readme.readlines()
    file_readme.close()
    # Line 4 of the README is assumed to be the account-count headline.
    readmelines[3] = '#####{0:,} accounts\n'.format(validitems)
    readmelines = ''.join(readmelines)
    file_readme = open('README.md', 'w')
    file_readme.write(readmelines)
    file_readme.close()
    print('Writing time file.')
    print(HEADER_FULL, file=file_time)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL AND created IS NOT NULL ORDER BY created ASC')
    fetchwriter(file_time)
    file_time.close()
    print('Writing name file.')
    print(HEADER_FULL, file=file_name)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY lowername ASC')
    fetchwriter(file_name)
    file_name.close()
    print('Writing karma total file.')
    print(HEADER_FULL, file=file_karma_total)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY total_karma DESC, lowername ASC')
    fetchwriter(file_karma_total)
    file_karma_total.close()
    #print('Writing karma link file.')
    #print(HEADER_FULL, file=file_karma_link)
    #cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY link_karma DESC, lowername ASC')
    #fetchwriter(file_karma_link)
    #file_karma_link.close()
    #print('Writing karma comment file.')
    #print(HEADER_FULL, file=file_karma_comment)
    #cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY comment_karma DESC, lowername ASC')
    #fetchwriter(file_karma_comment)
    #file_karma_comment.close()
    print('Writing available')
    print(HEADER_BRIEF, file=file_available)
    # Names of length 3 are excluded from the "available" report.
    cur.execute('SELECT * FROM users WHERE available == 1 AND LENGTH(name) > 3 ORDER BY lowername ASC')
    fetchwriter(file_available, spacer1=' ', brief=True)
    file_available.close()
def smartinsert(data, printprefix=''):
    '''
    Originally, all queries were based on idint, but this caused problems
    when accounts were deleted / banned, because it wasn't possible to
    sql-update without knowing the ID.

    Insert `data` (a full SQL_COLUMNCOUNT row) keyed on lowername,
    updating the existing row if present. Returns True when a new row
    was inserted, False when an existing row was updated.
    '''
    print_message(data, printprefix)
    exists_in_db = (getentry(name=data[SQL_NAME].lower()) != None)
    if exists_in_db:
        isnew = False
        # Reorder the columns to match the UPDATE placeholders below
        # (lowername last, as the WHERE parameter).
        data = [
            data[SQL_IDINT],
            data[SQL_IDSTR],
            data[SQL_CREATED],
            data[SQL_HUMAN],
            data[SQL_LINK_KARMA],
            data[SQL_COMMENT_KARMA],
            data[SQL_TOTAL_KARMA],
            data[SQL_AVAILABLE],
            data[SQL_LASTSCAN],
            data[SQL_NAME],
            data[SQL_NAME].lower()]
        # coalesce allows us to fallback on the existing values
        # if the given values are null, to avoid erasing data about users
        # whose accounts are now deleted.
        cur.execute('UPDATE users SET \
            idint = coalesce(?, idint), \
            idstr = coalesce(?, idstr), \
            created = coalesce(?, created), \
            human = coalesce(?, human), \
            link_karma = coalesce(?, link_karma), \
            comment_karma = coalesce(?, comment_karma), \
            total_karma = coalesce(?, total_karma), \
            available = coalesce(?, available), \
            lastscan = coalesce(?, lastscan), \
            name = coalesce(?, name) \
            WHERE lowername=?', data)
    else:
        isnew = True
        cur.execute('INSERT INTO users VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
    sql.commit()
    return isnew
def userify_list(users, noskip=False, quiet=False):
    """Generator: resolve an iterable of usernames into praw Redditor
    objects, yielding [name, availability] lists for names that do not
    exist. Invalid names and recently-scanned names are skipped."""
    if quiet is False:
        # Generators have no len(); only report a count when we can.
        if hasattr(users, '__len__'):
            print('Processing %d unique names' % len(users))
    for username in users:
        if isinstance(username, str):
            # Reddit usernames are 3-20 chars of [A-Za-z0-9_-].
            if len(username) < 3 or len(username) > 20:
                print('%s : Invalid length of %d' % (username, len(username)))
                continue
            if not all(c in VALID_CHARS for c in username):
                print('%s : Contains invalid characters' % username)
                continue
        elif isinstance(username, praw.objects.Redditor):
            username = username.name.lower()
        else:
            # NOTE(review): unknown types are reported but still fall
            # through to the lookup below — presumably never hit.
            print('Don\'t know what to do with %s' % username)
        existing_entry = getentry(name=username)
        if existing_entry is not None:
            lastscan = existing_entry[SQL_LASTSCAN]
            # Only rescan names whose last scan is older than the cutoff.
            should_rescan = (getnow() - lastscan) > MIN_LASTSCAN_DIFF
            if should_rescan is False and noskip is False:
                prefix = ' ' * 29
                appendix = '(available)' if existing_entry[SQL_AVAILABLE] else ''
                print('%sskipping : %s %s' % (prefix, username, appendix))
                continue
        try:
            user = r.get_redditor(username, fetch=True)
            yield user
        except praw.errors.NotFound:
            # The account does not exist; report whether the name could
            # still be registered.
            availability = r.is_username_available(username)
            availability = AVAILABILITY[availability]
            yield [username, availability]
| [
"edalool@yahoo.com"
] | edalool@yahoo.com |
18ef80dd2de67a6233f311d3e3f208441538b880 | d737fa49e2a7af29bdbe5a892bce2bc7807a567c | /software/qt_examples/src/pyqt-official/painting/painterpaths.py | 71c45fc5a6d7b0abe3471e2fbfd81fca085adbb7 | [
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later",
"GPL-3.0-only"
] | permissive | TG-Techie/CASPER | ec47dfbfd6c3a668739ff4d707572e0b853518b4 | 2575d3d35e7dbbd7f78110864e659e582c6f3c2e | refs/heads/master | 2020-12-19T12:43:53.825964 | 2020-01-23T17:24:04 | 2020-01-23T17:24:04 | 235,736,872 | 0 | 1 | MIT | 2020-01-23T17:09:19 | 2020-01-23T06:29:10 | Python | UTF-8 | Python | false | false | 11,019 | py | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from math import cos, pi, sin
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import (QBrush, QColor, QFont, QLinearGradient, QPainter,
QPainterPath, QPalette, QPen)
from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QLabel,
QSizePolicy, QSpinBox, QWidget)
class RenderArea(QWidget):
    """Widget that paints one QPainterPath, scaled to its own size,
    with configurable fill gradient, pen and rotation.

    NOTE(review): fillColor1/fillColor2 and penColor are only created by
    the setter methods — paintEvent raises AttributeError if it runs
    before setFillGradient() and setPenColor() have been called (the
    Window class always calls them during construction).
    """

    def __init__(self, path, parent=None):
        super(RenderArea, self).__init__(parent)

        self.path = path
        self.penWidth = 1
        self.rotationAngle = 0
        self.setBackgroundRole(QPalette.Base)

    def minimumSizeHint(self):
        # Smallest sensible preview size for the layout.
        return QSize(50, 50)

    def sizeHint(self):
        return QSize(100, 100)

    def setFillRule(self, rule):
        self.path.setFillRule(rule)
        self.update()

    def setFillGradient(self, color1, color2):
        # Top and bottom colors of the vertical fill gradient.
        self.fillColor1 = color1
        self.fillColor2 = color2
        self.update()

    def setPenWidth(self, width):
        self.penWidth = width
        self.update()

    def setPenColor(self, color):
        self.penColor = color
        self.update()

    def setRotationAngle(self, degrees):
        self.rotationAngle = degrees
        self.update()

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Paths are authored in a 100x100 coordinate space; scale to fit,
        # then rotate about the center (50, 50).
        painter.scale(self.width() / 100.0, self.height() / 100.0)
        painter.translate(50.0, 50.0)
        painter.rotate(-self.rotationAngle)
        painter.translate(-50.0, -50.0)

        painter.setPen(
                QPen(self.penColor, self.penWidth, Qt.SolidLine, Qt.RoundCap,
                        Qt.RoundJoin))

        # Vertical gradient spanning the full 0..100 logical height.
        gradient = QLinearGradient(0, 0, 0, 100)
        gradient.setColorAt(0.0, self.fillColor1)
        gradient.setColorAt(1.0, self.fillColor2)
        painter.setBrush(QBrush(gradient))

        painter.drawPath(self.path)
class Window(QWidget):
NumRenderAreas = 9
def __init__(self):
super(Window, self).__init__()
rectPath = QPainterPath()
rectPath.moveTo(20.0, 30.0)
rectPath.lineTo(80.0, 30.0)
rectPath.lineTo(80.0, 70.0)
rectPath.lineTo(20.0, 70.0)
rectPath.closeSubpath()
roundRectPath = QPainterPath()
roundRectPath.moveTo(80.0, 35.0)
roundRectPath.arcTo(70.0, 30.0, 10.0, 10.0, 0.0, 90.0)
roundRectPath.lineTo(25.0, 30.0)
roundRectPath.arcTo(20.0, 30.0, 10.0, 10.0, 90.0, 90.0)
roundRectPath.lineTo(20.0, 65.0)
roundRectPath.arcTo(20.0, 60.0, 10.0, 10.0, 180.0, 90.0)
roundRectPath.lineTo(75.0, 70.0)
roundRectPath.arcTo(70.0, 60.0, 10.0, 10.0, 270.0, 90.0)
roundRectPath.closeSubpath()
ellipsePath = QPainterPath()
ellipsePath.moveTo(80.0, 50.0)
ellipsePath.arcTo(20.0, 30.0, 60.0, 40.0, 0.0, 360.0)
piePath = QPainterPath()
piePath.moveTo(50.0, 50.0)
piePath.lineTo(65.0, 32.6795)
piePath.arcTo(20.0, 30.0, 60.0, 40.0, 60.0, 240.0)
piePath.closeSubpath()
polygonPath = QPainterPath()
polygonPath.moveTo(10.0, 80.0)
polygonPath.lineTo(20.0, 10.0)
polygonPath.lineTo(80.0, 30.0)
polygonPath.lineTo(90.0, 70.0)
polygonPath.closeSubpath()
groupPath = QPainterPath()
groupPath.moveTo(60.0, 40.0)
groupPath.arcTo(20.0, 20.0, 40.0, 40.0, 0.0, 360.0)
groupPath.moveTo(40.0, 40.0)
groupPath.lineTo(40.0, 80.0)
groupPath.lineTo(80.0, 80.0)
groupPath.lineTo(80.0, 40.0)
groupPath.closeSubpath()
textPath = QPainterPath()
timesFont = QFont('Times', 50)
timesFont.setStyleStrategy(QFont.ForceOutline)
textPath.addText(10, 70, timesFont, "Qt")
bezierPath = QPainterPath()
bezierPath.moveTo(20, 30)
bezierPath.cubicTo(80, 0, 50, 50, 80, 80)
starPath = QPainterPath()
starPath.moveTo(90, 50)
for i in range(1, 5):
starPath.lineTo(50 + 40 * cos(0.8 * i * pi),
50 + 40 * sin(0.8 * i * pi))
starPath.closeSubpath()
self.renderAreas = [RenderArea(rectPath), RenderArea(roundRectPath),
RenderArea(ellipsePath), RenderArea(piePath),
RenderArea(polygonPath), RenderArea(groupPath),
RenderArea(textPath), RenderArea(bezierPath),
RenderArea(starPath)]
assert len(self.renderAreas) == 9
self.fillRuleComboBox = QComboBox()
self.fillRuleComboBox.addItem("Odd Even", Qt.OddEvenFill)
self.fillRuleComboBox.addItem("Winding", Qt.WindingFill)
fillRuleLabel = QLabel("Fill &Rule:")
fillRuleLabel.setBuddy(self.fillRuleComboBox)
self.fillColor1ComboBox = QComboBox()
self.populateWithColors(self.fillColor1ComboBox)
self.fillColor1ComboBox.setCurrentIndex(
self.fillColor1ComboBox.findText("mediumslateblue"))
self.fillColor2ComboBox = QComboBox()
self.populateWithColors(self.fillColor2ComboBox)
self.fillColor2ComboBox.setCurrentIndex(
self.fillColor2ComboBox.findText("cornsilk"))
fillGradientLabel = QLabel("&Fill Gradient:")
fillGradientLabel.setBuddy(self.fillColor1ComboBox)
fillToLabel = QLabel("to")
fillToLabel.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.penWidthSpinBox = QSpinBox()
self.penWidthSpinBox.setRange(0, 20)
penWidthLabel = QLabel("&Pen Width:")
penWidthLabel.setBuddy(self.penWidthSpinBox)
self.penColorComboBox = QComboBox()
self.populateWithColors(self.penColorComboBox)
self.penColorComboBox.setCurrentIndex(
self.penColorComboBox.findText('darkslateblue'))
penColorLabel = QLabel("Pen &Color:")
penColorLabel.setBuddy(self.penColorComboBox)
self.rotationAngleSpinBox = QSpinBox()
self.rotationAngleSpinBox.setRange(0, 359)
self.rotationAngleSpinBox.setWrapping(True)
self.rotationAngleSpinBox.setSuffix(u'\N{DEGREE SIGN}')
rotationAngleLabel = QLabel("&Rotation Angle:")
rotationAngleLabel.setBuddy(self.rotationAngleSpinBox)
self.fillRuleComboBox.activated.connect(self.fillRuleChanged)
self.fillColor1ComboBox.activated.connect(self.fillGradientChanged)
self.fillColor2ComboBox.activated.connect(self.fillGradientChanged)
self.penColorComboBox.activated.connect(self.penColorChanged)
for i in range(Window.NumRenderAreas):
self.penWidthSpinBox.valueChanged.connect(self.renderAreas[i].setPenWidth)
self.rotationAngleSpinBox.valueChanged.connect(self.renderAreas[i].setRotationAngle)
topLayout = QGridLayout()
for i in range(Window.NumRenderAreas):
topLayout.addWidget(self.renderAreas[i], i / 3, i % 3)
mainLayout = QGridLayout()
mainLayout.addLayout(topLayout, 0, 0, 1, 4)
mainLayout.addWidget(fillRuleLabel, 1, 0)
mainLayout.addWidget(self.fillRuleComboBox, 1, 1, 1, 3)
mainLayout.addWidget(fillGradientLabel, 2, 0)
mainLayout.addWidget(self.fillColor1ComboBox, 2, 1)
mainLayout.addWidget(fillToLabel, 2, 2)
mainLayout.addWidget(self.fillColor2ComboBox, 2, 3)
mainLayout.addWidget(penWidthLabel, 3, 0)
mainLayout.addWidget(self.penWidthSpinBox, 3, 1, 1, 3)
mainLayout.addWidget(penColorLabel, 4, 0)
mainLayout.addWidget(self.penColorComboBox, 4, 1, 1, 3)
mainLayout.addWidget(rotationAngleLabel, 5, 0)
mainLayout.addWidget(self.rotationAngleSpinBox, 5, 1, 1, 3)
self.setLayout(mainLayout)
self.fillRuleChanged()
self.fillGradientChanged()
self.penColorChanged()
self.penWidthSpinBox.setValue(2)
self.setWindowTitle("Painter Paths")
    def fillRuleChanged(self):
        # Propagate the fill rule selected in the combo box (stored as item
        # userData) to every render area.
        rule = Qt.FillRule(self.currentItemData(self.fillRuleComboBox))
        for i in range(Window.NumRenderAreas):
            self.renderAreas[i].setFillRule(rule)
    def fillGradientChanged(self):
        # Rebuild the two-color gradient from the current combo selections
        # and apply it to every render area.
        color1 = QColor(self.currentItemData(self.fillColor1ComboBox))
        color2 = QColor(self.currentItemData(self.fillColor2ComboBox))
        for i in range(Window.NumRenderAreas):
            self.renderAreas[i].setFillGradient(color1, color2)
    def penColorChanged(self):
        # Apply the newly selected pen color to every render area.
        color = QColor(self.currentItemData(self.penColorComboBox))
        for i in range(Window.NumRenderAreas):
            self.renderAreas[i].setPenColor(color)
def populateWithColors(self, comboBox):
colorNames = QColor.colorNames()
for name in colorNames:
comboBox.addItem(name, name)
def currentItemData(self, comboBox):
return comboBox.itemData(comboBox.currentIndex())
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the event loop until the user quits.
    import sys
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| [
"TGTechie01@gmail.com"
] | TGTechie01@gmail.com |
1f1772421914f015926ea0d471a316d3cdc7ee23 | 766da3ffcbd26e7f58d711f5b0e7312cb365e9fb | /framework/utils/time_meter.py | d1d9c26db12e5b47cb70fb745317ca636bd3c553 | [
"MIT"
] | permissive | RobertCsordas/ndr | 1277b353eb61267e023b671072730bdc7e779ca5 | da20530dfb4336deddfbe5e79d62e72d1dc2580e | refs/heads/master | 2023-09-02T22:38:57.601098 | 2021-11-19T09:52:23 | 2021-11-19T09:52:23 | 414,588,414 | 20 | 4 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import time
class ElapsedTimeMeter:
    """Accumulates wall-clock time across multiple start/stop intervals.

    Also usable as a context manager: ``with meter: ...`` adds the elapsed
    time of the ``with`` block to the running total.
    """
    def __init__(self):
        self.reset()

    def start(self):
        """Begin timing a new interval."""
        self.start_time = time.time()

    def _curr_timer(self) -> float:
        # Elapsed time of the interval currently running (0 when stopped).
        if self.start_time is None:
            return 0
        return time.time() - self.start_time

    def stop(self):
        """Stop the running interval and fold it into the total."""
        self.sum += self._curr_timer()
        self.start_time = None

    def get(self, reset=False) -> float:
        """Return total elapsed time, including any still-running interval.

        Args:
            reset: if True, zero the meter after reading it.
        """
        res = self.sum + self._curr_timer()
        if reset:
            self.reset()
        return res

    def reset(self):
        """Zero the accumulated total and discard any running interval."""
        self.start_time = None
        self.sum = 0

    def __enter__(self):
        assert self.start_time is None
        self.start()
        # Fix: return self so ``with meter as m:`` binds the meter
        # (previously it bound None, per the context-manager protocol).
        return self

    def __exit__(self, *args):
        self.stop()
| [
"xdever@gmail.com"
] | xdever@gmail.com |
e8eab1a20e20aaa2466c43899cd2dbc957cfe1d7 | e3afb1720da16d9437b58ee3839e6184f521d9ee | /05_POST/main.py | 0a316d92d7b61e1ce6515e37c63d58b583fe4ed9 | [] | no_license | jaisenbe58r/APIs_Python | 1517d0bafa7a97bd460292514784105b9b005529 | afae603b2a2df86e65b06b9967c4960835767ae5 | refs/heads/master | 2021-04-11T12:43:16.883269 | 2020-03-21T17:05:26 | 2020-03-21T17:05:26 | 249,022,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import requests
import os
import json
if __name__ == '__main__':
    # Demo: POST a JSON-encoded payload to httpbin and echo the response body.
    url = "https://httpbin.org/post"
    payload = {'nombre':'Jaime', 'curso':'python', 'nivel':'intermedio'}
    # NOTE(review): data=json.dumps(...) sends a raw JSON string without a
    # Content-Type header; the ``json=payload`` keyword would set it too.
    response = requests.post(url, data = json.dumps(payload))
    if response.status_code == 200:
        print(response.content)
"jsendra@autis.es"
] | jsendra@autis.es |
1b68bf740ed1005e4796f4811ed3a0fb17945fc6 | f8bbdfb112618136fc4adccb03ce25fbfc48bff5 | /panel/module/management_ranking/CustomProcesses/ScoreCompilingProcess.py | 7f3e9d1df3d356a08670fc04958730bb997eec4e | [] | no_license | lazypanda10117/CICSA-Ranking-Platform | 160973987b533ede6e0b94af29b5bc85646b2bc0 | d5f6ac64a1f85c3333c71a7d81749b49145b9a16 | refs/heads/master | 2022-12-09T23:21:28.649252 | 2020-04-28T22:53:07 | 2020-04-28T22:53:07 | 133,093,367 | 3 | 2 | null | 2021-09-22T17:51:39 | 2018-05-11T22:14:01 | Python | UTF-8 | Python | false | false | 1,607 | py | from django.shortcuts import redirect
from django.shortcuts import reverse
from misc.CustomFunctions import RequestFunctions
from api.model_api import EventAPI
from api.model_api import SummaryAPI
from panel.module.base.block.CustomProcesses import AbstractBaseProcess
class ScoreCompilingProcess(AbstractBaseProcess):
def process(self):
post_dict = dict(self.request.POST)
event_id = int(self.param["id"])
related_summaries = SummaryAPI(self.request).filterSelf(summary_event_parent=event_id)
for i in range(1, int(len(post_dict)/4)+1):
school_id = int(RequestFunctions.getSingleRequestObj(post_dict, 'school_id_' + str(i)))
score = int(RequestFunctions.getSingleRequestObj(post_dict, 'score_' + str(i)))
ranking = int(RequestFunctions.getSingleRequestObj(post_dict, 'ranking_' + str(i)))
override_ranking = int(RequestFunctions.getSingleRequestObj(post_dict, 'override_ranking_' + str(i)))
summary_id = related_summaries.get(summary_event_school=school_id).id
result = dict(ranking=ranking, override_ranking=override_ranking, race_score=score)
SummaryAPI(self.request).updateSummaryResult(summary_id, result)
EventAPI(self.request).updateEventStatus(event_id, 'done')
return redirect(
reverse(
'panel.module.management_ranking.view_dispatch_param',
args=['activity', event_id]
)
)
def parseParams(self, param):
super().parseMatch('\d+')
param = dict(id=param)
return param
| [
"jeffreykam0415@gmail.com"
] | jeffreykam0415@gmail.com |
1a58f3399d6440b08b067dfb5c463b434e8e21a5 | 6a423fba995b1106086998477eb2bbd1953d3e70 | /js_events/cms_menus.py | 8bf76b09548418fb76b73807dc14013811acb02c | [] | no_license | compoundpartners/js-events | c94addf5c3d4440ed3d170b1232d753120b92262 | 3d2798c6e197cce96d246305642fed1002ce67f7 | refs/heads/master | 2023-08-09T05:52:03.545468 | 2023-07-24T15:06:36 | 2023-07-24T15:06:36 | 170,514,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import NoReverseMatch
except ImportError:
# Django 2.0
from django.urls import NoReverseMatch
from django.utils.translation import (
get_language_from_request,
ugettext_lazy as _,
)
from cms.menu_bases import CMSAttachMenu
from cms.apphook_pool import apphook_pool
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from .models import Event
class EventsMenu(CMSAttachMenu):
    """CMS-attachable menu exposing events as navigation nodes."""

    name = _('Events Menu')

    def get_queryset(self, request):
        """Return the base queryset; unpublished events only in edit mode."""
        in_edit_mode = bool(request.toolbar and request.toolbar.edit_mode_active)
        if in_edit_mode:
            return Event.objects
        return Event.objects.published()

    def get_nodes(self, request):
        """Build one NavigationNode per event that resolves to a URL in the
        request's language, optionally scoped to the attached apphook config."""
        language = get_language_from_request(request, check_path=True)
        events = self.get_queryset(request).active_translations(language)
        instance = getattr(self, 'instance', None)
        if instance:
            app = apphook_pool.get_apphook(instance.application_urls)
            app_config = app.get_config(instance.application_namespace) if app else None
            if app_config:
                events = events.filter(app_config=app_config)
        nodes = []
        for event in events:
            try:
                url = event.get_absolute_url(language=language)
            except NoReverseMatch:
                continue  # no resolvable URL for this language
            if not url:
                continue
            title = event.safe_translation_getter('title', language_code=language)
            nodes.append(NavigationNode(title, url, event.pk))
        return nodes
menu_pool.register_menu(EventsMenu)
| [
"evgeny.dmi3ev@gmail.com"
] | evgeny.dmi3ev@gmail.com |
c5af36d24bce66eace96ce089931f566d69ce2bc | 7860d9fba242d9bdcb7c06c32ee4064e4a7fa2f1 | /litex_boards/platforms/trenz_max1000.py | 9772cf7130da1dc27fbf2845ca2f848cf350ffce | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | litex-hub/litex-boards | ef1f200fd6d34c96621f4efa094ede874f4c34ab | b92c96b3a445fde31037f593a40fe621f85cb58c | refs/heads/master | 2023-09-03T15:09:11.198560 | 2023-08-30T15:22:11 | 2023-08-30T15:22:11 | 191,191,221 | 291 | 283 | BSD-2-Clause | 2023-09-03T20:32:58 | 2019-06-10T15:09:10 | Python | UTF-8 | Python | false | false | 4,043 | py | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019-2021 Antti Lukats <antti.lukats@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
#
# http://trenz.org/max1000-info
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
# Pin constraints: each tuple maps a named resource to FPGA balls via Pins()
# plus an electrical IOStandard; Subsignal() groups the individual wires of
# multi-signal resources (UART, SPI flash, SDRAM bus).
_io = [
    # Clk / Rst
    ("clk12", 0, Pins("H6"), IOStandard("3.3-V LVTTL")),
    # Leds
    ("user_led", 0, Pins("A8"), IOStandard("3.3-V LVTTL")),
    ("user_led", 1, Pins("A9"), IOStandard("3.3-V LVTTL")),
    ("user_led", 2, Pins("A11"), IOStandard("3.3-V LVTTL")),
    ("user_led", 3, Pins("A10"), IOStandard("3.3-V LVTTL")),
    ("user_led", 4, Pins("B10"), IOStandard("3.3-V LVTTL")),
    ("user_led", 5, Pins("C9"), IOStandard("3.3-V LVTTL")),
    ("user_led", 6, Pins("C10"), IOStandard("3.3-V LVTTL")),
    ("user_led", 7, Pins("D8"), IOStandard("3.3-V LVTTL")),
    # Buttons
    ("user_btn", 0, Pins("E6"), IOStandard("3.3-V LVTTL")),
    ("user_btn", 1, Pins("E7"), IOStandard("3.3-V LVTTL")), # nConfig.
    # Serial
    ("serial", 0,
        Subsignal("tx", Pins("B4"), IOStandard("3.3-V LVTTL")),
        Subsignal("rx", Pins("A4"), IOStandard("3.3-V LVTTL"))
    ),
    # SPI Flash
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins("B3")),
        Subsignal("clk", Pins("A3")),
        Subsignal("dq", Pins("A2", "B2", "B9", "C4")),
        IOStandard("3.3-V LVTTL")
    ),
    ("spiflash", 0,
        Subsignal("cs_n", Pins("B3")),
        Subsignal("clk", Pins("A3")),
        Subsignal("mosi", Pins("A2")),
        Subsignal("miso", Pins("B2")),
        Subsignal("wp", Pins("B9")),
        Subsignal("hold", Pins("C4")),
        IOStandard("3.3-V LVTTL"),
    ),
    # SDRAM
    ("sdram_clock", 0, Pins("M9"), IOStandard("3.3-V LVTTL")),
    ("sdram", 0,
        Subsignal("a", Pins(
            "K6 M5 N5 J8 N10 M11 N9 L10",
            "M13 N8 N4 M10")),
        Subsignal("ba", Pins("N6 K8")),
        Subsignal("cs_n", Pins("M4")),
        Subsignal("cke", Pins("M8")),
        Subsignal("ras_n", Pins("M7")),
        Subsignal("cas_n", Pins("N7")),
        Subsignal("we_n", Pins("K7")),
        Subsignal("dq", Pins(
            "D11 G10 F10 F9 E10 D9 G9 F8",
            "F13 E12 E13 D12 C12 B12 B13 A12")),
        Subsignal("dm", Pins("E9 F12")),
        IOStandard("3.3-V LVTTL")
    ),
    # all IO not connected to peripherals mapped to MFIO
    # <- LEDS -> <- PMOD -> <- D0..D14, D11R, D12R -> <- AIN0..AIN7 -> JE [C O I S i1 i2]sw
    ("bbio", 0, Pins("A8 A9 A11 A10 B10 C9 C10 D8 M3 L3 M2 M1 N3 N2 K2 K1 H8 K10 H5 H4 J1 J2 L12 J12 J13 K11 K12 J10 H10 H13 G12 B11 G13 E1 C2 C1 D1 E3 F1 E4 B1 E5 J6 J7 K5 L5 J5 L4 E6"),
        IOStandard("3.3-V LVTTL")),
]
class Platform(AlteraPlatform):
    """LiteX platform definition for the Trenz MAX1000 (Intel MAX 10)."""
    # 12 MHz on-board oscillator is the default clock.
    default_clk_name = "clk12"
    default_clk_period = 1e9/12e6
    def __init__(self, toolchain="quartus"):
        AlteraPlatform.__init__(self, "10M08SAU169C8G", _io, toolchain=toolchain)
        self.add_platform_command("set_global_assignment -name FAMILY \"MAX 10\"")
        # Reclaim dual-purpose configuration pins as user I/O.
        self.add_platform_command("set_global_assignment -name ENABLE_CONFIGURATION_PINS OFF")
        self.add_platform_command("set_global_assignment -name INTERNAL_FLASH_UPDATE_MODE \"SINGLE IMAGE WITH ERAM\"")
    def create_programmer(self):
        """Return the USB-Blaster programmer used to load the bitstream."""
        return USBBlaster()
    def do_finalize(self, fragment):
        # Add the 12 MHz timing constraint plus Quartus STA helper commands.
        AlteraPlatform.do_finalize(self, fragment)
        self.add_period_constraint(self.lookup_request("clk12", loose=True), 1e9/12e6)
        # Generate PLL clock in STA
        self.toolchain.additional_sdc_commands.append("derive_pll_clocks")
        # Calculates clock uncertainties
        self.toolchain.additional_sdc_commands.append("derive_clock_uncertainty")
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
b8d06a8e1b6fd0756d5f515307ecef361a32c7f9 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/logic/utilities/dimacs.py | bc3b091d40e5b4892e961122c7f6f09c982cc07c | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | #import pythonista
"""For reading in DIMACS file format
www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps
"""
from __future__ import print_function, division
from sympy.core import Symbol
from sympy.logic.boolalg import And, Or
import re
def load(s):
    """Loads a boolean expression from a string.

    Examples
    ========

    >>> from sympy.logic.utilities.dimacs import load
    >>> load('1')
    cnf_1
    >>> load('1 2')
    Or(cnf_1, cnf_2)
    >>> load('1 \\n 2')
    And(cnf_1, cnf_2)
    >>> load('1 2 \\n 3')
    And(Or(cnf_1, cnf_2), cnf_3)
    """
    # Raw strings for the DIMACS comment ("c ...") and problem ("p cnf n m")
    # line patterns (the originals relied on '\s' surviving in a non-raw
    # string, which is deprecated).
    pComment = re.compile(r'c.*')
    pStats = re.compile(r'p\s*cnf\s*(\d*)\s*(\d*)')

    clauses = []
    # Iterate the lines directly instead of destructively popping from the
    # front of a list (which was O(n) per line).
    for line in s.split('\n'):
        # Only deal with lines that aren't comments or the stats line.
        if pComment.match(line) or pStats.match(line):
            continue
        literals = []  # renamed: previously shadowed the builtin ``list``
        for lit in line.rstrip('\n').split(' '):
            if lit == '':
                continue
            if int(lit) == 0:
                continue  # a trailing 0 terminates a clause in DIMACS
            num = abs(int(lit))
            sym = Symbol("cnf_%s" % num)
            literals.append(sym if int(lit) > 0 else ~sym)
        if len(literals) > 0:
            clauses.append(Or(*literals))
    return And(*clauses)
def load_file(location):
    """Loads a boolean expression from a file."""
    with open(location) as handle:
        return load(handle.read())
| [
"tberk@gmx.at"
] | tberk@gmx.at |
57ed5ba90da3aa7395be14a2b86bb1c3a1d84f41 | 10c9ef03f6916f9596d18ecc28c0c73f548017ad | /manage.py | e59608d55b3e953ea1e3a79cf395e6127c56395b | [] | no_license | WinningAddicted/website-Django | 2127756b1b9ca6389e79822f9df59207ea9a1d46 | 79bef48f408df9123d28840fba1179710a9f7b38 | refs/heads/master | 2021-01-11T01:10:12.765425 | 2016-10-25T13:07:59 | 2016-10-25T13:07:59 | 71,049,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select the settings module, then hand the
    # CLI arguments to Django's management command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yatharth.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"="
] | = |
db32911df7ddf0cec04b14d0424f03ec7969dcaa | 0bad722e38038bdc5bf86970e1b69bd41dcd4974 | /exp/exp010.py | 19d2b1c9e153ab23135f113b8b344e0dff548475 | [] | no_license | Kitsunetic/commonlit | b66f0b89a10d2f59c3cee00ee3b5c08a1434589b | 8781139c8ed4cc59f7c7ac9d97c72c351ee91377 | refs/heads/master | 2023-07-03T00:27:11.126056 | 2021-08-03T01:49:40 | 2021-08-03T01:49:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,854 | py | import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torch.nn import functional as F
from pytorch_lightning.core.lightning import LightningModule
import pandas as pd
import dataclasses
from transformers import AutoTokenizer, AutoModel
import pytorch_lightning as pl
from transformers import AdamW, get_linear_schedule_with_warmup
from typing import Any
import os
import numpy as np
from pytorch_lightning import Trainer
try:
import mlflow.pytorch
except Exception as e:
print("error: mlflow is not found")
from datetime import datetime as dt
from pytorch_lightning.callbacks import ModelCheckpoint
from typing import List
import gc
import pickle
from collections import OrderedDict
class CommonLitDataset(Dataset):
    """Wraps a CommonLit dataframe; tokenizes one excerpt per item."""

    # Applied in order before tokenization: pad punctuation with spaces,
    # then collapse the double blanks the padding can produce.
    _SUBSTITUTIONS = (
        (".", " . "), (",", " , "), ("!", " ! "), ("?", " ? "),
        ("\n", " \n "), (")", " ) "), ("(", " ( "), ('"', ' " '),
        ("'", " ' "), (";", " ; "),
        ("  ", " "), ("  ", " "), ("  ", " "),
    )

    def __init__(self, df, tokenizer, transforms=None):
        self.df = df.reset_index()
        self.augmentations = transforms
        self.tokenizer = tokenizer

    def __len__(self):
        return self.df.shape[0]

    def _prepare_text(self, raw):
        # Same replacement sequence as the original inline helper.
        for old, new in self._SUBSTITUTIONS:
            raw = raw.replace(old, new)
        return raw

    def __getitem__(self, index):
        row = self.df.iloc[index]
        excerpt = self._prepare_text(row["excerpt"])
        encoded = self.tokenizer(excerpt, padding="max_length", max_length=256,
                                 truncation=True, return_tensors="pt")
        target = torch.tensor(row["target"], dtype=torch.float)
        return encoded["input_ids"][0], encoded["attention_mask"][0], target
@dataclasses.dataclass
class Config:
    """Hyper-parameters for one CommonLit training run."""
    experiment_name: str
    debug: bool = False
    fold: int = 0
    nlp_model_name: str = "microsoft/deberta-base"
    linear_dim: int = 128
    dropout: float = 0.2
    dropout_stack: float = 0.5
    batch_size: int = 16
    lr_bert: float = 3e-5
    lr_fc: float = 1e-5
    lr_rnn: float = 1e-4
    num_warmup_steps: int = 16*100
    num_training_steps: int = 16*2500
    # Fix: the original guarded this default with ``if debug:`` in the class
    # body, but a class-body ``if`` runs once at class-creation time (when
    # ``debug`` is its default False), so every instance got 15 regardless.
    # Pass ``epochs`` explicitly (or set it after construction) for short
    # debug runs.
    epochs: int = 15
    activation: Any = nn.GELU
    optimizer: Any = AdamW
    weight_decay: float = 0.01
    rnn_module: nn.Module = nn.LSTM
    rnn_module_num: int = 1
    rnn_module_dropout: float = 0.1
    rnn_module_activation: Any = None
    # Fix: was a bare (un-annotated) class attribute, i.e. silently NOT a
    # dataclass field and shared by all instances; promoted to a real field
    # with the same default (backward-compatible: new optional kwarg).
    rnn_module_shrink_ratio: float = 1
class LSTMModule(nn.Module):
    """One recurrent block: RNN -> LayerNorm -> Dropout -> optional activation."""

    def __init__(self, config, hidden_size):
        super().__init__()
        self.config = config
        self.hidden_size = hidden_size
        # Output width shrinks by the configured ratio at each stacked block.
        out_features = int(hidden_size * config.rnn_module_shrink_ratio)
        self.rnn_module = config.rnn_module(hidden_size, out_features)
        self.layer_norm = nn.LayerNorm(out_features)
        self.rnn_module_activation = config.rnn_module_activation
        self.dropout = nn.Dropout(config.rnn_module_dropout)

    def forward(self, x):
        # NOTE(review): the RNN is constructed without batch_first=True while
        # callers appear to pass (batch, seq, hidden) tensors -- confirm that
        # the seq/batch axis order is intended.
        hidden_states = self.rnn_module(x)[0]
        hidden_states = self.dropout(self.layer_norm(hidden_states))
        if self.rnn_module_activation is not None:
            hidden_states = self.rnn_module_activation(hidden_states)
        return hidden_states
class CommonLitModule(LightningModule):
    """BERT encoder + stacked RNN blocks + MLP head for CommonLit regression.

    Fix: every hyper-parameter read now goes through ``self.config``.
    Several methods previously read the module-level global ``config``,
    which only worked when the class happened to run under this file's
    ``__main__`` sweep.
    """

    def __init__(self,
                 config: Config,
                 output_dir: str,
                 seed: int = 19900222):
        super().__init__()
        self.save_hyperparameters()
        self.config = config
        self.output_dir = output_dir
        self.bert = AutoModel.from_pretrained(self.config.nlp_model_name)
        self.tokenizer = AutoTokenizer.from_pretrained(self.config.nlp_model_name)
        self.dropout_bert_stack = nn.Dropout(self.config.dropout_stack)
        self.seed = seed
        pl.seed_everything(seed)
        self.lstm = self.make_lstm_module()
        # Width after the RNN stack: each block shrinks by the configured ratio.
        hidden_size = int(self.bert.config.hidden_size *
                          (self.config.rnn_module_shrink_ratio ** self.config.rnn_module_num))
        self.linear = nn.Sequential(
            nn.Linear(hidden_size, self.config.linear_dim),
            nn.Dropout(self.config.dropout),
            self.config.activation(),
            nn.Linear(self.config.linear_dim, 1)
        )
        self.df_train: pd.DataFrame
        self.df_val: pd.DataFrame
        self.dataset_train: Dataset
        self.dataset_val: Dataset
        self.best_rmse = np.inf

    def make_lstm_module(self):
        """Stack ``rnn_module_num`` LSTMModule blocks with shrinking widths."""
        blocks = []
        hidden_size = self.bert.config.hidden_size
        for i in range(self.config.rnn_module_num):
            blocks.append((f"lstm_module_{i}",
                           LSTMModule(config=self.config, hidden_size=hidden_size)))
            # fix: previously read the global ``config`` here
            hidden_size = int(hidden_size * self.config.rnn_module_shrink_ratio)
        return nn.Sequential(OrderedDict(blocks))

    def forward(self, input_ids, attention_mask):
        # DeBERTa exposes all hidden states at output index 1, BERT-style
        # models at index 2; either way, average the last four layers with
        # independent dropout masks.
        if "deberta" in self.config.nlp_model_name:
            hidden = self.bert(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)[1]
        else:
            hidden = self.bert(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True)[2]
        x = torch.stack([self.dropout_bert_stack(h) for h in hidden[-4:]]).mean(dim=0)
        x = self.lstm(x).mean(dim=1)  # mean-pool over the token dimension
        x = self.linear(x)
        return x

    def training_step(self, batch, batch_idx):
        # NOTE(review): the scheduler is stepped manually on every batch here
        # even though it is also returned from configure_optimizers -- confirm
        # Lightning is not stepping it a second time per epoch.
        scheduler = self.lr_schedulers()
        scheduler.step()
        input_ids, attention_mask, target = batch
        output = self.forward(input_ids, attention_mask)
        loss = F.mse_loss(output.flatten(), target.flatten())
        self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):
        input_ids, attention_mask, target = batch
        output = self.forward(input_ids, attention_mask)
        loss = F.mse_loss(output.flatten(), target.flatten())
        self.log('val_loss', loss, prog_bar=True)
        return output.cpu().detach().numpy().flatten(), target.cpu().detach().numpy().flatten()

    def validation_epoch_end(self, val_step_outputs):
        pred = []
        target = []
        for output in val_step_outputs:
            pred.extend(output[0].tolist())
            target.extend(output[1].tolist())
        pred = np.array(pred)
        target = np.array(target)
        # Skip incomplete passes (e.g. the sanity-check run sees a partial set).
        if len(pred) != len(self.df_val):
            return
        df_val = self.df_val[["id"]].copy()  # .copy() avoids SettingWithCopyWarning
        df_val["pred"] = pred
        df_val["target"] = target
        df_val.to_csv(f"{self.output_dir}/val_fold{self.config.fold}_step{self.global_step}.csv", index=False)
        rmse = np.sqrt(1 / len(pred) * ((target - pred)**2).sum())
        # fix: metric names previously read the global ``config``
        self.log(f"rmse_fold{self.config.fold}", rmse, prog_bar=True)
        if self.best_rmse > rmse:
            self.log(f"best_rmse_fold{self.config.fold}", rmse, prog_bar=True)
            self.best_rmse = rmse
            df_val.to_csv(f"{self.output_dir}/val_fold{self.config.fold}_best.csv", index=False)

    def setup(self, stage=None):
        """Load the training CSV and split train/val with a 5-fold stride."""
        df = pd.read_csv("input/commonlitreadabilityprize/train.csv")
        if self.config.debug:  # fix: previously read the global ``config``
            df = df.iloc[:100]
        val_idx = np.arange(self.config.fold, len(df), 5)
        val_lookup = set(val_idx.tolist())  # O(1) membership for the filter below
        train_idx = [i for i in range(len(df)) if i not in val_lookup]
        self.df_val = df.iloc[val_idx].reset_index(drop=True)
        self.df_train = df.iloc[train_idx].reset_index(drop=True)
        self.dataset_train = CommonLitDataset(df=self.df_train,
                                              tokenizer=self.tokenizer)
        self.dataset_val = CommonLitDataset(df=self.df_val,
                                            tokenizer=self.tokenizer)

    def configure_optimizers(self):
        # Per-module learning rates; fix: previously read the global ``config``.
        optimizer = self.config.optimizer(
            params=[{"params": self.bert.parameters(), "lr": self.config.lr_bert},
                    {"params": self.linear.parameters(), "lr": self.config.lr_fc},
                    {"params": self.lstm.parameters(), "lr": self.config.lr_rnn}],
            weight_decay=self.config.weight_decay)
        scheduler = get_linear_schedule_with_warmup(optimizer,
                                                    num_warmup_steps=self.config.num_warmup_steps,
                                                    num_training_steps=self.config.num_training_steps)
        return [optimizer], [scheduler]

    def train_dataloader(self):
        return DataLoader(self.dataset_train, batch_size=self.config.batch_size)

    def val_dataloader(self):
        return DataLoader(self.dataset_val, batch_size=self.config.batch_size)
def main(config: Config,
         folds: List):
    """Train one model per fold under a single MLflow run.

    Side effects: creates a timestamped output directory, pickles the config
    into it, logs all config fields as MLflow params, and logs the mean of
    the per-fold best RMSEs at the end.
    """
    output_dir = f"output/{os.path.basename(__file__)[:-3]}/{dt.now().strftime('%Y%m%d%H%M%S')}"
    os.makedirs(output_dir)
    rmse = 0
    with mlflow.start_run():
        mlflow.pytorch.autolog(log_models=False)
        for key, value in config.__dict__.items():
            mlflow.log_param(key, value)
        with open(f"{output_dir}/config.pickle", "wb") as f:
            pickle.dump(config, f)
        for fold in folds:
            try:
                config.fold = fold
                checkpoint_callback = ModelCheckpoint(
                    monitor='val_loss',
                    dirpath=output_dir,
                    filename=f'best_fold{fold}',
                    save_top_k=1,
                    mode='min',
                )
                model = CommonLitModule(config=config,
                                        output_dir=output_dir)
                trainer = Trainer(gpus=1,
                                  precision=16,
                                  # fix: was "02" (zero-two); apex opt levels
                                  # are spelled with the letter O ("O0".."O3")
                                  amp_level="O2",
                                  max_epochs=config.epochs,
                                  progress_bar_refresh_rate=1,
                                  default_root_dir=output_dir,
                                  callbacks=[checkpoint_callback])
                trainer.fit(model)
                rmse += model.best_rmse
                # Release GPU memory before the next fold.
                del trainer, model
                gc.collect()
                torch.cuda.empty_cache()
            except Exception as e:
                # NOTE(review): the broad catch keeps the remaining folds
                # running, but only the message is printed -- traceback lost.
                print(e)
        mlflow.log_metric("rmse_mean", rmse / len(folds))
if __name__ == "__main__":
    # Sweep entry point: one full 5-fold training per shrink-ratio value.
    experiment_name = "lstm"
    folds = [0, 1, 2, 3, 4]
    for rnn_module_drop_ratio in [1]:
        config = Config(experiment_name=experiment_name)
        # Set after construction; the model reads it back as
        # config.rnn_module_shrink_ratio.
        config.rnn_module_shrink_ratio = rnn_module_drop_ratio
        main(config, folds=folds)
| [
"kurupical@gmail.com"
] | kurupical@gmail.com |
296fcbafb9e14304b02ae171edcc38f915888c75 | 59f0fde411ca668b874fa6fa6001069b9146f596 | /src/blog/migrations/0001_initial.py | 84fd012107ea912917ff84da08179642506c4756 | [] | no_license | nabilatajrin/django-blog-application | 4c256755fc31b41f609b44a5329fb128d46c5fa1 | 7971f8f7d8b3b442fbd4530bc0f32dff7865adcc | refs/heads/master | 2020-12-06T16:09:00.310415 | 2020-11-03T05:37:34 | 2020-11-03T05:37:34 | 232,503,248 | 0 | 0 | null | 2020-01-08T07:24:39 | 2020-01-08T07:19:38 | Python | UTF-8 | Python | false | false | 474 | py | # Generated by Django 2.2 on 2020-01-09 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Blog1Post table with an
    # auto primary key and a free-text ``title`` column.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Blog1Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
            ],
        ),
    ]
| [
"nabilatajrin@gmail.com"
] | nabilatajrin@gmail.com |
339639d4a52ad804b5f0d74045f5f8bc6b486ab6 | c94f888541c0c430331110818ed7f3d6b27b788a | /ak_05b080ffa82d4d06b1e7a357a34277ba/python/setup.py | 3c6ee05721510fdc60c55feb4c7cd7b6f1706b3b | [
"Apache-2.0",
"MIT"
] | permissive | alipay/antchain-openapi-prod-sdk | 48534eb78878bd708a0c05f2fe280ba9c41d09ad | 5269b1f55f1fc19cf0584dc3ceea821d3f8f8632 | refs/heads/master | 2023-09-03T07:12:04.166131 | 2023-09-01T08:56:15 | 2023-09-01T08:56:15 | 275,521,177 | 9 | 10 | MIT | 2021-03-25T02:35:20 | 2020-06-28T06:22:14 | PHP | UTF-8 | Python | false | false | 2,650 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for antchain_ak_05b080ffa82d4d06b1e7a357a34277ba.
Created on 19/08/2022
@author: Ant Chain SDK
"""
PACKAGE = "antchain_sdk_ak_05b080ffa82d4d06b1e7a357a34277ba"
NAME = "antchain_ak_05b080ffa82d4d06b1e7a357a34277ba" or "alibabacloud-package"
DESCRIPTION = "Ant Chain Ak_05b080ffa82d4d06b1e7a357a34277ba SDK Library for Python"
AUTHOR = "Ant Chain SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/alipay/antchain-openapi-prod-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"antchain_alipay_util>=1.0.1, <2.0.0",
"alibabacloud_tea_util>=0.3.6, <1.0.0",
"alibabacloud_rpc_util>=0.0.4, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["antchain","ak","05b080ffa82d4d06b1e7a357a34277ba"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
bce1e3aee1f648f85291bf76517e5522f6502ef0 | 3af6960c805e9903eb27c09d8bc7ebc77f5928fe | /problems/0056_Merge_Intervals/__init__.py | 5304b1da2e9a95667938b3f5520a802216339283 | [] | no_license | romain-li/leetcode | b3c8d9d4473eebd039af16ad2d4d99abc2768bdd | 5e82b69bd041c2c168d75cb9179a8cbd7bf0173e | refs/heads/master | 2020-06-04T20:05:03.592558 | 2015-06-08T18:05:03 | 2015-06-08T18:05:03 | 27,431,664 | 2 | 1 | null | 2015-06-08T18:05:04 | 2014-12-02T12:31:58 | Python | UTF-8 | Python | false | false | 298 | py | ID = '56'
TITLE = 'Merge Intervals'
DIFFICULTY = 'Hard'
URL = 'https://oj.leetcode.com/problems/merge-intervals/'
BOOK = False
PROBLEM = r"""Given a collection of intervals, merge all overlapping intervals.
For example,
Given `[1,3],[2,6],[8,10],[15,18]`,
return `[1,6],[8,10],[15,18]`.
"""
| [
"romain_li@163.com"
] | romain_li@163.com |
38b74240c0ac72b22bbb1077080fc560750aedf1 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/cpython/test_gdbm.py | f4a1592118780e9bdae368559dda1f7f9c485ac2 | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 40 | py | ../../from_cpython/Lib/test/test_gdbm.py | [
"kmod@dropbox.com"
] | kmod@dropbox.com |
cf139f390e7f58784c0fcf319c590eff3ad17f0e | 556403cb93b2fdd464c3aef4cba4f1c3dc42e9d7 | /AutomationProject/demo/getTestname.py | 1e04817bef4bb45df162c33780e2b5c0aac891af | [] | no_license | msivakumarm/PycharmProjects | 4d90a0105f334f2393d30fe46dc650808002b4fd | 7d84194a576f9ec8356ff272642d07dbddc48d42 | refs/heads/master | 2020-09-06T14:42:12.945424 | 2019-11-08T11:42:14 | 2019-11-08T11:42:14 | 219,989,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import inspect
# functions
def whoami():
    """Return the name of the function that called this helper."""
    caller_record = inspect.stack()[1]
    return caller_record[3]
def myfunc():
    """Print this function's own name, resolved via whoami()."""
    current_name = whoami()
    print(current_name)


myfunc()
"sivakumarm.mamillapalli@gmail.com"
] | sivakumarm.mamillapalli@gmail.com |
949d8a85b10490cceaf3bf00e17d2898bd3f6164 | 48d820d4bd6a433c2b0fdb0dcb7657b62db050bf | /Training_Work/ODOO_C_MODULE_BACKUPS/new_app/controllers/controllers.py | 3d7cfd1ee67329ccccc1c9ed2fdf8fe04ca77a97 | [] | no_license | dhruv-aktiv/training_task_data | 1a30580a512aa4831fb547b250faffff11f7e008 | 3d8b25ca812e876a484d387fc57272257322c85f | refs/heads/master | 2023-06-07T07:06:04.193576 | 2021-07-01T04:37:13 | 2021-07-01T04:37:13 | 381,908,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # -*- coding: utf-8 -*-
# from odoo import http
# class NewApp(http.Controller):
# @http.route('/new_app/new_app/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/new_app/new_app/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('new_app.listing', {
# 'root': '/new_app/new_app',
# 'objects': http.request.env['new_app.new_app'].search([]),
# })
# @http.route('/new_app/new_app/objects/<model("new_app.new_app"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('new_app.object', {
# 'object': obj
# })
| [
"dhruv.s@icreativetechnolabs.com"
] | dhruv.s@icreativetechnolabs.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.