blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38d5fcccac7e170f152339629aea554fd246c000 | 8c9c27cb88a2d210a5e2fb5803fe89204dba95ef | /phy/cluster/manual/tests/test_views.py | 840eb0c9c9512508abe958d990f4ba57f638d4fd | [] | no_license | arnefmeyer/phy | c13b1eceb70ee72cf0ff9c4a273e195f122fabc4 | 14663e1f2baad421d6bc9f420d34170c6c969bbe | refs/heads/master | 2020-12-07T15:42:49.605432 | 2016-04-20T21:10:38 | 2016-04-20T21:10:38 | 56,718,986 | 1 | 0 | null | 2016-04-20T20:32:18 | 2016-04-20T20:32:18 | null | UTF-8 | Python | false | false | 6,873 | py | # -*- coding: utf-8 -*-
"""Test views."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import numpy as np
from numpy.testing import assert_equal as ae
from numpy.testing import assert_allclose as ac
from vispy.util import keys
from pytest import fixture
from phy.utils import Bunch
from .conftest import MockController
from ..views import (ScatterView,
_extract_wave,
_extend,
)
#------------------------------------------------------------------------------
# Utils
#------------------------------------------------------------------------------
@fixture
def state(tempdir):
    """Provide a fake GUI state with per-view settings (saved in tempdir)."""
    view_settings = {
        'WaveformView0': Bunch(overlap=False),
        'TraceView0': Bunch(scaling=1.),
        'FeatureView0': Bunch(feature_scaling=.5),
        'CorrelogramView0': Bunch(uniform_normalization=True),
    }
    gui_state = Bunch()
    for view_name, settings in view_settings.items():
        setattr(gui_state, view_name, settings)
    return gui_state
@fixture
def gui(tempdir, state):
    """Create a test GUI from a MockController, seeded with the ``state`` fixture."""
    return MockController(config_dir=tempdir).create_gui(
        add_default_views=False, **state)
def _select_clusters(gui):
    """Show the GUI and walk through a few representative cluster selections."""
    gui.show()
    clustering = gui.controller.manual_clustering
    assert clustering
    # Empty selection first, then progressively larger ones.
    for selection in ([], [0], [0, 2], [0, 2, 3]):
        clustering.select(selection)
#------------------------------------------------------------------------------
# Test utils
#------------------------------------------------------------------------------
def test_extend():
    """Check that _extend() truncates or pads a list to the requested length."""
    values = [0, 1, 2, 3, 4]
    assert _extend(values) == values
    assert _extend(values, 0) == []
    assert _extend(values, 4) == [0, 1, 2, 3]
    assert _extend(values, 5) == values
    # Padding repeats the last element.
    assert _extend(values, 6) == [0, 1, 2, 3, 4, 4]
def test_extract_wave():
    """Check waveform extraction near both edges of the traces array
    (out-of-range samples must be zero-padded)."""
    traces = np.arange(30).reshape((6, 5))
    mask = np.array([0, 1, 1, .5, 0])
    wave_len = 4
    half = wave_len // 2
    expected = {
        0: [[0, 0], [0, 0], [1, 2], [6, 7]],
        1: [[0, 0], [1, 2], [6, 7], [11, 12]],
        2: [[1, 2], [6, 7], [11, 12], [16, 17]],
        5: [[16, 17], [21, 22], [0, 0], [0, 0]],
    }
    for sample, wave in expected.items():
        ae(_extract_wave(traces, sample - half, mask, wave_len)[0], wave)
#------------------------------------------------------------------------------
# Test waveform view
#------------------------------------------------------------------------------
def test_waveform_view(qtbot, gui):
"""Smoke-test the waveform view: every user action is exercised, and each
pair of opposite actions must leave the view state unchanged."""
v = gui.controller.add_waveform_view(gui)
_select_clusters(gui)
ac(v.boxed.box_size, (.1818, .0909), atol=1e-2)
# Toggling twice must be a no-op; show_labels is toggled once and checked.
v.toggle_waveform_overlap()
v.toggle_waveform_overlap()
v.toggle_zoom_on_channels()
v.toggle_zoom_on_channels()
v.toggle_show_labels()
assert not v.do_show_labels
# Box scaling.
bs = v.boxed.box_size
v.increase()
v.decrease()
ac(v.boxed.box_size, bs)
bs = v.boxed.box_size
v.widen()
v.narrow()
ac(v.boxed.box_size, bs)
# Probe scaling.
bp = v.boxed.box_pos
v.extend_horizontally()
v.shrink_horizontally()
ac(v.boxed.box_pos, bp)
bp = v.boxed.box_pos
v.extend_vertically()
v.shrink_vertically()
ac(v.boxed.box_pos, bp)
# Direct assignment of the scaling properties must round-trip.
a, b = v.probe_scaling
v.probe_scaling = (a, b * 2)
ac(v.probe_scaling, (a, b * 2))
a, b = v.box_scaling
v.box_scaling = (a * 2, b)
ac(v.box_scaling, (a * 2, b))
v.zoom_on_channels([0, 2, 4])
# Simulate channel selection.
_clicked = []
@v.gui.connect_
def on_channel_click(channel_idx=None, button=None, key=None):
_clicked.append((channel_idx, button, key))
# Pressing key '2' plus the left mouse button should fire the callback
# exactly once, with (channel 0, button 1, key 2).
v.events.key_press(key=keys.Key('2'))
v.events.mouse_press(pos=(0., 0.), button=1)
v.events.key_release(key=keys.Key('2'))
assert _clicked == [(0, 1, 2)]
v.next_data()
# qtbot.stop()
gui.close()
#------------------------------------------------------------------------------
# Test trace view
#------------------------------------------------------------------------------
def test_trace_view(qtbot, gui):
"""Smoke-test the trace view: time navigation, interval resizing, and
channel scaling must all behave consistently."""
v = gui.controller.add_trace_view(gui)
_select_clusters(gui)
ac(v.stacked.box_size, (1., .08181), atol=1e-3)
# Time navigation: go_to clamps negative times instead of failing.
assert v.time == .5
v.go_to(.25)
assert v.time == .25
v.go_to(-.5)
assert v.time == .125
v.go_left()
assert v.time == .125
v.go_right()
assert v.time == .175
# Change interval size.
v.interval = (.25, .75)
ac(v.interval, (.25, .75))
v.widen()
ac(v.interval, (.125, .875))
v.narrow()
ac(v.interval, (.25, .75))
# Widen the max interval.
v.set_interval((0, gui.controller.duration))
v.widen()
v.toggle_show_labels()
assert not v.do_show_labels
# Change channel scaling.
bs = v.stacked.box_size
v.increase()
v.decrease()
ac(v.stacked.box_size, bs, atol=1e-3)
v.origin = 'upper'
assert v.origin == 'upper'
# qtbot.stop()
gui.close()
#------------------------------------------------------------------------------
# Test feature view
#------------------------------------------------------------------------------
def test_feature_view(qtbot, gui):
"""Smoke-test the feature view: scaling, a custom per-spike attribute,
and channel-click handling."""
v = gui.controller.add_feature_view(gui)
_select_clusters(gui)
assert v.feature_scaling == .5
# Register an extra per-spike attribute that can be used as a dimension.
v.add_attribute('sine',
np.sin(np.linspace(-10., 10., gui.controller.n_spikes)))
v.increase()
v.decrease()
v.on_channel_click(channel_idx=3, button=1, key=2)
v.clear_channels()
v.toggle_automatic_channel_selection()
# qtbot.stop()
gui.close()
#------------------------------------------------------------------------------
# Test scatter view
#------------------------------------------------------------------------------
def test_scatter_view(qtbot, gui):
"""Smoke-test ScatterView with random data; the coords function returns
None whenever cluster 2 is selected, so the view must tolerate missing
data for some selections."""
n = 1000
v = ScatterView(coords=lambda c: Bunch(x=np.random.randn(n),
y=np.random.randn(n),
spike_ids=np.arange(n),
spike_clusters=np.ones(n).
astype(np.int32) * c[0],
) if 2 not in c else None,
# data_bounds=[-3, -3, 3, 3],
)
v.attach(gui)
_select_clusters(gui)
# qtbot.stop()
gui.close()
#------------------------------------------------------------------------------
# Test correlogram view
#------------------------------------------------------------------------------
def test_correlogram_view(qtbot, gui):
"""Smoke-test the correlogram view: normalization toggle plus the bin and
window parameters."""
v = gui.controller.add_correlogram_view(gui)
_select_clusters(gui)
v.toggle_normalization()
v.set_bin(1)
v.set_window(100)
# qtbot.stop()
gui.close()
| [
"cyrille.rossant@gmail.com"
] | cyrille.rossant@gmail.com |
182b04bde697101e629bf4f0c85d2c853c1567a5 | dc0b6b680fd1fc0ab86ed7a3460137cde3a8612d | /Meus códigos/Python/Economia/mdic/mdic_1f.py | ec1951120d5e6bd7b7c22b8aa57b02d253aed461 | [] | no_license | pedromfnakashima/codigos_versionados | 6c8c692bc08a0dda39a82bf91c5245f28d9be330 | c40c94d69f1ee3dd4317786f1c25bcc1bbcc2bb9 | refs/heads/main | 2023-03-21T20:32:53.677701 | 2021-03-20T00:03:10 | 2021-03-20T00:03:10 | 305,754,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,097 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 12:40:44 2020
@author: pedro-salj
"""
#############################
##### GENERAL CONFIGURATION ####
#############################
# NOTE(review): clearing globals() at module top level also drops names the
# interpreter injected and any modules imported so far; it works in a
# Spyder-style "run file" workflow but is fragile -- confirm it is needed.
globals().clear()
""" Mudar diretório """
# Pick the base data directory depending on which machine/user account is
# running the script (personal vs. work).
import os
from pathlib import Path
import getpass
if getpass.getuser() == "pedro":
caminho_base = Path(r'D:\Códigos, Dados, Documentação e Cheat Sheets')
elif getpass.getuser() == "pedro-salj":
caminho_base = Path(r'C:\Users\pedro-salj\Desktop\Pedro Nakashima\Códigos, Dados, Documentação e Cheat Sheets')
""" Mudar diretório para dados Siconfi"""
# Make the Dados folder the working directory for all relative file access.
caminho_wd = caminho_base / 'Dados'
os.chdir(caminho_wd)
import pandas as pd
##########################################################################################################
##########################################################################################################
##########################################################################################################
def gs_mdic(tipo, ufs, ncms):
"""Build monthly FOB-value series from the MDIC trade CSV extracts.

For each NCM product code in ``ncms``, sums VL_FOB per month for Brazil as
a whole and for each state listed in ``ufs``, reading every yearly file
matching ``<tipo>_????.csv`` (``tipo`` is 'EXP' or 'IMP').  Returns a
DataFrame indexed by month with one column per (tipo, region, ncm)
combination; months with no recorded flow are filled with 0.
"""
import glob
import numpy as np
import pandas as pd
# Work inside the folder holding the yearly MDIC CSV extracts.
pasta = caminho_base / 'Dados' / 'mdic' / 'anos'
os.chdir(pasta)
# -----------------------------------------------------------------------------------
# Skeleton index with every month from 1997-01 through 2021-12.
np_datas = np.arange('1997-01-01','2022-01-01', 1, dtype='datetime64[M]')
meses = pd.to_datetime(np_datas).to_frame()
meses.rename(columns={0:'mês'}, inplace=True)
meses.set_index('mês',inplace=True)
meses.index.freq = 'MS'
# -----------------------------------------------------------------------------------
#tipo = 'EXP'
#uf = 'MS'
#ufs = ['MS','MT','GO']
#ncm = 12019000
#ncms = [12019000,10059010]
busca = tipo + '_????.csv'
# -----------------------------------------------------------------------------------
#ncm = 12019000
for index_ncm, ncm in enumerate(ncms):
for index_arq, arq_nome in enumerate(glob.glob(busca)):
print(arq_nome)
pasta = caminho_base / 'Dados' / 'mdic' / 'anos'
df = pd.read_csv(pasta / arq_nome,
encoding = 'latin',
delimiter = ';')
# Build a proper datetime column from the CO_ANO/CO_MES fields.
df.rename(columns={'CO_ANO':'year','CO_MES':'month'},inplace=True)
df['day'] = 1
df['mês'] = pd.to_datetime(df[['year', 'month', 'day']])
df.drop(['year','month','day'],axis=1,inplace=True)
# Keep only this NCM code, then total FOB value by month and state.
cond1 = df['CO_NCM'] == ncm
filtro_ncm = df.loc[cond1,:]
df_soma_por_uf = filtro_ncm.groupby(['mês','SG_UF_NCM'])['VL_FOB'].sum().to_frame()
# Accumulate all yearly files into one frame.
# NOTE(review): DataFrame.append is deprecated in recent pandas;
# pd.concat would be the drop-in replacement.
if index_arq == 0:
df_bruto = df_soma_por_uf.copy()
else:
df_bruto = df_bruto.append(df_soma_por_uf)
df_bruto.reset_index(inplace=True)
df_bruto.set_index('mês',inplace=True)
# Country-wide monthly total (all states) for this NCM.
df_bruto_br = df_bruto.groupby(['mês'])['VL_FOB'].sum().to_frame()
# Column prefix: X = exports, M = imports.
if tipo == 'EXP':
tipo_sigla = 'X'
elif tipo == 'IMP':
tipo_sigla = 'M'
col_nome = tipo_sigla + 'BR' + str(ncm)
df_bruto_br.rename(columns={'VL_FOB':col_nome},inplace=True)
meses_copia = meses.copy()
meses_copia = meses_copia.merge(df_bruto_br,how='left',left_index=True,right_index=True)
# One extra column per requested state (UF).
for uf in ufs:
cond1 = df_bruto['SG_UF_NCM'] == uf
df_bruto_uf_i = df_bruto.copy().loc[cond1,['VL_FOB']]
col_nome = tipo_sigla + uf + str(ncm)
df_bruto_uf_i.rename(columns={'VL_FOB':col_nome},inplace=True)
meses_copia = meses_copia.merge(df_bruto_uf_i,how='left',left_index=True,right_index=True)
# Merge this NCM's columns into the overall result.
if index_ncm == 0:
df_final = meses_copia.copy()
else:
df_final = df_final.merge(meses_copia, how='left', left_index=True, right_index=True)
# Drop months with no data at all, then treat remaining gaps as zero flow.
df_final.dropna(thresh=1, inplace=True)
df_final.fillna(0, inplace=True)
return df_final
# ------------------------------------
def g_médiaMóvel(df, períodos):
    """Return a copy of *df* with every column smoothed by a rolling mean.

    The first ``períodos - 1`` rows of each column are NaN because the
    window is not yet full.  The input frame is left untouched.
    """
    suavizado = df.copy()
    for nome_coluna in suavizado.columns:
        suavizado[nome_coluna] = suavizado[nome_coluna].rolling(períodos).mean()
    return suavizado
# ------------------------------------
ufs = ['MS','MT','GO']  # states of interest
ncms = [12019000,10059010]  # NCM product codes -- presumably soybeans and corn; TODO confirm
# ------------------------------------
# Build the raw monthly export series and a 12-month moving average.
df_series_exp = gs_mdic(tipo='EXP', ufs=ufs, ncms=ncms)
média_móvel = g_médiaMóvel(df_series_exp, períodos=12)
# ------------------------------------
# Append both versions of the series as new sheets of séries.xlsx
# (mode='a' requires the workbook to already exist).
pasta = caminho_base / 'Dados' / 'mdic'
with pd.ExcelWriter(pasta / 'séries.xlsx', mode='a', engine="openpyxl") as writer:
df_series_exp.to_excel(writer, sheet_name='brutas', index=True)
# ------------------------------------
pasta = caminho_base / 'Dados' / 'mdic'
with pd.ExcelWriter(pasta / 'séries.xlsx', mode='a', engine="openpyxl") as writer:
média_móvel.to_excel(writer, sheet_name='médiaMóvel', index=True)
| [
"pedromfnakashima@gmail.com"
] | pedromfnakashima@gmail.com |
ae00981c254d0dc088012dfacb9cdee40e031a73 | 0aec617440075b73e5da64cd1477b6a098ed864c | /data_structures/Project_Show_me_Data_Structures/active_directory.py | f76d2179be46597200adc649a3296be5cf3735c2 | [
"MIT"
] | permissive | severian5it/udacity_dsa | 0b1512cc8c5125149d6be6f78fa14446e7ab5c25 | e47f27b0179961d6107fe46a236ac7d887fe6816 | refs/heads/main | 2023-03-07T02:24:37.299599 | 2021-02-14T10:34:50 | 2021-02-14T10:34:50 | 316,949,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,943 | py | class Group(object):
def __init__(self, _name):
# Create a group with the given name and no users or sub-groups yet.
self.name = _name
self.groups = []
self.users = []
def add_group(self, group):
# Attach *group* as a direct sub-group of this group.
self.groups.append(group)
def add_user(self, user):
# Register *user* as a direct member of this group.
self.users.append(user)
def get_groups(self):
# Return the list of direct sub-groups.
return self.groups
def get_users(self):
# Return the list of direct member user ids.
return self.users
def get_name(self):
# Return the group's display name.
return self.name
def is_user_in_group(user, group):
    """
    Return True if user is in the group, False otherwise.

    The search covers the group's direct members and, recursively, the
    members of every nested sub-group.

    Args:
        user(str): user name/id
        group(class:Group): group to check user membership against
    """
    # Direct membership: stop as soon as the user is found.  The original
    # kept scanning all users and recursed into every sub-group even after
    # a match; short-circuiting returns the same result with less work.
    if user in group.get_users():
        return True
    # Depth-first search of the sub-groups; any() short-circuits on success.
    return any(is_user_in_group(user, sub) for sub in group.get_groups())
if __name__ == "__main__":
    # Build a three-level hierarchy: parent -> child -> subchild.
    parent = Group("parent")
    child = Group("child")
    sub_child = Group("subchild")

    sub_child_user = "sub_child_user"
    sub_child.add_user(sub_child_user)
    child.add_group(sub_child)
    parent.add_group(child)

    # (label, user, group, expected result)
    cases = [
        ("is sub child user in parent group?", sub_child_user, parent),        # True
        ("is sub child user in child group?", sub_child_user, child),          # True
        ("is sub child user in sub child group?", sub_child_user, sub_child),  # True
        ("is sub child user2 in sub child group?", "sub_child_user2", parent), # False
        ("is empty string in sub child group?", "", parent),                   # False
        ("is Nonein sub child group?", None, parent),                          # False
    ]
    for label, who, where in cases:
        print(f"{label} {is_user_in_group(who, where)}")
| [
"pierluca@amazon.com"
] | pierluca@amazon.com |
4fc662539852d925c3aa23683981860cecb38cb4 | e2590e0a78046a22131b69c76ebde21bf042cdd1 | /ABC201_300/ABC243/B.py | e7f62726863d27002413f7819fe6d8366e13c7d8 | [] | no_license | masato-sso/AtCoderProblems | b8e23941d11881860dcf2942a5002a2b19b1f0c8 | fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8 | refs/heads/main | 2023-01-22T23:57:58.509585 | 2023-01-21T14:07:47 | 2023-01-21T14:07:47 | 170,867,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py |
N = int(input())  # length of the two sequences (not used after this point)
A = list(map(int, input().split()))  # first sequence
B = list(map(int, input().split()))  # second sequence
def getIndex(l, x):
    """Return the index of the first occurrence of x in list l, or -1 if absent.

    Uses a single scan via list.index (EAFP) instead of the original
    membership test followed by index, which scanned the list twice.
    """
    try:
        return l.index(x)
    except ValueError:
        return -1
ans1 = 0  # values appearing at the same position in both A and B
ans2 = 0  # values present in both sequences but at different positions
for aIdx,a in enumerate(A):
bIdx = getIndex(B,a)
if(bIdx == -1):
# a does not occur in B at all.
continue
if(aIdx == bIdx):
ans1+=1
else:
ans2+=1
print(ans1)
print(ans2)
"masato@seijinnoMacBook-Pro-2.local"
] | masato@seijinnoMacBook-Pro-2.local |
cad633fa0cd47dc61a6c9b15c55400e0fab5095e | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634697451274240_0/Python/Quorrin/pancakes.py | c760b7448180fb75bcb85128bc8bb4b5612e82e8 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | f = open('input.in', 'r')
n = int(f.readline().strip())  # number of test cases on the first line
out = open('output.txt', 'w')  # results in Code Jam "Case #i: ans" format
pancakeDict = {}  # global memo used by dp(); reset before each test case
def condenseStack(pancakes):
    """Collapse consecutive runs of identical flags into a single element.

    E.g. ['+', '+', '-', '-', '+'] -> ['+', '-', '+'].  Run-length
    collapsing is exactly what itertools.groupby does, so use it instead
    of tracking the previous element by hand.
    """
    from itertools import groupby
    return [flag for flag, _run in groupby(pancakes)]
def flipSign(sign):
    """Return the opposite pancake flag: '+' for '-', '-' for anything else."""
    return '+' if sign == '-' else '-'
def reverse(inArr, idx):
    """Flip the top ``idx`` pancakes: reverse their order and invert each
    sign.  Elements from position ``idx`` onward are copied unchanged."""
    retArr = list(inArr)
    lo, hi = 0, idx - 1
    # Two-pointer swap; when lo == hi (odd prefix) the middle element is
    # assigned twice with the same flipped value, i.e. flipped exactly once.
    while lo <= hi:
        retArr[lo], retArr[hi] = flipSign(retArr[hi]), flipSign(retArr[lo])
        lo += 1
        hi -= 1
    return retArr
def getClearedLevel(inputArr):
    """Return 1 + the index of the deepest '-' pancake, or 0 if every
    pancake is already happy-side up."""
    for pos in reversed(range(len(inputArr))):
        if inputArr[pos] == '-':
            return pos + 1
    return 0
def flipped(inArr):
    """Return True when no '-' pancake remains anywhere in the stack."""
    return all(flag != '-' for flag in inArr)
def dp(inputArr, depth, unsortedLevel):
"""Recursively search for the minimum number of flips that turn every
pancake happy-side up, memoizing stack states in the global pancakeDict.

depth is the number of flips taken so far; unsortedLevel is the deepest
position still showing '-' (0 means the stack is solved).
"""
if (unsortedLevel == 0):
return depth
elif (depth > 20):
# Bound the search: give up past 20 flips.
return 20
else:
minDepth = 200
# Try flipping every prefix up to the deepest unhappy pancake.
for i in range(1,unsortedLevel+1):
newPancakes = condenseStack(reverse(inputArr, i))
pHash = ''.join(newPancakes)
if (pHash in pancakeDict):
# NOTE(review): the cached value is a total depth measured from the
# root, and hitting the cache skips the remaining i values -- this
# looks depth-dependent; verify the memoization is actually sound.
return pancakeDict[pHash]
else:
currentDepth = dp(newPancakes, depth+1, getClearedLevel(newPancakes))
pancakeDict[pHash] = currentDepth
if (currentDepth < minDepth):
minDepth = currentDepth
return minDepth
for i in range(n):
# One test case per line: a string of '+'/'-' pancakes (top first).
inputStr = f.readline().strip()
pancakes = condenseStack(list(inputStr))
print (pancakes)
count = -1
# Trivial cases handled directly; otherwise run the memoized search.
if (len(inputStr) == 0):
count = 0
elif (len(inputStr) == 1):
if (inputStr == "-"):
count = 1
else:
count = 0
else:
# Fresh memo per case, starting from the deepest '-' position.
pancakeDict = {}
count = dp(pancakes, 0, getClearedLevel(pancakes))
print (count)
out.write("Case #" + str(i+1) + ": " + str(count)+ "\n")
out.close()
f.close()
| [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
4e0e36f2e345051fa5addfbd79acdc5acfd44bae | 88c1fa6dd5b51a93c4345951c41c4f56a82ba5a3 | /LiveProject-Python/AppBuilder9000/ZPYLP0612/FoodyApp/apps.py | e2a81009cb3725b2536136985c67aef7a083403c | [] | no_license | Sean-Beyer/PythonDjango-LiveProject | 83335c4d5e22d00c34dac1c71c39f770ad896c4e | 986b567fad49368c52182eb5196534ff8a8ebcfc | refs/heads/master | 2022-12-13T22:43:21.820355 | 2020-09-01T00:34:18 | 2020-09-01T00:34:18 | 291,854,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from django.apps import AppConfig
class FoodyappConfig(AppConfig):
# Django application configuration; `name` is the dotted module path
# referenced from INSTALLED_APPS.
name = 'FoodyApp'
| [
"61070387+Thalious@users.noreply.github.com"
] | 61070387+Thalious@users.noreply.github.com |
df403f3387076740ba819c48f201f9fb9d443b4a | b5c92150b0fb76daf9b8725c7a64ba1b54f2d9c7 | /product_grammage/models/purchase.py | 7804f5922d646144f5f8f8d4ae275d50bff4d991 | [] | no_license | hashemalycore/CMNT_00107_2017_SAR_addons | 63da3c66eddc99b585671cc85a53661a497771aa | 071646e495fcd9563f72a02f6630ee4d70afa438 | refs/heads/master | 2020-04-02T10:25:32.457793 | 2018-02-12T13:09:16 | 2018-02-12T13:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
# © 2017 Comunitea Servicios Tecnológicos S.L. (http://comunitea.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models, fields
class PurchaseOrder(models.Model):
# NOTE(review): despite the class name, this extends the purchase order
# *line* model ('purchase.order.line'), not 'purchase.order'.
_inherit = 'purchase.order.line'
# Characteristics mirrored from the linked product via related fields.
thread = fields.Float('Thread', related='product_id.thread')
gauge = fields.Float('Gauge', related='product_id.gauge')
# Values entered directly on the order line.
width = fields.Float('Width')
grammage = fields.Float('Grammage')
| [
"javierjcf@gmail.com"
] | javierjcf@gmail.com |
4a25a135777867175549f9b2359a28dc05cfc0ef | ed0ed8863e05384db504fa58e47db1b49977bb7d | /AnalysisAndDesignOfAlgorithms/python/package_memory.py | 350a2ed835ac8013a3fae6aeca7fb450a8b27701 | [] | no_license | rogeroyer/Accumulation | fa4f6083cd1fe3d112fe4d62275280033070b174 | ac04f486761744362c57abcc51f7768c775f270c | refs/heads/master | 2022-07-22T18:58:05.679619 | 2022-07-14T04:39:53 | 2022-07-14T04:39:53 | 102,483,020 | 10 | 33 | null | null | null | null | UTF-8 | Python | false | false | 2,218 | py | class package_memory_deal(object):
def __init__(self, weight, value, max_weight, printMatrix=False):
# 0/1-knapsack solver using memoized recursion (the "memory function"
# method).  weight/value carry a dummy entry at index 0 so items are 1..n.
self.weight = weight # item weights #
self.value = value # item values #
self.max_weight = max_weight # knapsack capacity #
self.array_length = len(self.value) # number of items (incl. index-0 dummy) #
self.select = [[-1 for i in range(self.max_weight+1)] for j in range(self.array_length)] # memo table; -1 = not yet computed #
self.printMatrix = printMatrix # whether print_out() also dumps the memo table #
for index in range(0, self.max_weight+1): # row 0: with no items the value is 0 #
self.select[0][index] = 0
for index in range(1, self.array_length): # column 0: with zero capacity the value is 0 #
self.select[index][0] = 0
def print_out(self):
# Print the optimal total value, optionally the memo matrix, and the
# selected items.
print(self.MFKnapsack(self.array_length - 1, self.max_weight))
if self.printMatrix is True:
# NOTE(review): assumes numpy is imported as `np` at module level,
# which is not visible in this chunk -- confirm.
self.select = np.array(self.select)
print(self.select)
self.show_element()
def MFKnapsack(self, i, j):
'''Memoized knapsack recurrence: best value using items 1..i with capacity j.'''
if self.select[i][j] < 0:
# Not computed yet: either item i does not fit, or take the better of
# skipping it vs. including it.
if j < self.weight[i]:
value = self.MFKnapsack(i - 1, j)
else:
value = max(self.MFKnapsack(i - 1, j), self.value[i] + self.MFKnapsack(i - 1, j - self.weight[i]))
self.select[i][j] = value
return self.select[i][j] # return the cached maximum value #
def show_element(self):
'''Print the items chosen in the optimal solution by walking the memo
table backwards from the full capacity.'''
remain_space = self.max_weight # remaining knapsack capacity #
for i in range(self.array_length-1, 0, -1):
if remain_space >= self.weight[i]:
# Item i was taken iff including it accounts exactly for its value.
if self.select[i][remain_space] - self.select[i-1][remain_space-self.weight[i]] == self.value[i]:
print('item ', i, ' is selected!')
remain_space = remain_space - self.weight[i]
def main():
    """Solve a small sample knapsack instance and print the result."""
    # Index 0 of each list is a dummy entry; real items are 1..4.
    weights = [0, 2, 1, 3, 2]
    values = [0, 12, 10, 20, 15]
    capacity = 5
    # Larger alternative instance, kept from the original for experiments:
    # weights = [0, 19, 23, 12, 34, 24, 34, 56, 24, 53, 35]
    # values = [0, 57, 68, 87, 17, 12, 21, 31, 42, 14, 15]
    # capacity = 300
    solver = package_memory_deal(weights, values, capacity, True)
    solver.print_out()


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | rogeroyer.noreply@github.com |
8c6b878927c7dac6590fca1e63d2cfb1a5ef4b1f | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/vanilla/testcase/firstcases/testcase3_003.py | cf0d72f444c3338e02c82106cee27e79e171819d | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,748 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities for driving the Vanilla Music app on an Android 4.4
# emulator, with Jacoco coverage instrumentation enabled and app state
# preserved between runs (noReset).
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'ch.blinkenlights.android.vanilla',
'appActivity' : 'ch.blinkenlights.android.vanilla.LibraryActivity',
'resetKeyboard' : True,
'androidCoverage' : 'ch.blinkenlights.android.vanilla/ch.blinkenlights.android.vanilla.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* through the shell, wait *timeout* seconds, then terminate it.

    The process is always terminated after the fixed delay, whether or not
    it has finished; its output is captured into a pipe and discarded.
    """
    proc = subprocess.Popen(cmd,
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE,
                            shell=True)
    time.sleep(timeout)
    proc.terminate()
def getElememt(driver, str):
    """Find a UI element by UiAutomator selector, retrying up to 5 times.

    Each failed attempt sleeps one second.  Once the retries are exhausted,
    tap the screen once (to dismiss any overlay) and try a final time,
    letting the lookup raise if it still fails.
    """
    attempts = 5
    while attempts > 0:
        try:
            return driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
            attempts -= 1
    # Last resort: tap near the top-left corner, then one final lookup.
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str)
def getElememtBack(driver, str1, str2):
    """Find an element by the primary selector *str1* (2 tries), then fall
    back to *str2* (5 tries), sleeping one second between attempts.

    If every attempt fails, tap the screen once and do one last *str2*
    lookup, letting it raise on failure.
    """
    for selector, tries in ((str1, 2), (str2, 5)):
        for _ in range(tries):
            try:
                return driver.find_element_by_android_uiautomator(selector)
            except NoSuchElementException:
                time.sleep(1)
    # Last resort: tap near the top-left corner, then one final lookup.
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str2)
def swipe(driver, startxper, startyper, endxper, endyper):
    """Swipe between two points given as fractions of the window size,
    retrying once after a one-second pause if the first attempt fails."""
    dims = driver.get_window_size()
    start_x = int(dims["width"] * startxper)
    start_y = int(dims["height"] * startyper)
    end_x = int(dims["width"] * endxper)
    end_y = int(dims["height"] * endyper)
    try:
        driver.swipe(start_x=start_x, start_y=start_y,
                     end_x=end_x, end_y=end_y, duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=start_x, start_y=start_y,
                     end_x=end_x, end_y=end_y, duration=2000)
# testcase003
# Generated UI walk-through of the Vanilla Music app: open the Artists tab,
# start playback, and exercise several list/cover interactions.
# NOTE(review): this is Python 2 code (print statements, `except Exception, e`);
# it will not run under Python 3 -- confirm before porting.
try :
starttime = time.time()
# Connect to the local Appium server with the capabilities defined above.
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"Artists\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"ch.blinkenlights.android.vanilla:id/dragger\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Happy High EPBlackbird Blackbird\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Play all\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"01:48\")", "new UiSelector().className(\"android.widget.TextView\").instance(13)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"01:48\")", "new UiSelector().className(\"android.widget.TextView\").instance(13)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"intermissionchiptek, She\")", "new UiSelector().className(\"android.widget.TextView\").instance(22)")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"ch.blinkenlights.android.vanilla:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"ch.blinkenlights.android.vanilla:id/cover\").className(\"android.widget.ImageView\")")
TouchAction(driver).long_press(element).release().perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
# Always dump the Jacoco coverage data, close the driver, and force-stop
# whatever app ended up in the foreground (if it is not Vanilla itself).
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_003\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'ch.blinkenlights.android.vanilla'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
374c6dc3c8939e43c4c3fecd5522911c9253d932 | c8098e3907f39210ac159cf78f66cd871440fc10 | /vagrant/lesson 1/database_setup.py | 45e857aab66b72fc7b79507451798969667f5bee | [] | no_license | jaapdejong/fullstack-nanodegree-vm--ud088 | e297dbc421d19df61499a16ae87ca572426228df | ba383a9ab7a315279e4acd0425bbca1e25ba943a | refs/heads/master | 2021-06-11T23:14:37.669565 | 2017-01-22T11:59:55 | 2017-01-22T11:59:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/python
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Restaurant(Base):
__tablename__ = 'restaurant'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class MenuItem(Base):
__tablename__ = 'menu_item'
name = Column(String(80), nullable=False)
id = Column(Integer, primary_key=True)
description = Column(String(250))
price = Column(String(8))
course = Column(String(250))
restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
restaurant = relationship(Restaurant)
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.create_all(engine)
| [
"jaap.dejong@nedap.com"
] | jaap.dejong@nedap.com |
4f657bc1f1c2e30b69b8ba84ed32bd6ee4e0ddf7 | 76133934b1dd287273a9bfa0c801d10d08a21b21 | /test/functional/getchaintips.py | 95d9627833e5526bc62095eaf21841cd9365d834 | [
"MIT"
] | permissive | kenfmcoin/kenfmcoin | d8783b34fcb3ae01067e8d1b33e3a73e3b82b1f9 | 1fa48487593233f2066757dc54f48b2349e2d9db | refs/heads/master | 2020-03-10T17:53:31.569229 | 2018-04-14T12:28:55 | 2018-04-14T12:28:55 | 129,511,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,183 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The KenFMcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import KenFMcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (KenFMcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def run_test (self):
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all([self.nodes[:2], self.nodes[2:]])
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| [
"37983255+spineinhalb@users.noreply.github.com"
] | 37983255+spineinhalb@users.noreply.github.com |
10f80e4c65bd78181993027e8a5a587a62070faf | cfbf8e78017a7c97107112680b04b2733bd27f8e | /Raw_data_Modules/Modules/DatabaseConnector.py | 852a60a6801bde9f0fdb33ac213cfeb2f701874b | [] | no_license | WenRichard/Web-page-Recommendation | bdd279e382a119a2068480f5f49e1703d0703777 | 0757b43f2d3f62c29c4aca9c1dd7a8b327204f32 | refs/heads/master | 2020-03-27T20:11:58.218795 | 2018-09-02T04:46:46 | 2018-09-02T04:46:46 | 147,047,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | import pymysql
import pandas as pd
# 连接数据库
def connector(host, user, password, database):
db = pymysql.connect(host, user, password, database)
return db
# 断开数据库
def closer(db):
db.close()
return "the database has been closed."
# 读取数据:从csv文本文件
# url, date, features(1~n), labels(1~m)
class CSVReader(object):
def __init__(self, file_path):
self.file_path = file_path
self.df = self.csv_to_df()
def csv_to_df(self):
return pd.read_csv(self.file_path)
# 数据库连接器类
class MySQLReader(object):
    """Thin helper around a pymysql connection (legacy API).

    NOTE(review): the initializer is (mis)named ``__int__`` -- a typo for
    ``__init__`` -- so instances are NOT connected on construction and
    callers must invoke ``__int__`` explicitly (see the ``__main__`` block).
    The name is kept as-is for backward compatibility with those callers.
    """

    def __int__(self, host="localhost", user="root",
                password="broadtech", database="world"):
        # Legacy explicit initializer: opens the underlying connection.
        self.database_connector = pymysql.connect(host, user, password, database)

    def connector(self, host, user, password, database):
        """Open and return a brand-new connection (does not touch self)."""
        return pymysql.connect(host, user, password, database)

    def close(self):
        """Close the connection opened by ``__int__``."""
        self.database_connector.close()

    def exec(self, sql_exp=""):
        """Execute *sql_exp* and return all rows, or None when empty."""
        cursor = self.database_connector.cursor()
        try:
            cursor.execute(sql_exp)
            data = cursor.fetchall()
        finally:
            cursor.close()  # was leaked; always release the cursor
        # Was `if data is ()`: identity comparison against a literal is
        # fragile (relies on CPython's empty-tuple caching and warns on 3.8+).
        if not data:
            print("Query result is empty.")
            return None
        return data

    def get_table(self, table_name="None"):
        """Fetch an entire table as a pandas DataFrame.

        The first column (assumed to be the id column) is dropped.
        NOTE(review): the table name is interpolated directly into the SQL
        text; identifiers cannot be bound as parameters, so never pass
        untrusted input here.
        """
        data = self.exec("select * from %s;" % table_name)
        if data is None:
            print("Get an empty table.")
            return None
        frame = pd.DataFrame([list(row) for row in data])
        del frame[0]  # drop the leading id column
        return frame
return None
if __name__ == "__main__":
d = MySQLReader()
d.__int__(database="WPF")
data = d.exec("select * from visit_url_features limit 1;")
print(data)
| [
"xiezhengwen2013@163.com"
] | xiezhengwen2013@163.com |
b39c2f3984971a040831df35e49517e3fb93df8c | 3e63befd66d0f8fddaba4ce8c1ed73525c32a5aa | /venv/Lib/site-packages/mediapipe/calculators/core/sequence_shift_calculator_pb2.py | ec7af4e37ba7cb45a6a8086a1af24275c15bee53 | [
"MIT"
] | permissive | tanvirtareq/awesome-hand-gesture-detection | b0ecc6636e810412950b705e6ef5c1d83099b547 | ccc836557b730cf34861301712de0de3eec1076d | refs/heads/main | 2023-06-04T02:24:34.452783 | 2021-06-18T11:36:39 | 2021-06-18T11:36:39 | 389,102,297 | 1 | 0 | MIT | 2021-07-24T13:10:45 | 2021-07-24T13:10:45 | null | UTF-8 | Python | false | true | 3,553 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/core/sequence_shift_calculator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/core/sequence_shift_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_pb=_b('\n:mediapipe/calculators/core/sequence_shift_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\x94\x01\n\x1eSequenceShiftCalculatorOptions\x12\x19\n\rpacket_offset\x18\x01 \x01(\x05:\x02-12W\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\x87\xba\xa9\x33 \x01(\x0b\x32).mediapipe.SequenceShiftCalculatorOptionsB\x0c\xa2\x02\tMediaPipe')
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SEQUENCESHIFTCALCULATOROPTIONS = _descriptor.Descriptor(
name='SequenceShiftCalculatorOptions',
full_name='mediapipe.SequenceShiftCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='packet_offset', full_name='mediapipe.SequenceShiftCalculatorOptions.packet_offset', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.SequenceShiftCalculatorOptions.ext', index=0,
number=107633927, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None),
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=260,
)
DESCRIPTOR.message_types_by_name['SequenceShiftCalculatorOptions'] = _SEQUENCESHIFTCALCULATOROPTIONS
SequenceShiftCalculatorOptions = _reflection.GeneratedProtocolMessageType('SequenceShiftCalculatorOptions', (_message.Message,), dict(
DESCRIPTOR = _SEQUENCESHIFTCALCULATOROPTIONS,
__module__ = 'mediapipe.calculators.core.sequence_shift_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.SequenceShiftCalculatorOptions)
))
_sym_db.RegisterMessage(SequenceShiftCalculatorOptions)
_SEQUENCESHIFTCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _SEQUENCESHIFTCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_SEQUENCESHIFTCALCULATOROPTIONS.extensions_by_name['ext'])
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\242\002\tMediaPipe'))
# @@protoc_insertion_point(module_scope)
| [
"airriaislam@gmail.com"
] | airriaislam@gmail.com |
446ce31ab4d34876176ddf4f9eb6c48cb5d1cbaf | 6ff4a7e4743e6574a9584ef5f672a976d4976baa | /code/learn-python/python/util/file/file_write_overwrite.py | c5642fd12236a40a39224f604f1b9d3ad8d5218c | [
"BSD-2-Clause"
] | permissive | lsieun/learn-python | 1c7d6eeb80aa6e47e5e94a055ba9b674fd886cb9 | ce067202e8e77351de1fc99897185b6363d98219 | refs/heads/master | 2021-06-26T07:41:32.393981 | 2018-10-20T04:21:54 | 2018-10-20T04:21:54 | 111,169,474 | 1 | 1 | BSD-2-Clause | 2019-05-07T12:52:34 | 2017-11-18T02:21:19 | Python | UTF-8 | Python | false | false | 119 | py | #写文件(覆盖写)
path = "D://mynote.txt"
f = open(path,"w",encoding="utf-8")
f.write("我爱阿芬")
f.close() | [
"331505785@qq.com"
] | 331505785@qq.com |
c9bc5d6c76ec5047e3101a82c773eb67ac5b156d | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/tensorflow/contrib/tensor_forest/client/random_forest_test.py | 1e774dab2b06f2db402aebc4b8b64d052e5a56d6 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 3,224 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorForestTrainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
# NOTE(review): adds RTLD_GLOBAL to the interpreter's dlopen flags so
# symbols in subsequently loaded shared libraries are globally visible --
# presumably required for TensorFlow's native extensions to resolve each
# other's symbols; see the referenced issue for details.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
  import ctypes
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.platform import test
class TensorForestTrainerTests(test.TestCase):
  """Smoke tests for the TensorForest estimator wrappers."""

  def testClassification(self):
    """Fits and evaluates a multi-class classifier on the iris matrix data."""
    params = tensor_forest.ForestHParams(
        num_trees=3,
        max_nodes=1000,
        num_classes=3,
        num_features=4,
        split_after_samples=20)
    estimator = random_forest.TensorForestEstimator(params.fill())

    dataset = base.load_iris()
    features = dataset.data.astype(np.float32)
    targets = dataset.target.astype(np.float32)

    estimator.fit(x=features, y=targets, steps=100, batch_size=50)
    estimator.evaluate(x=features, y=targets, steps=10)

  def testClassificationTrainingLoss(self):
    """Same classification task, trained via the training-loss forest."""
    params = tensor_forest.ForestHParams(
        num_trees=3, max_nodes=1000, num_classes=3, num_features=4)
    estimator = random_forest.TensorForestEstimator(
        params, graph_builder_class=(tensor_forest.TrainingLossForest))

    dataset = base.load_iris()
    features = dataset.data.astype(np.float32)
    targets = dataset.target.astype(np.float32)

    hooks = [random_forest.TensorForestLossHook(10)]
    estimator.fit(x=features, y=targets, steps=100, monitors=hooks)
    estimator.evaluate(x=features, y=targets, steps=10)

  def testRegression(self):
    """Fits and evaluates a single-output regressor on the boston data."""
    params = tensor_forest.ForestHParams(
        num_trees=3,
        max_nodes=1000,
        num_classes=1,
        num_features=13,
        regression=True,
        split_after_samples=20)
    estimator = random_forest.TensorForestEstimator(params.fill())

    dataset = base.load_boston()
    features = dataset.data.astype(np.float32)
    targets = dataset.target.astype(np.float32)

    estimator.fit(x=features, y=targets, steps=100, batch_size=50)
    estimator.evaluate(x=features, y=targets, steps=10)
if __name__ == "__main__":
test.main()
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
80d38101f88dedd6685e1de271ee7ba897dc1487 | 3db48e7c13b330af7c488820d14d22edf0a7cfda | /그래프 이론/[10-3]위상정렬 알고리즘.py | 06fd577232f9442862e3ff32bd6969a65947f818 | [] | no_license | kim-kiwon/Coding-test | 1555d7e7699a21655e86f892e76f784accf4b9cc | aa8563ab54596c9c6dace84494d4f68fbd8e97f4 | refs/heads/master | 2023-04-01T10:04:11.152485 | 2021-04-05T10:17:51 | 2021-04-05T10:17:51 | 328,202,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | #위상정렬 알고리즘 : 방향 그래프로 순서가 주어질 경우. 모든 노드를 순서에 거스르지 않고 정렬.
#선수과목 고려한 수강신청이 주된 문제.
#진입차수 0 인 노드 큐에 삽입. 큐에서 제거시 해당 노드에서 나가는 간선 모두제거. 반복
#큐가 비었는데 방문하지 않는 노드가 남았다 -> 사이클존재 (남은 노드 중에 진입차수 0 인 노드가 없게되므로)
from collections import deque
v, e = map(int, input().split())
indegree = [0] * (v+1) #진입차수 0으로 초기화
graph = [[] for i in range(v+1)]
#간선 입력받기
for _ in range(e):
a, b = map(int, input().split())
graph[a].append(b)
indegree[b] +=1 #b 진입차수 증가
def topology_sort():
    """Kahn's algorithm: print vertices 1..v in a topological order.

    Consumes the module-level ``graph`` / ``indegree`` structures (the
    in-degree table is destroyed in the process).
    """
    order = []
    # Seed the queue with every vertex that has no incoming edge.
    ready = deque(node for node in range(1, v + 1) if indegree[node] == 0)
    while ready:
        current = ready.popleft()
        order.append(current)
        # "Remove" current's outgoing edges; vertices whose in-degree
        # drops to zero become ready.
        for successor in graph[current]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                ready.append(successor)
    for vertex in order:
        print(vertex, end=' ')
# Run Kahn's topological sort over the graph read above.
topology_sort()
| [
"76721493+kim-kiwon@users.noreply.github.com"
] | 76721493+kim-kiwon@users.noreply.github.com |
1431f3ebabae290a7e25a9c3f1c2fd5ffb3a26eb | 34652a47355a8dbe9200db229a1bbc62619de364 | /Matlibplots/samples2/contour_label_demo.py | 44ac1ddc73c54dcdf9cad3b695a1f9fb4dc177ef | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | #!/usr/bin/env python
"""
Illustrate some of the more advanced things that one can do with
contour labels.
See also contour_demo.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
# Draw tick marks outward so labels near the axes stay legible.
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
##################################################
# Define our surface
##################################################
# Sample a grid over [-3, 3] x [-2, 2] at 0.025 spacing.
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
##################################################
# Make contour labels using creative float classes
# Follows suggestion of Manuel Metz
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
# Define a class that forces representation of float to look a certain way
# This removes the trailing zero so '1.0' becomes '1'
class nf(float):
    """A float whose repr() drops the decimal part when it is zero.

    Contour labels built from these read '1' instead of '1.0' but keep one
    decimal place otherwise (e.g. '1.5').
    """
    def __repr__(self):
        # Render with one decimal; if that decimal digit is 0, re-render
        # without it.  (Local renamed from `str`, which shadowed the builtin.)
        text = '%.1f' % (self.__float__(),)
        if text[-1] == '0':
            return '%.0f' % self.__float__()
        return '%.1f' % self.__float__()
# Recast levels to new class
CS.levels = [nf(val) for val in CS.levels ]
# Label levels with specially formatted floats
# (TeX output needs the percent sign escaped; plain text does not)
if plt.rcParams["text.usetex"]:
    fmt = r'%r \%%'
else:
    fmt = '%r %%'
plt.clabel(CS, CS.levels, inline=True, fmt=fmt, fontsize=10)
##################################################
# Label contours with arbitrary strings using a
# dictionary
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
# Map each contour level to a word; clabel looks labels up in this dict.
fmt = {}
strs = [ 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh' ]
for l, s in zip(CS.levels, strs):
    fmt[l] = s
# Label every other level using strings
plt.clabel(CS, CS.levels[::2], inline=True, fmt=fmt, fontsize=10)
# Use a Formatter
plt.figure()
CS = plt.contour(X, Y, 100 ** Z, locator=plt.LogLocator())
fmt = ticker.LogFormatterMathtext()
fmt.create_dummy_axis()
plt.clabel(CS, CS.levels, fmt=fmt)
plt.title("$100^Z$")
plt.show()
| [
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
4e950512f3e46044884aa3e2cb21adc6db35ee7a | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/d1fcf255c5402d75a3f7b450bd1e795196d5817a-<_login>-bug.py | e920b6d9685aacf11b23547d526d2bc7ebec0fda | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py |
def _login(self):
    """Log in to learning.oreilly.com via the oreilly.com SSO flow.

    No-op when no credentials are configured or a session is already
    active; raises ExtractorError when authentication fails.
    """
    (username, password) = self._get_login_info()
    if (username is None):
        return
    # Hitting the login-check URL redirects to the home page when an
    # existing session cookie is still valid.
    (_, urlh) = self._download_webpage_handle('https://learning.oreilly.com/accounts/login-check/', None, 'Downloading login page')

    def is_logged(urlh):
        # Heuristic: a successful login lands on a .../home/ URL.
        return ('learning.oreilly.com/home/' in compat_str(urlh.geturl()))
    if is_logged(urlh):
        self.LOGGED_IN = True
        return
    # Otherwise we were redirected to the SSO page; recover the `next`
    # redirect target from its query string.
    redirect_url = compat_str(urlh.geturl())
    parsed_url = compat_urlparse.urlparse(redirect_url)
    qs = compat_parse_qs(parsed_url.query)
    next_uri = compat_urlparse.urljoin('https://api.oreilly.com', qs['next'][0])
    # POST the credentials as JSON; HTTP 400 is an expected response for a
    # rejected login, so it must not abort the download.
    (auth, urlh) = self._download_json_handle('https://www.oreilly.com/member/auth/login/', None, 'Logging in', data=json.dumps({
        'email': username,
        'password': password,
        'redirect_uri': next_uri,
    }).encode(), headers={
        'Content-Type': 'application/json',
        'Referer': redirect_url,
    }, expected_status=400)
    credentials = auth.get('credentials')
    # presumably `credentials` carries the server's error message when the
    # login was rejected -- TODO confirm against the API response shape.
    if ((not auth.get('logged_in')) and (not auth.get('redirect_uri')) and credentials):
        raise ExtractorError(('Unable to login: %s' % credentials), expected=True)
    self._apply_first_set_cookie_header(urlh, 'groot_sessionid')
    # Follow the post-login redirect to finish establishing the session.
    (_, urlh) = self._download_webpage_handle((auth.get('redirect_uri') or next_uri), None, 'Completing login')
    if is_logged(urlh):
        self.LOGGED_IN = True
        return
    raise ExtractorError('Unable to log in')
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
78f0996a27e3e1f8cbd9d1d794408e8374c9d79c | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/DGS-3120-24SC-DC-L3MGMT-MIB.py | 0352de5e08aab9967fa55cbb67c04f6cfa9d6da5 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 18,396 | py | #
# PySNMP MIB module DGS-3120-24SC-DC-L3MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DGS-3120-24SC-DC-L3MGMT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:28:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
Ipv6Address, = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, iso, Counter64, ObjectIdentity, Bits, Integer32, Gauge32, MibIdentifier, Unsigned32, TimeTicks, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "iso", "Counter64", "ObjectIdentity", "Bits", "Integer32", "Gauge32", "MibIdentifier", "Unsigned32", "TimeTicks", "Counter32", "IpAddress")
TruthValue, DisplayString, TextualConvention, RowStatus, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention", "RowStatus", "PhysAddress")
dlink_Dgs3120Proj_Dgs3120_24SC_DC, = mibBuilder.importSymbols("SWDGS3120PRIMGMT-MIB", "dlink-Dgs3120Proj-Dgs3120-24SC-DC")
swL3MgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3))
if mibBuilder.loadTexts: swL3MgmtMIB.setLastUpdated('1211160000Z')
if mibBuilder.loadTexts: swL3MgmtMIB.setOrganization(' ')
class NodeAddress(OctetString):
    """Octet string constrained to exactly 6 bytes (MAC-sized node address)."""
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
    fixedLength = 6
class NetAddress(OctetString):
    """Octet string constrained to exactly 4 bytes (IPv4-sized net address)."""
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
    fixedLength = 4
swL3IpMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2))
swL3IpCtrlMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1))
swL3IpFdbMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2))
swL3IpCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3), )
if mibBuilder.loadTexts: swL3IpCtrlTable.setStatus('current')
swL3IpCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1), ).setIndexNames((0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3IpCtrlInterfaceName"))
if mibBuilder.loadTexts: swL3IpCtrlEntry.setStatus('current')
swL3IpCtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlInterfaceName.setStatus('current')
swL3IpCtrlIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlIfIndex.setStatus('current')
swL3IpCtrlIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpAddr.setStatus('current')
swL3IpCtrlIpSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpSubnetMask.setStatus('current')
swL3IpCtrlVlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlVlanName.setStatus('current')
swL3IpCtrlProxyArp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlProxyArp.setStatus('current')
swL3IpCtrlSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 7), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlSecondary.setStatus('current')
swL3IpCtrlMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("bootp", 3), ("dhcp", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlMode.setStatus('current')
swL3IpCtrlAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlAdminState.setStatus('current')
swL3IpCtrlIpv4AdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpv4AdminState.setStatus('current')
swL3IpCtrlIpv6AdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpv6AdminState.setStatus('current')
swL3IpCtrlIpv6LinkLocalAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 14), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalAddress.setStatus('current')
swL3IpCtrlIpv6LinkLocalPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalPrefixLen.setStatus('current')
swL3IpCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 16), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpCtrlState.setStatus('current')
swL3IpCtrlIpv6LinkLocalAutoState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enabled", 2), ("disabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpv6LinkLocalAutoState.setStatus('current')
swL3IpCtrlLocalProxyArp = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlLocalProxyArp.setStatus('current')
swL3IpCtrlDhcpv6ClientState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlDhcpv6ClientState.setStatus('current')
swL3IpCtrlIpDhcpOption12State = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpDhcpOption12State.setStatus('current')
swL3IpCtrlIpDhcpOption12HostName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 3, 1, 23), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlIpDhcpOption12HostName.setStatus('current')
swL3Ipv6CtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 4), )
if mibBuilder.loadTexts: swL3Ipv6CtrlTable.setStatus('current')
swL3Ipv6CtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 4, 1), ).setIndexNames((0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3Ipv6CtrlInterfaceName"))
if mibBuilder.loadTexts: swL3Ipv6CtrlEntry.setStatus('current')
swL3Ipv6CtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6CtrlInterfaceName.setStatus('current')
swL3Ipv6CtrlMaxReassmblySize = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6CtrlMaxReassmblySize.setStatus('current')
swL3Ipv6CtrlNsRetransTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3Ipv6CtrlNsRetransTimer.setStatus('current')
swL3Ipv6AddressCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5), )
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlTable.setStatus('current')
swL3Ipv6AddressCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5, 1), ).setIndexNames((0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3Ipv6AddressCtrlInterfaceName"), (0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3Ipv6Address"), (0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3Ipv6AddressCtrlPrefixLen"))
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlEntry.setStatus('current')
swL3Ipv6AddressCtrlInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 12))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlInterfaceName.setStatus('current')
swL3Ipv6Address = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5, 1, 2), Ipv6Address()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6Address.setStatus('current')
swL3Ipv6AddressCtrlPrefixLen = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlPrefixLen.setStatus('current')
swL3Ipv6AddressCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlState.setStatus('current')
swL3Ipv6AddressCtrlAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 5, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("manual", 1), ("dhcpv6", 2), ("stateless", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3Ipv6AddressCtrlAddressType.setStatus('current')
swL3IpCtrlAllIpIfState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("enabled", 2), ("disabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpCtrlAllIpIfState.setStatus('current')
swL3IpFdbInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2, 1), )
if mibBuilder.loadTexts: swL3IpFdbInfoTable.setStatus('current')
swL3IpFdbInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2, 1, 1), ).setIndexNames((0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3IpFdbInfoIpAddr"))
if mibBuilder.loadTexts: swL3IpFdbInfoEntry.setStatus('current')
swL3IpFdbInfoIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoIpAddr.setStatus('current')
swL3IpFdbInfoIpSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2, 1, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoIpSubnetMask.setStatus('current')
swL3IpFdbInfoPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoPort.setStatus('current')
swL3IpFdbInfoType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("static", 2), ("dynamic", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpFdbInfoType.setStatus('current')
swL3IpArpAgingTime = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL3IpArpAgingTime.setStatus('current')
swL3IpStaticRouteTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5), )
if mibBuilder.loadTexts: swL3IpStaticRouteTable.setStatus('current')
swL3IpStaticRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1), ).setIndexNames((0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3IpStaticRouteDest"), (0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3IpStaticRouteMask"), (0, "DGS-3120-24SC-DC-L3MGMT-MIB", "swL3IpStaticRouteNextHop"))
if mibBuilder.loadTexts: swL3IpStaticRouteEntry.setStatus('current')
swL3IpStaticRouteDest = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteDest.setStatus('current')
swL3IpStaticRouteMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteMask.setStatus('current')
swL3IpStaticRouteBkupState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("primary", 1), ("backup", 2), ("none", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteBkupState.setStatus('current')
swL3IpStaticRouteNextHop = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteNextHop.setStatus('current')
swL3IpStaticRouteMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteMetric.setStatus('current')
swL3IpStaticRouteStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("valid", 3), ("active", 4), ("inActive", 5)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteStatus.setStatus('current')
swL3IpStaticRouteWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL3IpStaticRouteWeight.setStatus('current')
swL3IpStaticRouteInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 117, 1, 6, 3, 2, 5, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL3IpStaticRouteInterfaceName.setStatus('current')
mibBuilder.exportSymbols("DGS-3120-24SC-DC-L3MGMT-MIB", swL3Ipv6CtrlNsRetransTimer=swL3Ipv6CtrlNsRetransTimer, swL3IpCtrlIpSubnetMask=swL3IpCtrlIpSubnetMask, swL3IpCtrlProxyArp=swL3IpCtrlProxyArp, swL3IpCtrlDhcpv6ClientState=swL3IpCtrlDhcpv6ClientState, swL3IpStaticRouteWeight=swL3IpStaticRouteWeight, swL3IpFdbInfoEntry=swL3IpFdbInfoEntry, swL3IpCtrlTable=swL3IpCtrlTable, swL3Ipv6CtrlTable=swL3Ipv6CtrlTable, NodeAddress=NodeAddress, swL3IpFdbMgmt=swL3IpFdbMgmt, swL3IpFdbInfoTable=swL3IpFdbInfoTable, swL3Ipv6CtrlInterfaceName=swL3Ipv6CtrlInterfaceName, swL3IpStaticRouteInterfaceName=swL3IpStaticRouteInterfaceName, swL3IpCtrlIpDhcpOption12HostName=swL3IpCtrlIpDhcpOption12HostName, swL3IpCtrlLocalProxyArp=swL3IpCtrlLocalProxyArp, swL3IpArpAgingTime=swL3IpArpAgingTime, swL3Ipv6AddressCtrlInterfaceName=swL3Ipv6AddressCtrlInterfaceName, swL3IpCtrlEntry=swL3IpCtrlEntry, swL3IpStaticRouteMask=swL3IpStaticRouteMask, swL3IpStaticRouteTable=swL3IpStaticRouteTable, NetAddress=NetAddress, swL3IpStaticRouteNextHop=swL3IpStaticRouteNextHop, swL3IpCtrlAdminState=swL3IpCtrlAdminState, swL3IpStaticRouteMetric=swL3IpStaticRouteMetric, swL3IpStaticRouteStatus=swL3IpStaticRouteStatus, swL3IpCtrlIfIndex=swL3IpCtrlIfIndex, swL3IpCtrlMode=swL3IpCtrlMode, swL3IpMgmt=swL3IpMgmt, swL3IpCtrlIpDhcpOption12State=swL3IpCtrlIpDhcpOption12State, swL3IpCtrlAllIpIfState=swL3IpCtrlAllIpIfState, swL3IpCtrlIpv6LinkLocalPrefixLen=swL3IpCtrlIpv6LinkLocalPrefixLen, swL3Ipv6AddressCtrlTable=swL3Ipv6AddressCtrlTable, PYSNMP_MODULE_ID=swL3MgmtMIB, swL3IpCtrlMgmt=swL3IpCtrlMgmt, swL3IpCtrlIpv4AdminState=swL3IpCtrlIpv4AdminState, swL3Ipv6AddressCtrlEntry=swL3Ipv6AddressCtrlEntry, swL3IpStaticRouteEntry=swL3IpStaticRouteEntry, swL3IpCtrlIpAddr=swL3IpCtrlIpAddr, swL3IpCtrlVlanName=swL3IpCtrlVlanName, swL3IpCtrlIpv6LinkLocalAutoState=swL3IpCtrlIpv6LinkLocalAutoState, swL3Ipv6Address=swL3Ipv6Address, swL3IpFdbInfoIpAddr=swL3IpFdbInfoIpAddr, swL3IpCtrlIpv6AdminState=swL3IpCtrlIpv6AdminState, 
swL3Ipv6CtrlEntry=swL3Ipv6CtrlEntry, swL3Ipv6AddressCtrlAddressType=swL3Ipv6AddressCtrlAddressType, swL3IpFdbInfoType=swL3IpFdbInfoType, swL3IpStaticRouteBkupState=swL3IpStaticRouteBkupState, swL3Ipv6AddressCtrlPrefixLen=swL3Ipv6AddressCtrlPrefixLen, swL3IpFdbInfoPort=swL3IpFdbInfoPort, swL3IpStaticRouteDest=swL3IpStaticRouteDest, swL3Ipv6AddressCtrlState=swL3Ipv6AddressCtrlState, swL3IpFdbInfoIpSubnetMask=swL3IpFdbInfoIpSubnetMask, swL3Ipv6CtrlMaxReassmblySize=swL3Ipv6CtrlMaxReassmblySize, swL3IpCtrlIpv6LinkLocalAddress=swL3IpCtrlIpv6LinkLocalAddress, swL3MgmtMIB=swL3MgmtMIB, swL3IpCtrlSecondary=swL3IpCtrlSecondary, swL3IpCtrlInterfaceName=swL3IpCtrlInterfaceName, swL3IpCtrlState=swL3IpCtrlState)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
9ac32b680ac33b4a55f38e4dd4629e58ff8b07c8 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/shared/sdktool/info_holder.py | c12254327b7b03061f05654fef591d5b781d801b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 10,036 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
"""Contains utilities for holding and formatting install information.
This is useful for the output of 'gcloud info', which in turn is extremely
useful for debugging issues related to weird installations, out-of-date
installations, and so on.
"""
import os
import re
import StringIO
import sys
import textwrap
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import platforms
class InfoHolder(object):
  """Base object to hold all the configuration info.

  Aggregates the four report sections (basic system info, installation,
  config, logs) so that str(InfoHolder()) renders the full 'gcloud info'
  diagnostic output.
  """

  def __init__(self):
    self.basic = BasicInfo()
    self.installation = InstallationInfo()
    self.config = ConfigInfo()
    self.logs = LogsInfo()

  def __str__(self):
    # Render each section in order, separated by a blank line.
    out = StringIO.StringIO()
    out.write(str(self.basic) + '\n')
    out.write(str(self.installation) + '\n')
    out.write(str(self.config) + '\n')
    out.write(str(self.logs) + '\n')
    return out.getvalue()
class BasicInfo(object):
  """Holds basic information about your system setup."""

  def __init__(self):
    platform = platforms.Platform.Current()
    self.version = config.CLOUD_SDK_VERSION
    self.operating_system = platform.operating_system
    self.architecture = platform.architecture
    self.python_version = sys.version
    # True when the 'site' module was loaded, i.e. site-packages is enabled
    # for this interpreter (affects which libraries are importable).
    self.site_packages = 'site' in sys.modules

  def __str__(self):
    return textwrap.dedent("""\
        Google Cloud SDK [{version}]
        Platform: [{os}, {arch}]
        Python Version: [{python_version}]
        Site Packages: [{site_packages}]
        """.format(
            version=self.version,
            os=self.operating_system.name,
            arch=self.architecture.name,
            python_version=self.python_version.replace('\n', ' '),
            site_packages='Enabled' if self.site_packages else 'Disabled'))
class InstallationInfo(object):
  """Holds information about your Cloud SDK installation."""

  def __init__(self):
    self.sdk_root = config.Paths().sdk_root
    self.release_channel = config.INSTALLATION_CONFIG.release_channel
    self.repo_url = config.INSTALLATION_CONFIG.snapshot_url
    repos = properties.VALUES.component_manager.additional_repositories.Get(
        validate=False)
    # The property is a comma-separated string; normalize to a list.
    self.additional_repos = repos.split(',') if repos else []
    self.path = os.environ.get('PATH', '')

    if self.sdk_root:
      manager = update_manager.UpdateManager()
      self.components = manager.GetCurrentVersionsInformation()
      self.old_tool_paths = manager.FindAllOldToolsOnPath()
      # Compare realpaths so symlinked PATH entries still match the SDK's
      # bin directory.
      paths = [os.path.realpath(p) for p in self.path.split(os.pathsep)]
      this_path = os.path.realpath(
          os.path.join(self.sdk_root,
                       update_manager.UpdateManager.BIN_DIR_NAME))
      # TODO(markpell): Validate symlinks in /usr/local/bin when we start
      # creating them.
      self.on_path = this_path in paths
    else:
      # No SDK root found: report empty component/tool info.
      self.components = {}
      self.old_tool_paths = []
      self.on_path = False

  def __str__(self):
    out = StringIO.StringIO()
    out.write('Installation Root: [{0}]\n'.format(
        self.sdk_root if self.sdk_root else 'Unknown'))
    if config.INSTALLATION_CONFIG.IsAlternateReleaseChannel():
      out.write('Release Channel: [{0}]\n'.format(self.release_channel))
      out.write('Repository URL: [{0}]\n'.format(self.repo_url))
    if self.additional_repos:
      out.write('Additional Repositories:\n  {0}\n'.format(
          '\n  '.join(self.additional_repos)))
    if self.components:
      # NOTE: iteritems() means this module targets Python 2.
      components = ['{0}: [{1}]'.format(name, value) for name, value in
                    self.components.iteritems()]
      out.write('Installed Components:\n  {0}\n'.format(
          '\n  '.join(components)))
    out.write('System PATH: [{0}]\n'.format(self.path))
    out.write('Cloud SDK on PATH: [{0}]\n'.format(self.on_path))

    if self.old_tool_paths:
      out.write('\nWARNING: There are old versions of the Google Cloud '
                'Platform tools on your system PATH.\n  {0}\n'
                .format('\n  '.join(self.old_tool_paths)))
    return out.getvalue()
class ConfigInfo(object):
  """Holds information about where config is stored and what values are set."""

  def __init__(self):
    self.paths = config.Paths()
    # validate=False: report whatever is set, even if currently invalid.
    self.account = properties.VALUES.core.account.Get(validate=False)
    self.project = properties.VALUES.core.project.Get(validate=False)
    self.properties = properties.VALUES.AllValues()

  def __str__(self):
    out = StringIO.StringIO()
    out.write(textwrap.dedent("""\
        Installation Properties: [{installation_properties}]
        User Config Directory: [{global_config}]
        User Properties: [{user_properties}]
        Current Workspace: [{workspace}]
        Workspace Config Directory: [{workspace_config}]
        Workspace Properties: [{workspace_properties}]
        Account: [{account}]
        Project: [{project}]
        """.format(
            installation_properties=self.paths.installation_properties_path,
            global_config=self.paths.global_config_dir,
            user_properties=self.paths.user_properties_path,
            workspace=self.paths.workspace_dir,
            workspace_config=self.paths.workspace_config_dir,
            workspace_properties=self.paths.workspace_properties_path,
            account=self.account,
            project=self.project)))
    # Dump every property grouped by section (Python 2 iteritems()).
    out.write('Current Properties:\n')
    for section, props in self.properties.iteritems():
      out.write('  [{section}]\n'.format(section=section))
      for name, value in props.iteritems():
        out.write('    {name}: [{value}]\n'.format(
            name=name, value=value))
    return out.getvalue()
def RecentLogFiles(logs_dir, num=1):
  """Finds the most recent (not current) gcloud log files.

  Args:
    logs_dir: str, The path to the logs directory being used.
    num: the number of log files to find

  Returns:
    A list of full paths to the latest num log files, excluding the current
    log file. If there are fewer than num log files, include all of them.
  """
  day_dirs = FilesSortedByName(logs_dir)
  if not day_dirs:
    return []
  collected = []
  # Walk the per-day directories newest-first, accumulating log files
  # newest-first, until we have the current file plus num older ones.
  for day_dir in reversed(day_dirs):
    day_files = FilesSortedByName(day_dir) or []
    collected.extend(reversed(day_files))
    if len(collected) > num:
      break
  # Index 0 is the currently-in-use log file; drop it and keep up to num.
  return collected[1:num + 1]
def LastLogFile(logs_dir):
  """Finds the last (not current) gcloud log file.

  Args:
    logs_dir: str, The path to the logs directory being used.

  Returns:
    str, The full path to the last (but not the currently in use) log file
    if it exists, or None.
  """
  recent = RecentLogFiles(logs_dir)
  return recent[0] if recent else None
def FilesSortedByName(directory):
  """Gets the list of files in the given directory, sorted by name.

  Args:
    directory: str, The path to the directory to list.

  Returns:
    [str], The full paths of the entries sorted by file name, or None when
    the directory does not exist or is empty.
  """
  if not os.path.isdir(directory):
    return None
  names = sorted(os.listdir(directory))
  if not names:
    return None
  return [os.path.join(directory, name) for name in names]
class LogData(object):
  """Representation of a log file.

  Stores information such as the name of the log file, its contents, and the
  command run.
  """

  # This precedes the traceback in the log file.
  TRACEBACK_MARKER = '\nTraceback (most recent call last):\n'

  # This shows the command run in the log file
  COMMAND_REGEXP = r'Running (gcloud\.[a-z.]+)'

  def __init__(self, filename, command, contents, traceback):
    self.filename = filename
    self.command = command
    self.contents = contents
    self.traceback = traceback

  def __str__(self):
    logs_dir = config.Paths().logs_dir
    log_path = self.filename
    if self.filename.startswith(logs_dir):
      # Just keep the parts of the log paths that aren't common
      log_path = self.filename[len(logs_dir + os.path.sep):]
    crash_detected = ' (crash detected)' if self.traceback else ''
    return '[{0}]: [{1}]{2}'.format(log_path, self.command, crash_detected)

  @classmethod
  def FromFile(cls, log_file):
    """Parse the file at the given path into a LogData.

    Args:
      log_file: str, the path to the log file to read

    Returns:
      LogData, representation of the log file
    """
    with open(log_file) as log_fp:
      contents = log_fp.read()
    traceback = None
    command = None
    match = re.search(cls.COMMAND_REGEXP, contents)
    if match:
      # ex. gcloud.group.subgroup.command
      dotted_cmd_string, = match.groups()
      command = ' '.join(dotted_cmd_string.split('.'))
    if cls.TRACEBACK_MARKER in contents:
      # Keep everything from the *last* traceback marker onward.
      traceback = (cls.TRACEBACK_MARKER +
                   contents.split(cls.TRACEBACK_MARKER)[-1])
      # Trim any log lines that follow the traceback
      traceback = re.split(log.LOG_PREFIX_PATTERN, traceback)[0]
      traceback = traceback.strip()
    return cls(log_file, command, contents, traceback)
class LogsInfo(object):
  """Holds information about where logs are located."""

  # How many recent (non-current) log files GetRecentRuns reports on.
  NUM_RECENT_LOG_FILES = 5

  def __init__(self):
    paths = config.Paths()
    self.logs_dir = paths.logs_dir
    self.last_log = LastLogFile(self.logs_dir)
    self.last_logs = RecentLogFiles(self.logs_dir, self.NUM_RECENT_LOG_FILES)

  def __str__(self):
    return textwrap.dedent("""\
        Logs Directory: [{logs_dir}]
        Last Log File: [{log_file}]
        """.format(logs_dir=self.logs_dir, log_file=self.last_log))

  def LastLogContents(self):
    # Returns '' rather than raising when no previous log file exists.
    if not self.last_log:
      return ''
    with open(self.last_log) as fp:
      return fp.read()

  def GetRecentRuns(self):
    """Return the most recent runs, as reported by info_holder.LogsInfo.

    Returns:
      A list of LogData
    """
    return [LogData.FromFile(log_file) for log_file in self.last_logs]
| [
"joe@longreen.io"
] | joe@longreen.io |
b6f1115d4f04e8309fb6d9dd7f163c32b2b8bf2e | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /090_logging/_exercises/_templates/Python Logging Basics/004_Example 3 – Log File.py | a2705a8ad6d0da1023b298252b981003efb9282c | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,263 | py | # # The final option is to log messages directly to a file. This is rarely useful these days, as administrators can
# # configure syslog to write certain messages to specific files, or if deploying inside containers,
# # this is an anti-pattern. Also if you use centralized logging, having to deal with additional log files is an
# # added concern. But it is an option that is still available.
# #
# # When logging to files, the main thing to be wary of is that log files need to be rotated regularly.
# # The application needs to detect the log file being renamed and handle that situation. While Python provides its
# # own file rotation handler, it is best to leave log rotation to dedicated tools such as logrotate.
# # The WatchedFileHandler will keep track of the log file and reopen it if it is rotated, making it work well with
# # logrotate without requiring any specific signals.
# #
# # Here is a sample implementation.
#
# ______ l____
# ______ l____.h__
# ______ os
#
# handler _ l____.h__.WFH_(
# __.e___.g.. "LOGFILE", "/var/log/yourapp.log"
# formatter _ l____.F... l____.B..
# h__.sF_ f..
# root _ l____.gL_
# ?.sL_ __.e__.g__ "LOGLEVEL", "INFO"
# ?.aH_ h..
#
# t__
# e.. m..
# e___ E..
# l____.e.. "Exception in main()"
# e.. 1
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
45b3718a502218323dd157b5f41726f28c7cb8b9 | 929d12e11ed2fb69476b9d07932e38662f0ce6fc | /Two Pointers/3 Sum.py | db65bf577617e5c0cda320df9d5e87c5a5f947dd | [] | no_license | arnabs542/Data-Structures-And-Algorithms | b8f341a31ca18044bf179294fbcb0fac1f835216 | ffcc2f8a25520ce37cd1f67e6225281c85141a65 | refs/heads/master | 2022-12-13T14:09:55.005341 | 2020-09-13T11:58:58 | 2020-09-13T11:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | """
3 Sum
Problem Description
Given an array A of N integers, find three integers in A such that the sum is closest to a given number B. Return the sum of those three integers. Assume that there will only be one solution.
Problem Constraints
-10^8 <= B <= 10^8
1 <= N <= 10^4
-10^8 <= A[i] <= 10^8
Input Format
First argument is an integer array A of size N. Second argument is an integer B denoting the sum you need to get close to.
Output Format
Return a single integer denoting the sum of three integers which is closest to B.
Example Input
Input 1:
A = [-1, 2, 1, -4]
B = 1
Input 2:
A = [1, 2, 3]
B = 6
Example Output
Output 1:
2
Output 2:
6
Example Explanation
Explanation 1:
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2)
Explanation 2:
Take all elements to get exactly 6.
"""
class Solution:
    # @param A : list of integers
    # @param B : integer
    # @return an integer
    def threeSumClosest(self, A, B):
        """Return the sum of three elements of A whose sum is closest to B.

        Classic sorted two-pointer scan: O(n^2) time, O(n) extra space.
        Improvements over the original version:
          * works on a sorted copy, so the caller's list is not mutated
            (the old code called A.sort() in place);
          * returns immediately on an exact match, since nothing can be
            closer than a zero difference.
        Degenerate inputs with fewer than three elements return 0, matching
        the original behavior.
        """
        nums = sorted(A)  # copy: keep the caller's ordering intact
        n = len(nums)
        if n < 3:
            return 0  # legacy behavior for inputs the problem doesn't cover
        best = nums[0] + nums[1] + nums[2]
        for i in range(n - 2):
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total == B:
                    return total  # exact hit: cannot do better
                if abs(total - B) < abs(best - B):
                    best = total
                # Move the pointer that shrinks the gap toward B.
                if total < B:
                    lo += 1
                else:
                    hi -= 1
        return best
| [
"noreply@github.com"
] | arnabs542.noreply@github.com |
7a11fe137ba5bf37e86e22d6f0511f13d1e1b673 | ba7134468cb18014fe2e3e1513382fa52aafd4eb | /01_Python_basic_grammar_supplement/005_Python常用内置函数/002_map_映射函数_按规律生成新列表.py | 1a656cdc6a055d276239ad4f2a42c28e99d16e3a | [] | no_license | FelixZFB/Python_advanced_learning | 4e44616b390e1c6e7da37229c7ad48c069cee71b | a71a6d733ed2134a79f02a6488807862b23438b8 | refs/heads/master | 2021-06-27T11:15:07.754719 | 2020-11-20T02:41:25 | 2020-11-20T02:41:25 | 183,116,714 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | # map: 映射
# 即把集合或者列表中的元素,每一个元素都按照一定的规则进行操作,生成一个新的列表或者集合
# map函数是系统提供的具有映射功能的高阶函数,返回值是一个迭代对象
# 先看一个列表[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],以该列表为基础每个数字乘以10
# 生成一个新的列表[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
# 代码如下:
l1 = [i for i in range(10)]
l2 = []
for i in l1:
l2.append(i * 10)
print(l2)
# map函数实现上面的功能,代码变的更简单
l3 = [i for i in range(10)]
def mulTen(n):
return n * 10
l4 = map(mulTen, l3)
print(type(l4))
print(l4)
# map类型是可迭代的,使用for循环取出每个元素
for i in l4:
print(i)
| [
"18200116656@qq.com"
] | 18200116656@qq.com |
8c727e64d3c3a290d5db68cd724530baa918f3ff | 4d523d4d9d1fdd643bb42403ffc4cab67ee260ca | /benchmarks/dynamo/common.py | b0752f10d4338a51ee25afbc6e34b2266fb78639 | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | kiszk/pytorch | 6061cd5afc053e2f5c2bfe0d251f3b2d4527cef0 | 62c53aabdb0d2630c87141f21072a79aa69aee5c | refs/heads/main | 2023-05-26T12:07:52.086928 | 2023-05-08T15:21:16 | 2023-05-08T15:21:16 | 231,248,506 | 0 | 0 | NOASSERTION | 2020-01-01T18:12:00 | 2020-01-01T18:11:59 | null | UTF-8 | Python | false | false | 93,093 | py | #!/usr/bin/env python3
import argparse
import collections
import copy
import csv
import functools
import importlib
import itertools
import logging
import os
import random
import signal
import subprocess
import sys
import time
from contextlib import contextmanager
from typing import NamedTuple
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
import psutil
import torch
import torch._dynamo
import torch._dynamo.utils
import torch.distributed
from scipy.stats import gmean, ttest_ind
from torch._dynamo.exc import BackendCompilerFailed
from torch._dynamo.profiler import fx_insert_profiling, Profiler
from torch._dynamo.testing import dummy_fx_compile, format_speedup, same
from torch._dynamo.utils import clone_inputs, graph_break_reasons
from torch._functorch.aot_autograd import set_model_name
from torch._inductor import config as inductor_config
from torch._inductor.utils import fresh_inductor_cache
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils._pytree import tree_map, tree_map_only
try:
from .microbenchmarks.operator_inp_utils import OperatorInputsMode
except ImportError:
from microbenchmarks.operator_inp_utils import OperatorInputsMode
try:
import torch_xla.core.xla_model as xm
except ImportError:
# ignore the error if torch_xla is not installed
pass
log = logging.getLogger(__name__)

# We are primarily interested in TF32
torch.backends.cuda.matmul.allow_tf32 = True

# Mutable per-model run state shared with the experiment functions below;
# set by the harness before each model is benchmarked.
current_name = ""
current_device = ""
current_batch_size = None
output_filename = None
class CI(NamedTuple):
    """Key describing one CI configuration; used to index the CI_SKIP tables."""

    backend: str  # aot_eager or inductor
    training: bool
    dynamic: bool = False
    device: str = "cuda"
CI_SKIP = collections.defaultdict(list)
# Skips for dynamic=False
# Here eager really means dynamo+eager
CI_SKIP[CI("eager", training=False)] = [
# TorchBench
"DALLE2_pytorch", # AttributeError: text_encodings
"llama", # does not support complex32
# TypeError: pad_center() takes 1 positional argument but 2 were given
"tacotron2",
# torchrec_dlrm requires gcc-11, https://github.com/pytorch/benchmark/pull/1427
"torchrec_dlrm",
# Huggingface
"DebertaV2ForQuestionAnswering", # OOM
# KeyError: '_ignore_torch_cuda_oom'
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
]
CI_SKIP[CI("eager", training=True)] = [
*CI_SKIP[CI("eager", training=False)],
# TorchBench
"BERT_pytorch", # accuracy
"Background_Matting", # fp64_OOM
"hf_BigBird", # fp64_OOM
"hf_T5_base", # fp64_OOM
"vision_maskrcnn", # eager_two_runs_differ
# Huggingface
"XGLMForCausalLM", # OOM
# TIMM
"cait_m36_384", # fp64_OOM
"convit_base", # fp64_OOM
"mobilenetv2_100", # accuracy
"xcit_large_24_p8_224", # fp64_OOM,
]
CI_SKIP[CI("aot_eager", training=False)] = [
*CI_SKIP[CI("eager", training=False)],
# all dynamic shapes errors for detectron variants
"demucs", # OOM
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_fcos_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
"moco", # Please convert all Tensors to FakeTensors first
"hf_BigBird", # OOM
"tacotron2", # AssertionError: Deduped args out of bounds
# Huggingface
"BartForConditionalGeneration", # OOM
"DebertaV2ForQuestionAnswering", # OOM
# Torchbench
"speech_transformer", # https://github.com/pytorch/pytorch/issues/99893
"pyhpc_isoneutral_mixing", # https://github.com/pytorch/pytorch/issues/99893
"pyhpc_turbulent_kinetic_energy", # https://github.com/pytorch/pytorch/issues/99893
]
CI_SKIP[CI("aot_eager", training=True)] = [
*CI_SKIP[CI("aot_eager", training=False)],
# TorchBench
"Background_Matting", # fp64_OOM
"hf_T5_base", # fp64_OOM
"mobilenet_v2_quantized_qat", # fp64_OOM
"resnet50_quantized_qat", # fp64_OOM
"moco",
"pytorch_struct",
"vision_maskrcnn",
# Huggingface
"MBartForConditionalGeneration", # OOM
"M2M100ForConditionalGeneration", # OOM
"XGLMForCausalLM", # OOM
# TIMM
"cait_m36_384", # fp64_OOM
"convit_base", # fp64_OOM
"fbnetv3_b", # Accuracy (blocks.2.2.bn1.weight.grad)
"levit_128", # Accuracy (patch_embed.0.c.weight.grad)
"lcnet_050", # Accuracy (blocks.1.0.bn2.weight.grad)
"sebotnet33ts_256", # Accuracy (stem.conv1.conv.weight.grad)
"xcit_large_24_p8_224", # fp64_OOM,
"gernet_l", # accuracy https://github.com/pytorch/pytorch/issues/93847
"gluon_xception65", # accuracy https://github.com/pytorch/pytorch/issues/93847
"tinynet_a", # accuracy https://github.com/pytorch/pytorch/issues/93847
]
CI_SKIP[CI("inductor", training=False)] = [
# TorchBench
"DALLE2_pytorch", # AttributeError: text_encodings
"llama", # does not support complex32
# torchrec_dlrm requires gcc-11, https://github.com/pytorch/benchmark/pull/1427
"torchrec_dlrm",
"demucs", # OOM
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_fcos_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
# TorchBench
"detectron2",
"densenet121", # flaky accuracy
"hf_T5", # accuracy
"hf_BigBird", # accuracy
"hf_GPT2_large", # OOM
"maml", # accuracy
"mobilenet_v2_quantized_qat", # The eval test only supports CPU
"moco", # accuracy
"pytorch_struct", # Test eval is not implemented
"pyhpc_equation_of_state", # Accuracy
"pyhpc_turbulent_kinetic_energy", # Accuracy
"tacotron2",
"vision_maskrcnn", # accuracy
]
CI_SKIP[CI("inductor", training=False, device="cpu")] = [
# TorchBench
"drq", # Need to update torchbench
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_fcos_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
"doctr_det_predictor", # requires newer gcc
"doctr_reco_predictor", # requires newer gcc
"gat", # does not work with fp32
"gcn", # does not work with fp32
"hf_Bert_large", # OOM
"hf_GPT2_large", # Intermittent failure on CI
"hf_T5_base", # OOM
"llama", # does not support complex32
"mobilenet_v2_quantized_qat",
"pyhpc_turbulent_kinetic_energy",
"vision_maskrcnn",
"resnet50_quantized_qat", # Eager model failed to run(Quantize only works on Float Tensor, got Double)
"sage", # does not work with fp32
# torchrec_dlrm requires gcc-11, https://github.com/pytorch/benchmark/pull/1427
"torchrec_dlrm",
# Huggingface
"AllenaiLongformerBase",
"BartForConditionalGeneration", # OOM
"DebertaV2ForQuestionAnswering", # OOM
"MBartForConditionalGeneration", # Accuracy https://github.com/pytorch/pytorch/issues/94793
"PLBartForConditionalGeneration", # Accuracy https://github.com/pytorch/pytorch/issues/94794
# TIMM
"cait_m36_384", # Accuracy
"pnasnet5large", # OOM
"xcit_large_24_p8_224", # OOM https://github.com/pytorch/pytorch/issues/95984
"opacus_cifar10", # Fails to run https://github.com/pytorch/pytorch/issues/99201
]
CI_SKIP[CI("inductor", training=True)] = [
*CI_SKIP[CI("inductor", training=False)],
# TorchBench
"Background_Matting", # fp64_OOM
"dlrm", # Fails on CI - unable to repro locally
"hf_T5_base", # accuracy
"mobilenet_v3_large", # accuracy
"resnet50_quantized_qat", # Eager model failed to run
]
# Skips for dynamic=True
CI_SKIP[CI("aot_eager", training=False, dynamic=True)] = [
*CI_SKIP[CI("aot_eager", training=False)],
]
CI_SKIP[CI("aot_eager", training=True, dynamic=True)] = [
*CI_SKIP[CI("aot_eager", training=True)],
*CI_SKIP[CI("aot_eager", training=False, dynamic=True)],
]
CI_SKIP[CI("inductor", training=False, dynamic=True)] = [
*CI_SKIP[CI("aot_eager", training=False, dynamic=True)],
*CI_SKIP[CI("inductor", training=False)],
]
CI_SKIP[CI("inductor", training=True, dynamic=True)] = [
# NB: Intentionally omitting for symmetry with dynamic=False
# *CI_SKIP[CI("aot_eager", training=True, dynamic=True)],
*CI_SKIP[CI("inductor", training=False, dynamic=True)],
*CI_SKIP[CI("inductor", training=True)],
"levit_128", # Accuracy fails on A10G, passes on A100
"sebotnet33ts_256", # Flaky accuracy failed
]
# Models skipped when benchmarking with a real optimizer; each entry notes
# the reason (accuracy failure, fx stack issue, OOM, ...).
CI_SKIP_OPTIMIZER = {
    # TIMM
    "convmixer_768_32",  # accuracy
    "hrnet_w18",  # Stack issue in fx
    # TorchBench
    "dlrm",  # symbolic shapes error
    # HF
    "pnasnet5large",  # Stack issue in fx
    "MobileBertForMaskedLM",  # Stack issue in fx
    "MobileBertForQuestionAnswering",  # Stack issue in fx
    "PegasusForConditionalGeneration",  # OOM
}
def model_specified_by_path(path_and_class_str):
    """Return True when --only holds a "path:...,class:..." spec (vs. a plain model name)."""
    return path_and_class_str.find(":") != -1
def load_model_from_path(path_and_class_str):
    """Load an nn.Module instance from a "path:<abs path>,class:<name>" spec.

    The file at ``path`` is imported as a throwaway module, ``class`` is
    looked up in it and instantiated with no arguments, and the instance's
    get_example_inputs() supplies the example inputs.

    Returns:
        (model, example_inputs) tuple.

    Raises:
        RuntimeError: if the spec is missing "path"/"class" keys or the
            path is not absolute.
    """
    configs = {}
    # Spec format: comma-separated key:value pairs.
    for kvstr in path_and_class_str.split(","):
        k, v = kvstr.split(":")
        configs[k] = v

    for name in ["path", "class"]:
        if name not in configs:
            raise RuntimeError(
                "Invalid --only arguments. Check help message for the correct format"
            )

    path = configs["path"]
    class_name = configs["class"]

    if path[:1] != "/":
        raise RuntimeError(
            "Use absolute path since dynamo may change the current working directory which makes using relative path tricky"
        )

    # Import the file as an anonymous module and pull out the model class.
    spec = importlib.util.spec_from_file_location("module_name", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)

    model_class = getattr(module, class_name)
    assert issubclass(model_class, torch.nn.Module)
    model = model_class()
    assert hasattr(model, "get_example_inputs")
    inputs = model.get_example_inputs()
    return model, inputs
def output_csv(filename, headers, row):
    """Append one result row to a CSV file, keeping the header line current.

    The whole file is rewritten on each call. Floats are formatted with six
    decimal places and every row is right-padded with "0" to the header
    width. If the file already exists, its (possibly wider) header wins
    unless the new header is longer.
    """
    records = []
    if os.path.exists(filename):
        with open(filename, "r") as fd:
            records = list(csv.reader(fd)) or [[]]
        if headers and len(headers) > len(records[0]):
            # A previously-failed run may have left a short header behind.
            records[0] = headers
        else:
            headers = records[0]
    else:
        records = [headers]
    formatted = [f"{x:.6f}" if isinstance(x, float) else x for x in row]
    records.append(formatted)
    with open(filename, "w") as fd:
        writer = csv.writer(fd, lineterminator="\n")
        for record in records:
            padding = ["0"] * (len(headers) - len(record))
            writer.writerow(record + padding)
class NullContext:
    """A do-nothing context manager, used where an optional context is expected."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None (falsy) propagates any exception, same as `pass`.
        return None
def nothing(f):
    """Identity decorator: hand back ``f`` untouched (no-op stand-in for a real wrapper)."""
    return f
@functools.lru_cache(None)
def patch_torch_manual_seed():
    """Make torch manual seed deterministic. Helps with accuracy testing.

    Monkey-patches torch.manual_seed with a version that ignores the
    requested seed and always uses 1337 (also seeding all CUDA devices).
    lru_cache(None) guarantees the patch is applied at most once.
    """

    def deterministic_torch_manual_seed(*args, **kwargs):
        from torch._C import default_generator

        seed = 1337
        import torch.cuda

        # Skip CUDA seeding in a forked subprocess where CUDA is unusable.
        if not torch.cuda._is_in_bad_fork():
            torch.cuda.manual_seed_all(seed)
        return default_generator.manual_seed(seed)

    torch.manual_seed = deterministic_torch_manual_seed
def synchronize():
    """Device synchronization hook called by timed(); a no-op here.

    NOTE(review): presumably rebound to torch.cuda.synchronize (or similar)
    elsewhere for GPU timing runs — confirm at the call/rebind sites.
    """
    pass
def summarize_graph_break(filename):
    """
    Sorts and de-dupes the graphs breaks on the reason string. Note that this
    function is just a best effort to reduce the logging information. We could
    miss some graph breaks because of de-duping. We can further refine this
    function as need arises.
    """
    # NOTE(review): rstrip('.csv') strips *characters* from the right, not the
    # suffix — e.g. "metrics.csv".rstrip('.csv') -> "metri". This must stay in
    # sync with the code that writes the *_graph_breaks.csv file; confirm
    # before changing either side.
    log_file = f"{filename.rstrip('.csv')}_graph_breaks.csv"
    if os.path.exists(log_file):
        df = pd.read_csv(log_file)
        df = df.sort_values("reason").drop_duplicates(subset="reason")

        # Specialize for multi tensor sgd as reason is not identical
        multi_tensor_sgd_row = df.loc[df["reason"].str.contains("_multi_tensor_sgd")]
        if len(multi_tensor_sgd_row):
            df = df[
                ~df["reason"].str.contains("_multi_tensor_sgd")
            ]  # Drop all sgd rows
            df = pd.concat(
                [df, pd.DataFrame([multi_tensor_sgd_row.iloc[0]])], axis=0
            )  # Add back a single row
        df.to_csv(f"{log_file.rstrip('.csv')}_deduped.csv", index=False)
def print_summary(filename):
    """Print aggregate summaries of a result CSV, one table per tag if tagged."""
    if not (filename and os.path.exists(filename)):
        return
    data = pd.read_csv(filename)
    if "tag" in data.columns:
        for tag in data.tag.unique():
            if tag == "0.0000":
                continue  # This happens for failed runs
            print(f"\nSummary for tag={tag}:")
            print_summary_table(data[data.tag == tag])
    else:
        print_summary_table(data)
    summarize_graph_break(filename)
def print_summary_table(data):
    """Print a per-column aggregate summary of a benchmark result frame.

    Percentage columns print as means, count-like columns as means, the
    accuracy column as a pass rate, known scalar columns with their units,
    and everything else as a gmean/mean speedup pair. Columns whose data
    fails to aggregate (e.g. non-numeric garbage) are skipped silently so
    one bad column cannot abort the whole summary.
    """
    width = max(map(len, data.columns))
    for col in data.columns:
        try:
            if col in ("dev", "name", "batch_size", "tag"):
                continue
            elif col in ("pct_ops", "pct_time"):
                print(col.ljust(width), f"{data[col].mean():.3%}")
            elif col in ("graphs", "graph_calls", "captured_ops", "total_ops"):
                print(col.ljust(width), f"{data[col].mean():.3f}")
            # Bug fix: these used to be written `col in ("compilation_latency")`,
            # which tests *substring containment* in a plain string (the parens
            # do not make a tuple), so any column whose name is a substring
            # would match. Exact equality is what was intended.
            elif col == "compilation_latency":
                print(col.ljust(width), f"mean={data[col].mean():.3f} seconds")
            elif col == "compression_ratio":
                print(col.ljust(width), f"mean={data[col].mean():.3f}x")
            elif col == "accuracy":
                pass_rate = (data[col] == "pass").mean()
                print(col.ljust(width), f"pass_rate={100*pass_rate:.2f}%")
            else:
                cdata = data[col]
                print(
                    col.ljust(width),
                    f"gmean={gmean(cdata):.2f}x mean={cdata.mean():.3f}x",
                )
        except Exception:
            # Best effort: a malformed column must not abort the summary.
            pass
def tensor_is_on_xla(tensors):
    """Return True iff any tensor in the (possibly nested) structure is on an XLA device."""
    hits = []

    def visit(t: torch.Tensor):
        if t.device.type == "xla":
            hits.append(t)

    # tree_map_only walks arbitrarily nested containers, calling visit on
    # every tensor leaf.
    tree_map_only(torch.Tensor, visit, tensors)
    return bool(hits)
def timed(
    model,
    model_iter_fn,
    example_inputs,
    times=1,
    return_result=False,
    collect_outputs=False,
):
    """Time `times` iterations of model_iter_fn(model, example_inputs).

    Returns the total wall-clock seconds (plus the last iteration's result
    when return_result=True). When the inputs live on an XLA device, graph
    boundaries are cut with xm.mark_step() and pending device work is
    drained with xm.wait_device_ops() so compilation/execution is charged
    to the timed region correctly.
    """
    use_xla = tensor_is_on_xla(example_inputs)
    synchronize()

    if use_xla:
        # Flush any pending work before the clock starts.
        xm.mark_step()
        xm.wait_device_ops()

    time_total = 0
    # Dont collect outputs to correctly measure timing
    for _ in range(times):
        # Put this call inside the loop to reset the seed for each iteration.
        # Don't include reset_rng_state() to correctly measure timing
        reset_rng_state(use_xla)
        t_iter_begin = time.perf_counter()
        result = model_iter_fn(model, example_inputs, collect_outputs=collect_outputs)

        # instead of calling sync on result_list, we should call mark_step.
        # In training case, result_list may be empty, but we want to
        # send all the pending graphs for compilation.
        if use_xla:
            # For the model running on regular torchxla (baseline), we need the
            # mark step to send the accumulated graph for compilation.
            #
            # For the model running with dynamo/torchxla bridge, in training case,
            # we need the mark step to send the optimizer graph out for
            # compilation.
            xm.mark_step()
        t_iter_end = time.perf_counter()
        time_total += t_iter_end - t_iter_begin

    t_0 = time.perf_counter()
    if use_xla:
        xm.wait_device_ops()
    synchronize()
    t_1 = time.perf_counter()
    time_total += t_1 - t_0
    return (time_total, result) if return_result else time_total
class Stats:
    """Accumulates torch._dynamo counters across benchmark runs."""

    # Running totals, folded in from the per-run dynamo counters.
    totals = collections.defaultdict(collections.Counter)

    @classmethod
    def reset_counters(cls):
        # Fold the current per-run counters into totals, then clear them.
        # Returns (ok_frames, total_frames) observed during this run.
        for k, v in torch._dynamo.utils.counters.items():
            cls.totals[k].update(v)
        ok = torch._dynamo.utils.counters["frames"]["ok"]
        total = torch._dynamo.utils.counters["frames"]["total"]
        torch._dynamo.utils.counters.clear()
        return ok, total

    @classmethod
    def print_summary(cls):
        # Print up to the 50 most common entries of every accumulated counter.
        for k, v in sorted(cls.totals.items()):
            lines = "\n  ".join(map(str, v.most_common(50)))
            print(f"STATS {k}\n  {lines}")

    @classmethod
    def aot_summary(cls):
        # [total, ok] aot_autograd compilation counts.
        return [cls.totals["aot_autograd"]["total"], cls.totals["aot_autograd"]["ok"]]
def coverage_experiment(args, model_iter_fn, model, example_inputs):
    """
    Test operator/model coverage of TorchDynamo and record statistics
    taken from a profiler. This target is mainly intended to check
    correctness.

    Writes to ./coverage.csv
    """
    profiler = Profiler()
    # NOTE(review): torch._dynamo.run appears to execute the already-compiled
    # artifacts without triggering new compilation — confirm against dynamo docs.
    frozen_model_iter_fn = torch._dynamo.run(model_iter_fn)
    with profiler.prof:
        frozen_model_iter_fn(model, example_inputs)
    coverage_result = profiler.results()
    # Append one row (keyed by the module-level current_* globals) to the
    # coverage CSV.
    output_csv(
        output_filename,
        (
            "dev",
            "name",
            "batch_size",
            "graphs",
            "graph_calls",
            "captured_ops",
            "total_ops",
            "pct_ops",
            "pct_time",
        ),
        [
            current_device,
            current_name,
            current_batch_size,
        ]
        + coverage_result.tocsv(),
    )
    return coverage_result
def speedup_experiment_fx2trt(args, model_iter_fn, model, example_inputs):
    """
    Measure speedups over eager using the trt inference backend. TRT backend is based fx graph
    generated by torch._dynamo.
    Writes to ./speedups_fx2trt.csv
    """
    # Thin wrapper: delegates to the generic speedup harness.
    # NOTE(review): assumes the fx2trt backend was already selected by the
    # caller via the backend flags — confirm at the call site.
    return speedup_experiment(args, model_iter_fn, model, example_inputs)
def recompile_profiler_experiment(args, model_iter_fn, model, example_inputs):
    """Run one pass under CompileProfiler, record its report to the output CSV,
    and return [number of guard failures]."""
    with torch._dynamo.utils.CompileProfiler() as prof:
        opt_model_iter_fn = torch._dynamo.optimize(prof, nopython=args.nopython)(
            model_iter_fn
        )
        opt_model_iter_fn(model, example_inputs)
        output_csv(
            output_filename, ["model", "profiler report"], [current_name, prof.report()]
        )
        met = prof.get_metrics()
        guard_failures = len(met["guard_failures"])
    return [guard_failures]
def randomize_input(inputs):
    """Recursively replace float tensors in ``inputs`` with fresh random data.

    Lists/tuples are rebuilt element-wise (preserving the container type),
    float32/float64 tensors are replaced via randn_like (counted in the
    dynamo "randomize_input" counter), and int64 tensors are returned
    untouched. Anything else raises RuntimeError.
    """
    if isinstance(inputs, (list, tuple)):
        randomized = [randomize_input(item) for item in inputs]
        return type(inputs)(randomized)
    if not isinstance(inputs, torch.Tensor):
        raise RuntimeError(
            f"randomize_input can not handle input of type {type(inputs)}"
        )
    if inputs.dtype in (torch.float32, torch.float64):
        torch._dynamo.utils.counters["randomize_input"]["times"] += 1
        return torch.randn_like(inputs)
    if inputs.dtype == torch.int64:
        # Note: we can not simply tune integer tensors as follows
        # `return torch.randint_like(inputs, high=inputs.max().item())`
        # This may break some invariants between tensors.
        # E.g. in embedding lookup case, one tensor is the length
        # and another is an indices tensor.
        return inputs
    raise RuntimeError(
        f"randomize_input need support tensor of type {inputs.dtype}"
    )
def maybe_mark_step(args):
    """Cut the XLA graph (xm.mark_step) when tracing on XLA; no-op otherwise."""
    if args.trace_on_xla:
        xm.mark_step()
def speedup_experiment(args, model_iter_fn, model, example_inputs, **kwargs):
    """
    Measure speedups over eager.

    Interleaves timed runs of the eager model and the dynamo-optimized model
    (to be robust to frequency scaling / background load), then reports the
    median speedup and appends a row to the output csv.

    Writes to ./speedups.csv
    """
    # if args.dynamic_shapes:
    #     return speedup_experiment_ds(args, model_iter_fn, model, example_inputs)
    timings = np.zeros((args.repeat, 2), np.float64)
    # if we randomize the input, we should also check the result is correct
    should_check_result = should_randomize_input = args.randomize_input
    # BUGFIX: previously `is_correct` was read before any assignment inside
    # the loop below, raising UnboundLocalError on the first repeat whenever
    # --randomize-input was set. Initialize the accumulator here.
    is_correct = True
    import contextlib

    from torch._inductor.utils import maybe_profile

    @contextlib.contextmanager
    def maybe_mark_profile(*args, **kwargs):
        # Wrap the body in a named record_function only when profiling.
        prof: torch.profiler.profile = kwargs.pop("p", None)
        mark = kwargs.pop("mark", None)
        if prof:
            with torch.profiler.record_function(mark):
                yield
        else:
            yield

    times = args.iterations_per_run
    # Use higher tolerance for XLA since XLA cause numerical unstability when
    # graph size changes
    tolerance = args.xla_tolerance if args.trace_on_xla else 1e-4
    torch._dynamo.config.repro_tolerance = tolerance
    with maybe_profile(args.export_profiler_trace) as p:
        frozen_model_iter_fn = torch._dynamo.run(model_iter_fn)
        for rep in range(args.repeat):
            inputs = (
                randomize_input(copy.deepcopy(example_inputs))
                if should_randomize_input
                else example_inputs
            )
            # need call mark_step to perform the computation
            # on randomize_input. Otherwise the first call using the
            # inputs will incur high penalty then the next one.
            maybe_mark_step(args)
            # interleave the runs to handle frequency scaling and load changes
            with maybe_mark_profile(p=p, mark="expected"):
                timings[rep, 0], expected_output = timed(
                    model,
                    model_iter_fn,
                    inputs,
                    return_result=True,
                    times=times,
                    collect_outputs=args.collect_outputs,
                )
            # call mark_step between the 2 calls to make the comparison fair.
            maybe_mark_step(args)
            with maybe_mark_profile(p=p, mark="actual"):
                timings[rep, 1], actual_output = timed(
                    model,
                    frozen_model_iter_fn,
                    inputs,
                    return_result=True,
                    times=times,
                    collect_outputs=args.collect_outputs,
                )
            if should_check_result:
                is_correct = is_correct and same(
                    expected_output, actual_output, tol=tolerance
                )
    if args.export_profiler_trace:
        name = args.profiler_trace_name + "_" + model.name + ".json"
        name = os.path.join(torch._dynamo.config.base_dir, name)
        p.export_chrome_trace(name)
    # Median over repeats; speedup is eager time / optimized time.
    median = np.median(timings, axis=0)
    speedup = median[0] / median[1]
    if args.dump_raw_metrics:
        np.save(
            f"{output_filename[:-4]}-raw_timings-{current_name}-{current_device}.npy",
            timings,
        )
    first_headers = ["dev", "name", "batch_size"]
    first_fields = [current_device, current_name, current_batch_size]
    if "tag" in kwargs:
        first_headers.append("tag")
        first_fields.append(kwargs["tag"])
    headers = first_headers + ["speedup", "abs_latency"]
    row = first_fields + [float(speedup), median[1] * 1000]
    msg = f"{speedup:.3f}x"
    if args.baseline:
        # Optionally compare against a previously recorded --output csv.
        headers.extend(
            [
                "baseline",
                "speedup_vs_baseline",
            ]
        )
        df = pd.read_csv(args.baseline)
        try:
            baseline_speedup = df[df["name"] == current_name]["speedup"].item()
            row.extend([baseline_speedup, speedup / baseline_speedup])
            msg = f"{baseline_speedup:.3f}x -> {speedup:.3f}x [{speedup / baseline_speedup:.3f}x]"
        except (KeyError, ZeroDivisionError):
            row.extend(
                [
                    0.0,
                    0.0,
                ]
            )
    if "compilation_latency" in kwargs:
        headers += [
            "compilation_latency",
            "compression_ratio",
            "eager_peak_mem",
            "dynamo_peak_mem",
        ]
        row.append(kwargs["compilation_latency"])
        row.append(kwargs["compression_ratio"])
        row.append(kwargs["eager_peak_mem"])
        row.append(kwargs["dynamo_peak_mem"])
    if "dynamo_stats" in kwargs:
        for k, v in kwargs["dynamo_stats"].items():
            headers.append(k)
            row.append(v)
    output_csv(
        output_filename,
        headers,
        row,
    )
    headers, data = torch._dynamo.utils.compile_times(repr="csv", aggregate=True)
    assert (
        output_filename.find(".csv") > 0
    ), f"expected output_filename to be a .csv, but got {output_filename}"
    output_csv(
        output_filename[:-4] + "_compilation_metrics.csv",
        first_headers + headers,
        first_fields + data,
    )
    return msg
def speedup_experiment_ds(args, model_iter_fn, model, example_inputs):
    """
    Run dynamic shapes benchmarks.

    Requires dynamic-shape-compatible models, which provide a *list* of
    example inputs (one entry per shape). Warms up using the first input
    example and then iterates the inputs, measuring (and expecting minimal)
    variance between the runtime for different examples.

    NOTE(review): `optimize_ctx` below is not a parameter of this function
    and is not visible in this scope — presumably a module-level global set
    by the driver; confirm before calling this directly.
    """
    timings = np.zeros((args.repeat, len(example_inputs), 2), np.float64)
    if args.repeat > 5:
        print(
            f"\ndynamic shapes experiments are slow, consider setting --repeat less than {args.repeat}\n"
        )
    nwarmup = 4
    for rep in range(args.repeat):
        # Start each rep fresh, e.g. only warmup on example 0
        torch._dynamo.reset()
        optimized_model_iter_fn = optimize_ctx(model_iter_fn)
        for _ in range(nwarmup):
            optimized_model_iter_fn(model, example_inputs[0])
        for input_idx, inputs in enumerate(example_inputs):
            # interleave the runs to handle frequency scaling and load changes
            timings[rep, input_idx, 0] = timed(
                model, model_iter_fn, inputs, return_result=False
            )
            # different from regular speedup_experiment, we _DO_ want to allow recompilation
            timings[rep, input_idx, 1] = timed(
                model, optimized_model_iter_fn, inputs, return_result=False
            )
    # Median across repeats, per input example; speedup per example.
    medians = np.median(timings, axis=0)
    speedups = list(medians[:, 0] / medians[:, 1])
    speedups_mean = np.mean(speedups)
    speedups_median = np.median(speedups)
    speedups_var = np.var(speedups)
    # TODO this x[0] is not going to work in general but bert only has 1 input
    shapes = [x[0].shape for x in example_inputs]
    shape_keys = sorted(set(shapes))
    # Group the per-example speedups by input shape for the report.
    shape_speedups = {
        shape: [
            it[1] for it in filter(lambda it: it[0] == shape, zip(shapes, speedups))
        ]
        for shape in shape_keys
    }
    output_str = (
        f"mean: {speedups_mean:.3f}, median: {speedups_median:.3f}, var: {speedups_var:.3f}"
        + "\nSpeedups by shape: "
        + "\n".join(
            [
                f"{shape}: "
                + ", ".join([f"{speedup: .3g}" for speedup in shape_speedups[shape]])
                for shape in shape_keys
            ]
        )
    )
    output_csv(
        output_filename,
        ("dev", "name", "batch_size", "speedup mean", "speedup median", "speedup var"),
        [
            current_device,
            current_name,
            current_batch_size,
            speedups_mean,
            speedups_median,
            speedups_var,
        ],
    )
    return output_str
def overhead_experiment(*args, model_iter_fn):
    """
    Measure overheads of TorchDynamo by running with no backend (only
    eager+FX), and reporting speedup/slowdown over eager.

    Writes to ./overheads.csv

    NOTE(review): `model_iter_fn` is forwarded *positionally* after `*args`,
    so it lands in whatever positional slot of speedup_experiment the caller
    left unbound — presumably the driver partial-binds the other arguments;
    confirm against the call sites.
    """
    return speedup_experiment(*args, model_iter_fn)
def print_fx(gm, example_inputs):
    """Debug backend: dump the captured FX graph to stdout, return gm as-is."""
    graph = gm.graph
    print(graph)
    return gm
def print_aten_ops(gm, example_inputs):
    """Debug backend: AOT-compile *gm* so the forward and backward ATen-level
    graphs are printed when they are generated."""
    from functorch.compile import aot_module

    def dump_graph(module, _):
        # Print the traced graph and hand the module back unchanged.
        print(module.graph)
        return module

    return aot_module(gm, fw_compiler=dump_graph, bw_compiler=dump_graph)
def baselines(models, model_iter_fn, example_inputs, args):
    """
    Common measurement code across all baseline experiments.

    `models` is an iterable of (name, model) pairs; the first entry is the
    reference. Backends whose output disagrees with the reference (or that
    raise) are disabled by replacing their model with None. Returns a
    formatted speedup string and appends a row to the output csv.
    """
    models = list(models)
    for idx, (name, model) in enumerate(models):
        if idx == 0:
            # First model is the reference implementation.
            result0 = model_iter_fn(model, example_inputs)
        elif model is not None:
            try:
                result = model_iter_fn(model, example_inputs)
                if same(result0, result):
                    continue
                print(name, "is INCORRECT")
            except Exception:
                log.exception("error checking %s", name)
            # Mark this backend as unusable; timings below will skip it.
            models[idx] = (name, None)
    timings = np.zeros((args.repeat, len(models)), np.float64)
    # Sentinel time for disabled/failed backends.
    timings.fill(1.0e10)
    for rep in range(args.repeat):
        for idx, (name, model) in enumerate(models):
            if model is not None:
                try:
                    timings[rep, idx] = timed(model, model_iter_fn, example_inputs)
                except Exception:
                    pass
    # Two-sample t-test of each backend against the reference timings.
    pvalue = [
        ttest_ind(timings[:, 0], timings[:, i]).pvalue
        for i in range(1, timings.shape[1])
    ]
    median = np.median(timings, axis=0)
    speedup = median[0] / median[1:]
    for idx, (name, model) in enumerate(models[1:]):
        if model is None:
            speedup[idx] = 0.0
    result = " ".join(
        [
            format_speedup(s, p, m is not None)
            for s, p, m in zip(speedup, pvalue, [m for n, m in models[1:]])
        ]
    )
    output_csv(
        output_filename,
        ("dev", "name", "batch_size") + tuple(n for n, m in models[1:]),
        [current_device, current_name, current_batch_size]
        + [f"{x:.4f}" for x in speedup],
    )
    return result
def xla(args, model_iter_fn, model, example_inputs):
    """
    Compare eager execution against the same model moved to an XLA device.

    Requires torch_xla (`xm` imported elsewhere in this file). Returns a
    formatted speedup string and appends a row to the output csv.
    """
    xla_dev = xm.xla_device(devkind=current_device)
    # Route the copy through CPU before placing it on the XLA device.
    model_xla = copy.deepcopy(model).to("cpu").to(device=xla_dev)
    example_inputs_xla = tree_map_only(
        torch.Tensor, lambda x: x.to("cpu").to(device=xla_dev), example_inputs
    )
    for _ in range(3):  # warmup
        timed(model, model_iter_fn, example_inputs)
        timed(model_xla, model_iter_fn, example_inputs_xla)
    timings = np.zeros((args.repeat, 2), np.float64)
    # Sentinel time in case a run never completes.
    timings.fill(1.0e10)
    for rep in range(args.repeat):
        timings[rep, 0] = timed(model, model_iter_fn, example_inputs)
        timings[rep, 1] = timed(model_xla, model_iter_fn, example_inputs_xla)
    pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
    time_baseline, time_xla = np.median(timings, axis=0)
    speedup = time_baseline / time_xla
    output_csv(
        output_filename,
        ("dev", "name", "batch_size", "speedup", "time_baseline", "time_xla"),
        [
            current_device,
            current_name,
            current_batch_size,
            speedup,
            time_baseline,
            time_xla,
        ],
    )
    return format_speedup(speedup, pvalue)
def try_script(model, example_inputs):
    """TorchScript *model*, returning None when scripting is unsupported."""
    try:
        scripted = torch.jit.script(model)
    except Exception:
        return None
    return scripted
def read_batch_size_from_file(args, filename, model_name):
    """Look up the benchmarking batch size for *model_name* in *filename*.

    The file holds one ``name,batch_size`` pair per line. Returns the parsed
    batch size, or None (with a warning) when the model is not listed. A
    stored value of -1 means "unset" and raises RuntimeError.
    """
    batch_size = None
    # Allow running either from the repo root or from inside benchmarks/.
    if os.path.exists("benchmarks"):
        filename = os.path.join("benchmarks", filename)
    assert os.path.exists(filename), filename
    with open(filename, "r") as f:
        rows = [line.split(",") for line in f.readlines() if len(line.strip()) > 0]
    for cur_name, value in rows:
        if model_name == cur_name:
            batch_size = int(value)
    if batch_size is None:
        log.warning("Could not find batch size for %s", model_name)
    elif batch_size == -1:
        raise RuntimeError(
            f"Batch size is unset for {model_name} in {args.batch_size_file}"
        )
    print(f"batch size: {batch_size}")
    return batch_size
class TimeOutException(Exception):
    """Raised by the SIGALRM handler when a call exceeds its time budget."""
    pass
def alarm_handler(signum, frame):
    """SIGALRM handler: convert the alarm signal into a TimeOutException."""
    raise TimeOutException()
def exit_after(s):
    """
    Decorator to raise TimeoutException if the fn is taking more than s seconds
    to run.

    Implemented with SIGALRM, so it only works on the main thread of a Unix
    process. The pending alarm is always cancelled in ``finally``, even when
    the wrapped fn raises.
    """

    def outer(fn):
        import functools

        # functools.wraps preserves the wrapped fn's name/docstring, which
        # the `help(fn)` experiment-description helper relies on.
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(s)
            try:
                result = fn(*args, **kwargs)
            finally:
                # Cancel the alarm so it cannot fire after fn completes.
                signal.alarm(0)
            return result

        return inner

    return outer
def get_peak_memory():
    # Peak CUDA memory allocated (bytes -> GB) since the last
    # torch.cuda.reset_peak_memory_stats() call.
    return torch.cuda.max_memory_allocated() / 10**9
def null_experiment(args, model_iter_fn, model, example_inputs):
    """No-op experiment, useful for making sure TorchBenchmark alone works properly."""
    return []
def cast_to(dtype, model, inputs):
    """Cast *model* and all floating-point tensors in *inputs* to *dtype*."""
    # .half() and .to(torch.float16) are equivalent; keep the original split.
    model = model.half() if dtype == torch.float16 else model.to(dtype)

    def _cast_leaf(x):
        # Only floating-point tensors are converted; ints, bools, and
        # non-tensor leaves pass through untouched.
        if isinstance(x, torch.Tensor) and x.is_floating_point():
            return x.to(dtype)
        return x

    inputs = tree_map(_cast_leaf, inputs)
    return model, inputs
def cast_to_bf16(model, inputs):
    # Convenience wrapper: cast model and float tensors to bfloat16.
    return cast_to(torch.bfloat16, model, inputs)
def cast_to_fp16(model, inputs):
    # Convenience wrapper: cast model and float tensors to float16.
    return cast_to(torch.float16, model, inputs)
def cast_to_fp64(model, inputs):
    # Convenience wrapper: cast model and float tensors to float64
    # (used to build the "golden" accuracy reference).
    return cast_to(torch.float64, model, inputs)
def cast_to_fp32(model, inputs):
    # Convenience wrapper: cast model and float tensors to float32.
    return cast_to(torch.float32, model, inputs)
def reset_rng_state(use_xla=False):
    """Seed the torch, python, and numpy RNGs to a fixed value so repeated
    runs are reproducible; optionally also seed the XLA device RNG."""
    seed = 1337
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    if use_xla:
        xm.set_rng_state(seed, str(xm.xla_device()))
class DummyGradScaler:
    """Stand-in for torch.cuda.amp.GradScaler that performs no scaling.

    Used so training code can unconditionally call ``grad_scaler.scale(loss)``
    whether or not AMP loss scaling is enabled.
    """

    def scale(self, loss):
        return loss
def get_dynamo_stats():
    """Snapshot dynamo's counters as a collections.Counter so two snapshots
    can be subtracted to get per-run deltas."""
    # TODO: consider deepcopy'ing the entire counters struct and
    # adding a helper to do subtraction on it
    counters = torch._dynamo.utils.counters
    return collections.Counter(
        {
            "calls_captured": counters["stats"]["calls_captured"],
            "unique_graphs": counters["stats"]["unique_graphs"],
            "graph_breaks": sum(counters["graph_break"].values()),
            # NB: The plus removes zero counts
            "unique_graph_breaks": len(+counters["graph_break"]),
        }
    )
def maybe_fresh_cache(fn, is_cold_start):
    """
    Wrap *fn* so that, on cold-start runs, it executes inside a fresh
    (empty) inductor cache directory; warm runs use a no-op context.
    """
    def inner(*args, **kwargs):
        cache_minder = NullContext()
        if is_cold_start:
            cache_entries = {}
            cache_minder = fresh_inductor_cache(cache_entries)
        try:
            with cache_minder:
                return fn(*args, **kwargs)
        finally:
            # Cache dumping is disabled by default; flip dump_cache to True
            # locally to record the triton cache contents per model.
            dump_cache = False
            if dump_cache and is_cold_start:
                output_csv(
                    output_filename[:-4] + "_triton_cache.csv",
                    ["dev", "name", "batch_size", "triton_cache"],
                    [
                        current_device,
                        current_name,
                        current_batch_size,
                        cache_entries,
                    ],
                )
    return inner
@contextmanager
def maybe_init_distributed(should_init_distributed, port="6789", rank=0, world_size=1):
    """Context manager that optionally sets up (and always tears down) a
    single-node NCCL process group around the wrapped block.

    When ``should_init_distributed`` is falsy this is a complete no-op.
    """
    # To avoid multiple inheritance from _dynamo.test_case.TestCase and
    # MultiProcessTestCase, just manually implement the most important part
    # of the dynamo behavior to reset/clear.
    try:
        if should_init_distributed:
            torch.cuda.set_device(rank)
            os.environ.update(MASTER_ADDR="localhost", MASTER_PORT=port)
            torch.distributed.init_process_group(
                "nccl", rank=rank, world_size=world_size
            )
        yield
    finally:
        if should_init_distributed:
            torch.distributed.destroy_process_group()
class BenchmarkRunner:
    """Abstract base driver for one benchmark suite.

    Subclasses (torchbench / huggingface / timm runners) supply model
    discovery, loading, and per-iteration functions; this base class
    implements accuracy checking, performance measurement, batch-size
    search, and csv reporting.
    """
    def __init__(self):
        self.model_iter_fn = None
        self.grad_scaler = DummyGradScaler()
        self.autocast = NullContext
        self.optimizer = None
        self._args = None
    def setup_amp(self):
        # Select the autocast context for mixed-precision runs; GradScaler is
        # deliberately left as the no-op DummyGradScaler (see comment below).
        if self.args.amp and self.args.training and self.args.devices == ["cuda"]:
            # AMP training can lead to small loss values which can undeflow
            # gradient values returning in zero gradients. To solve this
            # problem, PyTorch introduces GradScaler. GradScaler is a stateful
            # structure, that scales the loss values to prevent underflow. Loss
            # values are big at the beginning of training (therefore not
            # requiring scaling), while loss value tends to be small as network
            # starts getting better (requiring scaling). GradScaler manages all
            # of this fine tuning, checking the gradients are turning to inf,
            # discarding such batches.
            # Since we are not running a long iteration, default value of
            # init_scale 65536 is going to turn all gradients to inf. Therefore,
            # we just use a init_scale of 2.0 for benchmarking purpose.
            # Disabling Gradscaler because
            # 1) Benchmark setup runs 2 iterations of fwd-bwd. So, not useful.
            # 2) Current setup shares grad_scaler for eager and dynamo model,
            # which is bad as Gradscaler has state and can adjust the scaling
            # factor between eager and dynamo run, making accuracy check
            # harder.
            # self.grad_scaler = torch.cuda.amp.GradScaler(init_scale=2.0)
            self.autocast = torch.cuda.amp.autocast
        elif self.args.bfloat16 and self.args.devices == ["cpu"]:
            self.autocast = torch.cpu.amp.autocast
    def init_optimizer(self, name, device, params):
        # Only build an optimizer for CUDA training runs on models that are
        # not excluded by CI_SKIP_OPTIMIZER; otherwise fall back to
        # mod.zero_grad in optimizer_zero_grad.
        if device == "cuda" and self.args.training and name not in CI_SKIP_OPTIMIZER:
            self.optimizer = torch.optim.SGD(params, lr=0.01)
        else:
            self.optimizer = None
    @property
    def args(self):
        return self._args
    @args.setter
    def args(self, args):
        self._args = args
    # The following properties are per-suite model skip/slow lists; the base
    # class returns empty sets and suite subclasses override as needed.
    @property
    def skip_models(self):
        return set()
    @property
    def skip_models_for_cuda(self):
        return set()
    @property
    def slow_models(self):
        return set()
    @property
    def very_slow_models(self):
        return set()
    @property
    def non_deterministic_models(self):
        return set()
    @property
    def skip_not_suitable_for_training_models(self):
        return set()
    @property
    def failing_torchinductor_models(self):
        return set()
    @property
    def failing_fx2trt_models(self):
        return set()
    @property
    def failing_dynamic_shape_models(self):
        return set()
    @property
    def skip_accuracy_checks_large_models_dashboard(self):
        return set()
    @property
    def skip_accuracy_check_as_eager_non_deterministic(self):
        return set()
    # NOTE(review): this @property looks stray — the method takes arguments
    # and call sites (e.g. check_accuracy below) invoke it as a regular
    # method, which only works because subclasses override it without the
    # decorator. Confirm and consider removing the decorator.
    @property
    def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
        raise NotImplementedError()
    @property
    def equal_nan(self):
        # Treat NaNs as equal during accuracy comparison except in pure
        # float32 runs.
        equal_nan = True
        if self.args.float32:
            equal_nan = False
        return equal_nan
    def iter_models(self, args):
        # Yield loaded (device, name, model, inputs, ...) tuples for every
        # selected model/device combination, skipping broken benchmarks.
        for model_name in self.iter_model_names(args):
            for device in args.devices:
                try:
                    yield self.load_model(
                        device,
                        model_name,
                        batch_size=args.batch_size,
                    )
                except NotImplementedError:
                    continue  # bad benchmark implementation
    def validate_model(self, model, example_inputs):
        """
        Runs the eager model with example inputs to ensure that eager passes.

        Operates on deep copies so the caller's model/inputs are untouched;
        raises NotImplementedError when eager itself fails.
        """
        model = copy.deepcopy(model)
        example_inputs = clone_inputs(example_inputs)
        if self.args.float32:
            model, example_inputs = cast_to_fp32(model, example_inputs)
        elif self.args.float16:
            model, example_inputs = cast_to_fp16(model, example_inputs)
        elif self.args.bfloat16:
            model, example_inputs = cast_to_bf16(model, example_inputs)
        try:
            self.model_iter_fn(model, example_inputs)
        except Exception as e:
            raise NotImplementedError("Eager model failed to run") from e
    def maybe_cast(self, model, example_inputs):
        # Return (deep-copied) model/inputs cast to the precision requested
        # on the command line; no-op when no precision flag is set.
        model = copy.deepcopy(model)
        example_inputs = clone_inputs(example_inputs)
        if self.args.float32:
            model, example_inputs = cast_to_fp32(model, example_inputs)
        elif self.args.float16:
            model, example_inputs = cast_to_fp16(model, example_inputs)
        elif self.args.bfloat16:
            model, example_inputs = cast_to_bf16(model, example_inputs)
        return model, example_inputs
    def decay_batch_exp(self, batch_size, factor=0.5, divisor=2):
        """Shrink batch_size geometrically (rounded to a multiple of divisor),
        falling back to linear decrement near zero; never goes below 0."""
        out_batch_size = batch_size * factor
        if out_batch_size > divisor:
            out_batch_size = (out_batch_size + 1) // divisor * divisor
        else:
            out_batch_size = batch_size - 1
        return max(0, int(out_batch_size))
    def batch_size_finder(self, device, model_name, initial_batch_size=1024):
        """Find the largest batch size (starting from initial_batch_size and
        decaying on failure) at which the model runs; returns 1 as a floor."""
        batch_size = initial_batch_size
        while batch_size >= 1:
            torch.cuda.empty_cache()
            try:
                device, name, model, example_inputs, _ = self.load_model(
                    device,
                    model_name,
                    batch_size,
                )
                self.model_iter_fn(model, example_inputs)
                return batch_size
            except RuntimeError as e:
                error_str = str(e)
                if "channels_last" in error_str:
                    break
            batch_size = self.decay_batch_exp(batch_size)
        return 1
    def run_n_iterations(self, mod, inputs):
        # Run args.iterations iterations; only the last one collects outputs.
        n = self.args.iterations
        for _ in range(n - 1):
            self.model_iter_fn(mod, inputs, collect_outputs=False)
        return self.model_iter_fn(mod, inputs, collect_outputs=True)
    def optimizer_zero_grad(self, mod):
        if self.optimizer is not None:
            self.optimizer.zero_grad(True)
        else:
            mod.zero_grad(True)
    def optimizer_step(self):
        if self.optimizer is not None:
            self.optimizer.step()
    def get_benchmark_indices(self, length):
        # Split [0, length) into total_partitions contiguous chunks and return
        # the [start, end) range for this partition (last partition absorbs
        # the remainder).
        start = self._args.partition_id * (length // self._args.total_partitions)
        end = (
            (self._args.partition_id + 1) * (length // self._args.total_partitions)
            if self._args.partition_id < self._args.total_partitions - 1
            else length
        )
        return start, end
    def check_accuracy(
        self, name, model, example_inputs, optimize_ctx, experiment, tag
    ):
        """
        Checks accuracy.
        1) Collect the outputs with fp64 datatype. This is useful for error checking.
        2) Checks if eager itself has variations.

        Writes an accuracy-status row to the output csv and returns the
        status string (e.g. "pass", "fail_accuracy", "eager_two_runs_differ").
        """
        start_stats = get_dynamo_stats()
        def record_status(accuracy_status, dynamo_start_stats):
            """
            Records the status in the csv file
            """
            if current_name in self.non_deterministic_models:
                if accuracy_status in (
                    "pass",
                    "eager_two_runs_differ",
                    "fail_accuracy",
                ):
                    accuracy_status = "pass"
            headers = ["dev", "name", "batch_size", "accuracy"]
            fields = [current_device, current_name, current_batch_size, accuracy_status]
            if tag is not None:
                headers.insert(3, "tag")
                fields.insert(3, tag)
            dynamo_stats = get_dynamo_stats()
            dynamo_stats.subtract(dynamo_start_stats)
            for k, v in dynamo_stats.items():
                headers.append(k)
                fields.append(v)
            output_csv(output_filename, headers, fields)
            return accuracy_status
        if name in self.skip_accuracy_checks_large_models_dashboard:
            return record_status("pass_due_to_skip", dynamo_start_stats=start_stats)
        def deepcopy_and_maybe_ddp(model):
            # Fresh copy per run, optionally wrapped for distributed modes.
            model = copy.deepcopy(model)
            if self.args.ddp:
                model = DDP(model, find_unused_parameters=True)
            elif self.args.fsdp:
                model = FSDP(model, use_orig_params=True)
                if torch._inductor.config.triton.cudagraphs:
                    log.warning("Disabling cudagraphs for FSDP compatibility")
                    torch._inductor.config.triton.cudagraphs = False
            return model
        # Collect the fp64 reference outputs to be used later for accuracy checking.
        fp64_outputs = None
        try:
            model_fp64, inputs_fp64 = cast_to_fp64(
                deepcopy_and_maybe_ddp(model),
                clone_inputs(example_inputs),
            )
            self.init_optimizer(name, current_device, model_fp64.parameters())
            fp64_outputs = self.run_n_iterations(model_fp64, inputs_fp64)
        except Exception:
            log.warning(
                "fp64 golden ref were not generated for %s. Setting accuracy check to cosine",
                name,
            )
            self.args.cosine = True
            fp64_outputs = None
        tolerance, cos_similarity = self.get_tolerance_and_cosine_flag(
            self.args.training, current_device, name
        )
        # Cast the model to float16/float32 as necessary
        model, example_inputs = self.maybe_cast(model, example_inputs)
        accuracy_status = "pass"
        with self.pick_grad(name, self.args.training):
            # Get results of native pytorch
            reset_rng_state()
            try:
                model_copy = deepcopy_and_maybe_ddp(model)
                self.init_optimizer(name, current_device, model_copy.parameters())
                correct_result = self.run_n_iterations(
                    model_copy, clone_inputs(example_inputs)
                )
            except Exception as e:
                accuracy_status = (
                    "eager_1st_run_OOM"
                    if isinstance(e, torch.cuda.OutOfMemoryError)
                    else "eager_1st_run_fail"
                )
                return record_status(accuracy_status, dynamo_start_stats=start_stats)
            # Rerun native pytorch
            reset_rng_state()
            try:
                model_copy = deepcopy_and_maybe_ddp(model)
                self.init_optimizer(name, current_device, model_copy.parameters())
                correct_rerun_result = self.run_n_iterations(
                    model_copy, clone_inputs(example_inputs)
                )
            except Exception as e:
                accuracy_status = (
                    "eager_2nd_run_OOM"
                    if isinstance(e, torch.cuda.OutOfMemoryError)
                    else "eager_2nd_run_fail"
                )
                return record_status(accuracy_status, dynamo_start_stats=start_stats)
            # Two eager runs should have exactly same result
            if (
                name not in self.skip_accuracy_check_as_eager_non_deterministic
                and not same(
                    correct_result,
                    correct_rerun_result,
                    fp64_ref=None,
                    cos_similarity=False,
                    tol=0,
                    equal_nan=self.equal_nan,
                )
            ):
                accuracy_status = "eager_two_runs_differ"
                return record_status(accuracy_status, dynamo_start_stats=start_stats)
            correct_rerun_result = None
            # Run with Dynamo
            # Sometime CI fails with random triton compilation failure which will be skipped for now
            # TODO: revisit this after switching to new Triton runtime
            reset_rng_state()
            torch._dynamo.reset()
            try:
                model_copy = deepcopy_and_maybe_ddp(model)
                self.init_optimizer(name, current_device, model_copy.parameters())
                optimized_model_iter_fn = optimize_ctx(self.run_n_iterations)
                new_result = optimized_model_iter_fn(model_copy, example_inputs)
            except Exception as e:
                log.exception(e)
                if (
                    self.args.ci
                    and isinstance(e, BackendCompilerFailed)
                    and (
                        "Internal Triton PTX codegen error" in str(e)
                        or "cubin" in str(e)
                    )
                ):
                    accuracy_status = "pass_due_to_skip"
                    return record_status(
                        accuracy_status, dynamo_start_stats=start_stats
                    )
                else:
                    print(
                        "TorchDynamo optimized model failed to run because of following error"
                    )
                    accuracy_status = (
                        "OOM"
                        if isinstance(e, torch.cuda.OutOfMemoryError)
                        else "fail_to_run"
                    )
                    return record_status(
                        accuracy_status, dynamo_start_stats=start_stats
                    )
            if name in self.skip_accuracy_check_as_eager_non_deterministic:
                return record_status("pass_due_to_skip", dynamo_start_stats=start_stats)
            if not same(
                correct_result,
                new_result,
                fp64_outputs,
                equal_nan=self.equal_nan,
                cos_similarity=cos_similarity,
                tol=tolerance,
            ):
                if self.args.skip_accuracy_check:
                    accuracy_status = "pass_due_to_skip"
                else:
                    accuracy_status = "fail_accuracy"
                return record_status(accuracy_status, dynamo_start_stats=start_stats)
        return record_status(accuracy_status, dynamo_start_stats=start_stats)
    def run_performance_test(
        self, name, model, example_inputs, optimize_ctx, experiment, tag=None
    ):
        """Warm up eager and optimized variants, collect latency/memory stats,
        then run the configured experiment and return its formatted result."""
        if self.args.xla:
            with self.pick_grad(name, self.args.training):
                return experiment(*self.maybe_cast(model, example_inputs))
        def warmup(fn, model, example_inputs, mode, niters=5):
            # Returns (total latency of niters calls, peak memory in GB,
            # dynamo counter deltas). Exits the process if the backend fails.
            peak_mem = 0
            start_stats = get_dynamo_stats()
            try:
                if current_device == "cuda":
                    torch.cuda.reset_peak_memory_stats()
                    torch.cuda.empty_cache()
                t0 = time.perf_counter()
                for _ in range(niters):
                    fn(model, example_inputs)
                t1 = time.perf_counter()
                latency = t1 - t0
                if current_device == "cuda":
                    peak_mem = get_peak_memory()
                elif current_device == "cpu":
                    total = psutil.virtual_memory().total
                    percentage = psutil.Process(os.getpid()).memory_percent()
                    peak_mem = percentage * total / 10**9
            except Exception:
                log.exception("Backend %s failed in warmup()", mode)
                return sys.exit(-1)
            dynamo_stats = get_dynamo_stats()
            dynamo_stats.subtract(start_stats)
            return latency, peak_mem, dynamo_stats
        # Cast the model to float16/float32 as necessary
        model, example_inputs = self.maybe_cast(model, example_inputs)
        self.init_optimizer(name, current_device, model.parameters())
        with self.pick_grad(name, self.args.training):
            ok, total = Stats.reset_counters()
            experiment_kwargs = {}
            if tag is not None:
                experiment_kwargs["tag"] = tag
            results = []
            eager_latency, eager_peak_mem, _ = warmup(
                self.model_iter_fn, model, example_inputs, "eager"
            )
            optimized_model_iter_fn = optimize_ctx(self.model_iter_fn)
            dynamo_latency, dynamo_peak_mem, dynamo_stats = warmup(
                optimized_model_iter_fn, model, example_inputs, "dynamo"
            )
            # Warmup-latency difference is used as a proxy for compile time.
            compilation_time = dynamo_latency - eager_latency
            compression_ratio = (
                eager_peak_mem / dynamo_peak_mem if dynamo_peak_mem else 0.0
            )
            if self.args.print_memory:
                print(
                    f"memory: eager: {eager_peak_mem:.2f} GB, "
                    f"dynamo: {dynamo_peak_mem:.2f} GB, "
                    f"ratio: {compression_ratio:.2f}"
                )
            if experiment.func is speedup_experiment:
                experiment_kwargs["compilation_latency"] = compilation_time
                experiment_kwargs["compression_ratio"] = compression_ratio
                experiment_kwargs["eager_peak_mem"] = eager_peak_mem
                experiment_kwargs["dynamo_peak_mem"] = dynamo_peak_mem
                experiment_kwargs["dynamo_stats"] = dynamo_stats
            if experiment.func is coverage_experiment:
                ok, total = Stats.reset_counters()
                results = []
                # run with torch._dynamo few times to populate the cache
                for _ in range(3):
                    optimized_model_iter_fn(model, example_inputs)
                _, frames_second_pass = Stats.reset_counters()  # should be 0
                if frames_second_pass > 0:
                    optimized_model_iter_fn(model, example_inputs)
                    _, frames_third_pass = Stats.reset_counters()  # should be 0
                else:
                    frames_third_pass = 0
                results.append(
                    f"{ok:3}/{total:3} +{frames_third_pass} frames {compilation_time:3.0f}s"
                )
            # NOTE(review): this tests for an attribute *named by the value
            # of* `name` (e.g. "resnet50"), which is almost always absent —
            # presumably `hasattr(model, "name")` was intended; confirm.
            if not hasattr(model, name):
                model.name = name
            results.append(experiment(model, example_inputs, **experiment_kwargs))
            return " ".join(map(str, results))
    def run_one_model(
        self,
        name,
        model,
        example_inputs,
        optimize_ctx,
        experiment,
        explain=False,
        tag=None,
    ):
        # Top-level entry per model: dispatch to accuracy or performance
        # mode, then emit timing / graph-break / stats reports as requested.
        mode = "train" if self.args.training else "eval"
        msg = f"{current_device:4} {mode:5} {current_name:34} "
        if tag:
            msg += f" {tag:26}"
        print(msg, end=" ", flush=True)
        start_stats = get_dynamo_stats()
        if self.args.accuracy:
            status = self.check_accuracy(
                name, model, example_inputs, optimize_ctx, experiment, tag
            )
            print(status)
        elif self.args.performance:
            status = self.run_performance_test(
                name, model, example_inputs, optimize_ctx, experiment, tag
            )
            print(status)
        if self.args.timing:
            from torch._dynamo.utils import op_count, print_time_report
            from torch.utils._stats import simple_call_counter
            print_time_report()
            stats = "STATS: "
            stats = stats + " | ".join(
                itertools.chain(
                    [f"call_* op count: {op_count}"],
                    (f"{key}:{value}" for key, value in simple_call_counter.items()),
                )
            )
            print(stats)
        stats = get_dynamo_stats()
        stats.subtract(start_stats)
        if explain:
            print(
                f"Dynamo produced {stats['unique_graphs']} graphs "
                f"covering {stats['calls_captured']} ops with "
                f"{stats['graph_breaks']} graph breaks ({stats['unique_graph_breaks']} unique)"
            )
        if explain or self.args.log_graph_breaks or self.args.print_graph_breaks:
            # NOTE(review): str.rstrip('.csv') strips any trailing '.', 'c',
            # 's', 'v' characters, not the literal ".csv" suffix — a name
            # like "speedups.csv" loses extra letters ("speedup"); a suffix
            # slice was presumably intended. Confirm before relying on the
            # generated filename.
            filename = f"{output_filename.rstrip('.csv')}_graph_breaks.csv"
            def add_double_quotes(x):
                # Delimiter because reason could have comma
                return f'"{x}"'
            for graph_break in graph_break_reasons:
                reason = add_double_quotes(graph_break.reason)
                user_stack = add_double_quotes(
                    ", ".join([str(x) for x in graph_break.user_stack])
                )
                output_csv(
                    filename,
                    ["model", "reason", "user_stack"],
                    [current_name, reason, user_stack],
                )
        if self.args.stats:
            Stats.print_summary()
def help(fn):
    """Return *fn*'s docstring (used as the experiment description).

    Intentionally shadows the builtin ``help`` within this module.
    """
    return fn.__doc__
# Sentinel default for --diff-branch: comparing against it tells us whether
# the user explicitly requested a branch-diff run.
diff_branch_default = "DIFF-BRANCH-DEFAULT"
def should_diff_branch(args):
    """Return True when the user passed an explicit --diff-branch value."""
    return args.diff_branch != diff_branch_default
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument(
"--filter", "-k", action="append", help="filter benchmarks with regexp"
)
parser.add_argument(
"--exclude", "-x", action="append", help="filter benchmarks with regexp"
)
parser.add_argument(
"--exclude-exact", action="append", help="filter benchmarks with exact match"
)
parser.add_argument(
"--total-partitions",
type=int,
default=1,
choices=range(1, 10),
help="Total number of partitions we want to divide the benchmark suite into",
)
parser.add_argument(
"--partition-id",
type=int,
default=0,
help="ID of the benchmark suite partition to be run. Used to divide CI tasks",
)
parser.add_argument(
"--devices", "--device", "-d", action="append", help="cpu or cuda"
)
parser.add_argument("--device-index", help="CUDA device index")
parser.add_argument(
"--repeat", "-n", type=int, default=30, help="number of timing runs"
)
iterations_per_run_help = """
Run this may iterations for each time measurement. This is mainly used for
XLA training. We want to run multiple iterations per measurement so the
tracing and computation for different iteartions can overlap with each
other. This makes sure we have an accurate xla baseline.
"""
parser.add_argument(
"--iterations-per-run", type=int, default=1, help=iterations_per_run_help
)
parser.add_argument(
"--randomize-input",
action="store_true",
help="Whether to randomize the input values. Dimensions will be kept the same.",
)
parser.add_argument(
"--threads",
"-t",
type=int,
help="number of threads to use for eager and inductor",
)
parser.add_argument(
"--nopython", action="store_true", help="Turn graph breaks into errors"
)
parser.add_argument(
"--no-skip",
action="store_true",
help="run models that are in the global SKIP list",
)
parser.add_argument(
"--prims-nvfuser", action="store_true", help="user prims + nvfuser backend"
)
parser.add_argument(
"--dump-raw-metrics",
action="store_true",
help="dump raw timing metrics from speedup experiment",
)
parser.add_argument(
"--log-operator-inputs",
action="store_true",
default=False,
)
parser.add_argument(
"--channels-last",
action="store_true",
default=False,
help="use channels last format",
)
parser.add_argument(
"--batch-size", "--batch_size", type=int, help="batch size for benchmarking"
)
parser.add_argument(
"--iterations", type=int, default=2, help="how many iterations to run"
)
parser.add_argument(
"--batch-size-file", type=str, help="String to load batch size from"
)
parser.add_argument("--cosine", action="store_true", help="use cosine similarity")
parser.add_argument(
"--cpp-wrapper", action="store_true", help="turn on cpp/cuda wrapper codegen"
)
parser.add_argument(
"--ci", action="store_true", help="Flag to tell that its a CI run"
)
parser.add_argument(
"--dynamic-ci-skips-only",
action="store_true",
help=(
"Run only the models that would have been skipped in CI "
"if dynamic-shapes, compared to running without dynamic-shapes. "
"This is useful for checking if more models are now "
"successfully passing with dynamic shapes. "
"Implies --dynamic-shapes and --ci"
),
)
parser.add_argument(
"--dashboard", action="store_true", help="Flag to tell that its a Dashboard run"
)
parser.add_argument(
"--skip-fp64-check", action="store_true", help="skip accuracy check using fp64"
)
parser.add_argument(
"--fast", "-f", action="store_true", help="skip slow benchmarks"
)
parser.add_argument(
"--only",
help="""Run just one model from torchbench. Or
specify the path and class name of the model in format like:
--only=path:<MODEL_FILE_PATH>,class:<CLASS_NAME>
Due to the fact that dynamo changes current working directory,
the path should be an absolute path.
The class should have a method get_example_inputs to return the inputs
for the model. An example looks like
```
class LinearModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 10)
def forward(self, x):
return self.linear(x)
def get_example_inputs(self):
return (torch.randn(2, 10),)
```
""",
)
parser.add_argument(
"--ddp",
action="store_true",
help="Wraps model in DDP before running it, and uses dynamo DDPOptmizer (graph breaks) by default.",
)
parser.add_argument(
"--fsdp",
action="store_true",
help="""Wraps model in FSDP before running it. Disables cudagraphs by default.
Doesn't recursively wrap, mainly useful for checking dynamo UnspecNNModule compatibility
""",
)
parser.add_argument(
"--no-optimize-ddp",
action="store_true",
help="Disables dynamo DDPOptimizer (graph breaks). (Applies only when using --ddp benchmark mode).",
)
parser.add_argument(
"--distributed-master-port",
default="6789",
help="Port to bind for for torch.distributed. Use the default unless it's conflicting with another user",
)
parser.add_argument(
"--dynamic-shapes",
action="store_true",
help="Runs a dynamic shapes version of the benchmark, if available.",
)
parser.add_argument(
"--dynamic-batch-only",
action="store_true",
help="Only assume batch dimension is dynamic. Implies --dynamic-shapes",
)
parser.add_argument(
"--specialize-int", action="store_true", help="Run with specialize_int=True."
)
parser.add_argument(
"--use-eval-mode",
action="store_true",
help="sets model.eval() to reduce randomness",
)
parser.add_argument(
"--skip-accuracy-check",
action="store_true",
help="keeps running even when accuracy fails",
)
parser.add_argument(
"--generate-aot-autograd-stats",
action="store_true",
help="Generates AOT Autograd stats like how mnay graphs are sent to AOT",
)
parser.add_argument(
"--inductor-settings",
action="store_true",
help="Use same settings as --inductor for baseline comparisons",
)
parser.add_argument(
"--suppress-errors",
action="store_true",
help="Suppress errors instead of raising them",
)
parser.add_argument(
"--output",
help="Overrides the output filename",
)
parser.add_argument(
"--output-directory",
help="Overrides the directory to place output files.",
)
parser.add_argument(
"--baseline",
help="Compare with a prior --output",
)
parser.add_argument(
"--part",
default=None,
help="Specify the part of the model to run.",
)
parser.add_argument(
"--export-profiler-trace",
action="store_true",
help="exports trace of kineto profiler",
)
parser.add_argument(
"--profiler-trace-name",
"--profiler_trace_name",
help="Overwrites exported trace name",
)
parser.add_argument(
"--diff-branch",
default=diff_branch_default,
help="delta current branch against given branch.",
)
parser.add_argument(
"--tag", default=None, help="Specify a tag to be included in csv files."
)
parser.add_argument(
"--explain",
action="store_true",
help="print some graph/op statistics during the run, similar to .explain()",
)
parser.add_argument(
"--stats",
action="store_true",
help="print graph counter stats",
)
parser.add_argument(
"--print-memory",
action="store_true",
help="print extra memory statistics",
)
parser.add_argument(
"--cold-start-latency",
"--cold_start_latency",
action="store_true",
help="Use a fresh triton cachedir when running each model, to force cold-start compile.",
)
parser.add_argument(
"--disable-cudagraphs",
action="store_true",
help="Disables cudagraphs for Inductor",
)
parser.add_argument(
"--disable-split-reductions",
action="store_true",
help="Disables split reductions for Inductor",
)
parser.add_argument(
"--disable-persistent-reductions",
action="store_true",
help="Disables split reductions for Inductor",
)
parser.add_argument(
"--disable-divisible-by-16",
action="store_true",
help="Disables divisible by 16 hint to Triton for Inductor",
)
parser.add_argument(
"--inductor-compile-mode",
default=None,
help="torch.compile mode argument for inductor runs.",
)
parser.add_argument(
"--print-graph-breaks",
action="store_true",
help="Show a warning whenever graph break",
)
parser.add_argument(
"--log-graph-breaks",
action="store_true",
help="log graph breaks in a file",
)
parser.add_argument(
"--trace-on-xla",
action="store_true",
help="Whether to trace the model on XLA or on eager device",
)
parser.add_argument(
"--xla-tolerance",
type=float,
default=1e-2,
help="XLA needs a loose tolerance to pass the correctness check",
)
parser.add_argument(
"--collect-outputs",
action="store_true",
help="""Whether to collect outputs for training. Set this to true if we
want to verify the numerical correctness of graidents. But that may
cause time measurement not accurate""",
)
parser.add_argument("--timing", action="store_true", help="Emits phase timing")
parser.add_argument(
"--progress",
action="store_true",
help="Print n/k models message between each model run.",
)
parser.add_argument(
"--timeout",
type=int,
default=1800,
help="timeout (second) for benchmarking.",
)
parser.add_argument(
"--per_process_memory_fraction",
type=float,
default=1,
help="Set per-process GPU memory fraction (limit) for reducing usable size and reproducing OOMs",
)
group_fuser = parser.add_mutually_exclusive_group()
# --nvfuser is now the default, keep the option to not break scripts
group_fuser.add_argument("--nvfuser", action="store_true", help=argparse.SUPPRESS)
group_fuser.add_argument("--nnc", action="store_true", help="enable NNC for GPUs")
group_prec = parser.add_mutually_exclusive_group()
group_prec.add_argument("--float16", action="store_true", help="cast model to fp16")
group_prec.add_argument(
"--bfloat16", action="store_true", help="cast model to bf16"
)
group_prec.add_argument("--float32", action="store_true", help="cast model to fp32")
group_prec.add_argument(
"--amp", action="store_true", help="use automatic mixed precision"
)
group_printout = parser.add_mutually_exclusive_group()
group_printout.add_argument(
"--verbose", "-v", action="store_true", help="enable verbose debug printouts"
)
group_printout.add_argument(
"--quiet", "-q", action="store_true", help="suppress debug printouts"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--coverage", action="store_true", help="(default) " + help(coverage_experiment)
)
group.add_argument(
"--overhead", action="store_true", help=help(overhead_experiment)
)
group.add_argument(
"--speedup-dynamo-ts",
action="store_true",
help="TorchDynamo frontend with torchscript backend",
)
group.add_argument(
"--speedup-fx2trt", action="store_true", help=help(speedup_experiment_fx2trt)
)
group.add_argument(
"--speedup-fx2trt-fp16",
action="store_true",
help=help(speedup_experiment_fx2trt),
)
group.add_argument(
"--print-fx",
action="store_true",
help="Print fx traces captured from model",
)
group.add_argument(
"--print-aten-ops",
action="store_true",
help="Print traces of aten ops captured by AOT autograd",
)
group.add_argument(
"--inductor",
action="store_true",
help="Measure speedup with TorchInductor",
)
group.add_argument(
"--xla", action="store_true", help="Compare TorchXLA to eager PyTorch"
)
group.add_argument(
"--backend",
choices=torch._dynamo.list_backends(exclude_tags=None),
help="measure speedup with a given backend",
)
group.add_argument("--nothing", action="store_true", help=help(null_experiment))
group.add_argument(
"--log-conv-args",
action="store_true",
help="Dump convolution input/weight/bias's shape/stride/dtype and other options to json",
)
group.add_argument(
"--recompile-profiler",
"--recompile_profiler",
action="store_true",
help="Run the dynamo recompilation profiler on each model.",
)
group.add_argument(
"--find-batch-sizes",
action="store_true",
help="finds the largest batch size that could fit on GPUs",
)
mode_group = parser.add_mutually_exclusive_group(required=True)
mode_group.add_argument(
"--accuracy",
action="store_true",
help="Checks accuracy with small batch size and eval mode",
)
mode_group.add_argument(
"--performance", action="store_true", help="Measures performance speedup"
)
run_mode_group = parser.add_mutually_exclusive_group(required=True)
run_mode_group.add_argument(
"--training",
action="store_true",
help="Performs training",
)
run_mode_group.add_argument(
"--inference", action="store_true", help="Performs inference"
)
return parser.parse_args(args)
def main(runner, original_dir=None):
    """Shared entry point for the suite-specific benchmark scripts.

    Parses CLI arguments, validates the git working tree when --diff-branch
    is requested, then runs the benchmark inside the optional distributed
    and fresh-cache context wrappers.
    """
    if original_dir:
        # Dynamo changes the current working directory (see the --only help
        # text); restore the caller's directory before resolving any paths.
        os.chdir(original_dir)
    args = parse_args()
    if args.baseline:
        # Resolve now, while the CWD is still the caller's directory.
        args.baseline = os.path.abspath(args.baseline)
    if should_diff_branch(args):
        import git

        # We do this here so we error out earlier if there's an issue
        repo = git.Repo()
        if repo.is_dirty():
            raise RuntimeError(
                "--diff-branch called on dirty branch. Commit, stash, or reset."
            )
        main_branch = repo.active_branch.name
        if main_branch == args.diff_branch:
            raise RuntimeError(
                f"--diff-branch: current branch is same as {args.diff_branch} branch, what are you diffing?"
            )
    # Distributed init only applies for --ddp/--fsdp single-model runs; the
    # fresh-cache wrapper forces cold-start compilation (or is on for CI).
    with maybe_init_distributed(
        (args.ddp or args.fsdp) and args.only, port=args.distributed_master_port
    ):
        return maybe_fresh_cache(
            run, (args.cold_start_latency and args.only) or args.ci
        )(runner, args, original_dir)
def run(runner, args, original_dir=None):
    """Configure global benchmark state from *args*, then execute the run.

    Mutates ``args`` in place (derived defaults, CI skip lists), applies
    global torch/_dynamo/inductor configuration, selects the experiment
    function and output CSV name, and finally dispatches to one of three
    modes: branch-diffing (re-invokes itself per branch), a single model
    (``--only``), or a fan-out over every model in subprocesses.
    Statement order matters: config flags are set before ``optimize_ctx``
    is constructed and before any model is loaded.
    """
    # Pass the parsed args object to benchmark runner object
    runner.args = args
    # --- Fill in defaults and flags implied by other flags ---
    args.filter = args.filter or [r"."]
    args.exclude = args.exclude or [r"^$"]
    args.exclude_exact = args.exclude_exact or []
    if args.inductor:
        assert args.backend is None
        args.backend = "inductor"
    if args.dynamic_ci_skips_only:
        args.dynamic_shapes = True
        args.ci = True
    if args.dynamic_batch_only:
        args.dynamic_shapes = True
        torch._dynamo.config.assume_static_by_default = True
    if args.dynamic_shapes:
        torch._dynamo.config.dynamic_shapes = True
        if not args.dynamic_batch_only:
            torch._dynamo.config.assume_static_by_default = False
    if args.specialize_int:
        torch._dynamo.config.specialize_int = True
    if args.ci:
        if args.accuracy:
            # Run fewer iterations when checking accuracy
            args.repeat = 2
        if args.dynamic_ci_skips_only:
            # Test only the incremental set of jobs whose skipped was
            # caused solely by turning on dynamic shapes
            assert args.dynamic_shapes
            ci = functools.partial(CI, args.backend, training=args.training)
            args.filter = list(
                set(CI_SKIP[ci(dynamic=True)]) - set(CI_SKIP[ci(dynamic=False)])
            )
        else:
            ci = functools.partial(
                CI, args.backend, training=args.training, dynamic=args.dynamic_shapes
            )
            for device in args.devices:
                args.exclude_exact.extend(CI_SKIP[ci(device=device)])
    if args.ddp:
        # TODO: we could also hook DDP bench up to --speedup bench, _not_ for mgpu e2e perf,
        # but just to measure impact on singlenode of performing graph-breaks.
        # Left it as a follow up to keep this PR isolated.
        assert (
            args.accuracy
        ), "DDP benchmark is currently only hooked up to --accuracy bench"
        assert args.training, "DDP benchmark requires --training mode"
        if args.no_optimize_ddp:
            torch._dynamo.config.optimize_ddp = False
        else:
            # TODO(whc) after enabling DDPOptimizer by default this could be removed or assert
            torch._dynamo.config.optimize_ddp = True
        if args.only == "dlrm":
            log.error(
                "DLRM+DDP is unsupported as it requires sharding the embedding layer separately from DDP"
            )
            return sys.exit(-1)
    # --- Accuracy mode: small batches and determinism; performance mode: real eval/train ---
    if args.accuracy:
        # Use small batch size. We use >1 batch size to ensure we test
        # batch_norm type of operators that work on batch dims.
        # TODO - Go through the failures for batch size = 2
        if args.batch_size is None:
            if runner.suite_name == "huggingface":
                args.batch_size = 1
            elif runner.suite_name == "torchbench":
                args.batch_size = 4
            else:
                # Larger batch size of TIMM models to have stable batch_norm
                assert runner.suite_name == "timm_models"
                args.batch_size = 8
        # Remove sources of randomness
        if runner.suite_name != "timm_models":
            # TODO - Using train mode for timm_models. Move to train mode for HF and Torchbench as well.
            args.use_eval_mode = True
        inductor_config.fallback_random = True
        if args.only is not None and args.only not in {
            "alexnet",
            "Background_Matting",
            "pytorch_CycleGAN_and_pix2pix",
            "pytorch_unet",
            "Super_SloMo",
            "vgg16",
            "vision_maskrcnn",
        }:
            # some of the models do not support use_deterministic_algorithms
            torch.use_deterministic_algorithms(True)
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.allow_tf32 = False
        torch.backends.cudnn.benchmark = False
        torch.backends.cuda.matmul.allow_tf32 = False
        # Remove randomeness when torch manual seed is called
        patch_torch_manual_seed()
        # Some models e.g. yolov3 assert batch size on n_gpus
        if "CUDA_VISIBLE_DEVICES" not in os.environ:
            args.device_index = "0"
        # Stricter check to disable fallbacks
        args.suppress_errors = False
        if args.device_index is not None:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device_index
    elif args.performance:
        # Ensure that we test on real scenarios
        args.use_eval_mode = False
    if args.partition_id > args.total_partitions or args.partition_id < 0:
        print("Invalid partition id")
        return sys.exit(-1)
    # --- Device selection and device-specific skip lists ---
    if not args.devices:
        if torch.cuda.is_available():
            args.devices = ["cuda"]
        else:
            log.warning("torch.cuda.is_available() == False, using CPU")
            args.devices = ["cpu"]
    if args.devices != ["cpu"] and torch.cuda.is_available():
        global synchronize
        synchronize = torch.cuda.synchronize
    if (
        args.devices == ["cuda"]
        and torch.cuda.get_device_properties(0).total_memory < 25 * 2**30
    ):
        # OOM errors on an RTX 3090 with 24gb RAM
        runner.skip_models.update(
            {
                # torchbench
                "hf_Longformer",
                "timm_nfnet",
                "timm_efficientdet",
            }
        )
        if args.training:
            runner.skip_models.add("hf_T5")
    if torch._dynamo.config.dynamic_shapes:
        # TODO(jansel): fix bugs in these
        runner.skip_models.update(runner.failing_dynamic_shape_models)
    if args.nnc:
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
        torch._C._jit_set_nvfuser_enabled(False)
    if args.threads:
        torch.set_num_threads(args.threads)
    if args.verbose:
        torch._logging.set_logs(dynamo=logging.DEBUG)
    if args.print_graph_breaks:
        torch._dynamo.config.print_graph_breaks = True
    if args.quiet:
        torch._logging.set_logs(dynamo=logging.ERROR)
    torch._dynamo.config.suppress_errors = args.suppress_errors
    if args.training:
        runner.model_iter_fn = runner.forward_and_backward_pass
        runner.skip_models.update(runner.skip_not_suitable_for_training_models)
    else:
        runner.model_iter_fn = runner.forward_pass
    if args.fast:
        runner.skip_models.update(runner.slow_models)
    if args.devices == ["cpu"]:
        runner.skip_models.update(runner.very_slow_models)
    elif args.devices == ["cuda"]:
        runner.skip_models.update(runner.skip_models_for_cuda)
    if args.inductor or args.inductor_settings:
        runner.skip_models.update(runner.failing_torchinductor_models)
        if args.float16:
            # TODO(jansel): check if correctness issue is real
            runner.skip_models.add("yolov3")
    if args.float16:
        # these give `INCORRECT - Variation in Eager runs itself` sometimes
        runner.non_deterministic_models.update(
            {
                "demucs",
                "pyhpc_equation_of_state",
                "timm_efficientdet",
                "pyhpc_isoneutral_mixing",
                "pyhpc_turbulent_kinetic_energy",
                "shufflenet_v2_x1_0",
            }
        )
    if args.no_skip:
        runner.skip_models.clear()
    # --- Pick the experiment function, optimize context, and output CSV ---
    experiment = null_experiment
    global current_name, current_device, current_batch_size, output_filename, optimize_ctx
    optimize_ctx = NullContext()
    if args.overhead:
        optimize_ctx = torch._dynamo.optimize(dummy_fx_compile, nopython=args.nopython)
        experiment = speedup_experiment
        output_filename = "overheads.csv"
    elif args.inductor:
        inductor_config.debug = args.verbose
        if (
            args.ci
            and args.accuracy
            and args.training
            and args.only in {"dla102", "gernet_l"}
        ):
            # Log generated code for flaky tests, to check if there is any codegen difference
            inductor_config.debug = True
        if args.threads:
            inductor_config.cpp.threads = args.threads
        optimize_ctx = functools.partial(
            torch.compile,
            backend="inductor",
            fullgraph=args.nopython,
            mode=args.inductor_compile_mode,
        )
        experiment = speedup_experiment
        output_filename = "inductor.csv"
    elif args.xla:
        (dev,) = args.devices
        os.environ["PJRT_DEVICE"] = {"cuda": "GPU", "cpu": "CPU"}[dev]
        torch._dynamo.mark_dynamic = MagicMock()
        experiment = xla
        output_filename = "xla.csv"
    elif args.speedup_dynamo_ts:
        optimize_ctx = torch._dynamo.optimize("ts", nopython=args.nopython)
        experiment = speedup_experiment
        output_filename = "speedup_dynamo_ts.csv"
    elif args.prims_nvfuser:
        optimize_ctx = torch._dynamo.optimize("prims_nvfuser", nopython=args.nopython)
        experiment = speedup_experiment
        backend_str = "prims_nvfuser"
        output_filename = f"accuracy_aot_{backend_str}.csv"
    elif args.print_fx:
        optimize_ctx = torch._dynamo.optimize(
            print_fx,
            nopython=args.nopython,
        )
    elif args.print_aten_ops:
        optimize_ctx = torch._dynamo.optimize(
            print_aten_ops,
            nopython=args.nopython,
        )
    elif args.nothing:
        optimize_ctx = nothing
        experiment = speedup_experiment
        output_filename = "nothing.csv"
    elif args.backend:
        optimize_ctx = torch._dynamo.optimize(args.backend, nopython=args.nopython)
        experiment = speedup_experiment
        if args.accuracy:
            output_filename = f"accuracy_{args.backend}.csv"
        else:
            output_filename = f"speedup_{args.backend}.csv"
    elif args.recompile_profiler:
        output_filename = "recompile_profiler_log.csv"
        experiment = recompile_profiler_experiment
    else:
        optimize_ctx = torch._dynamo.optimize(
            fx_insert_profiling, nopython=args.nopython
        )
        experiment = coverage_experiment
        output_filename = "coverage.csv"
    if args.inductor or args.backend == "inductor":
        inductor_config.triton.cudagraphs = not args.disable_cudagraphs
        inductor_config.triton.persistent_reductions = (
            not args.disable_persistent_reductions
        )
        inductor_config.split_reductions = not args.disable_split_reductions
        inductor_config.triton.divisible_by_16 = not args.disable_divisible_by_16
        inductor_config.cpp_wrapper = args.cpp_wrapper
    runner.setup_amp()
    if args.output:
        output_filename = args.output
    if output_filename:
        if args.output_directory:
            output_filename = os.path.join(args.output_directory, output_filename)
        else:
            output_filename = os.path.join(
                torch._dynamo.config.base_dir, output_filename
            )
    if args.find_batch_sizes and args.only:
        for device in args.devices:
            batch_size = runner.batch_size_finder(device, args.only)
            print(args.only, batch_size)
            output_csv(output_filename, [], [args.only, batch_size])
        return
    if args.export_profiler_trace:
        if args.profiler_trace_name is None:
            if args.backend:
                args.profiler_trace_name = args.backend
            elif args.inductor:
                args.profiler_trace_name = "inductor"
            else:
                args.profiler_trace_name = "profile"
        else:
            # NOTE(review): self-assignment is a no-op; branch kept only for symmetry.
            args.profiler_trace_name = args.profiler_trace_name
    experiment = functools.partial(experiment, args, runner.model_iter_fn)
    # --- Dispatch: branch diff / single model / fan out over all models ---
    if args.only and should_diff_branch(args):
        import git

        repo = git.Repo()
        main_branch = repo.active_branch.name
        try:
            # Adding diff-branch again to the args will override previous value
            call_args = (
                [sys.executable] + sys.argv + [f"--diff-branch={diff_branch_default}"]
            )
            # Run for main branch
            subprocess.check_call(call_args + [f"--tag={main_branch}"])
            # Run for comparison branch
            repo.git.checkout(args.diff_branch)
            subprocess.check_call(call_args + [f"--tag={args.diff_branch}"])
        finally:
            # Go back to main branch
            repo.git.checkout(main_branch)
    elif args.only:
        model_name = args.only
        for device in args.devices:
            batch_size = args.batch_size
            if args.batch_size_file:
                batch_size = read_batch_size_from_file(
                    args, args.batch_size_file, model_name
                )
            if model_specified_by_path(args.only):
                model, example_inputs = load_model_from_path(args.only)
                name = model.__class__.__name__
                model = model.to(device=device)
                example_inputs = tree_map_only(
                    torch.Tensor, lambda x: x.to(device=device), example_inputs
                )
            else:
                try:
                    if args.part:
                        (
                            device,
                            name,
                            model,
                            example_inputs,
                            batch_size,
                        ) = runner.load_model(
                            device, model_name, batch_size=batch_size, part=args.part
                        )
                    else:
                        (
                            device,
                            name,
                            model,
                            example_inputs,
                            batch_size,
                        ) = runner.load_model(device, model_name, batch_size=batch_size)
                except NotImplementedError as e:
                    print(e)
                    import traceback

                    print(traceback.format_exc())
                    logging.warning("%s failed to load", args.only)
                    continue  # bad benchmark implementation
            if args.trace_on_xla:
                xla_dev = xm.xla_device()
                model = model.to(device=xla_dev)
                example_inputs = tree_map_only(
                    torch.Tensor, lambda x: x.to(device=xla_dev), example_inputs
                )
            current_name = name
            current_device = device
            current_batch_size = batch_size
            set_model_name(name)
            if args.float32:
                model, example_inputs = cast_to_fp32(model, example_inputs)
            elif args.float16:
                model, example_inputs = cast_to_fp16(model, example_inputs)
            elif args.bfloat16:
                model, example_inputs = cast_to_bf16(model, example_inputs)
            # Look for stuff that looks like batch size, and mark it dynamic.
            # Better integration would integrate directly with benchmark suite
            # but cannot conveniently do this
            # NB: This must be done late enough so that we don't do more
            # conversions on the inputs
            # NB: Assumes only the first batch-y like dimension is the batch
            marked = False

            def detect_and_mark_batch(t):
                # Mark the first dim equal to batch_size as dynamic.
                nonlocal marked
                for i, s in enumerate(t.size()):
                    if s == batch_size:
                        torch._dynamo.mark_dynamic(t, i)
                        marked = True
                        break

            if args.dynamic_batch_only and batch_size > 1:
                tree_map_only(torch.Tensor, detect_and_mark_batch, example_inputs)
                assert marked, f"nothing in example_inputs had a dim with {batch_size}"
            if args.log_operator_inputs:
                log_operator_inputs(
                    model, example_inputs, runner.model_iter_fn, name, args
                )
                continue
            if args.per_process_memory_fraction != 1:
                torch.cuda.set_per_process_memory_fraction(
                    args.per_process_memory_fraction
                )
            runner.run_one_model(
                name,
                model,
                example_inputs,
                optimize_ctx,
                experiment,
                explain=args.explain,
                tag=args.tag,
            )
        if args.generate_aot_autograd_stats:
            stats_file = output_filename.split(".csv")[0] + "_stats.csv"
            output_csv(
                stats_file,
                ("dev", "name", "batch_size", "total_aot_graphs", "ok_aot_graphs"),
                [
                    current_device,
                    current_name,
                    current_batch_size,
                    *Stats.aot_summary(),
                ],
            )
    else:
        # Fan out: re-invoke this script once per model with --only=<name>,
        # so a crash/OOM in one model cannot take down the whole run.
        if output_filename and os.path.exists(output_filename):
            os.unlink(output_filename)
        if original_dir:
            os.chdir(original_dir)
        model_names = list(runner.iter_model_names(args))
        nmodels = len(model_names)
        for i, name in enumerate(model_names):
            current_name = name
            placeholder_batch_size = 0
            if args.progress:
                print(f"Running model {i+1}/{nmodels}", flush=True)

            def write_csv(status):
                # Record a non-result status row for every device.
                for device in args.devices:
                    output_csv(
                        output_filename,
                        ["dev", "name", "batch_size", "accuracy"],
                        [device, name, placeholder_batch_size, status],
                    )

            try:
                timeout = args.timeout
                if should_diff_branch(args):
                    timeout *= 2
                subprocess.check_call(
                    [sys.executable] + sys.argv + [f"--only={name}"], timeout=timeout
                )
            except subprocess.TimeoutExpired:
                print("TIMEOUT", file=sys.stderr)
                write_csv("timeout")
            except subprocess.SubprocessError:
                print("ERROR", file=sys.stderr)
                write_csv("infra_error")
    print_summary(output_filename)
def log_operator_inputs(model, example_inputs, model_iter_fn, name, args):
    """Record the operator-level inputs observed while running *model* once.

    Attempts the run under FakeTensorMode first (no real compute); on
    failure it falls back to executing the real model. The recorded
    operator inputs are written to "<dir of args.output>/<name>_<mode>.txt".
    If that file already exists the model is skipped entirely.
    """
    run_mode = "training" if args.training else "eval"
    log_path = os.path.join(os.path.dirname(args.output), f"{name}_{run_mode}.txt")
    # TODO - add option for coalescing inputs over multiple runs
    if os.path.exists(log_path):
        print(f"Skipping {name}, {log_path} already exists")
        return
    print(f"Running {name}")
    recorder = OperatorInputsMode()
    fake_mode = FakeTensorMode()
    # Deep-copy the model and inputs into fake tensors so the recording run
    # does not touch (or require memory for) the real parameters.
    with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode):
        fake_model = copy.deepcopy(model)
        fake_inputs = copy.deepcopy(example_inputs)
    try:
        with fake_mode, recorder:
            model_iter_fn(fake_model, fake_inputs, collect_outputs=False)
    except Exception as fake_exc:
        print(f"{name} failed to run with fake tensors, trying real. Exception: {fake_exc}")
        # Start a fresh recorder so the failed fake run's partial data is discarded.
        recorder = OperatorInputsMode()
        try:
            with recorder:
                model_iter_fn(model, example_inputs, collect_outputs=False)
        except Exception as real_exc:
            print(f"{name} failed to run with real. Exception: {real_exc}")
            raise
    print(f"Writing output to {log_path}")
    recorder.log_to_file(log_path)
if __name__ == "__main__":
    # This module is a shared runner library; the per-suite scripts are the
    # supported entry points.  (Typo fix: "hugginface.py" -> "huggingface.py".)
    raise RuntimeError(
        f"You shouldn't run {sys.argv[0]} directly, instead try timm_model.py, torchbench.py or huggingface.py"
    )
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
a97dfba740547798aa43e4e8df8ee377d844b172 | 9d1701a88644663277342f3a12d9795cd55a259c | /CSC108/a1/test.py | e3696c3c0ae1ba55928f89f9bbe2a46d1880dfc7 | [] | no_license | xxcocoymlxx/Study-Notes | cb05c0e438b0c47b069d6a4c30dd13ab97e4ee6d | c7437d387dc2b9a8039c60d8786373899c2e28bd | refs/heads/master | 2023-01-13T06:09:11.005038 | 2020-05-19T19:37:45 | 2020-05-19T19:37:45 | 252,774,764 | 2 | 0 | null | 2022-12-22T15:29:26 | 2020-04-03T15:44:44 | Jupyter Notebook | UTF-8 | Python | false | false | 1,238 | py | SIGN_GROUPS = '[ARI,LEO,SAG],[TAU,VIR,CAP],[GEM,LIB,AQU],[PIS,SCO,CAN]'
SIGNS = 'ARI:03,21-04,19;TAU:04,20-05,20;GEM:05,21-06,21;CAN:06,22-07,22;' + \
'LEO:07,23-08,22;VIR:08,23-09,22;LIB:09,23-10,23;SCO:10,24-11,20;' + \
'SAG:11,21-12,21;CAP:12,22-01,20;AQU:01,21-02,21;PIS:02,22-03,20;'
def get_sign_group(sign):
'''
>>> get_sign_group('ARI')
0
>>> get_sign_group('CAN')
3
'''
i = 0
group_number = 0
while i < len(SIGN_GROUPS):
if SIGN_GROUPS[i] != ']':
i += 1
elif SIGN_GROUPS[i] == ']':
group = SIGN_GROUPS[i-12:i+1]
#print(group)
i += 1
if sign not in group:
group_number += 1
else:
return group_number
def find_astrological_sign(month, date):
'''
>>> find_astrological_sign(9, 2)
'VIR'
>>> find_astrological_sign(10, 23)
'LIB'
'''
i = 0
while i + 16 <= len(SIGNS):
if (int(SIGNS[i+4:i+6]) == month and date >= int(SIGNS[i+7:i+9])) or \
(int(SIGNS[i+10:i+12]) == month and date <= int(SIGNS[i+13:i+15])):
return SIGNS[i:i+3]
else:
i = i + 16
| [
"coco.yang@mail.utoronto.ca"
] | coco.yang@mail.utoronto.ca |
c5835b7fa2b4f5ca981932cbb072da01a4eb7ff8 | fc27e1e21ad4891b1d4e769170671da1a4d32ed2 | /aliyun-python-sdk-ccs/setup.py | 02f22be7a4dd0804225e7b9d1814db5fb67fdfa6 | [
"Apache-2.0"
] | permissive | yonzhan2/aliyun-openapi-python-sdk | 3d05f7e83aeb286ad553a6a36c42ce932a1ece3e | e64873f9b528e1a83e3ea27d583f3f7998e7650b | refs/heads/master | 2020-04-11T10:22:48.511973 | 2018-12-13T09:29:21 | 2018-12-13T09:29:21 | 161,712,443 | 1 | 0 | null | 2018-12-14T00:52:39 | 2018-12-14T00:52:39 | null | UTF-8 | Python | false | false | 2,586 | py | #!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from setuptools import setup, find_packages
import os
import sys
"""
setup module for ccs.
Created on 7/3/2015
@author: alex
"""
# Package identity and metadata fed to setuptools below.
PACKAGE = "aliyunsdkccs"
NAME = "aliyun-python-sdk-ccs"
DESCRIPTION = "The ccs module of Aliyun Python sdk."
AUTHOR = "Aliyun"
AUTHOR_EMAIL = "aliyun-developers-efficiency@list.alibaba-inc.com"
URL = "http://develop.aliyun.com/sdk/python"
TOPDIR = os.path.dirname(__file__) or "."
# Version is read from the package's own __version__ attribute, so it is
# defined in exactly one place.
VERSION = __import__(PACKAGE).__version__
# Long description comes from README.rst (relative path: assumes setup.py is
# run from the project root).  NOTE(review): a `with open(...)` block would be
# the modern equivalent of this try/finally.
desc_file = open("README.rst")
try:
    LONG_DESCRIPTION = desc_file.read()
finally:
    desc_file.close()
# Pick the core-SDK dependency by interpreter version: the "v3" core package
# is for Python 3.3+.
requires = []
if sys.version_info < (3, 3):
    requires.append("aliyun-python-sdk-core>=2.0.2")
else:
    requires.append("aliyun-python-sdk-core-v3>=2.3.5")
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache",
    url=URL,
    keywords=["aliyun","sdk","ccs"],
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    platforms="any",
    install_requires=requires,
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Topic :: Software Development",
    )
)
"yixiong.jxy@alibaba-inc.com"
] | yixiong.jxy@alibaba-inc.com |
f66ca3f9bba3dc867733f33d8e8453c735d63d42 | f0bc59dc9aab005ef977957e6ea6b91bbe430952 | /2018-02-22-mongo-python-kennedy/code/data/release_health.py | ad6e41fad36f7fd0f5e9e01a03516dc3dbb6197e | [
"Apache-2.0"
] | permissive | Wintellect/WintellectWebinars | 3ac0f6ae02d2d52eefb80f4f06d70f44e0d66095 | 5a59d9742c340022d58ec7e2cda69a1eba0feb53 | refs/heads/master | 2023-03-02T06:31:25.457579 | 2022-04-29T19:26:55 | 2022-04-29T19:26:55 | 87,122,981 | 68 | 124 | Apache-2.0 | 2023-03-01T02:39:17 | 2017-04-03T21:33:32 | JavaScript | UTF-8 | Python | false | false | 192 | py | import mongoengine
class ReleaseHealth(mongoengine.EmbeddedDocument):
    """Embedded (nested) document describing the health of a release.

    Stored inline inside a parent document rather than in its own collection.
    """
    # Whether continuous integration is set up for the release.
    ci = mongoengine.BooleanField()
    # Test coverage value — scale (0-1 vs 0-100) not visible here; TODO confirm against the producer.
    coverage = mongoengine.FloatField()
    # Composite health score; the formula is defined by whatever writes this document.
    health_index = mongoengine.FloatField()
| [
"mikeckennedy@gmail.com"
] | mikeckennedy@gmail.com |
c9967a831db5b1498cb70bbd89be219e0c57becd | bb6ebff7a7f6140903d37905c350954ff6599091 | /tools/telemetry/telemetry/timeline/process.py | aa94a9cc40d91e045da00eb4f426995817470912 | [
"BSD-3-Clause",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | PDi-Communication-Systems-Inc/lollipop_external_chromium_org | faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f | ccadf4e63dd34be157281f53fe213d09a8c66d2c | refs/heads/master | 2022-12-23T18:07:04.568931 | 2016-04-11T16:03:36 | 2016-04-11T16:03:36 | 53,677,925 | 0 | 1 | BSD-3-Clause | 2022-12-09T23:46:46 | 2016-03-11T15:49:07 | C++ | UTF-8 | Python | false | false | 2,450 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event_container as event_container
import telemetry.timeline.counter as tracing_counter
import telemetry.timeline.thread as tracing_thread
class Process(event_container.TimelineEventContainer):
  ''' The Process represents a single userland process in the trace.
  '''
  def __init__(self, parent, pid):
    super(Process, self).__init__('process %s' % pid, parent)
    self.pid = pid
    # Maps thread id (tid) -> tracing_thread.Thread.
    self._threads = {}
    # Maps "<category>.<name>" -> tracing_counter.Counter.
    self._counters = {}
  @property
  def threads(self):
    return self._threads
  @property
  def counters(self):
    return self._counters
  def IterChildContainers(self):
    # Child containers are all threads followed by all counters.
    for thread in self._threads.itervalues():
      yield thread
    for counter in self._counters.itervalues():
      yield counter
  def IterAllSlicesOfName(self, name):
    # Delegate to every thread; slices live on threads, not the process.
    for thread in self._threads.itervalues():
      for s in thread.IterAllSlicesOfName(name):
        yield s
  def IterAllAsyncSlicesOfName(self, name):
    for thread in self._threads.itervalues():
      for s in thread.IterAllAsyncSlicesOfName(name):
        yield s
  def IterEventsInThisContainer(self):
    # A process holds no events of its own; the unreachable yield makes
    # this an intentionally empty generator.
    return
    yield # pylint: disable=W0101
  def GetOrCreateThread(self, tid):
    # Return the existing Thread for tid, creating and registering one
    # on first use.
    thread = self.threads.get(tid, None)
    if thread:
      return thread
    thread = tracing_thread.Thread(self, tid)
    self._threads[tid] = thread
    return thread
  def GetCounter(self, category, name):
    # Counters are keyed by "<category>.<name>"; raises ValueError when
    # the counter has not been registered for this process.
    counter_id = category + '.' + name
    if counter_id in self.counters:
      return self.counters[counter_id]
    raise ValueError(
        'Counter %s not found in process with id %s.' % (counter_id,
                                                         self.pid))
  def GetOrCreateCounter(self, category, name):
    # EAFP: try the lookup, create and register the counter on failure.
    try:
      return self.GetCounter(category, name)
    except ValueError:
      ctr = tracing_counter.Counter(self, category, name)
      self._counters[ctr.full_name] = ctr
      return ctr
  def AutoCloseOpenSlices(self, max_timestamp, thread_time_bounds):
    # Close any still-open slices on each thread, clamping to that
    # thread's own max thread-time bound.
    for thread in self._threads.itervalues():
      thread.AutoCloseOpenSlices(max_timestamp, thread_time_bounds[thread].max)
  def FinalizeImport(self):
    # Propagate import finalization to all child threads and counters.
    for thread in self._threads.itervalues():
      thread.FinalizeImport()
    for counter in self._counters.itervalues():
      counter.FinalizeImport()
| [
"mrobbeloth@pdiarm.com"
] | mrobbeloth@pdiarm.com |
60ab6a543eed1a43ddab5434945c723e9390423a | e26437e26ebb17187ae9c9caaa5dfc4208a7ec1d | /venv/bin/pyreverse | 9f96a391a24de4c7fb7d39e36f0c1b16bf65b197 | [
"CC0-1.0"
] | permissive | OseiasBeu/PyECom | 93f36fe22aca1b8c06be0fa0027d6cd42e614b6a | 2ea4e7e3be4ca015fb1bbc1083aa3f2d44accc5f | refs/heads/master | 2022-12-15T00:16:29.799351 | 2020-08-31T21:16:00 | 2020-08-31T21:16:00 | 287,870,077 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | #!/home/oseiasbeu/Documents/djangoecommerce-aula002/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys

from pylint import run_pyreverse

# Matches the suffixes that setuptools console-script launchers append to
# argv[0] on Windows ("-script.pyw" or ".exe"); stripped before delegating.
_LAUNCHER_SUFFIX = re.compile(r'(-script\.pyw|\.exe)?$')

if __name__ == '__main__':
    sys.argv[0] = _LAUNCHER_SUFFIX.sub('', sys.argv[0])
    sys.exit(run_pyreverse())
| [
"oseiasbeu@outlook.com"
] | oseiasbeu@outlook.com | |
da66572725917bc7eae7416aef1d229579a87d5e | 105d55b45e36ae1d3062135b22781f1df0fb1653 | /oauth_tokens/providers/facebook.py | 650cdc472bd5d2c75041be8704e017c55b924156 | [
"BSD-3-Clause"
] | permissive | EndyKaufman/django-oauth-tokens | 6151dd26acb99bb53aabbe5e75d01eac6cdd377e | b813b13d383b79e1a78e15a3881be5b94680a011 | refs/heads/master | 2021-01-14T11:20:09.040188 | 2015-06-25T18:03:45 | 2015-06-25T18:03:45 | 38,067,554 | 0 | 0 | null | 2015-06-25T18:48:54 | 2015-06-25T18:48:54 | null | UTF-8 | Python | false | false | 3,939 | py | # -*- coding: utf-8 -*-
import re
import urllib
from xml.sax import saxutils as su
from bs4 import BeautifulSoup
from django.core.exceptions import ImproperlyConfigured
import requests
from ..base import AccessTokenBase, AuthRequestBase
from ..exceptions import LoginPasswordError, AccountLocked, WrongRedirectUrl
class FacebookAuthRequest(AuthRequestBase):
    '''
    Facebook authorized request class
    '''
    provider = 'facebook'
    # Base domain used to resolve relative form actions on the login page.
    form_action_domain = 'https://facebook.com'
    login_url = 'https://www.facebook.com/login.php'
    # Impersonate a desktop browser; Facebook serves different markup (and
    # may block the request outright) for unrecognized user agents.
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/34.0.1847.116 Chrome/34.0.1847.116 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Charset': 'utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive',
        'Host': 'www.facebook.com',
    }
    # Marker phrases (Russian and English) that appear in the response body
    # when Facebook has temporarily locked the account.
    account_locked_phrases = [
        'Ваш аккаунт временно заблокирован',
        'Мы заблокировали ваш аккаунт в связи с попыткой входа из незнакомого места. Пожалуйста, помогите нам подтвердить, что попытка входа была произведена вами.',
        'Your account is temporarily locked.',
    ]

    def add_data_credentials(self, data):
        # Populate the login form fields with the stored credentials.
        data['email'] = self.username
        data['pass'] = self.password

    def authorize(self):
        '''
        Submit the login form and inspect the response body for known
        Facebook error pages, raising a specific exception for each.

        TODO: cover with tests for each condition
        '''
        response = super(FacebookAuthRequest, self).authorize()
        if 'You are trying too often' in response.content:
            # TODO: fix it
            raise Exception("Facebook authorization request returns error 'You are trying too often'")
        if 'Cookies Required' in response.content:
            # Fetch the home page to obtain fresh cookies, then retry login.
            # NOTE(review): the retried authorize() result is discarded and
            # the original response is returned below -- confirm intended.
            response = requests.get(self.form_action_domain)
            self.cookies = response.cookies
            self.authorize()
        # TODO: move this to FacebookAcessToken class
        if 'API Error Code: 191' in response.content:
            # Error 191: the redirect URI is not registered with the app.
            raise ImproperlyConfigured(
                "You must specify URL '%s' in your facebook application settings" % self.redirect_uri)
        for account_locked_phrase in self.account_locked_phrases:
            if account_locked_phrase in response.content:
                raise AccountLocked(
                    "Facebook errored 'Your account is temporarily locked.'. Try to login via web browser")
        return response
class FacebookAccessToken(AccessTokenBase):
    """OAuth2 access-token flow against Facebook's Graph API endpoints."""
    provider = 'facebook'
    type = 'oauth2'
    authorize_url = 'https://www.facebook.com/dialog/oauth'
    access_token_url = 'https://graph.facebook.com/oauth/access_token'
    # Deliberately a dead URL: the authorization code is read back from the
    # redirect target's query string (see get_url_from_response).
    redirect_uri = 'https://google.com/404'
    auth_request_class = FacebookAuthRequest

    def authorization_get_request(self):
        # Fetch the OAuth dialog page; a page titled "Error" means Facebook
        # rejected the request parameters (typically the redirect URL).
        response = super(FacebookAccessToken, self).authorization_get_request()
        bs = BeautifulSoup(response.content)
        if bs.find('title').text == 'Error':
            raise WrongRedirectUrl(bs.find('div').text)
        return response

    def authorization_permissions_request(self, response):
        # Facebook sometimes answers with an interstitial "Redirecting..."
        # page; follow its <meta refresh> target manually, keeping cookies
        # and the browser-like headers.
        if 'Redirecting...' in response.content:
            matches = re.findall(r'<meta http-equiv="refresh" content="0;url=(.+)" /></head>', response.content)
            url = su.unescape(urllib.unquote(matches[0]))
            response = self.oauth.request(
                method='get', url=url, cookies=response.cookies, headers=self.auth_request.headers)
        return response

    def get_url_from_response(self, response):
        # Landing on the (intentionally dead) redirect_uri yields a 404
        # whose URL carries the ?code=... authorization code.
        if response.status_code == 404 and 'code=' in response.url:
            return response.url
        else:
            return None
| [
"ramusus@gmail.com"
] | ramusus@gmail.com |
844a9a758831717b2da46cbd65a2b6d94b78da26 | ac8b725681e25177c5de3daf58afe00135241d0f | /leetcode/0622_design_circular_queue.py | 238c3abdb146d116221b12774b973ca013cac211 | [
"MIT"
] | permissive | jacquerie/leetcode | 7af100ea1d7292c8c3da34210cf04d891be5561b | 0cb213b9c7bcb6efa11210e9ebc291befb560bb9 | refs/heads/master | 2022-05-19T22:19:46.284065 | 2022-03-27T02:41:58 | 2022-03-27T02:41:58 | 129,323,741 | 3 | 0 | MIT | 2021-01-04T01:41:50 | 2018-04-12T23:51:56 | Python | UTF-8 | Python | false | false | 1,228 | py | # -*- coding: utf-8 -*-
class MyCircularQueue:
    """Fixed-capacity FIFO queue backed by a ring buffer.

    ``index`` points at the front element and ``count`` tracks how many
    slots are occupied; the tail position is derived from the two.
    """

    def __init__(self, k):
        self.capacity = k
        self.count = 0
        self.elements = [0] * k
        self.index = 0

    def enQueue(self, value):
        """Append ``value`` at the tail; return False when the queue is full."""
        if self.isFull():
            return False
        tail = (self.index + self.count) % self.capacity
        self.elements[tail] = value
        self.count += 1
        return True

    def deQueue(self):
        """Drop the front element; return False when the queue is empty."""
        if self.isEmpty():
            return False
        self.index = (self.index + 1) % self.capacity
        self.count -= 1
        return True

    def Front(self):
        """Return the front element, or -1 when empty."""
        return -1 if self.isEmpty() else self.elements[self.index]

    def Rear(self):
        """Return the rear element, or -1 when empty."""
        if self.isEmpty():
            return -1
        tail = (self.index + self.count - 1) % self.capacity
        return self.elements[tail]

    def isEmpty(self):
        """True when no slot is occupied."""
        return self.count == 0

    def isFull(self):
        """True when every slot is occupied."""
        return self.count == self.capacity
# Smoke test mirroring the LeetCode example: fill to capacity, confirm
# overflow is rejected, then free one slot and reuse it.
if __name__ == "__main__":
    obj = MyCircularQueue(3)
    assert obj.enQueue(1)
    assert obj.enQueue(2)
    assert obj.enQueue(3)
    assert not obj.enQueue(4)
    assert 3 == obj.Rear()
    assert obj.isFull()
    assert obj.deQueue()
    assert obj.enQueue(4)
    assert 4 == obj.Rear()
| [
"jacopo.notarstefano@gmail.com"
] | jacopo.notarstefano@gmail.com |
82d8010ea973ca811c5b181a212f6d636c8b8d9e | 1e30788a9e045e3bda2cfcb3bb42adfa7ee85dae | /venev/lib/python2.7/site-packages/coverage/misc.py | e3723c1847da8137214715ca5849ac49eb6f0584 | [
"MIT"
] | permissive | CompeteLeak/crankycoin | 1bee3a032c4c6360093035aed1a7842cfffb46f0 | 9376fbd3095429f2d46a3e4436023f814bb2e36a | refs/heads/master | 2020-03-10T15:59:10.016605 | 2018-05-01T06:16:24 | 2018-05-01T06:16:24 | 129,462,940 | 0 | 0 | MIT | 2018-04-25T20:56:12 | 2018-04-13T23:17:30 | Python | UTF-8 | Python | false | false | 7,487 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Miscellaneous stuff for coverage.py."""
import errno
import hashlib
import inspect
import locale
import os
import sys
import types
from coverage import env
from coverage.backward import string_class, to_bytes, unicode_class
# Cache of already-copied modules, keyed by the original module object.
ISOLATED_MODULES = {}


def isolate_module(mod):
    """Return a shallow copy of `mod`, shielded from aggressive mocking.

    If a test suite mocks os.path.exists (for example), and we then need it
    ourselves, using the mocked version tangles everything up.  Copying the
    module at import time keeps coverage.py on the real implementations.
    Repeated calls for the same module return the same cached copy.
    """
    cached = ISOLATED_MODULES.get(mod)
    if cached is None:
        cached = types.ModuleType(mod.__name__)
        # Register the copy *before* copying attributes so mutually
        # importing modules (os <-> os.path) do not recurse forever.
        ISOLATED_MODULES[mod] = cached
        for attr_name in dir(mod):
            attr = getattr(mod, attr_name)
            if isinstance(attr, types.ModuleType):
                attr = isolate_module(attr)
            setattr(cached, attr_name, attr)
    return cached

os = isolate_module(os)
# Use PyContracts for assertion testing on parameters and returns, but only if
# we are running our own test suite.
if env.TESTING:
    from contracts import contract  # pylint: disable=unused-import
    from contracts import new_contract as raw_new_contract

    def new_contract(*args, **kwargs):
        """A proxy for contracts.new_contract that doesn't mind happening twice."""
        try:
            return raw_new_contract(*args, **kwargs)
        except ValueError:
            # During meta-coverage, this module is imported twice, and
            # PyContracts doesn't like redefining contracts. It's OK.
            pass

    # Define contract words that PyContract doesn't have.
    new_contract('bytes', lambda v: isinstance(v, bytes))
    if env.PY3:
        new_contract('unicode', lambda v: isinstance(v, unicode_class))
else:  # pragma: not covered
    # We aren't using real PyContracts, so just define a no-op decorator as a
    # stunt double, so @contract(...) costs nothing in production.
    def contract(**unused):
        """Dummy no-op implementation of `contract`."""
        return lambda func: func

    def new_contract(*args_unused, **kwargs_unused):
        """Dummy no-op implementation of `new_contract`."""
        pass
def nice_pair(pair):
    """Make a nice string representation of a pair of numbers.

    Collapses an (n, n) pair to just "n"; otherwise renders "start-end"
    to indicate the range.
    """
    start, end = pair
    if start == end:
        return "%d" % start
    return "%d-%d" % (start, end)
def format_lines(statements, lines):
    """Nicely format a list of line numbers.

    Coalesces entries of `lines` into ranges as long as they are
    consecutive *statements* -- gaps between statements are bridged.
    For example, with `statements` [1,2,3,4,5,10,11,12,13,14] and
    `lines` [1,2,5,10,11,13,14] the result is "1-2, 5-11, 13-14".
    """
    statements = sorted(statements)
    lines = sorted(lines)
    runs = []
    run_start = None
    run_end = None
    stmt_idx = 0
    line_idx = 0
    while stmt_idx < len(statements) and line_idx < len(lines):
        if statements[stmt_idx] == lines[line_idx]:
            # This line is present: start or extend the current run.
            if run_start is None:
                run_start = lines[line_idx]
            run_end = lines[line_idx]
            line_idx += 1
        elif run_start:
            # A statement without a matching line closes the open run.
            runs.append((run_start, run_end))
            run_start = None
        stmt_idx += 1
    if run_start:
        runs.append((run_start, run_end))

    def _render(run):
        # Inlined equivalent of nice_pair() so this stands alone.
        low, high = run
        return "%d" % low if low == high else "%d-%d" % (low, high)

    return ', '.join(_render(run) for run in runs)
def expensive(fn):
    """A decorator to indicate that a method shouldn't be called more than once.

    Normally, this does nothing. During testing, this raises an exception if
    called more than once.
    """
    # env.TESTING is consulted once, at decoration time, so production code
    # pays no per-call overhead.
    if env.TESTING:
        attr = "_once_" + fn.__name__

        def _wrapped(self):
            """Inner function that checks the cache."""
            # The marker attribute is set on the instance the first time
            # through; finding it again means a second, forbidden call.
            if hasattr(self, attr):
                raise Exception("Shouldn't have called %s more than once" % fn.__name__)
            setattr(self, attr, True)
            return fn(self)
        return _wrapped
    else:
        return fn
def bool_or_none(b):
    """Return bool(b), but preserve None."""
    return None if b is None else bool(b)
def join_regex(regexes):
    """Combine a list of regexes into one alternation matching any of them."""
    wrapped = ("(?:%s)" % pattern for pattern in regexes)
    return "|".join(wrapped)
def file_be_gone(path):
    """Remove a file, and don't get annoyed if it doesn't exist."""
    try:
        os.remove(path)
    except OSError as exc:
        # A missing file is fine; anything else is a real problem.
        if exc.errno == errno.ENOENT:
            return
        raise
def output_encoding(outfile=None):
    """Determine the encoding to use for output written to `outfile`.

    Defaults to sys.stdout; falls back to the real stdout's encoding and
    finally the locale's preferred encoding.
    """
    stream = sys.stdout if outfile is None else outfile
    return (
        getattr(stream, "encoding", None)
        or getattr(sys.__stdout__, "encoding", None)
        or locale.getpreferredencoding()
    )
class Hasher(object):
    """Hashes Python data into md5."""
    def __init__(self):
        self.md5 = hashlib.md5()

    def update(self, v):
        """Add `v` to the hash, recursively if needed."""
        # Fold the type name in first so e.g. 1 and "1" hash differently.
        self.md5.update(to_bytes(str(type(v))))
        if isinstance(v, string_class):
            self.md5.update(to_bytes(v))
        elif isinstance(v, bytes):
            self.md5.update(v)
        elif v is None:
            pass
        elif isinstance(v, (int, float)):
            self.md5.update(to_bytes(str(v)))
        elif isinstance(v, (tuple, list)):
            for e in v:
                self.update(e)
        elif isinstance(v, dict):
            # Sort the keys so the hash is independent of dict iteration
            # order.
            keys = v.keys()
            for k in sorted(keys):
                self.update(k)
                self.update(v[k])
        else:
            # Arbitrary objects: hash their non-dunder, non-callable
            # attributes (name and value pairs).
            for k in dir(v):
                if k.startswith('__'):
                    continue
                a = getattr(v, k)
                if inspect.isroutine(a):
                    continue
                self.update(k)
                self.update(a)

    def hexdigest(self):
        """Retrieve the hex digest of the hash."""
        return self.md5.hexdigest()
def _needs_to_implement(that, func_name):
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
raise NotImplementedError(
"{thing} {name!r} needs to implement {func_name}()".format(
thing=thing, name=name, func_name=func_name
)
)
class CoverageException(Exception):
    """An exception specific to coverage.py; root of the hierarchy."""


class NoSource(CoverageException):
    """We couldn't find the source for a module."""


class NoCode(NoSource):
    """We couldn't find any code at all."""


class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""


class ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.
    """
"root@localhost.localdomain"
] | root@localhost.localdomain |
4bc4de35ea4be1709c988bf5903e9a0bba31b0d8 | 7844d2c4c4863e5d83f37d803ff1ccf477e654e2 | /signup/backends/sts_credentials.py | fdc0f7ff1b847a2bde88fad73477a8c0eaf4dad7 | [
"BSD-2-Clause"
] | permissive | VanL/djaodjin-signup | 195df527fdc97bddc198a3655a4bd395b9fef775 | 72abd2699c543a2219a4991d9575a0bb0aadf42d | refs/heads/master | 2023-02-09T02:28:53.672743 | 2020-06-07T14:25:17 | 2020-06-07T14:25:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,407 | py | # Copyright (c) 2020, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime, base64, hashlib, hmac, json, logging
import boto3
from .. import settings
from ..compat import is_authenticated, urlparse
from ..helpers import datetime_or_now
LOGGER = logging.getLogger(__name__)
def temporary_security_token(request,
                             aws_upload_role=None, aws_external_id=None,
                             aws_region=None, at_time=None):
    """
    Create temporary security credentials on AWS. This typically needed
    to allow uploads from the browser directly to S3.

    The credentials are cached in the Django session and refreshed only
    when within ~5 seconds of their recorded expiry.  Role, external id
    and region fall back to the module settings when not supplied.
    """
    if not is_authenticated(request):
        return
    at_time = datetime_or_now(at_time)
    if ('access_key_expires_at' in request.session
        and at_time + datetime.timedelta(seconds=5) < datetime_or_now(
            request.session['access_key_expires_at'])):
        # +5s buffer, in case of clock drift.
        return
    # Lazy creation of temporary credentials.
    if not aws_upload_role:
        aws_upload_role = settings.AWS_UPLOAD_ROLE
    if not aws_external_id:
        aws_external_id = settings.AWS_EXTERNAL_ID
    kwargs = {}
    if aws_external_id:
        kwargs = {"ExternalId": aws_external_id}
    if not aws_region:
        aws_region = settings.AWS_REGION
    conn = boto3.client('sts', region_name=aws_region)
    # AWS will fail if we don't sanetize and limit the length
    # of the session key.
    aws_session_key = request.session.session_key.replace('/', '')[:64]
    if aws_session_key != request.session.session_key:
        LOGGER.warning("sanetized session key %s to %s for %s in order to"\
            " match AWS requirements", request.session.session_key,
            aws_session_key, request.user, extra={'request': request})
    # See http://boto.cloudhackers.com/en/latest/ref/sts.html#\
    # boto.sts.STSConnection.assume_role
    # NOTE(review): DurationSeconds is not passed to assume_role; the 3600s
    # below only sets the cached expiry in the session -- confirm it matches
    # the role's actual session duration.
    duration_seconds = 3600
    access_key_expires_at = at_time + datetime.timedelta(
        seconds=duration_seconds)
    assumed_role = conn.assume_role(RoleArn=aws_upload_role,
        RoleSessionName=aws_session_key, **kwargs)
    request.session['access_key'] = assumed_role['Credentials']['AccessKeyId']
    request.session['secret_key'] \
        = assumed_role['Credentials']['SecretAccessKey']
    request.session['security_token'] \
        = assumed_role['Credentials']['SessionToken']
    request.session['access_key_expires_at'] = access_key_expires_at.isoformat()
    LOGGER.info('AWS temporary credentials for %s to assume role %s: %s',
        request.user, aws_upload_role, request.session['access_key'],
        extra={'event': 'create-aws-credentials',
            'request': request, 'aws_role': aws_upload_role,
            'aws_access_key': request.session['access_key']})
    LOGGER.debug('AWS Access Key %s, Secret Key=%s, Security Token=%s',
        request.session['access_key'], request.session['secret_key'],
        request.session['security_token'])
def _signed_policy(region, service, requested_at,
access_key, secret_key, security_token,
bucket=None, key_prefix="", acl=None):
#pylint:disable=too-many-arguments,too-many-locals
signature_date = requested_at.strftime("%Y%m%d")
x_amz_credential = '/'.join([
access_key, signature_date, region, service, 'aws4_request'])
x_amz_date = '%sT000000Z' % signature_date
conditions = [
{"bucket": bucket},
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-credential": x_amz_credential},
{"x-amz-date": x_amz_date},
{"x-amz-security-token": security_token},
["starts-with", "$key", key_prefix],
["starts-with", "$Content-Type", ""]
]
if acl is not None:
conditions += [{"acl": acl}]
if acl is None or acl != 'public-read':
conditions += [{"x-amz-server-side-encryption": "AES256"}]
policy = json.dumps({
"expiration": (requested_at + datetime.timedelta(
hours=24)).strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"conditions": conditions}).encode("utf-8")
policy_base64 = base64.b64encode(policy).decode(
"utf-8").replace('\n', '')
date_key = hmac.new(("AWS4%s" % secret_key).encode("utf-8"),
signature_date.encode("utf-8"),
hashlib.sha256).digest()
date_region_key = hmac.new(
date_key, region.encode("utf-8"),
hashlib.sha256).digest()
date_region_service_key = hmac.new(
date_region_key, service.encode("utf-8"),
hashlib.sha256).digest()
signing_key = hmac.new(
date_region_service_key, "aws4_request".encode("utf-8"),
hashlib.sha256).digest()
policy_signature = hmac.new(
signing_key, policy_base64.encode("utf-8"),
hashlib.sha256).hexdigest()
if acl is not None:
acl_prefix = acl.replace('-', '_') + "_"
context = {'acl': acl}
else:
acl_prefix = ""
context = {}
context.update({
'access_key': access_key,
'security_token': security_token,
"%saws_policy" % acl_prefix: policy_base64,
"%saws_policy_signature" % acl_prefix: policy_signature,
'x_amz_credential': x_amz_credential,
'x_amz_date': x_amz_date})
return context
def aws_bucket_context(request, location, acls=None, aws_upload_role=None,
                       aws_external_id=None, aws_region=None):
    """
    Context to use in templates to upload from the client browser
    to the bucket directly.

    Parses `location` into a bucket name and key prefix, refreshes the
    session's temporary STS credentials if needed, and returns the signed
    policy fields (one set per ACL in `acls`, if given) plus the final
    upload "location" URL.  Anonymous requests get an empty dict.
    """
    #pylint:disable=too-many-arguments
    context = {}
    if is_authenticated(request):
        # Derives a bucket_name and key_prefix from a location
        # (ex: s3://bucket_name/key_prefix,
        #  https://s3-region.amazonaws/bucket_name/key_prefix)
        parts = urlparse(location)
        bucket_name = parts.netloc.split('.')[0]
        key_prefix = parts.path
        if bucket_name.startswith('s3-'):
            # Path-style URL: the region is embedded in the hostname and
            # the bucket is the first path component.
            aws_region = bucket_name[3:]
            name_parts = key_prefix.split('/')
            if name_parts and not name_parts[0]:
                name_parts.pop(0)
            bucket_name = name_parts[0]
            key_prefix = '/'.join(name_parts[1:])
        if key_prefix.startswith('/'):
            # we rename leading '/' otherwise S3 copy triggers a 404
            # because it creates an URL with '//'.
            key_prefix = key_prefix[1:]
        if key_prefix and key_prefix.endswith('/'):
            key_prefix = key_prefix[:-1]
        if not aws_region:
            aws_region = settings.AWS_REGION
        requested_at = datetime_or_now()
        # Lazy creation of temporary credentials.
        temporary_security_token(request, aws_upload_role=aws_upload_role,
            aws_external_id=aws_external_id, aws_region=aws_region,
            at_time=requested_at)
        if acls is not None:
            # One signed policy per requested canned ACL; each set of keys
            # is prefixed with the ACL name (see _signed_policy).
            for acl in acls:
                context.update(_signed_policy(
                    aws_region, "s3", requested_at,
                    request.session['access_key'],
                    request.session['secret_key'],
                    security_token=request.session['security_token'],
                    bucket=bucket_name, key_prefix=key_prefix, acl=acl))
        else:
            context.update(_signed_policy(
                aws_region, "s3", requested_at,
                request.session['access_key'],
                request.session['secret_key'],
                security_token=request.session['security_token'],
                bucket=bucket_name, key_prefix=key_prefix))
        context.update({"location": "https://%s.s3-%s.amazonaws.com/%s" % (
            bucket_name, aws_region, key_prefix)})
    return context
class AWSContextMixin(object):
    """View mixin exposing signed S3 upload parameters as template context."""
    def get_context_data(self, *args, **kwargs):
        #pylint: disable=unused-argument
        # Delegate to aws_bucket_context(); every option is drawn from
        # kwargs and falls back to module settings when absent.
        return aws_bucket_context(self.request, kwargs.get('location', None),
            acls=kwargs.get('acls', None),
            aws_upload_role=kwargs.get('aws_upload_role', None),
            aws_external_id=kwargs.get('aws_external_id', None),
            aws_region=kwargs.get('aws_region', None))
| [
"smirolo@djaodjin.com"
] | smirolo@djaodjin.com |
bf4e0aab1c159295634285c1e66c3ddbf71eaa43 | 35fc3136ca3f4af52ebeb36cedcd30b41d685146 | /RNASeq/pipelines_ds/RNASeq_MDD21.py | f7d2cd5bd88f8ae9d1fc645cae3f5ac64ce93125 | [] | no_license | stockedge/tpot-fss | cf260d9fd90fdd4b3d50da168f8b780bb2430fd1 | d1ee616b7552ef254eb3832743c49a32e1203d6a | refs/heads/master | 2022-09-19T13:10:30.479297 | 2020-06-02T15:43:16 | 2020-06-02T15:43:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import numpy as np
import pandas as pd
from sklearn.cluster import FeatureAgglomeration
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from tpot.builtins import DatasetSelector
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=21)
# Average CV score on the training set was:0.752695652173913
exported_pipeline = make_pipeline(
DatasetSelector(sel_subset=12, subset_list="module23.csv"),
FeatureAgglomeration(affinity="l2", linkage="average"),
RandomForestClassifier(bootstrap=False, criterion="entropy", max_features=0.5, min_samples_leaf=9, min_samples_split=14, n_estimators=100)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"grixor@gmail.com"
] | grixor@gmail.com |
54b3cba439ff4df98ef0664037b16637b744cc2c | 2df47589ca457d16fbffd4e1bccf5133174a0b97 | /highcharts/core/urls.py | 224545575fb36ead36d321285b6b52ffed2591b7 | [] | no_license | bguerbas/highcharts | a805419cb8d5a00bc3f82b5c4df285598f7685d8 | 571fba58465136c5040266b3d4ba2d65a5cc740c | refs/heads/master | 2022-02-12T19:33:12.244474 | 2016-06-04T05:00:24 | 2016-06-04T05:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | from django.conf.urls import url
from highcharts.core.graphics import dollar_json, euro_json, product_json
from highcharts.core import views as v
urlpatterns = [
url(r'^$', v.home, name='home'),
url(r'^dollar-graphic/$', v.dollar_graphic, name='dollar-graphic'),
url(r'^euro-graphic/$', v.euro_graphic, name='euro-graphic'),
url(r'^product-graphic/$', v.product_graphic, name='product-graphic'),
url(r'^dollar_json/$', dollar_json),
url(r'^euro_json/$', euro_json),
url(r'^product_json/$', product_json),
]
| [
"rg3915@yahoo.com.br"
] | rg3915@yahoo.com.br |
aa2082dc6d4bc7facdfcda2f11287a57b36d45d5 | 921b3a67a24df947f085e93ba58833ec20f6b89e | /producer-tutorial/Lib/site-packages/faker/providers/ssn/en_PH/__init__.py | b46f518321a9d4fc9f7172f85be5a33b8bd0612e | [] | no_license | jaslanm/python | e3bacd7ad0020b7e11adcb1b17dd6da3e4b2f65c | 5cfa3913b89acb0b8cf79247de1b2820a8b92f3a | refs/heads/main | 2023-08-30T01:53:13.752918 | 2021-10-23T13:24:48 | 2021-10-23T13:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | from ... import BaseProvider
class Provider(BaseProvider):
    """
    Provider for Philippine IDs that are related to social security

    There is no unified social security program in the Philippines. Instead, the Philippines has a messy collection of
    social programs and IDs that, when put together, serves as an analogue of other countries' social security program.
    The government agencies responsible for these programs have relatively poor/outdated information and documentation
    on their respective websites, so the sources section include third party "unofficial" information.

    - Social Security System (SSS) - Social insurance program for workers in private, professional, and informal sectors
    - Government Service Insurance System (GSIS) - Social insurance program for government employees
    - Home Development Mutual Fund (popularly known as Pag-IBIG) - Socialized financial assistance and loaning program
    - Philippine Health Insurance Corporation (PhilHealth) - Social insurance program for health care
    - Unified Multi-Purpose ID (UMID) - Identity card with common reference number (CRN) that serves as a link to
                                        the four previous programs and was planned to supersede the previous IDs, but
                                        its future is now uncertain because of the upcoming national ID system

    Sources:
    - https://www.sss.gov.ph/sss/DownloadContent?fileName=SSSForms_UMID_Application.pdf
    - https://www.gsis.gov.ph/active-members/benefits/ecard-plus/
    - https://www.pagibigfund.gov.ph/DLForms/providentrelated/PFF039_MembersDataForm_V07.pdf
    - https://filipiknow.net/is-umid-and-sss-id-the-same/
    - https://filipiknow.net/philhealth-number/
    - https://en.wikipedia.org/wiki/Unified_Multi-Purpose_ID
    """

    # Numerify patterns: each '#' is replaced with a random digit.
    sss_formats = ('##-#######-#',)
    gsis_formats = ('###########',)
    philhealth_formats = ('##-#########-#',)
    pagibig_formats = ('####-####-####',)
    umid_formats = ('####-#######-#',)

    def sss(self) -> str:
        """Generate a Social Security System (SSS) number."""
        return self.numerify(self.random_element(self.sss_formats))

    def gsis(self) -> str:
        """Generate a Government Service Insurance System (GSIS) number."""
        return self.numerify(self.random_element(self.gsis_formats))

    def pagibig(self) -> str:
        """Generate a Pag-IBIG (HDMF) membership number."""
        return self.numerify(self.random_element(self.pagibig_formats))

    def philhealth(self) -> str:
        """Generate a PhilHealth identification number."""
        return self.numerify(self.random_element(self.philhealth_formats))

    def umid(self) -> str:
        """Generate a Unified Multi-Purpose ID (UMID) number."""
        return self.numerify(self.random_element(self.umid_formats))

    def ssn(self) -> str:
        """Generate a generic "SSN" for this locale."""
        # Use UMID as SSN in the interim till its deprecation
        return self.umid()
| [
"jaslanm@gmail.com"
] | jaslanm@gmail.com |
d67a97f7d001de095080b8e061061fdc66d4ab5c | 1d01f44e748c03e2f00ede0b317ac57d868cb9a8 | /bdd/features/steps/generic.py | a41853ca92fad327f95aa4f5481dc70ae24b84f1 | [
"Apache-2.0"
] | permissive | gtoonstra/airflow-hovercraft | 6ef9e6588a1dbc9a97f4c725ee8e50d38f913d3a | 87d3f3dde410d186dcfbe30fb3330b6c3c8d08d9 | refs/heads/master | 2021-01-22T23:10:49.646487 | 2017-06-18T20:07:37 | 2017-06-18T20:07:37 | 92,804,454 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,290 | py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import ast
from bddlib.fake_hook import FakeHook
from airflow.hooks.base_hook import BaseHook
def get_default_context():
    """Return a fresh, empty execution context for operator.execute()."""
    return dict()
# The @given/@when/@then decorators are injected by behave when it loads
# this steps module; `context` is behave's shared scenario context.
@given('no specific state')
def step_impl(context):
    pass


@given('a specific initializer')
def step_impl(context):
    # Read the first row of the scenario table; each cell is a Python
    # literal that becomes a keyword argument for the operator constructor.
    if context.table is not None:
        row = context.table[0]
        headers = context.table.headings
        d = {}
        for header in headers:
            d[header] = ast.literal_eval(row[header])
        context.initializer = d


@given('hook mocked with FakeHook')
def step_impl(context):
    # Replace BaseHook.get_hook globally so operators under test receive a
    # FakeHook preloaded with the literal values from the scenario table.
    returned_data = {}
    if context.table is not None:
        row = context.table[0]
        headers = context.table.headings
        for header in headers:
            returned_data[header] = ast.literal_eval(row[header])

    def get_hook(conn_id='fake'):
        return FakeHook(returned_data)

    BaseHook.get_hook = get_hook
@when('the {operator_type} is created')
def step_impl(context, operator_type):
    """
    This step checks if it can instantiate
    a class of a certain type
    """
    try:
        context.exception = None
        # Split the dotted path into module and class name, then import.
        s = operator_type.split(".")
        mod = ".".join(s[:len(s)-1])
        clz = s[len(s)-1]
        MyClass = getattr(importlib.import_module(mod), clz)
        d = {}
        if "initializer" in context:
            # Merge the kwargs collected by the "specific initializer" step.
            d = context.initializer
            d['task_id'] = 'test'
            context.instance = MyClass(**d)
        else:
            context.instance = MyClass(task_id='test')
    except Exception as e:
        # Remember the failure; a later "then" step asserts on it.
        context.exception = e
@then('the operator is executed')
def step_impl(context):
    # Execute the operator built by the "when" step, capturing either its
    # return value or the exception it raised.
    try:
        ctxt = get_default_context()
        context.return_value = context.instance.execute(ctxt)
    except Exception as e:
        context.exception = e


@then('no exception is raised')
def step_impl(context):
    """
    This step just checks if an exception was raised
    in a previous step.
    """
    if context.exception is not None:
        raise context.exception


@then('the exception {exception_type} is raised')
def step_impl(context, exception_type):
    """
    This step just checks if an exception was raised
    in a previous step.
    """
    if context.exception is None:
        raise Exception("No exception was raised when one was expected")
    # Compare by class name so feature files don't need full import paths.
    assert type(context.exception).__name__ == exception_type


@then('the return value is {return_value}')
def step_impl(context, return_value):
    """
    This step checks the value returned by the operator
    in a previous step.
    """
    if context.return_value is not None:
        # Compare string representations to avoid type coercion issues.
        assert str(context.return_value) == str(return_value)
    else:
        raise Exception("No return value from operator")
| [
"gtoonstra@gmail.com"
] | gtoonstra@gmail.com |
82c10264b839855b634e22321e3d1c1056cc2fa2 | ffc1cc3bb7b68335b115122fdc7924fc4e31d528 | /hun89.py | 598943a29a06fb9e8bd394f448e35cc79e22821b | [] | no_license | Rihanashariff/swathi24 | dba1dd3c3d2ff583ae431b432e0ef262bfeb3ac3 | 2b0d21f2febdd2a563e8f0affeebd5ca7a5821b8 | refs/heads/master | 2020-07-02T05:28:32.199982 | 2019-06-29T08:22:10 | 2019-06-29T08:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #s
n=input()
l=[]
for i in range(0,len(n)):
if n[i] not in l:
l.append(n[i])
l=l[::-1]
for i in range(0,len(l)-1):
print(l[i],end="")
print(l[-1])
| [
"noreply@github.com"
] | Rihanashariff.noreply@github.com |
445b8f4162f676a1d2d53a66b8f67bd4b216b021 | a2d3f2787cd26f2bf90f30ba9516d1675a69f8be | /emission/tests/coreTests/TestEntry.py | 93e03219cfcb42cee4b1af697e90badb0d2316d4 | [
"BSD-3-Clause"
] | permissive | njriasan/e-mission-server | 318833ba06cb7f40ddb7b8d2ac3da4d049e7c846 | 23224ddcfd29f31c13f75d819d9ad8530aea052f | refs/heads/master | 2020-05-02T11:02:00.528836 | 2019-03-27T19:21:31 | 2019-03-27T19:21:31 | 177,915,408 | 1 | 0 | BSD-3-Clause | 2019-03-27T04:01:32 | 2019-03-27T04:01:31 | null | UTF-8 | Python | false | false | 4,859 | py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Test the class that supports usercache entries
# The main change here is that
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import unittest
from uuid import UUID
import geojson as gj
import bson.objectid as bo
# Our imports
import emission.core.wrapper.entry as ecwe
import emission.core.wrapper.motionactivity as ecwm
import emission.core.wrapper.trip as ecwt
import emission.tests.common as etc
class TestEntry(unittest.TestCase):
    """Unit tests for the usercache Entry and Trip wrapper classes.

    Note: ``assertEquals`` (a deprecated alias, removed in Python 3.12) has
    been replaced with ``assertEqual`` throughout.
    """

    def testWrapLocation(self):
        """Wrapping a background/location document exposes metadata and data fields."""
        testEntryJSON = {'_id': '55a4418c7d65cb39ee9737cf',
                         'data': {'accuracy': 52.5,
                                  'altitude': 0,
                                  'bearing': 0,
                                  'elapsedRealtimeNanos': 100623898000000,
                                  'latitude': 37.3885529,
                                  'longitude': -122.0879696,
                                  'loc': {"coordinates": [-122.0879696, 37.3885529], "type": "Point"},
                                  'sensed_speed': 0,
                                  'ts': 1436826356.852},
                         'metadata': {'key': 'background/location',
                                      'platform': 'android',
                                      'read_ts': 0,
                                      'type': 'message',
                                      'write_ts': 1436826357.115,
                                      'write_fmt_time': '2015-07-13 15:25:57.115000-07:00'
                                      },
                         'user_id': UUID('0763de67-f61e-3f5d-90e7-518e69793954')}
        entry = ecwe.Entry(testEntryJSON)
        self.assertEqual(entry.metadata.key, 'background/location')
        self.assertEqual(entry.metadata.type, 'message')
        self.assertEqual(entry.data.latitude, 37.3885529)
        self.assertEqual(entry.data.longitude, -122.0879696)
        # self.assertEqual(entry.data.loc, gj.Point((-122.0879696, 37.3885529)))
        # Direct equality against a fresh gj.Point is commented out above;
        # only the type is asserted here.
        self.assertTrue(isinstance(entry.data.loc, gj.Point))
        logging.debug("location time = %s, written at %s (%s)" %
                      (entry.data.ts, entry.metadata.write_ts, entry.metadata.write_fmt_time))

    def testWrapActivity(self):
        """Wrapping a motion_activity document maps the raw type code to MotionTypes."""
        testEntryJSON = {
            '_id': '55a4418c7d65cb39ee9737d2',
            'data': {
                'type': 5,
                'confidence': 100,
                'ts': 1436826360.493
            },
            'metadata': {'key': 'background/motion_activity',
                         'platform': 'android',
                         'read_ts': 0,
                         'type': 'message',
                         'write_ts': 1436826360.493,
                         'write_fmt_time': '2015-07-13 15:26:00.493000-07:00'
                         },
            'user_id': UUID('0763de67-f61e-3f5d-90e7-518e69793954')
        }
        entry = ecwe.Entry(testEntryJSON)
        self.assertEqual(entry.metadata.key, 'background/motion_activity')
        self.assertEqual(entry.metadata.type, 'message')
        # Raw type code 5 corresponds to the TILTING motion type.
        self.assertEqual(entry.data.type, ecwm.MotionTypes.TILTING)
        self.assertEqual(entry.data.confidence, 100)
        logging.debug("activity time = %s, written at %s (%s)" %
                      (entry.data.ts, entry.metadata.write_ts, entry.metadata.write_fmt_time))

    def testWrapTrip(self):
        """Trip wraps ObjectId references and the GeoJSON start location correctly."""
        testTripJSON = {
            '_id': bo.ObjectId("55d8c47b7d65cb39ee983c2d"),
            'start_ts': 1436826360.200,
            'start_fmt_time': '2015-07-13 15:26:00.200000-07:00',
            'end_ts': 1436826360.493,
            'end_fmt_time': '2015-07-13 15:26:00.493000-07:00',
            'start_place': bo.ObjectId("55d8c47b7d65cb39ee983c2d"),
            'end_place': bo.ObjectId("55d8c47b7d65cb39ee983c2d"),
            'start_loc': {"coordinates": [-122, 37], "type": "Point"},
            'user_id': UUID('0763de67-f61e-3f5d-90e7-518e69793954')
        }
        trip = ecwt.Trip(testTripJSON)
        self.assertEqual(trip.get_id(), bo.ObjectId("55d8c47b7d65cb39ee983c2d"))
        self.assertEqual(trip.start_place, bo.ObjectId("55d8c47b7d65cb39ee983c2d"))
        self.assertEqual(trip.end_place, bo.ObjectId("55d8c47b7d65cb39ee983c2d"))
        self.assertTrue(isinstance(trip.start_loc, gj.Point))

    def testDedupList(self):
        """get_dedup_list returns list-typed key lists for two entry types."""
        # NOTE(review): these local imports appear unused (ecwt even shadows
        # the module-level trip alias) — possibly kept to assert
        # importability; confirm before removing.
        import emission.core.wrapper.location as ecwl
        import emission.core.wrapper.transition as ecwt
        self.assertEqual(type(ecwe.Entry.get_dedup_list("background/filtered_location")),
                         list)
        self.assertIn("latitude", ecwe.Entry.get_dedup_list("background/filtered_location"))
        self.assertIn("ts", ecwe.Entry.get_dedup_list("background/filtered_location"))
        self.assertEqual(type(ecwe.Entry.get_dedup_list("statemachine/transition")),
                         list)
        self.assertIn("curr_state", ecwe.Entry.get_dedup_list("statemachine/transition"))
        self.assertIn("ts", ecwe.Entry.get_dedup_list("statemachine/transition"))
if __name__ == '__main__':
    # Set up the project-wide logging configuration before handing
    # control to the unittest runner.
    etc.configLogging()
    unittest.main()
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
42ef418b29e8afe99bd8a80b80757cb7ddc5210e | 48b7b96a5caf2102ae6ca5626efc0135d4088a75 | /server/providers/models.py | 80bce9d435fb3af1e97647ff6ba22b3133451c1d | [] | no_license | DonAurelio/coder | ced49498e0e0717fa8f0c523e5a2ff87895f162d | 429d2e4c43d66770792200bac6cd103f86dcf8b1 | refs/heads/master | 2023-01-11T08:59:43.749545 | 2019-10-02T19:58:07 | 2019-10-02T19:58:07 | 106,939,435 | 2 | 1 | null | 2022-12-29T07:03:46 | 2017-10-14T15:33:08 | JavaScript | UTF-8 | Python | false | false | 1,669 | py | from django.db import models
import requests
# Create your models here.
class Service(models.Model):
    """A remote HTTP service whose API endpoints are tracked as Resource rows."""

    # Unique human-readable identifier; doubles as the primary key.
    name = models.CharField(max_length=100, primary_key=True,
                            help_text='Service name has to be unique')
    # Root URL that Resource.url() joins resource names onto
    # (stored without a trailing slash).
    base_url = models.CharField(
        max_length=400,
        help_text='API base url without ending slash')
    description = models.CharField(max_length=400,
                                   help_text='Describe the service in few words')

    def __str__(self):
        return self.name
class Resource(models.Model):
    """A single REST resource exposed by a Service.

    The resource URL is built as ``<service.base_url>/<name>[/extra/parts]``.
    """

    # Service who makes the resource available
    service = models.ForeignKey(Service)
    # Resource name (also the URL path segment)
    name = models.CharField(max_length=100,
                            help_text='Name of this resouce')
    # A summary of the resource's functionality
    description = models.CharField(max_length=400,
                                   help_text='purpose of this resource')

    class Meta:
        # One row per (service, resource-name) pair.
        unique_together = (("service", "name"),)

    def url(self, *args):
        """Return the absolute URL of this resource, appending *args* as path parts."""
        url_parts = [self.service.base_url, self.name] + list(args)
        return '/'.join(url_parts)

    def is_available(self):
        """Return True when the resource answers an HTTP GET within 1 second.

        Bug fix: the original kept an unused ``message`` local and an
        unreachable ``return message`` after the try/except block.
        """
        try:
            requests.get(self.url(), timeout=1)
            return True
        except requests.exceptions.ConnectionError:
            return False

    def status(self):
        """Return a short human-readable availability string for this resource."""
        message = ''
        try:
            response = requests.head(self.url(), timeout=1)
            # Bug fix: the original used ``status_code is 200``, an identity
            # comparison that only worked via CPython small-int interning;
            # value equality is the correct check.
            if response.status_code == 200:
                message = 'Online'
            else:
                # NOTE(review): 'erros' is the original user-facing text;
                # left unchanged in case callers/templates match on it.
                message = 'Online with erros'
        except requests.exceptions.ConnectionError:
            message = 'Offline'
        return message
"aurelio.vivas@correounivalle.edu.co"
] | aurelio.vivas@correounivalle.edu.co |
5031bc2a965c2f9f19ed8c7313cf8595400f10be | 3d4094d6eca69329d4c6ba08e0c8ce79eedeb6b6 | /starter/Recusion.py | 9294f308c3d312ec11a45b4945592dbb04f552bc | [] | no_license | agkozik/Python_Course | c9f3c8b68e60b452e57f43da7554c13daf386a0c | 4b095bbc86f33999efe95127528b3e1d8bfded9f | refs/heads/master | 2022-04-27T06:04:15.276472 | 2020-04-22T11:49:06 | 2020-04-22T11:49:06 | 255,082,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | def non_recursion(n):
result = 1
for multiplayer in range(n):
result *= multiplayer
return result
def recursive_fact(n):
if n == 0:
return 1
else:
return n * recursive_fact(n-1)
# Exercise both factorial implementations with n == 100 and print the results.
print(recursive_fact(100))
print(non_recursion(100))
"agkozik@gmail.com"
] | agkozik@gmail.com |
b87853502213aa5bfaef56faf421d1b76f6f3e71 | 82c09012f7dbf4c6b5988eb517d2b53ac6e175de | /mayaTools/cgm/lib/zoo/zooPyMaya/skeletonBuilderPresets.py | a3054ab7da8e90f3c66012ce1cc05a5fbf4264fb | [
"BSD-3-Clause"
] | permissive | jjburton/cgmTools | 3def62c6a631b78b16eed89e7c746ff5190c17f7 | 8aba7e73fc330befc578da963809c5c8eda2c26c | refs/heads/master | 2023-08-21T14:43:25.249298 | 2023-02-06T00:33:07 | 2023-02-06T00:33:07 | 173,785,099 | 92 | 20 | null | 2021-02-16T19:43:55 | 2019-03-04T16:54:32 | Python | UTF-8 | Python | false | false | 41 | py |
from baseSkeletonPreset import *
#end
| [
"jjburton@gmail.com"
] | jjburton@gmail.com |
05081be19c602536b83c4921c511a0830229bd5c | 77b300d44131c74ce42c9099e1b709b9c5941ba1 | /src/zojax/content/model/tests/view.py | 2c2bb9d223bd8e21e24d76ced9764bbf6a8aa39d | [
"ZPL-2.1"
] | permissive | Zojax/zojax.content.model | 26c0984457a8a9940105d11143a1c7cb9ed9d8c0 | 07d14dc8ba467f6efb2ad58e68c050afebd0e69d | refs/heads/master | 2016-09-06T07:22:26.242130 | 2011-12-16T07:12:30 | 2011-12-16T07:12:30 | 2,035,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface, component, schema
from zope.security.proxy import removeSecurityProxy
from zojax.content.model.model import ViewModel
from zojax.content.model.interfaces import IModelRenderer
from zojax.content.type.interfaces import IContentView
class IMyDynamicView(interface.Interface):
    """Schema for MyDynamicView: a single optional text field."""

    # Optional override text; when empty, MyDynamicView.render falls back
    # to the context's title.
    content = schema.TextLine(
        title=u'Content title',
        required=False)
class MyDynamicView(ViewModel):
    """View model that renders a one-line banner.

    Uses the user-supplied ``content`` field when it is set; otherwise it
    falls back to the adapted context's title.
    """
    interface.implements(IModelRenderer, IContentView)
    component.adapts(interface.Interface, interface.Interface)

    def render(self):
        text = self.content or self.context.title
        return 'My Dynamic View: %s' % text
class IMyDynamicView2(interface.Interface):
    """Empty marker schema for MyDynamicView2 (declares no fields)."""
    pass
class MyDynamicView2(ViewModel):
    """View model with no behaviour of its own; rendering is handled by
    MyDynamicView2View."""
    component.adapts(interface.Interface, interface.Interface)
class MyDynamicView2View(object):
    """Callable browser view rendering a banner from the wrapped model's context.

    ``self.context`` is the view model; its own ``context`` attribute holds
    the content object whose title is displayed.
    """

    def __call__(self, *args, **kw):
        content_object = self.context.context
        return 'My Dynamic View: %s' % content_object.title
| [
"andrey.fedoseev@gmail.com"
] | andrey.fedoseev@gmail.com |
d84df8e0443840cc9a741459f80f50079bd18ce3 | fc96f28fc3dd08ecd418fe13f13d71c8f7b51cd9 | /enrich/tfc.py | 12be029215f5e4e49edfbe219a703bfb874c92ff | [
"MIT"
] | permissive | mindis/spacyapp | 59d811854291a770bcb9d6f0552a9ceaa48246ec | 2b5b6af1be4e0cee55bcc776253d63f5005f899f | refs/heads/master | 2020-04-14T12:54:17.922808 | 2019-01-02T14:12:10 | 2019-01-02T14:12:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,409 | py | from enrich.tei import XMLReader
from datetime import datetime
class Tcf(XMLReader):
    """Reader/processor for TCF (Text Corpus Format) documents.

    Tried with 'data/nn_nrhz_001_1418.tcf.xml'.
    """

    def list_nodes(self, element):
        """Return all nodes whose local name equals *element*."""
        expr = "//tcf:*[local-name() = $name]"
        nodes = self.tree.xpath(expr, name=element, namespaces=self.nsmap)
        return nodes

    def list_multiple_nodes(self, elements=['token', 'lemma', 'tag', 'sentence']):
        """Return a dict mapping each requested element name to its node list."""
        nodes = {}
        for x in elements:
            nodes[x] = self.list_nodes(x)
        return nodes

    def count_multiple_nodes(self, elements=['token', 'lemma', 'tag', 'sentence']):
        """Return a dict mapping each element name to the number of such nodes."""
        nodes = self.list_multiple_nodes(elements)
        result = {}
        for key, value in nodes.items():
            result[key] = len(value)
        return result

    def create_sent_list(self):
        """Return one dict per sentence with its token/tag/lemma nodes.

        Token positions are recovered by counting the IDs in each sentence's
        ``tokenIDs`` attribute; tokens/tags/lemmas are assumed to appear in
        document order, aligned with the sentences.
        """
        elements = ['token', 'lemma', 'tag', 'sentence']
        start = 0
        end = 0
        sent_list = []
        nodes = self.list_multiple_nodes(elements)
        sentences = nodes['sentence']
        tokens = nodes['token']
        tags = nodes['tag']
        lemmas = nodes['lemma']
        for x in sentences:
            sent = {}
            token_count = len(x.xpath('./@tokenIDs')[0].split(' '))
            end = start + token_count
            sent['sent_id'] = x.xpath('./@ID')[0]
            sent['words'] = tokens[start:end]
            sent['tags'] = tags[start:end]
            sent['lemmas'] = lemmas[start:end]
            start = end
            sent_list.append(sent)
        return sent_list

    def tag_train_data(self):
        """Return (text, annotations) samples for training spaCy's POS tagger."""
        TRAIN_DATA = []
        for x in self.create_sent_list():
            text = (" ".join([y.text for y in x['words']]))
            tags = {'tags': [y.text for y in x['tags']]}
            words = {'word': [y.text for y in x['words']]}
            lemmas = {'lemma': [y.text for y in x['lemmas']]}
            TRAIN_DATA.append((text, [words, tags, lemmas]))
        return TRAIN_DATA

    def create_tokenlist(self):
        """Return a list of token dicts (value, tokenId, whitespace flag).

        The ``whitespace`` flag is a punctuation heuristic saying whether a
        space separates this token from the following one.
        """
        words = self.list_nodes('token')
        token_list = []
        for x in words:
            token = {}
            token['value'] = x.text
            token['tokenId'] = x.xpath('./@ID')[0]
            try:
                follows = x.getnext().text
            except AttributeError:
                # Last token: getnext() returns None.
                follows = None
            if follows:
                if token['value'] == "(":
                    token['whitespace'] = False
                elif token['value'] == "„":
                    token['whitespace'] = False
                elif token['value'] == "‒":
                    token['whitespace'] = True
                elif follows[0].isalnum():
                    token['whitespace'] = True
                elif follows[0] == "„":
                    token['whitespace'] = True
                elif follows[0] == "(":
                    token['whitespace'] = True
                else:
                    token['whitespace'] = False
            else:
                token['whitespace'] = False
            token_list.append(token)
        return token_list

    def process_tokenlist(self, tokenlist, by_id=None):
        """Write lemma/iob/type/pos from *tokenlist* back onto the token nodes.

        With *by_id* set, entries are matched to nodes via their ``tokenId``;
        otherwise entries are applied positionally, and only when the list
        and tree token counts match.  Returns the updated ``self.tree``.
        """
        nr_tokens = len(tokenlist)
        nr_nodes = len(self.tree.xpath('.//tcf:token', namespaces=self.nsmap))
        print("# tokens: {}".format(nr_tokens))
        print("# token-nodes: {}".format(nr_nodes))
        if by_id:
            expr = './/tcf:token[@ID=$id]'
            for x in tokenlist:
                print('by ID')
                try:
                    node = self.tree.xpath(expr, id=x['tokenId'], namespaces=self.nsmap)[0]
                except IndexError:
                    node = None
                if node is not None:
                    # Bug fix: the token entries are dicts, so a missing key
                    # raises KeyError, not the AttributeError the original
                    # caught — entries without all four keys used to crash.
                    try:
                        node.attrib['lemma'] = x['lemma']
                    except KeyError:
                        pass
                    try:
                        node.attrib['iob'] = x['iob']
                    except KeyError:
                        pass
                    try:
                        node.attrib['type'] = x['type']
                    except KeyError:
                        pass
                    try:
                        node.attrib['ana'] = x['pos']
                    except KeyError:
                        pass
        elif nr_tokens == nr_nodes:
            # Bug fix: the original guard compared ``nr_nodes == nr_nodes``
            # (always true); the intended check is that the tokenlist length
            # matches the number of token nodes before positional writing.
            print('not by ID')
            counter = 0
            for x in self.list_nodes('token'):
                x.attrib['lemma'] = tokenlist[counter]['lemma']
                x.attrib['iob'] = tokenlist[counter]['iob']
                x.attrib['type'] = tokenlist[counter]['type']
                x.attrib['ana'] = tokenlist[counter]['pos']
                counter += 1
        else:
            pass
        return self.tree
| [
"Peter.Andorfer@oeaw.ac.at"
] | Peter.Andorfer@oeaw.ac.at |
2be6d7c09e45275b87c59105dd1e93b1a0c45a14 | ac7e039a70ba627f6d9a7a02c9a8849ed5e18a89 | /unep.project-database/tags/0.8/content/SubProject.py | 8b875041728ebaf498aace281aeaa619d7847571 | [] | no_license | jean/project-database | 65a2559844175350351ba87e820d25c3037b5fb2 | e818d322ec11d950f2770cd5324fbcd1acaa734d | refs/heads/master | 2021-01-01T06:27:24.528764 | 2014-01-31T11:11:45 | 2014-01-31T11:11:45 | 32,125,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,722 | py | # -*- coding: utf-8 -*-
#
# File: SubProject.py
#
# Copyright (c) 2009 by []
# Generator: ArchGenXML Version 2.1
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """Mike Metcalfe <mikejmets@gmail.com>, Jurgen Blignaut
<jurgen.blignaut@gmail.com>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
import interfaces
from Products.ProjectDatabase.content.CurrencyMixin import CurrencyMixin
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.ATVocabularyManager.namedvocabulary import NamedVocabulary
from Products.ProjectDatabase.config import *
# additional imports from tagged value 'import'
import ProjectGeneralInformation
from Products.FinanceFields.MoneyField import MoneyField
from Products.DataGridField import DataGridField, Column, SelectColumn, CalendarColumn
from Products.CMFCore.utils import getToolByName
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
##code-section module-header #fill in your manual code here
from DateTime import DateTime
from Products.FinanceFields.Money import Money
from Products.ProjectDatabase.utils import getYearVocabulary as getAnnualVocabulary
from Products.DataGridField import MoneyColumn, ReferenceColumn
# Detached field templates: these Money fields are only used as the ``field``
# argument of MoneyColumn entries in the DataGridFields of the main schema;
# they are never attached to the content type directly.
datagrid_schema = Schema((

    MoneyField(
        name='cofinancing_cash_planned_amount',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="",
            i18n_domain='Financials',
        ),
    ),
    MoneyField(
        name='cofinancing_cash_actual_amount',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="",
            i18n_domain='Financials',
        ),
    ),
    MoneyField(
        name='cofinancing_inkind_planned_amount',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="",
            i18n_domain='Financials',
        ),
    ),
    MoneyField(
        name='cofinancing_inkind_actual_amount',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="",
            i18n_domain='Financials',
        ),
    ),
    MoneyField(
        name='cash_disbursements_amount',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="",
            i18n_domain='Financials',
        ),
    ),
    MoneyField(
        name='amount',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="",
            i18n_domain='Financials',
        ),
    ),
))
##/code-section module-header
# Archetypes schema for the SubProject content type.  All fields carry
# write_permission "FMO"; Computed* fields are backed by the identically
# named get* methods on the SubProject class.
schema = Schema((

    ComputedField(
        name='FinanceCategory',
        widget=ComputedField._properties['widget'](
            label="Financial Category",
            label_msgid='ProjectDatabase_label_FinanceCategory',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    StringField(
        name='PMSNumber',
        widget=StringField._properties['widget'](
            label="PMS Number",
            label_msgid='ProjectDatabase_label_PMSNumber',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    StringField(
        name='IMISNumber',
        widget=StringField._properties['widget'](
            label="IMIS Number",
            label_msgid='ProjectDatabase_label_IMISNumber',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    DataGridField(
        name='SubProjectExecutingAgency',
        widget=DataGridField._properties['widget'](
            columns={'executing_agency': ReferenceColumn('Executing Agency', fieldname='ExecutingAgencyName'), 'executing_agency_category': SelectColumn('Category', vocabulary='getCategoryVocabulary')},
            label="Lead Executing Agency",
            label_msgid='ProjectDatabase_label_SubProjectExecutingAgency',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=('executing_agency', 'executing_agency_category'),
    ),
    MoneyField(
        name='CommittedGrant',
        default='0.0',
        widget=MoneyField._properties['widget'](
            label="Committed Grant",
            label_msgid='ProjectDatabase_label_CommittedGrant',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    # Co-financing grids: MoneyColumns reference the detached field
    # templates declared in datagrid_schema above.
    DataGridField(
        name='CoFinancingCash',
        widget=DataGridField._properties['widget'](
            columns={ 'cofinancing_cash_source' : SelectColumn("Source", vocabulary="getDonorTypesVocabulary"), 'cofinancing_cash_donor_name' : Column("Name of donor"), 'cofinancing_cash_planned_amount' : MoneyColumn("Planned Amount", field=datagrid_schema['cofinancing_cash_planned_amount']), 'cofinancing_cash_actual_amount' : MoneyColumn("Actual Amount", field=datagrid_schema['cofinancing_cash_actual_amount']) },
            label="Cofinancing: Cash",
            label_msgid='ProjectDatabase_label_CoFinancingCash',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=("cofinancing_cash_source", "cofinancing_cash_donor_name", "cofinancing_cash_planned_amount", "cofinancing_cash_actual_amount"),
    ),
    DataGridField(
        name='CoFinancingInKind',
        widget=DataGridField._properties['widget'](
            columns={ 'cofinancing_inkind_source' : SelectColumn("Source", vocabulary="getDonorTypesVocabulary"), 'cofinancing_inkind_donor_name' : Column("Name of donor"), 'cofinancing_inkind_planned_amount' : MoneyColumn("Planned Amount", field=datagrid_schema['cofinancing_inkind_planned_amount']), 'cofinancing_inkind_actual_amount' : MoneyColumn("Actual Amount", field=datagrid_schema['cofinancing_inkind_actual_amount']) },
            label="Cofinancing: In Kind",
            label_msgid='ProjectDatabase_label_CoFinancingInKind',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=("cofinancing_inkind_source", "cofinancing_inkind_donor_name", "cofinancing_inkind_planned_amount", "cofinancing_inkind_actual_amount"),
    ),
    ComputedField(
        name='SumCoFinCashPlanned',
        widget=ComputedField._properties['widget'](
            label="Total Cofinancing: Cash (Planned)",
            label_msgid='ProjectDatabase_label_SumCoFinCashPlanned',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    ComputedField(
        name='SumCoFinCashActual',
        widget=ComputedField._properties['widget'](
            label="Total Cofinancing: Cash (Actual)",
            label_msgid='ProjectDatabase_label_SumCoFinCashActual',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    ComputedField(
        name='SumCoFinInKindPlanned',
        widget=ComputedField._properties['widget'](
            label="Total Cofinancing: In Kind (Planned)",
            label_msgid='ProjectDatabase_label_SumCoFinInKindPlanned',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    ComputedField(
        name='SumCoFinInKindActual',
        widget=ComputedField._properties['widget'](
            label="Total Cofinancing: In Kind (Actual)",
            label_msgid='ProjectDatabase_label_SumCoFinInKindActual',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    ComputedField(
        name='TotalCostOfSubProjectPlanned',
        widget=ComputedField._properties['widget'](
            label="Total Cost of Sub Project (Planned)",
            label_msgid='ProjectDatabase_label_TotalCostOfSubProjectPlanned',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    ComputedField(
        name='TotalCostOfSubProjectActual',
        widget=ComputedField._properties['widget'](
            label="Total Cost of Sub Project (Actual)",
            label_msgid='ProjectDatabase_label_TotalCostOfSubProjectActual',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    DataGridField(
        name='CashDisbursements',
        widget=DataGridField._properties['widget'](
            columns={ 'cash_disbursements_date' : CalendarColumn("Date"), 'cash_disbursements_amount' : MoneyColumn("Amount", field=datagrid_schema['cash_disbursements_amount']), 'cash_disbursements_imis_rcpt_number' : Column("IMIS RCPT Number") },
            label="Cash Disbursements",
            label_msgid='ProjectDatabase_label_CashDisbursements',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=("cash_disbursements_date", "cash_disbursements_amount", "cash_disbursements_imis_rcpt_number"),
    ),
    ComputedField(
        name='SumCashDisbursements',
        widget=ComputedField._properties['widget'](
            label="Total Cash Disbursements",
            label_msgid='ProjectDatabase_label_SumCashDisbursements',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    DataGridField(
        name='YearlyExpenditures',
        widget=DataGridField._properties['widget'](
            columns={ 'year' : SelectColumn("Year", vocabulary='getYearVocabulary'), 'amount' : MoneyColumn("Amount", field=datagrid_schema['amount']) },
            label="Yearly Expenditures",
            label_msgid='ProjectDatabase_label_YearlyExpenditures',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=("year", "amount"),
    ),
    ComputedField(
        name='SumYearlyExpenditures',
        widget=ComputedField._properties['widget'](
            label="Total Sub-Project Expenditures",
            label_msgid='ProjectDatabase_label_SumYearlyExpenditures',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    StringField(
        name='Status',
        widget=SelectionWidget(
            label="Sub Project Status",
            label_msgid='ProjectDatabase_label_Status',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        vocabulary=NamedVocabulary("""Status"""),
    ),
    TextField(
        name='FinancialStatusRemarks',
        widget=TextAreaWidget(
            label="Sub Project Financial Status – Remarks",
            label_msgid='ProjectDatabase_label_FinancialStatusRemarks',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    IntegerField(
        name='PlannedDuration',
        widget=IntegerField._properties['widget'](
            label="Planned Duration",
            label_msgid='ProjectDatabase_label_PlannedDuration',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    DateTimeField(
        name='InitialCompletionDate',
        widget=DateTimeField._properties['widget'](
            label="Initial Completion Date",
            show_hm=False,
            label_msgid='ProjectDatabase_label_InitialCompletionDate',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    DateTimeField(
        name='RevisedCompletionDate',
        widget=DateTimeField._properties['widget'](
            label="Revised Completion Date",
            show_hm=False,
            label_msgid='ProjectDatabase_label_RevisedCompletionDate',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    TextField(
        name='DelayReason',
        widget=TextAreaWidget(
            label="Reasons for Delay",
            label_msgid='ProjectDatabase_label_DelayReason',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
    ),
    DataGridField(
        name='Reports',
        widget=DataGridField._properties['widget'](
            columns={ 'report_type' : SelectColumn("Report Type", vocabulary="getReportTypesVocabulary"), 'report_period' : SelectColumn("Report Period", vocabulary="getYearVocabulary"), 'report_received_date' : CalendarColumn("Report Received Date"), 'amount' : MoneyColumn("Amount", field=datagrid_schema['amount']) },
            label='Reports',
            label_msgid='ProjectDatabase_label_Reports',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=("report_type", "report_period", "report_received_date", "amount"),
    ),
    DataGridField(
        name='SubProjectRevision',
        widget=DataGridField._properties['widget'](
            columns={"revision_number": Column("Revision Number"), "revision_type": SelectColumn("Revision Type", vocabulary="getRevisionTypeVocabulary"), "revision_date": CalendarColumn("Revision Date")},
            label="Sub Project Revision",
            label_msgid='ProjectDatabase_label_SubProjectRevision',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns=("revision_number", "revision_type", "revision_date"),
    ),
    DataGridField(
        name='ExecutingAgencyRiskRating',
        widget=DataGridField._properties['widget'](
            columns= {'Risk_Level': SelectColumn("Risk Level", vocabulary='getRiskLevelVocabulary'), "Assessment_Date": CalendarColumn("Assessment Date"), 'Remarks': Column("Remarks")},
            label="Agency Risk Rating",
            label_msgid='ProjectDatabase_label_ExecutingAgencyRiskRating',
            i18n_domain='ProjectDatabase',
        ),
        write_permission="FMO",
        columns= ("Risk_Level", "Assessment_Date", "Remarks"),
    ),
),
)

##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema

SubProject_schema = BaseSchema.copy() + \
    schema.copy()

##code-section after-schema #fill in your manual code here
SubProject_schema['title'].widget.label = 'Sub-Project Title'

# Hidden reference field backing the 'executing_agency' ReferenceColumn of
# the SubProjectExecutingAgency grid above.
SubProject_schema = SubProject_schema.copy() + Schema((
    ReferenceField("ExecutingAgencyName",
        widget = ReferenceBrowserWidget(
            label="Executing Agency",
            visible=False,
            startup_directory='/contacts',
        ),
        allowed_types=('Organisation',),
        relationship='subproj_executingagency_fake',
        multiValued=1,
    ),
))
##/code-section after-schema
class SubProject(BaseContent, CurrencyMixin, BrowserDefaultMixin):
    """Archetypes content type holding the financial record of a
    sub-project: committed grant, co-financing, disbursements,
    expenditures and reporting status.
    """
    security = ClassSecurityInfo()
    implements(interfaces.ISubProject)

    meta_type = 'SubProject'
    _at_rename_after_creation = True

    schema = SubProject_schema

    ##code-section class-header #fill in your manual code here
    ##/code-section class-header

    # Methods

    # Manually created methods

    security.declarePublic('getCategoryVocabulary')
    def getCategoryVocabulary(self):
        """Return the 'Category' named vocabulary as a DisplayList."""
        pvt = getToolByName(self, 'portal_vocabularies')
        vocab = pvt.getVocabularyByName('Category')
        return vocab.getDisplayList(self)

    security.declarePublic('getDonorTypesVocabulary')
    def getDonorTypesVocabulary(self):
        """Return the 'DonorType' named vocabulary as a DisplayList."""
        pvt = getToolByName(self, 'portal_vocabularies')
        vocab = pvt.getVocabularyByName('DonorType')
        return vocab.getDisplayList(self)

    security.declarePublic('getReportTypesVocabulary')
    def getReportTypesVocabulary(self):
        """Return the 'ReportType' named vocabulary as a DisplayList."""
        pvt = getToolByName(self, 'portal_vocabularies')
        vocab = pvt.getVocabularyByName('ReportType')
        return vocab.getDisplayList(self)

    security.declarePublic('getRevisionTypeVocabulary')
    def getRevisionTypeVocabulary(self):
        """Return the 'ProjectRevisionType' named vocabulary as a DisplayList."""
        pvt = getToolByName(self, 'portal_vocabularies')
        vocab = pvt.getVocabularyByName('ProjectRevisionType')
        return vocab.getDisplayList(self)

    security.declarePublic('getRiskLevelVocabulary')
    def getRiskLevelVocabulary(self):
        """Return the 'RiskLevel' named vocabulary as a DisplayList."""
        pvt = getToolByName(self, 'portal_vocabularies')
        vocab = pvt.getVocabularyByName('RiskLevel')
        return vocab.getDisplayList(self)

    security.declarePublic('getFinanceCategory')
    def getFinanceCategory(self):
        # Delegated to the parent (acquisition) container's finance category.
        return self.aq_parent.getFinanceCategory()

    security.declarePublic('_computeDataGridAmount')
    def _computeDataGridAmount(self, column):
        """Sum a datagrid Money column, skipping empty/None cells."""
        amount = self.getZeroMoneyInstance()
        for v in column:
            if v:
                amount += v
        return amount

    security.declarePublic('getSumCoFinCashPlanned')
    def getSumCoFinCashPlanned(self):
        """Total of the planned cash co-financing amounts."""
        values = self.getCoFinancingCash()
        return self._computeDataGridAmount( \
            [v['cofinancing_cash_planned_amount'] \
             for v in values if v['cofinancing_cash_planned_amount']])

    security.declarePublic('getSumCoFinCashActual')
    def getSumCoFinCashActual(self):
        """Total of the actual cash co-financing amounts."""
        values = self.getCoFinancingCash()
        return self._computeDataGridAmount( \
            [v['cofinancing_cash_actual_amount'] \
             for v in values if v['cofinancing_cash_actual_amount']])

    security.declarePublic('getSumCoFinInKindPlanned')
    def getSumCoFinInKindPlanned(self):
        """Total of the planned in-kind co-financing amounts."""
        values = self.getCoFinancingInKind()
        return self._computeDataGridAmount( \
            [v['cofinancing_inkind_planned_amount'] \
             for v in values if v['cofinancing_inkind_planned_amount']])

    security.declarePublic('getSumCoFinInKindActual')
    def getSumCoFinInKindActual(self):
        """Total of the actual in-kind co-financing amounts."""
        values = self.getCoFinancingInKind()
        return self._computeDataGridAmount( \
            [v['cofinancing_inkind_actual_amount'] \
             for v in values if v['cofinancing_inkind_actual_amount']])

    security.declarePublic('getSumCashDisbursements')
    def getSumCashDisbursements(self):
        """Total of all recorded cash disbursements."""
        values = self.getCashDisbursements()
        returnVal = self._computeDataGridAmount( \
            [v['cash_disbursements_amount'] \
             for v in values if v['cash_disbursements_amount']])
        return returnVal

    def getSumYearlyExpenditures(self):
        """Total of all yearly expenditure rows."""
        values = self.getYearlyExpenditures()
        returnValue = self._computeDataGridAmount( \
            [v['amount'] for v in values if v['amount']])
        return returnValue

    def getTotalCostOfSubProjectPlanned(self):
        """Committed grant plus planned cash and in-kind co-financing."""
        total = self.getCommittedGrant()
        if self.getSumCoFinCashPlanned():
            total += self.getSumCoFinCashPlanned()
        if self.getSumCoFinInKindPlanned():
            total += self.getSumCoFinInKindPlanned()
        return total

    def getTotalCostOfSubProjectActual(self):
        """Actual expenditures plus actual cash and in-kind co-financing."""
        total = self.getSumYearlyExpenditures()
        if self.getSumCoFinCashActual():
            total += self.getSumCoFinCashActual()
        if self.getSumCoFinInKindActual():
            total += self.getSumCoFinInKindActual()
        return total

    def getAmountReceivable(self):
        # Disbursed so far minus spent so far.
        return self.getSumCashDisbursements() - self.getSumYearlyExpenditures()

    def getLatestReportData(self, report, field):
        """Return *field* from the most recently received report of type *report*.

        Returns 'Unspecified' when no dated report of that type exists.
        """
        values = self.getReports()
        result = ''
        if values:
            # 1900/01/01 acts as the "no report seen yet" sentinel date.
            date = DateTime('1900/01/01')
            for v in values:
                if v['report_received_date'] and v['report_type'] == report:
                    if date < v['report_received_date']:
                        date = v['report_received_date']
                        result = v[field]
            if date != DateTime('1900/01/01'):
                return result
        return 'Unspecified'

    def getLeadExecutingAgencyNames(self):
        """Return a comma-separated string of lead executing agency names."""
        lead = self.getSubProjectExecutingAgency()
        result = ''
        if lead:
            refcat = getToolByName(self, 'reference_catalog')
            for v in lead:
                if v['executing_agency']:
                    # Rows store the agency as a reference UID; resolve it.
                    agency = refcat.lookupObject(v['executing_agency'])
                    if agency is not None:
                        name = agency.getName()
                    else:
                        name = 'Unspecified'
                    result += name + ', '
            # Strip the trailing ", " separator.
            return result[:-2]
        return 'Unspecified'

    def getYearVocabulary(self):
        # Shared year vocabulary used by the datagrid SelectColumns.
        return getAnnualVocabulary()


registerType(SubProject, PROJECTNAME)
# end of class SubProject

##code-section module-footer #fill in your manual code here
##/code-section module-footer
| [
"jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d"
] | jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d |
5b3676d9864fd42804bba265747557e5df923681 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-vs/aliyunsdkvs/request/v20181212/DescribeVsDomainReqBpsDataRequest.py | 307d6bbfec73206085e343b8694ed5221dad5335 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 2,455 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class DescribeVsDomainReqBpsDataRequest(RpcRequest):
    """RPC request for the Video Surveillance (vs) API operation
    ``DescribeVsDomainReqBpsData`` (API version 2018-12-12).

    Auto-generated accessor pattern: each query parameter has a
    get_/set_ pair that reads/writes the request's query-param dict.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'vs', '2018-12-12', 'DescribeVsDomainReqBpsData', 'vs')
        self.set_method('POST')
        # Wire up regional endpoint resolution when the installed core SDK
        # supports endpoint maps.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_LocationNameEn(self):
        return self.get_query_params().get('LocationNameEn')

    def set_LocationNameEn(self, LocationNameEn):
        self.add_query_param('LocationNameEn', LocationNameEn)

    def get_StartTime(self):
        return self.get_query_params().get('StartTime')

    def set_StartTime(self, StartTime):
        self.add_query_param('StartTime', StartTime)

    def get_IspNameEn(self):
        return self.get_query_params().get('IspNameEn')

    def set_IspNameEn(self, IspNameEn):
        self.add_query_param('IspNameEn', IspNameEn)

    def get_DomainName(self):
        return self.get_query_params().get('DomainName')

    def set_DomainName(self, DomainName):
        self.add_query_param('DomainName', DomainName)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self, EndTime):
        self.add_query_param('EndTime', EndTime)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)

    def get_Interval(self):
        return self.get_query_params().get('Interval')

    def set_Interval(self, Interval):
        self.add_query_param('Interval', Interval)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
e3629543f40c546cf51bb37c2ae7539b0733c980 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Main_20190117194302.py | 39149174f32dc16da49b80990804dd7f78bc9b70 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,739 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
# Maya Header
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as omui
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
    """Return ``(form_class, base_class)`` compiled from a Qt Designer .ui file.

    Mirrors PyQt's ``uic.loadUiType`` for bindings that lack it: under PyQt
    the bundled ``_uic`` helper is used directly; under PySide/PySide2 the
    .ui file is compiled to Python source in memory and executed so the
    generated ``Ui_*`` form class can be recovered.

    NOTE: relies on ``cStringIO`` and the ``exec ... in ...`` statement,
    so this function is Python 2 only.
    """
    import plugin.Qt as Qt
    if Qt.__binding__.startswith('PyQt'):
        from Qt import _uic as uic
        return uic.loadUiType(uiFile)
    elif Qt.__binding__ == 'PySide':
        import pysideuic as uic
    else:
        import pyside2uic as uic
    import xml.etree.ElementTree as xml
    from cStringIO import StringIO
    # The designer XML names both the generated class and its Qt base class.
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}
        # Compile the .ui file to Python source in memory and execute it in a
        # scratch namespace so the generated Ui_<form> class can be extracted.
        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec pyc in frame
        # Fetch the generated form class from the scratch namespace and
        # resolve the Qt base class named in the designer XML.
        form_class = frame['Ui_%s'%form_class]
        base_class = eval('%s'%widget_class)
    return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Main.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
import Cam_Item_Layout
import Cam_Attrubte_Panel
reload(Cam_Item_Layout)
reload(Cam_Attrubte_Panel)
from Cam_Item_Layout import Cam_Item_Layout
from Cam_Attrubte_Panel import Cam_Attrubte_Panel
class Cam_Main(form_class,base_class):
    """Main window: camera item list on the left, attribute panel on the right."""

    def __init__(self):
        super(Cam_Main,self).__init__()
        self.setupUi(self)
        # The two child widgets live side by side inside a splitter that is
        # injected into the layout created by the Designer .ui file.
        self.Cam_Item_Widget = Cam_Item_Layout()
        self.Cam_Attrubte_Widget = Cam_Attrubte_Panel()
        splitter = QSplitter()
        splitter.setHandleWidth(5)
        splitter.addWidget(self.Cam_Item_Widget)
        splitter.addWidget(self.Cam_Attrubte_Widget)
        self.Main_Layout.layout().addWidget(splitter)
        self.Default_Attr_Setting()

    def Default_Attr_Setting(self):
        """Reset the attribute panel to its 'no camera selected' state."""
        self.Cam_Attrubte_Widget.Cam_Name_Label.setText(u"<center> - 请选择摄像机 - </center>")
        self.Cam_Attrubte_Widget.Cam_Input_Toggle.setVisible(False)
        self.Cam_Attrubte_Widget.Cam_Input_Layout.setVisible(False)
        self.Cam_Attrubte_Widget.Cam_Output_Toggle.setVisible(False)
        self.Cam_Attrubte_Widget.Cam_Output_Layout.setVisible(False)

    def Save_Json_Fun(self,path=GUI_STATE_PATH):
        """Persist GUI state (currently only ``self.DOCK``) to a JSON file.

        NOTE(review): the bare ``except:`` swallows every error (including a
        missing ``self.DOCK`` attribute) and only warns when *path* is
        non-empty — confirm this best-effort behaviour is intended.
        """
        GUI_STATE = {}
        GUI_STATE['DOCK'] = self.DOCK
        try:
            with open(path,'w') as f:
                json.dump(GUI_STATE,f,indent=4)
        except:
            if path != "":
                QMessageBox.warning(self, u"Warning", u"保存失败")

    def Load_Json_Fun(self,path=GUI_STATE_PATH,load=False):
        """Load GUI state from *path*; return True on success, else False.

        The parsed dict is read into a local and currently discarded — only
        the boolean result is used by callers.
        """
        if os.path.exists(path):
            GUI_STATE = {}
            with open(path,'r') as f:
                GUI_STATE = json.load(f)
            return True
        else:
            if load==True:
                QMessageBox.warning(self, u"Warning", u"加载失败\n检查路径是否正确")
            return False

    def mousePressEvent(self,e):
        """Mark the camera item under the mouse with a red border.

        NOTE(review): %-formatting leaves the doubled braces literal, so the
        emitted selector is ``Cam_Item_1{{border:...}}``, which Qt's style
        sheet parser rejects — single braces were probably intended. Both
        branches also apply the same stylesheet, so the hit test currently
        has no visible effect; the else branch likely should clear the
        border instead. Confirm intent before changing.
        """
        for i,child in enumerate(self.Cam_Item_Widget.Item_Layout.children()):
            if i != 0:
                if child.geometry().contains(e.pos()):
                    child.setStyleSheet("Cam_Item_%s{{border:3px solid red}}" % i)
                else:
                    child.setStyleSheet("Cam_Item_%s{{border:3px solid red}}"% i)
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
c48b263b359fd78edb64adc043143431bdb69b80 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/e3ebbbfe7e234f848c86e6281082178b.py | 364fd3459a9c67d7731467dfdf0b50a1914242a2 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 210 | py |
def hey(string):
    """Answer *string* the way Bob, the lackadaisical teenager, would.

    Silence gets indignation, shouting gets told to chill (even shouted
    questions), a trailing '?' gets a flat 'Sure.', anything else a shrug.
    """
    if not string.strip():
        return 'Fine. Be that way!'
    if string.isupper():
        return 'Whoa, chill out!'
    if string.endswith('?'):
        return 'Sure.'
    return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
def list_unique_names(phonebook):
    """Count distinct first names using a plain list.

    Each entry triggers a linear membership scan, so the whole pass is
    O(n^2) — this is deliberately the slow baseline for the set version.
    """
    seen = []
    for name, _number in phonebook:
        first_name, _last_name = name.split(" ", 1)
        if first_name not in seen:
            seen.append(first_name)
    return len(seen)
def set_unique_names(phonebook):
    """Count distinct first names using a set (O(1) membership per entry)."""
    firsts = {first for first, _last in
              (name.split(" ", 1) for name, _number in phonebook)}
    return len(firsts)
# Benchmark fixture: a few real entries plus 10k synthetic ones so the
# list-based and set-based counters can be compared on non-trivial input.
phonebook = [
    ("Joe Doe", "555-555-5555"),
    ("Albert Einstein", "212-555-5555"),
    ("John Murphey", "202-555-5555"),
    ("Albert Rutherford", "647-555-5555"),
    ("Elaine Bodian", "301-555-5555")
]
# chr(i) makes most first names unique, the worst case for the list
# implementation's linear membership scan.
for i in range(10000):
    if (i % 2 == 0):
        phonebook.append(("Jo" + chr(i) + " Doe", "555-555-5555"))
    else:
        phonebook.append(("Elaine"+ chr(i) +" Bodian", "301-555-5555"))
print ("Number of unique name from set method", set_unique_names(phonebook))
print ("Number of unique names from list method", list_unique_names(phonebook))
# Sample IPython timings recorded by the original author:
#In [21]: %timeit list_unique_names(phonebook)
#1 loop, best of 3: 2.3 s per loop
#In [22]: %timeit set_unique_names(phonebook)
"zp0int@qq.com"
] | zp0int@qq.com |
005dab74220c894d396199899ee60aaacaae6ac3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_squished.py | 706288b75fe0e4e6999c43fefc2d7e58c3ec6189 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
from xai.brain.wordbase.verbs._squish import _SQUISH
#calss header
class _SQUISHED(_SQUISH):
    """Word entry for the inflected form "squished".

    Inherits all behaviour from the base verb ``_SQUISH`` and only
    overrides the identifying attributes.
    """

    def __init__(self):
        _SQUISH.__init__(self)
        self.specie = 'verbs'
        self.basic = "squish"
        self.name = "SQUISHED"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9f9f80ab179a523cd94e5105dec350aa9f2db895 | 84e4149b3571ff4abe5c27a66ecbde03c5afec3c | /chapter_10/section_4_3/remember_me.py | 290e2cdaddd45c4dd93827b728d06f21b960e63d | [] | no_license | zhanlu-wm/Python-Crash-Course | 6efa04bd5c03e37394b3602d20e7ae57688836e7 | 043fe97b4acdf0008351fd0fdb045888e9bdd44d | refs/heads/master | 2021-07-18T18:34:32.435763 | 2017-10-23T15:27:17 | 2017-10-23T15:27:17 | 103,259,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import json
def greet_user():
    """Greet the user by name, remembering the name between runs in a JSON file."""
    filename = 'username.json'
    try:
        with open(filename) as f_obj:
            username = json.load(f_obj)
    except FileNotFoundError:
        # First run: ask for the name and cache it for next time.
        username = input("What is your name? ")
        with open(filename, 'w') as f_obj:
            json.dump(username, f_obj)
        print("We'll remember you when you come back, " + username + "!")
        return
    print("Welcome back, " + username + "!")
greet_user() | [
"ncu09wangming@163.com"
] | ncu09wangming@163.com |
b54e8f18efa6c1389182d0d9d0d9ed00020a5ac5 | 4d5e6e0a7057123ddd7cb97027e667117e1be143 | /data_structure/python_dictionary.py | 7c2cdca3a3a25e5200e605b252fd542c38fde9b4 | [] | no_license | shubhomedia/Learn_Python | cee48990c04521fcbb7dbf5ad120c69170dcd1be | 01e0a8e3dc2de87b09c963e7cb9fc5e246831ddb | refs/heads/master | 2021-07-01T08:53:51.151326 | 2021-01-02T17:31:36 | 2021-01-02T17:31:36 | 204,191,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | #python Dictionary
# Dictionary basics: any hashable object (here a tuple) can be used as a key.
my_dictionary = {'key':'value',('K','E','Y'):5}
my_dictionary1 = {x:x+1 for x in range(10)}  # dict comprehension
print(my_dictionary['key'])
print(my_dictionary1)
try:
    # Missing key raises KeyError; printing the exception shows the key itself.
    print(my_dictionary[1])
except Exception as e:
    print(e)
print(my_dictionary.keys()) # print keys
print(my_dictionary.values()) # print values
my_dictionary1.clear()  # empties the dict in place
print(my_dictionary1) | [
"shubhomedia@gmail.com"
] | shubhomedia@gmail.com |
227f5a29b56728a8daf1b78dbeac24d393ae2c6d | 8a47ab47a101d4b44dd056c92a1763d5fac94f75 | /力扣/简单练习/344-双指针实现反转字符串.py | ed42ba2e0f43f9e67fb638bb7438fb76bc5b6fbc | [] | no_license | Clint-cc/Leecode | d5528aa7550a13a5bcf2f3913be2d5db2b5299f3 | 8befe73ab3eca636944800e0be27c179c45e1dbf | refs/heads/master | 2020-09-14T07:35:41.382377 | 2020-07-01T01:27:18 | 2020-07-01T01:27:18 | 223,066,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # !D:/Code/python
# -*- coding:utf-8 -*-
# @Author : Clint
# 双指针
def reverseString(s):
    """Reverse list ``s`` in place with a two-pointer sweep and return it."""
    length = len(s)
    for left in range(length // 2):
        right = length - 1 - left
        s[left], s[right] = s[right], s[left]
    return s
# Demo: expects ['y', 'r', 'a', 'e', 'w'].
print(reverseString(['w', 'e', 'a', 'r', 'y']))
| [
"clint1801@163.com"
] | clint1801@163.com |
1773f62bd5e54835d7f80a13b980ba3bec26d85b | b771dbc3dc2dc330cf67ff5d030c3bbd474b5a86 | /setup.py | d302b0f496404be0fb05a6368e9df706b82bb04a | [] | no_license | RedTurtle/pyramid_alfresco | 8ddd273604edfdf36eabf11205c38d7d140d8312 | 63129943f52839956e3a39244c1f547ebe5a342f | refs/heads/master | 2021-01-10T16:10:22.817580 | 2013-04-29T08:10:06 | 2013-04-29T08:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'pyramid',
'SQLAlchemy',
'transaction',
'pyramid_tm',
'pyramid_debugtoolbar',
'zope.sqlalchemy',
'waitress',
'pyramid_fanstatic',
'js.bootstrap==2.2.2',
'js.jqueryui',
'js.tinymce',
'velruse',
'pyramid_beaker',
'cmislib'
]
setup(name='pyramid_alfresco',
version='0.0',
description='pyramid_alfresco',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='pyramid_alfresco',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = pyramid_alfresco:main
[console_scripts]
initdb = pyramid_alfresco.initdb:main
[fanstatic.libraries]
pyramid_alfresco = pyramid_alfresco.resources:library
""",
)
| [
"andrew@mleczko.net"
] | andrew@mleczko.net |
c30b597e24156bf7d366d9399a2e0c57ceb567e3 | 93e55f080779f16f47a7382a3fb0b29a4189e074 | /convertor/huawei/te/lang/cce/rl_bank/withdraw.py | 87bb88cd279db71a35a86e12622cb16606d69a5d | [] | no_license | jizhuoran/caffe-huawei-atlas-convertor | b00cfdec3888da3bb18794f52a41deea316ada67 | 148511a31bfd195df889291946c43bb585acb546 | refs/heads/master | 2022-11-25T13:59:45.181910 | 2020-07-31T07:37:02 | 2020-07-31T07:37:02 | 283,966,371 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25,542 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use this file
except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
Define the main function of generate schedule by cheque
"""
import re
from te import tvm
from te.platform import cce_emitinsn_params
from te.lang.cce.rl_bank.bank_cfg import INTRIN_MAP
from te.lang.cce.rl_bank.bank_cfg import SCOPE_DICT
from te.lang.cce.rl_bank.bank_cfg import MODE_RUNTIME
from te.lang.cce.rl_bank.bank_cfg import ScheduleTarget
from te.lang.cce.rl_bank.bank_cfg import Axis
from te.lang.cce.rl_bank.bank_cfg import PRIMITIVE_DICT
def proc_cache_read(stage_index, primitive, args, sch_targets, sch, mode, code_lines): # pylint: disable=too-many-locals, too-many-arguments,
    '''
    Apply the cache_read primitive (id 0) of one cheque action.

    Creates a read cache of the target tensor in the scope named by
    ``args[0]`` for the consumer stages listed in ``args[1]``, records the
    new stage in ``sch_targets`` right after the original one, and appends
    the equivalent python source line to ``code_lines``.

    :param stage_index: index of the tensor to cache-read in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 0
    :param args: [scope_id, consumer_stage_indices]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive == 0:
        sch_target = sch_targets[stage_index]
        # cache_read args: scope id plus the consumer stage indices
        scope_id = args[0]
        scope = SCOPE_DICT[scope_id]
        consumers_indicies = args[1]
        consumers = [sch_targets[i].obj for i in consumers_indicies]
        consumer_names = ', '.join([sch_targets[i].name for i in consumers_indicies])
        readed_tensor = None
        if mode == MODE_RUNTIME:
            readed_tensor = sch.cache_read(sch_target.obj, scope, consumers)
        # Cached copies of tensor x are named x_l_0, x_l_1, ...; when the
        # next entry is already such a copy, continue its numbering.
        read_index = 0
        readed_pattern = r'^%s_l_\d+$' % (sch_target.name)
        if stage_index + 1 < len(sch_targets) and re.match(readed_pattern,
                                                           sch_targets[stage_index + 1].name):
            read_index = int(sch_targets[stage_index + 1].name.split('_')[-1]) + 1
        readed_name = '%s_l_%d' % (sch_target.name, read_index)
        # insert right after the original tensor
        sch_targets.insert(stage_index + 1, ScheduleTarget(readed_name, readed_tensor, []))
        code_line = "%s = sch.cache_read(%s, '%s', [%s])" % (readed_name, sch_target.name, scope,
                                                             consumer_names)
        code_lines.append(code_line)
def proc_cache_write(stage_index, primitive, args, sch_targets, sch, mode, code_lines): # pylint: disable=too-many-locals, too-many-arguments
    '''
    Apply the cache_write primitive (id 1) of one cheque action.

    Creates a write cache of the target tensor(s) in the scope named by
    ``args[0]``, records the new "<name>_l" stage in ``sch_targets`` right
    before the original one, and appends the equivalent python source to
    ``code_lines``.

    :param stage_index: either a plain stage index, or ``[index, n]`` when
        the stage has ``n`` outputs that are cache-written together
        (e.g. tuple reduction)
    :param primitive: primitive id; no-op unless it is 1
    :param args: [scope_id]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive == 1:
        if isinstance(stage_index, list):
            # cheque form is [[6, 2], 1, 1] when more than one tensor does
            # cache_write; the second element is the output count
            write_tensor_nums = stage_index[1]
            stage_index = stage_index[0]
            sch_target = sch_targets[stage_index]
            stage_name = sch_target.name
            # cache_write args: scope id only
            scope_id = args[0]
            scope = SCOPE_DICT[scope_id]
            written_tensors = [None]
            write_tensor_objs = []
            write_tensor_names = []
            written_tensor_names = []
            # Collect every output of the multi-output op; the generated
            # names follow "<stage>_v<idx>" / "<stage>_v<idx>_l".
            for idx in range(write_tensor_nums):
                write_tensor_objs.append(sch_target.obj.op.output(idx))
                write_tensor_names.append(stage_name + "_v%s" % idx)
                written_tensor_names.append(stage_name + "_v%s_l" % idx)
            if mode == MODE_RUNTIME:
                written_tensors = sch.cache_write(write_tensor_objs, scope)
            written_name = '%s_l' % stage_name
            # insert before the original tensor; only the first written
            # tensor is tracked for subsequent actions
            sch_targets.insert(stage_index,
                               ScheduleTarget(written_name, written_tensors[0], []))
            code_lines.append("%s = sch.cache_write([%s], '%s')" %
                              (', '.join(written_tensor_names),
                               ', '.join(write_tensor_names), scope))
            code_lines.append('%s = %s' % (
                written_name, written_tensor_names[0]))
        else:
            sch_target = sch_targets[stage_index]
            # cache_write args: scope id only
            scope_id = args[0]
            scope = SCOPE_DICT[scope_id]
            written_tensor = None
            if mode == MODE_RUNTIME:
                written_tensor = sch.cache_write(sch_target.obj, scope)
            # tensor x gets a cached stage named x_l
            written_name = '%s_l' % sch_target.name
            # insert before the original tensor
            sch_targets.insert(stage_index, ScheduleTarget(written_name, written_tensor, []))
            code_lines.append("%s = sch.cache_write(%s, '%s')" %
                              (written_name, sch_target.name, scope))
def proc_double_buffer(stage_index, primitive, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Apply the double_buffer primitive (id 2) to one stage.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 2
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 2:
        return
    target = sch_targets[stage_index]
    if mode == MODE_RUNTIME:
        sch[target.obj].double_buffer()
    code_lines.append("sch[%s].double_buffer()" % target.name)
def proc_compute_inline(stage_index, primitive, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Apply the compute_inline primitive (id 3) to one stage.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 3
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 3:
        return
    target = sch_targets[stage_index]
    if mode == MODE_RUNTIME:
        sch[target.obj].compute_inline()
    code_lines.append("sch[%s].compute_inline()" % target.name)
def proc_get_axis(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Materialize the first ``args[0]`` common axes of a stage (primitive 4).

    Each axis is appended to the stage's ``axes`` list under the name
    "<stage>_axis_<i>" and a matching source line is generated.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 4
    :param args: [axis_count]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 4:
        return
    target = sch_targets[stage_index]
    axis_num = args[0]
    for idx in range(axis_num):
        obj = sch[target.obj].op.axis[idx] if mode == MODE_RUNTIME else None
        name = '%s_axis_%d' % (target.name, idx)
        target.axes.append(Axis(name, obj))
        code_lines.append("%s = sch[%s].op.axis[%d]" % (name, target.name, idx))
def proc_get_reduce_axis(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Materialize the first ``args[0]`` reduce axes of a stage (primitive 5).

    Each axis is appended to the stage's ``axes`` list under the name
    "<stage>_reduce_axis_<i>" and a matching source line is generated.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 5
    :param args: [axis_count]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 5:
        return
    target = sch_targets[stage_index]
    axis_num = args[0]
    for idx in range(axis_num):
        obj = sch[target.obj].op.reduce_axis[idx] if mode == MODE_RUNTIME else None
        name = '%s_reduce_axis_%d' % (target.name, idx)
        target.axes.append(Axis(name, obj))
        code_lines.append("%s = sch[%s].op.reduce_axis[%d]" % (name, target.name, idx))
def proc_split(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Split one tracked axis of a stage by an inner-loop factor (primitive 6).

    The split axis is removed from the stage's ``axes`` list and replaced by
    "<axis>_o" and "<axis>_i" at the same position (outer first).

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 6
    :param args: [axis_index_in_axes, factor]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 6:
        return
    target = sch_targets[stage_index]
    axis_index, factor = args[0], args[1]
    split_axis = target.axes.pop(axis_index)
    outer_obj = inner_obj = None
    if mode == MODE_RUNTIME:
        outer_obj, inner_obj = sch[target.obj].split(split_axis.obj, factor=factor)
    # keep ordering: outer first, inner second, at the split position
    target.axes.insert(axis_index, Axis("%s_i" % split_axis.name, inner_obj))
    target.axes.insert(axis_index, Axis("%s_o" % split_axis.name, outer_obj))
    code_lines.append("%s_o, %s_i = sch[%s].split(%s, factor=%d)" %
                      (split_axis.name, split_axis.name, target.name,
                       split_axis.name, factor))
def proc_nparts(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Split one tracked axis of a stage into ``nparts`` outer parts (primitive 7).

    The split axis is removed from the stage's ``axes`` list and replaced by
    "<axis>_o" and "<axis>_i" at the same position (outer first).

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 7
    :param args: [axis_index_in_axes, nparts]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 7:
        return
    target = sch_targets[stage_index]
    axis_index, nparts = args[0], args[1]
    split_axis = target.axes.pop(axis_index)
    outer_obj = inner_obj = None
    if mode == MODE_RUNTIME:
        outer_obj, inner_obj = sch[target.obj].split(split_axis.obj, nparts=nparts)
    # keep ordering: outer first, inner second, at the split position
    target.axes.insert(axis_index, Axis("%s_i" % split_axis.name, inner_obj))
    target.axes.insert(axis_index, Axis("%s_o" % split_axis.name, outer_obj))
    code_lines.append("%s_o, %s_i = sch[%s].split(%s, nparts=%d)" %
                      (split_axis.name, split_axis.name, target.name,
                       split_axis.name, nparts))
def proc_reorder(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Reorder the tracked axes of a stage (primitive 8).

    ``args[0]`` is a permutation of indices into the stage's current
    ``axes`` list; the list is rewritten in that order.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 8
    :param args: [new_order_indices]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 8:
        return
    target = sch_targets[stage_index]
    target.axes = [target.axes[idx] for idx in args[0]]
    if mode == MODE_RUNTIME:
        sch[target.obj].reorder(*[axis.obj for axis in target.axes])
    reordered_names = ', '.join(axis.name for axis in target.axes)
    code_lines.append("sch[%s].reorder(%s,)" % (target.name, reordered_names))
def proc_compute_at(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Attach one stage's computation under an axis of another stage (primitive 9).

    :param stage_index: index of the stage being attached
    :param primitive: primitive id; no-op unless it is 9
    :param args: [anchor_stage_index, anchor_axis_index_in_axes]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 9:
        return
    target = sch_targets[stage_index]
    anchor = sch_targets[args[0]]
    anchor_axis = anchor.axes[args[1]]
    if mode == MODE_RUNTIME:
        sch[target.obj].compute_at(sch[anchor.obj], anchor_axis.obj)
    code_lines.append("sch[%s].compute_at(sch[%s], %s)" %
                      (target.name, anchor.name, anchor_axis.name))
def proc_fuse(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-locals, too-many-arguments
    '''
    Fuse several tracked axes of one stage into a single axis (primitive 15).

    The fused axis is named "<stage>_<kind>_fused_0" where <kind> is
    "reduce_axis" if the first fused axis is a reduce axis, else "axis".

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 15
    :param args: [indices_of_axes_to_fuse]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive == 15:
        sch_target = sch_targets[stage_index]
        fuse_axis_idx_list = args[0]
        fuse_axis_obj_list = [sch_target.axes[i].obj for i in fuse_axis_idx_list]
        fuse_axis_name_list = [sch_target.axes[i].name for i in fuse_axis_idx_list]
        # The fused axis inherits the kind (common/reduce) of the first axis.
        axis_type = "axis"
        if "reduce_axis" in fuse_axis_name_list[0]:
            axis_type = "reduce_axis"
        code_lines.append(
            "%s_%s_fused_0 = sch[%s].fuse(%s)" %
            (sch_target.name, axis_type, sch_target.name, ", ".join(fuse_axis_name_list)))
        # Bugfix: fused_axis_obj was previously left unbound when
        # mode != MODE_RUNTIME, raising NameError in the insert below.
        # Default it to None like every other axis bookkeeping path.
        fused_axis_obj = None
        if mode == MODE_RUNTIME:
            fused_axis_obj = sch[sch_target.obj].fuse(*(fuse_axis_obj_list))
        # Drop the fused axes and put the fused axis at the first vacated
        # position. NOTE(review): the pop loop assumes the fused indices are
        # contiguous starting at min() — TODO confirm the cheque guarantees
        # this.
        fuse_start_idx = min(fuse_axis_idx_list)
        for _ in fuse_axis_idx_list:
            sch_target.axes.pop(fuse_start_idx)
        sch_target.axes.insert(
            fuse_start_idx, Axis("%s_%s_fused_0" % (sch_target.name, axis_type),
                                 fused_axis_obj))
def proc_rfactor(stage_index, primitive, args, sch_targets, sch, mode, code_lines): # pylint: disable=too-many-locals, too-many-arguments
    '''
    Apply the rfactor primitive (id 17) to one stage.

    Factors the reduction over the axis ``axes[args[0]]`` into a new
    "<stage>_rfactor" tensor placed at ``factor_axis = args[1]``.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 17
    :param args: [reduce_axis_index_in_axes, factor_axis]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None

    NOTE(review): unlike cache_read/cache_write, the new rfactor stage is
    only recorded in ``sch_targets`` in runtime mode, so later stage indices
    could diverge between the two modes — confirm this asymmetry is
    intended.
    '''
    if primitive == 17:
        sch_target = sch_targets[stage_index]
        rfactor_axis = sch_target.axes[args[0]]
        factor_axis = args[1]
        rfactor_name = sch_target.name + "_rfactor"
        code_lines.append("%s = sch.rfactor(%s, %s, factor_axis=%s)" %
                          (rfactor_name, sch_target.name, rfactor_axis.name, factor_axis))
        if mode == MODE_RUNTIME:
            tensor_rfactor = sch.rfactor(sch_target.obj, rfactor_axis.obj, factor_axis)
            # rfactor returns a list for multi-output ops; track the first.
            if not isinstance(tensor_rfactor, tvm.tensor.Tensor):
                tensor_rfactor = tensor_rfactor[0]
            sch_targets.insert(stage_index, ScheduleTarget(rfactor_name, tensor_rfactor, []))
def proc_set_scope(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Set the memory scope of one stage (primitive 18).

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 18
    :param args: [scope_id], an index into SCOPE_DICT
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 18:
        return
    target = sch_targets[stage_index]
    scope = SCOPE_DICT[int(args[0])]
    if mode == MODE_RUNTIME:
        sch[target.obj].set_scope(scope)
    code_lines.append("sch[%s].set_scope('%s')" % (target.name, scope))
def proc_bind(stage_index, primitive, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Bind the first tracked axis of a stage to blockIdx.x (primitive 10).

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 10
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 10:
        return
    target = sch_targets[stage_index]
    bind_axis = target.axes[0]
    if mode == MODE_RUNTIME:
        block_axis = tvm.thread_axis('blockIdx.x')
        sch[target.obj].bind(bind_axis.obj, block_axis)
    code_lines.append("block = tvm.thread_axis('blockIdx.x')")
    code_lines.append("sch[%s].bind(%s, block)" % (target.name, bind_axis.name))
def proc_pragma(stage_index, primitive, args, sch_targets, sch, mode, code_lines): # pylint: disable=too-many-locals, too-many-arguments
    '''
    Apply a pragma annotation to one axis of a stage (primitive 16).

    Axis selection convention: ``args[0]`` is a list. ``[-1, k]`` selects
    the k-th tracked (split/fused) axis of the stage; otherwise the first
    element directly indexes the stage's own ``op.axis``.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 16
    :param args: [axis_selector, intrinsic_id (key of INTRIN_MAP), offset]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive == 16:
        # Pragma
        sch_target = sch_targets[stage_index]
        axis_index = args[0]
        pragma_insn_name = INTRIN_MAP[args[1]]
        pragma_insn_offset = args[2]
        if axis_index[0] == -1:
            # [-1, k]: use the k-th tracked axis recorded for this stage
            axis = sch_target.axes[args[0][1]]
        else:
            # otherwise index the op's raw axes directly
            axis_index = axis_index[0]
            if mode == MODE_RUNTIME:
                axis = Axis('sch[%s].op.axis[%d]' % (sch_target.name, axis_index),
                            sch[sch_target.obj].op.axis[axis_index])
            else:
                axis = Axis('sch[%s].op.axis[%d]' % (sch_target.name, axis_index), None)
        if mode == MODE_RUNTIME:
            sch[sch_target.obj].pragma(axis.obj, pragma_insn_name, pragma_insn_offset)
        code_lines.append("sch[%s].pragma(%s, '%s', %s)" %
                          (sch_target.name, axis.name, pragma_insn_name, pragma_insn_offset))
def proc_emit_insn(stage_index, primitive, args, sch_targets, sch, mode, code_lines): # pylint: disable=too-many-locals, too-many-arguments
    '''
    Attach an emit_insn intrinsic to one axis of a stage (primitive 11).

    Axis selection convention matches proc_pragma: ``args[0]`` is a list;
    ``[-1, k]`` selects the k-th tracked (split/fused) axis, otherwise the
    first element indexes the stage's own ``op.axis``.

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 11
    :param args: [axis_selector, intrinsic_id (key of INTRIN_MAP)]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive == 11:
        # EmitInsn
        sch_target = sch_targets[stage_index]
        axis_index = args[0]
        intrinsic = INTRIN_MAP[args[1]]
        if axis_index[0] == -1:
            # [-1, k]: use the k-th tracked axis recorded for this stage
            axis = sch_target.axes[args[0][1]]
        else:
            # otherwise index the op's raw axes directly
            axis_index = axis_index[0]
            if mode == MODE_RUNTIME:
                axis = Axis('sch[%s].op.axis[%d]' % (sch_target.name, axis_index),
                            sch[sch_target.obj].op.axis[axis_index])
            else:
                axis = Axis('sch[%s].op.axis[%d]' % (sch_target.name, axis_index), None)
        # generate the equivalent python source line for this primitive
        code_lines.append("sch[%s].emit_insn(%s, '%s')" % (sch_target.name, axis.name, intrinsic))
        if mode == MODE_RUNTIME:
            sch[sch_target.obj].emit_insn(axis.obj, intrinsic)
def proc_insert_param(primitive, args, mode, code_lines):
    '''
    Refresh the global 'broadcast_axis_offset' emit-insn parameter (primitive 12).

    :param primitive: primitive id; no-op unless it is 12
    :param args: [offset]
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 12:
        return
    offset = args[0]
    if mode == MODE_RUNTIME:
        # delete any stale value before inserting the new one
        cce_emitinsn_params.cceEmitParamsIns.del_param('broadcast_axis_offset')
        cce_emitinsn_params.cceEmitParamsIns.insert_param('broadcast_axis_offset', offset)
    code_lines.append("cce_emitinsn_params.cceEmitParamsIns.del_param('broadcast_axis_offset')")
    code_lines.append(
        "cce_emitinsn_params.cceEmitParamsIns.insert_param('broadcast_axis_offset', %d)" %
        offset)
def proc_storage_align(stage_index, primitive, args, sch_targets, sch, mode, code_lines):  # pylint: disable=too-many-arguments
    '''
    Align the storage of one axis of a stage (primitive 13).

    :param stage_index: index of the target stage in ``sch_targets``
    :param primitive: primitive id; no-op unless it is 13
    :param args: [op_axis_index, alignment_factor]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only used when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive != 13:
        return
    target = sch_targets[stage_index]
    axis_index, block_num = args[0], args[1]
    axis_name = 'sch[%s].op.axis[%d]' % (target.name, axis_index)
    axis_obj = sch[target.obj].op.axis[axis_index] if mode == MODE_RUNTIME else None
    axis = Axis(axis_name, axis_obj)
    if mode == MODE_RUNTIME:
        sch[target.obj].storage_align(axis.obj, block_num, 0)
    code_lines.append("sch[%s].storage_align(%s, %s, 0)" %
                      (target.name, axis.name, block_num))
def proc_cce_special(primitive, args, sch_targets, sch, mode, code_lines): # pylint: disable=too-many-locals, too-many-arguments
    '''
    Populate ``sch.cce_special`` with tensor bookkeeping lists (primitive 14).

    ``args`` holds three lists of stage references, filling in order:
    "tensor_list", "orign_out_tensor" and "real_out_tensor". A reference is
    either a plain stage index (single-output op) or ``[index, n]`` for an
    op with ``n`` outputs (tuple reduction).

    :param primitive: primitive id; no-op unless it is 14
    :param args: [tensor_list_refs, orign_out_refs, real_out_refs]
    :param sch_targets: per-stage bookkeeping list (name, tensor obj, axes)
    :param sch: tvm schedule, only mutated when ``mode == MODE_RUNTIME``
    :param mode: MODE_RUNTIME executes the primitive; otherwise only source
        code is generated
    :param code_lines: output list collecting generated schedule source lines
    :return: None
    '''
    if primitive == 14:
        # cce_special
        tensor_list_objs = []
        tensor_list_names = []
        orign_out_tensor_list_objs = []
        orign_out_tensor_list_names = []
        real_out_tensor_list_objs = []
        real_out_tensor_list_names = []
        # general cce_special cheque form is [-1, 14, [], [8], [7]]
        # tuple_reduce cce_special cheque form is [-1, 14, [], [[8, 2]], [[7, 2]]]
        for arg_index, tmp_tensor_list_index in enumerate(args):
            tmp_tensor_list_objs = []
            tmp_tensor_list_names = []
            for stage_index in tmp_tensor_list_index:
                if isinstance(stage_index, list):
                    # [index, n]: multi-output op; name each output
                    # "<base>_v<idx>", with "_l" re-appended for cached stages.
                    tensor_nums = stage_index[1]
                    stage_index = stage_index[0]
                    sch_target = sch_targets[stage_index]
                    stage_name = sch_target.name
                    for idx in range(tensor_nums):
                        tmp_tensor_list_objs.append(sch_target.obj.op.output(idx))
                        if stage_name.endswith('_l'):
                            tensor_name = "%s_v%s_l" % (stage_name.split('_l')[0], idx)
                        else:
                            # NOTE(review): split('_l')[0] truncates at the
                            # first '_l' occurrence anywhere in the name —
                            # confirm stage names never contain '_l' mid-string.
                            tensor_name = "%s_v%s" % (stage_name.split('_l')[0], idx)
                        tmp_tensor_list_names.append(tensor_name)
                else:
                    tmp_tensor_list_objs.append(sch_targets[stage_index].obj.op.output(0))
                    tmp_tensor_list_names.append(sch_targets[stage_index].name)
            if arg_index == 0:
                tensor_list_objs = tmp_tensor_list_objs
                tensor_list_names = tmp_tensor_list_names
            elif arg_index == 1:
                orign_out_tensor_list_objs = tmp_tensor_list_objs
                orign_out_tensor_list_names = tmp_tensor_list_names
            else:
                real_out_tensor_list_objs = tmp_tensor_list_objs
                real_out_tensor_list_names = tmp_tensor_list_names
        if mode == MODE_RUNTIME:
            sch.cce_special = dict()
            sch.cce_special["tensor_list"] = tensor_list_objs
            sch.cce_special["orign_out_tensor"] = orign_out_tensor_list_objs
            sch.cce_special["real_out_tensor"] = real_out_tensor_list_objs
        code_lines.append("sch.cce_special = dict()")
        code_lines.append(
            'sch.cce_special["tensor_list"] = %s' % tensor_list_names)
        code_lines.append(
            'sch.cce_special["orign_out_tensor"] = %s' % orign_out_tensor_list_names)
        code_lines.append(
            'sch.cce_special["real_out_tensor"] = %s' % real_out_tensor_list_names)
def withdraw(res_list, cheque, mode="runtime"):
    '''
    Replay a recorded "cheque" of schedule actions on a freshly created schedule.

    :param res_list: output tensors to create the schedule for
    :param cheque: list of actions, each of the form [stage_index, primitive, *args]
    :param mode: MODE_RUNTIME applies each primitive to the schedule; otherwise
                 only the equivalent python statements are collected
    :return: tuple (sch, code_lines) with the schedule and the generated code lines
    :raises RuntimeError: if an action references an unknown primitive id
    '''
    # firstly create_schedule
    code_lines = []
    sch = tvm.create_schedule([res.op for res in res_list])
    # element in sch_targets List is [Tensor name, Tensor obj, comm axis list, reduce axis list]
    # (here only the name, the tensor object and an empty axis list are passed)
    sch_targets = []
    for stage in sch.stages:
        sch_targets.append(ScheduleTarget(stage.op.name, stage.op.output(0), []))
    for action in cheque:
        stage_index, primitive, *args = action
        if primitive not in PRIMITIVE_DICT:
            # Bug fix: the RuntimeError was previously constructed but never
            # raised, so unknown primitives were silently skipped. Raising
            # makes gen_sch_by_cheque's `except RuntimeError` path reachable.
            raise RuntimeError('Invalid primitive: [%s]' % primitive)
        # Every proc_* helper is invoked; each one checks `primitive` itself
        # and acts only on its own id (e.g. proc_cce_special only for 14).
        proc_cache_read(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_cache_write(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_double_buffer(stage_index, primitive, sch_targets, sch, mode, code_lines)
        proc_compute_inline(stage_index, primitive, sch_targets, sch, mode, code_lines)
        proc_get_axis(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_get_reduce_axis(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_split(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_nparts(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_reorder(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_compute_at(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_fuse(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_rfactor(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_set_scope(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_bind(stage_index, primitive, sch_targets, sch, mode, code_lines)
        proc_pragma(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_emit_insn(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_insert_param(primitive, args, mode, code_lines)
        proc_storage_align(stage_index, primitive, args, sch_targets, sch, mode, code_lines)
        proc_cce_special(primitive, args, sch_targets, sch, mode, code_lines)
    return sch, code_lines
def gen_sch_by_cheque(out_tensors, action_list):
    '''
    Build a schedule for ``out_tensors`` by replaying the recorded actions.

    :param out_tensors: output tensors to schedule
    :param action_list: recorded schedule actions (a "cheque")
    :return: (True, schedule) on success, (False, None) when replay fails
    '''
    try:
        schedule, _ = withdraw(out_tensors, action_list, MODE_RUNTIME)
    except RuntimeError:
        return False, None
    return True, schedule
| [
"jizr@connect.hku.hk"
] | jizr@connect.hku.hk |
d71dbfa190cac97849af580c43abaa0c77ae8fd9 | 34088b8e82bc64a10678a08c03db2732d52f0c1a | /Pinbot/app/vip/models.py | 15266cd21515f80e33ecdec75433cdb6b7417899 | [] | no_license | winghou/myFirstProfile | 757d82f5391f3672e48db4aa5774e26a48a5ecc7 | 8fc5d16de7b6449cba058f4d2459bbb0c8438f77 | refs/heads/master | 2020-05-31T13:42:28.554703 | 2016-03-23T11:30:13 | 2016-03-23T11:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,626 | py | # coding: utf-8
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
class Product(models.Model):
    """Abstract base model for everything purchasable: name, price, status
    and display flags shared by all concrete product types."""

    # Whether the product may currently be purchased.
    PRODUCT_STATUS = (
        ('enable', '允许购买'),
        ('disable', '限制购买'),
    )
    product_name = models.CharField(
        max_length=40,
        verbose_name='产品名称',
    )
    # Internal code alias; also used as the string representation below.
    code_name = models.CharField(
        max_length=40,
        verbose_name='代码别名',
    )
    price = models.FloatField(
        verbose_name='产品单价'
    )
    desc = models.CharField(
        max_length=200,
        verbose_name='产品备注',
        blank=True,
        null=True,
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='创建时间'
    )
    status = models.CharField(
        max_length=20,
        choices=PRODUCT_STATUS,
        default='enable',
        verbose_name='产品状态',
    )
    is_show = models.BooleanField(
        default=False,
        verbose_name='是否显示到前台'
    )
    def __unicode__(self):
        # Python 2 style string representation (code alias).
        return self.code_name
    def __str__(self):
        return self.__unicode__()
    def get_subject(self):
        # Short label used when the product appears on an order.
        return self.__unicode__()
    def get_detail(self):
        return self.desc
    def get_price(self):
        return self.price
    class Meta:
        verbose_name = '产品列表'
        verbose_name_plural = verbose_name
        # Abstract: concrete products (PackageItem, Coin, ...) inherit these fields.
        abstract = True
class PackageItem(Product):
    """Configuration for a manual-service package (salary band, duration,
    number of candidates, weekly points)."""

    # Stored as 'low,high' in k-RMB; get_desc splits on the comma.
    salary_range = models.CharField(
        max_length=20,
        verbose_name='月薪范围'
    )
    service_month = models.IntegerField(
        default=1,
        verbose_name='服务月数'
    )
    candidate_num = models.IntegerField(
        default=1,
        verbose_name='候选人数量'
    )
    feed_count = models.IntegerField(
        default=0,
        verbose_name='定制数'
    )
    pinbot_point = models.IntegerField(
        default=0,
        verbose_name='每周聘点'
    )
    is_commend = models.BooleanField(
        default=False,
        verbose_name='是否为推荐套餐'
    )
    @property
    def get_product_type(self):
        return 'manual_service'
    @property
    def get_desc(self):
        # Human readable summary; the 30-100 band is rendered as "30k以上".
        salary_range = self.salary_range.split(',')
        if salary_range[0] == '30' and salary_range[1] == '100':
            retur_str = '30k以上, %s个月 ,%s名候选人' % (
                self.service_month,
                self.candidate_num
            )
        else:
            retur_str = '%sk-%sk, %s个月 ,%s名候选人' % (
                salary_range[0],
                salary_range[1],
                self.service_month,
                self.candidate_num
            )
        return retur_str
    def __unicode__(self):
        return self.product_name
    def __str__(self):
        return self.__unicode__()
    class Meta:
        verbose_name = '人工服务配置'
        verbose_name_plural = verbose_name
class PinbotPoint(Product):
    """Purchasable bundle of "pinbot points" (聘点)."""

    # How many points this product grants.
    num = models.IntegerField(
        default=1,
        verbose_name='聘点数量'
    )
    def __unicode__(self):
        return self.product_name
    def __str__(self):
        return self.__unicode__()
    @property
    def get_product_type(self):
        return 'pinbot_point'
    @property
    def get_desc(self):
        return '充值聘点'
    class Meta:
        verbose_name = '聘点购买配置'
        verbose_name_plural = verbose_name
class Coin(Product):
    """Purchasable coin (金币) top-up product; all data comes from Product."""

    def __unicode__(self):
        return self.product_name
    def __str__(self):
        return self.__unicode__()
    @property
    def get_product_type(self):
        return 'coin'
    @property
    def get_desc(self):
        return '充值金币'
    class Meta:
        verbose_name = '金币购买配置'
        verbose_name_plural = verbose_name
class VipRoleSetting(Product):
    """Configuration of a self-service VIP tier: entitlements (feeds,
    gifted points), application rules and front-end presentation."""

    vip_name = models.CharField(
        max_length=20,
        verbose_name='会员名称',
    )
    feed_count = models.IntegerField(
        verbose_name='定制数',
    )
    pinbot_point = models.IntegerField(
        verbose_name='赠送聘点',
    )
    allow_apply = models.BooleanField(
        default=False,
        verbose_name='允许申请',
    )
    # Whether an agreement must be signed before activation.
    agreement = models.BooleanField(
        default=False,
        verbose_name='需要签订协议',
    )
    level = models.PositiveIntegerField(
        default=1,
        verbose_name='会员等级',
    )
    auto_active = models.BooleanField(
        default=False,
        verbose_name='自动生效',
    )
    attract_info = models.CharField(
        max_length=80,
        default='',
        blank=True,
        verbose_name='优惠信息',
    )
    # Front-end column ordering.
    index = models.IntegerField(
        default=0,
        verbose_name='栏位排序'
    )
    month_price = models.FloatField(
        default=0,
        verbose_name='每月价格',
    )
    service_time = models.IntegerField(
        default=3,
        verbose_name='服务时间',
    )
    def __unicode__(self):
        return self.vip_name
    def __str__(self):
        return self.__unicode__()
    @property
    def get_product_type(self):
        return 'self_service'
    @property
    def get_desc(self):
        # e.g. "<name>, <n>个定制, <m>聘点"
        desc = '{vip_name}, {feed_count}个定制, {pinbot_point}聘点'.format(
            vip_name=self.vip_name,
            feed_count=self.feed_count,
            pinbot_point=self.pinbot_point,
        )
        return desc
    class Meta:
        verbose_name = '自助服务配置'
        verbose_name_plural = verbose_name
class RenewRecord(models.Model):
    '''
    Renewal record for a self-service membership (UserVip).
    '''
    user_vip = models.ForeignKey(
        'vip.UserVip',
        related_name='renew_records',
        verbose_name='自助服务',
    )
    # Renewal length; unit not stated here -- presumably months, matching
    # VipRoleSetting.service_time. TODO confirm against callers.
    duration = models.IntegerField(
        verbose_name='续期时长',
    )
    price = models.FloatField(
        verbose_name='价格',
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='创建时间',
    )
    @property
    def get_product_type(self):
        return 'renew_service'
    def __unicode__(self):
        return str(self.user_vip_id)
    def __str__(self):
        return self.__unicode__()
    def get_subject(self):
        # Label shown on the corresponding order.
        return '自助服务续期'
    def get_detail(self):
        return '自助服务续期'
    class Meta:
        verbose_name = '自助服务续期'
        verbose_name_plural = verbose_name
class UserVip(models.Model):
    """A user's self-service membership: links a user to a VipRoleSetting
    plus per-user overrides, activation/expiry state and admin helpers."""

    APPLY_STATUS_META = (
        ('applying', '申请中'),
        ('success', '申请完成'),
    )
    user = models.ForeignKey(
        User,
        verbose_name='用户',
        related_name='vip_roles',
    )
    vip_role = models.ForeignKey(
        VipRoleSetting,
        verbose_name='角色',
        related_name='setting_roles',
    )
    # Per-user overrides of the role's weekly points / feed count.
    custom_point = models.IntegerField(
        default=0,
        verbose_name='配置每周点数',
    )
    custom_feed = models.IntegerField(
        default=0,
        verbose_name='配置定制数',
    )
    is_active = models.BooleanField(
        default=False,
        verbose_name='生效状态',
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='申请时间',
    )
    active_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='生效时间',
    )
    has_sign = models.BooleanField(
        default=False,
        verbose_name='协议签订',
    )
    apply_status = models.CharField(
        max_length=20,
        choices=APPLY_STATUS_META,
        default='applying',
        verbose_name='申请状态',
    )
    expire_time = models.DateTimeField(
        # Bug fix: pass the callable instead of calling it. The previous
        # ``default=datetime.datetime.now()`` was evaluated once at import
        # time, so every record created in the same process shared that
        # stale timestamp. Django evaluates a callable default per object.
        default=datetime.datetime.now,
        verbose_name='过期时间',
    )
    total_price = models.FloatField(
        default=0,
        verbose_name='总价格',
    )
    def get_subject(self):
        # Order label, e.g. "<role name>会员".
        return u'%s会员' % self.vip_role.vip_name
    def get_detail(self):
        return u'%s会员' % self.vip_role.vip_name
    def get_price(self):
        return self.vip_role.price
    @property
    def status(self):
        # Alias so generic admin forms can read/write `status`.
        return self.apply_status
    @status.setter
    def status(self, value):
        self.apply_status = value
    def __unicode__(self):
        return u'%s,%s' % (self.user.username, self.vip_role.vip_name)
    def __str__(self):
        return self.__unicode__()
    @property
    def get_product_type(self):
        return 'self_service'
    @property
    def item(self):
        return self.vip_role
    def apply_vip_user(self):
        # Admin list column: inline-edit widget to activate the membership.
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-apply-user-vip-form', args=(self.id,))
        return mark_safe(interface)
    apply_vip_user.short_description = 'vip生效'
    def disable_vip_user(self):
        # Admin list column: inline-edit widget to disable the membership.
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-disable-form', args=(self.id,))
        return mark_safe(interface)
    disable_vip_user.short_description = '停用'
    def admin_order_page(self):
        # Admin list column: link to the package-opening page for this user.
        interface = '''
        <a href="/vip/order/admin_page/#/usermode_noworry/?username={0}" target="_blank">省心套餐</a>
        '''.format(self.user.username)
        return mark_safe(interface)
    admin_order_page.short_description = '开通套餐'
    class Meta:
        verbose_name = '自助服务'
        verbose_name_plural = verbose_name
class UserOrder(models.Model):
    """A user's purchase order. The purchased item is attached through a
    generic foreign key so any product/service model can be ordered."""

    ORDER_STATUS_META = (
        ('unpay', '进行中'),
        ('paid', '交易成功'),
        ('fail', '交易失败'),
        ('refund', '退款中'),
        ('cancel_refund', '取消退款'),
        ('refunded', '退款成功'),
        ('closed', '已关闭'),
        ('canceled', '已取消'),
        ('deleted', '已删除'),
    )
    PAYMENT_TERMS_META = (
        ('alipay', '支付宝'),
        ('weixin', '微信'),
        ('offline', '线下'),
        ('coin', '金币支付'),
    )
    ORDER_TYPE_META = (
        (1, '自助服务'),
        (2, '人工服务'),
        (3, '购买聘点'),
        (4, '购买金币'),
        (5, '提现'),
        (6, '会员申请'),
        (7, '续期'),
    )
    # Generic FK to the ordered object (UserVip, UserManualService, ...).
    item_content_type = models.ForeignKey(
        ContentType,
        related_name='order_type',
        verbose_name='订单类型',
    )
    item_object_id = models.PositiveIntegerField(
        verbose_name='订单类型id',
    )
    item = generic.GenericForeignKey(
        'item_content_type',
        'item_object_id',
    )
    user = models.ForeignKey(
        User,
        verbose_name='用户',
        related_name='user_orders',
    )
    order_id = models.CharField(
        max_length=30,
        verbose_name='订单id',
    )
    order_status = models.CharField(
        choices=ORDER_STATUS_META,
        default='unpay',
        max_length=30,
        verbose_name='订单状态',
    )
    order_price = models.FloatField(
        verbose_name='支付金额',
    )
    actual_price = models.FloatField(
        verbose_name='实际支付',
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='生成时间',
    )
    pay_time = models.DateTimeField(
        # NOTE(review): auto_now_add stamps this at creation, not at actual
        # payment time -- confirm this matches the intended semantics.
        auto_now_add=True,
        verbose_name='支付时间',
    )
    payment_terms = models.CharField(
        choices=PAYMENT_TERMS_META,
        default='alipay',
        max_length=30,
        verbose_name='支付方式',
    )
    order_remark = models.CharField(
        default='',
        max_length=30,
        verbose_name='交易备注',
    )
    order_type = models.IntegerField(
        choices=ORDER_TYPE_META,
        default=3,
        blank=True,
        verbose_name='订单类型',
    )
    order_desc = models.CharField(
        max_length=60,
        default='',
        blank=True,
        verbose_name='订单内容',
    )
    is_insurance = models.BooleanField(
        default=False,
        verbose_name='入职险'
    )
    is_delete = models.BooleanField(
        default=False,
        blank=True,
        verbose_name='已删除',
    )
    def __unicode__(self):
        return self.order_id
    def __str__(self):
        return self.__unicode__()
    def subject_name(self):
        # Delegates to the ordered item's own label.
        return self.item.get_subject()
    def order_detail(self):
        return self.item.get_detail()
    def offline_pay(self):
        # Admin list column: inline widget to mark an offline payment;
        # already-paid orders just show a static label.
        if self.order_status == 'paid':
            return mark_safe('已支付')
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-offline-pay-form', args=(self.order_id,))
        return mark_safe(interface)
    offline_pay.short_description = '离线支付'
    def refund(self):
        # Admin list column: refund widget, shown only while a refund is pending.
        if self.order_status != 'refund':
            return ''
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('order-refund-form', args=(self.order_id,))
        return mark_safe(interface)
    refund.short_description = '退款'
    class Meta:
        verbose_name = '用户订单'
        verbose_name_plural = verbose_name
class Mission(models.Model):
    """Newbie task (新手任务) tracking: what a user must do, its progress,
    and whether the reward has been claimed."""

    MISSION_TYPE_META = (
        ('none', '无'),
        ('add_feed', '添加定制'),
        ('check_resume', '查看简历'),
    )
    MISSION_STATUS_META = (
        ('start', '开始'),
        ('finish', '已完成'),
    )
    user = models.ForeignKey(
        User,
        verbose_name='用户',
        related_name='missions',
    )
    mission_type = models.CharField(
        max_length=30,
        verbose_name='任务类型',
        choices=MISSION_TYPE_META,
    )
    mission_status = models.CharField(
        max_length=30,
        verbose_name='任务状态',
        choices=MISSION_STATUS_META,
        default='start',
    )
    start_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='开始时间',
    )
    finish_time = models.DateTimeField(
        # NOTE(review): auto_now_add stamps this at creation, not at actual
        # completion -- confirm the completion flow updates it explicitly.
        auto_now_add=True,
        verbose_name='完成时间',
    )
    # Whether the task reward has been claimed.
    grant_status = models.BooleanField(
        default=False,
        verbose_name='领奖状态',
    )
    def __unicode__(self):
        return self.user.username
    def __str__(self):
        return self.__unicode__()
    class Meta:
        verbose_name = '新手任务'
        verbose_name_plural = verbose_name
class WithdrawRecord(models.Model):
    '''
    A user's coin withdrawal (用户提现) request and its review state.
    '''
    VERIFY_STATUS_META = (
        (0, '进行中'),
        (1, '审核成功'),
        (2, '审核失败'),
    )
    user = models.ForeignKey(
        User,
        verbose_name='用户',
        related_name='withdraw_records',
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='创建时间',
        db_index=True,
    )
    verify_status = models.IntegerField(
        choices=VERIFY_STATUS_META,
        default=0,
        verbose_name='审核状态',
    )
    verify_remark = models.CharField(
        default='',
        max_length=100,
        verbose_name='审核备注',
    )
    verify_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='审核时间',
    )
    money = models.FloatField(
        verbose_name='金额',
    )
    def get_subject(self):
        return u'提现'
    @property
    def get_product_type(self):
        return 'withdraw'
    def get_desc(self):
        return '金币提现'
    def __str__(self):
        return u'%s提现%s' % (self.user.username, self.money)
    def __unicode__(self):
        return self.__str__()
    def current_coin(self):
        # Admin column: the user's current coin balance (via related pinbotpoint).
        return self.user.pinbotpoint.coin
    current_coin.short_description = '当前金币'
    def operation(self):
        # Admin column: review widget, shown only while the request is pending.
        if self.verify_status != 0:
            return ''
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-withdraw-form', args=(self.id,))
        return mark_safe(interface)
    operation.short_description = '操作'
    class Meta:
        verbose_name = '提现记录'
        verbose_name_plural = verbose_name
class UserManualService(models.Model):
    """A user's manual-service subscription: links the user to a
    PackageItem configuration with activation/expiry lifecycle state."""

    PACKAGE_STATUS_META = (
        ('applying', '申请中'),
        ('success', '已开通'),
        ('refund', '退款中'),
        ('continue', '续期用户'),
        ('cancel_refund', '取消退款'),
        ('refunded', '退款成功'),
        ('closed', '已关闭'),
        ('canceled', '已取消'),
        ('deleted', '已删除'),
        ('expired', '已过期'),
        ('finished', '已完结'),
    )
    user = models.ForeignKey(
        User,
        verbose_name='用户',
        related_name='manual_roles',
    )
    item = models.ForeignKey(
        PackageItem,
        verbose_name='配置',
        related_name='manual_settings',
    )
    is_active = models.BooleanField(
        default=False,
        verbose_name='生效状态',
    )
    has_sign = models.BooleanField(
        default=False,
        verbose_name='协议签订',
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='申请时间',
    )
    active_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='生效时间',
    )
    expire_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='过期时间',
        db_index=True,
    )
    status = models.CharField(
        choices=PACKAGE_STATUS_META,
        default='applying',
        max_length=30,
        verbose_name='套餐状态',
    )
    is_insurance = models.BooleanField(
        default=False,
        verbose_name='是否包含入职险'
    )
    order_price = models.FloatField(
        default=0.0,
        verbose_name='支付金额',
    )
    # Reverse generic relation: purchase line items referencing this service.
    item_records = generic.GenericRelation('vip.ItemRecord')
    def get_subject(self):
        return u'%s会员' % self.item.code_name
    def get_detail(self):
        return u'%s会员' % self.item.code_name
    def get_price(self):
        return self.item.price
    def __unicode__(self):
        return '%s,%s' % (self.user.username, self.item.code_name)
    def apply_vip_user(self):
        # Admin list column: inline widget to activate the manual service.
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-apply-user-manual-service-form', args=(self.id,))
        return mark_safe(interface)
    apply_vip_user.short_description = '人工服务生效'
    def refund(self):
        # Admin list column: refund widget, shown only while a refund is pending.
        if self.status != 'refund':
            return ''
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-refund-manual-service-form', args=(self.id,))
        return mark_safe(interface)
    refund.short_description = '点击退款'
    def finished(self):
        # Admin list column: mark the service as completed.
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-finished-manual-service-form', args=(self.id,))
        return mark_safe(interface)
    finished.short_description = '完结服务'
    def invalid(self):
        # Admin list column: force-expire the service.
        interface = '''
        <div class="btn-group">
            <a class="editable-handler"
            title=""
            data-editable-field="status"
            data-editable-loadurl="%s"
            data-original-title="输入审核状态"><i class="icon-edit"></i></a>
        </div>
        <span class="editable-field">操作</span>
        ''' % reverse('vip-invalid-manual-service-form', args=(self.id,))
        return mark_safe(interface)
    invalid.short_description = '强制过期'
    @property
    def get_product_type(self):
        return 'manual_service'
    class Meta:
        verbose_name = '人工服务'
        verbose_name_plural = verbose_name
class ItemRecord(models.Model):
    """A purchase line item: quantity and total price of one product on an
    order. The product itself is referenced through a generic foreign key."""

    num = models.IntegerField(
        default=0,
        verbose_name='商品数量'
    )
    total_price = models.FloatField(
        default=0,
        verbose_name='商品总价'
    )
    order = models.ForeignKey(
        UserOrder,
        verbose_name='订单'
    )
    item_content_type = models.ForeignKey(
        ContentType,
        related_name='item_type',
        verbose_name='商品类型',
    )
    item_object_id = models.PositiveIntegerField(
        verbose_name='商品id',
    )
    item = generic.GenericForeignKey(
        'item_content_type',
        'item_object_id',
    )
    @classmethod
    def fetch_related_item(cls, queryset):
        '''
        Custom prefetch that reduces the number of database queries by
        bulk-loading the generic-FK targets and attaching each as
        ``item_obj`` on the records.

        reference:
        http://stackoverflow.com/questions/12466945/django-prefetch-related-objects-of-a-genericforeignkey
        '''
        uservip_ctype = ContentType.objects.get_for_model(UserVip)
        manual_service_ctype = ContentType.objects.get_for_model(UserManualService)
        # Map content-type id -> {object id -> instance}, one query per model.
        item_objects = {}
        item_objects[uservip_ctype.id] = UserVip.objects.select_related(
            'vip_role'
        ).in_bulk(
            [i.item_object_id for i in queryset if i.item_content_type_id == uservip_ctype.id]
        )
        item_objects[manual_service_ctype.id] = UserManualService.objects.select_related(
            'item'
        ).in_bulk(
            [i.item_object_id for i in queryset if i.item_content_type_id == manual_service_ctype.id]
        )
        for i in queryset:
            i.item_obj = item_objects[i.item_content_type_id][i.item_object_id]
        return queryset
    class Meta:
        verbose_name = '商品购买记录'
        verbose_name_plural = verbose_name
| [
"Newfarming@NewfarmingdeMacBook-Pro.local"
] | Newfarming@NewfarmingdeMacBook-Pro.local |
1aa621a701a09656aeb71c5930bc6daca9a9e26d | d7949f5b2075384075fa066d571144bbbe02ffd8 | /supervised/utils/subsample.py | cfd80b0b6af309dc95d037293f409a4fbad068f9 | [
"MIT"
] | permissive | mljar/mljar-supervised | 57fb56b05b1a53ea979bf9cb9b127f314853bdbd | 6722eb1e6441c11990f2aed01a444ddcae478c09 | refs/heads/master | 2023-08-30T23:48:28.692945 | 2023-08-28T15:09:39 | 2023-08-28T15:09:39 | 156,218,203 | 2,759 | 388 | MIT | 2023-08-28T10:24:12 | 2018-11-05T12:58:04 | Python | UTF-8 | Python | false | false | 463 | py | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from supervised.algorithms.registry import REGRESSION
def subsample(X, y, ml_task, train_size):
    """Shuffle-split (X, y) into train/test parts.

    For classification tasks the split is stratified on ``y``; for
    regression (``ml_task == REGRESSION``) it is a plain shuffled split.
    Returns (X_train, X_test, y_train, y_test).
    """
    stratify_labels = None if ml_task == REGRESSION else y
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, train_size=train_size, shuffle=True, stratify=stratify_labels
    )
    return X_train, X_test, y_train, y_test
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
36b154704e3c1462f72659cfa2cda1ae2a75c817 | 78520f19165b33909364299aaaea2283b8aa2367 | /keywords/elif_kwd.py | 9560d07296baca68083aaac8fc0abe480d2b3939 | [
"BSD-2-Clause"
] | permissive | s3n0/Python-Course | 3d3618b97c7d4d9bbe7c3987d2c329203251029b | d48568d096e9a78e397eefd83b2588ddd27aa481 | refs/heads/master | 2020-09-09T00:51:28.874296 | 2019-09-29T17:50:48 | 2019-09-29T17:50:48 | 221,294,533 | 0 | 1 | null | 2019-11-12T19:23:11 | 2019-11-12T19:23:10 | null | UTF-8 | Python | false | false | 218 | py | #!/usr/bin/python3
# elif_kwd.py
# Demonstrates the ``elif`` keyword: branches are tested top to bottom
# and only the first matching one runs.
visitor = "Luke"

if visitor == "Jack":
    print("Hello Jack!")
elif visitor == "John":
    print("Hello John!")
elif visitor == "Luke":
    print("Hello Luke!")
else:
    print("Hello there!")
| [
"noreply@github.com"
] | s3n0.noreply@github.com |
c9a06a45ccc50918208dc3b38d5f8f81ece849f5 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/fatih/xorg/xorg-video-imstt/actions.py | 77b159e19b6e28768b169754190e63b78bbf5007 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import get
# The source tarball unpacks to xf86-video-imstt-<version>; pisi builds there.
WorkDir = "xf86-video-imstt-%s" % get.srcVERSION()
def setup():
    # Run the autotools configure step (./configure).
    autotools.configure()
def build():
    # Compile with make.
    autotools.make()
def install():
    # Install into the package staging directory (make install).
    autotools.install()
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
919333992dfe9e94cf1dc40447cfe0e90db3d328 | bd696223aaf5404987df11832b4c17c916b9690f | /rec_sample/gaussian_process_regressor_numeric_rating/gaussian_process_regressor_numeric_rating/main.py | 2d0d88d6ab609827bf79b3e34be5853efebdce0f | [] | no_license | wararaki718/scrapbox3 | 000a285477f25c1e8a4b6017b6ad06c76f173342 | 9be5dc879a33a1988d9f6611307c499eec125dc2 | refs/heads/master | 2023-06-16T08:46:32.879231 | 2021-07-17T14:12:54 | 2021-07-17T14:12:54 | 280,590,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | import numpy as np
from sklearn.datasets import load_wine
from sklearn.gaussian_process import GaussianProcessRegressor
def main():
    """Fit a Gaussian process regressor to the wine dataset and print
    predictions for ten randomly chosen training samples."""
    dataset = load_wine()
    features = dataset.data
    targets = dataset.target

    regressor = GaussianProcessRegressor()
    regressor.fit(features, targets)

    for sample_index in np.random.choice(features.shape[0], 10):
        sample = features[sample_index, :]
        print(f'real:{targets[sample_index]}, predict: {regressor.predict([sample])}')
    print('DONE')
# Run the demo only when executed directly (not on import).
if __name__ == '__main__':
    main()
| [
"ky7.ott.w@gmail.com"
] | ky7.ott.w@gmail.com |
eff38dcf60b38fcc1037d870b04197c61add0189 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_Lag1Trend_Seasonal_MonthOfYear_SVR.py | bf6738ef2f81d7475f6a83e95b8def2231eab79b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 160 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['Lag1Trend'] , ['Seasonal_MonthOfYear'] , ['SVR'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
b512a1038df77f42373157c2e38de6ded09715d8 | d52522a713d4e0522c22692e05948be897c4339b | /constants/i18n/greetings.py | 3cbd99d5b4f8e25b26351d8b69f240a5710c19dc | [
"MIT"
] | permissive | frankwrk/django-htk | de52a9132f494845ed9c3cb19a9e81e22f9a57a3 | fa9c6fe18d8651e4b96f036429169d741a1f2fe0 | refs/heads/master | 2023-04-21T10:37:57.563298 | 2019-03-04T10:47:49 | 2019-03-04T10:47:49 | 173,731,278 | 0 | 0 | MIT | 2023-04-03T23:23:47 | 2019-03-04T11:21:42 | Python | UTF-8 | Python | false | false | 844 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Greetings keyed by ISO 639 language code (e.g. 'en', 'ja', 'haw').
# Each value is a list of one or more greeting strings in that language.
I18N_GREETINGS = {
    'ar' : [
        u'مرحبا',
    ],
    'el' : [
        u'Χαίρετε',
    ],
    'en' : [
        u'Greetings',
        u'Hello',
    ],
    'eo' : [
        u'saluton',
    ],
    'es' : [
        u'Hola',
        u'Como estas',
    ],
    'fr' : [
        u'Salut',
    ],
    'haw' : [
        u'Aloha',
    ],
    'he' : [
        u'שלום',
    ],
    'hi' : [
        u'नमस्ते'
    ],
    'it' : [
        u'Ciao',
    ],
    'ja' : [
        u'こんにちは',
    ],
    'ko' : [
        u'안녕하세요',
    ],
    'mn' : [
        u'Сайн уу',
    ],
    'nl' : [
        u'Hallo',
    ],
    'ru' : [
        u'Здравствуйте',
    ],
    'vi' : [
        u'chào bạn',
    ],
    'zh' : [
        u'你好',
    ],
}
| [
"hello@jontsai.com"
] | hello@jontsai.com |
67a9ab339d0c77fe6b902946ddb037814635bb58 | d43c1974de5ef60a85d0e8af648f7d1546c1b5c3 | /exceptions.py | 8525e10d81204d53fb7030613c82e3949c24330c | [] | no_license | Hermotimos/Learning | 7168146b1ba80827997a895716c645dda57a47d7 | 7c5453279a43e9a15c66a1cf925aa9c05c820224 | refs/heads/master | 2023-08-12T15:42:09.043657 | 2023-04-01T09:36:21 | 2023-04-01T09:36:21 | 180,561,558 | 0 | 0 | null | 2023-07-25T21:26:23 | 2019-04-10T10:50:22 | Python | UTF-8 | Python | false | false | 3,366 | py | """
This file is for learning and exercise purposes.
Topics:
- exceptions: syntax and catching
- examples: ZeroDivisionError, AssertionError
Sources:
https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-0001-introduction-to-computer-science-and-programming-in-python-fall-2016/lecture-videos/lecture-7-testing-debugging-exceptions-and-assertions/
"""
############################################################################################
# 1)
def get_ratios(list1, list2):
    """Return the element-wise ratios list1[i] / list2[i].

    A zero divisor produces float('nan') so the result stays an all-float
    list; any other failure (mismatched lengths, non-numeric entries)
    raises ValueError.
    """
    ratios = []
    for index in range(len(list1)):
        try:
            ratios.append(list1[index] / list2[index])
        except ZeroDivisionError:
            ratios.append(float('nan'))
        except Exception:
            # Bug fix: the bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; catching Exception lets those propagate.
            raise ValueError('get_ratios called with wrong argument(s)')
    return ratios
# Demo: index 0 divides 0/0, so 'nan' keeps the result list all-floats.
lista1 = [0, 1, 2, 3, 4, 5]
lista2 = [0, 4, 4, 4, 4, 4]
print(get_ratios(lista1, lista2))
# special float value 'nan' - to secure coherence in the list (only floats)
print(type(float('nan')))
print('#'*30)
############################################################################################
# 2)
def yearly_scores_with_avg(scores):
    """Return [student, grades, rounded average] rows for every record in ``scores``."""
    return [
        [record[0], record[1], round(avg(record[1]), 2)]
        for record in scores
    ]
def avg(list):
    """Arithmetic mean of ``list``; an empty list yields 0.0 plus a warning.

    (The parameter keeps its original name ``list`` for caller compatibility.)
    """
    if len(list) == 0:
        print("Warning: some students have no results data (indicated by 0 as average score)")
        return 0.0
    return sum(list)/len(list)
# Demo: 'Mr. Nobody' has no grades, so avg() warns and reports 0 for him.
test_grades = [[['Peter', 'Parker'], [80.0, 70.0, 85.0]],
               [['Bruce', 'Wayne'], [100.0, 80.0, 74.0]],
               [['Clint', 'Eastwood'], [25.0, 80.0, 85.0]],
               [['Mr.', 'Nobody'], []],
               [['Clint', 'Westwood'], [25.0, 82.0, 85.0]]]
print(test_grades)
print(yearly_scores_with_avg(test_grades))
print('#'*30)
############################################################################################
# 3) ASSERTIONS:
def yearly_scores_with_avg(scores):
    """Append each student's rounded average to their [name, grades] record."""
    rows = []
    for record in scores:
        student, grades = record[0], record[1]
        rows.append([student, grades, round(avg(grades), 2)])
    return rows
def avg(list):
    """Mean of ``list``, demonstrating assertion-based validation.

    An empty list trips the assertion: a warning is printed and 0.0 is
    returned. Any other failure is printed and the function returns None.
    """
    try:
        assert len(list) > 0  # assertion-based input check (section 3 topic)
        return sum(list) / len(list)
    except AssertionError:  # empty-input path
        print("Warning: some students have no results data (indicated by 0 as average score)")
        return 0.0
    except Exception as caught:  # anything else: report and fall through to None
        print(f"An error occured: {caught}")
# Demo: same data as before, now exercising the assertion-based avg().
test_grades = [[['Peter', 'Parker'], [80.0, 70.0, 85.0]],
               [['Bruce', 'Wayne'], [100.0, 80.0, 74.0]],
               [['Clint', 'Eastwood'], [25.0, 80.0, 85.0]],
               [['Mr.', 'Nobody'], []],
               [['Clint', 'Westwood'], [25.0, 82.0, 85.0]]]
print(test_grades)
print(yearly_scores_with_avg(test_grades))
print()
# ZADANIE [my version]
def celcius_to_kelvin(temp):
    """Convert a Celsius temperature to Kelvin.

    Temperatures below absolute zero trip the assertion and yield a
    warning string instead of a number.
    """
    try:
        assert temp >= -273.15
    except AssertionError:
        return "Wrong temperature given: lower than absolute zero !"
    return temp + 273.15
# Demo: a valid temperature converts; an impossible one returns the warning string.
print(celcius_to_kelvin(20))
print(celcius_to_kelvin(-400))
| [
"lukas.kozicki@gmail.com"
] | lukas.kozicki@gmail.com |
a50c477311cf2c7d396443b3959bdca2fd9644de | 0feb9799532328d2eb5c9673751bf44a06652375 | /ethics/russellnorvig.py | 1391b1d8e488edda475086428c8fd190f4e9c81b | [] | no_license | krishnakatyal/philosophy | ebc78947508f12a9d06356d2cc8d38f6afb0510a | f7735e9adc9ba609894d89384562dbda2f794548 | refs/heads/master | 2022-03-28T14:00:52.460599 | 2020-01-25T00:28:55 | 2020-01-25T00:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py |
"""
An artificial intelligence (AI) approach to ethics can embrace many different
ideas from philosophy. In "Artificial Intelligence: A Modern Approach," (known as AIMA)
computer scientists Peter Norvig and Stuart Russell believe we can create goals for AI
to act rationally. Russell asks "What is AI?" as "What is intelligence?" to identify intelligence
closely tied with rationality. Intelligent agents can take percepts as input and act
based upon them. We can create performance measures by calculating V the expected utility
according to the performance measure U of the agent function f that operates on E:
f_opt = max V(f, E, U)
"""
def fopt(V, E, U):
    """
    Maximize our function fopt by maximizing expected utility V in the corresponding environment E
    with some performance measure U.
    """
    # NOTE(review): ``f0`` is not defined anywhere in this function or shown
    # module code, so calling fopt raises NameError. The module docstring
    # speaks of an agent function f -- presumably one was meant to be defined
    # or passed in. Confirm the intended source of f0 before using.
    return max(V(f0, E, U))
| [
"shussainather@gmail.com"
] | shussainather@gmail.com |
2de8fd5dc49632454858701d0eb25dff5269111b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2595/49687/317028.py | 83ccacc93c888f73a52a2032b57d71c47b87f5fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | listN = []
# Read n test cases of (N, K); for each, the answer is K ** (N - 1).
listK = []
n = int(input())
for _ in range(n):
    first, second = input().split()
    listN.append(int(first))
    listK.append(int(second))
for N, K in zip(listN, listK):
    print(K ** (N - 1))
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
28eaa1e1e8d6cfee324f75c131cf883a1ef3d182 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/vse-naloge-brez-testov/DN12-M-044.py | b8cbd3d76ee516ba832cb8dba72e35c8674aa25d | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,268 | py | def preberi(ime_datoteke):
    # a: raw integer rows keyed by 1-based line number;
    # b: the same rows rotated so each starts at its minimum value.
    a = dict ()
    b = dict ()
    with open (ime_datoteke) as g:
        vrstica = g.readline ()
        count = 1
        while vrstica:
            a[count] = []
            b[count] = []
            # parse the whitespace-separated integers of this line
            for e in vrstica.strip ().split():
                e = int ( e )
                a[count].append ( e )
            # re-rotate every row read so far (re-doing earlier rows is
            # redundant but harmless: rotation is idempotent on a's data)
            for d in a.items ():
                e, k = d
                s = []
                x = 1000000  # sentinel larger than any expected value
                for i in k:
                    if i < x:
                        x = i
                index = k.index ( x )
                # rotate: tail from the minimum first, then the head
                for element in k[index:]:
                    s.append ( element )
                for e in k[0:index]:
                    s.append ( e )
                b[count] = s
            vrstica = g.readline ()
            count += 1
    return b
def mozna_pot(pot, zemljevid):
    """Return True when ``pot`` is a feasible walk on the graph ``zemljevid``.

    ``zemljevid`` maps a node to the list of its neighbours.  A feasible
    walk must start and end on a degree-1 node (an endpoint), must not pass
    through any endpoint in its interior, must not repeat a node in two
    consecutive steps, and every consecutive pair must be adjacent.
    """
    # Degree-1 nodes are the only valid endpoints of a walk.
    konci = [vozlisce for vozlisce, sosedi in zemljevid.items() if len(sosedi) == 1]

    # Guard clause: both ends of the walk have to be endpoints.
    if pot[0] not in konci or pot[-1] not in konci:
        return False

    # Interior nodes: no endpoint may appear mid-walk, and no node may be
    # immediately repeated.
    for polozaj in range(1, len(pot) - 1):
        if pot[polozaj] in konci:
            return False
        if pot[polozaj] == pot[polozaj + 1]:
            return False

    # Every step must follow an edge of the graph.
    for polozaj in range(1, len(pot)):
        if pot[polozaj] not in zemljevid[pot[polozaj - 1]]:
            return False
    return True
def hamiltonova(pot, zemljevid):
    """Return True when ``pot`` is a valid (Hamiltonian-style) path on ``zemljevid``.

    Checks, in order: both ends of a multi-node path lie on degree-1 nodes,
    the path has the expected length (number of nodes minus the number of
    degree-1 nodes, plus two), no node repeats, and consecutive nodes are
    adjacent in the graph.
    """
    # Degree-1 nodes: the only admissible start/end points.
    listi = [vozlisce for vozlisce in zemljevid if len(zemljevid[vozlisce]) == 1]

    if len(pot) > 1 and (pot[0] not in listi or pot[-1] not in listi):
        return False

    # Required path length, per the original length invariant.
    if len(pot) != len(zemljevid) - len(listi) + 2:
        return False

    # No node may occur twice anywhere in the path.
    for mesto, vozlisce in enumerate(pot):
        if vozlisce in pot[mesto + 1:]:
            return False

    # Every consecutive pair must be connected by an edge.
    for mesto in range(1, len(pot)):
        if pot[mesto] not in zemljevid[pot[mesto - 1]]:
            return False
    return True
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
633ef5cfdfe205fc64d22d17d19aa76fd7270d9e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03910/s208268903.py | 9205fa08e81866830a16082ced5475d645ca65f1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | N=int(input())
l=0
r=0
for i in range(1,N+1):
s=i*(i+1)//2
if s>=N:
l=i
r=s-N
break
ans=list(range(1,l+1))
if r!=0:
ans.remove(r)
print(*ans,sep='\n') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1786fbc05af84e705fd1009dcb2cfb8ad667abcb | c8ef568cd88a82459fca9d7ef2ca298763ef75e1 | /04. picamera/01. Basic/ex01.py | 2a3f840788fd062d2b7c58d3ee373655be8da213 | [] | no_license | caniro/multicampus-iot-raspberrypi | 8017711ebe4f9e9a7954649333c8106727b4ff86 | b870b25b6386c5e7954b0cdb1f966a6db89e61fd | refs/heads/main | 2023-08-12T21:36:28.621846 | 2021-09-27T08:56:40 | 2021-09-27T08:56:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # 파이 카메라 인식 테스트 및 미리보기
from picamera import PiCamera
from time import sleep
# Basic Pi camera sanity check: open the camera, show a preview for one
# second, then close it again.
camera = PiCamera()
#camera.rotation = 180 # rotate 180 degrees (for an upside-down mounted camera)
print(camera.resolution)
camera.start_preview()
# camera.start_preview(alpha=200) # transparency: 0 ~ 255
sleep(1)
camera.stop_preview()
| [
"caniro@naver.com"
] | caniro@naver.com |
a4908a97af26ea234e6156e003de1a6e3a6b89a8 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/quest/BlinkingArrows.py | 534b1179b0efd1a72384597e1a99a5cfb1466f4b | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 2,996 | py | from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
class BlinkingArrows:
    """A pair of arrow models that blink (fade in and out) in unison,
    parented to a 2-D node (``aspect2d`` by default).  An optional extra
    node can be made to flash along with the arrows.
    """
    def __init__(self, parent=aspect2d, otherNode=None):
        # Two copies of the same arrow model; always shown/hidden together.
        self.arrow1 = loader.loadModel('phase_3/models/props/arrow')
        self.arrow2 = loader.loadModel('phase_3/models/props/arrow')
        # The interval driving the blinking; None while not flashing.
        self.arrowTrack = None
        self.parent = parent
        # The otherNode is an optional node that can flash along with the
        # arrows.
        self.otherNode = otherNode
    def delete(self):
        """Stop any flashing and free both arrow models."""
        self.arrowsOff()
        self.arrow1.removeNode()
        self.arrow2.removeNode()
        del self.arrow1
        del self.arrow2
    def arrowsOn(self, x1, y1, h1, x2, y2, h2, onTime=0.75, offTime=0.75):
        """Place the arrows at (x1, y1) and (x2, y2) with rolls h1 / h2,
        then start them flashing with the given on/off durations (seconds).
        """
        self.stopArrowsFlashing()
        # Draw on top of other GUI elements.
        self.arrow1.setBin('gui-popup', 0)
        self.arrow2.setBin('gui-popup', 0)
        self.arrow1.reparentTo(self.parent)
        self.arrow2.reparentTo(self.parent)
        self.arrow1.setScale(0.2)
        self.arrow2.setScale(0.2)
        self.arrow1.setPos(x1, 0, y1)
        self.arrow2.setPos(x2, 0, y2)
        self.arrow1.setR(h1)
        self.arrow2.setR(h2)
        self.onTime = onTime
        self.offTime = offTime
        self.startArrowsFlashing()
    def arrowsOff(self):
        """Stop flashing and stash both arrows away under ``hidden``."""
        self.stopArrowsFlashing()
        self.arrow1.reparentTo(hidden)
        self.arrow2.reparentTo(hidden)
    def startArrowsFlashing(self):
        """Build and loop the fade-out / fade-in interval for both arrows
        (and for ``otherNode``, when one was supplied).
        """
        onColor = Vec4(1,1,1,1)
        offColor = Vec4(1,1,1,0.25)
        self.arrow1.show()
        self.arrow2.show()
        if self.otherNode:
            self.otherNode.show()
            # Same track as the else-branch, with otherNode fading alongside.
            self.arrowTrack = Sequence(
                Parallel(
                    self.arrow1.colorScaleInterval(self.onTime, onColor, offColor),
                    self.arrow2.colorScaleInterval(self.onTime, onColor, offColor),
                    self.otherNode.colorScaleInterval(self.onTime, onColor, offColor),
                ),
                Parallel(
                    self.arrow1.colorScaleInterval(self.offTime, offColor, onColor),
                    self.arrow2.colorScaleInterval(self.offTime, offColor, onColor),
                    self.otherNode.colorScaleInterval(self.offTime, offColor, onColor),
                ),
            )
        else:
            self.arrowTrack = Sequence(
                Parallel(
                    self.arrow1.colorScaleInterval(self.onTime, onColor, offColor),
                    self.arrow2.colorScaleInterval(self.onTime, onColor, offColor),
                ),
                Parallel(
                    self.arrow1.colorScaleInterval(self.offTime, offColor, onColor),
                    self.arrow2.colorScaleInterval(self.offTime, offColor, onColor),
                ),
            )
        self.arrowTrack.loop()
    def stopArrowsFlashing(self):
        """Finish the running interval (if any) and hide everything."""
        if self.arrowTrack:
            self.arrowTrack.finish()
            self.arrowTrack = None
        self.arrow1.hide()
        self.arrow2.hide()
        if self.otherNode:
            self.otherNode.hide()
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
e02026690c4a2ea039bfc824c17165f8b40c88c6 | e5a52968a86946c4839b64d218cb25f4a91e5ee4 | /ml_project/enities/__init__.py | 692cab33e78b4a63c6e7f83b77aa4ca9d7921440 | [] | no_license | made-ml-in-prod-2021/MaksM89 | 1a6f40c66de671dca2345e1b44051c01d166e2d8 | c00a04b6f77f682e5ff419c0afc4c1ea4669deed | refs/heads/main | 2023-06-07T13:27:14.532934 | 2021-06-24T08:33:08 | 2021-06-24T08:33:08 | 354,295,345 | 1 | 0 | null | 2021-06-24T08:33:09 | 2021-04-03T13:10:38 | Jupyter Notebook | UTF-8 | Python | false | false | 442 | py | from .data_params import Features, InputDataset, SplittingParams
from .train_params import TrainingParams
from .train_pipeline_params import (
read_training_pipeline_params,
TrainingPipelineParamsSchema,
TrainingPipelineParams,
)
__all__ = [
"Features",
"InputDataset",
"SplittingParams",
"TrainingPipelineParams",
"TrainingPipelineParamsSchema",
"TrainingParams",
"read_training_pipeline_params",
]
| [
"noreply@github.com"
] | made-ml-in-prod-2021.noreply@github.com |
c10fa354b6592ecbb7c64daa0fb6e6f00b1a9cc6 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.65_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=18/sched.py | cccef8ab09306debebd37b70d423b713008846dd | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | -S 0 -X RUN -Q 0 -L 3 102 400
-S 0 -X RUN -Q 0 -L 3 64 300
-S 0 -X RUN -Q 0 -L 3 61 200
-S 1 -X RUN -Q 1 -L 2 57 300
-S 1 -X RUN -Q 1 -L 2 52 175
-S 1 -X RUN -Q 1 -L 2 42 200
-S 3 -X RUN -Q 2 -L 1 41 200
-S 3 -X RUN -Q 2 -L 1 37 175
-S 3 -X RUN -Q 2 -L 1 33 200
-S 2 -X RUN -Q 3 -L 1 32 100
-S 2 -X RUN -Q 3 -L 1 32 200
-S 2 -X RUN -Q 3 -L 1 30 125
-S 4 25 300
-S 4 25 300
-S 4 25 175
-S 4 23 125
-S 4 18 150
-S 4 11 100
-S 4 1 300
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
8c647b3ff310fdfcadf078532d7ada9211a25871 | f338eb32c45d8d5d002a84798a7df7bb0403b3c4 | /Calibration/EcalCalibAlgos/python/electronRecalibSCAssociator_cfi.py | a80848d2096bba57fd6a045c88013391ba722a56 | [] | permissive | wouf/cmssw | 0a8a8016e6bebc611f1277379e12bef130464afb | 60da16aec83a0fc016cca9e2a5ed0768ba3b161c | refs/heads/CMSSW_7_3_X | 2022-06-30T04:35:45.380754 | 2015-05-08T17:40:17 | 2015-05-08T17:40:17 | 463,028,972 | 0 | 0 | Apache-2.0 | 2022-02-24T06:05:30 | 2022-02-24T06:05:26 | null | UTF-8 | Python | false | false | 465 | py | import FWCore.ParameterSet.Config as cms
electronRecalibSCAssociator = cms.EDProducer("ElectronRecalibSuperClusterAssociator",
electronCollection = cms.string(''),
scIslandCollection = cms.string('IslandEndcapRecalibSC'),
scIslandProducer = cms.string('correctedIslandEndcapSuperClusters'),
scProducer = cms.string('correctedHybridSuperClusters'),
electronProducer = cms.string('electronFilter'),
scCollection = cms.string('recalibSC')
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
14d500c1c4aae9c78ce481e73ff595c4ecac06f5 | d424bb5aef62c9bf07319a26cebc4f14433f927d | /ganji/GraphMaker/sample_bar_chart.py | 501426d9d6cf28e2227047ea2bc5304f53b4fc51 | [] | no_license | adorn331/CrawlerToy | d840104610ae3f8b51ddf5e8cb604573c626cc3b | e51ffd2785c3c22e934390a555257314ae6ef858 | refs/heads/master | 2021-07-05T18:15:29.892398 | 2017-09-24T14:12:08 | 2017-09-24T14:12:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | #统计北京某个区各种类目前三名的柱状图
import pymongo
import charts
conn = pymongo.MongoClient('localhost', 27017)
ganji_db = conn['ganji']
data_collection = ganji_db['sample']
areas = list(set(i['area'][0] for i in data_collection.find() if i['area']))
print(areas) #查看所有区,选一个柱状图
area = '朝阳' #选中了朝阳区,画图
pipeline = [
{'$match':{ 'area' :area}}, #area只要在'area'这个list数据项里面就会被match
{'$group':{'_id':'$cates', 'counts':{'$sum':1}}},
#'avg_price':{'$avg':'$price'} 除了实现统计个数还可以取它另一个字段的平均值
{'$sort':{'counts':-1}},
{'$limit':3}
]
# for i in data_collection.aggregate(pipeline):
# print(i)
series = [{
'name': i['_id'],
'data':[i['counts']],
'type':'column'
} for i in data_collection.aggregate(pipeline)]
charts.plot(series, show='inline') | [
"="
] | = |
1026a750eaee8be82b50bdae67d4dda6f5751fb1 | a8e095cfb21beef091870e5373b53f02bed66c25 | /arttest_25051/wsgi.py | 6a3b618e2ecf14289ccbc64fe8c07065a8a6d7a9 | [] | no_license | crowdbotics-apps/arttest-25051 | 910f5b636d17e66956ac3e2451854495477e7f21 | 14d408f68fa0cb4ef07d12a911b3eacd38395351 | refs/heads/master | 2023-03-20T20:52:42.115848 | 2021-03-16T15:33:18 | 2021-03-16T15:33:18 | 348,400,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for arttest_25051 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'arttest_25051.settings')
# The WSGI callable that servers (gunicorn, mod_wsgi, ...) import and invoke.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
0042178b6dbbb24ce80e628b46cffd655a787f57 | 76718066cbc971b83d1d633cad9daac52ad3ec50 | /src/truverifi/_compat.py | 4e02e40734d0f0f75a0c14495eeea428a0df1a6e | [
"MIT"
] | permissive | achillesrasquinha/truverifi | 594366622c535faefdc25b8ef2dabdbe3523c733 | 4e1b3760a9744b44a86ec2dfaff6714680c8b78c | refs/heads/master | 2020-05-18T05:13:06.665240 | 2019-04-30T05:57:10 | 2019-04-30T05:57:10 | 184,199,229 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # imports - standard imports
import sys

# Version information of the running interpreter.
PYTHON_VERSION = sys.version_info

def _is_python_version(*args, **kwargs):
    """Return True when the running interpreter matches the given parts.

    Keyword arguments ``major``, ``minor`` and ``patch`` are compared
    against ``sys.version_info``; parts that are omitted (left as None)
    are not checked, so calling with no arguments always returns True.
    """
    major = kwargs.get("major", None)
    minor = kwargs.get("minor", None)
    patch = kwargs.get("patch", None)

    result = True

    # Compare with ``is not None`` so an explicit 0 component (e.g.
    # minor=0 or patch=0) is still checked; the previous truthiness test
    # (``if major:``) silently skipped zero-valued parts.
    if major is not None:
        result = result and major == PYTHON_VERSION.major
    if minor is not None:
        result = result and minor == PYTHON_VERSION.minor
    if patch is not None:
        result = result and patch == PYTHON_VERSION.micro

    return result

# True when running under a Python 2 interpreter.
PY2 = _is_python_version(major = 2)

if PY2:
    # Add your Python 2 imports here.
    from urlparse import urljoin
else:
    # Add your Python 3 imports here.
    from urllib.parse import urljoin
"achillesrasquinha@gmail.com"
] | achillesrasquinha@gmail.com |
7c738fca51ec5ae686b8427e6283007ca86b8fe3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_reconsidered.py | c4e21782a16ada2154592bfc1620661ed878cbaa | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
# class header
class _RECONSIDERED():
    """Word-form entry for "reconsidered", the inflected form of "reconsider"."""
    def __init__(self,):
        self.name = "RECONSIDERED"
        # Fixed: ``reconsider`` was a bare undefined name, so instantiating
        # the class raised NameError; the base form is meant to be the
        # string literal (consistent with ``self.basic`` below).
        self.definitions = 'reconsider'
        self.parents = []
        self.childen = []  # NOTE(review): likely a typo for "children"; name kept for compatibility
        self.properties = []
        self.jsondata = {}
        self.basic = ['reconsider']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
b042807be384a59317acdd5cc345a25db9bcb91d | 779c7d032eb8d5a4421b8b236c9004559b70756d | /apps/guide/views.py | d0c5fdb435ccd749ccf739f768d9dd442ab5c698 | [] | no_license | corincerami/opus | 727e91a461a6488f2bc263ca6c98a27a93424228 | 281f246ff5bd703a009ab3bad6271249e0e00bff | refs/heads/master | 2022-11-11T13:46:06.317320 | 2018-01-11T00:33:54 | 2018-01-11T00:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | from guide.models import *
from django.shortcuts import render
from django.http import HttpResponse,Http404
from metrics.views import update_metrics
def guide(request):
    """Render the guide page with all groups and the displayable resources.

    NOTE: the template context is built from ``locals()``, so every local
    name defined in this function is visible to ``guide.html`` — renaming
    a local here changes the template's context.
    """
    update_metrics(request)
    # NOTE(review): scheme is hard-coded to http and the host is taken
    # straight from the Host header — confirm this is intended behind proxies.
    base_url = 'http://' +request.META['HTTP_HOST'] + '/opus/'
    groups = Group.objects.all()
    resources = Resource.objects.filter(display=True).select_related().order_by('disp_order')
    return render(request, 'guide.html', locals())
# def update(request)
| [
"lballard.cat@gmail.com"
] | lballard.cat@gmail.com |
d01735590a12438f621f7f2c8fc404a7731f5110 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_express_route_cross_connection_peerings_operations.py | aea084ed7fdb4f9083151e60941914c2209adfea | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 22,377 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations:
"""ExpressRouteCrossConnectionPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
cross_connection_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRouteCrossConnectionPeeringList"]:
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> "_models.ExpressRouteCrossConnectionPeering":
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> "_models.ExpressRouteCrossConnectionPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionPeering"]:
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
| [
"noreply@github.com"
] | manoj0806.noreply@github.com |
23775dbc49c80065be3ec06e8bca9463ab4b509e | 08ff1c20de52e779f687136839cff5e9e0510d22 | /numpy/37.py | b73d41d440abaf8d6e7860184ae721234dcc8acd | [] | no_license | Xman145/altanml | e5476ee7398fba265e60eb17637d8f2ade46b7c0 | a4116dcc73a16bbf5f866c6bf59b9d0cd7a62330 | refs/heads/master | 2022-05-04T18:03:39.093407 | 2019-03-23T09:37:34 | 2019-03-23T09:37:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | import numpy as np
Z = np.zeros((5,5))
Z += np.arange(5)
print(Z) | [
"you@example.com"
] | you@example.com |
0941270ae04ce144a75e7713ef41b05f253c564e | a74b980fd95d5d810315f181449fc9d1710e6923 | /savecode/pythonpackages/commonbaby/proxy/proxydb.py | 33d4923fcb83ada3ef5947cea1c36f4183d09eb1 | [
"Apache-2.0"
] | permissive | cbbbbbbbb/sspywork | b70f5539203b47b21eec2f0514ddca155affc2b8 | 8f05a6b91fc205960edd57f9076facec04f49a1a | refs/heads/master | 2023-03-22T19:45:13.024076 | 2021-03-08T01:24:21 | 2021-03-08T01:24:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,918 | py | """proxy sqlite db"""
# -*- coding:utf-8 -*-
import sqlite3
import threading
import time
import traceback
from abc import ABCMeta, abstractmethod
from ..mslog.loghook import LogHook
from ..mslog.msloglevel import MsLogLevel, MsLogLevels
from ..sql import (SqlCondition, SqlConditions, SqlConn, SqliteColumn,
SqliteConn, SqliteIndex, SqliteMemoryDB, SqliteTable,
table_locker, table_locker_manual)
from .eproxyanonymity import EProxyAnonymity
from .eproxytype import EProxyType
from .proxydbconfig import ProxyDbConfig
from .proxyitem import ProxyItem
class DbSqliteBase:
"""表示一个sqlite表操作基类"""
__metaclass = ABCMeta
__all_tablenames: dict = {}
__all_tablenames_locker = threading.RLock()
def __init__(self,
dbname: str,
dbcfg: ProxyDbConfig,
logger_hook: callable = None):
self._logger: LogHook = LogHook(logger_hook)
if not isinstance(dbname, str):
raise Exception(
"Invalid table name for TbSqliteBase: {}".format(dbname))
with DbSqliteBase.__all_tablenames_locker:
if DbSqliteBase.__all_tablenames.__contains__(dbname):
raise Exception(
"Reduplicated table name for TbSqliteBase: {}".format(
dbname))
if not isinstance(dbcfg, ProxyDbConfig):
dbcfg = ProxyDbConfig()
self._logger.debug("Proxydb config is None, use default settings.")
self._dbname: str = dbname
self._dbconfig: ProxyDbConfig = dbcfg
self._conn_mngr: SqliteMemoryDB = SqliteMemoryDB(
dbname='{}.db'.format(self._dbname),
pagesize=self._dbconfig._pagesize,
max_page_count=self._dbconfig._maxpagecount,
connecttimeoutsec=self._dbconfig._connecttimeoutsec,
delete_on_error=self._dbconfig._delete_on_error,
)
self._append_tables()
def _dict_factory(self, cursor, row):
"""dict factory for sqlite data rows"""
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def _text_factory(self, x):
"""text factory"""
if x is None:
return ''
res = self._try_decode(x, 'utf-8')
if res is None:
res = self._try_decode(x, 'gb2312')
if res is None:
res = self._try_decode(x, 'gbk')
if res is None:
res = self._try_decode(x, 'unicode')
if res is None:
raise Exception("decode failed:" + x)
def _try_decode(self, s: bytes, charset: str):
try:
if charset is None or charset == "":
raise ValueError("charset is empty")
if s is None:
return ''
return s.decode(charset)
except Exception:
return None
@abstractmethod
def _append_tables(self):
"""子类实现时,需要向当前tablesqlitebase对象中添加表,以执行自动建表\n
例: self._conn_mngr._append_tables(SqliteTable(xxxx))"""
raise NotImplementedError()
def connect(self, timeout=None) -> SqlConn:
"""获取sqlite可用于增删改的链接"""
conn = None
try:
conn = self._conn_mngr.conn
# conn.cursor.row_factory = sqlite3.Cursor.row_factory
except Exception as ex:
if not conn is None and not conn._conn_closed:
conn.close()
raise ex
return conn
def execute_search_one(self,
tablename: str,
sql: str,
params: tuple = None) -> sqlite3.Row:
"""执行增删改这种修改操作\n
tablename: 必传,用于表加锁"""
if not isinstance(tablename, str) or tablename == "":
raise Exception("Must pass 'tablename' param")
if not isinstance(sql, str) or sql == "":
return False
conn: SqliteConn = None
cursor = None
res = None
try:
with table_locker_manual(tablename):
conn = self.connect()
cursor = conn.cursor_new
# cursor.execute(sql)
if not isinstance(params, tuple) or len(params) < 1:
cursor.execute(sql)
else:
cursor.execute(sql, params)
result = cursor.fetchall()
if len(result) < 1:
return res
fields: dict = {}
for i in range(len(result[0])):
fields[cursor.description[i][0].lower()] = result[0][i]
return fields
except Exception:
self._logger.error(
"Get client status error: %s" % traceback.format_exc())
finally:
if not conn is None:
conn.close()
return res
def execute_search_all(self,
tablename: str,
sql: str,
return_with_conn: bool = False,
params: tuple = None) -> iter:
"""执行增删改这种修改操作\n
return_with_conn: 是否将结果与对应的数据库链接一并返回,默认为False"""
if not isinstance(tablename, str) or tablename == "":
raise Exception("Must pass 'tablename' param")
if not isinstance(sql, str) or sql == "":
return False
conn: SqliteConn = None
cursor = None
try:
try:
with table_locker_manual(tablename):
conn = self.connect()
cursor = conn.cursor_new
if not isinstance(params, tuple) or len(params) < 1:
cursor.execute(sql)
else:
cursor.execute(sql, params)
# result = cursor.fetchall()
# for row in result:
# if return_with_conn:
# yield (row, conn)
# else:
# yield row
result = cursor.fetchall()
if result is None or len(result) < 1:
return
fields: dict = {}
for i in range(len(result[0])):
fields[cursor.description[i][0].lower()] = result[0][i]
yield fields
except Exception as ex:
raise ex
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"Get client status error: %s" % traceback.format_exc())
def execute(self, tablename: str, sql: str, params: tuple = None) -> int:
"""执行操作,返回execute()的结果对象"""
if not isinstance(tablename, str) or tablename == "":
raise Exception("Must pass 'tablename' param")
if not isinstance(sql, str) or sql == "":
return False
res: int = 0
conn: SqliteConn = None
cursor = None
try:
with table_locker_manual(tablename):
conn = self.connect()
cursor = conn.cursor_new
if not isinstance(params, tuple) or len(params) < 1:
result = cursor.execute(sql)
else:
result = cursor.execute(sql, params)
res = cursor.fetchone()
except Exception:
self._logger.error(
"Execute modify sql error: %s" % traceback.format_exc())
finally:
if not conn is None:
conn.close()
return res
def execute_modify(self, tablename: str, sql: str,
params: tuple = None) -> int:
"""执行增删改这种修改操作,返回受影响的行数"""
if not isinstance(tablename, str) or tablename == "":
raise Exception("Must pass 'tablename' param")
if not isinstance(sql, str) or sql == "":
return False
res: int = 0
conn: SqliteConn = None
cursor = None
try:
with table_locker_manual(tablename):
conn = self.connect()
cursor = conn.cursor_new
if not isinstance(params, tuple) or len(params) < 1:
res = cursor.execute(sql)
else:
res = cursor.execute(sql, params)
if res is None:
return 0
res = res.rowcount
conn.commit()
except Exception:
self._logger.error(
"Execute modify sql error: %s" % traceback.format_exc())
finally:
if not conn is None:
conn.close()
return res
#####################################################
#####################################################
#####################################################
#####################################################
# TableProxy
class ProxyDB(DbSqliteBase):
"""TbProxy"""
__tb_Proxy: SqliteTable = SqliteTable(
'TbProxy',
True,
SqliteColumn(
colname='Id',
coltype='INTEGER',
nullable=False,
is_primary_key=True,
is_auto_increament=True,
is_unique=True).set_index_new(),
SqliteColumn(colname='IP', nullable=False).set_index_new(),
SqliteColumn(colname='Port', coltype='INTEGER',
nullable=False).set_index_new(),
SqliteColumn(
colname='IPType', coltype='INTEGER', nullable=False,
defaultval=1).set_index_new(),
SqliteColumn(
colname='ProxyType',
coltype='INTEGER',
nullable=False,
defaultval=EProxyType.HTTP.value).set_index_new(),
SqliteColumn(
colname='IsSsl', coltype='INTEGER', nullable=False,
defaultval=0).set_index_new(),
SqliteColumn(
colname='Anonymous',
coltype='INTEGER',
nullable=False,
defaultval=EProxyAnonymity.Elite.value).set_index_new(),
SqliteColumn(colname='CountryCode').set_index_new(),
SqliteColumn(colname='AliveSec'),
SqliteColumn(colname='ISP'),
SqliteColumn(colname='Location'),
SqliteColumn(colname='LastVerifyTime'),
SqliteColumn(colname='ResponseSec'),
SqliteColumn(colname='User'),
SqliteColumn(colname='Pwd'),
SqliteColumn(colname='UpdateTime', coltype='DATETIME',
nullable=False).set_index_new(),
SqliteColumn(colname='CreateTime', coltype='DATETIME',
nullable=False).set_index_new(),
)
# 所有列
# IP
# Port
# IPType
# ProxyType
# IsSsl
# Anonymous
# CountryCode
# AliveSec
# ISP
# Location
# LastVerifyTime
# ResponseSec
# User
# Pwd
# UpdateTime
# CreateTime
def __init__(self, dbcfg: ProxyDbConfig, logger_hook: callable = None):
""""""
DbSqliteBase.__init__(
self,
dbname=ProxyDB.__tb_Proxy._tbname,
dbcfg=dbcfg,
logger_hook=logger_hook)
def _append_tables(self):
self._conn_mngr.append_table(ProxyDB.__tb_Proxy)
@table_locker(__tb_Proxy._tbname)
def pop_proxyitem(self, conds: SqlConditions) -> dict:
"""按条件搜索,返回第一个匹配行,并删除此行"""
res: dict = None
try:
res: dict = self.select_proxyitem(conds)
if not isinstance(res, dict) or len(res) < 1:
return res
rowid = res["id"]
cnt = self.delete_proxyitem(
SqlConditions(SqlCondition(colname='Id', val=rowid)))
if cnt < 1:
self._logger.debug(
"Pop proxyitem from db, delete row by rowid failed.")
except Exception:
self._logger.error("Pop proxyitem error: {}".format(
traceback.format_exc()))
return res
@table_locker(__tb_Proxy._tbname)
def select_proxyitem(self, conds: SqlConditions) -> dict:
"""按条件搜索,返回数据行转换成的字段字典"""
conn: SqliteConn = None
cursor = None
conds: SqlConditions = conds
try:
# IP
# Port
# IPType
# ProxyType
# IsSsl
# Anonymous
# CountryCode
# AliveSec
# ISP
# Location
# LastVerifyTime
# ResponseSec
# User
# Pwd
# UpdateTime
# CreateTime
cmd = f'''select
Id,
IP,
Port,
IPType,
ProxyType,
IsSsl,
Anonymous,
CountryCode,
AliveSec,
ISP,
Location,
LastVerifyTime,
ResponseSec,
User,
Pwd,
UpdateTime,
CreateTime
FROM {ProxyDB.__tb_Proxy._tbname} WHERE {conds.text_normal}'''
conn: SqliteConn = self.connect()
# conn._conn.row_factory = self._dict_factory
try:
cursor = conn.cursor_new
cursor.execute(cmd, conds.params)
result = cursor.fetchall()
if result is None or len(result) < 1:
return None
fields: dict = {}
for i in range(len(result[0])):
try:
fields[cursor.description[i][0].lower()] = result[0][i]
except Exception as e:
print(e)
return fields
except Exception:
self._logger.error("Get ProxyItem error: {}".format(
traceback.format_exc()))
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"Get ProxyItem error: %s" % traceback.format_exc())
return None
@table_locker(__tb_Proxy._tbname)
def select_proxyitems(self, conds: SqlConditions) -> iter:
"""按条件搜索,返回数据行转换成的字段字典迭代器"""
conn: SqliteConn = None
cursor = None
conds: SqlConditions = conds
try:
# IP
# Port
# IPType
# ProxyType
# IsSsl
# Anonymous
# CountryCode
# AliveSec
# ISP,
# Location,
# LastVerifyTime,
# ResponseSec,
# User
# Pwd
# UpdateTime
# CreateTime
cmd = f'''SELECT
Id,
IP,
Port,
IPType,
ProxyType,
IsSsl,
Anonymous,
CountryCode,
AliveSec,
ISP,
Location,
LastVerifyTime,
ResponseSec,
User,
Pwd,
UpdateTime,
CreateTime
FROM {ProxyDB.__tb_Proxy._tbname} WHERE {conds.text_normal}'''
conn: SqliteConn = self.connect()
# conn._conn.row_factory = self._dict_factory
try:
cursor = conn.cursor_new
cursor.execute(cmd, conds.params)
result = cursor.fetchall()
if result is None or len(result) < 1:
return
for row in result:
fields: dict = {}
for i in range(len(result[0])):
fields[cursor.description[i][0].lower()] = row[i]
yield fields
except Exception:
self._logger.error("Get ProxyItems error: {}".format(
traceback.format_exc()))
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"Get ProxyItems error: %s" % traceback.format_exc())
@table_locker(__tb_Proxy._tbname)
def save_new_proxyitem(self, proxyitem: ProxyItem) -> bool:
""""""
res = False
isnew: bool = False
conn: SqliteConn = None
cursor = None
proxyitem: ProxyItem = proxyitem
try:
cmd = f'''SELECT COUNT(1) FROM {ProxyDB.__tb_Proxy._tbname} WHERE
IP=? and Port=? and ProxyType=?'''
conn: SqliteConn = self.connect()
try:
cursor = conn.cursor_new
cursor.execute(cmd, (
proxyitem._ip,
proxyitem._port,
proxyitem._proxytype.value,
))
result = cursor.fetchall()
if result[0][0] > 0:
res = True
cmd = f'''UPDATE {ProxyDB.__tb_Proxy._tbname} set
ProxyType=?,
IsSsl=?,
Anonymous=?,
CountryCode=?,
AliveSec=?,
ISP=?,
Location=?,
LastVerifyTime=?,
ResponseSec=?,
User=?,
Pwd=?,
UpdateTime=?
WHERE IP=? and Port=? and ProxyType=?;'''
result = cursor.execute(cmd, (
proxyitem._proxytype.value,
1 if proxyitem._is_ssl else 0,
proxyitem._anonymous.value,
proxyitem.countrycode,
proxyitem.alive_sec,
proxyitem.isp,
proxyitem.location,
proxyitem.lastverifytime,
proxyitem.response_sec,
proxyitem.user,
proxyitem.pwd,
time.time(),
proxyitem._ip,
proxyitem._port,
proxyitem._proxytype.value,
))
# 这句没用,就是调试看看结果..
if result is None or result.rowcount < 1: # or len(result) < 1:
pass
except Exception as ex:
conn._conn.rollback()
raise ex
else:
conn.commit()
finally:
if not conn is None:
conn.close()
# IP
# Port
# IPType
# ProxyType
# IsSsl
# Anonymous
# CountryCode
# AliveSec
# ISP,
# Location,
# LastVerifyTime,
# ResponseSec,
# User
# Pwd
# UpdateTime
# CreateTime
# 若没找到,则insert一条到最新的库
# res==True表示至少有一个库里面有一条符合条件的任务,且已更新其字段
if not res:
isnew = True
conn = self.connect(5)
try:
# insert
cmd = f'''INSERT INTO {ProxyDB.__tb_Proxy._tbname}(
IP,
Port,
IPType,
ProxyType,
IsSsl,
Anonymous,
CountryCode,
AliveSec,
ISP,
Location,
LastVerifyTime,
ResponseSec,
User,
Pwd,
UpdateTime,
CreateTime) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'''
# 有几个属于TaskBack的字段在此不更新
cursor = conn.cursor_new
result = cursor.execute(cmd, (
proxyitem._ip,
proxyitem._port,
proxyitem.iptype,
proxyitem._proxytype.value,
1 if proxyitem._is_ssl else 0,
proxyitem._anonymous.value,
proxyitem.countrycode,
proxyitem.alive_sec,
proxyitem.isp,
proxyitem.location,
proxyitem.lastverifytime,
proxyitem.response_sec,
proxyitem.user,
proxyitem.pwd,
time.time(),
time.time(),
))
if result is None or result.rowcount < 1: # or len(result) < 1:
res = False
else:
res = True
except Exception as ex:
conn._conn.rollback()
raise ex
else:
conn.commit()
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"save new ProxyItem error: %s" % traceback.format_exc())
finally:
if not conn is None:
conn.close()
return res
@table_locker(__tb_Proxy._tbname)
def update_proxyitem(self, ip: str, port: int, proxytype: EProxyType,
updatefields: dict) -> bool:
""""""
res = False
conn: SqliteConn = None
cursor = None
try:
# 搜索每个库,看有没有 TokenId一样的,且时间更新
# 的,一样就更新其他所有字段
cmd = f'''SELECT COUNT(1) FROM {ProxyDB.__tb_Proxy._tbname} WHERE
IP=? and Port=? and ProxyType=?'''
conn: SqliteConn = self.connect()
conn: SqliteConn = conn
try:
cursor = conn.cursor_new
cursor.execute(cmd, (ip, port, proxytype.value))
result = cursor.fetchall()
if result[0][0] > 0:
# 只根据TaskId、platform作为条件,
# 不考虑 任务文件产生时间与现有数据库中已存在任务的时间,每次直接覆盖,以符合用户操作。
# 若来了TaskId一样的数据,则必然分配给同一个ClientId
sqlset = ''
for k in updatefields.keys():
sqlset = sqlset + '{}=?,'.format(k)
sqlset = sqlset.rstrip(',')
cmd = f'''UPDATE {ProxyDB.__tb_Proxy._tbname} set {sqlset} WHERE IP=? and Port=? and ProxyType=?;'''
params = [v for v in updatefields.values()]
params.append(ip)
params.append(port)
params.append(proxytype.value)
result = cursor.execute(cmd, params)
if result is None or result.rowcount < 1: # or len(result) < 1:
pass
else:
res = True
except Exception as ex:
conn._conn.rollback()
raise ex
else:
conn.commit()
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"Update ProxyItem error: %s" % traceback.format_exc())
return res
@table_locker(__tb_Proxy._tbname)
def delete_proxyitem(self, conds: SqlConditions) -> int:
"""按条删除行,返回受影响的行数"""
res: int = 0
conn: SqliteConn = None
cursor = None
conds: SqlConditions = conds
try:
# IP
# Port
# IPType
# ProxyType
# IsSsl
# Anonymous
# CountryCode
# AliveSec
# ISP,
# Location,
# LastVerifyTime,
# ResponseSec,
# User
# Pwd
# UpdateTime
# CreateTime
cmd = f'''delete FROM {ProxyDB.__tb_Proxy._tbname} WHERE {conds.text_normal}'''
conn: SqliteConn = self.connect()
# conn._conn.row_factory = self._dict_factory
try:
cursor = conn.cursor_new
result = cursor.execute(cmd, conds.params)
res = result.rowcount
except Exception:
self._logger.error("Get ProxyItem error: {}".format(
traceback.format_exc()))
finally:
if not conn is None:
conn.close()
except Exception:
self._logger.error(
"Get ProxyItem error: %s" % traceback.format_exc())
return res
| [
"shiyuegege@qq.com"
] | shiyuegege@qq.com |
949c746f1ce29096b7f31f94da55866632df6c4d | f7ac9ae8835b243a6ddbf4a1e8230883266186b9 | /Maximum-AND/code.py | 80d481203b5b01876dc761171b693523328122c4 | [] | no_license | sohailshaukat/HackerEarth-competition-solutions | b992d8bef8bd9f0806e2fa1cb2870500647942ee | 6e969aec4b8e224a2c0f1b18bddde250d2bcced6 | refs/heads/master | 2020-07-04T21:23:57.339578 | 2019-08-15T20:04:23 | 2019-08-15T20:04:23 | 202,422,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | '''
-sohailshaukat ( https://github.com/sohailshaukat )
-sohail47k@gmail.com
'''
times = int(input())
for _ in range(times):
inp = input().split()
a = int(inp[0])
b = int(inp[1])
maximum_and = 0
maximum = (2 ** len(bin(a).replace('0b','')))-1
for i in range(b+1,a,-1):
if i &(i-1) > maximum_and:
maximum_and = i&(i-1)
if maximum_and == maximum:
break
print(maximum_and)
| [
"sohail47k@gmail.com"
] | sohail47k@gmail.com |
eff291c741a23fff2801d4e5b8d88673b9c4de5e | 3a7412502b89b917f23cda9a3318d2dc4d02185b | /panoptes/accounts/fields.py | 0d6c7b6a50e7bf1df9d2d8df027f17552bc77e87 | [
"BSD-2-Clause"
] | permissive | cilcoberlin/panoptes | 5f0b19d872993bc5c7f51a44c9ccc596fe0a8ab5 | 67d451ea4ffc58c23b5f347bfa5609fa7f853b45 | refs/heads/master | 2021-01-21T00:17:42.038637 | 2012-07-10T03:20:47 | 2012-07-10T03:20:47 | 1,660,305 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py |
from django import forms
from django.contrib.auth.models import User
class UserField(forms.ModelChoiceField):
"""A select field for a user that uses their full name for sorting and display."""
def __init__(self, *args, **kwargs):
kwargs['queryset'] = User.objects.all().order_by('last_name', 'first_name')
super(UserField, self).__init__(*args, **kwargs)
def label_from_instance(self, user):
"""Return the user's full name."""
return user.get_full_name()
| [
"justin.locsei@oberlin.edu"
] | justin.locsei@oberlin.edu |
f94cf11d95f5bda38870aba5378e6b1b03e8652a | 742c5bfcff91a454dfe6df0be6d98408fa990569 | /bnum/tests/implement.py | 61f86421302cdcd18793cc18df15bc5d8d1f9db3 | [] | no_license | andrewcooke/bnum | ea9179c1379a1ea92d68dc361a44414dc7582379 | 7f93379cff5c4605195fdfb3868ba0185f66b20c | refs/heads/master | 2020-05-24T14:48:24.189229 | 2013-06-02T15:34:44 | 2013-06-02T15:34:44 | 10,178,884 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py |
from unittest import TestCase
from bnum import ImplicitBnum, ExplicitBnum, from_one
'''
Test various implementation details.
'''
class NoExplicitTest(TestCase):
def test_no_explicit_in_implicit(self):
with self.assertRaises(TypeError):
class Colour(ImplicitBnum):
red = 1
with self.assertRaises(TypeError):
class Colour(ImplicitBnum):
def foo(self): pass
with self.assertRaises(TypeError):
class Number(int, ExplicitBnum, values=from_one):
with implicit:
one = 1
class ImplicitTest(TestCase):
def test_implicit(self):
class Foo(ImplicitBnum):
implicit
explicit
assert Foo.implicit in Foo
assert repr(Foo.implicit) == "Foo('implicit')", repr(Foo.implicit)
def test_explicit(self):
with self.assertRaises(AttributeError):
# this one has the initial implicit shadowing the context
class Bar(ExplicitBnum):
implicit = 1
with implicit:
explicit
class Baz(ExplicitBnum):
explicit = 1
with implicit:
implicit
assert Baz.implicit in Baz
assert Baz.explicit in Baz
assert repr(Baz.implicit) == "Baz('implicit')", repr(Baz.implicit)
class Baf(ExplicitBnum):
with implicit:
explicit
implicit = 1
assert Baf.implicit in Baf
assert Baf.explicit in Baf
assert repr(Baf.implicit) == "Baf(value=1, name='implicit')", repr(Baf.implicit)
| [
"andrew@acooke.org"
] | andrew@acooke.org |
bb40962baa7b16fd1d7cade0ce12acb734b3138e | 236a8988e513bfa286298d426e705f92099dc25a | /examples/torch_tensor_io.py | 2621176e34ced5edf3515316d487af041334fc8a | [
"MIT"
] | permissive | hzy5000/taichi | 99a468ad3efe31e57a0bb60f7321f55dd1537f65 | 73dfd36fa190b0ff39a962e18d2c8cd3b41b32ce | refs/heads/master | 2020-08-28T02:49:58.455700 | 2019-10-25T13:29:31 | 2019-10-25T13:29:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | import taichi as ti
import numpy as np
import torch
# ti.set_gdb_trigger(True)
ti.cfg.arch = ti.cuda
# n = 1024 * 1024
n = 32
y = ti.var(ti.f32)
# https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html
@ti.layout
def values():
# actually useless in thie example
ti.root.dense(ti.i, n).place(y)
ti.root.lazy_grad()
@ti.kernel
def torch_kernel(t: np.ndarray, o: np.ndarray):
for i in range(n):
o[i] = t[i] * t[i]
@ti.kernel
def torch_kernel_2(t_grad: np.ndarray, t:np.ndarray, o_grad: np.ndarray):
for i in range(n):
ti.print(o_grad[i])
t_grad[i] = 2 * t[i] * o_grad[i]
class Sqr(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
outp = torch.zeros_like(inp)
ctx.save_for_backward(inp)
torch_kernel(inp, outp)
return outp
@staticmethod
def backward(ctx, outp_grad):
outp_grad = outp_grad.contiguous()
inp_grad = torch.zeros_like(outp_grad)
inp, = ctx.saved_tensors
torch_kernel_2(inp_grad, inp, outp_grad)
return inp_grad
sqr = Sqr.apply
X = torch.tensor(2 * np.ones((n, ), dtype=np.float32), device=torch.device('cuda:0'), requires_grad=True)
sqr(X).sum().backward()
print(X.grad.cpu())
| [
"yuanmhu@gmail.com"
] | yuanmhu@gmail.com |
7e6ae7bf76d99e46c491659b5c586f3f419bf314 | 2ff556bb90d2a004e92c07bf4101325f492825b5 | /bot/plugins/lists.py | 12a316e74e0d9146d2daefccde275dcbfdf52a7a | [] | no_license | qinyuhang/qbittorrent-bot | df471f1ef46904dcb0a71291da08a941dc57f6f5 | e7ff17a4513d08e55cfbe1d27afa2729e013933c | refs/heads/master | 2023-09-02T18:25:15.933139 | 2021-11-22T22:20:13 | 2021-11-24T01:20:05 | 411,872,254 | 0 | 0 | null | 2021-11-24T01:20:06 | 2021-09-30T00:38:07 | Python | UTF-8 | Python | false | false | 2,897 | py | import logging
import re
# noinspection PyPackageRequirements
from telegram.ext import CallbackQueryHandler, CallbackContext, MessageHandler, Filters
# noinspection PyPackageRequirements
from telegram import ParseMode, Update, BotCommand
from bot.qbtinstance import qb
from bot.updater import updater
from utils import u
from utils import Permissions
logger = logging.getLogger(__name__)
TORRENT_STRING_COMPACT = """• <code>{short_name}</code> ({progress_pretty}% of {size_pretty}, {state_pretty}, <b>{dl_speed_pretty}/s</b>) \
[<a href="{info_deeplink}">info</a>]"""
TORRENT_STRING_COMPLETED = '• <code>{name}</code> ({size_pretty})'
TORRENTS_CATEGORIES = [r'\/?all', r'\/?completed', r'\/?downloading', r'\/?paused', r'\/?inactive', r'\/?active', r'\/?tostart']
TORRENT_CATEG_REGEX_PATTERN = r'^({})'.format('|'.join(TORRENTS_CATEGORIES))
TORRENT_CATEG_REGEX = re.compile(TORRENT_CATEG_REGEX_PATTERN, re.I)
@u.check_permissions(required_permission=Permissions.READ)
@u.failwithmessage
def on_torrents_list_selection(update: Update, context: CallbackContext):
logger.info('torrents list menu button from %s: %s', update.message.from_user.first_name, context.match[0])
qbfilter = context.match[0]
if qbfilter.startswith('/'):
# remove the "/" if the category has been used as command
qbfilter = qbfilter.replace('/', '')
logger.info('torrents status: %s', qbfilter)
torrents = qb.torrents(filter=qbfilter, sort='dlspeed', reverse=False) or []
if qbfilter == 'tostart':
all_torrents = qb.torrents(filter='all')
completed_torrents = [t.hash for t in qb.torrents(filter='completed')]
active_torrents = [t.hash for t in qb.torrents(filter='active')]
torrents = [t for t in all_torrents if t.hash not in completed_torrents and t.hash not in active_torrents]
logger.info('qbittirrent request returned %d torrents', len(torrents))
if not torrents:
update.message.reply_html('There is no torrent to be listed for <i>{}</i>'.format(qbfilter))
return
if qbfilter == 'completed':
base_string = TORRENT_STRING_COMPLETED # use a shorter string with less info for completed torrents
else:
base_string = TORRENT_STRING_COMPACT
strings_list = [base_string.format(**torrent.dict()) for torrent in torrents]
for strings_chunk in u.split_text(strings_list):
update.message.reply_html('\n'.join(strings_chunk))
updater.add_handler(MessageHandler(Filters.regex(TORRENT_CATEG_REGEX), on_torrents_list_selection), bot_command=[
BotCommand("all", "show all torrents"),
BotCommand("completed", "show completed torrents"),
BotCommand("downloading", "show downloading torrents"),
BotCommand("paused", "show paused torrents"),
BotCommand("inactive", "show inactive torrents"),
BotCommand("tostart", "show torrents that can be started")
])
| [
"numeralzeroone@gmail.com"
] | numeralzeroone@gmail.com |
31e5bdfe0d035cdb5f07f4feb56b9aa681368837 | b6aa9768dbac327943e0220df1c56ce38adc6de1 | /775_n-ary-tree-preorder-traversal.py | 108edc9956ffc6834b3b3b87214f7572d22da8ec | [] | no_license | Khrystynka/LeetCodeProblems | f86e4c1e46f70f874924de137ec5efb2f2518766 | 917bd000c2a055dfa2633440a61ca4ae2b665fe3 | refs/heads/master | 2021-03-17T00:51:10.102494 | 2020-09-28T06:31:03 | 2020-09-28T06:31:03 | 246,954,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # Problem Title: N-ary Tree Preorder Traversal
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
def preorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
self.lst = []
def preorder(node):
if node:
self.lst.append(node.val)
for child in node.children:
preorder(child)
preorder(root)
return self.lst
| [
"khrystyna@Khrystynas-MacBook-Pro.local"
] | khrystyna@Khrystynas-MacBook-Pro.local |
747ee9a7651abc0c1c4d1f012b95d88e8a937ccc | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/1_ml/9_xgboost/code/chapter_15/plot_performance.py | 10e2225a0794e6e34bca57b257321b24d317a4ea | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 343 | py | # Plot performance for learning_rate=0.1
from matplotlib import pyplot
n_estimators = [100, 200, 300, 400, 500]
loss = [-0.001239, -0.001153, -0.001152, -0.001153, -0.001153]
pyplot.plot(n_estimators, loss)
pyplot.xlabel('n_estimators')
pyplot.ylabel('Log Loss')
pyplot.title('XGBoost learning_rate=0.1 n_estimators vs Log Loss')
pyplot.show() | [
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
62d9537af18fc3c30d69a9a1d9bf0cc5f02f761c | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /better_storylines/src/evaluate_story_cloze_test.py | ea25a83e6fd346a0dd0ad056702fa1e1c874bb62 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 4,258 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output the overall test accuracy on the 2016 test set.
"""
import os
from absl import app
from absl import flags
from absl import logging
import gin
import gin.tf
import models
import rocstories_sentence_embeddings
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
import utils
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_string('base_dir', '/tmp/model',
'Base directory containing checkpoints and .gin config.')
flags.DEFINE_string('data_dir', 'tfds_datasets',
'Where to look for TFDS datasets.')
flags.DEFINE_multi_string('gin_bindings', [], 'Not used.')
tf.enable_v2_behavior()
@gin.configurable('dataset')
def prepare_dataset(dataset_name=gin.REQUIRED,
shuffle_input_sentences=False,
num_eval_examples=2000,
batch_size=32):
"""Create batched, properly-formatted datasets from the TFDS datasets.
Args:
dataset_name: Name of TFDS dataset.
shuffle_input_sentences: Not used during evaluation, but arg still needed
for gin compatibility.
num_eval_examples: Number of examples to use during evaluation. For the
nolabel evaluation, this is also the number of distractors we choose
between.
batch_size: Batch size.
Returns:
A dictionary mapping from the dataset split to a Dataset object.
"""
del batch_size
del num_eval_examples
del shuffle_input_sentences
dataset = tfds.load(
dataset_name,
data_dir=FLAGS.data_dir,
split=rocstories_sentence_embeddings.TEST_2016,
download=False)
dataset = utils.build_validation_dataset(dataset)
return dataset
def eval_single_checkpoint(model, dataset):
"""Runs quantitative evaluation on a single checkpoint."""
test_2016_accuracy = tf.keras.metrics.Accuracy(name='test_spring2016_acc')
for x, fifth_embedding_1, fifth_embedding_2, label in dataset:
correct = utils.eval_step(
model, x, fifth_embedding_1, fifth_embedding_2, label)
test_2016_accuracy(1, correct)
logging.warning('Test accuracy: %f', test_2016_accuracy.result())
return test_2016_accuracy.result().numpy().tolist()
def run_eval(base_dir):
  """Evaluates the best checkpoint in base_dir and records its accuracy.

  Restores the checkpoint chosen by utils.pick_best_checkpoint, computes the
  2016 test-set accuracy, and writes the resulting number to
  [base_dir]/test_spring2016_acc.txt.

  Args:
    base_dir: Directory containing the checkpoints to evaluate.
  """
  chosen_checkpoint = utils.pick_best_checkpoint(base_dir)
  eval_dataset = prepare_dataset()
  restore_path = os.path.join(base_dir, chosen_checkpoint)
  # Infer model dimensions from the dataset's element structure so the built
  # model matches the stored embeddings.
  element_shapes = tf.compat.v1.data.get_output_shapes(eval_dataset)
  embedding_dim = element_shapes[0][-1]
  num_input_sentences = element_shapes[0][1]
  model = models.build_model(
      num_input_sentences=num_input_sentences, embedding_dim=embedding_dim)
  # expect_partial(): the checkpoint may contain objects (e.g. optimizer
  # state) that evaluation does not restore.
  tf.train.Checkpoint(model=model).restore(restore_path).expect_partial()
  logging.info('Evaluating with checkpoint: "%s"', restore_path)
  test_accuracy = eval_single_checkpoint(model, eval_dataset)
  with gfile.GFile(os.path.join(base_dir, 'test_spring2016_acc.txt'), 'w') as f:
    f.write(str(test_accuracy))
def main(argv):
del argv
base_dir = FLAGS.base_dir
# Load gin.config settings stored in model directory. It might take some time
# for the train script to start up and actually write out a gin config file.
# Wait 10 minutes (periodically checking for file existence) before giving up.
gin_config_path = os.path.join(base_dir, 'config.gin')
if not gfile.exists(gin_config_path):
raise ValueError('Could not find config.gin in "%s"' % base_dir)
gin.parse_config_file(gin_config_path, skip_unknown=True)
gin.finalize()
run_eval(base_dir)
if __name__ == '__main__':
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
80c993714af78f5cb4e4762af61c743c91c48e70 | 379f10752e962b0695bdedcde5d55584b50cb0c0 | /setup.py | 2b616540f4bcfdb409eaba1bf1883e228b215dc0 | [
"MIT"
] | permissive | mahlettaye/Lidar_3DEM | e6a9c875c2900a1d7c9e3c490d4625a11a6b7a29 | af0a10afb7a6acd3e7eb601cb2152015458ed52e | refs/heads/master | 2023-08-13T10:35:13.895085 | 2021-10-20T04:18:32 | 2021-10-20T04:18:32 | 400,816,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from setuptools import setup, find_packages
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Education',
'Programming Language :: Python :: 3'
]
setup(
author='Mahlet Taye',
author_email='formahlet@gmail.com',
name='LIDAR_3DEM',
version='0.1.0',
description='A python package used to featch and visuaize raster data',
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
url='',
classifiers=classifiers,
keywords='LIDAR',
packages=find_packages(),
install_requires=['georasters','gdal','pdal','geopandas', 'matplotlib']) | [
"you@example.com"
] | you@example.com |
1b406de1919caca2dece64fef18698e9565e11d1 | 055581f9d6c81eda2f73ea05b90b7a2256da1219 | /parts/zodiac/mako/ast.py | 8aa16242b58e31b889f9127734861c58b006ed67 | [] | no_license | Tosti770/zodiac | 488a91c3e872a62d09a3ebb22a951dadcbd1c2df | af0380e20eb90699a84e3b7c6cb2085a1fb81667 | refs/heads/master | 2020-04-13T06:54:26.333228 | 2014-03-03T20:10:11 | 2014-03-03T20:10:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | /home/ruben/zodiac/eggs/Mako-0.9.0-py2.7.egg/mako/ast.py | [
"ruben_tc@hotmail.es"
] | ruben_tc@hotmail.es |
ca15ef94c2fec676a5bdfc69144f1460e71c6167 | c099611e42319053888a747ea78468224e45a725 | /Polar-slepian/V_27_ani/finaltptcombinedplotter.py | c68e65ba5b2b07ca5699c7ddf28a12b335bcfafc | [] | no_license | sbsoumya/PolarProject-Code_Res | 118f54593716520c71cdc0e479236ffdc1a94f89 | 12a3b6fb24cf8160a519c74b064fd845066cbe0b | refs/heads/master | 2021-06-27T21:04:41.057937 | 2019-03-22T20:56:44 | 2019-03-22T20:56:44 | 129,615,052 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,765 | py | from tbounds import *
from pprint import pprint
# Composite-channel flipover probabilities used throughout the plots below.
complist=[0.03,0.11,0.17]
# Global matplotlib styling: larger tick labels, high-DPI saves, wide figures.
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rc('savefig',dpi=300)
plt.rc('figure', figsize=[8,3])
"""
fig=plt.figure()
plt.subplots_adjust(top=0.95,bottom=0.15,right=0.8,left=0.09)
ax=plt.subplot(111)
#fig.suptitle("HARQ schemes ED for $\{p_1=$"+str(np.round(complist[0],decimals=3))
#+"$,p_2=$"+str(np.round(complist[1],decimals=3)) +"$,p_3= $"+str(np.round(complist[2],decimals=3))+"$ \}$")
(x,y,z)=(9,10,11)
#plt.title("Effect of $n$ on $\eta$ for RT-Polar scheme, $\delta$="+str(0.05))
maxiters=3
#-----64
N=64
R_p1=18
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_18in64_T1_18-05-07_14-33-16.txt"
T=1
lines=ml.getline(fileT1,[x,y,z])
point=len(lines[0])
channel_c=np.array([pl.CapacityBSC(1,p) for p in lines[0]])
MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.go',label='$n=$'+str(N)+', t='+str(T))
#plt.plot(lines[0],channel_c-np.array([float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)]),'-.go',label='t='+str(T)+'bits, $n=$'+str(N))
#----128
N=128
R_p1=42
fileT2="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_42in128_T2_18-05-17_12-15-41.txt"
T=2
lines=ml.getline(fileT2,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT2,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-yv',label='$n=$'+str(N)+', t='+str(T))
#plt.plot(lines[0],channel_c-np.array([float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)]),'-yx',label='t='+str(T)+'bits, $n=$'+str(N))
#----256
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_114in256_T9_18-05-17_12-17-08.txt"
N=256
R_p1=114
(x,y,z)=(9,10,11)
T=9
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.rx',label='$n=$'+str(N)+', t='+str(T))
#plt.plot(lines[0],channel_c-np.array([float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)]),'-.rv',label='t='+str(T)+'bits, $n=$'+str(N))
#~ #-----512
N=512
R_p1=246
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_18-05-17_12-19-25.txt"
T=9
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-m^',label='$n=$'+str(N)+', t='+str(T))
#plt.plot(lines[0],channel_c-np.array([float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)]),'-m^',label='t='+str(T)+'bits, $n=$'+str(N))
#----1024
N=1024
R_p1=510
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T9_18-05-17_12-21-48.txt"
T=9
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],3)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.b+',label='$n=$'+str(N)+', t='+str(T))
#plt.plot(lines[0],channel_c-np.array([float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)]),'-.b>',label='t='+str(T)+'bits, $n=$'+str(N))
#~ #2048-------
N=2048
R_p1=1020
fileT8="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_1020in2048_T8_18-05-17_12-28-05.txt"
T=8
lines=ml.getline(fileT8,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT8,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-c>',label='$n=$'+str(N)+', t='+str(T))
#plt.plot(lines[0],channel_c-np.array([float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)]),'-c+',label='t='+str(T)+'bits, $n=$'+str(N))
channel_plist=list(np.linspace(0.01,0.2,20))
plt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],"k",label="Capacity")
plt.ylabel('$\eta(p)$')
plt.xlabel('flipover probability $p$')
plt.xlim([0.025,0.175])
plt.grid(True)
plt.legend(loc="upper right", ncol=2, columnspacing=0.1,handletextpad =0.1,borderaxespad=0.1,numpoints=1)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),columnspacing=0.1,handletextpad =0.1,numpoints=1)
plt.show()
"""
#~ #========================================================
#512 - 5 iter-----and 3 iter
# Throughput-vs-p plot for n=512 RT-Polar, comparing r=3 vs r=5 retransmission
# rounds. Columns x,y,z index fields in the simulation result files; column 13
# holds the per-iteration probabilities used to compute the mean iteration
# count. Throughput per point: (R_p1 - T) / (MeanIters * N) * (1 - FER),
# where the file stores log10(FER) (hence 10**lines[2][i]).
fig=plt.figure()
plt.subplots_adjust(hspace=0.3,top=0.95,bottom=0.15)
ax=plt.subplot(111)
ax.locator_params(axis='y', nbins=5)
#-----512
# (x,y,z) are the line numbers of (p, rate, log10 FER) in the result files.
(x,y,z)=(9,10,11)
N=512
#fig.suptitle("Performance of RT-Polar scheme,$\delta$="+str(0.05)+', $n=$'+str(N), fontsize="20")
R_p1=240
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_240in512_T9_3I_18-05-19_13-37-50.txt"
#fileT11="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_240in512_T11_3I_18-05-08_22-58-34.txt"
T=9
# r=3 rounds: mean iterations from column 13, max 3 iterations.
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],3)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-m^',label='$r=3$')
complist=[0.03,0.11,0.17,0.2,0.23]
fileT9_5_iter="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_240in512_T9_5I_18-05-16_21-35-04.txt"
#fileT11_5_iter="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_240in512_T11_5I_18-05-08_20-15-47.txt"
N=512
R_p1=240
T=9
# r=5 rounds: same computation against the 5-iteration result file.
lines=ml.getline(fileT9_5_iter,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9_5_iter,[13])[0],5)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-bx',label='$r=5$')
#~ fig.suptitle("HARQ schemes ED for $\{p_1=$"+str(np.round(complist[0],decimals=3))
#~ +"$,p_2=$"+str(np.round(complist[1],decimals=3)) +"$,p_3= $"+str(np.round(complist[2],decimals=3))+"$,p_4= $"+str(np.round(complist[3],decimals=3))+"$,p_5= $"+str(np.round(complist[4],decimals=3))+"$ \}$")
#-------------------------UK
#~ T=0
#~ fileUK="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK240in512_T0_5I_18-05-09_16-38-53.txt"
#~ #fileUK="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK246in512_T0_18-05-09_15-23-02.txt"
#~ lines=ml.getline(fileUK,[x,y,z])
#~ point=len(lines[0])
#~ maxiters=5
#~ MeanIters=pl.getMeanIter(ml.getline(fileUK,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-c^',label='Decoding failure, $NR_1=$'+str(R_p1))
# NOTE(review): channel_plist reuses `lines` from the last file read above;
# it is only meaningful while that read succeeds -- confirm intent.
channel_plist=lines[0]
#plt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],"k",label="Capacity")
plt.ylabel('$\eta(p)$')
#plt.xlabel('BSC(p)')
plt.grid(True)
plt.xlim([0.025,0.235])
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.6, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel('flipover probability $p$')
plt.show()
#~ #==============================CRC
#~ #-----512
"""
ax=plt.subplot(111)
ax.locator_params(axis='y', nbins=5)
N=512
R_p1=246
fileT8="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T8_18-05-09_20-15-49.txt"
fileCRCT8="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_CRC246in512_T8_18-05-09_20-16-27.txt"
T=8
(x,y,z)=(9,10,11)
maxiters=3
lines=ml.getline(fileT8,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT8,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-mo',label='RB-Polar, t='+str(T)+'bits')
lines=ml.getline(fileCRCT8,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileCRCT8,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-b^',label='CRC,'+str(T)+'bits')
fileT32="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_CRC1020in2048_T32_18-05-09_20-20-08.txt"
fileCRCT32="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_1020in2048_T32_18-05-09_20-17-57.txt"
N=2048
R_p1=1020
T=32
(x,y,z)=(9,10,11)
maxiters=3
lines=ml.getline(fileT32,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT32,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-rx',label='RB-Polar, t='+str(T)+'bits')
lines=ml.getline(fileCRCT32,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileCRCT32,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-g>',label='CRC,'+str(T)+'bits')
#plt.title("Throughput vs p")
plt.ylabel('$\eta(p)$')
plt.xlabel('flipover probability $p$')
plt.xlim([0.025,0.175])
plt.grid(True)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.6, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),columnspacing=0.1,handletextpad =0.1,numpoints=1)
plt.show()
"""
#==============================================================================Benchmark
#~ #================================512vs FR
#-------------------calc
#------UK
#~ print "UK"+"="*20
#~ N=512
#~ R_p1=246
#~ (x,y,z)=(9,10,11)
#~ T=0
#~ fileUK="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK246in512_T0_18-05-04_23-31-44.txt"
#~ lines=ml.getline(fileUK,[x,y,z])
#~ point=len(lines[0])
#~ channel_p=np.round(lines[0],decimals=2)
#~ maxiters=3
#~ Iterprob=ml.getline(fileUK,[13])[0]
#~ MeanIters=pl.getMeanIter(Iterprob,maxiters)
#~ Averagerate=[float(R_p1-T)/(MeanIters[i]) for i in range(point)]
#~ FER=lines[2]
#~ tpt=[float(R_p1-T)/(MeanIters[i]*N)*(1-10**FER[i]) for i in range(point)]
#~ print "R_p1 :"+str(R_p1)+"/"+str(N)
#~ print "T:"+str(T)
#~ print "channel_p: \n",channel_p
#~ print "Iters:"
#~ pprint(zip(channel_p,Iterprob))
#~ print "Mean Iters :\n", zip(channel_p,MeanIters)
#~ print "Average rate :\n",zip(channel_p,Averagerate)
#~ print "FER :\n",zip(channel_p,FER)
#~ print "TPT :\n",zip(channel_p,tpt)
#~ print "CB-scheme"+"="*20
#~ N=512
#~ R_p1=246
#~ fileT11="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T11_18-05-06_23-19-05.txt"
#~ T=11
#~ (x,y,z)=(9,10,11)
#~ maxiters=3
#~ lines=ml.getline(fileT11,[x,y,z])
#~ point=len(lines[0])
#~ channel_p=np.round(lines[0],decimals=2)
#~ Iterprob=ml.getline(fileT11,[13])[0]
#~ MeanIters=pl.getMeanIter(Iterprob,maxiters)
#~ Averagerate=[float(R_p1-T)/(MeanIters[i]) for i in range(point)]
#~ FER=lines[2]
#~ tpt=[float(R_p1-T)/(MeanIters[i]*N)*(1-10**FER[i]) for i in range(point)]
#~ print "R_p1 :"+str(R_p1)+"/"+str(N)
#~ print "T:"+str(T)
#~ print "channel_p: \n",channel_p
#~ print "Iters:"
#~ pprint(zip(channel_p,Iterprob))
#~ print "Mean Iters :\n", zip(channel_p,MeanIters)
#~ print "Average rate :\n",zip(channel_p,Averagerate)
#~ print "FER :\n",zip(channel_p,FER)
#~ print "TPT :\n",zip(channel_p,tpt)
#~ #------FR schemes
#~ print "FR"+str("=")*20
#~ channel_plist=list(np.linspace(0.01,0.2,20))
#~ design_plist=channel_plist
#~ Llist=list(np.linspace(np.log10(0.001),np.log10(0.5),8))
#~ Llist=np.log10([0.02,0.03,0.08,0.1,0.2,0.3,0.4])
#~ #print np.power(10,Llist)
#~ print('\t'.join(map(str,np.round(design_plist,decimals=2))))
#~ for Lexp in Llist:
#~ print "Zmax="+str(np.round(10**Lexp,decimals=4))
#~ Rlist=[len(pcon.getGChZCL(p,N,Lexp)[0]) for p in design_plist]
#~ print('\t'.join(map(str,Rlist)))
"""
#-------------------------plot
fig=plt.figure()
ax=plt.subplot(111)
plt.subplots_adjust(top=0.95,bottom=0.2,right=0.8,left=0.09)
N=512
#plt.title("Performance of RT-Polar scheme,$\delta$="+str(0.05)+', $n=$'+str(N))
#-----512
N=512
R_p1=246
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_18-05-17_12-19-25.txt"
T=9
(x,y,z)=(9,10,11)
maxiters=3
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-m^',label='RB-Polar, t='+str(T))
"""
#------BAC
"""
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_18-06-12_16-00-41.txt"
T=9
(x,y,z)=(9,11,12)
maxiters=3
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[14])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-y^',label='RT-Polar-BAC $p_1=p_0/2$ , t='+str(T))
"""
"""
#-------------------------UK
T=0
(x,y,z)=(9,10,11)
#fileUK="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK246in512_T0_18-05-04_23-31-44.txt"
fileUK="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK246in512_T0_18-05-09_15-23-02.txt"
lines=ml.getline(fileUK,[x,y,z])
point=len(lines[0])
maxiters=3
MeanIters=pl.getMeanIter(ml.getline(fileUK,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.ro',label='Ideal Detection')
#-------------------------LTPT
T=0
(x,y,z)=(10,11,12)
#fileUK="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_UK246in512_T0_18-05-04_23-31-44.txt"
fileLTPT="./simresults/polarchannel_FERvsR_rateless_Det_LTPT_246in512_18-05-10_15-26-03.txt"
lines=ml.getline(fileLTPT,[x,y,z])
point=len(lines[0])
maxiters=3
MeanIters=pl.getMeanIter(ml.getline(fileLTPT,[14])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-gx',label='LT-Polar')
"""
#--------------L4
"""
#./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_18-06-08_14-20-13.txt
N=512
R_p1=246
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_18-06-08_14-20-13.txt"
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_L118-06-08_14-59-59.txt"
T=9
(x,y,z)=(9,10,11)
maxiters=3
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-m^',label='RB-Polar, L=4, t='+str(T))
"""
"""
#---------------------------------cp=dp
N=512
fileFR={}
fileFR["0e005"]="./simresults/polarchannel_FERvsp_FR0e005in512_18-05-08_23-47-09.txt"
fileFR["0e1"]="./simresults/polarchannel_FERvsp_FR0e1in512_18-05-08_23-53-00.txt"
fileFR["0e05"]="./simresults/polarchannel_FERvsp_FR0e05in512_18-05-08_23-46-32.txt"
fileFR["0e5"]="./simresults/polarchannel_FERvsp_FR0e5in512_18-05-09_00-43-29.txt"
fileFR["0e4"]="./simresults/polarchannel_FERvsp_FR0e4in512_18-05-09_14-25-53.txt"
fileFR["0e3"]="./simresults/polarchannel_FERvsp_FR0e3in512_18-05-09_14-25-30.txt"
fileFR["0e2"]="./simresults/polarchannel_FERvsp_FR0e2in512_18-05-09_14-25-08.txt"
fileFR["0e08"]="./simresults/polarchannel_FERvsp_FR0e08in512_18-05-09_14-24-36.txt"
fileFR["0e03"]="./simresults/polarchannel_FERvsp_FR0e03in512_18-05-09_14-23-04.txt"
fileFR["0e02"]="./simresults/polarchannel_FERvsp_FR0e02in512_18-05-09_14-17-33.txt"
fileFR["0e25"]="./simresults/polarchannel_FERvsp_FR0e25in512_18-05-09_15-21-47.txt"
#print fileFR
(x,y,z)=(-4,-3,-2)
zlist=fileFR.keys()
TPTZ={}
for Zmax in zlist:
lines=ml.getline(fileFR[Zmax],[x,y,z])
point=len(lines[0])
plist=lines[0]
#plt.plot(lines[0],[float(lines[1][i]*(1-10**lines[2][i]))/N for i in range(point)],label='$Z \leq $'+Zmax.replace("e","."))
TPTZ[Zmax]=[float(lines[1][i]*(1-10**lines[2][i]))/N for i in range(point)]
TPTmax=[]
for i in range(point):
TPTmax.append(max([TPTZ[Zmax][i] for Zmax in zlist]))
plt.plot(plist,TPTmax,'-.b>',label='Standard Polar')
channel_plist=list(np.linspace(0.01,0.2,20))
plt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],"k",label="Capacity")
plt.ylabel('$\eta(p)$')
plt.xlabel('flipover probability $p$')
plt.xlim([0.025,0.175])
#plt.ylim([0.15,0.9])
plt.grid(True)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),columnspacing=0.1,handletextpad =0.1,numpoints=1)
plt.show()
"""
#===============================M for 1024
"""
fig=plt.figure()
N=1024
ax=plt.subplot(111)
plt.subplots_adjust(top=0.95,bottom=0.2,right=0.85,left=0.09)
#fig.suptitle("HARQ schemes \n N=1024,ED for $\{p_1=$"+str(np.round(complist[0],decimals=3)) +"$,p_2=$"+str(np.round(complist[1],decimals=3)) +"$,p_3= $"+str(np.round(complist[2],decimals=3))+"$ \}$")
#plt.title("Effect of $\delta$ on $\eta$ for RT-Polar scheme, $n$="+str(N))
#~ #-----------------378
fileT10="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_378in1024_T10_18-05-17_12-29-55.txt"
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_378in1024_T1_18-04-29_16-36-30.txt"
R_p1=378
maxiters=3
complist=[0.03,0.11,0.17]
N=1024
(x,y,z)=(9,10,11)
#~ T=1
#~ lines=ml.getline(fileT1,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':g^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=10
lines=ml.getline(fileT10,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT10,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-gx',label='$\delta$='+str(0.005)+', t='+str(T))
#-------------------510
fileT17="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T17_18-05-17_12-22-16.txt"
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T1_18-04-28_15-29-44.txt"
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_510in1024_T9_18-05-17_12-21-48.txt"
R_p1=510
maxiters=3
complist=[0.03,0.11,0.17]
N=1024
(x,y,z)=(9,10,11)
#~ T=1
#~ lines=ml.getline(fileT1,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':m^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=9
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],3)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-cv',label='$\delta$='+str(0.05)+', t='+str(T))
#~ T=17
#~ lines=ml.getline(fileT17,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT17,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.mv',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
#~ #------------------678
fileT10="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_678in1024_T10_18-05-17_12-31-27.txt"
fileT16="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_678in1024_T16_18-05-17_12-31-58.txt"
fileT31="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_678in1024_T31_18-05-17_12-32-32.txt"
complist=[0.03,0.11,0.17]
N=1024
R_p1=678
maxiters=3
(x,y,z)=(9,10,11)
#~ T=10
#~ lines=ml.getline(fileT10,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT10,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':c^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=16
lines=ml.getline(fileT16,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT16,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-bo',label='$\delta$='+str(0.5)+', t='+str(T))
#~ T=31
#~ lines=ml.getline(fileT31,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT31,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.cv',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
channel_plist=list(np.linspace(0.01,0.2,20))
plt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],"k",label="Capacity")
plt.ylabel('$\eta(p)$')
plt.xlabel('flipover probability $p$')
plt.xlim([0.025,0.175])
plt.grid(True)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),columnspacing=0.1,handletextpad =0.1,numpoints=1)
plt.show()
"""
#===============================M for 512
"""
fig=plt.figure()
N=512
ax=plt.subplot(111)
plt.subplots_adjust(top=0.95,bottom=0.2,right=0.85,left=0.09)
#fig.suptitle("HARQ schemes \n N=1024,ED for $\{p_1=$"+str(np.round(complist[0],decimals=3)) +"$,p_2=$"+str(np.round(complist[1],decimals=3)) +"$,p_3= $"+str(np.round(complist[2],decimals=3))+"$ \}$")
#plt.title("Effect of $\delta$ on $\eta$ for RT-Polar scheme, $n$="+str(N))
#~ #-----------------192
fileT6="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_192in512_T6_18-05-17_14-15-54.txt"
fileT10="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_192in512_T10_18-05-17_14-14-17.txt"
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_192in512_T1_18-05-17_14-14-02.txt"
R_p1=192
maxiters=3
complist=[0.03,0.11,0.17]
N=512
(x,y,z)=(9,10,11)
T=1
lines=ml.getline(fileT1,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':g^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=6
lines=ml.getline(fileT6,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT6,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':g^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=10
lines=ml.getline(fileT10,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT10,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-gx',label='$\delta$='+str(0.005)+', t='+str(T))
#-------------------246
fileT8="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T8_18-05-09_20-15-49.txt"
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T1_18-05-06_23-18-45.txt"
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_246in512_T9_18-05-17_12-19-25.txt"
R_p1=246
maxiters=3
complist=[0.03,0.11,0.17]
N=512
(x,y,z)=(9,10,11)
T=1
lines=ml.getline(fileT1,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':m^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=9
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],3)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-cv',label='$\delta$='+str(0.05)+', t='+str(T))
T=8
lines=ml.getline(fileT8,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT8,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.mv',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
#~ #------------------372
#fileT10="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_678in1024_T10_18-05-17_12-31-27.txt"
fileT16="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_372in512_T16_18-05-17_12-57-59.txt"
#fileT31="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_678in1024_T31_18-05-17_12-32-32.txt"
complist=[0.03,0.11,0.17]
N=512
R_p1=372
maxiters=3
(x,y,z)=(9,10,11)
#~ T=10
#~ lines=ml.getline(fileT10,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT10,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':c^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=16
lines=ml.getline(fileT16,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT16,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-bo',label='$\delta$='+str(0.5)+', t='+str(T))
#~ T=31
#~ lines=ml.getline(fileT31,[x,y,z])
#~ point=len(lines[0])
#~ MeanIters=pl.getMeanIter(ml.getline(fileT31,[13])[0],maxiters)
#~ plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.cv',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
channel_plist=list(np.linspace(0.01,0.2,20))
plt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],"k",label="Capacity")
plt.ylabel('$\eta(p)$')
plt.xlabel('flipover probability $p$')
plt.xlim([0.02,0.18])
plt.grid(True)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),columnspacing=0.1,handletextpad =0.1,numpoints=1)
plt.show()
"""
#===============================M for 256
"""
fig=plt.figure()
N=256
ax=plt.subplot(111)
plt.subplots_adjust(top=0.95,bottom=0.2,right=0.85,left=0.09)
#fig.suptitle("HARQ schemes \n N=1024,ED for $\{p_1=$"+str(np.round(complist[0],decimals=3)) +"$,p_2=$"+str(np.round(complist[1],decimals=3)) +"$,p_3= $"+str(np.round(complist[2],decimals=3))+"$ \}$")
#plt.title("Effect of $\delta$ on $\eta$ for RT-Polar scheme, $n$="+str(N))
#~ #-----------------78
fileT5="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_78in256_T5_18-05-17_14-27-27.txt"
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_78in256_T1_18-05-17_14-26-54.txt"
R_p1=78
maxiters=3
complist=[0.03,0.11,0.17]
N=1024
(x,y,z)=(9,10,11)
T=1
lines=ml.getline(fileT1,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':g^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=5
lines=ml.getline(fileT5,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT5,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-gx',label='$\delta$='+str(0.005)+', t='+str(T))
#-------------------114
fileT5="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_114in256_T5_18-05-17_12-18-33.txt"
fileT1="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_114in256_T1_18-05-07_15-24-52.txt"
fileT9="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_114in256_T9_18-05-17_12-17-08.txt"
R_p1=114
maxiters=3
complist=[0.03,0.11,0.17]
N=256
(x,y,z)=(9,10,11)
T=1
lines=ml.getline(fileT1,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT1,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':m^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=9
lines=ml.getline(fileT9,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT9,[13])[0],3)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-cv',label='$\delta$='+str(0.05)+', t='+str(T))
T=5
lines=ml.getline(fileT5,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT5,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-.mv',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
#~ #-----------------186
fileT7="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_186in256_T7_18-05-17_14-42-53.txt"
fileT11="./simresults/polarchannel_FERvsR_rateless_Det_Iter_retro_186in256_T11_18-05-17_14-43-08.txt"
complist=[0.03,0.11,0.17]
N=256
R_p1=186
maxiters=3
(x,y,z)=(9,10,11)
T=7
lines=ml.getline(fileT7,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT7,[13])[0],maxiters)
#plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],':c^',label='CB '+str(T)+', $NR_1=$'+str(R_p1))
T=11
lines=ml.getline(fileT11,[x,y,z])
point=len(lines[0])
MeanIters=pl.getMeanIter(ml.getline(fileT11,[13])[0],maxiters)
plt.plot(lines[0],[float(R_p1-T)/(MeanIters[i]*N)*(1-10**lines[2][i]) for i in range(point)],'-bo',label='$\delta$='+str(0.5)+', t='+str(T))
channel_plist=list(np.linspace(0.01,0.2,20))
plt.plot(channel_plist,[pl.CapacityBSC(1,p) for p in channel_plist],"k",label="Capacity")
plt.ylabel('$\eta(p)$')
plt.xlabel('flipover probability $p$')
plt.xlim([0.02,0.18])
plt.grid(True)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),columnspacing=0.1,handletextpad =0.1,numpoints=1)
plt.show()
"""
| [
"soumya.s.banerjee17@gmail.com"
] | soumya.s.banerjee17@gmail.com |
6480af91ae31cc3d289d896f4297cc1af6742c2f | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/matrices/tests/test_normalforms.py | 5be332446dac4ca65b0ac9c87b3e1e1666f6869d | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 882 | py | from sympy import Symbol, Poly
from sympy.polys.solvers import RawMatrix as Matrix
from sympy.matrices.normalforms import invariant_factors, smith_normal_form
from sympy.polys.domains import ZZ, QQ
def test_smith_normal():
    """Check Smith normal form / invariant factors over ZZ and QQ[x]."""
    # 4x4 integer matrix: SNF over ZZ, last invariant factor is zero.
    mat = Matrix([[12, 6, 4, 8], [3, 9, 6, 12], [2, 16, 14, 28], [20, 10, 10, 20]])
    setattr(mat, 'ring', ZZ)
    expected = Matrix([[1, 0, 0, 0], [0, 10, 0, 0], [0, 0, -30, 0], [0, 0, 0, 0]])
    assert smith_normal_form(mat) == expected

    # 3x3 polynomial matrix: invariant factors over QQ[x].
    x = Symbol('x')
    mat = Matrix([
        [Poly(x - 1), Poly(1, x), Poly(-1, x)],
        [0, Poly(x), Poly(-1, x)],
        [Poly(0, x), Poly(-1, x), Poly(x)],
    ])
    setattr(mat, 'ring', QQ[x])
    expected_factors = (
        Poly(1, x, domain='QQ'),
        Poly(x - 1, domain='QQ'),
        Poly(x**2 - 1, domain='QQ'),
    )
    assert invariant_factors(mat) == expected_factors

    # Degenerate non-square case: SNF of a single-row matrix.
    mat = Matrix([[2, 4]])
    setattr(mat, 'ring', ZZ)
    expected = Matrix([[2, 0]])
    assert smith_normal_form(mat) == expected
| [
"kaunalei@gmail.com"
] | kaunalei@gmail.com |
1dd04629d4fe0a0932f648ef6f07170eb03fe9e0 | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/test/test_AppDistributionGroupUsersRequest.py | 93310f6c325a96c20600d8c0e085dcb9a65832aa | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 1,066 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from AppDistributionGroupUsersRequest.clsAppDistributionGroupUsersRequest import AppDistributionGroupUsersRequest # noqa: E501
from appcenter_sdk.rest import ApiException
class TestAppDistributionGroupUsersRequest(unittest.TestCase):
    """Unit-test skeleton for the AppDistributionGroupUsersRequest model."""

    def setUp(self):
        # No fixtures are needed yet for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testAppDistributionGroupUsersRequest(self):
        """Smoke-test construction of AppDistributionGroupUsersRequest."""
        # FIXME: construct the model with example values for its mandatory
        # attributes once they are known, e.g.:
        # model = appcenter_sdk.models.clsAppDistributionGroupUsersRequest.AppDistributionGroupUsersRequest()  # noqa: E501
        pass
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| [
"b3nab@users.noreply.github.com"
] | b3nab@users.noreply.github.com |
d56787796d37eb49a9a784dcd66499f4274899f0 | 20387589a922dcfdfb47c23c692318b9cc4f7515 | /listings/views.py | a7b9bef8da7811f2fc3be8f3a7228589097c1cc8 | [] | no_license | shahjalalh/btre | c95a951eea32bde64d8cd01a73771efed0b99125 | 56069a03b8fc35febdb864312aefb368404d3090 | refs/heads/master | 2020-05-22T03:27:19.401585 | 2019-10-21T10:44:40 | 2019-10-21T10:44:40 | 186,212,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,050 | py | from django.shortcuts import render, get_object_or_404
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from listings.models import Listing
from listings.choices import price_choices, bedroom_choices, state_choices
# Create your views here.
def index(request):
    """Render the listings index: published listings, newest first, 3 per page."""
    published = Listing.objects.order_by('-list_date').filter(is_published=True)
    page_number = request.GET.get('page')
    page = Paginator(published, 3).get_page(page_number)
    return render(request, 'listings/listings.html', {'listings': page})
def listing(request, listing_id):
    """Render the detail page for one listing; respond 404 if it does not exist."""
    item = get_object_or_404(Listing, pk=listing_id)
    return render(request, 'listings/listing.html', {'listing': item})
def search(request):
    """Filter listings by the optional search-form fields and render results.

    Supported query parameters (each optional; absent or blank values skip
    the corresponding filter):
      keywords -- case-insensitive substring match on the description
      city     -- case-insensitive exact city name
      state    -- case-insensitive exact state code
      bedrooms -- maximum number of bedrooms (``lte`` lookup)
      price    -- maximum price (``lte`` lookup)
    """
    queryset_list = Listing.objects.order_by('-list_date')

    # QueryDict.get() replaces the original membership-test-plus-index
    # double lookup (LBYL) with a single call; None and '' are both falsy,
    # so missing and blank parameters behave identically to before.
    keywords = request.GET.get('keywords')
    if keywords:
        queryset_list = queryset_list.filter(description__icontains=keywords)

    city = request.GET.get('city')
    if city:
        queryset_list = queryset_list.filter(city__iexact=city)

    state = request.GET.get('state')
    if state:
        queryset_list = queryset_list.filter(state__iexact=state)

    bedrooms = request.GET.get('bedrooms')
    if bedrooms:
        queryset_list = queryset_list.filter(bedrooms__lte=bedrooms)

    price = request.GET.get('price')
    if price:
        queryset_list = queryset_list.filter(price__lte=price)

    context = {
        'state_choices': state_choices,
        'bedroom_choices': bedroom_choices,
        'price_choices': price_choices,
        'listings': queryset_list,
        'values': request.GET,
    }
    return render(request, 'listings/search.html', context)
| [
"shahjalal.tipu@gmail.com"
] | shahjalal.tipu@gmail.com |
a50a3721da05a0a27cecaa6aa1c042b2b6af8159 | 117626e3c32dc848519d319635cb995bbe78dd43 | /examples/imdb.py | 017f8bf4266a595c1d70cff9213516ae760ed660 | [
"MIT"
] | permissive | amcs1729/Keras-IndRNN | 412a183a0f1149ce905ebef6748330079ae0ad8d | e5f1da3c4d191bd528491f11ae7bdf0fdb54df21 | refs/heads/master | 2022-11-15T03:24:41.366368 | 2020-07-10T15:34:29 | 2020-07-10T15:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | '''Trains a Minimal RNN on the IMDB sentiment classification task.
The dataset is actually too small for Minimal RNN to be of any advantage
compared to simpler, much faster methods such as TF-IDF + LogReg.
'''
from __future__ import print_function
import os
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, BatchNormalization
from keras.callbacks import ModelCheckpoint
from keras.datasets import imdb
from ind_rnn import IndRNN
# Create the checkpoint directory up front so ModelCheckpoint can write to it.
if not os.path.exists('weights'):
    os.makedirs('weights/')

# Hyperparameters.
max_features = 20000  # vocabulary size: keep only the most frequent words
maxlen = 500  # cut texts after this number of words (among top max_features most common words)
batch_size = 128

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

# Pad/truncate every review to a fixed length so batches are rectangular.
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

# Embedding -> two stacked IndRNN layers -> sigmoid binary classifier.
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_shape=(maxlen,)))
model.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
                 return_sequences=True))
model.add(IndRNN(128, recurrent_clip_min=-1, recurrent_clip_max=-1, dropout=0.0, recurrent_dropout=0.0,
                 return_sequences=False))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

print('Train...')
# Checkpoint only the weights with the best validation accuracy seen so far.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=15,
          validation_data=(x_test, y_test),
          callbacks=[ModelCheckpoint('weights/imdb_indrnn.h5', monitor='val_acc',
                                     save_best_only=True, save_weights_only=True)])

# Restore the best checkpoint before the final evaluation.
model.load_weights('weights/imdb_indrnn.h5')
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| [
"titu1994@gmail.com"
] | titu1994@gmail.com |
f89f783a1d90fd93f69cab3aa560869306aa5aad | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/vz/rtintraepg.py | 60a181a70e329f73daad40d7b7e163a4d27998c4 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 6,137 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtIntraEpg(Mo):
    """
    Auto-generated relation MO -- do not hand-edit.

    Read-only, local "relation from" record (RELATIONSHIP_FROM_LOCAL),
    contained in a vz.BrCP: its tDn names the fv.EPg (or a subclass, see
    the tCl constants) that references the parent contract via the
    fvIntraEpg relation, as the 'rtfvIntraEpg-[tDn]' RN format shows.
    """

    # Class-level metadata consumed by the cobra framework (not instance state).
    meta = TargetRelationMeta("cobra.model.vz.RtIntraEpg", "cobra.model.fv.EPg")

    meta.moClassName = "vzRtIntraEpg"
    meta.rnFormat = "rtfvIntraEpg-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
    meta.label = "End Point Group"
    meta.writeAccessMask = 0x101
    meta.readAccessMask = 0x2701
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.parentClasses.add("cobra.model.vz.BrCP")

    meta.superClasses.add("cobra.model.reln.From")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.pol.NFromRef")

    meta.rnPrefixes = [
        ('rtfvIntraEpg-', True),
    ]

    # Property metadata: one PropMeta per managed-object attribute; the
    # _addConstant calls enumerate the legal wire values for each property.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # tCl: class id of the relation target; defaults to fvEPg but any of
    # the EPg subclasses listed below is accepted.
    prop = PropMeta("str", "tCl", "tCl", 33322, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1899
    prop.defaultValueStr = "fvEPg"
    prop._addConstant("dhcpCRelPg", None, 1467)
    prop._addConstant("dhcpPRelPg", None, 1466)
    prop._addConstant("fvAEPg", None, 1981)
    prop._addConstant("fvEPg", None, 1899)
    prop._addConstant("fvTnlEPg", None, 9196)
    prop._addConstant("infraCEPg", None, 4326)
    prop._addConstant("infraPEPg", None, 4325)
    prop._addConstant("l2extInstP", None, 1746)
    prop._addConstant("l3extInstP", None, 1775)
    prop._addConstant("l3extInstPDef", None, 5987)
    prop._addConstant("mgmtInB", None, 2194)
    prop._addConstant("unspecified", "unspecified", 0)
    prop._addConstant("vnsEPpInfo", None, 4694)
    prop._addConstant("vnsREPpInfo", None, 5959)
    prop._addConstant("vnsSDEPpInfo", None, 5958)
    prop._addConstant("vnsSHEPpInfo", None, 6131)
    meta.props.add("tCl", prop)

    # tDn: DN of the target EPg; this is the naming property (appears in
    # the RN, see rnFormat above).
    prop = PropMeta("str", "tDn", "tDn", 33321, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)

    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    meta.deploymentQueryPaths.append(DeploymentPathMeta("CtrctIfToEPgCons", "Contract Interface EPG Consumer", "cobra.model.fv.EPg"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("CtrctIfToEPgConsNwIf", "Contract Interface EPG Consumer Interface", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToAnyProv", "Any To Provider", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToAnyCons", "Any To Consumer", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToEPgProv", "EPG Provider", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("ABrCPToEPgCons", "EPG Consumer", "cobra.model.nw.If"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("GraphInstancesinacontract", "Graph Instances", "cobra.model.vns.GraphInst"))

    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        # tDn is the single naming value: it selects the target EPg and is
        # substituted into the RN by the framework.
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
3922819099c6ac8d461fdda7a281e41f2ade3c9c | 03dfcd4bd41ff9ba76e67895e96a9794ad003a31 | /tutorial/9-classes/object.py | e2a14344132cd7641300d8541f7d7f4f772ff7bf | [] | no_license | gittygitgit/python-sandbox | 71ca68fcc90745931737f7aeb61306ac3417ce60 | 3b3e0eaf4edad13aabe51eb3258ebe9e6b951c67 | refs/heads/master | 2021-01-19T02:41:17.047711 | 2018-11-22T18:07:15 | 2018-11-22T18:07:15 | 39,742,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | #!/usr/bin/python
# is there an Object class that everything extends from?
# yes
# how do you get a handle to the Object class?
print object
# object has no baseclasses
print object.__bases__ # returns empty tuple ()
# difference between a type and a base...
# each object has a single type...it's type
print type("test") # type string
# a base is like a superclass
print type("sdsdf").__bases__
print type(type("sdfsdf")).__bases__
# type returns an instance's __class__ attribute
print isinstance("sdfsdf",str)
# what's the difference between type and object?
# both are primitive objects
# every object has a __class__ attribute
# every type object has a __bases__ attribute
# test if str is a subclass of object
issubclass(str,object) # true if str extends object directly or one of it's baseclasses extends object
# identity operator
a="one"
b=a
print "id(a): " + id(a) + ", id(b):" + id(b)
print a is b # prints True
c="two"
print a is c # prints False
#
# stuff provided by object
| [
"grudkowm@Michaels-Air-2.fios-router.home"
] | grudkowm@Michaels-Air-2.fios-router.home |
9f55979e50154c96648c73a1506a7753eef4cfda | 0c469c4100fe9d352e83731688e388062a3c55c7 | /bactracking/37. Sudoku Solver.py | 96b9e17b6889f4bd966b4d102c71d7b9b3372080 | [] | no_license | asperaa/back_to_grind | 9e055c7e6561384e5b7ae52f01063e4beb34a298 | 5ea1976b9d5c6d04800e296e45e8ff90fdde5001 | refs/heads/master | 2022-12-16T18:32:01.443743 | 2020-09-05T13:29:39 | 2020-09-05T13:29:39 | 254,910,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | """We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""37. Sudoku Solver
"""
class Solution:
    """In-place backtracking solver for a 9x9 Sudoku board (LeetCode 37)."""

    def solveSudoku(self, board):
        """Solve *board* in place; empty cells are marked with '.'."""
        self.solve(board)

    def solve(self, board):
        """Backtracking core: return True once every cell is filled."""
        # Locate the first empty cell in row-major order.
        cell = next(
            ((r, c) for r in range(9) for c in range(9) if board[r][c] == "."),
            None,
        )
        if cell is None:
            return True  # no empty cell left -> board is solved
        row, col = cell
        for digit in "123456789":
            if self.is_valid(board, row, col, digit):
                board[row][col] = digit
                if self.solve(board):
                    return True
                board[row][col] = "."  # undo and try the next candidate
        return False  # dead end: trigger backtracking in the caller

    def is_valid(self, board, row, col, ch):
        """Return True if placing *ch* at (row, col) breaks no Sudoku rule."""
        box_row, box_col = 3 * (row // 3), 3 * (col // 3)
        for k in range(9):
            # Same row or same column?
            if board[row][k] == ch or board[k][col] == ch:
                return False
            # Same 3x3 box?
            if board[box_row + k // 3][box_col + k % 3] == ch:
                return False
        return True
"adityaankur44@gmail.com"
] | adityaankur44@gmail.com |
fd33f9e1e4befb522cdb051178add1f66fc9e2ad | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PyQt5/QtWidgets/QListWidgetItem.py | 0787a284749b0903ac1dee930f442569072e297e | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,602 | py | # encoding: utf-8
# module PyQt5.QtWidgets
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PyQt5\QtWidgets.pyd
# by generator 1.146
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import sip as __sip
class QListWidgetItem(__sip.wrapper):
    """
    QListWidgetItem(parent: QListWidget = None, type: int = QListWidgetItem.Type)
    QListWidgetItem(str, parent: QListWidget = None, type: int = QListWidgetItem.Type)
    QListWidgetItem(QIcon, str, parent: QListWidget = None, type: int = QListWidgetItem.Type)
    QListWidgetItem(QListWidgetItem)
    """
    # NOTE: auto-generated IDE stub -- the bodies below are placeholders for
    # static analysis only; the real implementations live in the compiled
    # QtWidgets extension module (.pyd) named in the file header.
    def background(self): # real signature unknown; restored from __doc__
        """ background(self) -> QBrush """
        pass

    def checkState(self): # real signature unknown; restored from __doc__
        """ checkState(self) -> Qt.CheckState """
        pass

    def clone(self): # real signature unknown; restored from __doc__
        """ clone(self) -> QListWidgetItem """
        return QListWidgetItem

    def data(self, p_int): # real signature unknown; restored from __doc__
        """ data(self, int) -> Any """
        pass

    def flags(self): # real signature unknown; restored from __doc__
        """ flags(self) -> Qt.ItemFlags """
        pass

    def font(self): # real signature unknown; restored from __doc__
        """ font(self) -> QFont """
        pass

    def foreground(self): # real signature unknown; restored from __doc__
        """ foreground(self) -> QBrush """
        pass

    def icon(self): # real signature unknown; restored from __doc__
        """ icon(self) -> QIcon """
        pass

    def isHidden(self): # real signature unknown; restored from __doc__
        """ isHidden(self) -> bool """
        return False

    def isSelected(self): # real signature unknown; restored from __doc__
        """ isSelected(self) -> bool """
        return False

    def listWidget(self): # real signature unknown; restored from __doc__
        """ listWidget(self) -> QListWidget """
        return QListWidget

    def read(self, QDataStream): # real signature unknown; restored from __doc__
        """ read(self, QDataStream) """
        pass

    def setBackground(self, Union, QBrush=None, QColor=None, Qt_GlobalColor=None, QGradient=None): # real signature unknown; restored from __doc__
        """ setBackground(self, Union[QBrush, QColor, Qt.GlobalColor, QGradient]) """
        pass

    def setCheckState(self, Qt_CheckState): # real signature unknown; restored from __doc__
        """ setCheckState(self, Qt.CheckState) """
        pass

    def setData(self, p_int, Any): # real signature unknown; restored from __doc__
        """ setData(self, int, Any) """
        pass

    def setFlags(self, Union, Qt_ItemFlags=None, Qt_ItemFlag=None): # real signature unknown; restored from __doc__
        """ setFlags(self, Union[Qt.ItemFlags, Qt.ItemFlag]) """
        pass

    def setFont(self, QFont): # real signature unknown; restored from __doc__
        """ setFont(self, QFont) """
        pass

    def setForeground(self, Union, QBrush=None, QColor=None, Qt_GlobalColor=None, QGradient=None): # real signature unknown; restored from __doc__
        """ setForeground(self, Union[QBrush, QColor, Qt.GlobalColor, QGradient]) """
        pass

    def setHidden(self, bool): # real signature unknown; restored from __doc__
        """ setHidden(self, bool) """
        pass

    def setIcon(self, QIcon): # real signature unknown; restored from __doc__
        """ setIcon(self, QIcon) """
        pass

    def setSelected(self, bool): # real signature unknown; restored from __doc__
        """ setSelected(self, bool) """
        pass

    def setSizeHint(self, QSize): # real signature unknown; restored from __doc__
        """ setSizeHint(self, QSize) """
        pass

    def setStatusTip(self, p_str): # real signature unknown; restored from __doc__
        """ setStatusTip(self, str) """
        pass

    def setText(self, p_str): # real signature unknown; restored from __doc__
        """ setText(self, str) """
        pass

    def setTextAlignment(self, p_int): # real signature unknown; restored from __doc__
        """ setTextAlignment(self, int) """
        pass

    def setToolTip(self, p_str): # real signature unknown; restored from __doc__
        """ setToolTip(self, str) """
        pass

    def setWhatsThis(self, p_str): # real signature unknown; restored from __doc__
        """ setWhatsThis(self, str) """
        pass

    def sizeHint(self): # real signature unknown; restored from __doc__
        """ sizeHint(self) -> QSize """
        pass

    def statusTip(self): # real signature unknown; restored from __doc__
        """ statusTip(self) -> str """
        return ""

    def text(self): # real signature unknown; restored from __doc__
        """ text(self) -> str """
        return ""

    def textAlignment(self): # real signature unknown; restored from __doc__
        """ textAlignment(self) -> int """
        return 0

    def toolTip(self): # real signature unknown; restored from __doc__
        """ toolTip(self) -> str """
        return ""

    def type(self): # real signature unknown; restored from __doc__
        """ type(self) -> int """
        return 0

    def whatsThis(self): # real signature unknown; restored from __doc__
        """ whatsThis(self) -> str """
        return ""

    def write(self, QDataStream): # real signature unknown; restored from __doc__
        """ write(self, QDataStream) """
        pass

    # Rich-comparison operators (stubbed like the methods above).
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass

    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass

    def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        pass

    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass

    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""

    # Item-type constants mirrored from the extension module.
    Type = 0
    UserType = 1000
    # __hash__ = None makes instances unhashable at the Python level even
    # though __eq__ is defined above.
    __hash__ = None
| [
"siddharthnatamai@gmail.com"
] | siddharthnatamai@gmail.com |
c285438f7e8e453c14624038b1b409d0666a8568 | 33ff050337ba4575042032d9602bf84dcf81435e | /test/functional/test_framework/authproxy.py | 270128a77fe0e70ba6108139c410b737c1c6b010 | [
"MIT"
] | permissive | robinadaptor/chronon | 5256b33fbe797bbdeb9c9a3c2091f0592afe6614 | 630b3945824c1b1cd2ea67ca80835a9f669b9124 | refs/heads/master | 2020-07-11T06:27:01.758237 | 2019-12-17T20:53:48 | 2019-12-17T20:53:48 | 145,383,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,620 | py | # Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to chronond.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
import http.client
import json
import logging
import socket
import time
import urllib.parse
# Default per-request timeout (seconds); AuthServiceProxy.__init__ lets
# callers override it.
HTTP_TIMEOUT = 300
# User-Agent header sent with every RPC request.
USER_AGENT = "AuthServiceProxy/0.1"

log = logging.getLogger("BitcoinRPC")
class JSONRPCException(Exception):
    """Raised when the RPC server replies with a JSON-RPC error object."""

    def __init__(self, rpc_error):
        # Render "message (code)" when the error dict carries the standard
        # fields; fall back to an empty message for malformed error objects.
        try:
            text = '%(message)s (%(code)i)' % rpc_error
        except (KeyError, TypeError):
            text = ''
        super().__init__(text)
        # Keep the raw error object around for callers that inspect it.
        self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
    """Persistent-connection JSON-RPC client (see module docstring)."""
    # Monotonically increasing JSON-RPC request id, shared by all proxies.
    __id_count = 0

    # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
    def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
        self.__service_url = service_url
        self._service_name = service_name
        self.ensure_ascii = ensure_ascii  # can be toggled on the fly by tests
        self.__url = urllib.parse.urlparse(service_url)
        port = 80 if self.__url.port is None else self.__url.port
        user = None if self.__url.username is None else self.__url.username.encode('utf8')
        passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
        # HTTP Basic auth credentials are taken from the URL's user:pass part.
        authpair = user + b':' + passwd
        self.__auth_header = b'Basic ' + base64.b64encode(authpair)
        if connection:
            # Callables re-use the connection of the original proxy
            self.__conn = connection
        elif self.__url.scheme == 'https':
            self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout)
        else:
            self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout)

    def __getattr__(self, name):
        """Return a callable sub-proxy for RPC method *name* (dotted names supported)."""
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError
        if self._service_name is not None:
            # Nested attribute access builds a dotted method name, e.g. a.b -> "a.b".
            name = "%s.%s" % (self._service_name, name)
        return AuthServiceProxy(self.__service_url, name, connection=self.__conn)

    def _request(self, method, path, postdata):
        '''
        Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
        This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
        '''
        headers = {'Host': self.__url.hostname,
                   'User-Agent': USER_AGENT,
                   'Authorization': self.__auth_header,
                   'Content-type': 'application/json'}
        try:
            self.__conn.request(method, path, postdata, headers)
            return self._get_response()
        except http.client.BadStatusLine as e:
            if e.line == "''": # if connection was closed, try again
                self.__conn.close()
                self.__conn.request(method, path, postdata, headers)
                return self._get_response()
            else:
                raise
        except (BrokenPipeError, ConnectionResetError):
            # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
            # ConnectionResetError happens on FreeBSD with Python 3.4
            self.__conn.close()
            self.__conn.request(method, path, postdata, headers)
            return self._get_response()

    def get_request(self, *args):
        """Build the JSON-RPC 1.1 request dict for this proxy's method and *args*."""
        AuthServiceProxy.__id_count += 1
        log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name,
                                   json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
        return {'version': '1.1',
                'method': self._service_name,
                'params': args,
                'id': AuthServiceProxy.__id_count}

    def __call__(self, *args, **argsn):
        """POST one RPC call and return its 'result'; raise JSONRPCException on error."""
        postdata = json.dumps(self.get_request(*args), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
        response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
        if response['error'] is not None:
            raise JSONRPCException(response['error'])
        elif 'result' not in response:
            raise JSONRPCException({
                'code': -343, 'message': 'missing JSON-RPC result'})
        else:
            return response['result']

    def batch(self, rpc_call_list):
        """POST a list of pre-built request dicts as a single JSON-RPC batch."""
        postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
        log.debug("--> " + postdata)
        return self._request('POST', self.__url.path, postdata.encode('utf-8'))

    def _get_response(self):
        """Read one HTTP response and decode it as JSON (floats become Decimal)."""
        req_start_time = time.time()
        try:
            http_response = self.__conn.getresponse()
        except socket.timeout as e:
            raise JSONRPCException({
                'code': -344,
                'message': '%r RPC took longer than %f seconds. Consider '
                           'using larger timeout for calls that take '
                           'longer to return.' % (self._service_name,
                                                  self.__conn.timeout)})
        if http_response is None:
            raise JSONRPCException({
                'code': -342, 'message': 'missing HTTP response from server'})

        content_type = http_response.getheader('Content-Type')
        if content_type != 'application/json':
            raise JSONRPCException({
                'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})

        responsedata = http_response.read().decode('utf8')
        response = json.loads(responsedata, parse_float=decimal.Decimal)
        elapsed = time.time() - req_start_time
        if "error" in response and response["error"] is None:
            log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
        else:
            log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
        return response

    def __truediv__(self, relative_uri):
        """proxy / "sub/path" -> new proxy rooted at service_url/sub/path, sharing the connection."""
        return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn)
| [
"robin.adaptor@gmail.com"
] | robin.adaptor@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.