blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d98234efa40fb798d6faa30258566f5240281566 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/smallestWindow_20200707160434.py | 7409791c55ac7cf067acac42549d2f5fd582c808 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | def min(s,t):
no_of_chars = 256
count = 0
start = 0
start_index = -1
min_len = float('inf')
print(start,start_index,min_len)
# first check if the length of the string is less than the string of the given pattern
if len(t)> len(s):
return ""
else:
# store the occurrences of the characters of the given pat in a hash pat
hash_pat = [0] * no_of_chars
hash_str = [0] * no_of_chars
# here we create a array where we store the number of occurences of a char based on its ascii value
for i in range(len(t)):
hash_pat[ord(t[i])] +=1
print(hash_pat)
for j in range(len(s)):
hash_str[ord(t[j])] +=1
if hash_pat[ord(t[j])] <= hash_str[ord(s[j])] and hash_pat[ord(t[j]) !=0]:
count +=1
# when the count gets to the length of the pattern string then the window string contains the pattern
if count == len(t):
# here we'll try minimize the window --> how
# if the window contains repeating characters that are not in the pattern
# we ignore them
# also if a character is there and not available in the pattern please ignore it
while(hash_str[ord(s[start])] > hash_pat[ord(string[start])] or hash_pat[ord(s[start])] == 0:
# first substring ADOBEC
/
min("ADOBECODEBANC","ABC") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
9c6ef08a4f2ad7ea43a9ad6e3577602aba3521a9 | f3f76d228f7de399831f31d7d3ca8942e0c27d58 | /tutorial_coding_example.py | 1ac17fa1231e79284ce32ef860c65fa73bb9304f | [] | no_license | emylincon/line_encoding | 3cac93630d8416c0f513c172982b561c7fd7469a | 80a79b22501bac2371b6ad63ca6fe955608291f2 | refs/heads/master | 2021-01-04T11:54:42.411696 | 2020-02-15T21:40:13 | 2020-02-15T21:40:13 | 240,536,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,775 | py | import matplotlib.pyplot as plt
# One figure with four stacked subplots, one per line-coding scheme.
# These are module-level globals shared by every *_convert function below.
fig = plt.figure()
ax1 = fig.add_subplot(411)  # Unipolar NRZ
ax2 = fig.add_subplot(412)  # Unipolar RZ
ax3 = fig.add_subplot(413)  # Manchester
ax4 = fig.add_subplot(414)  # Differential Manchester
# Subplot titles, keyed by the `no` argument each plotting function receives.
names = {1: "Unipolar NRZ", 2: "Unipolar RZ", 3: "Manchester coding", 4: "Differential Manchester coding"}
def line_convert(bits, no):
    """Draw the Unipolar NRZ waveform for the bit string `bits` on ax1.

    Each bit holds its level (0 or 1) for one whole time slot; interior
    slot boundaries are duplicated in the x list to get vertical edges.
    """
    ax1.grid()
    n = len(bits)
    # x-coordinates: 0, 1,1, 2,2, ..., n-1,n-1, n
    xs = [0]
    for boundary in range(1, n):
        xs += [boundary, boundary]
    xs.append(n)
    # y-coordinates: each bit value appears twice (start and end of its slot)
    ys = []
    for bit in bits:
        ys += [int(bit), int(bit)]
    # print(xs, ys)
    ax1.plot(xs, ys, 'r-.o')
    # Label each time slot with its bit value, centred in the slot.
    for idx, bit in enumerate(bits):
        ax1.text(idx + 0.5, 0.5, bit, rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
    ax1.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def rz_line_convert(bits, no):
    """Draw the Unipolar RZ waveform for `bits` on ax2.

    Each bit occupies two half-slots: a 1 is high for the first half and
    returns to zero for the second; a 0 stays at zero throughout.
    """
    ax2.grid()
    n = len(bits)
    # x-coordinates over 2n half-slots, duplicating interior boundaries
    xs = [0]
    for boundary in range(1, 2 * n):
        xs += [boundary, boundary]
    xs.append(2 * n)
    # Four y samples per bit (two per half-slot edge).
    ys = []
    for bit in bits:
        if int(bit) == 1:
            ys += [1, 1, 0, 0]
        elif int(bit) == 0:
            ys += [0, 0, 0, 0]
    ax2.plot(xs, ys, 'g-.^')
    # Label each bit in the middle of its two half-slots.
    for idx, bit in enumerate(bits):
        ax2.text(2 * idx + 1, 0.5, bit, rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
    ax2.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def mc_line_convert(bits, no):
    """Draw the Manchester-coded waveform for `bits` on ax3.

    Every bit has a mid-slot transition: 1 is high-then-low, 0 is
    low-then-high.
    """
    ax3.grid()
    n = len(bits)
    # x-coordinates over 2n half-slots, duplicating interior boundaries
    xs = [0]
    for boundary in range(1, 2 * n):
        xs += [boundary, boundary]
    xs.append(2 * n)
    # Four y samples per bit: first half then second half of the slot.
    ys = []
    for bit in bits:
        if int(bit) == 1:
            ys += [1, 1, 0, 0]
        elif int(bit) == 0:
            ys += [0, 0, 1, 1]
    ax3.plot(xs, ys, 'b-.s')
    # Label each bit in the middle of its slot.
    for idx, bit in enumerate(bits):
        ax3.text(2 * idx + 1, 0.5, bit, rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
    ax3.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def differential_manchester(bits, no):
    """Draw the Differential Manchester waveform for `bits` on ax4.

    The level of each half-bit depends on the PREVIOUS symbol, tracked in
    `pre` ('S' / 'Z' state tags): a 0 repeats the previous transition
    direction, a 1 inverts it. `lock` marks whether the first bit (which
    seeds the state) has been emitted yet.
    """
    inp1 = [int(i) for i in bits]
    # li: sequence of +1/-1 half-bit levels; lock: first-bit-done flag;
    # pre: last transition direction ('S' or 'Z')
    li, lock, pre = [], False, ''
    for i in range(len(inp1)):
        if inp1[i] == 0 and not lock:
            # first bit is 0: low, low, then rise
            li.append(-1)
            li.append(-1)
            li.append(1)
            lock = True
            pre = 'S'
        elif inp1[i] == 1 and not lock:
            # first bit is 1: high, high, then fall
            li.append(1)
            li.append(1)
            li.append(-1)
            lock = True
            pre = 'Z'
        else:
            if inp1[i] == 0:
                # 0: keep the previous transition direction
                if pre == 'S':
                    li.append(-1)
                    li.append(1)
                else:
                    li.append(1)
                    li.append(-1)
            else:
                # 1: flip the transition direction (and the state tag)
                if pre == 'Z':
                    pre = 'S'
                    li.append(-1)
                    li.append(1)
                else:
                    pre = 'Z'
                    li.append(1)
                    li.append(-1)
    # Label each bit in the middle of its two half-slots.
    j = 0
    for i in range(0, len(bits) * 2, 2):
        ax4.text(i + 1, 0.5, bits[j], rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
        j += 1
    ax4.grid()
    # steps-pre renders the +1/-1 list as a square wave
    ax4.plot(li, color='red', drawstyle='steps-pre', marker='>')
    ax4.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def plot(bits):
    """Render all four line-coding waveforms for `bits` and show the figure."""
    encoders = (
        (line_convert, 1),
        (mc_line_convert, 3),
        (rz_line_convert, 2),
        (differential_manchester, 4),
    )
    for encoder, subplot_id in encoders:
        encoder(bits, subplot_id)
    plt.subplots_adjust(hspace=0.55)
    plt.show()
# Entry point: read a bit string from stdin and plot all four encodings of it.
if __name__ == '__main__':
    plot(input('Enter the binary bits sequence: ').strip())
| [
"emylincon@gmail.com"
] | emylincon@gmail.com |
4730d785505d4b447c0411e7674925389bef38ed | b6c7f367306f8f3d9fad7551810c68b392a1b879 | /omoide/tests/unit/test_utils.py | 92cf7e3191f512d335d542af0351cc8503a66dc8 | [
"MIT"
] | permissive | TaXeH/Omoide | c96ef35b1394125fc98367e8a9ef1674718e7e9e | 8ccc9d47e802433bb2de21ff930e6630658cd5e3 | refs/heads/main | 2023-07-18T12:00:15.469707 | 2021-08-28T11:37:23 | 2021-08-28T11:37:23 | 400,773,814 | 0 | 0 | MIT | 2021-08-28T11:17:55 | 2021-08-28T11:17:55 | null | UTF-8 | Python | false | false | 2,579 | py | # -*- coding: utf-8 -*-
"""Tests.
"""
from omoide import utils
def test_byte_count_to_text_ru():
    """Must convert byte counts to a human-readable size, Russian locale.

    Covers negatives, the 1000-vs-1024 boundary, and every magnitude
    from bytes up to exbibytes.
    """
    func = utils.byte_count_to_text
    assert func(-2_000, language='RU') == '-2.0 КиБ'
    assert func(-2_048, language='RU') == '-2.0 КиБ'
    assert func(0, language='RU') == '0 Б'
    assert func(27, language='RU') == '27 Б'
    assert func(999, language='RU') == '999 Б'
    assert func(1_000, language='RU') == '1000 Б'
    assert func(1_023, language='RU') == '1023 Б'
    assert func(1_024, language='RU') == '1.0 КиБ'
    assert func(1_728, language='RU') == '1.7 КиБ'
    assert func(110_592, language='RU') == '108.0 КиБ'
    assert func(1_000_000, language='RU') == '976.6 КиБ'
    assert func(7_077_888, language='RU') == '6.8 МиБ'
    assert func(452_984_832, language='RU') == '432.0 МиБ'
    assert func(1_000_000_000, language='RU') == '953.7 МиБ'
    assert func(28_991_029_248, language='RU') == '27.0 ГиБ'
    assert func(1_855_425_871_872, language='RU') == '1.7 ТиБ'
    assert func(9_223_372_036_854_775_807, language='RU') == '8.0 ЭиБ'
def test_byte_count_to_text_en():
    """Must convert byte counts to a human-readable size, English locale.

    Mirrors the Russian test case-for-case so both localisations stay in sync.
    """
    func = utils.byte_count_to_text
    assert func(-2_000, language='EN') == '-2.0 KiB'
    assert func(-2_048, language='EN') == '-2.0 KiB'
    assert func(0, language='EN') == '0 B'
    assert func(27, language='EN') == '27 B'
    assert func(999, language='EN') == '999 B'
    assert func(1_000, language='EN') == '1000 B'
    assert func(1_023, language='EN') == '1023 B'
    assert func(1_024, language='EN') == '1.0 KiB'
    assert func(1_728, language='EN') == '1.7 KiB'
    assert func(110_592, language='EN') == '108.0 KiB'
    assert func(1_000_000, language='EN') == '976.6 KiB'
    assert func(7_077_888, language='EN') == '6.8 MiB'
    assert func(452_984_832, language='EN') == '432.0 MiB'
    assert func(1_000_000_000, language='EN') == '953.7 MiB'
    assert func(28_991_029_248, language='EN') == '27.0 GiB'
    assert func(1_855_425_871_872, language='EN') == '1.7 TiB'
    assert func(9_223_372_036_854_775_807, language='EN') == '8.0 EiB'
def test_sep_digits():
    """Must separate digits in thousands groups.

    Strings pass through untouched; ints are grouped; floats are grouped
    and rounded to `precision` decimal places (default 2).
    """
    func = utils.sep_digits
    assert func('12345678') == '12345678'
    assert func(12345678) == '12 345 678'
    assert func(1234.5678) == '1 234.57'
    assert func(1234.5678, precision=4) == '1 234.5678'
    assert func(1234.0, precision=4) == '1 234.0000'
    assert func(1234.0, precision=0) == '1 234'
| [
"nicord@yandex.ru"
] | nicord@yandex.ru |
56805d4ddf91239e2f1e5d9219a569abc53ef185 | 48deb00f51be78bd0e2eac898428a783b1fb95da | /ejemplos/funcion_palindromo.py | 0dcef6a2c971bfd2d1ed5d36358323128864c922 | [] | no_license | jkaalexkei/TrabajosEnPython | 435bc77982bc3b44f080e28732ff0c439a2c41cd | 764965ddc6736e94e61d7ab8419cc5e33a767a0b | refs/heads/master | 2023-06-04T21:35:56.522264 | 2021-06-22T02:04:51 | 2021-06-22T02:04:51 | 352,990,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
def palindromo(texto):
lista =[]
for i in texto:
lista.append(i)
print(lista)
lista_inversa = lista[::-1]#se invierte la lista
print(lista_inversa)
if lista == lista_inversa:
print("verdadero, si es palindromo")
else:
print("Falso, no es palindromo")
palindromo("alex")
| [
"jkaalexkei@gmail.com"
] | jkaalexkei@gmail.com |
3ac66d934734757dcd65a9fc45415ee138398ade | ec9431bc95f5b832876e16351967aef19b1395d7 | /ci-hpc/visualisation/www/plot/highcharts.py | 2f107515cadada74fee026073ba8c3fbe4d2c584 | [
"MIT"
] | permissive | elichad/ci-hpc | 743b828c1e410d6dc1d457121e9508ee2de6f420 | 62c45a2e12a515a7034edafc2f3139f820f8bfeb | refs/heads/master | 2020-03-26T06:09:45.508223 | 2018-08-10T09:30:25 | 2018-08-10T09:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,640 | py | #!/usr/bin/python3
# author: Jan Hybs
import collections
import copy
import pandas as pd
import numpy as np
import utils.dateutils as dateutils
from artifacts.db.mongo import Fields as db
from utils.datautils import ensure_iterable
from utils.logging import logger
from utils.timer import Timer
from visualisation.www.plot.highcharts_config import HighchartsConfig, HighchartsSeries
from visualisation.www.plot.highcharts_config import HighchartsChart as Chart
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class dotdict(dict):
    """A dict whose keys can also be read/written as attributes.

    Missing attributes resolve to None (``dict.get`` semantics) rather
    than raising AttributeError.
    """
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def copy(self):
        """Return a copy of this mapping as a dotdict.

        :rtype: dotdict
        """
        return self.copy_cls(dotdict)

    def copy_cls(self, cls):
        """Copy this mapping into a fresh instance of `cls`.

        Nested dict values are copied via their own ``copy()``;
        everything else gets a shallow ``copy.copy``.
        """
        duplicate = cls()
        for key, value in self.items():
            duplicate[key] = value.copy() if isinstance(value, dict) else copy.copy(value)
        return duplicate
def merge(dct, merge_dct):
    """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct`` in place.

    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: dct (the same object, mutated)
    """
    # BUG FIX: ``collections.Mapping`` was a deprecated alias removed in
    # Python 3.10; the ABC lives in ``collections.abc``.
    from collections.abc import Mapping
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(v, Mapping)):
            # Both sides hold a mapping for this key: merge recursively.
            merge(dct[k], v)
        else:
            # Otherwise the incoming value wins outright.
            dct[k] = v
    return dct
def _fillna(df):
return df.where(pd.notnull(df), None)
def _group_data(df, agg, x=db.GIT_DATETIME, y=db.DURATION, rename=None):
    """Group `df` by column `x`, aggregate with `agg`, then rename columns.

    :param df: source DataFrame
    :param agg: aggregation spec passed to ``DataFrame.aggregate``
    :param x: column to group by (defaults to the mongo git-datetime field)
    :param y: value column (only used to build the default rename map)
    :param rename: mapping old->new column names; ``False`` skips renaming,
        ``None`` uses ``{x: 'x', y: 'y'}``
    :type rename: dict
    """
    with Timer('highcharts: data group: agg', log=logger.debug):
        result = df.groupby(x).aggregate(agg).reset_index()
    if rename is False:
        # caller wants the raw aggregated frame
        return result
    if rename is None:
        rename = dict(x=x, y=y)
    # Copy each old column under its new name, then drop the originals.
    dels = set()
    for k, v in rename.items():
        result[v] = result[k]
        dels.add(k)
    for k in dels:
        del result[k]
    return result
def _ci_area(df, ci=(+0.05, -0.05), shift=1):
    """Build confidence-band frames around df['y'], one per offset in `ci`.

    Each band scales 'y' by (1 + offset); a positive `shift` delays the
    band by that many samples (padding the front with repeated values).
    NaNs are normalised to None via _fillna.
    """
    bands = []
    for offset in ci:
        band = df.copy()
        band['y'] = df['y'] + df['y'] * offset
        values = list(band['y'].values)
        if shift and len(values) > shift:
            # delay the band: repeat the first `shift` values, drop the tail
            values = values[:shift] + values[:-shift]
        band['y'] = values
        bands.append(_fillna(band))
    return bands
def highcharts_frame_in_time(df, config, estimator=np.mean, title=None, color=None, args=None, add_std=True, add_errorbar=True, metric_name=None):
    """Build a Highcharts config plotting the estimator of `y` over `x`.

    Produces a mean line plus, optionally, a +/- one-std band and 2.5%/5%
    error bars, grouped by the project's test-view configuration.

    :type config: visualisation.www.plot.cfg.project_config.ProjectConfig
    :type args: argparse.Namespace
    :type df: pd.DataFrame
    :param estimator: aggregation applied to `y` per `x` group (default mean)
    :param add_std: include the shaded +/- std band series
    :param add_errorbar: include the 2.5% and 5% errorbar series
    """
    x = config.test_view.x_prop
    y = config.test_view.y_prop
    # smooth curves use spline variants of the series types
    linetype = Chart.TYPE_SPLINE if config.test_view.smooth else Chart.TYPE_LINE
    areatype = Chart.TYPE_AREA_SPLINE_RANGE if config.test_view.smooth else Chart.TYPE_AREA_RANGE

    agg, renames = config.get_test_view_groupby()
    agg.update({
        y: [estimator, np.std],
    })
    renames.update({
        x: 'x'
    })

    with Timer('highcharts: data group', log=logger.debug):
        result = _group_data(
            df, agg, x=x, rename=renames
        )

    commits, uuids = result['commit'], result['id']
    mean, std = result[y]['mean'], result[y]['std']

    # +/- one standard deviation band around the mean
    stds = pd.DataFrame()
    stds['x'] = result['x']
    stds['low'] = mean - std
    stds['high'] = mean + std

    # +/- 2.5% of the mean (5% total spread)
    e5 = pd.DataFrame()
    e5['x'] = result['x']
    e5['low'] = mean - mean*0.025
    e5['high'] = mean + mean*0.025

    # +/- 5% of the mean (10% total spread)
    e10 = pd.DataFrame()
    e10['x'] = result['x']
    e10['low'] = mean - mean*0.05
    e10['high'] = mean + mean*0.05

    means = pd.DataFrame()
    means['x'] = result['x']
    means['y'] = mean

    # obj.rangeSelector = dotdict(selected=1)
    # obj.showInNavigator = True

    obj = HighchartsConfig()
    obj.title.text = title
    obj.xAxis.title.text = config.test_view.x_prop
    obj.yAxis.title.text = config.test_view.y_prop
    obj.xAxis.type = 'category'

    # main line: the estimator of y per x group
    obj.add(HighchartsSeries(
        type=linetype,
        name='mean' if not metric_name else metric_name,
        data=means,
        commits=commits,
        marker=dotdict(enabled=True),
        uuids=uuids,
        point=dotdict(events=dotdict()),
        color=color,
        allowPointSelect=True,
        zIndex=1,
    ))

    if add_std:
        obj.add(HighchartsSeries(
            type=areatype,
            name='std',
            data=stds,
            commits=commits,
            uuids=uuids,
            color='rgba(0, 0, 0, 0.2)',
            fillColor='rgba(0, 0, 0, 0.05)',
            dashStyle='Dash',
        )),

    if add_errorbar:
        obj.add(HighchartsSeries(
            type='errorbar',
            name='e5',
            data=e5,
            commits=commits,
            uuids=uuids,
            color='rgba(0, 0, 0, 0.3)',
            # stemColor='#FF0000',
            # whiskerColor='#FF0000',
            lineWidth=0.5,
        ))
        obj.add(HighchartsSeries(
            type='errorbar',
            name='e10',
            data=e10,
            commits=commits,
            uuids=uuids,
            color='rgba(0, 0, 0, 0.3)',
            # stemColor='#FF0000',
            # whiskerColor='#FF0000',
            lineWidth=0.5,
        ))
    return obj
def _rename (df, **kwargs):
"""
:rtype: pd.DataFrame
:type df: pd.DataFrame
"""
dels = set()
for k, v in kwargs.items():
if v is None:
del df[k]
else:
df[k] = df[v]
if k != v:
dels.add(v)
for d in dels:
del df[d]
return df
def highcharts_frame_bar(df, config):
    """Build a Highcharts bar-chart config breaking a frame down by duration.

    Frames are sorted by descending duration, entries under 0.1 sec are
    dropped, and one bar series is emitted per frame-view group.

    :type df: pd.DataFrame
    :type config: visualisation.www.plot.cfg.project_config.ProjectConfig
    """
    x = config.frame_view.x_prop
    y = config.frame_view.y_prop

    df = df.sort_values(by=y, ascending=False)
    # hide negligible frames (< 0.1 sec)
    df = df[df[y] > 0.1]

    # df[args.rename['name']] = df[args.rename['name']].apply(lambda x: '\n'.join(x.split('::')))
    # expose x as both the bar label ('name') and drilldown key ('path')
    rename = {
        'y': y,
        'name': x,
        'path': x,
    }
    rename.update(config.frame_view.groupby)
    df = _rename(df, **rename)

    obj = HighchartsConfig()
    obj.tooltip.pointFormat = 'duration <b>{point.y:.2f}</b> sec'
    obj.xAxis.title.text = 'frame'
    obj.yAxis.title.text = 'duration [sec]'
    obj.xAxis.type = 'category'
    obj.legend.align = 'center'
    obj.legend.verticalAlign = 'bottom'
    obj.chart.zoomType = 'x'
    obj.title.text = 'Frame breakdown'
    # obj.xAxis.scrollbar = dotdict(enabled=True)
    # obj.xAxis.min = 0
    # obj.xAxis.max = 4
    # obj.xAxis.tickLength = 0
    # https://jsfiddle.net/gh/get/library/pure/highcharts/highcharts/tree/master/samples/stock/yaxis/inverted-bar-scrollbar
    # del obj.yAxis

    # one bar series per (groupby) combination, labelled with its key=value pairs
    names = list(config.frame_view.groupby.keys())
    for groupby_name, groupby_data in df.groupby(names):
        title_dict = dict(zip(names, ensure_iterable(groupby_name)))
        obj.add(HighchartsSeries(
            type='bar',
            name=', '.join('%s=<b>%s</b>' % (str(k), str(v)) for k, v in title_dict.items()),
            data=groupby_data.to_dict('records'),
            dataLabels=dotdict(
                enabled=False,
                format='{y:.2f} sec'
            )
        ))
    return obj
| [
"jan.hybs@tul.cz"
] | jan.hybs@tul.cz |
f9e9ea5cf0e8aaaf099dc0d00f881e4a8d78ebcc | 69033ac834a34f10df535f102197d3af05e5ee69 | /cmstack/codegen/dnnweavergen/dnnweaver2/tf_utils/__init__.py | fc38259c6600164e872c649c3406bfb281b5de31 | [
"Apache-2.0"
] | permissive | he-actlab/cdstack | 126c3699074bf6ef30f9f9246704069d27e9e614 | 38f605cfa299bf97b5875a19f9fd811a2671d56f | refs/heads/master | 2023-04-10T10:42:10.199207 | 2019-10-03T02:12:49 | 2019-10-03T02:12:49 | 354,713,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,823 | py | import tensorflow as tf
import time
from datetime import datetime
import math
from pynvml import *
import tensorpack
import os
from tqdm import tqdm, trange
import numpy as np
from dnnweaver2.tf_utils.dataset import ImageNetProducer
from dnnweaver2.tf_utils.helper import DataSpec
import logging
def time_tensorflow_run(session, target, info_string, input_shape, labels_shape, images, labels):
    """Benchmark `target` op(s) on random data; print and return (mean, std) step time.

    Runs `num_steps_burn_in` warm-up iterations followed by `num_batches`
    timed iterations, feeding random NHWC images and random labels.

    :param session: an open tf.Session
    :param target: a TF op or list of ops to time (grouped into one op)
    :param info_string: label used in the printed summary
    :param input_shape: NCHW shape of the image placeholder
    :param labels_shape: unused; kept for interface compatibility
    :param images: image placeholder (expects NHWC)
    :param labels: label placeholder
    :return: (mean_seconds, stddev_seconds) per batch
    """
    num_batches = 100
    num_steps_burn_in = 10
    b, c, h, w = input_shape
    # feed NHWC even though input_shape is NCHW (graph transposes internally)
    np_images = np.random.rand(b, h, w, c)
    np_labels = np.random.randint(0, 10, size=(b,))
    total_duration = 0.0
    total_duration_squared = 0.0
    if not isinstance(target, list):
        target = [target]
    target_op = tf.group(*target)
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target_op, feed_dict={images: np_images, labels: np_labels})
        duration = time.time() - start_time
        # BUG FIX: was `i > num_steps_burn_in`, which measured only 99
        # iterations while still dividing by num_batches (100), biasing
        # the mean low. `>=` measures exactly num_batches iterations.
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))
    return mn, sd
def create_tf_graph(dataset, fq_graph, train):
    """Translate a dnnweaver2 quantized graph (`fq_graph`) into a TensorFlow graph.

    Builds per-op TF equivalents (conv, pool, matmul, concat, add, ...),
    applying the quantization scheme implied by the graph's name
    ('qnn' / 'dorefa' / 'wrpn') to weights, activations and gradients.
    Sparsity summaries are attached on CPU; compute is pinned to GPU 0.

    :param dataset: unused here beyond naming; data comes in via placeholders
    :param fq_graph: dnnweaver2 graph with tensor_registry / op_registry
    :param train: when True, attach an Adam training op; otherwise loss only
    :return: (tf.Graph, train_op, merged summaries, data placeholder,
              labels placeholder, logits tensor)
    """
    graph_inputs = {}
    g = tf.Graph()
    print('Creating Tensorflow graph for {}'.format(fq_graph.name))

    # The quantization flavour is encoded in the graph's name.
    if 'qnn' in fq_graph.name.lower():
        quantization_type = 'qnn'
    elif 'dorefa' in fq_graph.name.lower():
        quantization_type = 'dorefa'
    elif 'wrpn' in fq_graph.name.lower():
        quantization_type = 'wrpn'
    else:
        # NOTE(review): `logging.errror` is a typo (AttributeError if ever
        # reached) and `quantization_type` stays unbound afterwards — confirm
        # this branch is intended to be unreachable.
        logging.errror('Unknown quantization type for network: {}'.format(fq_graph.name))

    print('Gradient dtype: {}'.format(fq_graph.grad_dtype))
    grad_dtype = fq_graph.grad_dtype
    grad_bits = grad_dtype.bits
    print('Gradient dtype bits: {}'.format(grad_bits))

    nvmlInit()
    gpu_handle = nvmlDeviceGetHandleByIndex(0)
    gpu_name = nvmlDeviceGetName(gpu_handle)

    def get_sparsity(x):
        # Fraction of zero entries in x, computed on CPU for the summaries.
        with g.name_scope('sparsity_op'):
            with tf.device("/cpu:0"):
                x_size = tf.cast(tf.size(x), tf.float32)
                non_zero = tf.count_nonzero(x, dtype=tf.float32)
                sparsity = 1. - (non_zero / x_size)
            return sparsity

    def quantize(x, k):
        # Uniform k-bit quantization in [0, 1]; Round's gradient is Identity (STE).
        with tf.device("/gpu:0"):
            n = float(2**k - 1)
            with g.gradient_override_map({"Round": "Identity"}):
                return tf.round(x * n) / n

    # Gradient quantizers for 1/2/4/8/16/32 bits; registration is global per
    # process, so re-registration on a second call is swallowed by the
    # try/except below.
    try:
        @tf.RegisterGradient("FGGrad_1bit")
        def grad_fg_1(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 1
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x), list(range(1, rank)), keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                # stochastic rounding: uniform noise of one quantization step
                x = x * 0.5 + 0.5 + tf.random_uniform(
                        tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_2bit")
        def grad_fg_2(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 2
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x), list(range(1, rank)), keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                        tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_4bit")
        def grad_fg_4(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            bitG = 4
            with tf.device("/gpu:0"):
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x), list(range(1, rank)), keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                        tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_8bit")
        def grad_fg_8(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 8
                rank = x.get_shape().ndims
                assert rank is not None
                # NOTE(review): uses `keepdims` while the siblings use the
                # older `keep_dims` — confirm the targeted TF version accepts both.
                maxx = tf.reduce_max(tf.abs(x), list(range(1, rank)), keepdims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                        tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_16bit")
        def grad_fg_16(op, x):
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            with tf.device("/gpu:0"):
                bitG = 16
                rank = x.get_shape().ndims
                assert rank is not None
                maxx = tf.reduce_max(tf.abs(x), list(range(1, rank)), keep_dims=True)
                x = x / maxx
                n = float(2**bitG - 1)
                x = x * 0.5 + 0.5 + tf.random_uniform(
                        tf.shape(x), minval=-0.5 / n, maxval=0.5 / n)
                x = tf.clip_by_value(x, 0.0, 1.0)
                x = quantize(x, bitG) - 0.5
                return x * maxx * 2

        @tf.RegisterGradient("FGGrad_32bit")
        def grad_fg_32(op, x):
            # full precision: pass the gradient through, only log its sparsity
            with tf.device("/cpu:0"):
                tf.summary.scalar('backprop-sparsity', get_sparsity(x))
            return x
    except:
        pass

    def dorefa_quantize_gradient(x, bitG):
        # Route the backward pass through the bitG-bit gradient quantizer.
        with tf.device("/gpu:0"):
            grad_name = 'FGGrad_{}bit'.format(bitG)
            with g.gradient_override_map({"Identity": grad_name}):
                return tf.identity(x)

    def dorefa_quantize_weights(x, bitW):
        with tf.device("/gpu:0"):
            if bitW == 32:
                return x
            if bitW == 1:  # BWN
                with g.gradient_override_map({"Sign": "Identity"}):
                    E = tf.stop_gradient(tf.reduce_mean(tf.abs(x)))
                    return tf.sign(x / E) * E
            x = tf.tanh(x)
            x = x / tf.reduce_max(tf.abs(x)) * 0.5 + 0.5
            return 2 * quantize(x, bitW) - 1

    def wrpn_quantize_weights(x, bitW):
        with tf.device("/gpu:0"):
            cx = tf.clip_by_value(x, -1, 1)
            return quantize(cx, bitW-1)

    def dorefa_quantize_activations(x, bitA):
        with tf.device("/gpu:0"):
            if bitA == 32:
                return x
            return quantize(x, bitA)

    def wrpn_quantize_activations(x, bitA):
        with tf.device("/gpu:0"):
            if bitA == 32:
                return x
            cx = tf.clip_by_value(x, 0, 1)
            return quantize(cx, bitA)

    def _get_weights(shape, name, bits):
        # Create a trainable fp32 variable, return its quantized view.
        w = tf.Variable(tf.random_normal(shape,
                                         dtype=tf.float32,
                                         stddev=1e-1
                                         ),
                        trainable=True,
                        name=name)
        if quantization_type == 'qnn':
            return dorefa_quantize_weights(w, bits)
        elif quantization_type == 'dorefa':
            return dorefa_quantize_weights(w, bits)
        else:
            return wrpn_quantize_weights(w, bits)

    def _get_inputs(shape, name):
        # Placeholders: image data arrives NHWC and is transposed to NCHW;
        # labels arrive as class indices and are one-hot encoded.
        if 'data' in name:
            print(name, shape)
            n, c, h, w = shape
            graph_inputs['inputs/data'] = tf.placeholder(tf.float32, shape=[n,h,w,c], name=name)
            return tf.transpose(graph_inputs['inputs/data'], [0,3,1,2])
        else:
            print(name, shape)
            batch, num_classes = shape[0], shape[1]
            graph_inputs['inputs/labels'] = tf.placeholder(tf.int32, shape=[batch], name=name)
            return tf.one_hot(graph_inputs['inputs/labels'], num_classes)

    def _nonlin(x, bits):
        # fp32 path uses ReLU; quantized paths use a clipped identity
        if bits == 32:
            return tf.nn.relu(x)
        return tf.clip_by_value(x, 0., 1.)

    def _activation(x, bits):
        # Quantize activations forward and gradients backward.
        with tf.device("/gpu:0"):
            with tf.name_scope('activation'):
                if quantization_type == 'dorefa':
                    qa = dorefa_quantize_activations(_nonlin(x, bits), bits)
                    ret = dorefa_quantize_gradient(qa, grad_bits)
                elif quantization_type == 'qnn':
                    qa = dorefa_quantize_activations(_nonlin(x, bits), bits)
                    ret = dorefa_quantize_gradient(qa, grad_bits)
                else:
                    # NOTE(review): `act` is undefined here — its definition
                    # is commented out below, so the WRPN branch raises
                    # NameError if taken. Confirm intended behavior.
                    # act = tf.nn.relu(x)
                    qa = wrpn_quantize_activations(act, bits)
                    ret = dorefa_quantize_gradient(qa, 32)
                return ret

    def _conv(op):
        # Convolution + bias + quantized activation, NCHW layout.
        with tf.name_scope(op.name):
            strides = [1, 1, op.stride[-2], op.stride[-1]]
            i = tf_tensor_registry[op.data.name]
            with tf.device("/cpu:0"):
                tf.summary.scalar('fwdprop-sparsity', get_sparsity(i))
            with tf.device("/gpu:0"):
                cout = op.weights.shape[-4]
                cin = op.weights.shape[-3]
                kh = op.weights.shape[-2]
                kw = op.weights.shape[-1]
                w = _get_weights([kh, kw, cin, cout],
                                 name=op.weights.name,
                                 bits=op.weights.dtype.bits
                                 )
                b = _get_weights([cout],
                                 name=op.name + 'bias',
                                 bits=op.weights.dtype.bits
                                 )
                pad = 'SAME' if op.pad[0] > 0 else 'VALID'
                # re-transpose if the producer emitted NHWC
                if i.shape[1] != cin:
                    i = tf.transpose(i, [0,3,1,2])
                conv_out = tf.nn.conv2d(i, w, strides, pad, name=op.name, data_format='NCHW')
                o = _activation(
                        tf.nn.bias_add(conv_out, b, data_format='NCHW'),
                        op.output_tensors.dtype.bits
                )
                tf_tensor_registry[op.output_tensors.name] = o
                # print(op.output_tensors.name)

    def _maxpool(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                strides = [1, 1, op.stride[-2], op.stride[-1]]
                i = tf_tensor_registry[op.data.name]
                pad = 'SAME' if op.pad[0] > 0 else 'VALID'
                kernel = [1, 1, op.pooling_kernel[-2], op.pooling_kernel[-1]]
                o = tf.nn.max_pool(i, kernel, strides, pad, data_format='NCHW')
                tf_tensor_registry[op.output_tensors.name] = o

    def _flatten(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                i = tf_tensor_registry[op.data.name]
                o = tf.reshape(i, op.output_tensors.shape)
                tf_tensor_registry[op.output_tensors.name] = o

    def _matmul(op):
        # Fully-connected layer: weights on CPU, matmul + activation on GPU.
        with tf.name_scope(op.name):
            with tf.device("/cpu:0"):
                w = _get_weights(op.weights.shape,
                                 name=op.weights.name,
                                 bits=op.weights.dtype.bits
                                 )
                b = tf.Variable(tf.constant(0.0, shape=[op.output_tensors.shape[-1]], dtype=tf.float32),
                                trainable=True, name='biases')
                i = tf_tensor_registry[op.data.name]
                tf.summary.scalar('fwdprop-sparsity', get_sparsity(i))
            with tf.device("/gpu:0"):
                o = _activation(
                        tf.matmul(i, w) + b,
                        op.output_tensors.dtype.bits
                )
                tf_tensor_registry[op.output_tensors.name] = o

    def _xentropy(op):
        # Softmax cross-entropy; also records logits and loss for callers.
        with tf.device("/gpu:0"):
            with tf.name_scope('X-entropy'):
                logits = tf_tensor_registry[op.logits.name]
                tf_tensor_registry['logits'] = logits
                labels = tf_tensor_registry[op.labels.name]
                cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
                        logits=logits, labels=labels, name=op.output_tensors.name)
                tf_tensor_registry['loss'] = cross_entropy

    def _concat(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                assert len(op.data) > 1, op.data
                input_tensors = [tf_tensor_registry[x.name] for x in op.data]
                o = tf.concat(input_tensors, op.concat_dim, name=op.name)
                tf_tensor_registry[op.output_tensors.name] = o

    def _add(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                assert len(op.data) == 2, op.data
                a, b = [tf_tensor_registry[x.name] for x in op.data]
                o = a + b
                tf_tensor_registry[op.output_tensors.name] = o

    def _globalAvgPool(op):
        with tf.device("/gpu:0"):
            with tf.name_scope(op.name):
                i = tf_tensor_registry[op.data.name]
                n,c,h,w = op.data.shape
                # average over spatial dims (H, W) of NCHW input
                o = tf.reduce_mean(i, [2,3])
                tf_tensor_registry[op.output_tensors.name] = o

    with g.as_default():
        # Walk the dnnweaver2 registries and materialise each op in TF.
        # NOTE(review): `iteritems()` is Python-2-only dict API — this module
        # mixes py2/py3 idioms; confirm the intended interpreter version.
        tf_tensor_registry = {}
        for tname, t in fq_graph.tensor_registry.iteritems():
            if t.name.startswith('input'):
                i = _get_inputs(t.shape, t.name)
                tf_tensor_registry[tname] = i

        for opname, op in fq_graph.op_registry.iteritems():
            if op.__class__.__name__ == 'Convolution':
                _conv(op)
            elif op.__class__.__name__ == 'MaxPooling':
                _maxpool(op)
            elif op.__class__.__name__ == 'Flatten':
                _flatten(op)
            elif op.__class__.__name__ == 'MatMul':
                _matmul(op)
            elif op.__class__.__name__ == 'CrossEntropyLoss':
                _xentropy(op)
            elif op.__class__.__name__ == 'Concat':
                _concat(op)
            elif op.__class__.__name__ == 'Add':
                _add(op)
            elif op.__class__.__name__ == 'GlobalAvgPooling':
                _globalAvgPool(op)
            else:
                # backward ops from the dnnweaver2 graph are created by TF's
                # autodiff, so they are skipped here
                name = op.__class__.__name__
                assert 'Backprop' in name or 'Grad' in name, name

        loss = tf_tensor_registry['loss']
        if train:
            with tf.device("/gpu:0"):
                lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
                global_step = tf.train.get_or_create_global_step(graph=tf.get_default_graph())
                opt = tf.train.AdamOptimizer(lr, epsilon=1e-5)
                train_op = opt.minimize(loss, global_step=global_step)
            with tf.device("/cpu:0"):
                tf.summary.scalar('learning_rate', lr)
        else:
            train_op = loss

        graph_data = graph_inputs['inputs/data']
        graph_labels = graph_inputs['inputs/labels']
        graph_logits = tf_tensor_registry['logits']
        print(graph_data, graph_labels, graph_logits)

    return g, train_op, tf.summary.merge_all(), graph_data, graph_labels, graph_logits
class GPUPowerMonitor(object):
    """Samples GPU power draw via NVML on a background thread.

    Usage: pmon = GPUPowerMonitor(0); pmon.start(); ...work...; mn, sd = pmon.stop()
    NVML must already be initialised (nvmlInit()) before construction.
    """

    def __init__(self, id):
        self.handle = nvmlDeviceGetHandleByIndex(id)
        self.measure = False           # sampling-loop run flag
        self.measure_thread = None     # background sampler thread
        self.mean_power = 0            # result: mean power (milliwatts)
        self.variance_power = 0        # result: stddev of power (milliwatts)

    def measure_power(self):
        """Sampling loop: poll power every ~50 ms until stop() clears the flag."""
        iter = 0
        mn = 0
        sd = 0
        while self.measure:
            p = nvmlDeviceGetPowerUsage(self.handle)
            mn += p
            sd += p * p
            iter += 1
            time.sleep(0.05)
        # BUG FIX: guard against stop() arriving before the first sample,
        # which previously raised ZeroDivisionError.
        if iter == 0:
            self.mean_power = 0
            self.variance_power = 0
            return
        mn /= iter
        sd = sd / iter - mn * mn
        # clamp tiny negative values from float rounding before sqrt
        sd = math.sqrt(max(sd, 0.0))
        self.mean_power = mn
        self.variance_power = sd

    def start(self):
        """Launch the daemon sampling thread."""
        # BUG FIX: `threading` was used but never imported at module level,
        # so start() raised NameError; import it locally here.
        import threading
        self.measure = True
        self.measure_thread = threading.Thread(target=self.measure_power, args=())
        self.measure_thread.daemon = True
        self.measure_thread.start()

    def stop(self):
        """Stop sampling, join the thread, and return (mean, stddev) in mW."""
        assert self.measure_thread is not None
        self.measure = False
        self.measure_thread.join()
        return self.mean_power, self.variance_power
def get_tf_performance(dnnweaver2_graph, phase):
    """Measure step time and GPU power for a dnnweaver2 graph in TensorFlow.

    :param dnnweaver2_graph: graph to translate and benchmark
    :param phase: 'forward+backward' enables training; anything else is
        inference-only
    :return: (time_mean_s, time_std_s, power_mean_W, power_std_W)
    """
    train = phase == 'forward+backward'
    print(train, phase)
    g, train_op, sparsity_op, data, labels, logits = create_tf_graph('random', dnnweaver2_graph, train)
    # monitor GPU 0 power in the background while the benchmark runs
    pmon = GPUPowerMonitor(0)
    input_shape = dnnweaver2_graph.tensor_registry['inputs/data'].shape
    label_shape = dnnweaver2_graph.tensor_registry['inputs/labels'].shape
    with g.as_default():
        init = tf.global_variables_initializer()
        sess = tf.Session('')
        sess.run(init)
        pmon.start()
        time_mn, time_sd = time_tensorflow_run(sess, train_op, phase, input_shape, label_shape, data, labels)
        switch = False  # NOTE(review): unused; presumably leftover — confirm
        p = pmon.stop()
    # NVML reports milliwatts; convert to watts
    power_mn, power_sd = p
    power_mn /= 1000.
    power_sd /= 1000.
    return time_mn, time_sd, power_mn, power_sd
def time_tensorflow_run_breakdown(session, target, info_string, input_shape, labels_shape, images, labels, writer):
    """Benchmark `target` with full tracing and write run metadata for profiling.

    Like time_tensorflow_run, but the timed iterations run with
    FULL_TRACE options and their metadata is periodically written to the
    summary `writer` for per-op breakdown in TensorBoard.

    :param writer: tf.summary.FileWriter receiving the run metadata
    :return: (mean_seconds, stddev_seconds) per timed batch
    """
    num_batches = 10
    num_steps_burn_in = 10
    b, c, h, w = input_shape
    # feed NHWC even though input_shape is NCHW (graph transposes internally)
    np_images = np.random.rand(b, h, w, c)
    np_labels = np.random.randint(0, 10, size=(b,))
    total_duration = 0.0
    total_duration_squared = 0.0
    if not isinstance(target, list):
        target = [target]
    target_op = tf.group(*target)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    print('Getting breakdown')
    for i in range(num_batches + num_steps_burn_in):
        print('iteration {}'.format(i))
        # BUG FIX: was `i > num_steps_burn_in`, which (a) measured only
        # num_batches-1 iterations while dividing by num_batches and
        # (b) made the `i % 10 == 0` metadata write unreachable (the only
        # multiple of 10 in the window, i == 10, was excluded).
        if i >= num_steps_burn_in:
            start_time = time.time()
            _ = session.run(target_op, options=run_options, run_metadata=run_metadata, feed_dict={images: np_images, labels: np_labels})
            duration = time.time() - start_time
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
                writer.add_run_metadata(run_metadata, 'step%d' % i)
            total_duration += duration
            total_duration_squared += duration * duration
        else:
            # warm-up: run without tracing, untimed
            _ = session.run(target_op, feed_dict={images: np_images, labels: np_labels})
    mn = total_duration / num_batches
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))
    return mn, sd
def get_tf_breakdown(dnnweaver2_graph):
    """Build the TF graph for `dnnweaver2_graph` and profile one fwd+bwd pass.

    Writes the graph summary (plus per-step run metadata added by
    time_tensorflow_run_breakdown) under a per-network log directory.

    Returns (time_mean, time_sd, power_mean, power_sd). Power is not
    measured in this code path, so the power slots are None.
    """
    g, train_op, sparsity_op, data, labels, logits = create_tf_graph('random', dnnweaver2_graph, train=True)
    input_shape = dnnweaver2_graph.tensor_registry['inputs/data'].shape
    label_shape = dnnweaver2_graph.tensor_registry['inputs/labels'].shape
    log_path = '/home/hardik/workspace/tf-logs/breakdown/{}/summary'.format(dnnweaver2_graph.name)
    with g.as_default():
        init = tf.global_variables_initializer()
        sess = tf.Session('')
        writer = tf.summary.FileWriter(log_path, sess.graph)
        sess.run(init)
        time_mn, time_sd = time_tensorflow_run_breakdown(sess, train_op, 'forward+backward', input_shape, label_shape, data, labels, writer)
    # BUGFIX: the original returned undefined power_mn/power_sd (NameError).
    # Keep the 4-tuple shape callers expect, with None for the unmeasured
    # power statistics. (Also dropped unused save_path/save_file/switch.)
    return time_mn, time_sd, None, None
def print_sparsity(sess, train_op, sparsity_op):
    """Run 100 training steps, evaluating the sparsity op after each step."""
    num_steps = 100
    for _ in range(num_steps):
        for op in (train_op, sparsity_op):
            sess.run(op)
def get_tf_sparsity(dnnweaver2_graph, num_epochs=10):
    """Train the network built from `dnnweaver2_graph` on ImageNet batches.

    Restores from a per-network checkpoint when one exists, otherwise
    initializes randomly; logs train accuracy every 100 steps and saves a
    checkpoint at the same cadence.
    NOTE(review): sparsity_ops is unpacked but never used here — confirm
    whether sparsity collection was meant to run during training.
    """
    print(dnnweaver2_graph.name)
    g, train_op, sparsity_ops, data, labels, logits = create_tf_graph('alexnet', dnnweaver2_graph, train=True)
    data_shape = dnnweaver2_graph.tensor_registry['inputs/data'].shape
    batch_size = data_shape[0]
    print(data, labels)
    assert data is not None
    assert labels is not None
    with g.as_default():
        log_path = '/home/hardik/workspace/tf-logs/train/{}/summary'.format(dnnweaver2_graph.name)
        save_path = '/home/hardik/workspace/tf-logs/train/{}/ckpt'.format(dnnweaver2_graph.name)
        save_file = os.path.join(save_path, 'model.ckpt')
        saver = tf.train.Saver()
        # Resume from checkpoint if one was written by a previous run.
        if os.path.isfile(os.path.join(save_path, 'checkpoint')):
            print('Restoring model from path: {}'.format(save_path))
            # tf.reset_default_graph()
            sess = tf.Session('')
            saver.restore(sess, save_file)
        else:
            print('No checkpoint found at path: {}'.format(save_path))
            print('Initializing with random data')
            init = tf.global_variables_initializer()
            sess = tf.Session('')
            sess.run(init)
        train_writer = tf.summary.FileWriter(log_path, sess.graph)
        data_spec = DataSpec(batch_size=batch_size, scale_size=256, crop_size=data_shape[-1], isotropic=False)
        image_producer = ImageNetProducer(val_path='/imagenet-data/train/train-clean.txt', data_path='/imagenet-data/train/', data_spec=data_spec)
        # Train-accuracy summary over the current batch.
        predictions = tf.cast(tf.argmax(tf.nn.softmax(logits),1), tf.int32)
        correct_prediction = tf.equal(predictions, labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('train_accuracy', accuracy)
        global_step = tf.train.get_or_create_global_step(graph=g)
        merged = tf.summary.merge_all()
        # i continues from the restored global step so summaries line up.
        i=sess.run(global_step)
        for e in range(num_epochs):
            coordinator = tf.train.Coordinator()
            threads = image_producer.start(session=sess, coordinator=coordinator)
            print('Epoch: {}'.format(e))
            for _labels, _images in tqdm(image_producer.batches(sess), total=image_producer.num_batches):
                if i%100 == 0:
                    summary, _ = sess.run([merged, train_op], feed_dict={data: _images, labels: _labels})
                    train_writer.add_summary(summary, i)
                    # NOTE(review): saver.save() returns the checkpoint file
                    # path and overwrites the directory variable save_path.
                    save_path = saver.save(sess, save_file)
                else:
                    _ = sess.run([train_op], feed_dict={data: _images, labels: _labels})
                i += 1
                # NOTE(review): only breaks the inner (batch) loop; each
                # epoch restarts the producer and loops again — confirm.
                if i > 1000:
                    break
            coordinator.request_stop()
            coordinator.join(threads, stop_grace_period_secs=2)
| [
"sean.kinzer@gmail.com"
] | sean.kinzer@gmail.com |
ae2ec3f90bade82ccddeed661b5e315c0a5f5a14 | 1fdad1e45625edf2fdaf26ca284054461839a2d4 | /rmzfzc/rmzfzc/spiders/jilin_zwgk.py | 0bdaaba3f5ff6751cf7f78211ea6d56d11d32b53 | [] | no_license | Mdxue/crawler-scrapy | b99a61f2fa0eff7a1194495314acdb7907018369 | ed4dcf5ffc15b60f9efa5d7e5093872343d2bbd0 | refs/heads/master | 2020-12-09T08:02:53.134473 | 2020-01-11T13:44:28 | 2020-01-11T13:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,292 | py | # -*- coding: utf-8 -*-
import scrapy
import logging
from scrapy_splash import SplashRequest
from rmzfzc.items import rmzfzcItem
import time
from utils.tools.attachment import get_attachments,get_times
script = """
function wait_for_element(splash, css, maxwait)
-- Wait until a selector matches an element
-- in the page. Return an error if waited more
-- than maxwait seconds.
if maxwait == nil then
maxwait = 10
end
return splash:wait_for_resume(string.format([[
function main(splash) {
var selector = '%s';
var maxwait = %s;
var end = Date.now() + maxwait*1000;
function check() {
if(document.querySelector(selector)) {
splash.resume('Element found');
} else if(Date.now() >= end) {
var err = 'Timeout waiting for element';
splash.error(err + " " + selector);
} else {
setTimeout(check, 200);
}
}
check();
}
]], css, maxwait))
end
function main(splash, args)
splash:go(args.url)
assert(splash:wait(0.1))
wait_for_element(splash, "#content > tr > td")
js = string.format("document.querySelector('#jump').value =%s", args.page)
splash:evaljs(js)
assert(splash:wait(0.1))
splash:runjs("document.querySelector('.tmptabel').innerHTML = ''")
assert(splash:wait(0.1))
splash:runjs("document.querySelector('.go-button').click()")
assert(splash:wait(0.1))
wait_for_element(splash, ".tmptabel > tbody > tr > td")
return splash:html()
end
"""
class GansuSpider(scrapy.Spider):
    """Scrapy+Splash spider for the Jilin government information-disclosure site.

    NOTE(review): the class is named GansuSpider but `name` and all content
    target Jilin — looks copied from a sibling spider; confirm before renaming.
    """
    name = 'jilin_zwgk'
    custom_settings = {
        'CONCURRENT_REQUESTS': 10,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 10,
        'CONCURRENT_REQUESTS_PER_IP': 0,
        'DOWNLOAD_DELAY': 0.5,
        'DOWNLOADER_MIDDLEWARES' : {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },
        'SPIDER_MIDDLEWARES' : {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
        'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter',
        'HTTPCACHE_STORAGE' : 'scrapy_splash.SplashAwareFSCacheStorage',
        # 'SPIDER_MIDDLEWARES': {
        #     'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        # },
        # 'DOWNLOADER_MIDDLEWARES': {
        #     'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
        #     'utils.middlewares.MyUserAgentMiddleware.MyUserAgentMiddleware': 126,
        #     'utils.middlewares.DeduplicateMiddleware.DeduplicateMiddleware': 130,
        # },
        'ITEM_PIPELINES': {
            'utils.pipelines.MysqlTwistedPipeline.MysqlTwistedPipeline': 64,
            'utils.pipelines.DuplicatesPipeline.DuplicatesPipeline': 100,
        },
        # 'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter',
        # 'HTTPCACHE_STORAGE': 'scrapy_splash.SplashAwareFSCacheStorage',
        'SPLASH_URL': "http://localhost:8050/"}
    def __init__(self, pagenum=None, *args, **kwargs):
        # pagenum (optional CLI arg) limits how many list pages are crawled.
        super().__init__(*args, **kwargs)
        self.add_pagenum = pagenum
    def start_requests(self):
        """Kick off a Splash render of the landing list page (page 2 primes pagination)."""
        try:
            contents = [
                {
                    'url': 'http://xxgk.jl.gov.cn/'
                }
            ]
            for content in contents:
                yield SplashRequest(content['url'],
                                    endpoint = 'execute',
                                    args={
                                        'lua_source': script,
                                        'wait': 1,
                                        'page': 2,
                                        'url': content['url'],
                                    },
                                    callback=self.parse_page,
                                    cb_kwargs=content)
        except Exception as e:
            logging.error(self.name + ": " + e.__str__())
            logging.exception(e)
    def parse_page(self, response, **kwargs):
        """Read the total page count, then request every remaining list page."""
        page_count = int(self.parse_pagenum(response))
        try:
            for pagenum in range(page_count):
                if pagenum > 1:
                    yield SplashRequest(kwargs['url'],
                                        endpoint='execute',
                                        args={
                                            'lua_source': script,
                                            'wait': 1,
                                            'page': pagenum,
                                            'url': kwargs['url']
                                        },
                                        callback=self.parse)
        except Exception as e:
            logging.error(self.name + ": " + e.__str__())
            logging.exception(e)
    def parse_pagenum(self, response):
        """Return how many list pages to crawl (full count, or the CLI override)."""
        try:
            # Decide here whether this is an incremental crawl: if the
            # pagenum argument was not passed when launching the script,
            # crawl the complete set of list pages.
            if not self.add_pagenum:
                return int(response.xpath('//*[@id="page-body"]/a[last()-1]/@data-page').extract_first()) + 1
            return self.add_pagenum
        except Exception as e:
            logging.error(self.name + ": " + e.__str__())
            logging.exception(e)
    def parse(self, response):
        """Extract title/date/doc-number per row and follow each detail link."""
        for selector in response.xpath('//*[@class="zly_xxmu_20170104ulbg2"]'):
            try:
                item = {}
                item['title'] = selector.xpath('./td[2]/div/a/text()').extract_first().strip()
                item['time'] = selector.xpath('./td[5]/text()').extract_first().strip()
                item['article_num'] = selector.xpath('./td[3]/text()').extract_first().strip()
                href = selector.xpath('./td[2]/div/a/@href').extract_first()
                yield scrapy.Request(href,callback=self.parse_item,dont_filter=True,cb_kwargs=item)
            except Exception as e:
                logging.error(self.name + ": " + e.__str__())
                logging.exception(e)
    def parse_item(self, response, **kwargs):
        """Build the final item from a detail page (content, attachments, metadata)."""
        try:
            item = rmzfzcItem()
            appendix, appendix_name = get_attachments(response)
            item['title'] = kwargs['title']
            item['article_num'] = kwargs['article_num']
            item['time'] = kwargs['time']
            item['content'] = "".join(response.xpath('//div[@class="Custom_UnionStyle"]').extract())
            item['source'] = ''
            item['province'] = '吉林省'
            item['city'] = ''
            item['area'] = ''
            item['website'] = '吉林省人民政府'
            item['module_name'] = '吉林省人民政府-政务公开'
            item['spider_name'] = 'jilin_zwgk'
            item['txt'] = "".join(response.xpath('//div[@class="Custom_UnionStyle"]//text()').extract())
            item['appendix_name'] = appendix_name
            item['link'] = response.request.url
            item['appendix'] = appendix
            item['time'] = get_times(item['time'])
            print(
                "===========================>crawled one item" +
                response.request.url)
        except Exception as e:
            logging.error(
                self.name +
                " in parse_item: url=" +
                response.request.url +
                ", exception=" +
                e.__str__())
            logging.exception(e)
        # NOTE(review): yield is outside the try — if rmzfzcItem() itself
        # raised, `item` would be unbound here; confirm intended.
        yield item
| [
"sn_baby@qq.com"
] | sn_baby@qq.com |
a42429003c82135a616d13a78e0327d1159b7f2f | 84350ae13162c3bb1978ab4340ebb95abca55d34 | /SampleSize/get_ns_sample_sizes.py | e4e48efead29bdd9cf1973e52e8cfb3520a37592 | [
"MIT"
] | permissive | poldracklab/ScanningTheHorizon | 5bd6b31bd7028f4724719c78cca3e89410d4f3e8 | 09b9f64eda5fc5c82e64679aa33d4c3c2685928b | refs/heads/master | 2021-06-14T08:24:59.989406 | 2021-02-10T14:25:27 | 2021-02-10T14:25:27 | 63,067,849 | 8 | 8 | null | 2016-12-10T23:50:12 | 2016-07-11T12:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 3,259 | py | # get_ns_sample_sizes.py - extract estimated sample size data from neurosynth
# Tal Yarkoni, 2016
import re
import traceback
def text2int(textnum, numwords=None):
    """Convert an English number phrase (e.g. 'twenty-one') to an int.

    Hyphens are treated as spaces; a few ordinal forms ('first',
    'twentieth', 'fourth', ...) are also accepted. Raises Exception on a
    word that is not a recognized number word.

    :param textnum: the phrase to parse.
    :param numwords: optional pre-built word -> (scale, increment) table.
        BUGFIX: the original used a mutable default argument (``numwords={}``)
        as a hidden cache, which also silently populated an *empty* dict
        passed in by a caller; the table is now built locally when omitted.
    """
    if not numwords:
        numwords = {}
        units = [
            "zero", "one", "two", "three", "four", "five", "six", "seven",
            "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
            "fifteen", "sixteen", "seventeen", "eighteen", "nineteen",
        ]
        tens = ["", "", "twenty", "thirty", "forty",
                "fifty", "sixty", "seventy", "eighty", "ninety"]
        scales = ["hundred", "thousand", "million", "billion", "trillion"]
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            # "hundred" multiplies by 100 (idx*3 or 2 == 2); later scales by 10**(3*idx).
            numwords[word] = (10 ** (idx * 3 or 2), 0)
    ordinal_words = {'first': 1, 'second': 2, 'third': 3,
                     'fifth': 5, 'eighth': 8, 'ninth': 9, 'twelfth': 12}
    ordinal_endings = [('ieth', 'y'), ('th', '')]
    textnum = textnum.replace('-', ' ')
    current = result = 0
    for word in textnum.split():
        if word in ordinal_words:
            scale, increment = (1, ordinal_words[word])
        else:
            # Strip ordinal suffixes: 'twentieth' -> 'twenty', 'fourth' -> 'four'.
            for ending, replacement in ordinal_endings:
                if word.endswith(ending):
                    word = "%s%s" % (word[:-len(ending)], replacement)
            if word not in numwords:
                raise Exception("Illegal word: " + word)
            scale, increment = numwords[word]
        current = current * scale + increment
        # A scale above 100 (thousand, million, ...) closes the current group.
        if scale > 100:
            result += current
            current = 0
    return result + current
def estimate_n(text):
    """Extract candidate sample sizes (n) from an abstract.

    Finds "<number> [word] <population-word>" phrases — the number may be a
    digit string or an English number word (resolved via text2int) — plus
    explicit "(n = 123)" statements.

    :returns: list of (matched_text, n) tuples, in order of discovery.
    """
    text = text.lower()
    populations = [
        'volunteers', 'subjects', 'individuals', 'participants', 'students',
        'patients', 'outpatients', 'undergraduates', 'adults', 'control',
        'people', 'stroke', 'children'
    ]
    pops = '|'.join(populations)
    patt = r'([a-zA-Z0-9\-]+)\s+([^\s]+\s+)?(%s)' % pops
    matches = re.findall(patt, text)
    n = []
    for m in matches:
        try:
            m0 = m[0]
            # BUGFIX: the original called unicode(m0).isnumeric(), which is a
            # NameError on Python 3; str.isdigit works on both 2 and 3.
            if m0.isdigit():
                n_ = int(m0)
            else:
                n_ = text2int(m0)
            n.append((re.sub(r'\s+', ' ', ' '.join(m)), n_))
        except Exception:
            # Leading word is not a number word (text2int raised): skip.
            pass
    more = re.findall(r'[\(\s]+n\s*=\s*(\d+)', text)
    n.extend([('n = %d' % int(m), int(m)) for m in more])
    return n
# Scan every abstract in abstracts.txt (tab-separated: PMID<TAB>text) and
# write all candidate sample sizes to estimated_n.txt, one row per match;
# the full abstract is appended only to the first match of each PMID.
c = 0
outf = open('estimated_n.txt', 'w')
outf.write('PMID\tn\tcaptured_group\tabstract\n')
for text in open('abstracts.txt').read().splitlines():
    pmid, text = text.split('\t')
    res = estimate_n(text)
    if res:
        for i, r in enumerate(res):
            line = '\t'.join([pmid, str(r[1]), str(r[0])])
            if i == 0:
                line += '\t' + text
            # ns = ', '.join([str(r[1]) for r in res])
            # matches = ', '.join(['"%s"' % str(r[0]) for r in res]).strip()
            # line = '\t'.join([pmid, ns, matches, text])
            outf.write(line + '\n')
            c += 1
# NOTE(review): c counts output rows (matches), not distinct abstracts,
# despite the message — confirm which was intended.
print("Found %d abstracts." % c)
| [
"poldrack@gmail.com"
] | poldrack@gmail.com |
7a4b19779b85a11de4e2f07e299d2bdc2245323b | b6284d558812f07251adfbcda389611028043a4c | /brands/migrations/0006_auto_20180604_0913.py | ffda114cfc851f64bdc47cc39cd8afb239c1100c | [] | no_license | markmurimi/shoe-blog | 980c63c184007cadd5f59fa546f7328a38bed54e | 669f260dfcdb196e47a37d9e12bf7a377f709362 | refs/heads/master | 2020-03-19T00:07:00.550739 | 2018-06-04T13:47:11 | 2018-06-04T13:47:11 | 135,454,230 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-04 06:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: makes brands.Brand.profile a *nullable* FK to
    # brands.Profile (CASCADE on delete), relaxing the previous field.
    dependencies = [
        ('brands', '0005_brand_profile'),
    ]
    operations = [
        migrations.AlterField(
            model_name='brand',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='brands.Profile'),
        ),
    ]
| [
"murimimg180@gmail.com"
] | murimimg180@gmail.com |
6759042deacc240255f80367f88e6bf8224b325a | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/reinstall_server_with_cloud_init_response.py | cfb9cc2e4bdefdc4bf87295e0a15170c6d533e4b | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ReinstallServerWithCloudInitResponse(SdkResponse):
    """Response model for the reinstall-server-with-Cloud-Init ECS API call
    (auto-generated Huawei Cloud SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values must be masked as "****" in to_dict(); none here.
    sensitive_list = []
    openapi_types = {
        'job_id': 'str'
    }
    attribute_map = {
        'job_id': 'job_id'
    }
    def __init__(self, job_id=None):
        """ReinstallServerWithCloudInitResponse - a model defined in huaweicloud sdk"""
        super(ReinstallServerWithCloudInitResponse, self).__init__()
        self._job_id = None
        self.discriminator = None
        if job_id is not None:
            self.job_id = job_id
    @property
    def job_id(self):
        """Gets the job_id of this ReinstallServerWithCloudInitResponse.

        ID of the task returned after the request is accepted; use it to
        query the task's execution status.

        :return: The job_id of this ReinstallServerWithCloudInitResponse.
        :rtype: str
        """
        return self._job_id
    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this ReinstallServerWithCloudInitResponse.

        ID of the task returned after the request is accepted; use it to
        query the task's execution status.

        :param job_id: The job_id of this ReinstallServerWithCloudInitResponse.
        :type: str
        """
        self._job_id = job_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the JSON string representation of the model."""
        import simplejson as json
        return json.dumps(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ReinstallServerWithCloudInitResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
9f2c43f0d001e0f731da8d1a66178d1408dda476 | 384b9368ee0731ed62e48f38c40fc027f08a4b7f | /PycharmProjects/DeepLearning/MachineLearning/Mnist_layers.py | 29702c0870d337404ee60fb79d0cb8c60633bc17 | [] | no_license | secrecy27/MachineLearning | 2ff2d438a91d5b906c61b388deeebeb774f933d3 | f88e6477ec6e4087a347beb168e36b7fe0ea8359 | refs/heads/master | 2021-05-10T07:44:45.703761 | 2019-03-04T02:11:27 | 2019-03-04T02:11:27 | 118,853,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,876 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
learning_rate = 0.001
training_epochs = 15
batch_size = 100
class Model:
    """Three-conv-layer CNN for MNIST built with tf.layers (TF1 graph mode)."""
    def __init__(self, sess, name):
        # name doubles as the variable scope so several models can coexist.
        self.sess = sess
        self.name = name
        self.__build_network()
    def __build_network(self):
        with tf.variable_scope(self.name):
            # training flag: False for test/inference, True during training
            self.training = tf.placeholder(tf.bool)
            self.X = tf.placeholder(tf.float32, shape=[None, 784])
            X_img = tf.reshape(self.X, shape=[-1, 28, 28, 1])
            self.Y = tf.placeholder(tf.float32, shape=[None, 10])
            # --------------------------- conv block 1: 28x28x1 -> 14x14x32
            conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            # NOTE(review): tf.layers.dropout's `rate` is the DROP fraction,
            # so 0.7 discards 70% of activations — confirm this was not
            # meant as a keep probability of 0.7.
            dropout1 = tf.layers.dropout(inputs=pool1, rate=0.7, training=self.training)
            # --------------------------- conv block 2: -> 7x7x64
            conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout2 = tf.layers.dropout(inputs=pool2, rate=0.7, training=self.training)
            # --------------------------- conv block 3: -> 4x4x128
            conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout3 = tf.layers.dropout(inputs=pool3, rate=0.7, training=self.training)
            # --------------------------- dense head: 2048 -> 625 -> 10 logits
            X_flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
            dense4 = tf.layers.dense(inputs=X_flat, units=625, activation=tf.nn.relu)
            dropout4 = tf.layers.dropout(inputs=dense4, rate=0.5, training=self.training)
            self.logits = tf.layers.dense(inputs=dropout4, units=10)
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
        correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    def predict(self, x, training=False):
        """Return raw logits for inputs `x` (dropout disabled by default)."""
        return self.sess.run(self.logits, feed_dict={self.X: x, self.training: training})
    def get_accuracy(self, x, y, training=False):
        """Return mean classification accuracy on (x, y)."""
        return self.sess.run(self.accuracy,
                             feed_dict={self.X: x, self.Y: y, self.training: training})
    def train(self, x_data, y_data, training=True):
        """Run one optimizer step; returns (batch cost, optimizer op result)."""
        return self.sess.run([self.cost, self.optimizer],
                             feed_dict={self.X: x_data, self.Y: y_data, self.training: training})
# Training driver: run `training_epochs` full passes over MNIST, printing
# the average batch cost per epoch, then report test-set accuracy.
sess = tf.Session()
main = Model(sess, "main")
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        cost, _ = main.train(batch_xs, batch_ys)
        # Accumulate the per-batch cost so avg_cost ends as the epoch mean.
        avg_cost += cost / total_batch
    print("Epoch : ", "%04d" % (epoch + 1), "cost = ", "{:.9f}".format(avg_cost))
print("Accuracy : ", main.get_accuracy(mnist.test.images, mnist.test.labels))
| [
"secrecy418@naver.com"
] | secrecy418@naver.com |
65b9d19d5490262f91494a7fbd37661c397b0a1e | 6f3647ede6498a09dd2556130aebe30c9f80db43 | /백트래킹/2580.py | 6e6d1a09d26aca6b9b285868286ec5a891ad63b6 | [] | no_license | dohee479/BAEKJOON | 727a4eb1ce15879017e8edc1662b623babfa3801 | 77ed46da7e0de17d7f3ec9faf5b1bb1efa7b9d6b | refs/heads/master | 2023-03-20T01:30:22.997774 | 2021-03-02T14:54:38 | 2021-03-02T14:54:38 | 275,624,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | # 스도쿠
import sys
sys.stdin = open('input.txt', 'r')
def horizontal(x, val, board=None):
    """Row check: True if `val` does not already appear in row `x`.

    `board` defaults to the module-level `sudoku` grid, so existing
    positional callers are unaffected; passing a grid explicitly makes
    the check reusable and testable.
    """
    if board is None:
        board = sudoku
    return val not in board[x]
def vertical(y, val, board=None):
    """Column check: True if `val` does not appear in column `y`.

    `board` defaults to the module-level `sudoku` grid (see horizontal()).
    """
    if board is None:
        board = sudoku
    for i in range(9):
        if val == board[i][y]:
            return False
    return True
def square(x, y, val, board=None):
    """Box check: True if `val` does not appear in the 3x3 box containing (x, y).

    `board` defaults to the module-level `sudoku` grid (see horizontal()).
    """
    if board is None:
        board = sudoku
    # Top-left corner of the 3x3 box that contains (x, y).
    bx = x // 3 * 3
    by = y // 3 * 3
    for i in range(3):
        for j in range(3):
            if val == board[bx + i][by + j]:
                return False
    return True
def backtrack(index):
    """Fill blank cells zeros[index:] by depth-first search.

    Prints the completed grid and exits the process as soon as the first
    full solution is reached (sys.exit terminates all recursion at once).
    """
    if index == len(zeros):
        for row in sudoku:
            for n in row:
                print(n, end=" ")
            print()
        sys.exit(0)
    else:
        for i in range(1, 10):
            nx = zeros[index][0]
            ny = zeros[index][1]
            # Check that candidate i is absent from the row, column and 3x3 box
            if horizontal(nx, i) and vertical(ny, i) and square(nx, ny, i):
                sudoku[nx][ny] = i
                backtrack(index+1)
                # Undo the placement before trying the next candidate.
                sudoku[nx][ny] = 0
# Read the 9x9 grid from stdin, record coordinates of blank (0) cells, solve.
sudoku = [list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(9)]
zeros = [(i, j) for i in range(9) for j in range(9) if sudoku[i][j] == 0]
backtrack(0)
| [
"dohee479@naver.com"
] | dohee479@naver.com |
625838e78bffab40389132c5f4db754939aa0280 | 67a95330832e8bb83d65b22c6a58badaf416c043 | /01_examples/my_close.py | 262b66cd95fb7ba89ec9cb991ac7dea5fd98c4f0 | [
"MIT"
] | permissive | jabbalaci/GUI-Dev-with-Python | af57fdccd59cd48cd2172988fdaa4074d4ac8c09 | a4ac137965156621f3cf418018ef28f2c20541d9 | refs/heads/master | 2020-09-07T04:41:18.920856 | 2019-11-09T15:56:20 | 2019-11-09T15:56:20 | 220,657,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python3
import sys
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QApplication, QMainWindow, QAction, QShortcut
class Window(QMainWindow):
    """Main window: quit via Ctrl+Q, the 'Q' accelerator, or File > Quit."""
    def __init__(self):
        super().__init__()
        # Global Ctrl+Q shortcut wired straight to close().
        self.shortcutQuit = QShortcut(QKeySequence("Ctrl+Q"), self)
        self.shortcutQuit.activated.connect(self.close)
        self.InitWindow()
    def closeEvent(self, event):
        # Fires on every close path (shortcut, menu item, window button).
        print("# doing some cleanup...")
    def InitWindow(self):
        # Build the menu bar with a File > Quit action (accelerator key: Q).
        self.mainMenu = self.menuBar()
        fileMenu = self.mainMenu.addMenu("&File")
        quitItem = QAction("Quit", self)
        quitItem.setShortcut("Q")
        quitItem.triggered.connect(self.close)
        fileMenu.addAction(quitItem)
fileMenu.addAction(quitItem)
if __name__ == "__main__":
App = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(App.exec()) | [
"jabba.laci@gmail.com"
] | jabba.laci@gmail.com |
88f4d105da6bb88b15b7a37352e9b8f00e417880 | e1de075ee032cf0cebc3e467aa222e35e6e370e9 | /groups/migrations/0004_group_image.py | a09be9649caa8921f8835f234a2c8fe9a5de4a99 | [] | no_license | benaka-tech/scientract | b6c5d2f6ddc483d4023531b9904590f1fa24f35f | 9d89e80778ac1a44e5def4832fc2311a99d77f89 | refs/heads/master | 2022-11-21T12:32:12.217925 | 2020-10-18T05:16:25 | 2020-10-18T05:16:25 | 230,889,764 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.1 on 2019-07-17 04:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds groups.Group.image, an ImageField stored under
    # 'group_pics' with 'default.jpg' as the fallback image.
    dependencies = [
        ('groups', '0003_auto_20190706_1030'),
    ]
    operations = [
        migrations.AddField(
            model_name='group',
            name='image',
            field=models.ImageField(default='default.jpg', upload_to='group_pics'),
        ),
    ]
| [
"cjayanth35@gmail.com"
] | cjayanth35@gmail.com |
5532e746db3d8d199fa70f0b181057bac3d9c0d6 | d9d8b097197baeaadafdacccd86b520773c32d85 | /erifying an Alien Dictionary.py | 26271222349b60aacedd2074ccdf9257cdacb9cc | [] | no_license | RiddhiRex/Leetcode | 5e2f382182c735c6fc721120c6f32726d4cb7414 | eeaa632e4d2b103c79925e823a05072a7264460e | refs/heads/master | 2021-12-23T23:57:31.574066 | 2021-08-14T22:00:23 | 2021-08-14T22:00:23 | 136,531,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | class Solution(object):
def isAlienSorted(self, words, order):
"""
:type words: List[str]
:type order: str
:rtype: bool
"""
l = len(words)
o = {w: i for i,w in enumerate(order)}
for i in range(len(words)-1):
w1= words[i]
w2= words[1+i]
for i in range(min(len(w1), len(w2))):
if w1[i]!=w2[i]:
if o[w1[i]]>o[w2[i]]:
return False
else:
if o[w1[i]]<o[w2[i]]:
break
if w2==w1[0:len(w2)] and len(w1)>len(w2):
return False
return True
| [
"noreply@github.com"
] | RiddhiRex.noreply@github.com |
2dcadf604ebbbfe0a8359400a37f53b2daecfeea | c88d702ef4b64ae90788f5bff007f6c8bc901cd3 | /the-versatile-sieve-of-eratosthenes/eratosthenessievestest.py | d133112a611a16a6552f5bbe0ba76592edd43c03 | [] | no_license | ehem/Nayuki-web-published-code | 566d3cc06d3d94405fa797098914884c8f8164fd | 22003ffb91fd6f63e206dd15b870065f664976f8 | refs/heads/master | 2020-07-30T00:47:27.107344 | 2016-11-18T21:44:29 | 2016-11-18T21:44:29 | 73,651,938 | 1 | 0 | null | 2016-11-14T00:19:06 | 2016-11-14T00:19:06 | null | UTF-8 | Python | false | false | 1,690 | py | #
# Test of variants of the sieve of Eratosthenes (Python)
# by Project Nayuki, 2016. Public domain.
# https://www.nayuki.io/page/the-versatile-sieve-of-eratosthenes
#
import eratosthenessieves, sys
def main():
    """Run both sieve test suites; any failure raises AssertionError."""
    test_values()
    test_prefix_consistency()
def test_values():
    """Spot-check each sieve against hand-verified values for limit 30."""
    assert eratosthenessieves.sieve_primeness(30) == [False, False, True, True, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, False, False, False, False, True, False]
    assert eratosthenessieves.sieve_smallest_prime_factor(30) == [0, 1, 2, 3, 2, 5, 2, 7, 2, 3, 2, 11, 2, 13, 2, 3, 2, 17, 2, 19, 2, 3, 2, 23, 2, 5, 2, 3, 2, 29, 2]
    assert eratosthenessieves.sieve_totient(30) == [0, 1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18, 8, 12, 10, 22, 8, 20, 12, 18, 12, 28, 8]
    assert eratosthenessieves.sieve_omega(30) == [0, 0, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 2, 1, 3]
    assert eratosthenessieves.sieve_radical(30) == [0, 1, 2, 3, 2, 5, 6, 7, 2, 3, 10, 11, 6, 13, 14, 15, 2, 17, 6, 19, 10, 21, 22, 23, 6, 5, 26, 3, 14, 29, 30]
def test_prefix_consistency():
    """Each sieve's output for limit i must extend its output for i-1 by one entry."""
    limit = 3000
    sieve_funcs = (
        eratosthenessieves.sieve_primeness,
        eratosthenessieves.sieve_smallest_prime_factor,
        eratosthenessieves.sieve_totient,
        eratosthenessieves.sieve_omega,
        eratosthenessieves.sieve_radical,
    )
    for sieve in sieve_funcs:
        previous = []
        for bound in range(limit):
            current = sieve(bound)
            assert len(current) == len(previous) + 1
            assert current[:-1] == previous
            previous = current
if __name__ == "__main__":
try:
assert False
sys.exit("Error: Need to run with assertions enabled")
except AssertionError:
main()
| [
"me@nayuki.io"
] | me@nayuki.io |
b4de99c9d8766608c026cc0929e60804e84684cd | e68a40e90c782edae9d8f89b827038cdc69933c4 | /res/scripts/client/gui/scaleform/daapi/view/battle/shared/markers2d/manager.py | 9cd0b7f91da659339fae8c59c961cfa51d0951ee | [] | no_license | webiumsk/WOT-0.9.16 | 2486f8b632206b992232b59d1a50c770c137ad7d | 71813222818d33e73e414e66daa743bd7701492e | refs/heads/master | 2021-01-10T23:12:33.539240 | 2016-10-11T21:00:57 | 2016-10-11T21:00:57 | 70,634,922 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 8,714 | py | # 2016.10.11 22:10:44 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/markers2d/manager.py
import weakref
import GUI
from account_helpers.settings_core import g_settingsCore
import constants
from debug_utils import LOG_CURRENT_EXCEPTION
from gui import DEPTH_OF_VehicleMarker, GUI_SETTINGS, g_guiResetters
from gui.Scaleform import SCALEFORM_SWF_PATH_V3
from gui.Scaleform.Flash import Flash
from gui.Scaleform.daapi.view.meta.VehicleMarkersManagerMeta import VehicleMarkersManagerMeta
from gui.Scaleform.framework.application import DAAPIRootBridge
from gui.Scaleform.daapi.view.battle.shared.markers2d import settings
from gui.Scaleform.daapi.view.battle.shared.markers2d import plugins
from gui.battle_control import g_sessionProvider
from gui.doc_loaders import GuiColorsLoader
from gui.shared.events import GameEvent
class MarkersManager(Flash, VehicleMarkersManagerMeta, plugins.IMarkersManager):
    def __init__(self, parentUI):
        """Creates the 2D vehicle-markers Flash movie and its DAAPI bridge.

        :param parentUI: owning battle UI; queried later to check whether
            a modal view is currently shown.
        """
        super(MarkersManager, self).__init__(settings.MARKERS_MANAGER_SWF, path=SCALEFORM_SWF_PATH_V3)
        self.component.wg_inputKeyMode = 2
        self.component.position.z = DEPTH_OF_VehicleMarker
        self.component.drawWithRestrictedViewPort = False
        self.movie.backgroundAlpha = 0
        self.__plugins = None
        self.__canvas = None
        self.__parentUI = parentUI
        # Binds this object as the Python side of 'root.vehicleMarkersCanvas'.
        self.__daapiBridge = DAAPIRootBridge('root.vehicleMarkersCanvas', 'registerMarkersManager')
        self.__daapiBridge.setPyScript(weakref.proxy(self))
        return
def close(self):
if self.__daapiBridge is not None:
self.__daapiBridge.clear()
self.__daapiBridge = None
super(MarkersManager, self).close()
return
    def setScaleProps(self, minScale = 40, maxScale = 100, defScale = 100, speed = 3.0):
        """Development-only hook: overrides the canvas marker scale properties."""
        if constants.IS_DEVELOPMENT:
            self.__canvas.scaleProperties = (minScale,
             maxScale,
             defScale,
             speed)
    def setAlphaProps(self, minAlpha = 40, maxAlpha = 100, defAlpha = 100, speed = 3.0):
        """Development-only hook: overrides the canvas marker alpha properties."""
        if constants.IS_DEVELOPMENT:
            self.__canvas.alphaProperties = (minAlpha,
             maxAlpha,
             defAlpha,
             speed)
    def createMarker(self, mProv, symbol, active = True):
        """Adds a marker for `symbol` driven by `mProv` (presumably a matrix
        provider — confirm); returns the native marker handle."""
        return self.__canvas.addMarker(mProv, symbol, active)
    def setMarkerActive(self, handle, active):
        """Toggles the marker's active state on the native canvas."""
        self.__canvas.markerSetActive(handle, active)
    def setMarkerMatrix(self, handle, matrix):
        """Re-binds the marker's world transform to `matrix`."""
        self.__canvas.markerSetMatrix(handle, matrix)
    def destroyMarker(self, handle):
        """Removes the marker; no-op when the canvas is already gone."""
        if self.__canvas:
            self.__canvas.delMarker(handle)
def invokeMarker(self, handle, function, args = None):
if handle == -1:
return
else:
if args is None:
args = []
self.__canvas.markerInvoke(handle, (function, args))
return
    def getCanvasProxy(self):
        """Returns a weakref to the native canvas (None after _dispose)."""
        return self.__canvasProxy
    def _populate(self):
        """Creates the canvas, pushes scale/duration/settings/color schemes
        to flash, then wires up plugins and event listeners."""
        super(MarkersManager, self)._populate()
        self.__addCanvas()
        self.__setMarkersScale()
        self.__setMarkerDuration()
        self.__setMarkerSettings()
        self.__setColorsSchemes(g_settingsCore.getSetting('isColorBlind'))
        self.__addListeners()
    def _dispose(self):
        """Unwinds _populate: drops listeners/plugins, canvas, parent ref."""
        self.__removeListeners()
        self.__removeCanvas()
        self.__parentUI = None
        super(MarkersManager, self)._dispose()
        return
    def __addCanvas(self):
        """Creates the native markers canvas (fallout variant on flag arenas)
        and attaches it as a child of this flash component."""
        if g_sessionProvider.arenaVisitor.hasFlags():
            self.__canvas = GUI.WGVehicleFalloutMarkersCanvasFlashAS3(self.movie)
        else:
            self.__canvas = GUI.WGVehicleMarkersCanvasFlashAS3(self.movie)
        self.__canvas.wg_inputKeyMode = 2
        self.__canvas.scaleProperties = GUI_SETTINGS.markerScaleSettings
        self.__canvas.alphaProperties = GUI_SETTINGS.markerBgSettings
        # Weak proxy handed out via getCanvasProxy().
        self.__canvasProxy = weakref.ref(self.__canvas)
        self.component.addChild(self.__canvas, 'vehicleMarkersCanvas')
    def __removeCanvas(self):
        """Detaches and drops the native canvas along with its weak proxy."""
        if self.__canvas is not None:
            self.component.delChild(self.__canvas)
            self.__canvas = None
            self.__canvasProxy = None
        return
def __setMarkersScale(self, scale = None):
if scale is None:
scale = g_settingsCore.interfaceScale.get()
stage = self.movie.stage
stage.scaleX = scale
stage.scaleY = scale
return
    def __setMarkerDuration(self):
        """Pushes the configured hit-splash duration to flash."""
        self.as_setMarkerDurationS(GUI_SETTINGS.markerHitSplashDuration)
def __setMarkerSettings(self, update = False):
getter = g_settingsCore.getSetting
self.as_setMarkerSettingsS({'enemy': getter('enemy'),
'dead': getter('dead'),
'ally': getter('ally')})
if update:
self.as_updateMarkersSettingsS()
    def __setColorsSchemes(self, isColorBlind):
        """Loads GUI color schemes and pushes both the default and the
        color-blind variants of all marker schemes to flash, then applies
        the player's color-blind preference."""
        colors = GuiColorsLoader.load()
        defaultSchemes = {}
        for name in colors.schemasNames():
            # Only schemes with the markers prefix are relevant here.
            if not name.startswith(settings.MARKERS_COLOR_SCHEME_PREFIX):
                continue
            defaultSchemes[name] = colors.getSubSchemeToFlash(name, GuiColorsLoader.DEFAULT_SUB_SCHEME)
        colorBlindSchemes = {}
        for name in colors.schemasNames():
            if not name.startswith(settings.MARKERS_COLOR_SCHEME_PREFIX):
                continue
            colorBlindSchemes[name] = colors.getSubSchemeToFlash(name, GuiColorsLoader.COLOR_BLIND_SUB_SCHEME)
        self.as_setColorSchemesS(defaultSchemes, colorBlindSchemes)
        self.as_setColorBlindS(isColorBlind)
def __addListeners(self):
    """Creates and starts the marker plugins, then subscribes to scale,
    game-event and settings notifications."""
    self.__plugins = plugins.createPlugins(self)
    self.__plugins.init()
    self.__plugins.start()
    g_settingsCore.interfaceScale.onScaleChanged += self.__setMarkersScale
    self.addListener(GameEvent.SHOW_EXTENDED_INFO, self.__handleShowExtendedInfo, scope=settings.SCOPE)
    self.addListener(GameEvent.GUI_VISIBILITY, self.__handleGUIVisibility, scope=settings.SCOPE)
    self.addListener(GameEvent.MARKERS_2D_VISIBILITY, self.__handleMarkerVisibility, scope=settings.SCOPE)
    g_settingsCore.onSettingsChanged += self.__onSettingsChanged
    g_guiResetters.add(self.__onRecreateDevice)
def __removeListeners(self):
    """Unsubscribes everything that __addListeners registered and stops and
    finalizes the plugins; must mirror __addListeners."""
    self.removeListener(GameEvent.SHOW_EXTENDED_INFO, self.__handleShowExtendedInfo, scope=settings.SCOPE)
    self.removeListener(GameEvent.GUI_VISIBILITY, self.__handleGUIVisibility, scope=settings.SCOPE)
    self.removeListener(GameEvent.MARKERS_2D_VISIBILITY, self.__handleMarkerVisibility, scope=settings.SCOPE)
    if self.__plugins is not None:
        self.__plugins.stop()
        self.__plugins.fini()
    g_settingsCore.interfaceScale.onScaleChanged -= self.__setMarkersScale
    g_settingsCore.onSettingsChanged -= self.__onSettingsChanged
    g_guiResetters.discard(self.__onRecreateDevice)
    return
def __handleShowExtendedInfo(self, event):
    """Shows extended vehicle information when the player presses [Left Alt].

    Vehicle marker consists of:
        - vehicle type (permanent);
        - nickname (extended);
        - health bar (extended);
        - vehicle name (extended);
        - vehicle level (extended and configurable in settings);
        - vehicle icon (extended and configurable in settings).
    """
    # Ignore the hotkey while a modal view is shown on top of the battle UI.
    if self.__parentUI is None or not self.__parentUI.isModalViewShown():
        self.as_setShowExInfoFlagS(event.ctx['isDown'])
    return
def __handleGUIVisibility(self, event):
    """Shows/hides the markers component together with the battle GUI."""
    self.component.visible = event.ctx['visible']
def __handleMarkerVisibility(self, _):
    """Special event toggles markers visibility only by key sequence
    CAPS + N (by default) and no any UI visible."""
    self.component.visible = not self.component.visible
def __onSettingsChanged(self, diff):
    """Listener for g_settingsCore.onSettingsChanged.

    :param diff: dictionary containing the changed settings.
    """
    if 'isColorBlind' in diff:
        self.as_setColorBlindS(diff['isColorBlind'])
    if any(key in diff for key in ('enemy', 'dead', 'ally')):
        self.__setMarkerSettings(True)
def __onRecreateDevice(self):
    """Listener registered with g_guiResetters: re-applies the marker scale
    after the graphics device is recreated."""
    self.__setMarkersScale()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\battle\shared\markers2d\manager.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:10:44 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
ce0f1fd01e0c92f3db50f007aee37f83ec422233 | 1afa1b1929d1cd463cd9970174dd58ce2ca6eb1e | /configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py | 35486efd42f3d22eaed88e076420094ab7358379 | [
"Apache-2.0"
] | permissive | CAU-HE/CMCDNet | 2328594bf4b883384c691099c72e119b65909121 | 31e660f81f3b625916a4c4d60cd606dcc8717f81 | refs/heads/main | 2023-08-08T17:21:57.199728 | 2023-07-28T07:34:40 | 2023-07-28T07:34:40 | 589,927,845 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | _base_ = './dnl_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
| [
"flyhxn@qq.com"
] | flyhxn@qq.com |
5bf6f1a2a9a6ec4d4c56d13b543f635813c9541a | 8bc15212b7c3f097fd2335094f16344dfd5cb530 | /icbc_test_to_anki/__init__.py | 57fd2fcb2b91b6e264b61cec2ec6eec19fb45616 | [
"MIT"
] | permissive | genzj/icbc_test_to_anki | c6888db81f1fb2e282446815006f52fb647bb711 | 148c29cd9513d666afe4b41d2aa4abcbcb5e8220 | refs/heads/main | 2022-12-20T09:26:54.339921 | 2020-10-18T22:19:18 | 2020-10-18T22:19:18 | 305,209,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | """Top-level package for ICBC Test to Anki."""
__author__ = """genzj"""
__email__ = 'zj0512@gmail.com'
__version__ = '0.1.0'
| [
"zj0512@gmail.com"
] | zj0512@gmail.com |
d83ecf1d144629c8d144fbf6023add38a6bfd419 | 106983cf0b8df622f514ecff2bb2fa4c794c9dac | /Misc/Raspberry Pi Things/SimpleCV/sportsballs.py | 49c60bf7ccc25b67065b125f48b3a7fd61125480 | [] | no_license | michael5486/Senior-Design | 2d9ae521c637abf7c0825f85b32752ad61c62744 | 6b6c78bed5f20582a9753a9c10020c709d6b6e53 | refs/heads/master | 2021-01-19T09:58:35.378164 | 2017-05-26T17:17:13 | 2017-05-26T17:17:13 | 67,556,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from SimpleCV import Color, Image
import time
img = Image("sportsballs.jpg")
circles = img.findCircle(canny=200, thresh=250, distance=15)
circles.sortArea()
circles.draw(width=4)
circles[0].draw(color=Color.RED, width = 4)
img_with_circles = img.applyLayers()
edges_in_image = img.edges(t2=200)
final = img.sideBySide(edges_in_image.sideBySide(img_with_circles)).scale(0.5)
final.show()
time.sleep(15) | [
"michael5486@gmail.com"
] | michael5486@gmail.com |
0356ceefabfd8f1609a9c3d6d709691476e619e4 | 7506c49859870af9e62c3e919857ffcdf2e9a19e | /book2/tf_test/keras_inception2.py | b85281630d569a8b110336815cd45ff6cc371f84 | [] | no_license | Git2191866109/BookStudy | d363717285a5e9767e582f6efd1258680fa26f80 | f172244218871372ca94286c3db64cf334627ef3 | refs/heads/master | 2022-11-08T00:15:00.963332 | 2020-06-28T10:28:33 | 2020-06-28T10:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,550 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: keras_inception2.py
@time: 2019/5/6 15:43
@desc: 用原生态的Keras实现Inception
"""
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten
import keras
from keras.models import Model
from keras.datasets import mnist
from keras import backend as K
# Load and preprocess MNIST: reshape into image tensors, scale pixels to
# [0, 1] and one-hot encode the labels.
# Number of MNIST classes (digits 0-9).  NOTE(review): the name is a typo
# for "num_classes" but is kept because later code references it.
num_calsses = 10
img_rows, img_cols = 28, 28
# trainX is a 60000x28x28 array, trainY the digit labels; testX/testY are
# the 10000-image test split.
(trainX, trainY), (testX, testY) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    trainX = trainX.reshape(trainX.shape[0], 1, img_rows, img_cols)
    # Bugfix: the original used trainX.shape[0] (60000) to reshape testX,
    # which fails for the 10000-sample test set.
    testX = testX.reshape(testX.shape[0], 1, img_rows, img_cols)
    # MNIST images are grayscale, so the channel dimension is 1.
    input_shape = (1, img_rows, img_cols)
else:
    trainX = trainX.reshape(trainX.shape[0], img_rows, img_cols, 1)
    testX = testX.reshape(testX.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale pixel values from [0, 255] into [0, 1].
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.0
testX /= 255.0
# One-hot encode the labels.
trainY = keras.utils.to_categorical(trainY, num_calsses)
testY = keras.utils.to_categorical(testY, num_calsses)
# Input: 28x28 grayscale MNIST images.
input_img = Input(shape=(28, 28, 1))
# Branch 1: 1x1 conv followed by a 3x3 conv.
tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)
# Branch 2: unlike a Sequential model, this branch consumes input_img
# directly rather than the previous branch's output.
tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)
# Branch 3: 3x3 max pooling then a 1x1 conv, again fed from input_img.
tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)
# Concatenate the three branches into an Inception-style block.
output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)
# Flatten the convolutional output for the fully connected layers.
tower_4 = Flatten()(output)
# Fully connected layer with 500 units.
tower_5 = Dense(500, activation='relu')(tower_4)
# Final softmax classification layer.
predictions = Dense(num_calsses, activation='softmax')(tower_5)
# Model (functional API) requires explicit inputs/outputs, unlike Sequential.
model = Model(inputs=input_img, outputs=predictions)
# Loss, optimizer and evaluation metric, as in the earlier examples.
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.SGD(), metrics=['accuracy'])
# Train, validating against the test split each epoch.
model.fit(trainX, trainY, batch_size=128, epochs=20, validation_data=(testX, testY))
# Final evaluation on the test data.
score = model.evaluate(testX, testY, batch_size=128)
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
"694317828@qq.com"
] | 694317828@qq.com |
5a7f4fcc5349f7c3cf44fdf7599a2ecb726ac6e8 | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-gc-shared/profiles/migrations/0016_userprofile_risk_appetite.py | 5289f7c30df4582aa664b83ef65249cc6c01a66d | [] | no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the boolean ``risk_appetite`` flag (default False) to
    ``profiles.UserProfile``."""

    # Must be applied after the previous profiles migration.
    dependencies = [
        ('profiles', '0015_auto_20170118_1908'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='risk_appetite',
            field=models.BooleanField(default=False),
        ),
    ]
"ibalyko@ubuntu-server-16-04"
] | ibalyko@ubuntu-server-16-04 |
7efa3c706308d3f3fb706e4ca6d7d04a22d29144 | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /Network/Renew IP Configuration/renew-ip-configuration.py | 40ebc13c2dc4e93b13e23b39afab411ca5f3fb03 | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 644 | py | import os
import ctypes
class disable_file_system_redirection:
    """Context manager that temporarily disables WOW64 file-system
    redirection, letting a 32-bit process on 64-bit Windows reach the
    native System32 binaries (Windows-only: relies on ctypes.windll)."""
    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

    def __enter__(self):
        # old_value receives the opaque token required to restore redirection.
        self.old_value = ctypes.c_long()
        self.success = self._disable(ctypes.byref(self.old_value))

    def __exit__(self, type, value, traceback):
        # Restore redirection only if disabling succeeded.
        if self.success:
            self._revert(self.old_value)
# Release and renew the DHCP lease; redirection is disabled so the native
# ipconfig.exe is invoked even from a 32-bit Python interpreter.
with disable_file_system_redirection():
    release = os.popen('ipconfig /release').read();
    print(release);
    renew = os.popen('ipconfig /renew').read();
    print(renew);
| [
"noreply@github.com"
] | kannanch.noreply@github.com |
579cc5aad5e975d5285f65e46ab296b71ec91288 | 87d33dc5f071d00723da819a62f01ecda6db0244 | /backend/api/models/vehicle_fuel_type.py | 2f0cbf5e8cddfbcce7a7c0b79bd69bf917dd03c2 | [
"Apache-2.0"
] | permissive | AntonCoetzer/zeva | bc961d1cf1e520ddc4a88e4dc826e317772737dc | 500ed05694348f9084784ca8ec9aafa9b7f4371f | refs/heads/master | 2020-12-23T06:33:04.149965 | 2020-01-28T20:00:09 | 2020-01-28T20:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | from django.db import models
from api.models.mixins.effective_dates import EffectiveDates
from auditable.models import Auditable
from .mixins.named import Description
class FuelType(Auditable, Description, EffectiveDates):
    """Reference table of vehicle fuel types, using the NRCAN fuel codes."""

    # Short NRCAN fuel code, e.g. B, BX, BZ.
    vehicle_fuel_code = models.CharField(
        blank=False,
        db_comment="Fuel type (e.g. B, BX, BZ)",
        max_length=3,
        null=False,
        unique=True
    )

    class Meta:
        db_table = 'vehicle_fuel_type'
        # NOTE(review): the implicit string concatenation below produces one
        # comment with no separators between the examples - confirm whether
        # spaces/newlines were intended before changing the stored comment.
        db_table_comment = "Fuel type of the vehicle as defined in NRCAN" \
                           "e.g. B - Electricity" \
                           "BX - Electricity/Regular Gasoline" \
                           "BZ - Electricity/Premium Gasoline"
| [
"31664961+kuanfandevops@users.noreply.github.com"
] | 31664961+kuanfandevops@users.noreply.github.com |
9bc0d080bd641b14b0f1ceaa0da41ea242af3190 | 131cf803a1f7b9638ab0a604d61ab2de22906014 | /dimensigon/domain/entities/action_template.py | a8c9815a1afa5aa07818652e46e5d603bc62d2c6 | [
"Apache-2.0"
] | permissive | dimensigon/dimensigon | 757be1e61e57f7ce0a610a9531317761393eaad0 | 079d7c91a66e10f13510d89844fbadb27e005b40 | refs/heads/master | 2023-03-09T06:50:55.994738 | 2021-02-21T11:45:01 | 2021-02-21T11:45:01 | 209,486,736 | 2 | 0 | Apache-2.0 | 2021-02-26T02:59:18 | 2019-09-19T07:11:35 | Python | UTF-8 | Python | false | false | 11,589 | py | import copy
from enum import Enum
from sqlalchemy import orm
from dimensigon import defaults
from dimensigon.domain.entities.base import UUIDistributedEntityMixin
from dimensigon.utils import typos
from dimensigon.utils.helpers import is_iterable_not_string
from dimensigon.utils.typos import MultiLine
from dimensigon.web import db
# Kinds of actions an ActionTemplate can describe, declared via the
# functional Enum API (equivalent to the class-based form).
ActionType = Enum(
    'ActionType',
    [
        ('TEST', 0),
        ('ANSIBLE', 1),
        ('PYTHON', 2),
        ('SHELL', 3),
        ('ORCHESTRATION', 4),
        ('REQUEST', 5),
        ('NATIVE', 6),
    ],
)
class ActionTemplate(UUIDistributedEntityMixin, db.Model):
    """Catalog entry describing a reusable action (shell, python, ansible,
    orchestration, ...) that orchestrations can reference.

    The class-level UUID constants identify the built-in actions created by
    :meth:`set_initial`.
    """
    SEND_SOFTWARE = '00000000-0000-0000-000a-000000000001'
    WAIT_SERVERS = '00000000-0000-0000-000a-000000000002'
    ORCHESTRATION = '00000000-0000-0000-000a-000000000003'
    WAIT_ROUTE2SERVERS = '00000000-0000-0000-000a-000000000004'
    DELETE_SERVERS = '00000000-0000-0000-000a-000000000005'

    __tablename__ = 'D_action_template'
    order = 10

    name = db.Column(db.String(40), nullable=False)
    version = db.Column(db.Integer, nullable=False)
    action_type = db.Column(typos.Enum(ActionType), nullable=False)
    code = db.Column(db.Text)
    expected_stdout = db.Column(db.Text)
    expected_stderr = db.Column(db.Text)
    expected_rc = db.Column(db.Integer)
    system_kwargs = db.Column(db.JSON)
    pre_process = db.Column(db.Text)
    post_process = db.Column(db.Text)
    schema = db.Column(db.JSON)
    description = db.Column(db.Text)

    def __init__(self, name: str, version: int, action_type: ActionType, code: MultiLine = None,
                 expected_stdout: MultiLine = None, expected_stderr: MultiLine = None,
                 expected_rc: int = None, system_kwargs: typos.Kwargs = None, pre_process: MultiLine = None,
                 post_process: MultiLine = None, schema: typos.Kwargs = None, description: MultiLine = None, **kwargs):
        """MultiLine arguments accept either a string or an iterable of
        lines; iterables are joined with newlines before being stored."""
        super().__init__(**kwargs)
        self.name = name
        self.version = version
        self.action_type = action_type
        self.code = '\n'.join(code) if is_iterable_not_string(code) else code
        self.schema = schema or {}
        self.expected_stdout = '\n'.join(expected_stdout) if is_iterable_not_string(
            expected_stdout) else expected_stdout
        self.expected_stderr = '\n'.join(expected_stderr) if is_iterable_not_string(
            expected_stderr) else expected_stderr
        self.expected_rc = expected_rc
        self.system_kwargs = system_kwargs or {}
        self.pre_process = '\n'.join(pre_process) if is_iterable_not_string(pre_process) else pre_process
        self.post_process = '\n'.join(post_process) if is_iterable_not_string(post_process) else post_process
        self.description = '\n'.join(description) if is_iterable_not_string(description) else description

    __table_args__ = (db.UniqueConstraint('name', 'version'),)

    @orm.reconstructor
    def init_on_load(self):
        # Invoked by SQLAlchemy instead of __init__ when loading from the
        # database; normalizes NULL system_kwargs to an empty dict.
        self.system_kwargs = self.system_kwargs or {}

    def __str__(self):
        return f"{self.name}.ver{self.version}"

    def to_json(self, split_lines=False, **kwargs):
        """Serializes the template to a dict.

        :param split_lines: when True, multi-line text fields are emitted as
            lists of lines instead of single strings.
        Fields whose value is None (or an empty dict for schema/system_kwargs)
        are omitted.
        """
        data = super().to_json(**kwargs)
        data.update(name=self.name, version=self.version,
                    action_type=self.action_type.name)
        if self.code is not None:
            data.update(code=self.code.split('\n') if split_lines else self.code)
        if self.schema:
            data.update(schema=self.schema)
        if self.system_kwargs:
            data.update(system_kwargs=self.system_kwargs)
        if self.expected_stdout is not None:
            data.update(expected_stdout=self.expected_stdout.split('\n') if split_lines else self.expected_stdout)
        if self.expected_stderr is not None:
            data.update(expected_stderr=self.expected_stderr.split('\n') if split_lines else self.expected_stderr)
        if self.expected_rc is not None:
            data.update(expected_rc=self.expected_rc)
        if self.post_process is not None:
            data.update(post_process=self.post_process.split('\n') if split_lines else self.post_process)
        if self.pre_process is not None:
            data.update(pre_process=self.pre_process.split('\n') if split_lines else self.pre_process)
        return data

    @classmethod
    def from_json(cls, kwargs):
        """Builds an instance from a to_json()-style dict; the action_type
        name is converted back to the ActionType enum member."""
        kwargs = copy.deepcopy(kwargs)
        kwargs['action_type'] = ActionType[kwargs.get('action_type')]
        return super().from_json(kwargs)

    @classmethod
    def set_initial(cls, session=None):
        """Inserts the built-in action templates (identified by the class
        UUID constants) when missing, bypassing datamark updates.

        :param session: SQLAlchemy session to use; defaults to db.session.
        """
        from dimensigon.domain.entities import bypass_datamark_update
        if session is None:
            session = db.session
        with bypass_datamark_update(session):
            at = session.query(cls).get(cls.SEND_SOFTWARE)
            if at is None:
                at = ActionTemplate(
                    name='send software', version=1, action_type=ActionType.NATIVE,
                    expected_rc=201, last_modified_at=defaults.INITIAL_DATEMARK,
                    schema={"input": {"software": {"type": "string",
                                                   "description": "software name or ID to send. If "
                                                                  "name specified and version not set, "
                                                                  "biggest version will be taken"},
                                      "version": {"type": "string",
                                                  "description": "software version to take"},
                                      "server": {"type": "string",
                                                 "description": "destination server id"},
                                      "dest_path": {"type": "string",
                                                    "description": "destination path to send software"},
                                      "chunk_size": {"type": "integer"},
                                      "max_senders": {"type": "integer"},
                                      },
                            "required": ["software", "server"],
                            "output": ["file"]
                            },
                    id=cls.SEND_SOFTWARE,
                    # Post-process runs against the completed process (cp) and
                    # stores the uploaded file path in the variable context (vc).
                    post_process="import json\nif cp.success:\n json_data=json.loads(cp.stdout)\n vc.set('file', "
                                 "json_data.get('file'))")
                session.add(at)
            at = session.query(cls).get(cls.WAIT_SERVERS)
            if at is None:
                at = ActionTemplate(
                    name='wait servers', version=1, action_type=ActionType.NATIVE,
                    description="waits server_names to join to the dimension",
                    last_modified_at=defaults.INITIAL_DATEMARK,
                    schema={"input": {"server_names": {"type": ["array", "string"],
                                                       "items": {"type": "string"}},
                                      },
                            "required": ["server_names"]
                            },
                    id=cls.WAIT_SERVERS)
                session.add(at)
            at = session.query(cls).get(cls.ORCHESTRATION)
            if at is None:
                at = ActionTemplate(
                    name='orchestration', version=1, action_type=ActionType.ORCHESTRATION,
                    description="launches an orchestration",
                    schema={"input": {"orchestration": {"type": "string",
                                                        "description": "orchestration name or ID to "
                                                                       "execute. If no version "
                                                                       "specified, the last one will "
                                                                       "be executed"},
                                      "version": {"type": "integer"},
                                      "hosts": {"type": ["string", "array", "object"],
                                                "items": {"type": "string"},
                                                "minItems": 1,
                                                "patternProperties": {
                                                    ".*": {"anyOf": [{"type": "string"},
                                                                     {"type": "array",
                                                                      "items": {"type": "string"},
                                                                      "minItems": 1
                                                                      },
                                                                     ]
                                                           },
                                                },
                                                },
                                      },
                            "required": ["orchestration", "hosts"]
                            },
                    last_modified_at=defaults.INITIAL_DATEMARK,
                    id=cls.ORCHESTRATION)
                session.add(at)
            at = session.query(cls).get(cls.WAIT_ROUTE2SERVERS)
            if at is None:
                at = ActionTemplate(
                    name='wait route to servers', version=1, action_type=ActionType.NATIVE,
                    description="waits until we have a valid route to a server",
                    schema={"input": {"server_names": {"type": ["array", "string"],
                                                       "items": {"type": "string"}},
                                      },
                            "required": ["server_names"]
                            },
                    last_modified_at=defaults.INITIAL_DATEMARK,
                    id=cls.WAIT_ROUTE2SERVERS)
                session.add(at)
            at = session.query(cls).get(cls.DELETE_SERVERS)
            if at is None:
                at = ActionTemplate(
                    name='delete servers', version=1, action_type=ActionType.NATIVE,
                    description="deletes server_names from the dimension",
                    schema={"input": {"server_names": {"type": ["array", "string"],
                                                       "items": {"type": "string"}},
                                      },
                            "required": ["server_names"]
                            },
                    last_modified_at=defaults.INITIAL_DATEMARK,
                    id=cls.DELETE_SERVERS)
                session.add(at)
"joan.prat@knowtrade.eu"
] | joan.prat@knowtrade.eu |
5656b071da54a733c610db362e7eb8a3a509ff00 | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/angle/third_party/glmark2/src/waflib/TaskGen.py | 09c574622b8f905b68f0d5d96505a83460245800 | [
"GPL-3.0-only",
"LicenseRef-scancode-x11-opengl",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 12,619 | py | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
HEADER_EXTS=['.h','.hpp','.hxx','.hh']
class task_gen(object):
mappings=Utils.ordered_iter_dict()
prec=Utils.defaultdict(list)
def __init__(self,*k,**kw):
self.source=''
self.target=''
self.meths=[]
self.features=[]
self.tasks=[]
if not'bld'in kw:
self.env=ConfigSet.ConfigSet()
self.idx=0
self.path=None
else:
self.bld=kw['bld']
self.env=self.bld.env.derive()
self.path=self.bld.path
try:
self.idx=self.bld.idx[self.path]=self.bld.idx.get(self.path,0)+1
except AttributeError:
self.bld.idx={}
self.idx=self.bld.idx[self.path]=1
for key,val in kw.items():
setattr(self,key,val)
def __str__(self):
return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
def __repr__(self):
lst=[]
for x in self.__dict__:
if x not in('env','bld','compiled_tasks','tasks'):
lst.append("%s=%s"%(x,repr(getattr(self,x))))
return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
def get_cwd(self):
return self.bld.bldnode
def get_name(self):
try:
return self._name
except AttributeError:
if isinstance(self.target,list):
lst=[str(x)for x in self.target]
name=self._name=','.join(lst)
else:
name=self._name=str(self.target)
return name
def set_name(self,name):
self._name=name
name=property(get_name,set_name)
def to_list(self,val):
if isinstance(val,str):
return val.split()
else:
return val
def post(self):
if getattr(self,'posted',None):
return False
self.posted=True
keys=set(self.meths)
self.features=Utils.to_list(self.features)
for x in self.features+['*']:
st=feats[x]
if not st:
if not x in Task.classes:
Logs.warn('feature %r does not exist - bind at least one method to it',x)
keys.update(list(st))
prec={}
prec_tbl=self.prec
for x in prec_tbl:
if x in keys:
prec[x]=prec_tbl[x]
tmp=[]
for a in keys:
for x in prec.values():
if a in x:break
else:
tmp.append(a)
tmp.sort()
out=[]
while tmp:
e=tmp.pop()
if e in keys:out.append(e)
try:
nlst=prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec:
txt='\n'.join(['- %s after %s'%(k,repr(v))for k,v in prec.items()])
raise Errors.WafError('Cycle detected in the method execution\n%s'%txt)
out.reverse()
self.meths=out
Logs.debug('task_gen: posting %s %d',self,id(self))
for x in out:
try:
v=getattr(self,x)
except AttributeError:
raise Errors.WafError('%r is not a valid task generator method'%x)
Logs.debug('task_gen: -> %s (%d)',x,id(self))
v()
Logs.debug('task_gen: posted %s',self.name)
return True
def get_hook(self,node):
name=node.name
for k in self.mappings:
try:
if name.endswith(k):
return self.mappings[k]
except TypeError:
if k.match(name):
return self.mappings[k]
raise Errors.WafError("File %r has no mapping in %r (have you forgotten to load a waf tool?)"%(node,self.mappings.keys()))
def create_task(self,name,src=None,tgt=None,**kw):
task=Task.classes[name](env=self.env.derive(),generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
task.__dict__.update(kw)
self.tasks.append(task)
return task
def clone(self,env):
newobj=self.bld()
for x in self.__dict__:
if x in('env','bld'):
continue
elif x in('path','features'):
setattr(newobj,x,getattr(self,x))
else:
setattr(newobj,x,copy.copy(getattr(self,x)))
newobj.posted=False
if isinstance(env,str):
newobj.env=self.bld.all_envs[env].derive()
else:
newobj.env=env.derive()
return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
ext_in=Utils.to_list(ext_in)
ext_out=Utils.to_list(ext_out)
if not name:
name=rule
cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
def x_file(self,node):
if ext_in:
_ext_in=ext_in[0]
tsk=self.create_task(name,node)
cnt=0
ext=decider(self,node)if decider else cls.ext_out
for x in ext:
k=node.change_ext(x,ext_in=_ext_in)
tsk.outputs.append(k)
if reentrant!=None:
if cnt<int(reentrant):
self.source.append(k)
else:
for y in self.mappings:
if k.name.endswith(y):
self.source.append(k)
break
cnt+=1
if install_path:
self.install_task=self.add_install_files(install_to=install_path,install_from=tsk.outputs)
return tsk
for x in cls.ext_in:
task_gen.mappings[x]=x_file
return x_file
def taskgen_method(func):
    """Decorator: binds *func* as a method of task_gen and returns it."""
    setattr(task_gen, func.__name__, func)
    return func
def feature(*k):
    """Decorator: binds the function as a task_gen method and registers it
    to run for the given feature name(s)."""
    def deco(func):
        setattr(task_gen, func.__name__, func)
        for name in k:
            feats[name].update([func.__name__])
        return func
    return deco
def before_method(*k):
    """Decorator: binds the function as a task_gen method and forces it to
    execute before the named method(s) (recorded in task_gen.prec)."""
    def deco(func):
        setattr(task_gen, func.__name__, func)
        for fun_name in k:
            if not func.__name__ in task_gen.prec[fun_name]:
                task_gen.prec[fun_name].append(func.__name__)
        return func
    return deco
# Backward-compatible alias.
before = before_method
def after_method(*k):
    """Decorator: binds the function as a task_gen method and forces it to
    execute after the named method(s) (recorded in task_gen.prec)."""
    def deco(func):
        setattr(task_gen, func.__name__, func)
        for fun_name in k:
            if not fun_name in task_gen.prec[func.__name__]:
                task_gen.prec[func.__name__].append(fun_name)
        return func
    return deco
# Backward-compatible alias.
after = after_method
def extension(*k):
    """Decorator: binds the function as a task_gen method and registers it
    as the handler for source files with the given extension(s)."""
    def deco(func):
        setattr(task_gen, func.__name__, func)
        for x in k:
            task_gen.mappings[x] = func
        return func
    return deco
@taskgen_method
def to_nodes(self, lst, path=None):
    """Converts a list of file name strings and/or Node objects into a list
    of Nodes, resolving strings relative to *path* (default: self.path).

    :raises Errors.WafError: when a string does not resolve to a file.
    """
    tmp = []
    path = path or self.path
    find = path.find_resource  # bind once; used for every element
    if isinstance(lst, Node.Node):
        lst = [lst]
    for x in Utils.to_list(lst):
        if isinstance(x, str):
            node = find(x)
        else:
            node = x
        if not node:
            raise Errors.WafError("source not found: %r in %r" % (x, self))
        tmp.append(node)
    return tmp
@feature('*')
def process_source(self):
    """Converts self.source into Nodes and invokes the extension hook
    (task_gen.mappings) for each one, creating the build tasks."""
    self.source = self.to_nodes(getattr(self, 'source', []))
    for node in self.source:
        self.get_hook(node)(self, node)
@feature('*')
@before_method('process_source')
def process_rule(self):
if not getattr(self,'rule',None):
return
name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
try:
cache=self.bld.cache_rule_attr
except AttributeError:
cache=self.bld.cache_rule_attr={}
cls=None
if getattr(self,'cache_rule','True'):
try:
cls=cache[(name,self.rule)]
except KeyError:
pass
if not cls:
rule=self.rule
if hasattr(self,'chmod'):
def chmod_fun(tsk):
for x in tsk.outputs:
os.chmod(x.abspath(),self.chmod)
rule=(self.rule,chmod_fun)
cls=Task.task_factory(name,rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
if getattr(self,'scan',None):
cls.scan=self.scan
elif getattr(self,'deps',None):
def scan(self):
nodes=[]
for x in self.generator.to_list(getattr(self.generator,'deps',None)):
node=self.generator.path.find_resource(x)
if not node:
self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
nodes.append(node)
return[nodes,[]]
cls.scan=scan
if getattr(self,'always',None):
cls.always_run=True
for x in('after','before','ext_in','ext_out'):
setattr(cls,x,getattr(self,x,[]))
if getattr(self,'cache_rule','True'):
cache[(name,self.rule)]=cls
if getattr(self,'cls_str',None):
setattr(cls,'__str__',self.cls_str)
if getattr(self,'cls_keyword',None):
setattr(cls,'keyword',self.cls_keyword)
tsk=self.create_task(name)
if getattr(self,'target',None):
if isinstance(self.target,str):
self.target=self.target.split()
if not isinstance(self.target,list):
self.target=[self.target]
for x in self.target:
if isinstance(x,str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir()
tsk.outputs.append(x)
if getattr(self,'install_path',None):
self.install_task=self.add_install_files(install_to=self.install_path,install_from=tsk.outputs,chmod=getattr(self,'chmod',Utils.O644))
if getattr(self,'source',None):
tsk.inputs=self.to_nodes(self.source)
self.source=[]
if getattr(self,'cwd',None):
tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
    """Adds a strict ordering between 'seq' task generators: every task of
    the previously posted generator must run before this generator's tasks."""
    if self.meths and self.meths[-1] != 'sequence_order':
        # Defer: re-append so this method runs last among the bound methods.
        self.meths.append('sequence_order')
        return
    if getattr(self, 'seq_start', None):
        return
    if getattr(self.bld, 'prev', None):
        self.bld.prev.post()
        for x in self.bld.prev.tasks:
            for y in self.tasks:
                y.set_run_after(x)
    # Remember this generator as the predecessor for the next 'seq' one.
    self.bld.prev = self
re_m4=re.compile('@(\w+)@',re.M)
class subst_pc(Task.Task):
def force_permissions(self):
if getattr(self.generator,'chmod',None):
for x in self.outputs:
os.chmod(x.abspath(),self.generator.chmod)
def run(self):
if getattr(self.generator,'is_copy',None):
for i,x in enumerate(self.outputs):
x.write(self.inputs[i].read('rb'),'wb')
self.force_permissions()
return None
if getattr(self.generator,'fun',None):
ret=self.generator.fun(self)
if not ret:
self.force_permissions()
return ret
code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
if getattr(self.generator,'subst_fun',None):
code=self.generator.subst_fun(self,code)
if code is not None:
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
self.force_permissions()
return None
code=code.replace('%','%%')
lst=[]
def repl(match):
g=match.group
if g(1):
lst.append(g(1))
return"%%(%s)s"%g(1)
return''
global re_m4
code=getattr(self.generator,'re_m4',re_m4).sub(repl,code)
try:
d=self.generator.dct
except AttributeError:
d={}
for x in lst:
tmp=getattr(self.generator,x,'')or self.env[x]or self.env[x.upper()]
try:
tmp=''.join(tmp)
except TypeError:
tmp=str(tmp)
d[x]=tmp
code=code%d
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
self.generator.bld.raw_deps[self.uid()]=lst
try:delattr(self,'cache_sig')
except AttributeError:pass
self.force_permissions()
def sig_vars(self):
bld=self.generator.bld
env=self.env
upd=self.m.update
if getattr(self.generator,'fun',None):
upd(Utils.h_fun(self.generator.fun).encode())
if getattr(self.generator,'subst_fun',None):
upd(Utils.h_fun(self.generator.subst_fun).encode())
vars=self.generator.bld.raw_deps.get(self.uid(),[])
act_sig=bld.hash_env_vars(env,vars)
upd(act_sig)
lst=[getattr(self.generator,x,'')for x in vars]
upd(Utils.h_list(lst))
return self.m.digest()
@extension('.pc.in')
def add_pcfile(self, node):
    """Handles .pc.in files: creates a 'subst_pc' task producing the .pc
    file and installs it to ${LIBDIR}/pkgconfig/ (or self.install_path)."""
    tsk = self.create_task('subst_pc', node, node.change_ext('.pc', '.pc.in'))
    self.install_task = self.add_install_files(install_to=getattr(self, 'install_path', '${LIBDIR}/pkgconfig/'), install_from=tsk.outputs)
class subst(subst_pc):
    """Generic substitution task; identical behavior to subst_pc."""
    pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
src=Utils.to_list(getattr(self,'source',[]))
if isinstance(src,Node.Node):
src=[src]
tgt=Utils.to_list(getattr(self,'target',[]))
if isinstance(tgt,Node.Node):
tgt=[tgt]
if len(src)!=len(tgt):
raise Errors.WafError('invalid number of source/target for %r'%self)
for x,y in zip(src,tgt):
if not x or not y:
raise Errors.WafError('null source or target for %r'%self)
a,b=None,None
if isinstance(x,str)and isinstance(y,str)and x==y:
a=self.path.find_node(x)
b=self.path.get_bld().make_node(y)
if not os.path.isfile(b.abspath()):
b.parent.mkdir()
else:
if isinstance(x,str):
a=self.path.find_resource(x)
elif isinstance(x,Node.Node):
a=x
if isinstance(y,str):
b=self.path.find_or_declare(y)
elif isinstance(y,Node.Node):
b=y
if not a:
raise Errors.WafError('could not find %r for %r'%(x,self))
has_constraints=False
tsk=self.create_task('subst',a,b)
for k in('after','before','ext_in','ext_out'):
val=getattr(self,k,None)
if val:
has_constraints=True
setattr(tsk,k,val)
if not has_constraints:
global HEADER_EXTS
for xt in HEADER_EXTS:
if b.name.endswith(xt):
tsk.before=[k for k in('c','cxx')if k in Task.classes]
break
inst_to=getattr(self,'install_path',None)
if inst_to:
self.install_task=self.add_install_files(install_to=inst_to,install_from=b,chmod=getattr(self,'chmod',Utils.O644))
self.source=[]
| [
"jengelh@inai.de"
] | jengelh@inai.de |
88a801941c924e8ff2f4e55af3e00dd0489f7dc8 | 2ceb72ba06468ee4985c205697c9381c09dd71b2 | /everest/repositories/rdb/querying.py | fa6c63eaa1409e7ce6bcaba0c6efe5c81e31637a | [
"MIT"
] | permissive | BigData-Tools/everest | 7e9530125a8ae622825c75605e65f2a9b5669136 | a9fad763148477d5d9112996707070578082c1e8 | refs/heads/master | 2021-01-16T23:04:01.857682 | 2013-05-23T11:48:00 | 2013-05-23T11:48:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,358 | py | """
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 7, 2013.
"""
from everest.entities.attributes import EntityAttributeKinds
from everest.querying.filtering import FilterSpecificationVisitor
from everest.querying.interfaces import IFilterSpecificationVisitor
from everest.querying.interfaces import IOrderSpecificationVisitor
from everest.querying.ordering import OrderSpecificationVisitor
from everest.repositories.rdb.utils import OrderClauseList
from everest.resources.interfaces import IResource
from sqlalchemy import and_ as sqlalchemy_and
from sqlalchemy import not_ as sqlalchemy_not
from sqlalchemy import or_ as sqlalchemy_or
from sqlalchemy.orm.interfaces import MANYTOMANY
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.orm.interfaces import ONETOMANY
from sqlalchemy.sql.expression import ClauseList
from zope.interface import implements # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['OrmAttributeInspector',
'SqlFilterSpecificationVisitor',
'SqlOrderSpecificationVisitor',
]
class OrmAttributeInspector(object):
    """
    Helper class inspecting class attributes mapped by the ORM.

    Results are memoized per (ORM class, dotted attribute name) pair in a
    class-level cache.
    # NOTE(review): the cache grows unbounded over the process lifetime;
    # acceptable only if the set of inspected attributes is small/fixed.
    """
    __cache = {}
    @staticmethod
    def inspect(orm_class, attribute_name):
        """
        :param attribute_name: name of the mapped attribute to inspect.
        :returns: list of 2-tuples containing information about the inspected
            attribute (first element: mapped entity attribute kind; second
            attribute: mapped entity attribute)
        """
        key = (orm_class, attribute_name)
        elems = OrmAttributeInspector.__cache.get(key)
        if elems is None:
            # Cache miss: walk the dotted path once and remember the result.
            elems = OrmAttributeInspector.__inspect(key)
            OrmAttributeInspector.__cache[key] = elems
        return elems
    @staticmethod
    def __inspect(key):
        # Walk the dotted attribute path (e.g. "parent.child.name"), one
        # token at a time, descending into related entity types.
        orm_class, attribute_name = key
        elems = []
        entity_type = orm_class
        ent_attr_tokens = attribute_name.split('.')
        count = len(ent_attr_tokens)
        for idx, ent_attr_token in enumerate(ent_attr_tokens):
            entity_attr = getattr(entity_type, ent_attr_token)
            kind, attr_type = OrmAttributeInspector.__classify(entity_attr)
            if idx == count - 1:
                pass
                # We are at the last name token - this must be a TERMINAL
                # or an ENTITY.
#                if kind == EntityAttributeKinds.AGGREGATE:
#                    raise ValueError('Invalid attribute name "%s": the '
#                                     'last element (%s) references an '
#                                     'aggregate attribute.'
#                                     % (attribute_name, ent_attr_token))
            else:
                if kind == EntityAttributeKinds.TERMINAL:
                    # We should not get here - the last attribute was a
                    # terminal.
                    raise ValueError('Invalid attribute name "%s": the '
                                     'element "%s" references a terminal '
                                     'attribute.'
                                     % (attribute_name, ent_attr_token))
                # Continue path traversal in the related entity type.
                entity_type = attr_type
            elems.append((kind, entity_attr))
        return elems
    @staticmethod
    def __classify(attr):
        # Looks up the entity attribute kind and target type for the given
        # entity attribute.
        # We look for an attribute "property" to identify mapped attributes
        # (instrumented attributes and attribute proxies).
        if not hasattr(attr, 'property'):
            raise ValueError('Attribute "%s" is not mapped.' % attr)
        # We detect terminals by the absence of an "argument" attribute of
        # the attribute's property.
        if not hasattr(attr.property, 'argument'):
            kind = EntityAttributeKinds.TERMINAL
            target_type = None
        else: # We have a relationship.
            target_type = attr.property.argument
            if attr.property.direction in (ONETOMANY, MANYTOMANY):
                if not attr.property.uselist:
                    # 1:1
                    kind = EntityAttributeKinds.ENTITY
                else:
                    kind = EntityAttributeKinds.AGGREGATE
            elif attr.property.direction == MANYTOONE:
                kind = EntityAttributeKinds.ENTITY
            else:
                raise ValueError('Unsupported relationship direction "%s".' # pragma: no cover
                                 % attr.property.direction)
        return kind, target_type
class SqlFilterSpecificationVisitor(FilterSpecificationVisitor):
    """
    Filter specification visitor implementation for the RDB repository
    (builds a SQL expression).
    """
    implements(IFilterSpecificationVisitor)
    def __init__(self, entity_class, custom_clause_factories=None):
        """
        Constructs a SqlFilterSpecificationVisitor

        :param entity_class: an entity class that is mapped with SQLAlchemy
        :param custom_clause_factories: a map containing custom clause factory
          functions for selected (attribute name, operator) combinations.
        """
        FilterSpecificationVisitor.__init__(self)
        self.__entity_class = entity_class
        if custom_clause_factories is None:
            custom_clause_factories = {}
        self.__custom_clause_factories = custom_clause_factories
    def visit_nullary(self, spec):
        # Prefer a registered custom factory for this (attribute, operator)
        # pair; otherwise fall back to the generic operator dispatch.
        key = (spec.attr_name, spec.operator.name)
        if key in self.__custom_clause_factories:
            self._push(self.__custom_clause_factories[key](spec.attr_value))
        else:
            FilterSpecificationVisitor.visit_nullary(self, spec)
    def _starts_with_op(self, spec):
        return self.__build(spec.attr_name, 'startswith', spec.attr_value)
    def _ends_with_op(self, spec):
        return self.__build(spec.attr_name, 'endswith', spec.attr_value)
    def _contains_op(self, spec):
        return self.__build(spec.attr_name, 'contains', spec.attr_value)
    def _contained_op(self, spec):
        return self.__build(spec.attr_name, 'in_', spec.attr_value)
    def _equal_to_op(self, spec):
        return self.__build(spec.attr_name, '__eq__', spec.attr_value)
    def _less_than_op(self, spec):
        return self.__build(spec.attr_name, '__lt__', spec.attr_value)
    def _less_than_or_equal_to_op(self, spec):
        return self.__build(spec.attr_name, '__le__', spec.attr_value)
    def _greater_than_op(self, spec):
        return self.__build(spec.attr_name, '__gt__', spec.attr_value)
    def _greater_than_or_equal_to_op(self, spec):
        return self.__build(spec.attr_name, '__ge__', spec.attr_value)
    def _in_range_op(self, spec):
        from_value, to_value = spec.attr_value
        return self.__build(spec.attr_name, 'between', from_value, to_value)
    def _conjunction_op(self, spec, *expressions):
        return sqlalchemy_and(*expressions)
    def _disjunction_op(self, spec, *expressions):
        return sqlalchemy_or(*expressions)
    def _negation_op(self, spec, expression):
        return sqlalchemy_not(expression)
    def __build(self, attribute_name, sql_op, *values):
        # Builds an SQL expression from the given (possibly dotted)
        # attribute name, SQL operation name, and values.
        exprs = []
        infos = OrmAttributeInspector.inspect(self.__entity_class,
                                              attribute_name)
        count = len(infos)
        for idx, info in enumerate(infos):
            kind, entity_attr = info
            if idx == count - 1:
                # Last path element: apply the SQL operator, unwrapping
                # resources to their entities first.
                args = \
                    [val.get_entity() if IResource.providedBy(val) else val # pylint: disable=E1101
                     for val in values]
                expr = getattr(entity_attr, sql_op)(*args)
            elif kind == EntityAttributeKinds.ENTITY:
                # Intermediate to-one relationship: wrap with .has(...).
                expr = entity_attr.has
                exprs.insert(0, expr)
            elif kind == EntityAttributeKinds.AGGREGATE:
                # Intermediate to-many relationship: wrap with .any(...).
                expr = entity_attr.any
                exprs.insert(0, expr)
        # Nest the innermost expression inside the collected has/any wrappers.
        # NOTE(review): relies on the Python 2 builtin ``reduce``; on
        # Python 3 this would need ``functools.reduce`` -- confirm target
        # interpreter version.
        return reduce(lambda g, h: h(g), exprs, expr)
class SqlOrderSpecificationVisitor(OrderSpecificationVisitor):
    """
    Order specification visitor implementation for the rdb repository
    (builds a SQL expression).
    """
    implements(IOrderSpecificationVisitor)
    def __init__(self, entity_class, custom_join_clauses=None):
        """
        Constructs a SqlOrderSpecificationVisitor

        :param entity_class: a class that is mapped to a selectable using
          SQLAlchemy
        :param custom_join_clauses: optional map of attribute name ->
          join clauses to use instead of the automatically collected ones.
        """
        OrderSpecificationVisitor.__init__(self)
        self.__entity_class = entity_class
        if custom_join_clauses is None:
            custom_join_clauses = {}
        self.__custom_join_clauses = custom_join_clauses
        # Join targets collected while visiting dotted attribute paths.
        self.__joins = set()
    def visit_nullary(self, spec):
        OrderSpecificationVisitor.visit_nullary(self, spec)
        if spec.attr_name in self.__custom_join_clauses:
            # Custom join clauses replace (not extend) the collected ones.
            self.__joins = set(self.__custom_join_clauses[spec.attr_name])
    def get_joins(self):
        """Return a copy of the join targets needed by the ORDER BY clause."""
        return self.__joins.copy()
    def _conjunction_op(self, spec, *expressions):
        # Flatten nested clause lists into a single OrderClauseList.
        clauses = []
        for expr in expressions:
            if isinstance(expr, ClauseList):
                clauses.extend(expr.clauses)
            else:
                clauses.append(expr)
        return OrderClauseList(*clauses)
    def _asc_op(self, spec):
        return self.__build(spec.attr_name, 'asc')
    def _desc_op(self, spec):
        return self.__build(spec.attr_name, 'desc')
    def __build(self, attribute_name, sql_op):
        # Resolve a (possibly dotted) attribute name to an asc()/desc()
        # expression, recording intermediate relationships as joins.
        expr = None
        infos = OrmAttributeInspector.inspect(self.__entity_class,
                                              attribute_name)
        count = len(infos)
        for idx, info in enumerate(infos):
            kind, entity_attr = info
            if idx == count - 1:
                expr = getattr(entity_attr, sql_op)()
            elif kind != EntityAttributeKinds.TERMINAL:
                # FIXME: Avoid adding multiple attrs with the same target here.
                self.__joins.add(entity_attr)
        return expr
| [
"fogathmann@gmail.com"
] | fogathmann@gmail.com |
24911b11fad215799ee373c5a29640eef7216591 | 26fdd3419c1855f180d7e9bea3b59459ba9e6446 | /venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py | c0fc72691170a3514b88711fe5b0a2a7fbc25395 | [] | permissive | vansh1999/fashion-ecomm | eed52884ac007928260f50a885bec963d85a88d2 | 5879d0b1c64411485e861dfc9bcca6b4a82afc57 | refs/heads/master | 2021-06-24T21:58:26.931849 | 2021-04-10T08:37:50 | 2021-04-10T08:37:50 | 219,543,353 | 1 | 0 | Apache-2.0 | 2021-04-10T08:37:51 | 2019-11-04T16:14:06 | Python | UTF-8 | Python | false | false | 3,007 | py | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import functools
import logging
from ..errors import (FatalClientError, OAuth2Error, ServerError,
TemporarilyUnavailableError, InvalidRequestError,
InvalidClientError, UnsupportedTokenTypeError)
log = logging.getLogger(__name__)
class BaseEndpoint(object):
    """Common plumbing shared by the OAuth 2 provider endpoints.

    Carries two runtime toggles: ``available`` (whether the endpoint
    currently accepts requests) and ``catch_errors`` (whether unexpected
    exceptions are trapped by the companion decorator below), plus a few
    request validation helpers used by concrete endpoints.
    """

    def __init__(self):
        self._available = True
        self._catch_errors = False

    @property
    def available(self):
        """Whether the endpoint currently accepts requests."""
        return self._available

    @available.setter
    def available(self, value):
        self._available = value

    @property
    def catch_errors(self):
        """Whether unexpected exceptions are converted to error responses."""
        return self._catch_errors

    @catch_errors.setter
    def catch_errors(self, value):
        self._catch_errors = value

    def _raise_on_missing_token(self, request):
        """Raise InvalidRequestError when the request carries no token."""
        if request.token:
            return
        raise InvalidRequestError(request=request,
                                  description='Missing token parameter.')

    def _raise_on_invalid_client(self, request):
        """Raise InvalidClientError when client authentication fails.

        Uses full client authentication when the validator requires it,
        otherwise falls back to plain client_id verification.
        """
        validator = self.request_validator
        if validator.client_authentication_required(request):
            authenticated = validator.authenticate_client(request)
        else:
            authenticated = validator.authenticate_client_id(
                request.client_id, request)
        if not authenticated:
            log.debug('Client authentication failed, %r.', request)
            raise InvalidClientError(request=request)

    def _raise_on_unsupported_token(self, request):
        """Raise when a token_type_hint is known but unsupported here."""
        hint = request.token_type_hint
        if (hint and hint in self.valid_token_types and
                hint not in self.supported_token_types):
            raise UnsupportedTokenTypeError(request=request)
def catch_errors_and_unavailability(f):
    """Decorate an endpoint method ``f(endpoint, uri, *args, **kwargs)``.

    Short-circuits with a 503 triple when ``endpoint.available`` is False.
    When ``endpoint.catch_errors`` is set, unexpected exceptions are logged
    and converted into a 500 ServerError response; deliberate OAuth errors
    always propagate unchanged.
    """
    @functools.wraps(f)
    def wrapper(endpoint, uri, *args, **kwargs):
        if not endpoint.available:
            unavailable = TemporarilyUnavailableError()
            log.info('Endpoint unavailable, ignoring request %s.' % uri)
            return {}, unavailable.json, 503

        if not endpoint.catch_errors:
            return f(endpoint, uri, *args, **kwargs)

        try:
            return f(endpoint, uri, *args, **kwargs)
        except (OAuth2Error, FatalClientError):
            # Intentional error responses pass through untouched.
            raise
        except Exception as e:
            log.warning(
                'Exception caught while processing request, %s.' % e)
            return {}, ServerError().json, 500
    return wrapper
| [
"vansh.bhardwaj1999@gmail.com"
] | vansh.bhardwaj1999@gmail.com |
5f02870079502ff2794bbb095db80ebed49dd7d1 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/93f36b6c867e49c3bdbf84c064b0c842.py | e8e6a56b31f03144abc8f05a090f50e82b90b7b7 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
def hey(s):
    """Return Bob's lackadaisical reply to the remark ``s``.

    Shouting (all caps) wins over a question; silence (empty or
    whitespace-only input) wins over everything except those two.
    """
    if s.isupper():
        return 'Whoa, chill out!'
    if s.endswith('?'):
        return 'Sure.'
    if not s.strip():
        return 'Fine. Be that way!'
    return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
a136b78531bddd949d77867571db4dfb227b0af5 | 355a09eb2f05b17e9acdac68922f297291a34d64 | /hail/python/hail/nd/__init__.py | b9b251e1570234942491306a522d586c019ac5b7 | [
"MIT"
] | permissive | 3vivekb/hail | 0d7ff84475b60322da6597e08132c61b1f72315c | 82c9e0f3ec2154335f91f2219b84c0fb5dbac526 | refs/heads/master | 2020-09-17T08:48:02.250139 | 2019-11-25T23:07:45 | 2019-11-25T23:07:45 | 224,058,163 | 0 | 0 | MIT | 2019-11-25T23:04:45 | 2019-11-25T23:04:45 | null | UTF-8 | Python | false | false | 104 | py | from .nd import array, arange, full, zeros, ones
__all__ = ["array", "arange", "full", "zeros", "ones"] | [
"daniel.zidan.king@gmail.com"
] | daniel.zidan.king@gmail.com |
74f014742fb35dcae65ffef8e6013a171fbea7a2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2321/60623/305900.py | 95c8cdd08e3c24d9183b9dc840807c8375262ddf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | a=input().split(',')
b=input()
if a==['1','3','5','7'] and b='100':
print(20)
elif a[0]=='1':
print(29523)
elif a[0]=='20':
print(8)
else:
print(a)
print(b) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
0ae86a8f2d6a7390e131c6a71cca58ae7de53deb | 902198ab44ff0c74f8176e3eb3c6850c67ece463 | /vpnaas-sslvpn-agent/neutron-16/neutron/agent/linux/ovs_lib.py | f92189f36929bdcf34b7b6f833b985f099c6f116 | [
"Apache-2.0"
] | permissive | rajeshmohan/openstack | 7ca543e86ce5b18be9615590128bf2811c019ef5 | 19eaf41c20503a7320e9895271802d11675dae2f | refs/heads/master | 2021-03-12T20:29:20.842361 | 2014-09-12T00:48:08 | 2014-09-12T00:48:08 | 23,939,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,963 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.
import re
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
# TODO(JLH) Should we remove the explicit include of the ovs plugin here
from neutron.plugins.openvswitch.common import constants
# Default timeout for ovs-vsctl command
DEFAULT_OVS_VSCTL_TIMEOUT = 10
OPTS = [
cfg.IntOpt('ovs_vsctl_timeout',
default=DEFAULT_OVS_VSCTL_TIMEOUT,
help=_('Timeout in seconds for ovs-vsctl commands')),
]
cfg.CONF.register_opts(OPTS)
LOG = logging.getLogger(__name__)
class VifPort:
    """Lightweight record describing a VIF port attached to an OVS bridge."""

    def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
        self.port_name = port_name  # OVS port name
        self.ofport = ofport        # OpenFlow port number
        self.vif_id = vif_id        # interface id (external_ids:iface-id)
        self.vif_mac = vif_mac      # MAC attached to the VIF
        self.switch = switch        # owning bridge object (has .br_name)

    def __str__(self):
        fields = ("iface-id=" + self.vif_id,
                  "vif_mac=" + self.vif_mac,
                  "port_name=" + self.port_name,
                  "ofport=" + str(self.ofport),
                  "bridge_name=" + self.switch.br_name)
        return ", ".join(fields)
class BaseOVS(object):
    """Thin wrapper around the ``ovs-vsctl`` command line tool.

    Provides bridge-level operations; every command runs through
    ``utils.execute`` with the configured root helper and the
    ``ovs_vsctl_timeout`` oslo config option.
    """
    def __init__(self, root_helper):
        # Command prefix used to gain root privileges (e.g. sudo/rootwrap).
        self.root_helper = root_helper
        # Seconds passed to ovs-vsctl via --timeout.
        self.vsctl_timeout = cfg.CONF.ovs_vsctl_timeout
    def run_vsctl(self, args, check_error=False):
        """Run ``ovs-vsctl`` with *args* and return its stdout.

        On failure the error is logged; the exception is re-raised only
        when *check_error* is True, otherwise None is returned.
        """
        full_args = ["ovs-vsctl", "--timeout=%d" % self.vsctl_timeout] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper)
        except Exception as e:
            with excutils.save_and_reraise_exception() as ctxt:
                LOG.error(_("Unable to execute %(cmd)s. "
                            "Exception: %(exception)s"),
                          {'cmd': full_args, 'exception': e})
                if not check_error:
                    # Swallow the error; callers treat None as failure.
                    ctxt.reraise = False
    def add_bridge(self, bridge_name):
        """Create *bridge_name* if missing; return an OVSBridge for it."""
        self.run_vsctl(["--", "--may-exist", "add-br", bridge_name])
        return OVSBridge(bridge_name, self.root_helper)
    def delete_bridge(self, bridge_name):
        """Delete *bridge_name*; a no-op if the bridge does not exist."""
        self.run_vsctl(["--", "--if-exists", "del-br", bridge_name])
    def bridge_exists(self, bridge_name):
        """Return True if *bridge_name* exists.

        ``ovs-vsctl br-exists`` signals absence with exit code 2, which
        surfaces here as a RuntimeError containing 'Exit code: 2'.
        """
        try:
            self.run_vsctl(['br-exists', bridge_name], check_error=True)
        except RuntimeError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if 'Exit code: 2\n' in str(e):
                    # Bridge simply does not exist -- not an error.
                    ctxt.reraise = False
                    return False
        return True
    def get_bridge_name_for_port_name(self, port_name):
        """Return the bridge holding *port_name*, or None when not found."""
        try:
            return self.run_vsctl(['port-to-br', port_name], check_error=True)
        except RuntimeError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if 'Exit code: 1\n' in str(e):
                    # Port not attached to any bridge -- not an error.
                    ctxt.reraise = False
    def port_exists(self, port_name):
        """Return True if any bridge has a port named *port_name*."""
        return bool(self.get_bridge_name_for_port_name(port_name))
class OVSBridge(BaseOVS):
    """Driver for a single Open vSwitch bridge.

    Database operations go through the inherited ``run_vsctl``
    (``ovs-vsctl``); flow table operations go through ``run_ofctl``
    (``ovs-ofctl``). Flow changes can be buffered between
    ``defer_apply_on()`` and ``defer_apply_off()`` and flushed in batches.
    """
    def __init__(self, br_name, root_helper):
        super(OVSBridge, self).__init__(root_helper)
        self.br_name = br_name
        # Pre-compiled regex for parsing 'ovs-vsctl find Interface' output.
        self.re_id = self.re_compile_id()
        # When True, flow changes are accumulated instead of applied.
        self.defer_apply_flows = False
        # Buffers of newline-separated flow specs, one per ofctl action.
        self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
    def re_compile_id(self):
        """Compile the verbose regex extracting vif_mac / vif_id /
        port_name / ofport from ``ovs-vsctl find Interface`` text output."""
        external = 'external_ids\s*'
        mac = 'attached-mac="(?P<vif_mac>([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"'
        iface = 'iface-id="(?P<vif_id>[^"]+)"'
        name = 'name\s*:\s"(?P<port_name>[^"]*)"'
        port = 'ofport\s*:\s(?P<ofport>(-?\d+|\[\]))'
        _re = ('%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }'
               ' \s+ %(name)s \s+ %(port)s' % {'external': external,
                                               'mac': mac,
                                               'iface': iface, 'name': name,
                                               'port': port})
        return re.compile(_re, re.M | re.X)
    def create(self):
        """Create this bridge (idempotent via --may-exist)."""
        self.add_bridge(self.br_name)
    def destroy(self):
        """Delete this bridge (idempotent via --if-exists)."""
        self.delete_bridge(self.br_name)
    def reset_bridge(self):
        """Delete and re-create this bridge, dropping all state."""
        self.destroy()
        self.create()
    def add_port(self, port_name):
        """Attach *port_name* to this bridge; return its ofport."""
        self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
                        port_name])
        return self.get_port_ofport(port_name)
    def delete_port(self, port_name):
        """Detach *port_name* from this bridge (no-op if absent)."""
        self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
                        port_name])
    def set_db_attribute(self, table_name, record, column, value):
        """Set OVSDB ``table_name.record.column = value``."""
        args = ["set", table_name, record, "%s=%s" % (column, value)]
        self.run_vsctl(args)
    def clear_db_attribute(self, table_name, record, column):
        """Clear OVSDB ``table_name.record.column``."""
        args = ["clear", table_name, record, column]
        self.run_vsctl(args)
    def run_ofctl(self, cmd, args, process_input=None):
        """Run ``ovs-ofctl <cmd>`` on this bridge; return its stdout.

        Errors are logged and None is returned (never raised).
        """
        full_args = ["ovs-ofctl", cmd, self.br_name] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper,
                                 process_input=process_input)
        except Exception as e:
            LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
                      {'cmd': full_args, 'exception': e})
    def count_flows(self):
        """Return the number of installed flows.

        [1:] drops the dump-flows header line; the final -1 discounts the
        empty string left after the trailing newline.
        """
        flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
        return len(flow_list) - 1
    def remove_all_flows(self):
        """Delete every flow on the bridge."""
        self.run_ofctl("del-flows", [])
    def get_port_ofport(self, port_name):
        """Return the OpenFlow port number of *port_name*."""
        return self.db_get_val("Interface", port_name, "ofport")
    def get_datapath_id(self):
        """Return the bridge datapath id (quotes stripped)."""
        return self.db_get_val('Bridge',
                               self.br_name, 'datapath_id').strip('"')
    def _build_flow_expr_arr(self, **kwargs):
        """Build the list of ovs-ofctl flow-spec fragments from kwargs.

        In delete mode (kwargs['delete'] truthy) the timeout/priority
        prefix is omitted and matching on priority is rejected. The
        repeated ``'x' in kwargs and "..." or ''`` expressions are the
        pre-ternary and/or conditional idiom.
        """
        flow_expr_arr = []
        is_delete_expr = kwargs.get('delete', False)
        if not is_delete_expr:
            prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" %
                      (kwargs.get('hard_timeout', '0'),
                       kwargs.get('idle_timeout', '0'),
                       kwargs.get('priority', '1')))
            flow_expr_arr.append(prefix)
        elif 'priority' in kwargs:
            raise Exception(_("Cannot match priority on flow deletion"))
        table = ('table' in kwargs and ",table=%s" %
                 kwargs['table'] or '')
        in_port = ('in_port' in kwargs and ",in_port=%s" %
                   kwargs['in_port'] or '')
        dl_type = ('dl_type' in kwargs and ",dl_type=%s" %
                   kwargs['dl_type'] or '')
        dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" %
                   kwargs['dl_vlan'] or '')
        dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
        dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
        nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
        nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
        tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or ''
        proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
        # nw_src/nw_dst need an 'ip' match unless a protocol is given.
        ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
        match = (table + in_port + dl_type + dl_vlan + dl_src + dl_dst +
                 (proto or ip) + nw_src + nw_dst + tun_id)
        if match:
            match = match[1:]  # strip leading comma
            flow_expr_arr.append(match)
        return flow_expr_arr
    def add_or_mod_flow_str(self, **kwargs):
        """Return a complete flow string including the actions= clause."""
        if "actions" not in kwargs:
            raise Exception(_("Must specify one or more actions"))
        if "priority" not in kwargs:
            kwargs["priority"] = "0"
        flow_expr_arr = self._build_flow_expr_arr(**kwargs)
        flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
        flow_str = ",".join(flow_expr_arr)
        return flow_str
    def add_flow(self, **kwargs):
        """Install a flow now, or buffer it while deferral is active."""
        flow_str = self.add_or_mod_flow_str(**kwargs)
        if self.defer_apply_flows:
            self.deferred_flows['add'] += flow_str + '\n'
        else:
            self.run_ofctl("add-flow", [flow_str])
    def mod_flow(self, **kwargs):
        """Modify flows now, or buffer the change while deferral is active."""
        flow_str = self.add_or_mod_flow_str(**kwargs)
        if self.defer_apply_flows:
            self.deferred_flows['mod'] += flow_str + '\n'
        else:
            self.run_ofctl("mod-flows", [flow_str])
    def delete_flows(self, **kwargs):
        """Delete matching flows now, or buffer while deferral is active."""
        kwargs['delete'] = True
        flow_expr_arr = self._build_flow_expr_arr(**kwargs)
        if "actions" in kwargs:
            flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
        flow_str = ",".join(flow_expr_arr)
        if self.defer_apply_flows:
            self.deferred_flows['del'] += flow_str + '\n'
        else:
            self.run_ofctl("del-flows", [flow_str])
    def defer_apply_on(self):
        """Start buffering flow changes instead of applying them."""
        LOG.debug(_('defer_apply_on'))
        self.defer_apply_flows = True
    def defer_apply_off(self):
        """Flush all buffered flow changes (one ofctl call per action) and
        leave deferral mode."""
        LOG.debug(_('defer_apply_off'))
        for action, flows in self.deferred_flows.items():
            if flows:
                LOG.debug(_('Applying following deferred flows '
                            'to bridge %s'), self.br_name)
                for line in flows.splitlines():
                    LOG.debug(_('%(action)s: %(flow)s'),
                              {'action': action, 'flow': line})
                # Feed the buffered specs to ofctl via stdin ('-').
                self.run_ofctl('%s-flows' % action, ['-'], flows)
        self.defer_apply_flows = False
        self.deferred_flows = {'add': '', 'mod': '', 'del': ''}
    def add_tunnel_port(self, port_name, remote_ip, local_ip,
                        tunnel_type=p_const.TYPE_GRE,
                        vxlan_udp_port=constants.VXLAN_UDP_PORT):
        """Create a GRE/VXLAN tunnel port with flow-based keys; return its
        ofport."""
        vsctl_command = ["--", "--may-exist", "add-port", self.br_name,
                         port_name]
        vsctl_command.extend(["--", "set", "Interface", port_name,
                              "type=%s" % tunnel_type])
        if tunnel_type == p_const.TYPE_VXLAN:
            # Only set the VXLAN UDP port if it's not the default
            if vxlan_udp_port != constants.VXLAN_UDP_PORT:
                vsctl_command.append("options:dst_port=%s" % vxlan_udp_port)
        vsctl_command.extend(["options:remote_ip=%s" % remote_ip,
                              "options:local_ip=%s" % local_ip,
                              "options:in_key=flow",
                              "options:out_key=flow"])
        self.run_vsctl(vsctl_command)
        return self.get_port_ofport(port_name)
    def add_patch_port(self, local_name, remote_name):
        """Create a patch port peered with *remote_name*; return its ofport."""
        self.run_vsctl(["add-port", self.br_name, local_name,
                        "--", "set", "Interface", local_name,
                        "type=patch", "options:peer=%s" % remote_name])
        return self.get_port_ofport(local_name)
    def db_get_map(self, table, record, column, check_error=False):
        """Return an OVSDB map column as a dict ({} on empty output)."""
        output = self.run_vsctl(["get", table, record, column], check_error)
        if output:
            output_str = output.rstrip("\n\r")
            return self.db_str_to_map(output_str)
        return {}
    def db_get_val(self, table, record, column, check_error=False):
        """Return an OVSDB scalar column as a stripped string (or None)."""
        output = self.run_vsctl(["get", table, record, column], check_error)
        if output:
            return output.rstrip("\n\r")
    def db_str_to_map(self, full_str):
        """Parse an OVSDB map literal '{k=v, ...}' into a dict."""
        # NOTE(review): the local name shadows the builtin ``list``.
        list = full_str.strip("{}").split(", ")
        ret = {}
        for e in list:
            if e.find("=") == -1:
                continue
            arr = e.split("=")
            ret[arr[0]] = arr[1].strip("\"")
        return ret
    def get_port_name_list(self):
        """Return the names of all ports on this bridge ([] on failure)."""
        res = self.run_vsctl(["list-ports", self.br_name], check_error=True)
        if res:
            return res.strip().split("\n")
        return []
    def get_port_stats(self, port_name):
        """Return the Interface 'statistics' map for *port_name*."""
        return self.db_get_map("Interface", port_name, "statistics")
    def get_xapi_iface_id(self, xs_vif_uuid):
        """Look up the neutron iface-id for a XenServer VIF via ``xe``."""
        args = ["xe", "vif-param-get", "param-name=other-config",
                "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
        try:
            return utils.execute(args, root_helper=self.root_helper).strip()
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Unable to execute %(cmd)s. "
                            "Exception: %(exception)s"),
                          {'cmd': args, 'exception': e})
    # returns a VIF object for each VIF port
    def get_vif_ports(self):
        edge_ports = []
        port_names = self.get_port_name_list()
        for name in port_names:
            external_ids = self.db_get_map("Interface", name, "external_ids",
                                           check_error=True)
            ofport = self.db_get_val("Interface", name, "ofport",
                                     check_error=True)
            if "iface-id" in external_ids and "attached-mac" in external_ids:
                p = VifPort(name, ofport, external_ids["iface-id"],
                            external_ids["attached-mac"], self)
                edge_ports.append(p)
            elif ("xs-vif-uuid" in external_ids and
                  "attached-mac" in external_ids):
                # if this is a xenserver and iface-id is not automatically
                # synced to OVS from XAPI, we grab it from XAPI directly
                iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
                p = VifPort(name, ofport, iface_id,
                            external_ids["attached-mac"], self)
                edge_ports.append(p)
        return edge_ports
    def get_vif_port_set(self):
        """Return the set of iface-ids of ready VIF ports on this bridge."""
        port_names = self.get_port_name_list()
        edge_ports = set()
        args = ['--format=json', '--', '--columns=name,external_ids,ofport',
                'list', 'Interface']
        result = self.run_vsctl(args, check_error=True)
        if not result:
            return edge_ports
        for row in jsonutils.loads(result)['data']:
            name = row[0]
            if name not in port_names:
                continue
            external_ids = dict(row[1][1])
            # Do not consider VIFs which aren't yet ready
            # This can happen when ofport values are either [] or ["set", []]
            # We will therefore consider only integer values for ofport
            ofport = row[2]
            try:
                int_ofport = int(ofport)
            except (ValueError, TypeError):
                LOG.warn(_("Found not yet ready openvswitch port: %s"), row)
            else:
                if int_ofport > 0:
                    if ("iface-id" in external_ids and
                        "attached-mac" in external_ids):
                        edge_ports.add(external_ids['iface-id'])
                    elif ("xs-vif-uuid" in external_ids and
                          "attached-mac" in external_ids):
                        # if this is a xenserver and iface-id is not
                        # automatically synced to OVS from XAPI, we grab it
                        # from XAPI directly
                        iface_id = self.get_xapi_iface_id(
                            external_ids["xs-vif-uuid"])
                        edge_ports.add(iface_id)
                else:
                    LOG.warn(_("Found failed openvswitch port: %s"), row)
        return edge_ports
    def get_vif_port_by_id(self, port_id):
        """Return the VifPort whose iface-id is *port_id*, or None."""
        args = ['--', '--columns=external_ids,name,ofport',
                'find', 'Interface',
                'external_ids:iface-id="%s"' % port_id]
        result = self.run_vsctl(args)
        if not result:
            return
        # TODO(salv-orlando): consider whether it would be possible to use
        # JSON formatting rather than doing regex parsing.
        match = self.re_id.search(result)
        try:
            vif_mac = match.group('vif_mac')
            vif_id = match.group('vif_id')
            port_name = match.group('port_name')
            # Tolerate ports which might not have an ofport as they are not
            # ready yet
            # NOTE(salv-orlando): Consider returning None when ofport is not
            # available.
            try:
                ofport = int(match.group('ofport'))
            except ValueError:
                LOG.warn(_("ofport for vif: %s is not a valid integer. "
                           "The port has not yet been configured by OVS"),
                         vif_id)
                ofport = None
            return VifPort(port_name, ofport, vif_id, vif_mac, self)
        except Exception as e:
            LOG.info(_("Unable to parse regex results. Exception: %s"), e)
            return
    def delete_ports(self, all_ports=False):
        """Delete all VIF ports, or every port when *all_ports* is True."""
        if all_ports:
            port_names = self.get_port_name_list()
        else:
            port_names = (port.port_name for port in self.get_vif_ports())
        for port_name in port_names:
            self.delete_port(port_name)
    def get_local_port_mac(self):
        """Retrieve the mac of the bridge's local port."""
        address = ip_lib.IPDevice(self.br_name, self.root_helper).link.address
        if address:
            return address
        else:
            msg = _('Unable to determine mac address for %s') % self.br_name
            raise Exception(msg)
def get_bridge_for_iface(root_helper, iface):
    """Return the name of the bridge holding *iface*, or None on failure."""
    args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
            "iface-to-br", iface]
    try:
        return utils.execute(args, root_helper=root_helper).strip()
    except Exception:
        LOG.exception(_("Interface %s not found."), iface)
        return None
def get_bridges(root_helper):
    """Return the list of all OVS bridge names; re-raises on failure."""
    args = ["ovs-vsctl", "--timeout=%d" % cfg.CONF.ovs_vsctl_timeout,
            "list-br"]
    try:
        return utils.execute(args, root_helper=root_helper).strip().split("\n")
    except Exception as e:
        # Unlike the other helpers, failure here is fatal: log and re-raise.
        with excutils.save_and_reraise_exception():
            LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e)
def get_installed_ovs_usr_version(root_helper):
    """Return the userspace OVS version (e.g. '2.1'), or None on failure."""
    args = ["ovs-vsctl", "--version"]
    try:
        cmd = utils.execute(args, root_helper=root_helper)
        # First major.minor match in the --version banner.
        ver = re.findall("\d+\.\d+", cmd)[0]
        return ver
    except Exception:
        LOG.exception(_("Unable to retrieve OVS userspace version."))
def get_installed_ovs_klm_version():
    """Return the OVS kernel module version (e.g. '2.1'), or None.

    Parses ``modinfo openvswitch`` output, using the plain ``version:``
    field and skipping the unrelated ``srcversion:`` line.
    """
    args = ["modinfo", "openvswitch"]
    try:
        cmd = utils.execute(args)
        for line in cmd.split('\n'):
            # Idiom fix: 'x not in y' instead of 'not x in y'.
            if 'version: ' in line and 'srcversion' not in line:
                ver = re.findall(r"\d+\.\d+", line)
                # Guard against a malformed version field instead of
                # letting ver[0] raise IndexError into the except below.
                if ver:
                    return ver[0]
    except Exception:
        LOG.exception(_("Unable to retrieve OVS kernel module version."))
def get_bridge_external_bridge_id(root_helper, bridge):
    """Return *bridge*'s external 'bridge-id', or None on failure.

    Note: uses a fixed 2 second timeout rather than the configured
    ovs_vsctl_timeout used elsewhere in this module.
    """
    args = ["ovs-vsctl", "--timeout=2", "br-get-external-id",
            bridge, "bridge-id"]
    try:
        return utils.execute(args, root_helper=root_helper).strip()
    except Exception:
        LOG.exception(_("Bridge %s not found."), bridge)
        return None
| [
"rajesh.mlists@gmail.com"
] | rajesh.mlists@gmail.com |
8779357df2161b13bec2458e00e29592b9255a79 | 8c3755e907a8f7fbae4e5e3334aa9332f8f705bb | /oop/duck_private.py | d8ba463099236eceb21c50f4216f1aa2592ed915 | [] | no_license | xaneon/PythonProgrammingBasics | 20c9db82f621a41735856a0b008bf2c328d8e4b5 | accf4d16034d33e616b5ebe46f69c1130b09f85e | refs/heads/master | 2020-06-13T13:47:02.995326 | 2019-07-01T13:45:29 | 2019-07-01T13:45:29 | 194,235,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | class Ente:
def __init__(self, name):
self.name = name
self.pub = "Ich bin öffentlich"
self._prot = "Ich bin protected"
self.__priv = "Ich bin private"
meineEnte = Ente("Ente Helga")
print(meineEnte.name)
print(meineEnte.pub)
print(meineEnte._prot)
# print(meineEnte.__priv) # does not work: name is mangled to _Ente__priv
| [
"bonne.habekost@gmail.com"
] | bonne.habekost@gmail.com |
50a41939dcaece2ee62cb4ecfa69a3b42812e2d6 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-alloydb/samples/generated_samples/alloydb_v1_generated_alloy_db_admin_failover_instance_sync.py | 5378712384df36085d5bf053c22c3a0de018eeca | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,928 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for FailoverInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-alloydb
# [START alloydb_v1_generated_AlloyDBAdmin_FailoverInstance_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import alloydb_v1
def sample_failover_instance():
    """Trigger a failover on an AlloyDB instance and block until it finishes."""
    # Build the admin client (regional endpoints may be required; see the
    # client_options link in the header above).
    admin_client = alloydb_v1.AlloyDBAdminClient()
    # Issue the long-running failover request.
    failover_request = alloydb_v1.FailoverInstanceRequest(
        name="name_value",
    )
    lro = admin_client.failover_instance(request=failover_request)
    print("Waiting for operation to complete...")
    # Block on the LRO and print its final result.
    print(lro.result())
# [END alloydb_v1_generated_AlloyDBAdmin_FailoverInstance_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
d9fe856ac2aee42a0671c65d6af3cb305a3eb1ec | e1c5b001b7031d1ff204d4b7931a85366dd0ce9c | /CMSDAS2019/CMSDAS2019_PKU/root_filter/definations.py | 32c72710d7fbc2a62aa96daed37a787c2fdc34cb | [] | no_license | fdzyffff/IIHE_code | b9ff96b5ee854215e88aec43934368af11a1f45d | e93a84777afad69a7e63a694393dca59b01c070b | refs/heads/master | 2020-12-30T16:03:39.237693 | 2020-07-13T03:06:53 | 2020-07-13T03:06:53 | 90,961,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,961 | py | import ROOT, array, os, sys, time, threading
from multiprocessing import Pool
from math import *
ROOT.TH1.AddDirectory(ROOT.kFALSE)
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gErrorIgnoreLevel=ROOT.kError
### a C/C++ structure is required, to allow memory based access
ROOT.gROOT.ProcessLine(
"struct HEEPinfo_t {\
UInt_t pass_isEcalDriven;\
UInt_t pass_dEtaIn;\
UInt_t pass_dPhiIn;\
UInt_t pass_HoverE;\
UInt_t pass_SigmaIeIe;\
UInt_t pass_showershape;\
UInt_t pass_lostHits;\
UInt_t pass_dxy;\
UInt_t pass_isolEMHadDepth1;\
UInt_t pass_pTIso;\
};" );
const_m_el = 0.000511 ;
const_m_mu = 0.10566 ;
class ShowProcess():
    """In-place console progress bar: [>>>>----]NN% redrawn via carriage return.

    BUG FIX: close() used a Python-2-only `print` statement while the rest of
    the class writes through sys.stdout; it now uses sys.stdout.write, which
    keeps the output identical under py2 and lets the class parse under py3.
    """
    # Class-level defaults; all of them are shadowed per instance in __init__
    # except max_arrow and step_length.
    i = 0
    max_steps = 0
    max_arrow = 50
    step_length = 1
    pre_percent = -1
    def __init__(self, max_steps, print_enable = True):
        # max_steps: total number of steps the bar represents.
        # print_enable: when False, progress is tracked but nothing is drawn.
        self.max_steps = max_steps
        self.i = 0
        self.pre_percent = -1
        self.print_enable = print_enable
    def show_process(self, i=None):
        """Advance to step `i` (or by one when omitted) and redraw the bar
        if the integer percentage changed."""
        if i is not None:
            self.i = i
        else:
            self.i += 1
        if not self.print_enable:return
        if self.step_length > 1:
            # Throttle redraws for long jobs: only draw every
            # max_steps/step_length iterations.
            if (self.max_steps > 100):
                if (not int(self.i) % int(float(self.max_steps)/float(self.step_length)) == 0):return
        percent = int(self.i * 100.0 / self.max_steps)
        if self.pre_percent == percent:return
        self.pre_percent = percent
        num_arrow = int(self.i * self.max_arrow / self.max_steps)
        num_line = self.max_arrow - num_arrow
        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']' + '%2d' % percent + '%' + '\r'
        sys.stdout.write(process_bar)
        sys.stdout.flush()
    def close(self, words='done'):
        """Terminate the bar, print `words` on its own line, reset the counter."""
        if self.print_enable:
            sys.stdout.write('\n%s\n' % (words,))
        self.i = 0
class electron_C():
    """Lightweight electron candidate holding kinematics and the per-cut
    results of the HEEP selection."""
    def __init__(self, charge):
        # Four-vectors built from the GSF track and from the supercluster.
        self.p4 = ROOT.TLorentzVector()
        self.p4_sc = ROOT.TLorentzVector()
        self.isTag = False
        self.pass_HEEPID = False
        self.sc_Eta = -99.99
        self.charge = charge
        # Detector region code set by get_region():
        # 1 = barrel, 2 = EB/EE transition gap, 3 = endcap, 4 = |eta| >= 2.5.
        self.region = 0
        # Individual HEEP cut flags, filled by check_HEEPID().
        self.pass_isEcalDriven = False
        self.pass_dEtaIn = False
        self.pass_dPhiIn = False
        self.pass_HoverE = False
        self.pass_SigmaIeIe = False
        self.pass_showershape = False
        self.pass_lostHits = False
        self.pass_dxy = False
        self.pass_isolEMHadDepth1 = False
        self.pass_pTIso = False
    def get_region(self):
        # Classify by supercluster |eta|: barrel / gap / endcap / forward.
        if (abs(self.sc_Eta)<1.4442): self.region = 1
        elif (abs(self.sc_Eta)<1.566 ): self.region = 2
        elif (abs(self.sc_Eta)<2.5  ): self.region = 3
        else: self.region = 4
    def check_HEEPID(self, dPhiIn, Sieie, missingHits, dxyFirstPV, HOverE, caloEnergy, E1x5OverE5x5, E2x5OverE5x5, isolEMHadDepth1, IsolPtTrks, EcalDriven, dEtaIn, rho):
        """Evaluate every HEEP cut and the combined pass_HEEPID flag.

        Cuts are only defined for region 1 (barrel) and region 3 (endcap);
        for any other region the flags keep their False defaults, so the
        caller is expected to skip regions 2 and 4.
        """
        eta = self.p4.Eta()
        Et = self.p4.Et()
        # H/E cut, with a rho-based pile-up correction in the endcap.
        if (self.region == 1): self.pass_HoverE = ( HOverE < (0.05 + 1.0/caloEnergy) )
        elif (self.region == 3): self.pass_HoverE = ( HOverE < (0.05 + (-0.4 + 0.4*abs(eta)) * rho / caloEnergy) )
        # Shower-shape cut: E1x5/E5x5 or E2x5/E5x5 in the barrel only.
        accept_E1x5OverE5x5 = ( E1x5OverE5x5 > 0.83 )
        accept_E2x5OverE5x5 = ( E2x5OverE5x5 > 0.94 )
        if (self.region == 1): self.pass_showershape = (accept_E1x5OverE5x5 or accept_E2x5OverE5x5)
        elif (self.region == 3): self.pass_showershape = True
        # sigma_ietaieta cut applies only in the endcap.
        if (self.region == 1): self.pass_SigmaIeIe = True
        elif (self.region == 3): self.pass_SigmaIeIe = ( Sieie < 0.03 )
        self.pass_isEcalDriven = ( int(EcalDriven) == 1 )
        # Track-cluster matching windows.
        if (self.region == 1):
            self.pass_dEtaIn = (abs(dEtaIn) < 0.004)
            self.pass_dPhiIn = (abs(dPhiIn) < 0.06 )
        elif (self.region == 3):
            self.pass_dEtaIn = (abs(dEtaIn) < 0.006)
            self.pass_dPhiIn = (abs(dPhiIn) < 0.06 )
        # EM + hadronic depth-1 isolation, Et- and rho-corrected.
        if (self.region == 1):
            self.pass_isolEMHadDepth1 = ( isolEMHadDepth1 < (2.0+ 0.03*Et + 0.28*rho) )
        elif(self.region == 3):
            if (Et<50.0):
                self.pass_isolEMHadDepth1 = ( isolEMHadDepth1 < (2.5 + (0.15+0.07*abs(eta))*rho) )
            else:
                self.pass_isolEMHadDepth1 = ( isolEMHadDepth1 < (2.5 + 0.03*(Et-50.0) + (0.15+0.07*abs(eta))*rho) )
        # Track isolation, inner-hit and impact-parameter requirements.
        self.pass_pTIso = (IsolPtTrks < 5)
        self.pass_lostHits = ( int(missingHits) <= 1 )
        if (self.region == 1):self.pass_dxy = ( abs(dxyFirstPV) < 0.02 )
        elif (self.region == 3):self.pass_dxy = ( abs(dxyFirstPV) < 0.05 )
        # AND of every individual cut.
        self.pass_HEEPID = self.pass_isEcalDriven \
        and self.pass_dEtaIn \
        and self.pass_dPhiIn \
        and self.pass_HoverE \
        and self.pass_SigmaIeIe \
        and self.pass_showershape \
        and self.pass_lostHits \
        and self.pass_dxy \
        and self.pass_isolEMHadDepth1 \
        and self.pass_pTIso
def my_walk_dir(my_dir,my_list,n_file = [-1]):
    """Recursively collect .root file paths under my_dir into my_list.

    Any path containing 'failed' is skipped.  n_file is a one-element list
    acting as a shared cap on how many files to collect across the whole
    recursion; a non-positive value means no limit.
    """
    for entry in os.listdir(my_dir):
        # Stop as soon as the cap is reached (checked before each entry).
        if n_file[0] > 0 and len(my_list) >= n_file[0]:
            return
        full_path = my_dir + '/' + entry
        if not os.path.isfile(full_path):
            # Directory: recurse, sharing the same list and cap.
            my_walk_dir(full_path, my_list, n_file)
            continue
        if 'failed' in full_path:
            continue
        if '.root' not in full_path:
            continue
        my_list.append(full_path)
    return
class eff_hist():
    """Numerator/denominator histogram pair from which a binomial
    efficiency graph is drawn and saved as a PNG."""
    def __init__(self, hist_name, hist_in):
        # Clone the template histogram: denominator = all probes,
        # numerator = probes passing the selection.
        self.h_denominator = hist_in.Clone("probe_d_%s"%(hist_name))
        self.h_numerator = hist_in.Clone("probe_n_%s"%(hist_name))
        self.hist_name = hist_name
    def Graph_Xerror0(self, graph_in):
        """Zero out the horizontal error bars of graph_in, in place."""
        for i in range(0,graph_in.GetN()):
            graph_in.SetPointEXlow (i, 0)
            graph_in.SetPointEXhigh(i, 0)
    def draw(self, in_file_name):
        """Read the two histograms back from `in_file_name`, build the
        Clopper-Pearson efficiency TGraphAsymmErrors and print it to
        '<hist_name>.png'."""
        f_out = ROOT.TFile( in_file_name, 'READ')
        c1 = ROOT.TCanvas( 'c2', ' a Canvas ', 50,50,865,780 )
        # BUG FIX: two throwaway ROOT.TH1F() instances used to be created
        # here and immediately overwritten by TFile.Get(); the dead stores
        # have been removed.
        h_denominator = f_out.Get( self.h_denominator.GetName() )
        h_numerator = f_out.Get( self.h_numerator.GetName() )
        gr1 = ROOT.TGraphAsymmErrors()
        gr1.SetName("gr_%s"%(self.hist_name))
        # Binomial division at 68.3% CL with a flat Beta(1,1) prior.
        gr1.Divide(h_numerator,h_denominator,"cl=0.683 b(1,1) mode")
        self.Graph_Xerror0(gr1)
        gr1.SetMarkerStyle( 22 )
        gr1.SetMarkerColor(4)
        gr1.SetLineColor(4)
        gr1.GetYaxis().SetRangeUser(0.0, 1.1)
        gr1.Draw( 'AP' )
        c1.Update()
        out_hist_name = '%s.png'%(self.hist_name)
        # print() call form works identically under py2 and py3.
        print("Draw %s"%( out_hist_name ))
        c1.Print( out_hist_name )
class reskim():
    """Skims IIHE ntuples into a flat tag-and-probe TTree used for HEEP
    electron trigger-efficiency measurements."""
    def __init__(self, out_file_name):
        self.root_file_list = []
        self.print_enable = True
        # --- Tag-side branch buffers (fixed-size arrays the TTree reads) ---
        self.tag_HEEPinfo = ROOT.HEEPinfo_t()
        self.tag_p4 = array.array( 'f', [0, 0, 0, 0] )
        self.tag_sc_Eta = array.array('f', [0] )
        self.tag_charge = array.array('i', [0] )
        self.tag_region = array.array('i', [0] )
        self.tag_pass_HEEPID = array.array('i', [0] )
        self.tag_trig_DouEle25_seededleg = array.array('i', [0, 0] )
        self.tag_trig_DouEle25_unseededleg = array.array('i', [0, 0] )
        self.tag_trig_Ele35WPTight = array.array('i', [0] )
        self.tag_trig_Ele32WPTight = array.array('i', [0] )
        # --- Probe-side branch buffers ---
        self.probe_HEEPinfo = ROOT.HEEPinfo_t()
        self.probe_p4 = array.array( 'f', [0, 0, 0, 0] )
        self.probe_sc_Eta = array.array('f', [0] )
        self.probe_charge = array.array('i', [0] )
        self.probe_region = array.array('i', [0] )
        self.probe_pass_HEEPID = array.array('i', [0] )
        self.probe_trig_DouEle25_seededleg = array.array('i', [0, 0] )
        self.probe_trig_DouEle25_unseededleg = array.array('i', [0, 0] )
        self.probe_trig_Ele35WPTight = array.array('i', [0] )
        self.probe_trig_Ele32WPTight = array.array('i', [0] )
        # Output file and tree; branches map directly onto the arrays above.
        self.f_out = ROOT.TFile( out_file_name, 'RECREATE' )
        self.tree_out = ROOT.TTree( 'TreeName', 'tree comments here' )
        # NOTE(review): the two HEEPinfo leaflists below contain
        # 'pass _showershape' with an embedded space -- ROOT leaflists do not
        # allow spaces in leaf names; confirm whether 'pass_showershape' was
        # intended.
        self.tree_out.Branch( 'tag_HEEPinfo', self.tag_HEEPinfo , 'pass_isEcalDriven/I:pass_dEtaIn/I:pass_dPhiIn/I:pass_HoverE/I:pass_SigmaIeIe/I:pass _showershape/I:pass_lostHits/I:pass_dxy/I:pass_isolEMHadDepth1/I:pass_pTIso/I' )
        self.tree_out.Branch( 'tag_p4' , self.tag_p4 , 'tag_p4[4]/F')
        self.tree_out.Branch( 'tag_sc_Eta' , self.tag_sc_Eta , 'tag_sc_Eta/F' )
        self.tree_out.Branch( 'tag_charge' , self.tag_charge , 'tag_charge/I' )
        self.tree_out.Branch( 'tag_region' , self.tag_region , 'tag_region/I' )
        self.tree_out.Branch( 'tag_pass_HEEPID', self.tag_pass_HEEPID, 'tag_pass_HEEPID/I' )
        self.tree_out.Branch( 'tag_trig_DouEle25_unseededleg' , self.tag_trig_DouEle25_unseededleg, 'tag_trig_DouEle25_unseededleg[2]/I' )
        self.tree_out.Branch( 'tag_trig_DouEle25_seededleg' , self.tag_trig_DouEle25_seededleg, 'tag_trig_DouEle25_seededleg[2]/I' )
        self.tree_out.Branch( 'tag_trig_Ele32WPTight' , self.tag_trig_Ele32WPTight, 'tag_trig_Ele32WPTight/I' )
        self.tree_out.Branch( 'tag_trig_Ele35WPTight' , self.tag_trig_Ele35WPTight, 'tag_trig_Ele35WPTight/I' )
        self.tree_out.Branch( 'probe_HEEPinfo', self.probe_HEEPinfo, 'pass_isEcalDriven/I:pass_dEtaIn/I:pass_dPhiIn/I:pass_HoverE/I:pass_SigmaIeIe/I:pass _showershape/I:pass_lostHits/I:pass_dxy/I:pass_isolEMHadDepth1/I:pass_pTIso/I' )
        self.tree_out.Branch( 'probe_p4' , self.probe_p4 , 'probe_p4[4]/F')
        self.tree_out.Branch( 'probe_sc_Eta' , self.probe_sc_Eta , 'probe_sc_Eta/F' )
        self.tree_out.Branch( 'probe_charge' , self.probe_charge , 'probe_charge/I' )
        self.tree_out.Branch( 'probe_region' , self.probe_region , 'probe_region/I' )
        self.tree_out.Branch( 'probe_pass_HEEPID', self.probe_pass_HEEPID, 'probe_pass_HEEPID/I' )
        self.tree_out.Branch( 'probe_trig_DouEle25_seededleg' , self.probe_trig_DouEle25_seededleg, 'probe_trig_DouEle25_seededleg[2]/I' )
        self.tree_out.Branch( 'probe_trig_DouEle25_unseededleg' , self.probe_trig_DouEle25_unseededleg, 'probe_trig_DouEle25_unseededleg[2]/I' )
        self.tree_out.Branch( 'probe_trig_Ele32WPTight' , self.probe_trig_Ele32WPTight, 'probe_trig_Ele32WPTight/I' )
        self.tree_out.Branch( 'probe_trig_Ele35WPTight' , self.probe_trig_Ele35WPTight, 'probe_trig_Ele35WPTight/I' )
def trig_match(self, vector_eta, vector_phi, obj_eta, obj_phi, deltaR = 0.1):
obj_p4 = ROOT.TLorentzVector()
obj_p4.SetPtEtaPhiM(100.0, obj_eta, obj_phi, 10.0)
return self.trig_match(vector_eta, vector_phi, obj_p4, deltaR)
def trig_match(self, vector_eta, vector_phi, obj_p4, phi, deltaR = 0.1):
for iVector in range(len(vector_eta)):
tmp_p4 = ROOT.TLorentzVector()
tmp_p4.SetPtEtaPhiM(100.0, vector_eta[iVector], vector_phi[iVector], 10.0)
if (tmp_p4.DeltaR(obj_p4) < deltaR ):
return True
return False
    def Fill_branch(self, tree_in):
        """Loop over input events, build HEEP electron candidates and fill
        one output entry per (tag, probe) electron pair."""
        pre_time = time.time()
        # Enable only the branches we read, for speed.
        tree_in.SetBranchStatus("*", 0)
        tree_in.SetBranchStatus('gsf_*', 1)
        tree_in.SetBranchStatus('ev_*', 1)
        tree_in.SetBranchStatus('trig_HLT_Ele35_WPTight_Gsf_hltEle35noerWPTightGsfTrackIsoFilter_*', 1)
        tree_in.SetBranchStatus('trig_HLT_Ele32_WPTight_Gsf_hltEle32WPTightGsfTrackIsoFilter_*', 1)
        tree_in.SetBranchStatus('trig_HLT_DoubleEle25_CaloIdL_MW_*', 1)
        process_bar = ShowProcess(tree_in.GetEntries(), self.print_enable)
        n_saved = 0
        for iEvent in range(0, tree_in.GetEntries()):
            process_bar.show_process()
            tree_in.GetEntry(iEvent)
            # Build electron candidates, keeping barrel (1) and endcap (3) only.
            vector_electron = []
            for iEl in range(0, tree_in.gsf_n):
                tmp_electron = electron_C(tree_in.gsf_charge[iEl])
                tmp_electron.p4.SetPtEtaPhiM(tree_in.gsf_ecalEnergyPostCorr[iEl]*sin(tree_in.gsf_theta[iEl]), tree_in.gsf_eta[iEl], tree_in.gsf_phi[iEl], const_m_el)
                tmp_electron.p4_sc.SetPtEtaPhiM(tree_in.gsf_sc_energy[iEl]*sin(tree_in.gsf_theta[iEl]), tree_in.gsf_sc_eta[iEl], tree_in.gsf_phi[iEl], const_m_el)
                tmp_electron.sc_Eta = tree_in.gsf_sc_eta[iEl]
                tmp_electron.get_region()
                if (tmp_electron.region != 1 and tmp_electron.region != 3): continue
                tmp_electron.check_HEEPID(\
                    tree_in.gsf_deltaPhiSuperClusterTrackAtVtx[iEl], \
                    tree_in.gsf_full5x5_sigmaIetaIeta[iEl],\
                    tree_in.gsf_nLostInnerHits[iEl], \
                    abs(tree_in.gsf_dxy_firstPVtx[iEl]), \
                    tree_in.gsf_hadronicOverEm[iEl],\
                    tree_in.gsf_sc_energy[iEl], \
                    ( float(tree_in.gsf_full5x5_e1x5[iEl])/float(tree_in.gsf_full5x5_e5x5[iEl]) ), \
                    ( float(tree_in.gsf_full5x5_e2x5Max[iEl])/float(tree_in.gsf_full5x5_e5x5[iEl]) ), \
                    (tree_in.gsf_dr03EcalRecHitSumEt[iEl] + tree_in.gsf_dr03HcalDepth1TowerSumEt[iEl]), \
                    tree_in.gsf_heepTrkPtIso[iEl], \
                    tree_in.gsf_ecaldrivenSeed[iEl], \
                    tree_in.gsf_deltaEtaSeedClusterTrackAtVtx[iEl], \
                    tree_in.ev_fixedGridRhoFastjetAll)
                # A tag must be a barrel HEEP electron with Et > 35 GeV.
                tmp_electron.isTag = (tmp_electron.p4.Et() > 35 \
                    and tmp_electron.region == 1 \
                    and tmp_electron.pass_HEEPID)
                vector_electron.append(tmp_electron)
            iTag = -1
            iProbe = -1
            # Pair every tag with every other electron as a probe.
            for iTag in range(len(vector_electron)):
                if vector_electron[iTag].isTag:
                    for iProbe in range(len(vector_electron)):
                        if iProbe == iTag: continue
                        self.tag_HEEPinfo.pass_isEcalDriven = vector_electron[iTag].pass_isEcalDriven
                        self.tag_HEEPinfo.pass_dEtaIn = vector_electron[iTag].pass_dEtaIn
                        self.tag_HEEPinfo.pass_dPhiIn = vector_electron[iTag].pass_dPhiIn
                        self.tag_HEEPinfo.pass_HoverE = vector_electron[iTag].pass_HoverE
                        self.tag_HEEPinfo.pass_SigmaIeIe = vector_electron[iTag].pass_SigmaIeIe
                        self.tag_HEEPinfo.pass_showershape = vector_electron[iTag].pass_showershape
                        self.tag_HEEPinfo.pass_lostHits = vector_electron[iTag].pass_lostHits
                        self.tag_HEEPinfo.pass_dxy = vector_electron[iTag].pass_dxy
                        self.tag_HEEPinfo.pass_isolEMHadDepth1 = vector_electron[iTag].pass_isolEMHadDepth1
                        self.tag_HEEPinfo.pass_pTIso = vector_electron[iTag].pass_pTIso
                        self.tag_p4[0] = vector_electron[iTag].p4.Px()
                        self.tag_p4[1] = vector_electron[iTag].p4.Py()
                        self.tag_p4[2] = vector_electron[iTag].p4.Pz()
                        self.tag_p4[3] = vector_electron[iTag].p4.E()
                        # NOTE(review): the four tag_* buffers below are
                        # filled from vector_electron[iProbe], not [iTag] --
                        # looks like a copy/paste slip; confirm whether the
                        # tag's own sc_Eta/charge/region/HEEPID were intended.
                        self.tag_sc_Eta[0] = vector_electron[iProbe].sc_Eta
                        self.tag_charge[0] = vector_electron[iProbe].charge
                        self.tag_region[0] = vector_electron[iProbe].region
                        self.tag_pass_HEEPID[0] = vector_electron[iProbe].pass_HEEPID
                        # Trigger matching; silently skipped when the trigger
                        # branches are absent in this sample.
                        try :
                            self.tag_trig_DouEle25_unseededleg[0] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEG25EtUnseededFilter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEG25EtUnseededFilter_phi, vector_electron[iTag].p4, 0.1)
                            self.tag_trig_DouEle25_unseededleg[1] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEle25CaloIdLMWPMS2UnseededFilter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEle25CaloIdLMWPMS2UnseededFilter_phi, vector_electron[iTag].p4, 0.1)
                            self.tag_trig_DouEle25_seededleg[1] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEle25CaloIdLMWPMS2Filter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEle25CaloIdLMWPMS2Filter_phi, vector_electron[iTag].p4, 0.1)
                            self.tag_trig_DouEle25_seededleg[0] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEGL1SingleAndDoubleEGNonIsoOrWithEG26WithJetAndTauFilter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEGL1SingleAndDoubleEGNonIsoOrWithEG26WithJetAndTauFilter_phi, vector_electron[iTag].p4, 0.1)
                            self.tag_trig_Ele32WPTight[0] = self.trig_match(tree_in.trig_HLT_Ele32_WPTight_Gsf_hltEle32WPTightGsfTrackIsoFilter_eta, tree_in.trig_HLT_Ele32_WPTight_Gsf_hltEle32WPTightGsfTrackIsoFilter_phi, vector_electron[iTag].p4, 0.1)
                            self.tag_trig_Ele35WPTight[0] = self.trig_match(tree_in.trig_HLT_Ele35_WPTight_Gsf_hltEle35noerWPTightGsfTrackIsoFilter_eta, tree_in.trig_HLT_Ele35_WPTight_Gsf_hltEle35noerWPTightGsfTrackIsoFilter_phi, vector_electron[iTag].p4, 0.1)
                        except :pass
                        self.probe_HEEPinfo.pass_isEcalDriven = vector_electron[iProbe].pass_isEcalDriven
                        self.probe_HEEPinfo.pass_dEtaIn = vector_electron[iProbe].pass_dEtaIn
                        self.probe_HEEPinfo.pass_dPhiIn = vector_electron[iProbe].pass_dPhiIn
                        self.probe_HEEPinfo.pass_HoverE = vector_electron[iProbe].pass_HoverE
                        self.probe_HEEPinfo.pass_SigmaIeIe = vector_electron[iProbe].pass_SigmaIeIe
                        self.probe_HEEPinfo.pass_showershape = vector_electron[iProbe].pass_showershape
                        self.probe_HEEPinfo.pass_lostHits = vector_electron[iProbe].pass_lostHits
                        self.probe_HEEPinfo.pass_dxy = vector_electron[iProbe].pass_dxy
                        self.probe_HEEPinfo.pass_isolEMHadDepth1 = vector_electron[iProbe].pass_isolEMHadDepth1
                        self.probe_HEEPinfo.pass_pTIso = vector_electron[iProbe].pass_pTIso
                        self.probe_p4[0] = vector_electron[iProbe].p4.Px()
                        self.probe_p4[1] = vector_electron[iProbe].p4.Py()
                        self.probe_p4[2] = vector_electron[iProbe].p4.Pz()
                        self.probe_p4[3] = vector_electron[iProbe].p4.E()
                        self.probe_sc_Eta[0] = vector_electron[iProbe].sc_Eta
                        self.probe_charge[0] = vector_electron[iProbe].charge
                        self.probe_region[0] = vector_electron[iProbe].region
                        self.probe_pass_HEEPID[0] = vector_electron[iProbe].pass_HEEPID
                        try :
                            self.probe_trig_DouEle25_unseededleg[0] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEG25EtUnseededFilter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEG25EtUnseededFilter_phi, vector_electron[iProbe].p4, 0.1)
                            self.probe_trig_DouEle25_unseededleg[1] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEle25CaloIdLMWPMS2UnseededFilter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltDiEle25CaloIdLMWPMS2UnseededFilter_phi, vector_electron[iProbe].p4, 0.1)
                            self.probe_trig_DouEle25_seededleg[1] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEle25CaloIdLMWPMS2Filter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEle25CaloIdLMWPMS2Filter_phi, vector_electron[iProbe].p4, 0.1)
                            self.probe_trig_DouEle25_seededleg[0] = self.trig_match(tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEGL1SingleAndDoubleEGNonIsoOrWithEG26WithJetAndTauFilter_eta, tree_in.trig_HLT_DoubleEle25_CaloIdL_MW_hltEGL1SingleAndDoubleEGNonIsoOrWithEG26WithJetAndTauFilter_phi, vector_electron[iProbe].p4, 0.1)
                            self.probe_trig_Ele32WPTight[0] = self.trig_match(tree_in.trig_HLT_Ele32_WPTight_Gsf_hltEle32WPTightGsfTrackIsoFilter_eta, tree_in.trig_HLT_Ele32_WPTight_Gsf_hltEle32WPTightGsfTrackIsoFilter_phi, vector_electron[iProbe].p4, 0.1)
                            self.probe_trig_Ele35WPTight[0] = self.trig_match(tree_in.trig_HLT_Ele35_WPTight_Gsf_hltEle35noerWPTightGsfTrackIsoFilter_eta, tree_in.trig_HLT_Ele35_WPTight_Gsf_hltEle35noerWPTightGsfTrackIsoFilter_phi, vector_electron[iProbe].p4, 0.1)
                        except :pass
                        self.tree_out.Fill()
                        n_saved += 1
        process_bar.close('Finish, %d saved'%(n_saved))
        if self.print_enable: print "%0.1f event / s , waiting for other processes"%(tree_in.GetEntries() / float(time.time() - pre_time))
    def Loop(self):
        """Chain all collected input files, run Fill_branch over them and
        write the output file."""
        tChain = ROOT.TChain("IIHEAnalysis")
        if self.print_enable: print "TChian initializing, %d root files in total"%(len(self.root_file_list))
        process_bar1 = ShowProcess( len(self.root_file_list) , self.print_enable)
        for file_name in self.root_file_list:
            tChain.Add(file_name)
            process_bar1.show_process()
        process_bar1.close('TChian initialized, reskiming %d events'%(tChain.GetEntries()))
        self.Fill_branch(tChain)
        self.f_out.Write()
        # NOTE(review): the trailing '| [' tokens below are residue from the
        # dataset dump this file was extracted from, preserved verbatim.
        self.f_out.Close() | [
"1069379433@qq.com"
] | 1069379433@qq.com |
973d62af79f23603d12e59d2936310822445ccff | 497535fd65de15c1c39c53ceacc778aa557b42c8 | /penn_treebank_reader.py | 6d6915ad5c9b774211b5bf804fa53e5d5f6d18cd | [] | no_license | mrdrozdov/chart-parser | d44936f1872d2e8e18469dba5f6e8d6172ace53a | 5dfa79ed1aea2a11112a4320618e78c752520f46 | refs/heads/master | 2020-06-20T14:21:49.709284 | 2019-07-16T08:06:45 | 2019-07-16T08:06:45 | 197,149,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,788 | py | import json
import os
import nltk
from nltk.corpus import ptb
# For tests.
import unittest
####################
# Reader (raw PTB) #
####################
class RawPTBReader(object):
    """Streams parsed WSJ sentences from the Penn Treebank by split.

    Uses the standard split: sections 00-21 train, 22 valid, 23 test.
    """

    def __init__(self):
        # Map WSJ section number -> list of corpus file ids.
        by_section = {}
        for fileid in ptb.fileids():
            if fileid.startswith('WSJ'):
                sec = int(fileid.split('/')[1])
                by_section.setdefault(sec, []).append(fileid)
        self.tr_sections = list(range(0, 22))
        self.va_sections = list(range(22, 23))
        self.te_sections = list(range(23, 24))
        self.section2fileid = by_section

    def read_sections(self, sections):
        """Yield every parsed sentence in the given WSJ sections, in order."""
        for sec in sections:
            for fileid in self.section2fileid[sec]:
                for sent in ptb.parsed_sents(fileid):
                    yield sent

    def read_tr(self):
        return self.read_sections(self.tr_sections)

    def read_va(self):
        return self.read_sections(self.va_sections)

    def read_te(self):
        return self.read_sections(self.te_sections)
################################
# Converter (raw PTB -> jsonl) #
################################
def tree_to_string(tree):
    """Render `tree` as an s-expression string, e.g. '(S (NP dog) ran)'.

    Leaves are raw token strings and are emitted as-is.
    """
    if isinstance(tree, str):
        return tree
    pieces = [tree.label()]
    pieces.extend(tree_to_string(child) for child in tree)
    return '(' + ' '.join(pieces) + ')'
def tree_to_spans(tree):
    """Return (start, size, label) triples for every constituent in `tree`,
    children listed before their parents.

    Labels are truncated at the first '-', which strips PTB function tags;
    this is known to be lossy (see the original TODO).
    """
    collected = []
    def visit(node, offset):
        # Leaves are raw token strings and occupy exactly one position.
        if isinstance(node, str):
            return 1
        width = 0
        for child in node:
            width += visit(child, offset + width)
        tag = node.label().split('-')[0]  # TODO: This is wrong!
        collected.append((offset, width, tag))
        return width
    visit(tree, 0)
    return collected
class RawToJSONLConverter(object):
    """Dumps the raw PTB train/valid/test splits to .jsonl files.

    Each output line holds one sentence: its example id, tokens, bracketed
    parse string and labelled constituent spans.
    """
    def __init__(self, saveto):
        super(RawToJSONLConverter, self).__init__()
        self.reader = RawPTBReader()
        self.saveto = saveto
        if not os.path.exists(self.saveto):
            raise Exception('The `saveto` directory does not exist. ' + \
                'Run: `mkdir -p {}`'.format(self.saveto))
    def to_object(self, tree, example_id):
        """Convert one parsed sentence into a JSON-serializable dict."""
        o = {}
        o['example_id'] = example_id
        o['sentence'] = tree.leaves()
        o['parse'] = tree_to_string(tree)
        o['spans'] = tree_to_spans(tree)
        return o
    def _write_split(self, filename, trees, count):
        """Write one split to `filename`, numbering examples from `count`;
        returns the updated running count."""
        savepath = os.path.join(self.saveto, filename)
        with open(savepath, 'w') as f:
            for tree in trees:
                f.write('{}\n'.format(json.dumps(self.to_object(tree, example_id='ptb{}'.format(count)))))
                count += 1
        return count
    def run(self):
        # REFACTOR: the three near-identical write loops were collapsed into
        # one helper; example ids still run consecutively across splits.
        count = 0
        count = self._write_split('train.jsonl', self.reader.read_tr(), count)
        count = self._write_split('valid.jsonl', self.reader.read_va(), count)
        count = self._write_split('test.jsonl', self.reader.read_te(), count)
##################
# Reader (jsonl) #
##################
class JSONLReader(object):
    """Iterates over JSON objects stored one per line in a file."""
    def __init__(self, path):
        super(JSONLReader, self).__init__()
        self.path = path
    def read(self):
        """Yield each line of the file parsed as a JSON value."""
        with open(self.path) as handle:
            for raw_line in handle:
                yield json.loads(raw_line)
#########
# Tests #
#########
class TestPTBReader(object):
    """Sanity checks that the reader yields the canonical PTB split sizes."""
    def __init__(self):
        self.reader = RawPTBReader()
    def run(self):
        # BUG FIX: this used to call self.test_dataset_count(), a method
        # that does not exist (AttributeError at runtime); invoke the real
        # test method instead.
        self.test_num_examples()
    def test_num_examples(self):
        # Canonical WSJ sentence counts: 43746 train / 1700 valid / 2416 test.
        tr = [s for s in self.reader.read_tr()]
        assert len(tr) == 43746
        va = [s for s in self.reader.read_va()]
        assert len(va) == 1700
        te = [s for s in self.reader.read_te()]
        assert len(te) == 2416
        assert len(tr) + len(va) + len(te) == 47862
if __name__ == '__main__':
    import argparse
    # CLI entry point: 'test' sanity-checks the split sizes, 'convert'
    # writes the jsonl splits under --saveto, 'demo' prints the first
    # converted training example.
    parser = argparse.ArgumentParser()
    parser.add_argument('--saveto', default=os.path.expanduser('~/data/ptb'), type=str)
    parser.add_argument('--mode', default='test', choices=('test', 'convert', 'demo'))
    options = parser.parse_args()
    if options.mode == 'test':
        TestPTBReader().run()
    if options.mode == 'convert':
        RawToJSONLConverter(options.saveto).run()
    if options.mode == 'demo':
        print(next(JSONLReader(os.path.join(options.saveto, 'train.jsonl')).read()))
| [
"andrew@mrdrozdov.com"
] | andrew@mrdrozdov.com |
e2fb370681ad6a240332bc2274c752d6b9e04960 | c9fd8f943918e3fa3f19edeea96cff2303368ab6 | /apps/quotes/urls.py | 9bb2416f6570f34f8eea50e8d98c4ef48e3e35f5 | [] | no_license | fgomesc/system_stocks | e363181837d4efe3e563d49e1a80869b91f0048c | 190e855c5e95f4459dfcf1204e8939ccac6f7778 | refs/heads/master | 2022-06-25T13:33:44.428654 | 2020-05-08T19:34:19 | 2020-05-08T19:34:19 | 262,412,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.urls import path, include
from .views import home
from . import views
# URL routes for the quotes app; every view lives in this app's views module.
urlpatterns = [
    path('', views.home, name='home'),
    path('about/', views.about, name='about'),
    path('add_stocks.html', views.add_stocks, name='add_stocks'),
    # Deletes one stock; its primary key is passed as a path segment.
    path('delete/<stock_id>', views.delete, name='delete'),
    path('delete_stocks.html', views.delete_stocks, name='delete_stocks'),
]
| [
"fgomesc0586@gmail.com"
] | fgomesc0586@gmail.com |
5535132c338f582ef5cc8e7c14057cc6bfd01c5d | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/suit/DistributedBossbotBossAI.py | 704473b6aab824f2953dfe003fc194ae15799602 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | UTF-8 | Python | false | false | 38,255 | py | # File: t (Python 2.4)
import random
import math
from pandac.PandaModules import Point3
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import FSM
from direct.interval.IntervalGlobal import LerpPosInterval
from toontown.coghq import DistributedFoodBeltAI
from toontown.coghq import DistributedBanquetTableAI
from toontown.coghq import DistributedGolfSpotAI
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.suit import DistributedBossCogAI
from toontown.suit import DistributedSuitAI
from toontown.suit import SuitDNA
from toontown.building import SuitBuildingGlobals
from toontown.battle import DistributedBattleWaitersAI
from toontown.battle import DistributedBattleDinersAI
from toontown.battle import BattleExperienceAI
from direct.distributed.ClockDelta import globalClockDelta
class DistributedBossbotBossAI(DistributedBossCogAI.DistributedBossCogAI, FSM.FSM):
    """AI-side controller for the Bossbot boss (CEO) fight.

    Drives the multi-phase battle: elevator intro, battle one (waiters),
    the banquet serving phase (battle two), battle three (diners) and the
    final battle-four phase with banquet tables and golf spots.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBossbotBossAI')
    # Cap applied when accumulating toon levels.
    maxToonLevels = 77
    # Laff amounts handed out by the toon-up effect.
    toonUpLevels = [
        1,
        2,
        3,
        4]
    def __init__(self, air):
        # 'c' selects the Bossbot (corporate) suit department.
        DistributedBossCogAI.DistributedBossCogAI.__init__(self, air, 'c')
        FSM.FSM.__init__(self, 'DistributedBossbotBossAI')
        # One-shot guards so each battle setup only happens once.
        self.battleOneBattlesMade = False
        self.battleThreeBattlesMade = False
        self.battleFourSetup = False
        # Banquet-phase props (belts, tables, golf spots).
        self.foodBelts = []
        self.numTables = 1
        self.numDinersPerTable = 3
        self.tables = []
        self.numGolfSpots = 4
        self.golfSpots = []
        # avId -> (beltIndex, foodNum) of the food item each toon carries.
        self.toonFoodStatus = { }
        self.bossMaxDamage = ToontownGlobals.BossbotBossMaxDamage
        self.threatDict = { }
        self.keyStates.append('BattleFour')
        self.battleFourStart = 0
        self.battleDifficulty = 0
        # Boss movement between banquet tables during battle four.
        self.movingToTable = False
        self.tableDest = -1
        self.curTable = -1
        # "Speed damage" builds up and recovers over time in battle four.
        self.speedDamage = 0
        self.maxSpeedDamage = ToontownGlobals.BossbotMaxSpeedDamage
        self.speedRecoverRate = ToontownGlobals.BossbotSpeedRecoverRate
        self.speedRecoverStartTime = 0
        self.battleFourTimeStarted = 0
        # Battle-four statistics.
        self.numDinersExploded = 0
        self.numMoveAttacks = 0
        self.numGolfAttacks = 0
        self.numGearAttacks = 0
        self.numGolfAreaAttacks = 0
        self.numToonupGranted = 0
        self.totalLaffHealed = 0
        self.toonupsGranted = []
        # Overtime attacks fire once each when the fight runs long; timing
        # thresholds are configurable.
        self.doneOvertimeOneAttack = False
        self.doneOvertimeTwoAttack = False
        self.overtimeOneTime = simbase.air.config.GetInt('overtime-one-time', 1200)
        self.battleFourDuration = simbase.air.config.GetInt('battle-four-duration', 1800)
        self.overtimeOneStart = float(self.overtimeOneTime) / self.battleFourDuration
        self.moveAttackAllowed = True
    def delete(self):
        """Tear down the banquet props before the base class cleans up."""
        self.notify.debug('DistributedBossbotBossAI.delete')
        self.deleteBanquetTables()
        self.deleteFoodBelts()
        self.deleteGolfSpots()
        return DistributedBossCogAI.DistributedBossCogAI.delete(self)
    def enterElevator(self):
        """Elevator state: battle one is prepared while toons ride up."""
        DistributedBossCogAI.DistributedBossCogAI.enterElevator(self)
        self.makeBattleOneBattles()
    def enterIntroduction(self):
        """Intro cutscene; a 45-second barrier waits for every toon."""
        self.arenaSide = None
        self.makeBattleOneBattles()
        self.barrier = self.beginBarrier('Introduction', self.involvedToons, 45, self.doneIntroduction)
    def makeBattleOneBattles(self):
        """Create the battle-one (waiter) battles exactly once."""
        if not self.battleOneBattlesMade:
            self.postBattleState = 'PrepareBattleTwo'
            self.initializeBattles(1, ToontownGlobals.BossbotBossBattleOnePosHpr)
            self.battleOneBattlesMade = True
    def getHoodId(self):
        # NOTE(review): this is the Bossbot boss, yet the hood id returned
        # is LawbotHQ -- looks like a copy/paste slip from the Lawbot boss;
        # confirm whether ToontownGlobals.BossbotHQ was intended.
        return ToontownGlobals.LawbotHQ
    def generateSuits(self, battleNumber):
        """Return the suit set for the given battle round.

        Battle one uses the level-14 building suit planner (optionally
        weakened via the bossbot-boss-cheat config); later rounds use the
        custom diner suits.
        """
        if battleNumber == 1:
            weakenedValue = ((1, 1), (2, 2), (2, 2), (1, 1), (1, 1, 1, 1, 1))
            listVersion = list(SuitBuildingGlobals.SuitBuildingInfo)
            if simbase.config.GetBool('bossbot-boss-cheat', 0):
                # Swap in a much weaker building definition for debugging.
                listVersion[14] = weakenedValue
                SuitBuildingGlobals.SuitBuildingInfo = tuple(listVersion)
            retval = self.invokeSuitPlanner(14, 0)
            return retval
        else:
            suits = self.generateDinerSuits()
            return suits
    def invokeSuitPlanner(self, buildingCode, skelecog):
        """Run the base suit planner, then promote reserves until at least
        four suits are active (when enough suits exist in total)."""
        suits = DistributedBossCogAI.DistributedBossCogAI.invokeSuitPlanner(self, buildingCode, skelecog)
        activeSuits = suits['activeSuits'][:]
        reserveSuits = suits['reserveSuits'][:]
        if len(activeSuits) + len(reserveSuits) >= 4:
            while len(activeSuits) < 4:
                activeSuits.append(reserveSuits.pop()[0])
        retval = {
            'activeSuits': activeSuits,
            'reserveSuits': reserveSuits }
        return retval
    def makeBattle(self, bossCogPosHpr, battlePosHpr, roundCallback, finishCallback, battleNumber, battleSide):
        """Build one battle object (waiter battle for round one, diner
        battle otherwise), wire it to this boss and generate it.

        battleSide selects which half (A=0, B=1) of the split suit lists
        the battle draws from.
        """
        if battleNumber == 1:
            battle = DistributedBattleWaitersAI.DistributedBattleWaitersAI(self.air, self, roundCallback, finishCallback, battleSide)
        else:
            battle = DistributedBattleDinersAI.DistributedBattleDinersAI(self.air, self, roundCallback, finishCallback, battleSide)
        self.setBattlePos(battle, bossCogPosHpr, battlePosHpr)
        # Share the boss-wide reward bookkeeping with the battle so rewards
        # accumulate across rounds.
        battle.suitsKilled = self.suitsKilled
        battle.battleCalc.toonSkillPtsGained = self.toonSkillPtsGained
        battle.toonExp = self.toonExp
        battle.toonOrigQuests = self.toonOrigQuests
        battle.toonItems = self.toonItems
        battle.toonOrigMerits = self.toonOrigMerits
        battle.toonMerits = self.toonMerits
        battle.toonParts = self.toonParts
        battle.helpfulToons = self.helpfulToons
        mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(battleNumber)
        battle.battleCalc.setSkillCreditMultiplier(mult)
        activeSuits = self.activeSuitsA
        if battleSide:
            activeSuits = self.activeSuitsB
        for suit in activeSuits:
            battle.addSuit(suit)
        battle.generateWithRequired(self.zoneId)
        return battle
    def initializeBattles(self, battleNumber, bossCogPosHpr):
        """Split the suits (and toons) across the A and B battles for one
        round, creating battles only for sides that have toons."""
        self.resetBattles()
        if not self.involvedToons:
            self.notify.warning('initializeBattles: no toons!')
            return None
        self.battleNumber = battleNumber
        suitHandles = self.generateSuits(battleNumber)
        self.suitsA = suitHandles['activeSuits']
        self.activeSuitsA = self.suitsA[:]
        self.reserveSuits = suitHandles['reserveSuits']
        if battleNumber == 3:
            # Round three reuses a single suit set: move one suit from side
            # A to side B when side B has toons.
            if self.toonsB:
                movedSuit = self.suitsA.pop()
                self.suitsB = [
                    movedSuit]
                self.activeSuitsB = [
                    movedSuit]
                self.activeSuitsA.remove(movedSuit)
            else:
                self.suitsB = []
                self.activeSuitsB = []
        else:
            # Other rounds generate an independent suit set for side B.
            suitHandles = self.generateSuits(battleNumber)
            self.suitsB = suitHandles['activeSuits']
            self.activeSuitsB = self.suitsB[:]
            self.reserveSuits += suitHandles['reserveSuits']
        if self.toonsA:
            if battleNumber == 1:
                self.battleA = self.makeBattle(bossCogPosHpr, ToontownGlobals.WaiterBattleAPosHpr, self.handleRoundADone, self.handleBattleADone, battleNumber, 0)
                self.battleAId = self.battleA.doId
            else:
                self.battleA = self.makeBattle(bossCogPosHpr, ToontownGlobals.DinerBattleAPosHpr, self.handleRoundADone, self.handleBattleADone, battleNumber, 0)
                self.battleAId = self.battleA.doId
        else:
            # No toons on side A: dismiss its suits and park the arena there.
            self.moveSuits(self.activeSuitsA)
            self.suitsA = []
            self.activeSuitsA = []
            if self.arenaSide == None:
                self.b_setArenaSide(0)
        if self.toonsB:
            if battleNumber == 1:
                self.battleB = self.makeBattle(bossCogPosHpr, ToontownGlobals.WaiterBattleBPosHpr, self.handleRoundBDone, self.handleBattleBDone, battleNumber, 1)
                self.battleBId = self.battleB.doId
            else:
                self.battleB = self.makeBattle(bossCogPosHpr, ToontownGlobals.DinerBattleBPosHpr, self.handleRoundBDone, self.handleBattleBDone, battleNumber, 1)
                self.battleBId = self.battleB.doId
        else:
            self.moveSuits(self.activeSuitsB)
            self.suitsB = []
            self.activeSuitsB = []
            if self.arenaSide == None:
                self.b_setArenaSide(1)
        self.sendBattleIds()
    def enterPrepareBattleTwo(self):
        """Cutscene before the banquet phase; builds belts and tables so
        they exist before BattleTwo begins."""
        self.barrier = self.beginBarrier('PrepareBattleTwo', self.involvedToons, 45, self._DistributedBossbotBossAI__donePrepareBattleTwo)
        self.createFoodBelts()
        self.createBanquetTables()
    def _DistributedBossbotBossAI__donePrepareBattleTwo(self, avIds):
        self.b_setState('BattleTwo')
    def exitPrepareBattleTwo(self):
        self.ignoreBarrier(self.barrier)
    def createFoodBelts(self):
        """Create the two conveyor food belts (idempotent)."""
        if self.foodBelts:
            return None
        for i in xrange(2):
            newBelt = DistributedFoodBeltAI.DistributedFoodBeltAI(self.air, self, i)
            self.foodBelts.append(newBelt)
            newBelt.generateWithRequired(self.zoneId)
    def deleteFoodBelts(self):
        for belt in self.foodBelts:
            belt.requestDelete()
        self.foodBelts = []
    def createBanquetTables(self):
        """Create the banquet tables sized by battle difficulty (idempotent)."""
        if self.tables:
            return None
        self.calcAndSetBattleDifficulty()
        # diffInfo layout: (numTables, dinersPerTable, dinerLevel).
        diffInfo = ToontownGlobals.BossbotBossDifficultySettings[self.battleDifficulty]
        self.diffInfo = diffInfo
        self.numTables = diffInfo[0]
        self.numDinersPerTable = diffInfo[1]
        dinerLevel = diffInfo[2]
        for i in xrange(self.numTables):
            newTable = DistributedBanquetTableAI.DistributedBanquetTableAI(self.air, self, i, self.numDinersPerTable, dinerLevel)
            self.tables.append(newTable)
            newTable.generateWithRequired(self.zoneId)
    def deleteBanquetTables(self):
        for table in self.tables:
            table.requestDelete()
        self.tables = []
def enterBattleTwo(self):
self.resetBattles()
self.createFoodBelts()
self.createBanquetTables()
for belt in self.foodBelts:
belt.turnOn()
for table in self.tables:
table.turnOn()
self.barrier = self.beginBarrier('BattleTwo', self.involvedToons, ToontownGlobals.BossbotBossServingDuration + 1, self._DistributedBossbotBossAI__doneBattleTwo)
def exitBattleTwo(self):
self.ignoreBarrier(self.barrier)
for table in self.tables:
table.goInactive()
for belt in self.foodBelts:
belt.goInactive()
def _DistributedBossbotBossAI__doneBattleTwo(self, avIds):
self.b_setState('PrepareBattleThree')
def requestGetFood(self, beltIndex, foodIndex, foodNum):
grantRequest = False
avId = self.air.getAvatarIdFromSender()
if self.state != 'BattleTwo':
grantRequest = False
elif (beltIndex, foodNum) not in self.toonFoodStatus.values():
if avId not in self.toonFoodStatus:
grantRequest = True
elif self.toonFoodStatus[avId] == None:
grantRequest = True
if grantRequest:
self.toonFoodStatus[avId] = (beltIndex, foodNum)
self.sendUpdate('toonGotFood', [
avId,
beltIndex,
foodIndex,
foodNum])
def requestServeFood(self, tableIndex, chairIndex):
grantRequest = False
avId = self.air.getAvatarIdFromSender()
if self.state != 'BattleTwo':
grantRequest = False
elif tableIndex < len(self.tables):
table = self.tables[tableIndex]
dinerStatus = table.getDinerStatus(chairIndex)
if dinerStatus in (table.HUNGRY, table.ANGRY):
if self.toonFoodStatus[avId]:
grantRequest = True
if grantRequest:
self.toonFoodStatus[avId] = None
table.foodServed(chairIndex)
self.sendUpdate('toonServeFood', [
avId,
tableIndex,
chairIndex])
def enterPrepareBattleThree(self):
self.barrier = self.beginBarrier('PrepareBattleThree', self.involvedToons, ToontownGlobals.BossbotBossServingDuration + 1, self._DistributedBossbotBossAI__donePrepareBattleThree)
self.divideToons()
self.makeBattleThreeBattles()
def exitPrepareBattleThree(self):
self.ignoreBarrier(self.barrier)
def _DistributedBossbotBossAI__donePrepareBattleThree(self, avIds):
self.b_setState('BattleThree')
def makeBattleThreeBattles(self):
if not self.battleThreeBattlesMade:
if not self.tables:
self.createBanquetTables()
for table in self.tables:
table.turnOn()
table.goInactive()
notDeadList = []
for table in self.tables:
tableInfo = table.getNotDeadInfo()
notDeadList += tableInfo
self.notDeadList = notDeadList
self.postBattleState = 'PrepareBattleFour'
self.initializeBattles(3, ToontownGlobals.BossbotBossBattleThreePosHpr)
self.battleThreeBattlesMade = True
def generateDinerSuits(self):
diners = []
for i in xrange(len(self.notDeadList)):
if simbase.config.GetBool('bossbot-boss-cheat', 0):
suit = self._DistributedBossbotBossAI__genSuitObject(self.zoneId, 2, 'c', 2, 0)
else:
info = self.notDeadList[i]
suitType = info[2] - 4
suitLevel = info[2]
suit = self._DistributedBossbotBossAI__genSuitObject(self.zoneId, suitType, 'c', suitLevel, 1)
diners.append((suit, 100))
active = []
for i in xrange(2):
if simbase.config.GetBool('bossbot-boss-cheat', 0):
suit = self._DistributedBossbotBossAI__genSuitObject(self.zoneId, 2, 'c', 2, 0)
else:
suitType = 8
suitLevel = 12
suit = self._DistributedBossbotBossAI__genSuitObject(self.zoneId, suitType, 'c', suitLevel, 1)
active.append(suit)
return {
'activeSuits': active,
'reserveSuits': diners }
def _DistributedBossbotBossAI__genSuitObject(self, suitZone, suitType, bldgTrack, suitLevel, revives = 0):
newSuit = DistributedSuitAI.DistributedSuitAI(simbase.air, None)
skel = self._DistributedBossbotBossAI__setupSuitInfo(newSuit, bldgTrack, suitLevel, suitType)
if skel:
newSuit.setSkelecog(1)
newSuit.setSkeleRevives(revives)
newSuit.generateWithRequired(suitZone)
newSuit.node().setName('suit-%s' % newSuit.doId)
return newSuit
def _DistributedBossbotBossAI__setupSuitInfo(self, suit, bldgTrack, suitLevel, suitType):
dna = SuitDNA.SuitDNA()
dna.newSuitRandom(suitType, bldgTrack)
suit.dna = dna
self.notify.debug('Creating suit type ' + suit.dna.name + ' of level ' + str(suitLevel) + ' from type ' + str(suitType) + ' and track ' + str(bldgTrack))
suit.setLevel(suitLevel)
return False
def enterBattleThree(self):
self.makeBattleThreeBattles()
self.notify.debug('self.battleA = %s' % self.battleA)
if self.battleA:
self.battleA.startBattle(self.toonsA, self.suitsA)
if self.battleB:
self.battleB.startBattle(self.toonsB, self.suitsB)
def exitBattleThree(self):
self.resetBattles()
def enterPrepareBattleFour(self):
self.resetBattles()
self.setupBattleFourObjects()
self.barrier = self.beginBarrier('PrepareBattleFour', self.involvedToons, 45, self._DistributedBossbotBossAI__donePrepareBattleFour)
def _DistributedBossbotBossAI__donePrepareBattleFour(self, avIds):
self.b_setState('BattleFour')
def exitPrepareBattleFour(self):
self.ignoreBarrier(self.barrier)
def enterBattleFour(self):
self.battleFourTimeStarted = globalClock.getFrameTime()
self.numToonsAtStart = len(self.involvedToons)
self.resetBattles()
self.setupBattleFourObjects()
self.battleFourStart = globalClock.getFrameTime()
self.waitForNextAttack(5)
def exitBattleFour(self):
self.recordCeoInfo()
for belt in self.foodBelts:
belt.goInactive()
def recordCeoInfo(self):
didTheyWin = 0
if self.bossDamage == self.bossMaxDamage:
didTheyWin = 1
self.battleFourTimeInMin = globalClock.getFrameTime() - self.battleFourTimeStarted
self.battleFourTimeInMin /= 60.0
self.numToonsAtEnd = 0
toonHps = []
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
self.numToonsAtEnd += 1
toonHps.append(toon.hp)
continue
self.air.writeServerEvent('ceoInfo', self.doId, '%d|%.2f|%d|%d|%d|%d|%d|%d|%s|%s|%.1f|%d|%d|%d|%d|%d}%d|%s|' % (didTheyWin, self.battleFourTimeInMin, self.battleDifficulty, self.numToonsAtStart, self.numToonsAtEnd, self.numTables, self.numTables * self.numDinersPerTable, self.numDinersExploded, toonHps, self.involvedToons, self.speedDamage, self.numMoveAttacks, self.numGolfAttacks, self.numGearAttacks, self.numGolfAreaAttacks, self.numToonupGranted, self.totalLaffHealed, 'ceoBugfixes'))
def setupBattleFourObjects(self):
if self.battleFourSetup:
return None
if not self.tables:
self.createBanquetTables()
for table in self.tables:
table.goFree()
if not self.golfSpots:
self.createGolfSpots()
self.createFoodBelts()
for belt in self.foodBelts:
belt.goToonup()
self.battleFourSetup = True
def hitBoss(self, bossDamage):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitBoss from unknown avatar'):
return None
self.validate(avId, bossDamage <= 3, 'invalid bossDamage %s' % bossDamage)
if bossDamage < 1:
return None
currState = self.getCurrentOrNextState()
if currState != 'BattleFour':
return None
bossDamage *= 2
bossDamage = min(self.getBossDamage() + bossDamage, self.bossMaxDamage)
self.b_setBossDamage(bossDamage, 0, 0)
if self.bossDamage >= self.bossMaxDamage:
self.b_setState('Victory')
else:
self._DistributedBossbotBossAI__recordHit(bossDamage)
def _DistributedBossbotBossAI__recordHit(self, bossDamage):
now = globalClock.getFrameTime()
self.hitCount += 1
avId = self.air.getAvatarIdFromSender()
self.addThreat(avId, bossDamage)
def getBossDamage(self):
return self.bossDamage
def b_setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
self.d_setBossDamage(bossDamage, recoverRate, recoverStartTime)
self.setBossDamage(bossDamage, recoverRate, recoverStartTime)
def setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
self.bossDamage = bossDamage
self.recoverRate = recoverRate
self.recoverStartTime = recoverStartTime
def d_setBossDamage(self, bossDamage, recoverRate, recoverStartTime):
timestamp = globalClockDelta.localToNetworkTime(recoverStartTime)
self.sendUpdate('setBossDamage', [
bossDamage,
recoverRate,
timestamp])
def getSpeedDamage(self):
now = globalClock.getFrameTime()
elapsed = now - self.speedRecoverStartTime
self.notify.debug('elapsed=%s' % elapsed)
floatSpeedDamage = max(self.speedDamage - self.speedRecoverRate * elapsed / 60.0, 0)
self.notify.debug('floatSpeedDamage = %s' % floatSpeedDamage)
return int(max(self.speedDamage - self.speedRecoverRate * elapsed / 60.0, 0))
def getFloatSpeedDamage(self):
now = globalClock.getFrameTime()
elapsed = now - self.speedRecoverStartTime
floatSpeedDamage = max(self.speedDamage - self.speedRecoverRate * elapsed / 60.0, 0)
self.notify.debug('floatSpeedDamage = %s' % floatSpeedDamage)
return max(self.speedDamage - self.speedRecoverRate * elapsed / 60.0, 0)
def b_setSpeedDamage(self, speedDamage, recoverRate, recoverStartTime):
self.d_setSpeedDamage(speedDamage, recoverRate, recoverStartTime)
self.setSpeedDamage(speedDamage, recoverRate, recoverStartTime)
def setSpeedDamage(self, speedDamage, recoverRate, recoverStartTime):
self.speedDamage = speedDamage
self.speedRecoverRate = recoverRate
self.speedRecoverStartTime = recoverStartTime
def d_setSpeedDamage(self, speedDamage, recoverRate, recoverStartTime):
timestamp = globalClockDelta.localToNetworkTime(recoverStartTime)
self.sendUpdate('setSpeedDamage', [
speedDamage,
recoverRate,
timestamp])
def createGolfSpots(self):
if self.golfSpots:
return None
for i in xrange(self.numGolfSpots):
newGolfSpot = DistributedGolfSpotAI.DistributedGolfSpotAI(self.air, self, i)
self.golfSpots.append(newGolfSpot)
newGolfSpot.generateWithRequired(self.zoneId)
newGolfSpot.forceFree()
def deleteGolfSpots(self):
for spot in self.golfSpots:
spot.requestDelete()
self.golfSpots = []
def ballHitBoss(self, speedDamage):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId in self.involvedToons, 'hitBoss from unknown avatar'):
return None
if speedDamage < 1:
return None
currState = self.getCurrentOrNextState()
if currState != 'BattleFour':
return None
now = globalClock.getFrameTime()
newDamage = self.getSpeedDamage() + speedDamage
self.notify.debug('newDamage = %s' % newDamage)
speedDamage = min(self.getFloatSpeedDamage() + speedDamage, self.maxSpeedDamage)
self.b_setSpeedDamage(speedDamage, self.speedRecoverRate, now)
self.addThreat(avId, 0.10000000000000001)
def enterVictory(self):
self.resetBattles()
for table in self.tables:
table.turnOff()
for golfSpot in self.golfSpots:
golfSpot.turnOff()
self.suitsKilled.append({
'type': None,
'level': None,
'track': self.dna.dept,
'isSkelecog': 0,
'isForeman': 0,
'isVP': 1,
'isCFO': 0,
'isSupervisor': 0,
'isVirtual': 0,
'activeToons': self.involvedToons[:] })
self.barrier = self.beginBarrier('Victory', self.involvedToons, 30, self._DistributedBossbotBossAI__doneVictory)
def _DistributedBossbotBossAI__doneVictory(self, avIds):
self.d_setBattleExperience()
self.b_setState('Reward')
BattleExperienceAI.assignRewards(self.involvedToons, self.toonSkillPtsGained, self.suitsKilled, ToontownGlobals.dept2cogHQ(self.dept), self.helpfulToons)
for toonId in self.involvedToons:
toon = self.air.doId2do.get(toonId)
if toon:
self.givePinkSlipReward(toon)
toon.b_promote(self.deptIndex)
continue
def givePinkSlipReward(self, toon):
self.notify.debug('TODO give pink slip to %s' % toon)
toon.addPinkSlips(self.battleDifficulty + 1)
def getThreat(self, toonId):
if toonId in self.threatDict:
return self.threatDict[toonId]
else:
return 0
def addThreat(self, toonId, threat):
if toonId in self.threatDict:
self.threatDict[toonId] += threat
else:
self.threatDict[toonId] = threat
def subtractThreat(self, toonId, threat):
if toonId in self.threatDict:
self.threatDict[toonId] -= threat
else:
self.threatDict[toonId] = 0
if self.threatDict[toonId] < 0:
self.threatDict[toonId] = 0
    def waitForNextAttack(self, delayTime):
        """Schedule doNextAttack to fire after delayTime seconds.

        Only schedules while in (or entering) BattleFour; any previously
        queued attack task is cancelled first so attacks never stack.
        """
        currState = self.getCurrentOrNextState()
        if currState == 'BattleFour':
            taskName = self.uniqueName('NextAttack')
            # Replace, don't accumulate, pending attack tasks.
            taskMgr.remove(taskName)
            taskMgr.doMethodLater(delayTime, self.doNextAttack, taskName)
    def doNextAttack(self, task):
        """Task callback: pick and launch the CEO's next battle-four attack.

        Priority order: wait while moving to a table, recover from dizziness,
        fire each one-shot overtime attack once, otherwise pick randomly
        (golf-area attack 1-in-5, directed attack 4-in-5).
        """
        attackCode = -1
        optionalParam = None
        if self.movingToTable:
            # Still charging toward a table; check again shortly.
            self.waitForNextAttack(5)
        elif self.attackCode == ToontownGlobals.BossCogDizzyNow:
            attackCode = ToontownGlobals.BossCogRecoverDizzyAttack
        elif self.getBattleFourTime() > self.overtimeOneStart and not (self.doneOvertimeOneAttack):
            # First overtime attack, fired exactly once.
            attackCode = ToontownGlobals.BossCogOvertimeAttack
            self.doneOvertimeOneAttack = True
            optionalParam = 0
        elif self.getBattleFourTime() > 1.0 and not (self.doneOvertimeTwoAttack):
            # Second overtime attack once the nominal duration has elapsed.
            attackCode = ToontownGlobals.BossCogOvertimeAttack
            self.doneOvertimeTwoAttack = True
            optionalParam = 1
        else:
            # Weighted pick: directed attacks are 4x as likely as golf-area.
            attackCode = random.choice([
                ToontownGlobals.BossCogGolfAreaAttack,
                ToontownGlobals.BossCogDirectedAttack,
                ToontownGlobals.BossCogDirectedAttack,
                ToontownGlobals.BossCogDirectedAttack,
                ToontownGlobals.BossCogDirectedAttack])
        # NOTE(review): BossCogAreaAttack never appears in the choices above,
        # so this branch looks unreachable from this method -- confirm.
        if attackCode == ToontownGlobals.BossCogAreaAttack:
            self._DistributedBossbotBossAI__doAreaAttack()
        if attackCode == ToontownGlobals.BossCogGolfAreaAttack:
            self._DistributedBossbotBossAI__doGolfAreaAttack()
        elif attackCode == ToontownGlobals.BossCogDirectedAttack:
            self._DistributedBossbotBossAI__doDirectedAttack()
        elif attackCode >= 0:
            self.b_setAttackCode(attackCode, optionalParam)
def progressValue(self, fromValue, toValue):
t0 = float(self.bossDamage) / float(self.bossMaxDamage)
elapsed = globalClock.getFrameTime() - self.battleFourStart
t1 = elapsed / float(self.battleThreeDuration)
t = max(t0, t1)
progVal = fromValue + (toValue - fromValue) * min(t, 1)
self.notify.debug('progVal=%s' % progVal)
import pdb as pdb
pdb.set_trace()
return progVal
    def _DistributedBossbotBossAI__doDirectedAttack(self):
        """Aim an attack at a single toon, normally the highest-threat one.

        About 10% of the time (or when nobody has threat) a random
        unflattened toon is targeted instead.  Being targeted costs the toon
        a quarter of its threat (at least 10) so targets rotate over time.
        """
        toonId = self.getMaxThreatToon()
        self.notify.debug('toonToAttack=%s' % toonId)
        unflattenedToons = self.getUnflattenedToons()
        # ~10% chance to ignore the threat table entirely.
        attackTotallyRandomToon = random.random() < 0.10000000000000001
        if unflattenedToons:
            if attackTotallyRandomToon or toonId == 0:
                toonId = random.choice(unflattenedToons)
        if toonId:
            toonThreat = self.getThreat(toonId)
            toonThreat *= 0.25
            # Burn off some threat so the same toon isn't targeted forever.
            threatToSubtract = max(toonThreat, 10)
            self.subtractThreat(toonId, threatToSubtract)
            if self.isToonRoaming(toonId):
                # Roaming toons are hit with a golf ball.
                self.b_setAttackCode(ToontownGlobals.BossCogGolfAttack, toonId)
                self.numGolfAttacks += 1
            elif self.isToonOnTable(toonId):
                # Toons on a table: either shoot a gear at them or run the
                # table over, depending on config and random chance.
                doesMoveAttack = simbase.air.config.GetBool('ceo-does-move-attack', 1)
                if doesMoveAttack:
                    chanceToShoot = 0.25
                else:
                    chanceToShoot = 1.0
                if not self.moveAttackAllowed:
                    self.notify.debug('moveAttack is not allowed, doing gearDirectedAttack')
                    chanceToShoot = 1.0
                if random.random() < chanceToShoot:
                    self.b_setAttackCode(ToontownGlobals.BossCogGearDirectedAttack, toonId)
                    self.numGearAttacks += 1
                else:
                    tableIndex = self.getToonTableIndex(toonId)
                    self.doMoveAttack(tableIndex)
            else:
                self.b_setAttackCode(ToontownGlobals.BossCogGolfAttack, toonId)
        else:
            # Nobody to target: charge a random upright table, or wait.
            uprightTables = self.getUprightTables()
            if uprightTables:
                tableToMoveTo = random.choice(uprightTables)
                self.doMoveAttack(tableToMoveTo)
            else:
                self.waitForNextAttack(4)
    def doMoveAttack(self, tableIndex):
        """Charge the CEO toward the given table, recording the attack."""
        self.numMoveAttacks += 1
        # reachedTable()/toonLeftTable() clear this flag when the run ends.
        self.movingToTable = True
        self.tableDest = tableIndex
        self.b_setAttackCode(ToontownGlobals.BossCogMoveAttack, tableIndex)
def getUnflattenedToons(self):
result = []
uprightTables = self.getUprightTables()
for toonId in self.involvedToons:
toonTable = self.getToonTableIndex(toonId)
if toonTable >= 0 and toonTable not in uprightTables:
continue
result.append(toonId)
return result
    def getMaxThreatToon(self):
        """Return the toonId with the highest threat, picking randomly among
        ties; toons on flattened tables are skipped. Returns 0 if none."""
        returnedToonId = 0
        maxThreat = 0
        maxToons = []
        for toonId in self.threatDict:
            curThreat = self.threatDict[toonId]
            tableIndex = self.getToonTableIndex(toonId)
            if tableIndex > -1 and self.tables[tableIndex].state == 'Flat':
                # Can't target a toon pinned under a flattened table.
                continue
            if curThreat > maxThreat:
                # New leader: restart the tie list.
                maxToons = [
                    toonId]
                maxThreat = curThreat
                continue
            if curThreat == maxThreat:
                maxToons.append(toonId)
                continue
        if maxToons:
            returnedToonId = random.choice(maxToons)
        return returnedToonId
def getToonDifficulty(self):
highestCogSuitLevel = 0
totalCogSuitLevels = 0.0
totalNumToons = 0.0
for toonId in self.involvedToons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toonLevel = toon.getNumPromotions(self.dept)
totalCogSuitLevels += toonLevel
totalNumToons += 1
if toon.cogLevels > highestCogSuitLevel:
highestCogSuitLevel = toonLevel
toon.cogLevels > highestCogSuitLevel
if not totalNumToons:
totalNumToons = 1.0
averageLevel = totalCogSuitLevels / totalNumToons
self.notify.debug('toons average level = %f, highest level = %d' % (averageLevel, highestCogSuitLevel))
retval = min(averageLevel, self.maxToonLevels)
return retval
    def calcAndSetBattleDifficulty(self):
        """Map the toons' average suit level onto a difficulty bucket and
        broadcast it to clients."""
        self.toonLevels = self.getToonDifficulty()
        numDifficultyLevels = len(ToontownGlobals.BossbotBossDifficultySettings)
        battleDifficulty = int((self.toonLevels / self.maxToonLevels) * numDifficultyLevels)
        if battleDifficulty >= numDifficultyLevels:
            # Clamp to the hardest defined setting.
            battleDifficulty = numDifficultyLevels - 1
        self.b_setBattleDifficulty(battleDifficulty)
def b_setBattleDifficulty(self, batDiff):
self.setBattleDifficulty(batDiff)
self.d_setBattleDifficulty(batDiff)
def setBattleDifficulty(self, batDiff):
self.battleDifficulty = batDiff
def d_setBattleDifficulty(self, batDiff):
self.sendUpdate('setBattleDifficulty', [
batDiff])
def getUprightTables(self):
tableList = []
for table in self.tables:
if table.state != 'Flat':
tableList.append(table.index)
continue
return tableList
def getToonTableIndex(self, toonId):
tableIndex = -1
for table in self.tables:
if table.avId == toonId:
tableIndex = table.index
break
continue
return tableIndex
def getToonGolfSpotIndex(self, toonId):
golfSpotIndex = -1
for golfSpot in self.golfSpots:
if golfSpot.avId == toonId:
golfSpotIndex = golfSpot.index
break
continue
return golfSpotIndex
def isToonOnTable(self, toonId):
result = self.getToonTableIndex(toonId) != -1
return result
def isToonOnGolfSpot(self, toonId):
result = self.getToonGolfSpotIndex(toonId) != -1
return result
def isToonRoaming(self, toonId):
if not self.isToonOnTable(toonId):
pass
result = not self.isToonOnGolfSpot(toonId)
return result
def reachedTable(self, tableIndex):
if self.movingToTable and self.tableDest == tableIndex:
self.movingToTable = False
self.curTable = self.tableDest
self.tableDest = -1
def hitTable(self, tableIndex):
self.notify.debug('hitTable tableIndex=%d' % tableIndex)
if tableIndex < len(self.tables):
table = self.tables[tableIndex]
if table.state != 'Flat':
table.goFlat()
def awayFromTable(self, tableIndex):
self.notify.debug('awayFromTable tableIndex=%d' % tableIndex)
if tableIndex < len(self.tables):
taskName = 'Unflatten-%d' % tableIndex
unflattenTime = self.diffInfo[3]
taskMgr.doMethodLater(unflattenTime, self.unflattenTable, taskName, extraArgs = [
tableIndex])
def unflattenTable(self, tableIndex):
if tableIndex < len(self.tables):
table = self.tables[tableIndex]
if table.state == 'Flat':
if table.avId and table.avId in self.involvedToons:
table.forceControl(table.avId)
else:
table.goFree()
def incrementDinersExploded(self):
self.numDinersExploded += 1
def magicWordHit(self, damage, avId):
self.hitBoss(damage)
def _DistributedBossbotBossAI__doAreaAttack(self):
self.b_setAttackCode(ToontownGlobals.BossCogAreaAttack)
def _DistributedBossbotBossAI__doGolfAreaAttack(self):
self.numGolfAreaAttacks += 1
self.b_setAttackCode(ToontownGlobals.BossCogGolfAreaAttack)
def hitToon(self, toonId):
avId = self.air.getAvatarIdFromSender()
if not self.validate(avId, avId != toonId, 'hitToon on self'):
return None
if avId not in self.involvedToons or toonId not in self.involvedToons:
return None
toon = self.air.doId2do.get(toonId)
if toon:
self.healToon(toon, 1)
self.sendUpdate('toonGotHealed', [
toonId])
def requestGetToonup(self, beltIndex, toonupIndex, toonupNum):
grantRequest = False
avId = self.air.getAvatarIdFromSender()
if self.state != 'BattleFour':
grantRequest = False
elif (beltIndex, toonupNum) not in self.toonupsGranted:
toon = simbase.air.doId2do.get(avId)
if toon:
grantRequest = True
if grantRequest:
self.toonupsGranted.insert(0, (beltIndex, toonupNum))
if len(self.toonupsGranted) > 8:
self.toonupsGranted = self.toonupsGranted[0:8]
self.sendUpdate('toonGotToonup', [
avId,
beltIndex,
toonupIndex,
toonupNum])
if toonupIndex < len(self.toonUpLevels):
self.healToon(toon, self.toonUpLevels[toonupIndex])
self.numToonupGranted += 1
self.totalLaffHealed += self.toonUpLevels[toonupIndex]
else:
self.notify.warning('requestGetToonup this should not happen')
self.healToon(toon, 1)
def toonLeftTable(self, tableIndex):
if self.movingToTable and self.tableDest == tableIndex:
if random.random() < 0.5:
self.movingToTable = False
self.waitForNextAttack(0)
def getBattleFourTime(self):
if self.state != 'BattleFour':
t1 = 0
else:
elapsed = globalClock.getFrameTime() - self.battleFourStart
t1 = elapsed / float(self.battleFourDuration)
return t1
def getDamageMultiplier(self):
mult = 1.0
if self.doneOvertimeOneAttack and not (self.doneOvertimeTwoAttack):
mult = 1.25
if self.getBattleFourTime() > 1.0:
mult = self.getBattleFourTime() + 1
return mult
def toggleMove(self):
self.moveAttackAllowed = not (self.moveAttackAllowed)
return self.moveAttackAllowed
| [
"fr1tzanatore@aol.com"
] | fr1tzanatore@aol.com |
48902b3ddf518fcde7dd331a6fd0e765785f9e38 | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_04_Code/4375OS_04_03_cumulative_standard_normal_CND.py | 2a6c8a23cd889e5cbf7786369ec7bdeffe6ba0ba | [] | no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | """
Name : 4375OS_04_03_cumulative_standard_normal_CND.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/25/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
from math import *
def CND(X):
    """Cumulative distribution function of the standard normal.

    Uses the Abramowitz & Stegun polynomial approximation (formula
    26.2.17); accurate to about 1e-7.

    X: the evaluation point

    e.g.,
    >>> round(CND(0), 4)
    0.5
    >>> round(CND(1), 4)
    0.8413
    """
    b1, b2, b3, b4, b5 = (0.31938153, -0.356563782, 1.781477937,
                          -1.821255978, 1.330274429)
    z = abs(X)
    t = 1.0 / (1.0 + 0.2316419 * z)
    # Fifth-degree polynomial in t, weighted by the normal density at z.
    poly = b1 * t + b2 * t * t + b3 * pow(t, 3) + b4 * pow(t, 4) + b5 * pow(t, 5)
    prob = 1.0 - 1.0 / sqrt(2 * pi) * exp(-z * z / 2.0) * poly
    if X < 0:
        # Symmetry: N(-x) = 1 - N(x).
        prob = 1.0 - prob
    return prob
| [
"944727327@qq.com"
] | 944727327@qq.com |
7a1b05fc18498fd91411554f68ba46fa469f7957 | 26329cc5464a2aa69a2bc1636c71772efafdd467 | /lab 6/GameObjects/Player.py | 9396ad525308790419f076230f419f2fe9e46221 | [] | no_license | ThomasMGilman/ETGG-2801_2 | e71030d7368a929a24e20efddae346bd6b3a6173 | 209b37e79bd32fc41a544c29bf050e77f91bc71f | refs/heads/master | 2020-07-11T13:44:58.623191 | 2019-12-04T04:22:32 | 2019-12-04T04:22:32 | 204,556,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | from sdl2.keycode import *
from utilityLibs import glCommands, ImageTexture2DArray
from toolLibs import math3d
from GameObjects import Bullet, Shapes
#from GameObjects.Entity import *
import array, globs
class Player:#(Entity):
    """The player: a textured quad that can run, crouch, jump, and shoot.

    The GL vertex/texture/index buffers, the VAO, and the texture array are
    class-level and built once, the first time a Player is constructed.
    """
    vbuff = None   # shared vertex buffer
    tbuff = None   # shared texture-coordinate buffer
    ibuff = None   # shared index buffer
    vao = None     # shared vertex array object
    tex = None     # shared 2D texture array

    def __init__(self, x, y, size):
        self.pos = math3d.vec2(x, y)            # current position
        self.crouchScale = math3d.vec2(1, .5)   # vertical squash while crouched
        self.crouching = False
        self.direction = 0                      # -1: left, 1: right
        self.lastFired = 0                      # cooldown until next shot
        self.state = globs.ON_GROUND            # ON_GROUND / RISING / FALLING
        self.life = 10                          # remaining hit points
        self.size = size                        # quad edge length
        self.halfSize = size / 2
        if Player.vao is None:
            Player.vbuff = array.array("f")
            Player.tbuff = array.array("f")
            Player.ibuff = array.array("I")
            Shapes.createSquare(Player.vbuff, size, size, x, y)
            Shapes.createSquareTextureArray(Player.tbuff)
            Shapes.createSquareIndexArray(Player.ibuff)
            Player.vao = glCommands.setup(Player.vbuff, Player.tbuff, Player.ibuff)
        if Player.tex is None:
            Player.tex = ImageTexture2DArray.ImageTexture2DArray(*globs.playerTextures)

    def draw(self):
        # Crouching squashes the quad vertically via the uniform scale.
        if self.crouching:
            glCommands.changeUniform(self.pos, self.crouchScale)
        else:
            glCommands.changeUniform(self.pos)
        glCommands.drawElement(glCommands.GL_TRIANGLES, len(Player.ibuff), Player.vao, Player.tex, 0, 0)

    def update(self, elapsedTime):
        # BUGFIX: the original tested '(SDLK_d or SDLK_RIGHT) in globs.keyset',
        # which only ever checks SDLK_d because 'or' returns its first truthy
        # operand. Each key must be tested for membership separately.
        if SDLK_d in globs.keyset or SDLK_RIGHT in globs.keyset:
            self.direction = globs.FACING_RIGHT
            self.pos[0] += globs.playerSpeed * elapsedTime
        if SDLK_a in globs.keyset or SDLK_LEFT in globs.keyset:
            self.direction = globs.FACING_LEFT
            self.pos[0] -= globs.playerSpeed * elapsedTime
        self.crouching = SDLK_s in globs.keyset or SDLK_DOWN in globs.keyset
        # Vertical motion driven by the jump state machine.
        if self.state == globs.RISING:
            self.pos[1] += globs.playerSpeed * elapsedTime
        elif self.state == globs.FALLING:
            self.pos[1] -= globs.playerSpeed * elapsedTime
        if SDLK_SPACE in globs.keyset and self.lastFired <= 0:
            # Fire from mid-height; halve the muzzle height while crouched.
            bulletPosY = self.pos[1] + self.halfSize
            if self.crouching:
                bulletPosY *= .5
            globs.objectsToDraw.append(Bullet.Bullet(self.pos[0], bulletPosY, self.direction))
            self.lastFired = globs.playerFireRate
        # Jump state machine: ON_GROUND -> RISING -> FALLING -> ON_GROUND.
        if SDLK_w in globs.keyset and self.state == globs.ON_GROUND:
            self.state = globs.RISING
        elif self.pos[1] >= globs.jumpPeak and self.state == globs.RISING:
            self.pos[1] = globs.jumpPeak
            self.state = globs.FALLING
        elif self.pos[1] <= 0 and self.state == globs.FALLING:
            self.pos[1] = 0
            self.state = globs.ON_GROUND
        self.lastFired -= elapsedTime

    def alive(self):
        """Return True while the player still has life remaining."""
        return self.life > 0
"Thomas.Gilman@ymail.com"
] | Thomas.Gilman@ymail.com |
d55024cc1d14013dab3c9fdb65756c1e8cb97845 | 1864af9eda58307024acbf7fe5d5f2f39f435e44 | /quickstart_guides/recursion/python/reverse_linked_list.py | 244a165ff3027aacc2733b2dde26717f6a2260f6 | [] | no_license | vprusso/6-Weeks-to-Interview-Ready | c393bbfe071d97cba12f0f0668e53a25fb25986f | 8105e1b20bf450a03a9bb910f344fc140e5ba703 | refs/heads/master | 2021-08-11T04:48:34.252178 | 2020-08-09T22:54:55 | 2020-08-09T22:54:55 | 210,997,768 | 6 | 2 | null | 2019-09-26T04:12:44 | 2019-09-26T04:12:44 | null | UTF-8 | Python | false | false | 2,103 | py | """
Title: Reverse linked list
Problem:
Given a linked list, write a function that prints the nodes of the list in
reverse order.
Execution: python reverse_linked_list.py
"""
import unittest
class Node:
    """Node class for linked list."""
    def __init__(self, data) -> None:
        self.data = data  # payload stored at this node
        self.next = None  # link to the successor; None marks the tail
class LinkedList:
    """A minimal singly linked list supporting append and in-place reversal."""

    def __init__(self):
        self.head = None  # empty list

    def append(self, data) -> None:
        """Append a new node holding data at the tail of the list."""
        node = Node(data)
        if self.head is None:
            self.head = node
            return
        cursor = self.head
        while cursor.next:
            cursor = cursor.next
        cursor.next = node

    def reverse_recursive(self):
        """Reverse the list in place using tail recursion on the links."""
        def _rev(node, acc):
            # acc accumulates the already-reversed prefix.
            if node is None:
                return acc
            rest = node.next
            node.next = acc
            return _rev(rest, node)

        self.head = _rev(self.head, None)
class TestReverseLinkedList(unittest.TestCase):
    """Unit tests for LinkedList.reverse_recursive."""
    def test_1(self):
        # Reversing a four-element list of strings yields D, C, B, A.
        llist = LinkedList()
        llist.append("A")
        llist.append("B")
        llist.append("C")
        llist.append("D")
        llist.reverse_recursive()
        res = []
        expected_res = ["D", "C", "B", "A"]
        # Walk the reversed list and collect node payloads in order.
        cur_node = llist.head
        while cur_node:
            res.append(cur_node.data)
            cur_node = cur_node.next
        self.assertEqual(expected_res, res)
    def test_2(self):
        # Same check with integer payloads: 1..4 reverses to 4..1.
        llist = LinkedList()
        llist.append(1)
        llist.append(2)
        llist.append(3)
        llist.append(4)
        llist.reverse_recursive()
        res = []
        expected_res = [4, 3, 2, 1]
        cur_node = llist.head
        while cur_node:
            res.append(cur_node.data)
            cur_node = cur_node.next
        self.assertEqual(expected_res, res)
if __name__ == "__main__":
unittest.main()
| [
"vincentrusso1@gmail.com"
] | vincentrusso1@gmail.com |
5840d6184e90ee1b3c4b1a42b7b7153e6c2fd7d5 | b2203c60193d7707407fa269e47200f6e1804f0c | /deid/dicom/fields.py | a47e8d6b6c6fa43fb1fe67cd83d4287c18c96806 | [
"MIT"
] | permissive | ticlazau/deid | 4790c3f09a731a656dbe2e55570c1776bb1cbd86 | 827844dac2b06ce2221068697a9662779447ee81 | refs/heads/master | 2020-04-17T20:53:37.988381 | 2019-01-18T21:14:02 | 2019-01-18T21:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,522 | py | '''
Copyright (c) 2017-2019 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from pydicom.sequence import Sequence
from pydicom.dataset import RawDataElement
from deid.logger import bot
from pydicom import read_file
import os
import re
def extract_sequence(sequence,prefix=None):
    '''Recursively flatten a pydicom.sequence.Sequence into a list of
    {"key": ..., "value": ...} dictionaries.

    Parameters
    ==========
    sequence: the pydicom Sequence (an iterable of Datasets) to flatten
    prefix: the parent field name, if any; nested keys are joined as
            "parent__child"
    '''
    items = []
    for item in sequence:
        for key,val in item.items():
            # RawDataElement entries carry no keyword and are skipped.
            if not isinstance(val,RawDataElement):
                header = val.keyword
                if prefix is not None:
                    header = "%s__%s" %(prefix,header)
                value = val.value
                if isinstance(value,bytes):
                    value = value.decode('utf-8')
                if isinstance (value,Sequence):
                    # Nested sequence: recurse with the combined prefix.
                    items += extract_sequence(value,prefix=header)
                    continue
                entry = {"key": header, "value": value}
                items.append(entry)
    return items
def expand_field_expression(field, dicom, contenders=None):
    '''Get a list of fields based on an expression. If no expression is
    found, return the single field. Supported forms:

    all                     all candidate fields
    endswith:<expr>         fields ending with the expression
    startswith:<expr>       fields starting with the expression
    contains:<expr>         fields containing the expression
    except:<expr>           fields NOT containing the expression

    If contenders is None, the candidate fields come from dicom.dir().
    Matching is case-insensitive.
    '''
    if contenders is None:
        contenders = dicom.dir()

    # "all" is the only expander used without a ":<expression>" suffix.
    if field.lower() == "all":
        return contenders

    parts = field.split(':')
    if len(parts) == 1:
        # A plain field name: return it as a one-element list.
        return parts

    expander, expression = parts
    expander = expander.lower()
    expression = expression.lower()
    matched = []
    if expander == "endswith":
        matched = [f for f in contenders if re.search('(%s)$' % expression, f.lower())]
    elif expander == "startswith":
        matched = [f for f in contenders if re.search('^(%s)' % expression, f.lower())]
    elif expander == "except":
        matched = [f for f in contenders if not re.search(expression, f.lower())]
    elif expander == "contains":
        matched = [f for f in contenders if re.search(expression, f.lower())]
    return matched
def get_fields(dicom, skip=None, expand_sequences=True):
    '''get fields is a simple function to extract a dictionary of fields
       (non empty) from a dicom file.

       Parameters
       ==========
       dicom: the loaded dicom dataset
       skip: a field name, or list of field names, to exclude
       expand_sequences: if True, Sequence values are flattened into
           prefixed key/value entries via extract_sequence
    '''
    if skip is None:
        skip = []
    # Accept a single name as well as a list
    if not isinstance(skip,list):
        skip = [skip]
    fields = dict()
    contenders = dicom.dir()
    for contender in contenders:
        if contender in skip:
            continue
        try:
            value = dicom.get(contender)
            # Adding expanded sequences
            if isinstance(value,Sequence) and expand_sequences is True:
                sequence_fields = extract_sequence(value,prefix=contender)
                for sf in sequence_fields:
                    fields[sf['key']] = sf['value']
            else:
                # Empty/missing values are dropped; everything else is
                # stringified (bytes decoded as UTF-8 first)
                if value not in [None,""]:
                    if isinstance(value,bytes):
                        value = value.decode('utf-8')
                    fields[contender] = str(value)
        except:
            # NOTE(review): bare except silently drops any element that
            # fails to read — deliberate best-effort per the comment
            # below, but narrowing to Exception is worth confirming
            pass # need to look into this bug
    return fields
def get_fields_byVR(dicom,exclude_fields=None):
    '''filter a dicom's fields based on a list of value
       representations (VR). If exclude_fields is not defined,
       defaults to "US" and "SS"

       Parameters
       ==========
       dicom: the loaded dicom dataset
       exclude_fields: a VR string, or list of VR strings, to skip

       Returns
       =======
       list of field names whose element exposes a VR that is not in
       exclude_fields
    '''
    if exclude_fields is None:
        exclude_fields = ['US','SS']

    # Accept a single VR string as well as a list
    if not isinstance(exclude_fields,list):
        exclude_fields = [exclude_fields]

    fields = []
    for field in dicom.dir():
        element = dicom.data_element(field)
        if element is not None:
            if "VR" in element.__dict__:
                # BUGFIX: compare the VR string itself. The previous code
                # tested the DataElement object against the list of VR
                # strings, which never matched, so nothing was excluded.
                if element.VR not in exclude_fields:
                    fields.append(field)
    return fields
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
4b1bb2a44f75ecdfb99ce83063c562e36192098b | 00af09f4ac6f98203910d86c3791c152184ace9a | /Lib/email/iterators.py | 881b2439a615ee50f496dc7c4be7e91b56a1ac07 | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,545 | py | <<<<<<< HEAD
<<<<<<< HEAD
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order. This method is a
    generator.
    """
    yield self
    if not self.is_multipart():
        return
    for part in self.get_payload():
        yield from part.walk()
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    """
    for part in msg.walk():
        content = part.get_payload(decode=decode)
        # Non-string payloads (multiparts, bytes) are skipped entirely
        if not isinstance(content, str):
            continue
        yield from StringIO(content)
def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text". Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    for part in msg.walk():
        if part.get_content_maintype() != maintype:
            continue
        if subtype is None or part.get_content_subtype() == subtype:
            yield part
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
=======
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
yield from subpart.walk()
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, str):
yield from StringIO(payload)
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
yield from subpart.walk()
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, str):
yield from StringIO(payload)
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| [
"Weldon@athletech.org"
] | Weldon@athletech.org |
e3d5b4f6f3145f4fce8231acb6e4696d0b02ff4e | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/renderplus/data.py | 12c9f7792fa1157e207d11cea0d79b48fc7da224 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,589 | py | # ------------------------------------------------------------------------------
# LICENSE
# ------------------------------------------------------------------------------
# Render+ - Blender addon
# (c) Copyright Diego Garcia Gangl (januz) - 2014, 2015
# <diego@sinestesia.co>
# ------------------------------------------------------------------------------
# This file is part of Render+
#
# Render+ is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ------------------------------------------------------------------------------
import os
import platform
import bpy
from bpy.props import (IntProperty,
StringProperty,
BoolProperty,
EnumProperty,
FloatProperty,
FloatVectorProperty,
CollectionProperty,
PointerProperty)
from . import utils
# ------------------------------------------------------------------------------
# CONVENIENCE STUFF
# ------------------------------------------------------------------------------

# Addon preferences, cached at import time for module-wide access.
# KeyError means the addon is not registered under __package__ yet
# (e.g. first enable / partial reload); fall back to None so the
# module still imports cleanly.
try:
    prefs = bpy.context.user_preferences.addons[__package__].preferences
except KeyError:
    prefs = None
# ------------------------------------------------------------------------------
# ADDON PREFERENCES
# ------------------------------------------------------------------------------
def default_path_debug():
    """ Create a useful default for support log """
    home = os.path.expanduser('~')
    return os.path.join(home, 'renderplus_support.log')
def make_path_sane(key):
    """ Prevent Blender's relative paths of doom

    Used as the `update` callback of path properties in the addon
    preferences: rewrites Blender's '//'-relative prefix into an
    absolute path.

    key: name of the preference property to sanitize
    """
    # Item-style assignment (prefs[key] = ...) writes the raw value;
    # presumably this avoids re-triggering the property's update
    # callback recursively — TODO confirm against Blender's RNA docs
    if prefs[key] and prefs[key].startswith('//'):
        prefs[key] = utils.sane_path(prefs[key])

    # Special case: the support-log path must never be left empty,
    # reset it to the default location instead
    elif key == 'debug_file' and prefs.debug_file == '':
        prefs['debug_file'] = default_path_debug()
class RP_MT_MailQuickSetup(bpy.types.Menu):
    """ Menu of one-click SMTP presets for well-known mail providers """

    bl_idname = 'wm.rp_mt_mail_quick_setup'
    bl_label = 'Quick Setup'

    def draw(self, context):
        # One entry per supported provider; each invokes the
        # renderplus.mail_quick_setup operator with a provider id
        layout = self.layout

        layout.operator(
            'renderplus.mail_quick_setup',
            text='Gmail').provider = 'GMAIL'

        layout.operator(
            'renderplus.mail_quick_setup',
            text='Yahoo').provider = 'YAHOO'

        layout.operator(
            'renderplus.mail_quick_setup',
            text='MSN/Hotmail/Live').provider = 'LIVE'
class RP_Preferences(bpy.types.AddonPreferences):
    """ Addon preferences for Render+

    Three tabs (selected via ui_tab): notification settings
    (sound + email), batch rendering defaults, and help/debugging.
    """

    bl_idname = __package__

    # --------------------------------------------------------------------------
    # NOTIFICATIONS TAB

    sound_file = StringProperty(
        name='Custom Sound for notifications',
        description='Use notifications sound',
        subtype='FILE_PATH',
        # Sanitize on edit: Blender's '//'-relative paths don't work
        # outside the current blend file
        update=lambda a,b: make_path_sane('sound_file'),
        default=utils.path('assets', 'notification.ogg')
    )

    sound_volume = FloatProperty(
        name='Sound Volume',
        description='Set the volume for sound notifications',
        default=90.0,
        min=0,
        max=100.0
    )

    mail_user = StringProperty(
        name='Username',
        description='User to login into the mail server',
        default=''
    )

    mail_password = StringProperty(
        name='Password',
        description='Password to login into the mail server',
        subtype='PASSWORD',
        default=''
    )

    mail_ssl = BoolProperty(
        name='Use SSL',
        description='Connect to mail server using Secure Sockets',
        default=False,
    )

    mail_server = StringProperty(
        name='Mail server (SMTP)',
        description='Server to send use when sending mails',
        default=''
    )

    mail_to = StringProperty(
        name='Send to',
        description='Address to send mail to',
    )

    # --------------------------------------------------------------------------
    # BATCH TAB

    show_batch = BoolProperty(
        name='Show Batch render panel',
        description='Show Batch rendering panel in render properties',
        default=True,
    )

    batch_refresh_interval = FloatProperty(
        name='Refresh interval for batch panel',
        description=('Time between refreshes in the UI panel while a batch is'
                     'running (in seconds).'),
        default=1.0,
        min=0.2,
        max=60.0
    )

    batch_new_dirs = BoolProperty(
        name = 'Automatically create directories when rendering',
        description = ('Try to create directories set in output paths'
                       ' if they don\'t exist when rendering.'),
        default = True,
    )

    batch_use_custom_css = BoolProperty(
        name = 'Use a custom CSS file for RSS feeds',
        description = 'Use a custom stylesheet for RSS feeds',
        default = False,
    )

    batch_custom_css = StringProperty(
        name = 'Custom CSS file',
        description = 'Custom CSS file to use for RSS Feeds',
        default = '',
        update=lambda a,b: make_path_sane('batch_custom_css'),
        subtype = 'FILE_PATH',
    )

    # -1 means "not detected yet"; the batch module fills these in
    batch_cuda_devices = IntProperty(
        name='Amount of Cuda devices in system',
        min=-1,
        max=64,
        default=-1,
    )

    batch_cuda_active = StringProperty(
        name='Cuda device set in preferences',
        default='',
    )

    blender_path = StringProperty(
        name='Custom Blender Command',
        description=('Blender to use for batches. Type a'
                     'command or point this to the Blender executable.'),
        update=lambda a,b: make_path_sane('blender_path'),
        subtype='FILE_PATH'
    )

    term_path = StringProperty(
        name='Custom Terminal Command',
        description=('Terminal to use for batches. Type a'
                     'command or point this to a terminal executable.'),
        update=lambda a,b: make_path_sane('term_path'),
        subtype='FILE_PATH'
    )

    # --------------------------------------------------------------------------
    # HELP TAB

    enable_debug = BoolProperty(
        name='Generate support log',
        description=('Enable debugging output. This is used to get information'
                     'when reporting a bug, or requesting support.'),
        default = False,
    )

    debug_file = StringProperty(
        name='Support log file',
        description='Where to save the support log output',
        update=lambda a,b: make_path_sane('debug_file'),
        subtype='FILE_PATH',
        default=default_path_debug(),
    )

    ui_tab = EnumProperty(
        name='Tab',
        description='Tab in the preferences editor',
        items=(('NOTIFICATIONS', 'Notifications', ''),
               ('BATCH', 'Batch', ''),
               ('HELP', 'Help', ''),
               ),
        default='NOTIFICATIONS')

    def draw(self, context):
        """ Draw the preferences UI for the currently selected tab """

        layout = self.layout
        row = layout.row()
        row.prop(self, 'ui_tab', expand=True)

        if self.ui_tab == 'NOTIFICATIONS':
            layout.separator()
            layout.prop(self, 'sound_file', icon='PLAY_AUDIO')
            row = layout.row()
            row.label(text='Sound Volume')
            row.prop(self, 'sound_volume', text='', slider=True)
            layout.separator()
            layout.separator()

            split = layout.split(0.75)
            split.label(text='Email Setup', icon='SCRIPTWIN')
            split.menu('wm.rp_mt_mail_quick_setup')

            split = layout.split(1.0)
            col = split.column()
            col.prop(self, 'mail_to')
            col.separator()
            col.prop(self, 'mail_user')
            col.prop(self, 'mail_password')
            col.prop(self, 'mail_server')
            col.prop(self, 'mail_ssl')

        elif self.ui_tab == 'BATCH':
            layout.separator()
            layout.prop(self, 'show_batch')
            row = layout.row()
            # Refresh interval only makes sense when the panel is shown
            row.enabled = self.show_batch
            row.prop(self, 'batch_refresh_interval')
            layout.separator()
            row = layout.row()
            row.prop(self, 'batch_new_dirs')

            split = layout.split(0.4)
            col = split.column()
            col.prop(self, 'batch_use_custom_css')
            col = split.column()
            col.enabled = self.batch_use_custom_css
            col.prop(self, 'batch_custom_css', text='')

            layout.separator()
            layout.prop(self, 'blender_path', icon='BLENDER')
            layout.prop(self, 'term_path', icon='CONSOLE')
            # BUGFIX: "emtpy" typo corrected to "empty" in user-facing text
            layout.label(text=('Fill this if you want to use a different'
                               ' Blender or Terminal for batches.'
                               ' Leave empty to use defaults.'), icon='INFO')
            layout.separator()

        elif self.ui_tab == 'HELP':
            layout.prop(self, 'enable_debug')
            layout.separator()

            if self.enable_debug:
                privacy = (
                    'The debug file will contain the following information '
                    'about your system: Operating system, Blender version and branch.')

                layout.prop(self, 'debug_file')
                layout.label(privacy, icon='INFO')
                layout.separator()
# ------------------------------------------------------------------------------
# BATCH OPERATOR SETTINGS
# ------------------------------------------------------------------------------
# These are settings used by operators. They are set as props here
# so they can be shown in panels, instead of popups.
# Enum items shared by the three filename-suffix slots of
# RP_Batch_Ops_OutputChange: (identifier, UI label, tooltip)
suffix_options =(
    ('NONE', 'None', ''),
    ('SCENE', 'Scene', ''),
    ('RENDERLAYER', 'Render Layer', ''),
    ('CAMERA', 'Camera', ''),
)
class RP_Batch_Ops_OutputChange(bpy.types.PropertyGroup):
    """ Data for Output Change

    Settings used by the batch "output change" operator: a base
    directory/filename, up to three ordered filename suffixes, and
    optional per-scene/camera/layer subdirectories.
    """

    # Output
    # --------------------------------------------------------------------------
    base_directory = StringProperty(
        name='Base directory',
        default='',
        subtype='FILE_PATH'
    )

    base_filename = StringProperty(
        name='Base filename',
        default='',
    )

    # Suffixes for filenames
    # --------------------------------------------------------------------------
    # Three ordered suffix slots, each one of suffix_options
    name_suffix_01 = EnumProperty(
        items= suffix_options,
        name='First Suffix',
    )

    name_suffix_02 = EnumProperty(
        items= suffix_options,
        name='Second Suffix',
    )

    name_suffix_03 = EnumProperty(
        items= suffix_options,
        name='Third Suffix',
    )

    # Subdirectories
    # --------------------------------------------------------------------------
    # BUGFIX: tooltips said "Make subir" — corrected to "subdir"
    subdirs_scene = BoolProperty(
        name='Scenes',
        description='Make subdir for each scene',
        default=False,
    )

    subdirs_cam = BoolProperty(
        name='Cameras',
        description='Make subdir for each camera',
        default=False,
    )

    subdirs_layer = BoolProperty(
        name='Render Layers',
        description='Make subdir for each renderlayer',
        default=False,
    )
class RP_Batch_Ops_QuickBatch(bpy.types.PropertyGroup):
    """ Data for Quick Batch

    Settings used by the quick-batch operator to generate render jobs
    (tiled renders, per-scene jobs, animation jobs).
    """

    # Number of tiles the frame is split into for tiled quick-batches
    tiles_x = IntProperty(
        name='Horizontal Tiles',
        min=1,
        max=10,
        default=2,
    )

    tiles_y = IntProperty(
        name='Vertical Tiles',
        min=1,
        max=10,
        default=2,
    )

    output_path = StringProperty(
        name='Output path',
        default='',
        subtype='FILE_PATH'
    )

    # NOTE(review): size_x max is 10000 but size_y max is 100000 —
    # one of the two limits looks like a typo; confirm intended value
    size_x = IntProperty(
        name='Width',
        min=1,
        max=10000,
        default=1,
        subtype='PIXEL',
    )

    size_y = IntProperty(
        name='Height',
        min=1,
        max=100000,
        default=1,
        subtype='PIXEL',
    )

    # Scene to build jobs from; all_scenes overrides the single choice
    scene = StringProperty(default="", name="Scene")
    all_scenes = BoolProperty(default=False, name="Use all scenes")

    use_animation = BoolProperty(
        name='Animation',
        default=True,
        description='Make animation render jobs',
    )

    no_camera = BoolProperty(
        name='Don\'t use cameras',
        default=False,
        description='Don\'t setup cameras for render jobs',
    )
class RP_Batch_Ops(bpy.types.PropertyGroup):
    """ Settings for operators

    Container grouping the per-operator setting blocks so they can be
    drawn in panels instead of operator popups.
    """

    output_change = PointerProperty(type=RP_Batch_Ops_OutputChange)
    quick_batch = PointerProperty(type=RP_Batch_Ops_QuickBatch)
# ------------------------------------------------------------------------------
# RENDER JOB
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# HELPER FUNCTIONS
def check_job_name(name):
    """ Make sure the job name is unique

    Returns `name` unchanged if no job in the batch list uses it,
    otherwise appends an incrementing '.NNN' suffix ('.001', '.002',
    ...) until the result is unique.
    """
    jobs = bpy.context.scene.renderplus.batch.jobs
    taken = {job.name for job in jobs}

    candidate = name
    counter = 0
    while candidate in taken:
        counter += 1
        candidate = '{0}.{1:0>3}'.format(name, counter)

    return candidate
def set_job_name(self, value):
    """ Wrapper to call check_job_name

    Setter for RP_RenderJob.name: skips the uniqueness check when the
    name is unchanged, so re-assigning the same value doesn't rename
    the job to a '.001' variant.
    """
    if 'name' in self and self['name'] == value:
        return

    self['name'] = check_job_name(value)
def get_job_name(self):
    """ Get the job's name

    Getter for RP_RenderJob.name; falls back to a default when the
    underlying ID property hasn't been written yet (draw() can call
    this before it's defined).
    """
    return self['name'] if 'name' in self else 'Untitled Render Job'
def default_job_name():
    """ Return the default name for a job

    Delegates to check_job_name so 'New Render Job' gets a '.001'
    style suffix when that name is already taken in the batch list.
    """
    return check_job_name('New Render Job')
def fill_job_from_scene(self, context):
    """ Populate all fields for render job from scene data

    Update callback for RP_RenderJob.scene: copies the scene's active
    camera, world and first render layer into the currently selected
    job.
    """
    batch_list = bpy.context.scene.renderplus.batch.jobs
    index = bpy.context.scene.renderplus.batch.index

    # External jobs point at another .blend; nothing local to copy
    if batch_list[index].use_external:
        return

    try:
        scene = bpy.data.scenes[batch_list[index].scene]
    except KeyError:
        # Scene name doesn't exist (renamed/deleted) — leave job as-is
        return

    try:
        batch_list[index].camera = scene.camera.name
    except AttributeError:
        # Scene has no active camera
        pass

    try:
        batch_list[index].world = scene.world.name
    except AttributeError:
        # Scene has no world
        pass

    batch_list[index].layer = scene.render.layers[0].name
def set_external(self, context):
    """ Update callback for 'use_external': clear scene data when the
    job switches to an external blend, or point it back at the current
    scene when it switches to internal. """
    rp_batch = bpy.context.scene.renderplus.batch
    job = rp_batch.jobs[rp_batch.index]

    if job.use_external:
        # External jobs carry no local scene/camera/world/layer data
        job.scene = ''
        job.camera = ''
        job.world = ''
        job.layer = ''
    else:
        # Assigning .scene re-triggers fill_job_from_scene
        job.scene = context.scene.name
def is_batch_format_optional(format):
    """ Check if a file format is optional """

    # Formats whose availability depends on build options/codecs
    optional_formats = {
        'HDR',
        'TIFF',
        'EXR',
        'MULTILAYER',
        'MPEG',
        'AVICODEC',
        'QUICKTIME',
        'CINEON',
        'DPX',
        'DDS',
    }
    return format in optional_formats
def generate_GPU_enum(self, context):
    """ Generate list of computing devices for ui

    Builds the items callback for the 'device' EnumProperty from the
    CUDA device count stored in the addon preferences.
    """
    devices = [
        ('DEFAULT', 'Default', 'Don\'t change computing device'),
        ('CPU', 'CPU', 'Render using the CPU'),
    ]

    devices += [('CUDA_%d' % index,
                 'GPU #%d' % (index + 1),
                 'Use this GPU to render')
                for index in range(prefs.batch_cuda_devices)]

    return devices
# ------------------------------------------------------------------------------
# CLASSES
class RP_CustomOverride(bpy.types.PropertyGroup):
    """ Custom overrides for a render job

    A single (datapath, value) pair; presumably applied to the scene
    when the job runs — confirm against the batch module.
    """

    path = StringProperty(
        name='Datapath',
        description='Datapath to property',
        default='')

    # Stored as a string regardless of the target property's type
    data = StringProperty(
        name='Data',
        description='Data to use',
        default='')

    name = StringProperty(
        name='Name',
        description='Override Name',
        default='New Custom Override')

    enabled = BoolProperty(
        name='Enabled',
        description='Enable this override',
        default = True,)
class RP_RenderJob(bpy.types.PropertyGroup):
    """ Render job to put in queue

    One entry in the batch list. Each job can either render a scene
    from the current file or an external .blend, with optional
    per-job overrides for frames, output, size and compute device.
    """

    # --------------------------------------------------------------------------
    # BASIC PROPS

    # Routed through set/get so names stay unique in the batch list
    # (duplicates get a '.001' style suffix)
    name = StringProperty(
        name='Name',
        description='A name to identify this job in the queue',
        default='Untitled Render Job',
        set=set_job_name,
        get=get_job_name)

    # Changing the scene re-fills camera/world/layer from that scene
    scene = StringProperty(
        name='Scene',
        description='Scene to render',
        default='',
        update=fill_job_from_scene)

    camera = StringProperty(
        name='Camera',
        description='Camera to use in this render',
        default='')

    world = StringProperty(
        name='World',
        description='World to use in this render',
        default='')

    layer = StringProperty(
        name='Render Layer',
        description='Use only this render layer',
        default='')

    enabled = BoolProperty(
        name='Enable this render job',
        description='Process this render job',
        default=True)

    # --------------------------------------------------------------------------
    # EXTERNAL BLEND
    # --------------------------------------------------------------------------

    # Toggling clears or repopulates scene data (see set_external)
    use_external = BoolProperty(
        name='Use external blendfile',
        description='Use a external blend file for this job',
        default=False,
        update=set_external)

    blend_file = StringProperty(
        name='Blend File',
        description='Path to external blendfile',
        subtype='FILE_PATH',
        default='')

    # --------------------------------------------------------------------------
    # FRAMES AND ANIMATION
    # --------------------------------------------------------------------------

    animation = BoolProperty(
        name='Animation',
        description='Render an animation instead of a still image',
        default=False)

    # When False, the scene's own frame settings are used
    frame_custom = BoolProperty(
        name='Custom Frame',
        description='Use a custom frame or frame range for this render',
        default=False)

    frame_still = IntProperty(
        name='Frame',
        description='Frame to render',
        default=0)

    frame_start = IntProperty(
        name='Start Frame',
        description='First frame of the animation range',
        default=0)

    frame_end = IntProperty(
        name='End Frame',
        description='Final frame of the animation range',
        default=250)

    # --------------------------------------------------------------------------
    # OUTPUT
    # --------------------------------------------------------------------------

    output = StringProperty(
        name='Output',
        description='Filename to output to',
        subtype='FILE_PATH',
        default='')

    use_custom_format = BoolProperty(
        name='Custom File Format',
        description='Use a specific file format for this render job',
        default=False,
    )

    # NOTE: 'format' shadows the builtin, but renaming it would break
    # saved files and UI references, so it is kept as-is
    format = EnumProperty(
        name='Format',
        description='Format to use in the render job',
        items=(('TGA', 'Targa', ''),
               ('IRIS', 'Iris', ''),
               ('JPEG', 'Jpeg', ''),
               ('MOVIE', 'Movie', ''),
               ('RAWTGA', 'Raw Targa', ''),
               ('AVIRAW', 'Raw AVI', ''),
               ('AVIJPEG', 'Jpeg AVI', ''),
               ('PNG', 'PNG', ''),
               ('BMP', 'BMP', ''),
               ('HDR', 'Radiance HDR', ''),
               ('TIFF', 'TIFF', ''),
               ('EXR', 'OpenEXR', ''),
               ('MULTILAYER', 'OpenEXR Multilayer', ''),
               ('MPEG', 'MPEG', ''),
               ('QUICKTIME', 'Quicktime', ''),
               ('CINEON', 'Cineon', ''),
               ('DPX', 'DPX', ''),
               ('DDS', 'DDS', ''),
               ),
        default='PNG',
    )

    # 0 = use the sample count set in the scene
    cycles_samples = IntProperty(
        name='Samples',
        description=('Samples to render. Set to 0 to use'
                     ' the value set in the scene'),
        default=0,
        min=0,
        max=10000)

    # 0 presumably lets Blender choose the thread count — confirm
    threads = IntProperty(
        name='Threads',
        description='Threads to use while rendering',
        default=0,
        min=0,
        max=64)

    # --------------------------------------------------------------------------
    # RENDER SIZE
    # --------------------------------------------------------------------------

    size_custom = BoolProperty(
        name='Custom Size',
        description='Use a custom render size for this job',
        default=False)

    size_x = IntProperty(
        name='Width',
        description='Custom render width for this job',
        default=1920,
        min=4)

    size_y = IntProperty(
        name='Height',
        description='Custom render height for this job',
        default=1080,
        min=4)

    use_section = BoolProperty(
        name='Render section',
        description = 'Render only a section of the image',
        default= False,)

    # Section coordinates and sizes are fractions of the frame (0..1)
    section_x = FloatProperty(
        name='X',
        description='Starting X coordinate for section render',
        default=0,
        min=0,
        max=0.99,)

    section_y = FloatProperty(
        name='Y',
        description='Starting Y coordinate for section render',
        default=0,
        min=0,
        max=0.99,)

    section_width = FloatProperty(
        name='Width',
        description='Width for section render',
        default=1,
        min=0.01,
        max=1,)

    section_height = FloatProperty(
        name='Height',
        description='Height for section render',
        default=1,
        min=0.01,
        max=1,)

    # Items are generated dynamically from the CUDA device count in
    # the addon preferences (see generate_GPU_enum)
    device = EnumProperty(
        name='Compute Device',
        description='Compute device to render with',
        items=generate_GPU_enum)

    # --------------------------------------------------------------------------
    # CUSTOM OVERIDES
    # --------------------------------------------------------------------------

    custom_overrides = CollectionProperty(type=RP_CustomOverride)

    custom_overrides_index = IntProperty(
        name='Index of current custom override',
        default=0)
# ------------------------------------------------------------------------------
# RENDER SLOTS
# ------------------------------------------------------------------------------
class RP_RenderSlot(bpy.types.PropertyGroup):
    """ Customizable render slots

    Per-slot metadata (name + used flag) layered on top of Blender's
    render slots; 'identifier' is constrained to 0-8, matching the
    nine built-in slots — confirm against the slots module.
    """

    identifier = IntProperty(
        name='ID',
        description='Int to identify this slot',
        default=0,
        min=0,
        max=8
    )

    name = StringProperty(
        name='Name',
        description='A name to identify this slot',
        default='Slot',
    )

    is_used = BoolProperty(
        name='Slot is used',
        description='True if this slot has been used for render',
        default=False)
# ------------------------------------------------------------------------------
# STATS
# ------------------------------------------------------------------------------
class RP_StatsData(bpy.types.PropertyGroup):
    """ Stats data

    Render-time statistics gathered during animation renders, plus
    the UI/save toggles for displaying and exporting them.
    """

    average = FloatProperty(
        name='Average frame rendertime',
        description='Averaged rendertime for all frames',
        default=0)

    # Two floats per extreme; presumably (frame, rendertime) — confirm
    # against the stats-collection code
    slowest = FloatVectorProperty(
        name='Slowest frame rendertime',
        description='Highest rendertime for all frames',
        size=2,
        default=(0, 0))

    fastest = FloatVectorProperty(
        name='Fastest frame rendertime',
        description='Smallest rendertime of all frames',
        size=2,
        default=(0, 0))

    remaining = FloatProperty(
        name='Time remaining to complete animation',
        description='Estimation of how long rendering will take',
        default=0)

    # -1 presumably flags "no animation rendered yet" — confirm
    total = FloatProperty(
        name='Total rendertime',
        description='Time it took to render the last animation',
        default=-1)

    ui_toggle = BoolProperty(
        name='Show time stats',
        description='Show more stats about render time',
        default=False)

    save_file = BoolProperty(
        name='Save stats to a file',
        description='Save the stats to a CSV file',
        default=False)
# ------------------------------------------------------------------------------
# BATCH
# ------------------------------------------------------------------------------
class RP_BatchSettings(bpy.types.PropertyGroup):
    """ Batch Data

    The job queue itself plus batch-wide options: RSS feed, logging,
    global size/percentage overrides and terminal behavior.
    """

    # The job queue and the job currently selected in the UI list
    jobs = CollectionProperty(type=RP_RenderJob)
    index = IntProperty(
        name='Index of current render job in list',
        default=0)

    # Batch Renders Settings -----------------------------
    rss_path = StringProperty(
        name='RSS file',
        description='Filepath to write batch RSS file to',
        default='//feed.rss',
        subtype='FILE_PATH'
    )

    use_rss = BoolProperty(
        name='Write RSS file',
        description='Generate a RSS file to monitor batch process',
        default=False)

    write_logs = BoolProperty(
        name='Write log files',
        description='Write log files for each render job',
        default=False)

    # Global overrides apply to every job, superseding per-job sizes
    use_global_size = BoolProperty(
        name='Global size',
        description='Override size for all render jobs',
        default=False)

    global_size_x = IntProperty(
        name='Width',
        description='Custom render width for all jobs',
        default=1920,
        min=4)

    global_size_y = IntProperty(
        name='Height',
        description='Custom render height for all jobs',
        default=1080,
        min=4)

    use_global_percentage = BoolProperty(
        name='Global Percentage',
        description='Override size percentage for all jobs',
        default=True)

    global_percentage = FloatProperty(
        name='Percentage',
        description='Custom size percentage for all jobs',
        subtype='PERCENTAGE',
        precision=0,
        min=1,
        max=100,
        default=100)

    ignore_border = BoolProperty(
        name='Ignore render border',
        description='Ignore render border for batch',
        default=False)

    use_term = BoolProperty(
        name='Use terminal',
        description='Run the batch inside a terminal',
        default=False,)

    use_rplus_settings = BoolProperty(
        name='Use Render+ Settings',
        description='Use notification, poweroff and post/pre actions in batch',
        default=False,)

    # Batch Renders UI
    # --------------------------------------------------------------------------
    ui_job_tab = EnumProperty(
        name='Tab for render job overrides',
        description='Current tab for render job overrides',
        items=(('SCENE', 'Scene', 'Scene related overrides'),
               ('RENDER', 'Render', 'Rendering related overrides'),
               ('CUSTOM', 'Custom', 'Custom Overrides')),
        default='SCENE')
# ------------------------------------------------------------------------------
# ACTION
# ------------------------------------------------------------------------------
class RP_ActionSettings(bpy.types.PropertyGroup):
    """ Settings for pre/post actions

    An action is either a shell command or a Python script, selected
    by 'option'; only the matching field is used.
    """

    option = EnumProperty(
        name='Option',
        description='Options to run this action',
        items=(('command', 'Command', 'Run a command'),
               ('script', 'Script', 'Run a Python script')),
        default='command')

    command = StringProperty(
        name='Command',
        description='Command to execute',
        default='')

    script = StringProperty(
        name='Script',
        description='Script to run',
        default='')
# ------------------------------------------------------------------------------
# SETTINGS
# ------------------------------------------------------------------------------
class RP_Settings(bpy.types.PropertyGroup):
    """ Settings and UI States for R+ """
    # What to do with the computer once rendering is finished.
    off_options = EnumProperty(
        name='Power Off',
        description='Power off when rendering is finished',
        items=(('DISABLED', 'Disabled', 'Let the computer on'),
               ('SLEEP', 'Sleep', 'Set computer to sleep'),
               ('OFF', 'Shut down', 'Turn off computer')),
        default='DISABLED')
    # Notification channels used when rendering finishes.
    notifications_desktop = BoolProperty(
        name='Desktop Notifications',
        description='Notify me using the Desktop',
        default=False)
    notifications_sound = BoolProperty(
        name='Sound',
        description='Notify me using Sound',
        default=False)
    notifications_mail = BoolProperty(
        name='Email',
        description='Send an email to notify me',
        default=False)
    # OpenGL (viewport) render options.
    opengl_transparent = BoolProperty(
        name='Transparent',
        description='Make background transparent',
        default=False)
    opengl_use_viewport = BoolProperty(
        name='Render Viewport',
        description='Render the entire viewport (including invisible objects)',
        default=False)
    opengl_percentage = FloatProperty(
        name='Size Percentage',
        description='Custom size percentage OpenGL renders',
        subtype='PERCENTAGE',
        precision=0,
        min=1,
        max=100,
        default=100)
    autosave = BoolProperty(
        name='Autosave image renders',
        description=('Save image renders automatically to the folder in the'
                     'output panel'),
        default=False)
    # Nested settings groups declared elsewhere in this addon.
    stats = PointerProperty(type=RP_StatsData)
    batch = PointerProperty(type=RP_BatchSettings)
    batch_ops = PointerProperty(type=RP_Batch_Ops)
    # Render Slots
    # --------------------------------------------------------------------------
    slots = CollectionProperty(type=RP_RenderSlot)
    # Index of the currently selected render slot (0..8).
    active_slot = IntProperty(
        name='Index of active slot',
        default=0,
        min=0,
        max=8)
    # Post-render settings
    # --------------------------------------------------------------------------
    post_enabled = BoolProperty(
        name='Post Render Toggle',
        description='Enable/Disable post render actions',
        default=False)
    post_settings = PointerProperty(type=RP_ActionSettings)
    # Pre-render settings
    # --------------------------------------------------------------------------
    pre_enabled = BoolProperty(
        name='Pre Render Toggle',
        description='Enable/Disable Pre render actions',
        default=False)
    pre_settings = PointerProperty(type=RP_ActionSettings)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
4a355ab43b857bd830f0d81558594437970485f1 | 6c2ecefb12be6b04f597e3fb887d9389050aa7e1 | /DjangoCourse/第七周/freshshop/fs_goods/models.py | 928b9f8ce8a000fcae1b280535926d0418865cc1 | [] | no_license | GmyLsh/learngit | 99d3c75843d2b0b873f26e098025832985c635b3 | 3e7993c7119b79216fea24e5e35035336e4f5f5b | refs/heads/master | 2020-04-12T09:11:55.068312 | 2018-12-19T07:19:42 | 2018-12-19T07:19:42 | 162,395,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | from django.db import models
# Product type (category) model
class TypeInfo(models.Model):
    """Product category master record."""
    title = models.CharField(max_length=20, verbose_name='分类标题')
    # Logical-delete flag: categories are hidden instead of removed.
    is_delete = models.BooleanField(default=False, verbose_name='是否删除')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = '商品类型'
        verbose_name_plural = '商品类型'
# Model holding the detail information of a single product
class GoodsInfo(models.Model):
    """Detail information for a single product (goods) item."""
    # Product name
    g_title = models.CharField(max_length=20, verbose_name='商品名称')
    # Product image, uploaded under fs_goods/<year>/<month>
    g_pic = models.ImageField(upload_to='fs_goods/%Y/%m', verbose_name='商品图片')
    # Product price.  max_digits=5 is the total digit count and
    # decimal_places=2 the fraction digits; FloatField cannot enforce a
    # fixed number of decimal places, hence DecimalField.
    g_price = models.DecimalField(max_digits=5, decimal_places=2, verbose_name='商品价格')
    is_delete = models.BooleanField(default=False, verbose_name='是否删除')
    # Pricing unit of the product (defaults to per 500g)
    g_unit = models.CharField(max_length=20, default='500g', verbose_name='商品计价')
    # Products are sorted by popularity, so keep a click/view counter
    g_click = models.IntegerField(verbose_name='商品浏览量')
    # The fields above feed the product list page; the fields below hold
    # the data extracted for the product detail page.
    # Short product description
    g_abstract = models.CharField(max_length=200, verbose_name='商品简介')
    # Stock on hand
    g_stock = models.IntegerField(verbose_name='商品库存')
    # Full product detail text
    g_content = models.TextField(verbose_name='商品详情')
    # Category this product belongs to: foreign key to TypeInfo.
    g_type = models.ForeignKey(TypeInfo, verbose_name='所属分类', on_delete=models.DO_NOTHING)
    def __str__(self):
        return self.g_title
    class Meta:
        verbose_name = '商品信息'
        verbose_name_plural = '商品信息'
| [
"469192981@qq.com"
] | 469192981@qq.com |
e015ea8cfa5f548fa912b28984f7499b639d1bed | 255021fadf9f739db042809ca95f5b9f75609ec5 | /Adv/5650 핀볼게임.py | 771fdbfb2a006e5ee03930ac9293d0be077fa50d | [] | no_license | unsung107/Algorithm_study | 13bfff518fc1bd0e7a020bb006c88375c9ccacb2 | fb3b8563bae7640c52dbe9324d329ca9ee981493 | refs/heads/master | 2022-12-13T02:10:31.173333 | 2020-09-13T11:32:10 | 2020-09-13T11:32:10 | 295,137,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,745 | py | directions = {1:(0, -1), 2:(0, 1), 3:(-1, 0), 4:(1, 0)}
aside = {1:2, 2:1, 3:4, 4:3}
def meet1(y, x, d):
    # Deflector block type 1: remap the incoming direction, then advance
    # one cell in the new direction.
    turn = {1: 2, 2: 4, 3: 1}
    d = turn.get(d, 3)
    return y + directions[d][1], x + directions[d][0], d
def meet2(y, x, d):
    # Deflector block type 2: remap the incoming direction, then advance
    # one cell in the new direction.
    turn = {1: 4, 2: 1, 3: 2}
    d = turn.get(d, 3)
    return y + directions[d][1], x + directions[d][0], d
def meet3(y, x, d):
    # Deflector block type 3: remap the incoming direction, then advance
    # one cell in the new direction.
    turn = {1: 3, 2: 1, 3: 4}
    d = turn.get(d, 2)
    return y + directions[d][1], x + directions[d][0], d
def meet4(y, x, d):
    # Deflector block type 4: remap the incoming direction, then advance
    # one cell in the new direction.
    turn = {1: 2, 2: 3, 3: 4}
    d = turn.get(d, 1)
    return y + directions[d][1], x + directions[d][0], d
def meet5(y, x, d):
    # Deflector block type 5: reverses the direction of travel, then
    # advances one cell.
    turn = {1: 2, 2: 1, 3: 4}
    d = turn.get(d, 3)
    return y + directions[d][1], x + directions[d][0], d
def hall(y, x, d, num):
    # Wormhole pair `num` (board values 6-10): exit at the partner hole
    # and take one step in the unchanged direction of travel.
    dx, dy = directions[d]
    for exit_y, exit_x in halls[num]:
        if (exit_y, exit_x) != (y, x):
            return exit_y + dy, exit_x + dx, d
# def move(y, x, d):
# global cnt
# if y == start[0] and x == start[1] and cnt > 0:
# return
# dx = directions[d][0]
# dy = directions[d][1]
# while 0 <= y < N and 0 <= x < N and not board[y][x]:
# y += dy
# x += dx
# if y == start[0] and x == start[1]:
# return
# if not (0 <= y < N and 0 <= x < N):
# d = aside[d]
# cnt += 1
# x += directions[d][0]
# y += directions[d][1]
# move(y, x, d)
# elif 1 <= board[y][x] <= 5:
# y, x, d = meets[board[y][x]](y, x, d)
# cnt += 1
# move(y, x, d)
# elif 6 <= board[y][x] <= 10:
# y, x, d = hall(y, x, d, board[y][x])
# move(y, x, d)
# else: return
# Dispatch table: board value 1-5 selects the matching deflector function
# (index 0 is a placeholder so board values can index directly).
meets = [0, meet1, meet2, meet3, meet4, meet5]
for ro in range(int(input())):
    N = int(input())
    starts = []            # every (y, x, direction) launch candidate
    board = []
    halls = [[] for _ in range(11)]   # wormhole cells, indexed by board value 6-10
    for y in range(N):
        board.append(list(map(int,input().split())))
        for x in range(N):
            if not board[y][x]:
                # Empty cell: the ball may be launched from here in any
                # of the four directions.
                for d in range(1, 5):
                    starts.append((y, x, d))
            elif 5 < board[y][x] < 11:
                halls[board[y][x]].append((y, x))
    res = 0
    while starts:
        y, x, d = starts.pop()
        queue = [(y, x, d)]
        cnt = 0            # bounces scored for this launch
        start = (y, x)
        t = 0
        while queue:
            t += 1
            y, x, d = queue.pop(0)
            # Returning to the launch cell ends the simulation (the very
            # first iteration starts there, hence the t != 1 guard).
            if y == start[0] and x == start[1] and t != 1:
                break
            dx = directions[d][0]
            dy = directions[d][1]
            # Roll in a straight line until leaving the board or hitting
            # a non-empty cell.
            while 0 <= y < N and 0 <= x < N and not board[y][x]:
                y += dy
                x += dx
            if y == start[0] and x == start[1]:
                break
            if not (0 <= y < N and 0 <= x < N):
                # Outer wall: reverse direction, score a bounce and step
                # back onto the board.
                d = aside[d]
                cnt += 1
                x += directions[d][0]
                y += directions[d][1]
                queue.append((y, x, d))
            elif 1 <= board[y][x] <= 5:
                # Deflector block: bounce according to its shape.
                y, x, d = meets[board[y][x]](y, x, d)
                cnt += 1
                queue.append((y, x, d))
            elif 6 <= board[y][x] <= 10:
                # Wormhole: teleport to the paired hole and continue.
                y, x, d = hall(y, x, d, board[y][x])
                queue.append((y, x, d))
            else: break    # any other value ends this ball's travel
        # BUGFIX: removed a stray `print(queue)` debug statement here that
        # polluted stdout between answers and broke the expected
        # '#case score' output format.
        if cnt > res:
            res = cnt
    print('#%d %d' %(ro + 1, res))
| [
"unsung102@naver.com"
] | unsung102@naver.com |
abbde3fd251b2dccc42da4c3a43154a3c9c35846 | 1ffc17893d9e15fd939628bbc41c3d2633713ebd | /skl2onnx/operator_converters/pipelines.py | c7b3ba34da23d9d118749da61a273cb421c92735 | [
"Apache-2.0"
] | permissive | xadupre/sklearn-onnx | 646e8a158cdded725064964494f0f8a760630aa8 | b05e4864cedbf4f2a9e6c003781d1db8b53264ac | refs/heads/master | 2023-09-01T15:58:38.112315 | 2022-12-21T01:59:45 | 2022-12-21T01:59:45 | 382,323,831 | 0 | 2 | Apache-2.0 | 2023-01-04T13:41:33 | 2021-07-02T11:22:00 | Python | UTF-8 | Python | false | false | 1,971 | py | # SPDX-License-Identifier: Apache-2.0
from sklearn.base import is_classifier
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
from .._parse import _parse_sklearn
def convert_pipeline(scope: Scope, operator: Operator,
                     container: ModelComponentContainer):
    """Convert a scikit-learn Pipeline by converting every step in order.

    Classifier steps get ``zipmap`` disabled so their raw tensors can feed
    the next step.  The outputs of the final step are wired to the
    pipeline outputs through Identity nodes.
    """
    pipeline = operator.raw_operator
    current = operator.inputs
    for step in pipeline.steps:
        estimator = step[1]
        if is_classifier(estimator):
            scope.add_options(id(estimator), options={'zipmap': False})
            container.add_options(id(estimator), options={'zipmap': False})
        outputs = _parse_sklearn(scope, estimator, current,
                                 custom_parsers=None)
        current = outputs
    if len(outputs) != len(operator.outputs):
        raise RuntimeError(
            "Mismatch between pipeline output %d and "
            "last step outputs %d." % (
                len(outputs), len(operator.outputs)))
    for produced, expected in zip(outputs, operator.outputs):
        container.add_node(
            'Identity', produced.full_name, expected.full_name,
            name=scope.get_unique_operator_name("Id" + operator.onnx_name))
def convert_feature_union(scope: Scope, operator: Operator,
                          container: ModelComponentContainer):
    """Placeholder converter: FeatureUnion is handled while parsing."""
    message = ("This converter not needed so far. It is usually handled "
               "during parsing.")
    raise NotImplementedError(message)
def convert_column_transformer(scope: Scope, operator: Operator,
                               container: ModelComponentContainer):
    """Placeholder converter: ColumnTransformer is handled while parsing."""
    message = ("This converter not needed so far. It is usually handled "
               "during parsing.")
    raise NotImplementedError(message)
# Register the converters under the aliases produced by the parser.
register_converter('SklearnPipeline', convert_pipeline)
register_converter('SklearnFeatureUnion', convert_feature_union)
register_converter('SklearnColumnTransformer', convert_column_transformer)
| [
"noreply@github.com"
] | xadupre.noreply@github.com |
c0190465b828f87abbc3ab7021fb9c721401241b | 1705e97ef5613685e142e3f78a2057399b09858c | /Code/asiportal/rquests/services/emailer.py | 78a33d7d3764f695eb5e9b690d2725d5eb245dbb | [] | no_license | FIU-SCIS-Senior-Projects/Academic-Success-Initiative---ASI-PantherCentric-1.0 | 0b956175efb031022ed32412195531c7f0c162c5 | 8ee64b58e2634384d5905defd3701a453b49b966 | refs/heads/master | 2022-11-24T00:07:52.458186 | 2017-08-02T01:36:32 | 2017-08-02T01:36:32 | 91,715,982 | 0 | 0 | null | 2022-11-22T01:31:04 | 2017-05-18T16:37:10 | SQLPL | UTF-8 | Python | false | false | 2,813 | py | from django.core.mail import EmailMessage
from django.template.loader import get_template
def no_room_available_email(request):
    """Tell the tutee their tutoring request could not be scheduled."""
    subject = '[ASI] Tutoring Request Unable To Be Scheduled'
    context = {
        'firstName': request.submitted_by.first_name,
        'courseID': request.course,
        'start': request.availability.start_time,
        'end': request.availability.end_time,
    }
    body = get_template('no_room_available.txt').render(context)
    EmailMessage(
        subject,
        body,
        'asi-noreply@cs.fiu.edu',
        [request.submitted_by.email],
        bcc=['asisoftwaretest@gmail.com'],
    ).send()
def tutoring_confirmation_email(session):
    """Email both the tutee and the ambassador to confirm a scheduled
    tutoring session (course, weekday, time range, date range)."""
    title = '[ASI] Scheduled Tutoring Session Confirmation'
    email_template = get_template('session_confirmation.txt')
    # Context keys mirror the placeholders in session_confirmation.txt.
    email_context = {'firstName': session.tutee.first_name,
            'courseID': session.course,
            'day':session.availability.get_day_display(),
            'startTime': session.availability.start_time,
            'endTime':session.availability.end_time,
            'ambassadorName':
            session.availability.ambassador.get_full_name(),
            'ambassadorEmail':session.availability.ambassador.email,
            'startDate':session.start_date.strftime('%B %d, %Y'),
            'endDate':session.end_date.strftime('%B %d, %Y'),
            }
    message = email_template.render(email_context)
    tutee_email = session.tutee.email
    ambassador_email = session.availability.ambassador.email
    # Both parties are addressed; a test inbox is blind-copied.
    email = EmailMessage(title,message,'asi-noreply@cs.fiu.edu',[tutee_email, ambassador_email], bcc=['asisoftwaretest@gmail.com'])
    email.send()
def request_submission_email(request):
    """Acknowledge receipt of a tutoring request to the tutee."""
    subject = '[ASI] Tutoring Request Received'
    context = {
        'tutee_name': request.submitted_by.get_full_name(),
        'course': request.course,
        'day': request.availability.get_day_display(),
        'start_time': request.availability.start_time.strftime('%-I:%M %p'),
        'end_time': request.availability.end_time.strftime('%-I:%M %p'),
        'ambassador': request.availability.ambassador.get_full_name(),
        'semester': request.availability.semester,
    }
    body = get_template('request_confirmation.txt').render(context)
    EmailMessage(
        subject,
        body,
        'asi-noreply@cs.fiu.edu',
        [request.submitted_by.email],
        bcc=['asisoftwaretest@gmail.com'],
    ).send()
| [
"jakedlopez@gmail.com"
] | jakedlopez@gmail.com |
05dc6b9ebd8801ca06357f2accaa763cc5a0c6f7 | ce73929de648d080420fc99a86e7b73bfb15f0dc | /tms_maintenance/tms_maintenance_program.py | c057166bd3462094aaa9bfd8ebe11cd83fdb5eb8 | [] | no_license | thinkasoft/TMS | dce16ee4b10f9e35d392c883b443f556946d9526 | d8d07227749e07e047a03713142c0bb898a9abf6 | refs/heads/master | 2021-01-10T02:31:22.526633 | 2016-02-01T22:25:00 | 2016-02-01T22:25:00 | 50,875,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,503 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
import time
from datetime import datetime, date
from osv.orm import browse_record, browse_null
from osv.orm import except_orm
from tools.translate import _
from tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare
import decimal_precision as dp
import netsvc
import openerp
class tms_maintenance_order(osv.Model):
    """Vehicle maintenance order: groups maintenance activity lines for
    one fleet unit and drives their workflow and supplier invoicing."""
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _name = 'tms.maintenance.order'
    _description = 'Order Maintenace'
    ########################### Metodos ####################################################################################
    ########## Copy ##########
    # NOTE(review): this `copy` is shadowed by a second `copy` definition
    # further down in this class, so this version is dead code.
    def copy(self, cr, uid, id, default=None, context=None):
        if not default:
            default = {}
        default.update({
            'name': self.pool.get('ir.sequence').get(cr, uid, 'tms.maintenance.order'),
        })
        return super(tms_maintenance_order, self).copy(cr, uid, id, default, context=context)
########################### Columnas : Atributos #######################################################################
_columns = {
'name' : fields.char('Order Number', readonly=True),
'state' : fields.selection([('cancel','Cancelled'), ('draft','Draft'), ('open','Open'), ('released','Released'), ('done','Done')],'State'),
'description' : fields.char('Description'),
'notes' : fields.text('Notes', readonly=True, states={'draft':[('readonly',False)], 'open':[('readonly',False)], 'released':[('readonly',False)]}),
'partner_id': fields.many2one('res.partner','Partner', readonly=True, states={'draft':[('readonly',False)], 'open':[('readonly',False)], 'released':[('readonly',False)]}),
'internal_repair' : fields.boolean('Internal Repair', readonly=True, states={'draft':[('readonly',False)], 'open':[('readonly',False)], 'released':[('readonly',False)]}),
'date_start': fields.datetime('Scheduled Date Start', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'date_end': fields.datetime('Scheduled Date End', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'date_start_real': fields.datetime('Scheduled Date Start Real', readonly=True),
'date_end_real': fields.datetime('Scheduled Date End Real', readonly=True),
'date': fields.datetime('Date', readonly=True, states={'draft':[('readonly',False)]}),
'cost_service': fields.float('Service Cost', readonly=True),
'parts_cost': fields.float('Parts Cost', readonly=True),
########Many2One###########
'shop_id': fields.many2one('sale.shop','Shop', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'unit_id': fields.many2one('fleet.vehicle','Unit', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'product_id': fields.many2one('product.product','Service', required=True, domain=[('tms_category','=','maint_service_type')], readonly=True, states={'draft':[('readonly',False)]}),
'driver_id': fields.many2one('hr.employee','Driver',domain=[('tms_category', '=', 'driver')], required=True, readonly=True, states={'draft':[('readonly',False)]}),
'supervisor_id': fields.many2one('hr.employee','Supervisor',domain=[('tms_category', '=', 'driver')], required=True, readonly=True, states={'draft':[('readonly',False)], 'open':[('readonly',False)], 'released':[('readonly',False)]}),
'user_id': fields.many2one('res.users','User', readonly=True),
'stock_origin_id': fields.many2one('stock.location','Stock Origin', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'stock_dest_id': fields.many2one('stock.location','Stock Dest'),
########One2Many###########
'activities_ids': fields.one2many('tms.maintenance.order.activity','maintenance_order_id','Tasks', readonly=True, states={'draft':[('readonly',False)], 'open':[('readonly',False)], 'released':[('readonly',False)]}),
#'stock_picking_ids': fields.one2many('stock.piking','tms_order_id','Stock_pickings'),
}
###################################################################################################
########## Metodos para crear la factura ##########
    def button_generate_invoices(self,cr,uid,ids,context={}):
        """Button handler: invoice every finished external-workshop
        activity of this order that is not invoiced yet, grouped by
        supplier.  Always returns True."""
        this = self.get_current_instance(cr, uid, ids)
        #self.write(cr, uid, ids, {'state':'draft'})
        activities_external_done_not_invoice = self.get_activities_external_done_not_invoice(cr,uid,ids,context)
        activities = activities_external_done_not_invoice
        #band = False
        #for line in activities:
        #    band = True
        #if not band:
        #    raise osv.except_osv(_('Warning!'),_('No Existen Actividades Externas en Esta Orden o Ya estan Facturadas'))
        #partner = activities[0]['supplier_id']
        self.create_invoices_from_activities_not_invoice_and_done(cr,uid,ids, activities)
        #self.create_invoice_based_by_activities(cr,uid,ids, partner, activities)
        return True
    def create_invoices_from_activities_not_invoice_and_done(self,cr,uid,ids, activities):
        """Group `activities` by supplier and create one supplier invoice
        per distinct supplier."""
        partners = []
        # Collect the distinct suppliers referenced by the activities.
        for activity in activities:
            if not activity['supplier_id'] in partners:
                partners.append(activity['supplier_id'])
        for partner in partners:
            activities_to_partner = []
            for activity in activities:
                if activity['supplier_id']['id'] == partner['id']:
                    activities_to_partner.append(activity)
            ### Build the invoice for this supplier's activities.
            self.create_invoice_based_by_activities(cr,uid,ids, partner, activities_to_partner)
    def create_invoice_lines_from_activity(self,cr,uid,ids, activity):
        """NOTE(review): abandoned stub -- `invoice_lines` is never
        defined here, so calling this method raises NameError."""
        return invoice_lines
def get_activities_external_done(self,cr,uid,ids,context={}):
this = self.get_current_instance(cr, uid, ids)
external_done = []
for line in self.get_activity_lines(cr,uid,ids):
if line['state'] in 'done':
if line['external_workshop']:
external_done.append(line)
return external_done
def get_activities_external_done_not_invoice(self,cr,uid,ids,context={}):
this = self.get_current_instance(cr, uid, ids)
not_invoice = []
for line in self.get_activities_external_done(cr,uid,ids,context):
#if not line['invoiced']:
if not line['invoiced']:
not_invoice.append(line)
return not_invoice
    def synchronize_invoice_one_to_many(self,cr,uid,ids, factura, invoice_lines):
        """NOTE(review): no-op placeholder -- it only fetches the order
        and returns nothing; the synchronisation logic was never written."""
        this = self.get_current_instance(cr, uid, ids)
def create_invoice_based_by_activities(self,cr,uid,ids, partner, activities):
### Diccionarios de Invoice Lines
### Diccionarios de Invoice Lines
### Diccionarios de Invoice Lines
invoice_lines = []
##Se generan los Diccionarios de Inv_line vasados en la lista de actividades
##Se generan los Diccionarios de Inv_line vasados en la lista de actividades
##Se generan los Diccionarios de Inv_line vasados en la lista de actividades
for activity in activities:
a = activity['maintenance_order_id']['product_id']['property_stock_production']['valuation_in_account_id']['id']
if not a:
a = self.pool.get('ir.property').get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
a = self.pool.get('account.fiscal.position').map_account(cr, uid, False, a)
descripcion = str(activity['maintenance_order_id']['name']) +str(', ') +str(activity['product_id']['name_template'])
inv_line = (0,0,{
#'name': activity['product_id']['name_template'],
'name': descripcion, #Descripcion
'origin': activity['maintenance_order_id']['product_id']['name_template'],
'account_id': a,
'price_unit': activity['cost_service']+activity['parts_cost'],
'quantity': 1,
'uos_id': activity['product_id'].uos_id.id,
'product_id': activity['product_id']['id'],
'invoice_line_tax_id': [(6, 0, [x.id for x in activity['product_id'].supplier_taxes_id])],
'note': 'Notasss',
'account_analytic_id': False,
})
invoice_lines.append(inv_line)
#################### Generar La Factura ################################
#################### Generar La Factura ################################
#################### Generar La Factura ################################
journal_id = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'purchase')], context=None)
journal_id = journal_id and journal_id[0] or False
vals = {
'name' : 'Invoice TMS Maintenance',
'origin' : 'Maaaantenimiento',
'type' : 'in_invoice',
'journal_id' : journal_id,
'reference' : 'Maintenance Invoice',
'account_id' : partner.property_account_payable.id,
'partner_id' : partner.id,
'address_invoice_id': self.pool.get('res.partner').address_get(cr, uid, [partner.id], ['default'])['default'],
'address_contact_id': self.pool.get('res.partner').address_get(cr, uid, [partner.id], ['default'])['default'],
'invoice_line' : [x for x in invoice_lines], #account.invoice.line
#'currency_id' : data[1], #res.currency
'comment' : 'Siiiiin Comentarios',
#'payment_term' : pay_term, #account.payment.term
'fiscal_position' : partner.property_account_position.id,
'date_invoice' : time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
invoice_id = self.pool.get('account.invoice').create(cr, uid, vals)
invoice_obj= self.pool.get('account.invoice').browse(cr,uid,ids,invoice_id)
#################### Direccionar la Factura generada a las actividades ################################
for activity in activities:
activity.write({'invoice_id':invoice_id, 'invoiced':True})
return invoice_obj
    ########################### Metodos ####################################################################################
    ########## Methods for service and parts cost ##########
    def set_cost_service(self,cr,uid,ids, cost_service=0.0):
        """Write the total service cost on the order."""
        self.write(cr, uid, ids, {'cost_service':cost_service})
    def set_parts_cost(self,cr,uid,ids, parts_cost=0.0):
        """Write the total parts cost on the order."""
        self.write(cr, uid, ids, {'parts_cost':parts_cost})
    def calculate_parts_cost(self,cr,uid,ids,context=None):
        """Recompute each activity line's parts cost and store the sum on
        the order."""
        suma = 0.0
        for line in self.get_activity_lines(cr,uid,ids):
            line.calculate_parts_cost()
            suma = suma + line['parts_cost']
        self.set_parts_cost(cr,uid,ids, suma)
    def calculate_cost_service(self,cr,uid,ids,context=None):
        """Recompute each activity line's service cost and store the sum
        on the order."""
        suma = 0.0
        this = self.get_current_instance(cr, uid, ids)
        for line in self.get_activity_lines(cr,uid,ids):
            line.calculate_cost_service()
            suma = suma + line['cost_service']
        self.set_cost_service(cr,uid,ids, suma)
    def on_change_product_id(self,cr,uid,ids, product_id):
        """On-change handler: default the destination stock location to
        the selected service product's production location."""
        producto = self.pool.get('product.product').browse(cr, uid, product_id)
        location_id = producto['property_stock_production']['id']
        return {'value':{'stock_dest_id':location_id}}
    def set_stock_dest(self,cr,uid,ids, stock_dest_id):
        """Write the destination stock location on the order."""
        self.write(cr, uid, ids, {'stock_dest_id':stock_dest_id})
        return True
###################################################################################################
########## Metodos para el 'state' ##########
    def action_draft(self,cr,uid,ids,context={}):
        """Reset the order to the Draft state."""
        self.write(cr, uid, ids, {'state':'draft'})
        return True
    def process_activities_in_pending(self, cr, uid, ids, context=None):
        """Move every pending activity line of the order to Process."""
        this = self.get_current_instance(cr, uid, ids)
        for line in this['activities_ids']:
            if line['state'] in 'pending':
                line.action_process(context)
    def action_open(self, cr, uid, ids, context=None):
        """Open the order: requires at least one activity line, stamps the
        real start date and moves pending activities to Process."""
        this = self.get_current_instance(cr, uid, ids)
        band = False
        # `band` ends up truthy iff the order has at least one activity.
        for band in this['activities_ids']:
            band = True
        if not band:
            raise osv.except_osv(_('Warning!'),_('For Open should be exist Activities Asigned'))
        self.write(cr, uid, ids,{'state':'open'})
        self.write(cr, uid, ids,{'date_start_real':time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
        self.process_activities_in_pending(cr, uid, ids, context)
        return True
    def action_done(self,cr,uid,ids,context={}):
        """Close the order: recompute totals and stamp the real end date."""
        self.calculate_parts_cost(cr,uid,ids)
        self.calculate_cost_service(cr,uid,ids)
        self.write(cr, uid, ids, {'state':'done'})
        self.write(cr, uid, ids, {'date_end_real':time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
        return True
    def action_released(self,cr,uid,ids,context={}):
        """Release the order.  In-process activities are finished first;
        remaining pending ones are cancelled.  Raises while any activity
        line is still in Process."""
        exist_activites = self.is_exist_activity_lines(cr,uid,ids)
        ##### Close all the activities if possible
        for act in self.get_current_instance(cr, uid, ids)['activities_ids']:
            if act['state'] in 'process':
                act.action_done()
        #####
        if (self.is_exist_activities_done(cr, uid, ids) | self.is_exist_activities_cancel(cr, uid, ids) | (not exist_activites)): ## IF 1
            if not (self.is_exist_activities_process(cr, uid, ids)): ## IF 2
                if (self.is_exist_activities_pending(cr, uid, ids)): ## IF 3
                    for line in self.get_activity_lines(cr, uid, ids): ## For
                        if line['state'] in 'pending':
                            line.action_cancel()
                    ## End For
                ## END IF 3
                self.write(cr, uid, ids, {'state':'released'})
                return True
            ## END IF 2
        ## END IF 1
        raise osv.except_osv(_('Warning!'),_('All Activity Lines should be not State Process'))
        return False
    def action_cancel(self, cr, uid, ids, context=None):
        """Cancel the order.  Raises while any activity line is still in
        Process.
        NOTE(review): the inner loop selects lines whose state is already
        'cancel' and cancels them again; by symmetry with action_released
        it very likely was meant to cancel the *pending* lines -- confirm.
        """
        exist_activites = self.is_exist_activity_lines(cr,uid,ids)
        if (self.is_exist_activities_done(cr, uid, ids) | self.is_exist_activities_cancel(cr, uid, ids) | (not exist_activites)): ## IF 1
            if not (self.is_exist_activities_process(cr, uid, ids)): ## IF 2
                if (self.is_exist_activities_pending(cr, uid, ids)): ## IF 3
                    for line in self.get_activity_lines(cr, uid, ids): ## For
                        if line['state'] in 'cancel':
                            line.action_cancel()
                    ## End For
                ## END IF 3
                self.write(cr, uid, ids, {'state':'cancel'})
                return True
            ## END IF 2
        ## END IF 1
        raise osv.except_osv(_('Warning!'),_('All Activity Lines should be not State Process'))
        return False
########## Metodo Create ##########
    def create(self, cr, uid, vals, context=None):
        """Override create: assign the order number from the shop's
        maintenance sequence and stamp the creation datetime.  Raises when
        the shop has no maintenance sequence configured."""
        shop = self.pool.get('sale.shop').browse(cr, uid, vals['shop_id'])
        seq_id = shop.tms_maintenance_seq.id
        if shop.tms_maintenance_seq:
            seq_number = self.pool.get('ir.sequence').get_id(cr, uid, seq_id)
            vals['name'] = seq_number
            vals['date'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        else:
            raise osv.except_osv(_('Order Maintenance Sequence Error !'), _('You have not defined Maintenance Sequence for shop ' + shop.name))
        return super(tms_maintenance_order, self).create(cr, uid, vals, context=context)
    ########## Metodo Create Sequence ##########
    def create_sequence(self, cr, uid, vals, context=None):
        """Return the next maintenance sequence number for the shop in
        `vals` (note: also mutates vals['name'] as a side effect)."""
        shop = self.pool.get('sale.shop').browse(cr, uid, vals['shop_id'])
        seq_id = shop.tms_maintenance_seq.id
        secuencia = ''
        if shop.tms_maintenance_seq:
            seq_number = self.pool.get('ir.sequence').get_id(cr, uid, seq_id)
            vals['name'] = seq_number
            secuencia = seq_number
        else:
            raise osv.except_osv(_('Order Maintenance Sequence Error !'), _('You have not defined Maintenance Sequence for shop ' + shop.name))
        return secuencia
    ########## Metodo Copy ##########
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate an order with a blank number and Draft state.  This
        definition overrides the earlier `copy` in the class."""
        maintenance = self.browse(cr, uid, id, context=context)
        if not default:
            default = {}
        default.update({
            'name' : False,
            'state' : 'draft',
        })
        return super(tms_maintenance_order, self).copy(cr, uid, id, default, context=context)
##################### Metodos basicosssssssss #####################################################################################
    def get_current_instance(self, cr, uid, id):
        """Return the browse record for `id` (if a list of ids is given,
        the last record wins)."""
        lines = self.browse(cr,uid,id)
        obj = None
        for i in lines:
            obj = i
        return obj
    def get_activity_lines(self,cr,uid,id):
        """Return the activity line browse records of the order."""
        order_obj = self.get_current_instance(cr, uid, id)
        activity_lines = order_obj['activities_ids']
        return activity_lines
    def is_exist_activity_lines(self, cr, uid, id):
        """True when the order has at least one activity line."""
        order_obj = self.get_current_instance(cr,uid,id)
        activity_lines = self.get_activity_lines_obj(cr, uid, id)
        if not activity_lines:
            return False
        return True
    def get_activity_lines_obj(self, cr, uid, id):
        """Same result as get_activity_lines (kept for compatibility)."""
        return self.get_current_instance(cr, uid, id)['activities_ids']
    ########## Return the state of an activity_line object #########
    def get_state_activity(self, activity_line):
        """Return the workflow state of an activity line."""
        return activity_line['state']
    ########## Predicates: is a single line in cancel/pending/process/done? #########
    # NOTE(review): the `in '<state>'` comparisons are substring tests;
    # assumes states are stored as the exact keywords -- confirm.
    def is_cancel_activity_line(self, activity_line):
        if self.get_state_activity(activity_line) in 'cancel':
            return True
        return False
    def is_pending_activity_line(self, activity_line):
        if self.get_state_activity(activity_line) in 'pending':
            return True
        return False
    def is_process_activity_line(self, activity_line):
        if self.get_state_activity(activity_line) in 'process':
            return True
        return False
    def is_done_activity_line(self, activity_line):
        if self.get_state_activity(activity_line) in 'done':
            return True
        return False
########## Comprueban si existen actividades en cancel, pending, process, done #########
def is_exist_activities_cancel(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
for activity_line in activities:
if self.is_cancel_activity_line(activity_line):
return True
#Si no escontro algo en el for y en el if un true, pues devuelve un false
return False
#End Def
def is_exist_activities_pending(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
for activity_line in activities:
if self.is_pending_activity_line(activity_line):
return True
#Si no escontro algo en el for y en el if un true, pues devuelve un false
return False
#End Def
def is_exist_activities_process(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
for activity_line in activities:
if self.is_process_activity_line(activity_line):
return True
#Si no escontro algo en el for y en el if un true, pues devuelve un false
return False
#End Def
def is_exist_activities_done(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
for activity_line in activities:
if self.is_done_activity_line(activity_line):
return True
#Si no escontro algo en el for y en el if un true, pues devuelve un false
return False
#End Def
########## Comprueban si solo existen actividades en cancel, pending, process, done ######### ('cancel','Cancelled'), ('pending','Pending'), ('process','Process'),('done','Done')
def is_only_exist_activities_cancel(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
if self.is_exist_activities_pending(cr, uid, id):
return False
if self.is_exist_activities_process(cr, uid, id):
return False
if self.is_exist_activities_done(cr, uid, id):
return False
return self.is_exist_activities_cancel(cr, uid, id)
#End Def
def is_only_exist_activities_pending(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
if self.is_exist_activities_cancel(cr, uid, id):
return False
if self.is_exist_activities_process(cr, uid, id):
return False
if self.is_exist_activities_done(cr, uid, id):
return False
return self.is_exist_activities_pending(cr, uid, id)
#End Def
def is_only_exist_activities_process(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
if self.is_exist_activities_cancel(cr, uid, id):
return False
if self.is_exist_activities_pending(cr, uid, id):
return False
if self.is_exist_activities_done(cr, uid, id):
return False
return self.is_exist_activities_process(cr, uid, id)
#End Def
def is_only_exist_activities_done(self, cr, uid, id):
order = self.get_current_instance(cr, uid, id)
activities = self.get_activity_lines(cr,uid,id)
if not activities:
#print 'This order not have activity_lines .........'
return False
if self.is_exist_activities_cancel(cr, uid, id):
return False
if self.is_exist_activities_pending(cr, uid, id):
return False
if self.is_exist_activities_process(cr, uid, id):
return False
return self.is_exist_activities_done(cr, uid, id)
#End Def
######################### Metodos Manipulacion de Stock_picking ########
######################### Metodos Manipulacion de Stock_picking ########
######################### Metodos Manipulacion de Stock_picking ########
def create_stock_picking(self,cr,uid,id,context=None):
seq_order=(self.get_current_instance(cr, uid, id))['name']
id_order=(self.get_current_instance(cr, uid, id))['id']
vals = {
'origin':':'+str(seq_order),
'type':'internal',
'state':'draft',
'move_type':'direct', # Delivery Method : partial=direct
'tms_order_id':''+str(id_order)
}
stock_id = self.pool.get('stock.picking').create(cr, uid, vals, context)
stock_obj = self.pool.get('stock.picking').browse(cr, uid, stock_id)
return stock_obj
def is_exist_stock_picking(self,cr,uid,id):
stocks_objs = self.get_stock_picking_obj_list(cr,uid,id)
if stocks_objs:
return True
return False
def get_stock_picking_obj_list(self,cr,uid,id):
#Obtiene el ID de tms_maintenance_order y construye el args para la busqueda
id_order=(self.get_current_instance(cr, uid, id))['id']
args = [('tms_order_id','=',id_order)]
# busca los stock_picking donde su atributo tms_order_id sea = al order_id,
# y devuelve una lista de id de stock_picking que encontro
stocks_id = self.pool.get('stock.picking').search(cr,uid,args)
# obtiene una lista de instancias de stock_picking, mediante una lista de ids de stock_piking
stocks_objs = self.pool.get('stock.picking').browse(cr,uid,stocks_id)
return stocks_objs
################################# Metodos Para Escribir Genericos ################################
    def write_custom(self, cr, uid, id, vals, context=None):
        # Thin wrapper over orm write(); kept as a single choke point for record updates.
        self.write(cr,uid,id,vals,context)
def set_state(self, cr, uid, id, state, context=None):
vals = {'state':''+str(state)}
self.write_custom(cr, uid, id, vals, context)
######################### Metodos Para Cambiar Estados ################################
    def change_state_to_cancel(self, cr, uid, id, context=None):
        # Workflow shortcut: move the order to the 'cancel' state.
        self.set_state(cr, uid, id, 'cancel')
    def change_state_to_draft(self, cr, uid, id, context=None):
        # Workflow shortcut: move the order back to the 'draft' state.
        self.set_state(cr, uid, id, 'draft')
    def change_state_to_open(self, cr, uid, id, context=None):
        # Workflow shortcut: move the order to the 'open' state.
        self.set_state(cr, uid, id, 'open')
    def change_state_to_released(self, cr, uid, id, context=None):
        # Workflow shortcut: move the order to the 'released' state.
        self.set_state(cr, uid, id, 'released')
    def change_state_to_done(self, cr, uid, id, context=None):
        # Workflow shortcut: move the order to the 'done' state.
        self.set_state(cr, uid, id, 'done')
########################################################################################################################
##################################### Metodos de prueba Impresion ####################################################
########################################################################################################################
def print_stock_picking(self,cr,uid,id, context=None):
#print '-----------------------------------------------------------------------------------------------------------------------'
#print '-----------------------------------------------------------------------------------------------------------------------'
band = self.is_exist_stock_picking(cr,uid,id)
#print 'Existen stock_picking relacionados a esta ORDEN: '+ str(band)
#print '-----------------------------------------------------------------------------------------------------------------------'
#print '-----------------------------------------------------------------------------------------------------------------------'
if band:
for line in self.get_stock_picking_obj_list(cr,uid,id):
#print 'id stock: '+str(line['id'])
#print 'name stock: '+str(line['name'])
#print 'state stock: '+str(line['state'])
#print 'maintenance_order_id: '+str(line['tms_order_id'])
#print 'maintenance_order_id[name]: '+str(line['tms_order_id']['name'])
#print 'maintenance_order_id[state]: '+str(line['tms_order_id']['state'])
#print 'maintenance_order_id[id]: '+str(line['tms_order_id']['id'])
#print '----------------------------------------------------------------------------------------------------------------'
def crear_stock_picking(self,cr,uid,id, context=None):
stock_line = self.create_stock_picking(cr,uid,id,context)
if stock_line:
#print 'Stock_line Fue Creado Exitosamente----------------------------------------------------------------------------------'
#print 'id stock: '+str(stock_line['id'])
#print 'name stock: '+str(stock_line['name'])
#print 'state stock: '+str(stock_line['state'])
#print 'maintenance_order_id: '+str(stock_line['tms_order_id'])
#print 'maintenance_order_id[name]: '+str(stock_line['tms_order_id']['name'])
#print 'maintenance_order_id[state]: '+str(stock_line['tms_order_id']['state'])
#print 'maintenance_order_id[id]: '+str(stock_line['tms_order_id']['id'])
########################### Valores por Defecto ########################################################################
_defaults = {
'state' : lambda *a: 'draft',
'date' : lambda *a: time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'user_id' : lambda obj, cr, uid, context: uid,
'internal_repair' : True,
}
########################### Criterio de ordenamiento ###################################################################
_order = 'date, name'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"you@example.com"
] | you@example.com |
49f4d6d252100b0b949ba1b7bf23ef3ea84a80f8 | faf793376991092615975a559c6bed4e093acc44 | /SECTION 10 lists in python/52 multidimentional list.py | 56299802d4b05bc3d9ab01da570d5b5a043afac8 | [] | no_license | jdiaz-dev/practicing-python | 2385f2541759cfc9ed221b62030c28e8cf6ddde4 | 139b7dd4332e9ab3dd73abee0308cff41f4657fe | refs/heads/master | 2023-04-05T06:13:53.590830 | 2023-03-19T16:06:00 | 2023-03-19T16:06:00 | 320,443,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
# Phonebook entries as [name, number] pairs.
contacts = [
    ['junior', 234234],
    ['yessy', 1234],
    ['frank', 543],
    ['esteban', 54645],
]

# Print only the numeric field of every contact.
for entry in contacts:
    for field in entry:
        if isinstance(field, int):
            print(field)
"lordgelsin26@gmail.com"
] | lordgelsin26@gmail.com |
710509fda1ddbdba3b9d7389f504685c47ba5004 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03060/s036965559.py | d6d7712aa32946ddbf4bfd84f61f44f5d904de74 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | N=int(input())
V=list(map(int, input().split()))
C=list(map(int, input().split()))
ans=0
for i in range(2**N):
X=0
for j in range(N):
if (i>>j)&1:
X+=(V[j]-C[j])
ans=max(ans,X)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
baa6261da09ef1142961ad1d5095576eea1b0295 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_list_nested_same_var-27.py | 35ee6738772f0e965c2ee7337e7b87203f15cdb9 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | x:int = 0
y:int = 0
z:[int] = None
$Statement
for x in z:
for x in z:
print(x)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
b90ad803daea0eea82e9295f45004cdbba4b9f07 | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /kubernetes_platform/python/test/unit/test_volume.py | 87835ff6a63c9b890b3b70da6dde13dbc3de3e3b | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 6,215 | py | # Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.protobuf import json_format
from kfp import dsl
from kfp import kubernetes
import pytest
class TestMountPVC:
    """Verify that kubernetes.mount_pvc() writes the expected entries into the
    pipeline's Kubernetes platform spec and composes with the use_secret_*
    helpers without clobbering their output."""

    def test_mount_one(self):
        # A single mount produces one pvcMount entry with a constant PVC name.
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path',
            )

        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [{
                                    'constant': 'pvc-name',
                                    'mountPath': 'path'
                                }]
                            }
                        }
                    }
                }
            }
        }

    def test_mount_two(self):
        # Two calls on the same task accumulate two pvcMount entries in order.
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path1',
            )
            kubernetes.mount_pvc(
                task,
                pvc_name='other-pvc-name',
                mount_path='path2',
            )

        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [
                                    {
                                        'constant': 'pvc-name',
                                        'mountPath': 'path1'
                                    },
                                    {
                                        'constant': 'other-pvc-name',
                                        'mountPath': 'path2'
                                    },
                                ]
                            }
                        }
                    }
                }
            }
        }

    def test_mount_preserves_secret_as_env(self):
        # checks that mount_pvc respects previously set secrets
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.use_secret_as_env(
                task,
                secret_name='secret-name',
                secret_key_to_env={'password': 'SECRET_VAR'},
            )
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path',
            )

        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [{
                                    'constant': 'pvc-name',
                                    'mountPath': 'path'
                                }],
                                'secretAsEnv': [{
                                    'secretName':
                                        'secret-name',
                                    'keyToEnv': [{
                                        'secretKey': 'password',
                                        'envVar': 'SECRET_VAR'
                                    }]
                                }]
                            }
                        }
                    }
                }
            }
        }

    def test_mount_preserves_secret_as_vol(self):
        # checks that mount_pvc respects previously set secrets
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.use_secret_as_volume(
                task,
                secret_name='secret-name',
                mount_path='secretpath',
            )
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path',
            )

        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [{
                                    'constant': 'pvc-name',
                                    'mountPath': 'path'
                                }],
                                'secretAsVolume': [{
                                    'secretName': 'secret-name',
                                    'mountPath': 'secretpath'
                                }]
                            }
                        }
                    }
                }
            }
        }

    def test_illegal_pvc_name(self):
        # A non-str, non-PipelineChannel pvc_name must be rejected at
        # pipeline-construction time with a descriptive ValueError.
        @dsl.component
        def identity(string: str) -> str:
            return string

        with pytest.raises(
                ValueError,
                match=r'Argument for \'pvc_name\' must be an instance of str or PipelineChannel\. Got unknown input type: <class \'int\'>\.',
        ):

            @dsl.pipeline
            def my_pipeline(string: str = 'string'):
                op1 = kubernetes.mount_pvc(
                    identity(string=string),
                    pvc_name=1,
                    mount_path='/path',
                )
@dsl.component
def comp():
    # Minimal no-op component used as the task under test.
    pass
| [
"noreply@github.com"
] | kubeflow.noreply@github.com |
b5328046f6cdb5aa66427ffe9e2707dabca2be0e | d9c95cd0efad0788bf17672f6a4ec3b29cfd2e86 | /disturbance/migrations/0216_proposal_fee_invoice_references.py | 6f6dc55ec362b2eba775bf211d8f1b787e1e6f82 | [
"Apache-2.0"
] | permissive | Djandwich/disturbance | cb1d25701b23414cd91e3ac5b0207618cd03a7e5 | b1ba1404b9ca7c941891ea42c00b9ff9bcc41237 | refs/heads/master | 2023-05-05T19:52:36.124923 | 2021-06-03T06:37:53 | 2021-06-03T06:37:53 | 259,816,629 | 1 | 1 | NOASSERTION | 2021-06-03T09:46:46 | 2020-04-29T03:39:33 | Python | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-05 06:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('disturbance', '0215_merge_20210202_1343'),
]
operations = [
migrations.AddField(
model_name='proposal',
name='fee_invoice_references',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, default='', max_length=50, null=True), null=True, size=None),
),
]
| [
"katsufumi.shibata@dbca.wa.gov.au"
] | katsufumi.shibata@dbca.wa.gov.au |
5ed73b5cde48e571ff3a4f5ef8a0ff1133755556 | 675e4c77ea4f1053f1acf5b76b9711b53157d841 | /questions/serializers.py | 5cdbc43af6b1978b33f1b6684fb86a672d0fd672 | [] | no_license | moh-hosseini98/django-rest-quora-like | 6d67717be5afd708eacdd74bba706be90a73a1e9 | 7bf020515145a54dcc822d50584e12c0398e7ee5 | refs/heads/master | 2023-04-09T04:56:03.783036 | 2021-04-29T04:18:12 | 2021-04-29T04:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | from rest_framework import serializers
from .models import Question,Answer,qlike,alike,Reply
class AnswerSerializer(serializers.ModelSerializer):
    """Answer representation enriched with like/reply counts and whether the
    requesting user has liked it."""
    author = serializers.StringRelatedField(read_only=True)
    answer_likes = serializers.SerializerMethodField()
    number_of_replies = serializers.SerializerMethodField()
    like_by_req_user = serializers.SerializerMethodField()

    class Meta:
        model = Answer
        exclude = ('updated_at','question',)

    def get_answer_likes(self,instance):
        # Total number of likes on this answer.
        return instance.likers.count()

    def get_number_of_replies(self,instance):
        # Total number of replies attached to this answer.
        return instance.replies.count()

    def get_like_by_req_user(self,instance):
        # True when the requesting user has liked this answer.
        request = self.context['request']
        return instance.likers.filter(liker_id=request.user.id).exists()
class QuestionSerializer(serializers.ModelSerializer):
    """Question representation with aggregate counters and per-request flags
    (whether the requesting user liked or answered it).

    Cleanup: removed dead commented-out code, including a bare triple-quoted
    string that executed as a no-op statement inside get_number_of_likes.
    """
    author = serializers.StringRelatedField(read_only=True)
    slug = serializers.SlugField(read_only=True)
    number_of_likes = serializers.SerializerMethodField()
    number_of_answers = serializers.SerializerMethodField()
    like_by_req_user = serializers.SerializerMethodField()
    user_has_answered = serializers.SerializerMethodField()

    class Meta:
        model = Question
        exclude = ('updated_at',)
        lookup_field = 'slug'

    def get_number_of_answers(self,instance):
        # Count via the reverse relation; avoids loading answer rows.
        return instance.answers.count()

    def get_number_of_likes(self,instance):
        return instance.likes.count()

    def get_like_by_req_user(self,instance):
        # True when the requesting user has liked this question.
        request = self.context['request']
        return instance.likes.filter(liker_id=request.user.id).exists()

    def get_user_has_answered(self,instance):
        # True when the requesting user has posted at least one answer.
        request = self.context['request']
        return instance.answers.filter(
            author=request.user
        ).exists()
class QuestionLikeSerializer(serializers.ModelSerializer):
    """Like-on-question record; the question itself is implied by the URL."""
    liker = serializers.StringRelatedField(read_only=True)

    class Meta:
        model = qlike
        exclude = ('question',)
class AnswerLikeSerializer(serializers.ModelSerializer):
    """Like-on-answer record; the answer itself is implied by the URL."""
    liker = serializers.StringRelatedField(read_only=True)

    class Meta:
        model = alike
        exclude = ('answer',)
class ReplySerializer(serializers.ModelSerializer):
    """Reply to an answer; the parent answer is implied by the URL."""
    author = serializers.StringRelatedField(read_only=True)

    class Meta:
        model = Reply
        exclude = ('answer',)
| [
"mamadhss@yahoo.com"
] | mamadhss@yahoo.com |
95bacd72df21ee4e7d6eba5d398151122f814f1e | fd18ce27b66746f932a65488aad04494202e2e0d | /d03_socket_http/pro_2.py | d3a7e2e1ff30f43c75d905ad8fb338e68a45f33f | [] | no_license | daofeng123/ClassCodes | 1acbd843836e550c9cebf67ef21dfca9f6b9fc87 | fbcd1f24d79b8bb56ad0669b07ad118064609612 | refs/heads/master | 2020-06-24T12:34:28.148197 | 2019-08-15T03:56:40 | 2019-08-15T03:56:40 | 198,963,469 | 3 | 0 | null | 2019-07-26T06:53:45 | 2019-07-26T06:53:44 | null | UTF-8 | Python | false | false | 193 | py | # coding=utf-8
import os
pipe_file = 'io.pipe'
os.mkfifo(pipe_file)
fd = os.open(pipe_file, os.O_RDONLY)
while True:
s = os.read(fd, 10)
if not s:
break
print(s.decode())
| [
"38395870@qq.com"
] | 38395870@qq.com |
0203efcddf9cbe42574c145dec465a59c98bc2b0 | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Django/Django_DB/Django_mysql/mysqlpro/mysqlpro/asgi.py | e8daa3c14429a4891a272b7a5bf0977492bd3708 | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for mysqlpro project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysqlpro.settings')
application = get_asgi_application()
| [
"suendra.aratikatla1608@gmail.com"
] | suendra.aratikatla1608@gmail.com |
7aa31c841c9d3967ee889869937a5741cd46cf68 | ed2389b9c0f8b45f4d1ac2e52815846ed37bc127 | /train_deep_logo_cnn.py | c95a538c88e80aa52713c4c770b95ccb8dbe4aea | [
"MIT"
] | permissive | tracxpoint/AIC_DeepLogo | 161281c443a6d1bc96556fa41d1818ff4609c5fa | 9b936208fcb785cc0affb6e2e9087d1bb83744d4 | refs/heads/master | 2021-09-06T22:03:45.942170 | 2018-01-04T08:27:55 | 2018-01-04T08:27:55 | 116,278,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,980 | py | # The MIT License (MIT)
# Copyright (c) 2016 satojkovic
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import tensorflow as tf
import numpy as np
from six.moves import cPickle as pickle
from six.moves import range
import sys
import os
import common
import model
# Command-line configuration via the TF1 flags API.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    "train_dir", "flickr_logos_27_dataset",
    "Directory where to write event logs and checkpoint.")
tf.app.flags.DEFINE_integer("max_steps", 20001, "Number of batches to run.")
tf.app.flags.DEFINE_integer("image_width", common.CNN_IN_WIDTH,
                            "A width of an input image.")
tf.app.flags.DEFINE_integer("image_height", common.CNN_IN_HEIGHT,
                            "A height of an input image.")
tf.app.flags.DEFINE_integer("learning_rate", 0.0001, "Learning rate")
tf.app.flags.DEFINE_integer("batch_size", 64, "A batch size")
tf.app.flags.DEFINE_integer("num_channels", common.CNN_IN_CH,
                            "A number of channels of an input image.")
# Pickle produced by the dataset-preparation step; consumed by read_data().
PICKLE_FILENAME = 'deep_logo.pickle'
def accuracy(predictions, labels):
    """Percentage of rows whose argmax matches between predictions and labels."""
    correct = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 100 * np.sum(correct) / predictions.shape[0]
def reformat(dataset, labels):
    """Reshape flat image data to NHWC float32 and one-hot encode the labels."""
    image_shape = (-1, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels)
    dataset = dataset.reshape(image_shape).astype(np.float32)
    one_hot = np.arange(model.NUM_CLASSES) == labels[:, None]
    return dataset, one_hot.astype(np.float32)
def read_data():
    """Load the pickled train/valid/test splits, echo their shapes, and
    return them as ([train, valid, test] datasets, [train, valid, test] labels)."""
    with open(PICKLE_FILENAME, 'rb') as f:
        save = pickle.load(f)

    datasets = [save[k]
                for k in ('train_dataset', 'valid_dataset', 'test_dataset')]
    labels = [save[k]
              for k in ('train_labels', 'valid_labels', 'test_labels')]
    del save  # free the pickle dict early

    for title, data, lab in zip(('Training set', 'Valid set', 'Test set'),
                                datasets, labels):
        print(title, data.shape, lab.shape)
    return datasets, labels
def main():
    """Build and train the logo-classification CNN (TF1 graph/session API).

    Optional ``sys.argv[1]``: path to an .npz of pre-trained weights used to
    warm-start ``model.params()``; its 'arr_<i>' keys are sorted numerically
    so the assignment order matches the parameter order.
    """
    if len(sys.argv) > 1:
        f = np.load(sys.argv[1])
        # f.files has unordered keys ['arr_8', 'arr_9', 'arr_6'...]
        # Sorting keys by value of numbers
        initial_weights = [
            f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))
        ]
    else:
        initial_weights = None

    # read input data
    dataset, labels = read_data()
    train_dataset, train_labels = reformat(dataset[0], labels[0])
    valid_dataset, valid_labels = reformat(dataset[1], labels[1])
    test_dataset, test_labels = reformat(dataset[2], labels[2])
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Valid set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # Initial weights: assign ops are built here, run after init below.
        if initial_weights is not None:
            assert len(model_params) == len(initial_weights)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # Input data
        tf_train_dataset = tf.placeholder(
            tf.float32,
            shape=(FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels))
        tf_train_labels = tf.placeholder(
            tf.float32, shape=(FLAGS.batch_size, model.NUM_CLASSES))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        # Training computation (dropout active only on the training branch).
        logits = model.cnn(tf_train_dataset, model_params, keep_prob=0.5)
        with tf.name_scope('loss'):
            loss = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf_train_labels))
            tf.summary.scalar('loss', loss)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            model.cnn(tf_valid_dataset, model_params, keep_prob=1.0))
        test_prediction = tf.nn.softmax(
            model.cnn(tf_test_dataset, model_params, keep_prob=1.0))

        # Merge all summaries
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/train')

        # Add ops to save and restore all the variables
        saver = tf.train.Saver()

    # Do training
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned values')
        else:
            print('initialized')
        for step in range(FLAGS.max_steps):
            # Cycle through the training data one fixed-size minibatch at a time.
            offset = (step * FLAGS.batch_size) % (
                train_labels.shape[0] - FLAGS.batch_size)
            batch_data = train_dataset[offset:(offset + FLAGS.batch_size
                                               ), :, :, :]
            batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            try:
                _, l, predictions = session.run(
                    [optimizer, loss, train_prediction], feed_dict=feed_dict)
                if step % 50 == 0:
                    # NOTE(review): this runs the optimizer a second time on the
                    # same batch just to fetch the summary -- presumably
                    # unintended; confirm before relying on step counts.
                    summary, _ = session.run(
                        [merged, optimizer], feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    print('Minibatch loss at step %d: %f' % (step, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy(
                        predictions, batch_labels))
                    print('Validation accuracy: %.1f%%' % accuracy(
                        valid_prediction.eval(), valid_labels))
            except KeyboardInterrupt:
                # Ctrl-C checkpoints the current weights and aborts training.
                last_weights = [p.eval() for p in model_params]
                np.savez("weights.npz", *last_weights)
                return last_weights

        print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(),
                                                 test_labels))

        # Save the variables to disk.
        save_dir = "models"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "deep_logo_model")
        saved = saver.save(session, save_path)
        print("Model saved in file: %s" % saved)
saved = saver.save(session, save_path)
print("Model saved in file: %s" % saved)
# Script entry point: train only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"satojkovic@gmail.com"
] | satojkovic@gmail.com |
f369dd0824db0d2646bb8503a57347fd157c1020 | 9e549ee54faa8b037f90eac8ecb36f853e460e5e | /venv/lib/python3.6/site-packages/future/backports/email/feedparser.py | e543f232bc70b05d6ffce2fddf6940acce824cc5 | [
"MIT"
] | permissive | aitoehigie/britecore_flask | e8df68e71dd0eac980a7de8c0f20b5a5a16979fe | eef1873dbe6b2cc21f770bc6dec783007ae4493b | refs/heads/master | 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 | MIT | 2022-12-08T04:54:09 | 2019-03-24T00:38:20 | Python | UTF-8 | Python | false | false | 22,840 | py | # Copyright (C) 2004-2006 Python Software Foundation
# Authors: Baxter, Wouters and Warsaw
# Contact: email-sig@python.org
"""FeedParser - An email feed parser.
The feed parser implements an interface for incrementally parsing an email
message, line by line. This has advantages for certain applications, such as
those reading email messages off a socket.
FeedParser.feed() is the primary interface for pushing new data into the
parser. It returns when there's nothing more it can do with the available
data. When you have no more data to push into the parser, call .close().
This completes the parsing and returns the root message object.
The other advantage of this parser is that it will never raise a parsing
exception. Instead, when it finds something unexpected, it adds a 'defect' to
the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import object, range, super
from future.utils import implements_iterator, PY3
__all__ = ["FeedParser", "BytesFeedParser"]
import re
from future.backports.email import errors
from future.backports.email import message
from future.backports.email._policybase import compat32
# Newline recognisers: any of CRLF, bare CR, or bare LF.
NLCRE = re.compile("\r\n|\r|\n")
# Same alternation with a capturing group, for use at beginning-of-line,
# end-of-line, and for cracking a data chunk into (line, eol) pairs.
NLCRE_bol = re.compile("(\r\n|\r|\n)")
NLCRE_eol = re.compile("(\r\n|\r|\n)\Z")
NLCRE_crack = re.compile("(\r\n|\r|\n)")
# RFC 2822 $3.6.8 Optional fields.  ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r"^(From |[\041-\071\073-\176]{1,}:|[\t ])")
EMPTYSTRING = ""
NL = "\n"
# Sentinel yielded by the parser generator when it needs more input pushed.
NeedMoreData = object()
# @implements_iterator
class BufferedSubFile(object):
    """A file-ish object that can have new data loaded into it.

    You can also push and pop line-matching predicates onto a stack.  When the
    current predicate matches the current line, a false EOF response
    (i.e. empty string) is returned instead.  This lets the parser adhere to a
    simple abstraction -- it parses until EOF closes the current message.
    """

    def __init__(self):
        # The last partial line pushed into this object.
        self._partial = ""
        # The list of full, pushed lines, in reverse order
        self._lines = []
        # The stack of false-EOF checking predicates.
        self._eofstack = []
        # A flag indicating whether the file has been closed or not.
        self._closed = False

    def push_eof_matcher(self, pred):
        # Predicate takes a line and returns True at a (boundary) false EOF.
        self._eofstack.append(pred)

    def pop_eof_matcher(self):
        return self._eofstack.pop()

    def close(self):
        # Don't forget any trailing partial line.
        self._lines.append(self._partial)
        self._partial = ""
        self._closed = True

    def readline(self):
        if not self._lines:
            if self._closed:
                # True EOF: closed and drained.
                return ""
            # Not closed yet -- ask the caller to push() more data.
            return NeedMoreData
        # Pop the line off the stack and see if it matches the current
        # false-EOF predicate.
        line = self._lines.pop()
        # RFC 2046, section 5.1.2 requires us to recognize outer level
        # boundaries at any level of inner nesting.  Do this, but be sure it's
        # in the order of most to least nested.
        for ateof in self._eofstack[::-1]:
            if ateof(line):
                # We're at the false EOF.  But push the last line back first.
                self._lines.append(line)
                return ""
        return line

    def unreadline(self, line):
        # Let the consumer push a line back into the buffer.
        assert line is not NeedMoreData
        self._lines.append(line)

    def push(self, data):
        """Push some new data into this object."""
        # Handle any previous leftovers
        data, self._partial = self._partial + data, ""
        # Crack into lines, but preserve the newlines on the end of each
        parts = NLCRE_crack.split(data)
        # The *ahem* interesting behaviour of re.split when supplied grouping
        # parentheses is that the last element of the resulting list is the
        # data after the final RE.  In the case of a NL/CR terminated string,
        # this is the empty string.
        self._partial = parts.pop()
        #GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending with \r:
        # is there a \n to follow later?
        if not self._partial and parts and parts[-1].endswith('\r'):
            self._partial = parts.pop(-2)+parts.pop()
        # parts is a list of strings, alternating between the line contents
        # and the eol character(s).  Gather up a list of lines after
        # re-attaching the newlines.
        lines = []
        for i in range(len(parts) // 2):
            lines.append(parts[i*2] + parts[i*2+1])
        self.pushlines(lines)

    def pushlines(self, lines):
        # Reverse and insert at the front of the lines.
        self._lines[:0] = lines[::-1]

    def __iter__(self):
        return self

    def __next__(self):
        # Iterating stops at (possibly false) EOF, i.e. an empty string.
        line = self.readline()
        if line == '':
            raise StopIteration
        return line
class FeedParser(object):
"""A feed-style parser of email."""
    def __init__(self, _factory=message.Message, **_3to2kwargs):
        # 'policy' is emulated as a keyword-only argument via **kwargs; this
        # `_3to2kwargs` pattern is an artifact of the Py2/Py3 conversion.
        if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
        else: policy = compat32
        """_factory is called with no arguments to create a new message obj

        The policy keyword specifies a policy object that controls a number of
        aspects of the parser's operation.  The default policy maintains
        backward compatibility.

        """
        self._factory = _factory
        self.policy = policy
        try:
            # Probe whether the factory accepts a 'policy' keyword; old-style
            # factories raise TypeError and then get no kwargs forwarded.
            _factory(policy=self.policy)
            self._factory_kwds = lambda: {'policy': self.policy}
        except TypeError:
            # Assume this is an old-style factory
            self._factory_kwds = lambda: {}
        self._input = BufferedSubFile()
        self._msgstack = []
        # Bind the generator's advance method (name differs between Py2/Py3).
        if PY3:
            self._parse = self._parsegen().__next__
        else:
            self._parse = self._parsegen().next
        self._cur = None
        self._last = None
        self._headersonly = False
# Non-public interface for supporting Parser's headersonly flag
def _set_headersonly(self):
self._headersonly = True
def feed(self, data):
"""Push more data into the parser."""
self._input.push(data)
self._call_parse()
def _call_parse(self):
try:
self._parse()
except StopIteration:
pass
def close(self):
"""Parse all remaining data and return the root message object."""
self._input.close()
self._call_parse()
root = self._pop_message()
assert not self._msgstack
# Look for final set of defects
if root.get_content_maintype() == "multipart" and not root.is_multipart():
defect = errors.MultipartInvariantViolationDefect()
self.policy.handle_defect(root, defect)
return root
def _new_message(self):
msg = self._factory(**self._factory_kwds())
if self._cur and self._cur.get_content_type() == "multipart/digest":
msg.set_default_type("message/rfc822")
if self._msgstack:
self._msgstack[-1].attach(msg)
self._msgstack.append(msg)
self._cur = msg
self._last = msg
def _pop_message(self):
retval = self._msgstack.pop()
if self._msgstack:
self._cur = self._msgstack[-1]
else:
self._cur = None
return retval
def _parsegen(self):
# Create a new message and start by parsing headers.
self._new_message()
headers = []
# Collect the headers, searching for a line that doesn't match the RFC
# 2822 header or continuation pattern (including an empty line).
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
if not headerRE.match(line):
# If we saw the RFC defined header/body separator
# (i.e. newline), just throw it away. Otherwise the line is
# part of the body so push it back.
if not NLCRE.match(line):
defect = errors.MissingHeaderBodySeparatorDefect()
self.policy.handle_defect(self._cur, defect)
self._input.unreadline(line)
break
headers.append(line)
# Done with the headers, so parse them and figure out what we're
# supposed to see in the body of the message.
self._parse_headers(headers)
# Headers-only parsing is a backwards compatibility hack, which was
# necessary in the older parser, which could raise errors. All
# remaining lines in the input are thrown into the message body.
if self._headersonly:
lines = []
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == "":
break
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
if self._cur.get_content_type() == "message/delivery-status":
# message/delivery-status contains blocks of headers separated by
# a blank line. We'll represent each header block as a separate
# nested message object, but the processing is a bit different
# than standard message/* types because there is no body for the
# nested messages. A blank line separates the subparts.
while True:
self._input.push_eof_matcher(NLCRE.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
msg = self._pop_message()
# We need to pop the EOF matcher in order to tell if we're at
# the end of the current file, not the end of the last block
# of message headers.
self._input.pop_eof_matcher()
# The input stream must be sitting at the newline or at the
# EOF. We want to see if we're at the end of this subpart, so
# first consume the blank line, then test the next line to see
# if we're at this subpart's EOF.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
if line == "":
break
# Not at EOF so this is a line we're going to need.
self._input.unreadline(line)
return
if self._cur.get_content_maintype() == "message":
# The message claims to be a message/* type, then what follows is
# another RFC 2822 message.
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
self._pop_message()
return
if self._cur.get_content_maintype() == "multipart":
boundary = self._cur.get_boundary()
if boundary is None:
# The message /claims/ to be a multipart but it has not
# defined a boundary. That's a problem which we'll handle by
# reading everything until the EOF and marking the message as
# defective.
defect = errors.NoBoundaryInMultipartDefect()
self.policy.handle_defect(self._cur, defect)
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
# Make sure a valid content type was specified per RFC 2045:6.4.
if self._cur.get("content-transfer-encoding", "8bit").lower() not in (
"7bit",
"8bit",
"binary",
):
defect = errors.InvalidMultipartContentTransferEncodingDefect()
self.policy.handle_defect(self._cur, defect)
# Create a line match predicate which matches the inter-part
# boundary as well as the end-of-multipart boundary. Don't push
# this onto the input stream until we've scanned past the
# preamble.
separator = "--" + boundary
boundaryre = re.compile(
"(?P<sep>"
+ re.escape(separator)
+ r")(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$"
)
capturing_preamble = True
preamble = []
linesep = False
close_boundary_seen = False
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == "":
break
mo = boundaryre.match(line)
if mo:
# If we're looking at the end boundary, we're done with
# this multipart. If there was a newline at the end of
# the closing boundary, then we need to initialize the
# epilogue with the empty string (see below).
if mo.group("end"):
close_boundary_seen = True
linesep = mo.group("linesep")
break
# We saw an inter-part boundary. Were we in the preamble?
if capturing_preamble:
if preamble:
# According to RFC 2046, the last newline belongs
# to the boundary.
lastline = preamble[-1]
eolmo = NLCRE_eol.search(lastline)
if eolmo:
preamble[-1] = lastline[: -len(eolmo.group(0))]
self._cur.preamble = EMPTYSTRING.join(preamble)
capturing_preamble = False
self._input.unreadline(line)
continue
# We saw a boundary separating two parts. Consume any
# multiple boundary lines that may be following. Our
# interpretation of RFC 2046 BNF grammar does not produce
# body parts within such double boundaries.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
mo = boundaryre.match(line)
if not mo:
self._input.unreadline(line)
break
# Recurse to parse this subpart; the input stream points
# at the subpart's first line.
self._input.push_eof_matcher(boundaryre.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
# Because of RFC 2046, the newline preceding the boundary
# separator actually belongs to the boundary, not the
# previous subpart's payload (or epilogue if the previous
# part is a multipart).
if self._last.get_content_maintype() == "multipart":
epilogue = self._last.epilogue
if epilogue == "":
self._last.epilogue = None
elif epilogue is not None:
mo = NLCRE_eol.search(epilogue)
if mo:
end = len(mo.group(0))
self._last.epilogue = epilogue[:-end]
else:
payload = self._last._payload
if isinstance(payload, str):
mo = NLCRE_eol.search(payload)
if mo:
payload = payload[: -len(mo.group(0))]
self._last._payload = payload
self._input.pop_eof_matcher()
self._pop_message()
# Set the multipart up for newline cleansing, which will
# happen if we're in a nested multipart.
self._last = self._cur
else:
# I think we must be in the preamble
assert capturing_preamble
preamble.append(line)
# We've seen either the EOF or the end boundary. If we're still
# capturing the preamble, we never saw the start boundary. Note
# that as a defect and store the captured text as the payload.
if capturing_preamble:
defect = errors.StartBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
self._cur.set_payload(EMPTYSTRING.join(preamble))
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# If we're not processing the preamble, then we might have seen
# EOF without seeing that end boundary...that is also a defect.
if not close_boundary_seen:
defect = errors.CloseBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
return
# Everything from here to the EOF is epilogue. If the end boundary
# ended in a newline, we'll need to make sure the epilogue isn't
# None
if linesep:
epilogue = [""]
else:
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
epilogue.append(line)
# Any CRLF at the front of the epilogue is not technically part of
# the epilogue. Also, watch out for an empty string epilogue,
# which means a single newline.
if epilogue:
firstline = epilogue[0]
bolmo = NLCRE_bol.match(firstline)
if bolmo:
epilogue[0] = firstline[len(bolmo.group(0)) :]
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# Otherwise, it's some non-multipart type, so the entire rest of the
# file contents becomes the payload.
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
def _parse_headers(self, lines):
# Passed a list of lines that make up the headers for the current msg
lastheader = ""
lastvalue = []
for lineno, line in enumerate(lines):
# Check for continuation
if line[0] in " \t":
if not lastheader:
# The first line of the headers was a continuation. This
# is illegal, so let's note the defect, store the illegal
# line, and ignore it for purposes of headers.
defect = errors.FirstHeaderLineIsContinuationDefect(line)
self.policy.handle_defect(self._cur, defect)
continue
lastvalue.append(line)
continue
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
lastheader, lastvalue = "", []
# Check for envelope header, i.e. unix-from
if line.startswith("From "):
if lineno == 0:
# Strip off the trailing newline
mo = NLCRE_eol.search(line)
if mo:
line = line[: -len(mo.group(0))]
self._cur.set_unixfrom(line)
continue
elif lineno == len(lines) - 1:
# Something looking like a unix-from at the end - it's
# probably the first line of the body, so push back the
# line and stop.
self._input.unreadline(line)
return
else:
# Weirdly placed unix-from line. Note this as a defect
# and ignore it.
defect = errors.MisplacedEnvelopeHeaderDefect(line)
self._cur.defects.append(defect)
continue
# Split the line on the colon separating field name from value.
# There will always be a colon, because if there wasn't the part of
# the parser that calls us would have started parsing the body.
i = line.find(":")
assert i > 0, "_parse_headers fed line with no : and no leading WS"
lastheader = line[:i]
lastvalue = [line]
# Done with all the lines, so handle the last header.
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
class BytesFeedParser(FeedParser):
"""Like FeedParser, but feed accepts bytes."""
def feed(self, data):
super().feed(data.decode("ascii", "surrogateescape"))
| [
"aitoehigie@gmail.com"
] | aitoehigie@gmail.com |
3eb78ccf566602282c5c3dfa11e4a7b6bad48b99 | b7fab13642988c0e6535fb75ef6cb3548671d338 | /tools/ydk-py-master/cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_Ethernet_SPAN_datatypes.py | 5cf1f786c1167dbd94086d2115a83fb0bc7c7ecc | [
"Apache-2.0"
] | permissive | juancsosap/yangtraining | 6ad1b8cf89ecdebeef094e4238d1ee95f8eb0824 | 09d8bcc3827575a45cb8d5d27186042bf13ea451 | refs/heads/master | 2022-08-05T01:59:22.007845 | 2019-08-01T15:53:08 | 2019-08-01T15:53:08 | 200,079,665 | 0 | 1 | null | 2021-12-13T20:06:17 | 2019-08-01T15:54:15 | Python | UTF-8 | Python | false | false | 1,120 | py | """ Cisco_IOS_XR_Ethernet_SPAN_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
from ydk.entity_utils import get_relative_entity_path as _get_relative_entity_path
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YPYError, YPYModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SpanSessionClass(Enum):
"""
SpanSessionClass
Span session class
.. data:: ethernet = 0
Mirror Ethernet packets
.. data:: ipv4 = 1
Mirror IPv4 packets
.. data:: ipv6 = 2
Mirror IPv6 packets
"""
ethernet = Enum.YLeaf(0, "ethernet")
ipv4 = Enum.YLeaf(1, "ipv4")
ipv6 = Enum.YLeaf(2, "ipv6")
class SpanSessionClassOld(Enum):
"""
SpanSessionClassOld
Span session class old
.. data:: true = 0
Mirror Ethernet packets
"""
true = Enum.YLeaf(0, "true")
| [
"juan.c.sosa.p@gmail.com"
] | juan.c.sosa.p@gmail.com |
8692b572824530ddb8d707d7d9cd3e25d74df493 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/appengine/lib/external/admin/tools/conversion/yaml_schema.py | a5982a4efc0f604246f9948bda038bf50a2a5432 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 5,782 | py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Definition for conversion between legacy YAML and One Platform protos."""
from googlecloudsdk.appengine.lib.external.admin.tools.conversion import converters as c
from googlecloudsdk.appengine.lib.external.admin.tools.conversion import schema as s
SCHEMA = s.Message(
api_config=s.Message(
url=s.Value(converter=c.ToJsonString),
login=s.Value(converter=c.EnumConverter('LOGIN')),
secure=s.Value('security_level', converter=c.EnumConverter('SECURE')),
auth_fail_action=s.Value(converter=c.EnumConverter('AUTH_FAIL_ACTION')),
script=s.Value(converter=c.ToJsonString)),
auto_id_policy=s.Value('beta_settings',
lambda val: {'auto_id_policy': val}),
automatic_scaling=s.Message(
converter=c.ConvertAutomaticScaling,
cool_down_period_sec=s.Value('cool_down_period',
converter=c.SecondsToDuration),
cpu_utilization=s.Message(
target_utilization=s.Value(),
aggregation_window_length_sec=s.Value('aggregation_window_length',
converter=c.SecondsToDuration)
),
max_num_instances=s.Value('max_total_instances'),
min_pending_latency=s.Value(converter=c.LatencyToDuration),
min_idle_instances=s.Value(converter=
c.StringToInt(handle_automatic=True)),
max_idle_instances=s.Value(converter=
c.StringToInt(handle_automatic=True)),
max_pending_latency=s.Value(converter=c.LatencyToDuration),
max_concurrent_requests=s.Value(converter=c.StringToInt()),
min_num_instances=s.Value('min_total_instances'),
target_network_sent_bytes_per_sec=s.Value(
'target_sent_bytes_per_sec'),
target_network_sent_packets_per_sec=s.Value(
'target_sent_packets_per_sec'),
target_network_received_bytes_per_sec=s.Value(
'target_received_bytes_per_sec'),
target_network_received_packets_per_sec=s.Value(
'target_received_packets_per_sec'),
target_disk_write_bytes_per_sec=s.Value(
'target_write_bytes_per_sec'),
target_disk_write_ops_per_sec=s.Value(
'target_write_ops_per_sec'),
target_disk_read_bytes_per_sec=s.Value(
'target_read_bytes_per_sec'),
target_disk_read_ops_per_sec=s.Value(
'target_read_ops_per_sec'),
target_request_count_per_sec=s.Value(),
target_concurrent_requests=s.Value()),
basic_scaling=s.Message(
idle_timeout=s.Value(converter=c.IdleTimeoutToDuration),
max_instances=s.Value(converter=c.StringToInt())),
beta_settings=s.Map(),
default_expiration=s.Value(converter=c.ExpirationToDuration),
env=s.Value(),
env_variables=s.Map(),
error_handlers=s.RepeatedField(element=s.Message(
error_code=s.Value(converter=c.EnumConverter('ERROR_CODE')),
file=s.Value('static_file', converter=c.ToJsonString),
mime_type=s.Value(converter=c.ToJsonString))),
# Restructure the handler after it's complete, since this is more
# complicated than a simple rename.
handlers=s.RepeatedField(element=s.Message(
converter=c.ConvertUrlHandler,
auth_fail_action=s.Value(converter=c.EnumConverter('AUTH_FAIL_ACTION')),
static_dir=s.Value(converter=c.ToJsonString),
secure=s.Value('security_level', converter=c.EnumConverter('SECURE')),
redirect_http_response_code=s.Value(
converter=c.EnumConverter('REDIRECT_HTTP_RESPONSE_CODE')),
http_headers=s.Map(),
url=s.Value('url_regex'),
expiration=s.Value(converter=c.ExpirationToDuration),
static_files=s.Value('path', converter=c.ToJsonString),
script=s.Value('script_path', converter=c.ToJsonString),
upload=s.Value('upload_path_regex', converter=c.ToJsonString),
api_endpoint=s.Value(),
application_readable=s.Value(),
position=s.Value(),
login=s.Value(converter=c.EnumConverter('LOGIN')),
mime_type=s.Value(converter=c.ToJsonString),
require_matching_file=s.Value())),
health_check=s.Message(
check_interval_sec=s.Value('check_interval',
converter=c.SecondsToDuration),
timeout_sec=s.Value('timeout', converter=c.SecondsToDuration),
healthy_threshold=s.Value(),
enable_health_check=s.Value('disable_health_check', converter=c.Not),
unhealthy_threshold=s.Value(),
host=s.Value(converter=c.ToJsonString),
restart_threshold=s.Value()),
inbound_services=s.RepeatedField(element=s.Value(
converter=c.EnumConverter('INBOUND_SERVICE'))),
instance_class=s.Value(converter=c.ToJsonString),
libraries=s.RepeatedField(element=s.Message(
version=s.Value(converter=c.ToJsonString),
name=s.Value(converter=c.ToJsonString))),
manual_scaling=s.Message(
instances=s.Value(converter=c.StringToInt())),
network=s.Message(
instance_tag=s.Value(converter=c.ToJsonString),
name=s.Value(converter=c.ToJsonString),
forwarded_ports=s.RepeatedField(element=s.Value(converter=
c.ToJsonString))),
nobuild_files=s.Value('nobuild_files_regex', converter=c.ToJsonString),
resources=s.Message(
memory_gb=s.Value(),
disk_size_gb=s.Value('disk_gb'),
cpu=s.Value()),
runtime=s.Value(converter=c.ToJsonString),
threadsafe=s.Value(),
version=s.Value('id', converter=c.ToJsonString),
vm=s.Value(),
vm_settings=s.Map('beta_settings'))
| [
"joe@longreen.io"
] | joe@longreen.io |
b945506a9f4a29f2511783145f6af33587bc473c | 3e5b2eb741f5ae52752328274a616b475dbb401a | /services/core-api/tests/now_applications/resources/test_now_application_put.py | ea8ee6fa40ba11e0a78d8a568d69b0bf9ac3409d | [
"Apache-2.0"
] | permissive | bcgov/mds | 165868f97d0002e6be38680fe4854319a9476ce3 | 60277f4d71f77857e40587307a2b2adb11575850 | refs/heads/develop | 2023-08-29T22:54:36.038070 | 2023-08-29T05:00:28 | 2023-08-29T05:00:28 | 131,050,605 | 29 | 63 | Apache-2.0 | 2023-09-14T21:40:25 | 2018-04-25T18:54:47 | JavaScript | UTF-8 | Python | false | false | 1,253 | py | import json, decimal, pytest
from flask_restplus import marshal, fields
from app.api.now_applications.response_models import NOW_APPLICATION_MODEL
from tests.now_application_factories import NOWApplicationIdentityFactory, NOWApplicationFactory
class TestNOWApplication:
"""PUT mines/now-applications/<guid>"""
@pytest.mark.skip(
reason='Application changes now fire a request to NROS so need to mock the service call.')
def test_put_application_field(self, test_client, db_session, auth_headers):
now_application = NOWApplicationFactory()
test_application = NOWApplicationIdentityFactory(now_application=now_application)
assert test_application.now_application
data = marshal(test_application.now_application, NOW_APPLICATION_MODEL)
new_latitude = '-55.111'
data['latitude'] = new_latitude
put_resp = test_client.put(
f'/now-applications/{test_application.now_application_guid}',
json=data,
headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 200, put_resp.response
put_data = json.loads(put_resp.data.decode())
assert decimal.Decimal(put_data['latitude']) == decimal.Decimal(new_latitude)
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
0412431b0da4e6ff054478296cc9d25714eb67c8 | fa8036fd416aecab3f1ca617acf0989f032f02ce | /abc165/A.py | d0cee18d96e021925e06a71e9d431a57b9a655f3 | [] | no_license | MitsuruFujiwara/Atcoder | e2e2e82014e33e3422ea40eca812c6fdd8bcaaaa | bc73c4cd35a80c106d0e9b14cee34a064d89d343 | refs/heads/master | 2022-12-14T23:50:56.843336 | 2020-09-17T22:25:57 | 2020-09-17T22:25:57 | 252,980,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | K = int(input())
A, B = map(int, input().split())
ans = 'NG'
for n in range(A,B+1):
if n%K ==0:
ans = 'OK'
print(ans)
| [
"fujiwara52jp@gmail.com"
] | fujiwara52jp@gmail.com |
4cc0049abb138568478357ab24aa4bfb3ca4fffb | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /company/amazon/linked_list/intersection.py | 962fdaf38fc28e16cedeeda12cb343a8230f6c91 | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def get_length(self, head):
length = 0
while head:
length += 1
head = head.next
return length
# @param A : head node of linked list
# @param B : head node of linked list
# @return the head node in the linked list
def getIntersectionNode(self, A, B):
len_A = self.get_length(A)
len_B = self.get_length(B)
if not A or not B:
return None
p_A = A
p_B = B
diff = len_A - len_B
if len_A > len_B:
while diff > 0:
p_A = p_A.next
diff -= 1
else:
while diff < 0:
p_B = p_B.next
diff += 1
while p_A and p_B:
if p_A == p_B:
return p_A
p_A = p_A.next
p_B = p_B.next
return None
| [
"saluja.harkirat@gmail.com"
] | saluja.harkirat@gmail.com |
c69ab5d34d637db9d7131d63900bcf5f52226b04 | f17a78351f53086ce2f9a42bb4b67a0528e3f651 | /backend/main/urls.py | 5f6aa88e52359ba7cffc7b901f523ce4f9264e64 | [
"MIT"
] | permissive | tony/futurecoder | 556dad5c28d4317f0928d821e3e22592d03d09b3 | 986e23137ef9ea2ca267c8b51ab6e1dfe10e530e | refs/heads/master | 2022-11-19T06:20:20.703834 | 2020-07-21T20:37:24 | 2020-07-21T20:37:24 | 282,333,435 | 0 | 0 | MIT | 2020-07-24T23:21:20 | 2020-07-24T23:21:19 | null | UTF-8 | Python | false | false | 1,182 | py | """book URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
from main.text import chapters
from main.views import api_view, FrontendAppView, HomePageView
home_view = HomePageView.as_view()
urlpatterns = [
path('api/<method_name>/', api_view),
path('home/', home_view),
path('', home_view),
path('course/', ensure_csrf_cookie(FrontendAppView.as_view())),
path('toc/', TemplateView.as_view(template_name="toc.html", extra_context=dict(chapters=chapters))),
]
| [
"alex.mojaki@gmail.com"
] | alex.mojaki@gmail.com |
bbac5a99758351de35a1c9ee434cc4b0470a6ae4 | 394b5d87d193071e10d7f875e874edeb1720adbb | /staff/views.py | 1b780d23c1a05dde4c12d9f3c2a137c7137bf4f8 | [] | no_license | Andreyglass1989/Academy | 346f3f6d468f44aeed2f0e73b3ac6c1ef206fba4 | 79527e9752324cf820314114e1dc97962c92f2fc | refs/heads/master | 2021-01-20T15:12:49.542352 | 2017-05-26T07:11:57 | 2017-05-26T07:11:57 | 90,734,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # from django.shortcuts import render, render_to_response
# from .models import Staff
#
# # Create your views here.
#
# def main_menu(request):
# staffs = Staff.objects.all()
# title = "Staff"
# context = {"title_docum":title, "staffs": staffs}
# return render_to_response(request,"base.html",context) | [
"1989andreyglass@gmail.com"
] | 1989andreyglass@gmail.com |
7f82070c300a6bc1d32d4659948899e75073d7f1 | e82245a9e623ef3e2b4b9c02f0fd932c608c4484 | /pramp.com/07-busiest_time_in_the_mall-scratch_work.py | 7d56bc26cc311d181c29572540e87d3ffc59e514 | [] | no_license | Zylophone/Programming-for-Sport | 33e8161028cfddce3b7a1243eb092070107342e3 | 193d6184f939303d8661f68d6fd06bdec95df351 | refs/heads/master | 2020-06-16T23:11:44.719286 | 2017-05-21T17:10:46 | 2017-05-21T17:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # doesn't handle case where people enter and exit at the same second
maintain a var that tracks the number of people in the mall
time count type number_of_people
1 4 enter 4
3 3 exit 1
10 10 enter 11
14 20 enter 31
19 5 exit 26
between [time 14 and time 19] there were 31 people and this was the busiest period
# [(1, 0, xxxx)]<< you won't get a list item that says 0 people entered/exited the mall
mall_traffic= [(1, 4, "enter"), (3, 3, "exit")]
# sort mall_traffic by timestamp
TIME= 0
mall_traffic.sort(key= lambda x : x[TIME])
def findBusiestPeriod(mall_traffic):
number_of_people= 0
max_number_of_people_so_far= 0
max_start= None
max_end= None
LAST= len(mall_traffic) - 1 # mustn't be -1
TIME= 0
TIME_YEAR_ENDS_AT= ???? # UNIX TIME corresponding to DEC 31, YYYY 11:59:59 PM
for idx, time, count, typ in enumerate(mall_traffic):
sign= 1 if typ == "enter" else -1
number_of_people+= (sign * count)
# if typ == "enter": number_of_people+= count
# elif typ == "exit": number_of_people-= count
# else: raise "error"
if number_of_people > max_number_of_people_so_far:
max_number_of_people_so_far= number_of_people
max_start= time
if idx == LAST:
max_end= TIME_YEAR_ENDS_AT
else:
max_end= mall_traffic[idx+1][TIME]
return [max_start, max_end] | [
"jfv33@cornell.edu"
] | jfv33@cornell.edu |
e231ce7dd808dce03eb6e5f868bfbb3469308c4f | 6f06a519bc5a89464d95702fa27fa663ad4eb8f8 | /logfile_analiser2.py | 551da5be08045a53d14c08ab541b7c2466b976c1 | [] | no_license | chrismaurer/chrism | 64e71397bfffcd76aa5a12cc1394fad2c8939d76 | b0a30d5cbe4602a32ad494e2550219d633edf2db | refs/heads/master | 2021-06-04T07:53:15.212812 | 2021-01-19T13:23:22 | 2021-01-19T13:23:22 | 112,128,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,464 | py | #~TestCaseName: logfile_analiser
#~TestCaseSummary: This program reads logfiles from C:\logs and returns any unexpected messages
'''.'''
__author__ = 'Chris Maurer (chris.maurer@tradingtechnologies.com)'
__version__ = '1.1'
import logging
from optparse import OptionParser
log = logging.getLogger(__name__)
class logfile_analiser():
def optmenu(self):
parser = OptionParser()
parser.add_option('-f', '--file', dest='filename',
help='logfile to be read', metavar='filename')
optmenu, args = parser.parse_args()
return optmenu.filename
def parseLogfile(self):
OrderBookIDs = ['0',]
logfile = self.optmenu()
total_count = 8
for OrderBookID in OrderBookIDs:
print '-'*40, '1.1 A, Section 1: Ref Data (Next Day Effective Series)', '-'*40
inputFile = open(logfile, 'rb')
found = False
count = 0
in_file = False
for line in inputFile.readlines():
if found:
if count == total_count or 'DERIVATIVES_' in line:
found = False
if not found:
if 'Contract Created' in line or 'DERIVATIVES_NON_SOM_SERIES_DEFINITIONS' in line:
if '[id:%s]' % OrderBookID in line and 'HSIQ4' in line:
count = 0
found = True
if found:
if count < total_count and '[id:%s]' % OrderBookID in line:
print line
in_file = True
count += 1
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
def parseLogfile2(self):
logfile = self.optmenu()
total_count = 4
print '.'*40, '1.6 C, Test Case 8: Market Alert', '.'*40
inputFile = open(logfile, 'rb')
in_file = False
found = False
count = 0
for line in inputFile.readlines():
if 'marketalert' in line and '[SESSION 5 - PART I]' in line:
found = True
in_file = True
if 'marketalert' in line:
print line
count += 1
if not in_file:
print 'Market Alert Messages Not Found!'
inputFile.close()
def parseLogfile3(self):
OrderBookIDs = ['6358831714613149778',
'6358831714613154242',
'6358831714613158706',
'6358831714613137822',
'6358831714613142286',
'6358831714613144471',
'6358831714613148935',
'6358831714613153399',
'6358831714613157863',
'6358831714613140535',
'6358831714613147184',
'6358831714613151648',
'6358831714613156112',
'6358831714613160576',
'6358831714613137940',
'6358831714613141041',
'6358831714613142404',
'6358831714613144589',
'6358831714613147690',
'6358831714613149053',
'6358831714613152154',
'6358831714613153517',
'6358831714613156618',
'6358831714613157981',
'6358831714613161082',
'6358831714613137978',
'6358831714613142442',
'6358831714613144627',
'6358831714613149091',
'6358831714613153555',
'6358831714613158019',
'6358831714613141240',
'6358831714613147889',
'6358831714613152353',
'6358831714613156817',
'6358831714613139754',
'6358831714613141739',
'6358831714613144218',
'6358831714613146403',
'6358831714613148388',
'6358831714613150867',
'6358831714613152852',
'6358831714613155331',
'6358831714613157316',
'6358831714613159795',
'6358831714613140766',
'6358831714613147415',
'6358831714613151879',
'6358831714613156343',
'6358831714613160807',
'6358831714613137906',
'6358831714613142370',
'6358831714613144555',
'6358831714613149019',
'6358831714613153483',
'6358831714613157947',
'6358831714613139644',
'6358831714613144108',
'6358831714613146293',
'6358831714613150757',
'6358831714613155221',
'6358831714613159685',
'6358831714613140172',
'6358831714613146821',
'6358831714613151285',
'6358831714613155749',
'6358831714613160213',
'6358831714613140373',
'6358831714613147022',
'6358831714613151486',
'6358831714613155950',
'6358831714613160414',
'6358831714613141583',
'6358831714613148232',
'6358831714613152696',
'6358831714613157160',
'6358831714613139252',
'6358831714613143716',
'6358831714613145901',
'6358831714613150365',
'6358831714613154829',
'6358831714613159293']
logfile = self.optmenu()
for OrderBookID in OrderBookIDs:
print '^'*40, '1.6 C, Test Case 11: Agg / Full Order Book', '^'*40
inputFile = open(logfile, 'r')
in_file = False
for line in inputFile.readlines():
if '[side:' in line and '[oid:%s]' % OrderBookID in line:
print line
in_file = True
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
def parseLogfile3a(self):
# OrderBookIDs = ['6358831714613161143',
# '6358831714613156886',
# '6358831714613156679',
# '6358831714613152422',
# '6358831714613152215',
# '6358831714613147958',
# '6358831714613147751',
# '6358831714613141309',
# '6358831714613141102',
# '6358831714613157082',
# '6358831714613152618',
# '6358831714613148154',
# '6358831714613141505',
# '6358831714613160725',
# '6358831714613156261',
# '6358831714613151797',
# '6358831714613147333',
# '6358831714613140684',
# '6358831714613159195',
# '6358831714613154731',
# '6358831714613150267',
# '6358831714613145803',
# '6358831714613143618',
# '6358831714613139154',
# '6358831714613160675',
# '6358831714613156211',
# '6358831714613151747',
# '6358831714613147283',
# '6358831714613140634',
# '6358831714613160983',
# '6358831714613156519',
# '6358831714613152055',
# '6358831714613147591',
# '6358831714613140942',
# '6358831714613156994',
# '6358831714613152530',
# '6358831714613148066',
# '6358831714613141417',
# '6358831714613158583',
# '6358831714613154119',
# '6358831714613149655',
# '6358831714613145191',
# '6358831714613143006',
# '6358831714613138542',
# '6358831714613158779',
# '6358831714613154315',
# '6358831714613149851',
# '6358831714613145387',
# '6358831714613143202',
# '6358831714613138738',
# '6358831714613160382',
# '6358831714613155918',
# '6358831714613151454',
# '6358831714613146990',
# '6358831714613140341',
# '6358831714613157377',
# '6358831714613152913',
# '6358831714613148449',
# '6358831714613141800',
# '6358831714613150693',
# '6358831714613155157',
# '6358831714613159621',
# '6358831714613140453',
# '6358831714613147102',
# '6358831714613151566',
# '6358831714613156030',
# '6358831714613160494']
OrderBookIDs = ['6358831714613150693']
logfile = self.optmenu()
for OrderBookID in OrderBookIDs:
print '^'*40, '1.6 C, Test Case 11: Agg / Full Order Book - SOM (p.375)', '^'*40
inputFile = open(logfile, 'r')
in_file = False
for line in inputFile.readlines():
if 'Add Order [id:8258864] [oid:%s]' % OrderBookID in line:
print line
in_file = True
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
    def parseLogfile4(self):
        """Print every log line belonging to one trade block (Trade / Trade Amend).

        Scans the logfile chosen via optmenu() for lines tagged with the trade
        id, plus the 'Deal Type' lines inside the same contiguous block, and
        echoes each match.
        """
        # Trade ids to report on; kept as a list so more ids can be added easily.
        OrderBookIDs = ['63588064559107415',]
        logfile = self.optmenu()
        for OrderBookID in OrderBookIDs:
            print '^'*40, '1.6 C, Test Case 12: Trade and Trade Amend (p.377)', '^'*40
            inputFile = open(logfile, 'rb')
            # found   -> currently inside the contiguous block for this trade id
            # in_file -> the trade id was seen at least once anywhere in the file
            found = False
            in_file = False
            for line in inputFile.readlines():
                if not found:
                    if '[tid:%s]' % OrderBookID in line:
                        found = True
                        in_file = True
                if found:
                    # Leave the block when a *different* trade id appears...
                    if '[tid:' in line and '[tid:%s]' % OrderBookID not in line:
                        found = False
                    # ...or when the line is neither a trade line nor a 'Deal Type' line.
                    if '[tid:' not in line and 'Deal Type' not in line:
                        found = False
                    # Still print the current line if it matches, even when the
                    # flags above just closed the block.
                    if ('Deal Type' in line or '[tid:%s]' % OrderBookID in line):
                        print line
            if not in_file:
                print 'Trade ID %s Not Found!' % OrderBookID
            inputFile.close()
    def parseLogfile4a(self):
        """SOM variant of parseLogfile4: same trade-block filter, different id/banner.

        NOTE(review): duplicates parseLogfile4 except for the trade id and the
        banner text -- a shared helper taking those as parameters would remove
        the duplication.
        """
        OrderBookIDs = ['63588060264139326',]
        logfile = self.optmenu()
        for OrderBookID in OrderBookIDs:
            print '^'*40, '1.6 C, Test Case 12: Trade and Trade Amend - SOM (p.381)', '^'*40
            inputFile = open(logfile, 'rb')
            # found   -> currently inside the contiguous block for this trade id
            # in_file -> the trade id was seen at least once anywhere in the file
            found = False
            in_file = False
            for line in inputFile.readlines():
                if not found:
                    if '[tid:%s]' % OrderBookID in line:
                        found = True
                        in_file = True
                if found:
                    # A different trade id or an unrelated line ends the block.
                    if '[tid:' in line and '[tid:%s]' % OrderBookID not in line:
                        found = False
                    if '[tid:' not in line and 'Deal Type' not in line:
                        found = False
                    if ('Deal Type' in line or '[tid:%s]' % OrderBookID in line):
                        print line
            if not in_file:
                print 'Trade ID %s Not Found!' % OrderBookID
            inputFile.close()
def parseLogfile5(self):
# OrderBookIDs = ['5181346', '656373', '331181', '19531733', '55185314', '56823714', '135074', '5836706', '855071', '16060718']
OrderBookIDs = ['5245703',]
logfile = self.optmenu()
total_count = 10
for OrderBookID in OrderBookIDs:
print '-'*40, '1.6 D, Test Case 1: Ref Data', '-'*40
inputFile = open(logfile, 'r')
found = False
count = 10
in_file = False
for line in inputFile.readlines():
if 'msgType' in line and '%s]' % OrderBookID in line:
print line
in_file = True
count += 1
inputFile.close()
def parseLogfile6(self):
OrderBookIDs = ['21956958',]
logfile = self.optmenu()
for OrderBookID in OrderBookIDs:
print '-'*40, '1.6 D, Test Case 7: Series Status Data', '-'*40
inputFile = open(logfile, 'r')
in_file = False
for line in inputFile.readlines():
if 'Series Status Received [obId:%s]' % OrderBookID in line:
print line
in_file = True
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
    def parseLogfile7(self):
        """Print the Market Alert lines that follow the SESSION 5 marketalert marker."""
        logfile = self.optmenu()
        print '.'*40, '1.6 D, Test Case 7: Market Alert', '.'*40
        inputFile = open(logfile, 'rb')
        # in_file -> the marker line was seen at least once
        # found   -> currently inside the run of 'Market Alert' lines after the marker
        in_file = False
        found = False
        for line in inputFile.readlines():
            if not found:
                if 'marketalert' in line and '[SESSION 5 - PART II]' in line:
                    found = True
                    in_file = True
            if found:
                if 'Market Alert' in line:
                    print line
                else:
                    # First non-alert line ends the run.
                    found = False
        if not in_file:
            print 'Market Alert Messages Not Found!'
        inputFile.close()
def parseLogfile8(self):
OrderBookIDs = ['6360365812506719900',]
logfile = self.optmenu()
for OrderBookID in OrderBookIDs:
print '-'*40, '1.6 D, Test Case 7: Order Book Data', '-'*40
inputFile = open(logfile, 'r')
in_file = False
for line in inputFile.readlines():
if 'Add Order [id:4589918] [oid:%s]' % OrderBookID in line:
print line
in_file = True
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
def parseLogfile9(self):
OrderBookIDs = ['6361949929589480942',]
logfile = self.optmenu()
for OrderBookID in OrderBookIDs:
print '^'*40, '1.7 A, TC 2: Special Trading Arrangement - SOM (p.404)', '^'*40
inputFile = open(logfile, 'r')
in_file = False
for line in inputFile.readlines():
if 'Add Order [id:4589918] [oid:%s]' % OrderBookID in line:
print line
in_file = True
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
def parseLogfile10(self):
OrderBookIDs = ['6361553830525603794', '6361553830525603949']
logfile = self.optmenu()
for OrderBookID in OrderBookIDs:
print '^'*40, '1.7 B, TC 2: Special Trading Arrangement - SOM (p.408)', '^'*40
inputFile = open(logfile, 'r')
in_file = False
for line in inputFile.readlines():
if 'Add Order [id:1051515] [oid:%s]' % OrderBookID in line:
print line
in_file = True
if not in_file:
print 'OrderBookID %s Not Found!' % OrderBookID
inputFile.close()
logRead = logfile_analiser()
logRead.optmenu()
# logRead.parseLogfile()
# logRead.parseLogfile2()
# logRead.parseLogfile3()
logRead.parseLogfile3a()
logRead.parseLogfile4()
logRead.parseLogfile4a()
# logRead.parseLogfile5()
# logRead.parseLogfile6()
# logRead.parseLogfile7()
# logRead.parseLogfile8()
# logRead.parseLogfile9()
# logRead.parseLogfile10() | [
"chris.maurer@tradingtechnologies.com"
] | chris.maurer@tradingtechnologies.com |
48e110407f51f654dff1452b5eb1425e8cd86c75 | cc310e5586d7f7b6824802d290ba15e72832b76e | /ssil_sso_ms/global_function.py | 97e7fd35cd07a01843dedf068f4176bf2aee5601 | [] | no_license | abhisek11/my_django_boiler | 9bccc1d57c8bab83f54f6083919531a7b6b97ff6 | af36011a86376291af01a1c3a569f999bed4cb0d | refs/heads/master | 2022-12-13T19:43:26.500510 | 2020-03-02T07:03:13 | 2020-03-02T07:03:13 | 244,302,032 | 0 | 0 | null | 2022-12-08T01:51:32 | 2020-03-02T06:57:59 | Python | UTF-8 | Python | false | false | 3,086 | py |
from django.shortcuts import render
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from hrms.models import *
from hrms.serializers import *
from pagination import CSLimitOffestpagination,CSPageNumberPagination
from rest_framework.views import APIView
from django.conf import settings
from rest_framework import mixins
from rest_framework import filters
from datetime import datetime,timedelta
import collections
from rest_framework.parsers import FileUploadParser
from django_filters.rest_framework import DjangoFilterBackend
from custom_decorator import *
import os
from django.http import JsonResponse
from datetime import datetime
from decimal import Decimal
import pandas as pd
import xlrd
import numpy as np
from django.db.models import Q
from custom_exception_message import *
from decimal import *
import math
from django.contrib.auth.models import *
from django.db.models import F
from django.db.models import Count
from core.models import *
from pms.models import *
import re
def userdetails(user):
    """Return "<first> <last>" for a Django auth User looked up by id or username.

    Args:
        user: an int (User.id) or str (User.username); anything else yields None.

    Returns:
        The concatenated first and last name of the matching user, or None when
        no user matches.  Fixes the original's UnboundLocalError: with an int or
        str argument but an empty queryset, `f_name_l_name` was never assigned.
    """
    full_name = None
    if isinstance(user, int):
        matches = User.objects.filter(id=user)
    elif isinstance(user, str):
        matches = User.objects.filter(username=user)
    else:
        matches = []
    for record in matches:
        # Last match wins, mirroring the original loop's behaviour.
        full_name = record.first_name + " " + record.last_name
    return full_name
def designation(designation):
    """Return the designation name for a user given their username (str) or id (int).

    The two original branches differed only in the filter field, so the lookup
    kwargs are built once and a single query is issued.  Returns None for an
    unsupported argument type or when no TCoreUserDetail row matches.
    """
    if isinstance(designation, str):
        lookup = {'cu_user__username': designation}
    elif isinstance(designation, int):
        lookup = {'cu_user': designation}
    else:
        return None
    for detail in TCoreUserDetail.objects.filter(**lookup):
        # First matching row wins, as in the original.
        return detail.designation.cod_name
    return None
def department(department):
    """Return the department name for a user given their username (str) or id (int).

    Mirrors designation(): the duplicated branches are collapsed into one query
    with the filter field chosen by argument type.  Returns None for an
    unsupported argument type or when no TCoreUserDetail row matches.
    """
    if isinstance(department, str):
        lookup = {'cu_user__username': department}
    elif isinstance(department, int):
        lookup = {'cu_user': department}
    else:
        return None
    for detail in TCoreUserDetail.objects.filter(**lookup):
        # First matching row wins, as in the original.
        return detail.department.cd_name
    return None
def getHostWithPort(request):
    """Build the absolute base URL ("<scheme>://<host>/") for *request*."""
    if request.is_secure():
        scheme = 'https://'
    else:
        scheme = 'http://'
    return '%s%s/' % (scheme, request.get_host())
def raw_query_extract(query):
    """Return *query*.query -- the underlying Query object of a Django QuerySet
    (its str() is the SQL that would run) -- handy for debugging."""
    return query.query
"abhishek.singh@shyamfuture.com"
] | abhishek.singh@shyamfuture.com |
2aa9aa7a5e03efbfd148d48e3641f8958d935c5c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/682.py | fdb052b6206ab635aa8befdf177223b0490ce73e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from numpy import empty, array, reshape, zeros
def read_pattern(m,n):
    """Read m stdin rows of n space-separated ints; return them as an (m, n) array."""
    # sum(..., []) flattens the per-row token lists; O(m^2) but m is tiny here.
    ar = array(map(int, sum([sys.stdin.readline().strip().split(' ') for a in range(m)],[])))
    return reshape(ar, (m,n))
def solve(n):
    """Decide one stdin test case; True iff the pattern can be produced by the mower.

    NOTE(review): the parameter `n` (case index) is immediately shadowed by the
    grid width read from stdin and is effectively unused.
    """
    m, n = map(int, sys.stdin.readline().split(' '))
    pattern = read_pattern(m,n)
    lawn = zeros((m,n), dtype=int)
    # Start everything at 100 -- presumably above any requested height; confirm
    # against the problem's height limits.
    lawn.fill(100)
    # lines
    # Greedily clip whole rows/columns down to their pattern maximum, but only
    # when that cut would not go below what the row/column already holds.
    for j in range(m):
        if pattern[j,:].max() <= lawn[j,:].min():
            lawn[j,:] = lawn[j,:].clip(0, pattern[j,:].max())
    for j in range(n):
        if pattern[:,j].max() <= lawn[:,j].min():
            lawn[:,j] = lawn[:,j].clip(0, pattern[:,j].max())
    if (pattern == lawn).all():
        return True
    return False
if __name__ == "__main__":
    # First stdin line: number of test cases; print one YES/NO verdict per case.
    N = int(sys.stdin.readline())
    for n in range(N):
        if solve(n):
            print "Case #{n}: YES".format(n=n+1)
        else:
            print "Case #{n}: NO".format(n=n+1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9a6cf0b3eef8453bda85efcb047570913328df11 | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20하반기 코딩테스트/.카카오기출/쿠키구입.py | 7aec711f931dadca16ca4ad6e2314022f0adfae9 | [] | no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | def solution(cookie):
Len=len(cookie)
answer=0
Sum=sum(cookie)
def Chk(A,B):
if B%2:return 0
for m in range(a,b+1):
A+=cookie[m]
B-=cookie[m]
if A==B:return A
elif A>B:return 0
for a in range(Len-1):
tmp=Sum
for b in range(Len-1,a,-1):
answer=max(answer,Chk(0,tmp))
tmp-=cookie[b]
if tmp<2*answer:break
Sum-=cookie[a]
if Sum<answer*2:return answer
return answer
solution([1, 1, 2, 3]) | [
"choo0618@naver.com"
] | choo0618@naver.com |
3661e9be4ba4631580df807c9d3591b2478fc8ea | b34c2e2ccb3dcda09bab17e3082627c8401cc67b | /bank/api/bank_api.py | a60dab611be6a0126475132c99c93a73ee80bd38 | [] | no_license | udwivedi394/django_api_app | 7cc80c68064e9349bd0ca21b1f794be21ec78dfc | bcc918a3dc6263017e54a3ea34086ed51aa7e7cb | refs/heads/master | 2020-05-07T21:45:02.960050 | 2019-04-14T17:09:37 | 2019-04-14T17:09:37 | 180,916,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | import json
from django.db import transaction
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from rest_framework.exceptions import APIException
from rest_framework.views import APIView
from bank.api.bank_api_processor import BranchDetails, BranchFinderInCity
from bank.api.serializers import BranchIFSCInputSerializer, BranchFinderInputSerializer
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class BranchDetailer(APIView):
    """GET endpoint: return branch details for the `ifsc` query parameter."""

    def get(self, request):
        # Every failure mode (validation, lookup, serialisation) is re-raised
        # as a DRF APIException so the framework renders the error response.
        try:
            payload = json.dumps(self._process_request(request))
        except Exception as exc:
            raise APIException(str(exc))
        return HttpResponse(payload, status=200)

    def _process_request(self, request):
        params = BranchIFSCInputSerializer(data=request.GET)
        params.is_valid(raise_exception=True)
        return BranchDetails().execute(ifsc=params.validated_data['ifsc'])
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class BranchFinder(APIView):
    """GET endpoint: find branches matching `name` within `city`."""

    def get(self, request):
        # Any failure is surfaced uniformly as a DRF APIException.
        try:
            payload = json.dumps(self._process_request(request))
        except Exception as exc:
            raise APIException(str(exc))
        return HttpResponse(payload, status=200)

    def _process_request(self, request):
        params = BranchFinderInputSerializer(data=request.GET)
        params.is_valid(raise_exception=True)
        data = params.validated_data
        return BranchFinderInCity().execute(name=data['name'], city=data['city'])
| [
"utkarshdwivedi394@gmail.com"
] | utkarshdwivedi394@gmail.com |
96247780730626ed382d3dfaee4df66524f0fc30 | 59080f5116b9e8f625b5cc849eb14b7ff9d19f3d | /124 rpc/msg_pb2.py | 23fe08aa72bfcbf531911d26beccb3cd695daaf0 | [] | no_license | yyq1609/Python_road | eda2bcd946b480a05ec31cdcb65e35b3f3e739d1 | e9ba2f47c8dd2d00a6e5ddff03c546152efd8f49 | refs/heads/master | 2020-09-11T11:51:35.903284 | 2019-11-11T13:02:21 | 2019-11-11T13:02:21 | 222,054,462 | 1 | 0 | null | 2019-11-16T05:58:13 | 2019-11-16T05:58:12 | null | UTF-8 | Python | false | true | 3,570 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: msg.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='msg.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\tmsg.proto\"\x1a\n\nMsgRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x0bMsgResponse\x12\x0b\n\x03msg\x18\x01 \x01(\t23\n\nMsgService\x12%\n\x06GetMsg\x12\x0b.MsgRequest\x1a\x0c.MsgResponse\"\x00\x62\x06proto3')
)
_MSGREQUEST = _descriptor.Descriptor(
name='MsgRequest',
full_name='MsgRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='MsgRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=13,
serialized_end=39,
)
_MSGRESPONSE = _descriptor.Descriptor(
name='MsgResponse',
full_name='MsgResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msg', full_name='MsgResponse.msg', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=41,
serialized_end=67,
)
DESCRIPTOR.message_types_by_name['MsgRequest'] = _MSGREQUEST
DESCRIPTOR.message_types_by_name['MsgResponse'] = _MSGRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MsgRequest = _reflection.GeneratedProtocolMessageType('MsgRequest', (_message.Message,), {
'DESCRIPTOR' : _MSGREQUEST,
'__module__' : 'msg_pb2'
# @@protoc_insertion_point(class_scope:MsgRequest)
})
_sym_db.RegisterMessage(MsgRequest)
MsgResponse = _reflection.GeneratedProtocolMessageType('MsgResponse', (_message.Message,), {
'DESCRIPTOR' : _MSGRESPONSE,
'__module__' : 'msg_pb2'
# @@protoc_insertion_point(class_scope:MsgResponse)
})
_sym_db.RegisterMessage(MsgResponse)
_MSGSERVICE = _descriptor.ServiceDescriptor(
name='MsgService',
full_name='MsgService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=69,
serialized_end=120,
methods=[
_descriptor.MethodDescriptor(
name='GetMsg',
full_name='MsgService.GetMsg',
index=0,
containing_service=None,
input_type=_MSGREQUEST,
output_type=_MSGRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_MSGSERVICE)
DESCRIPTOR.services_by_name['MsgService'] = _MSGSERVICE
# @@protoc_insertion_point(module_scope)
| [
"958976577@qq.com"
] | 958976577@qq.com |
8caada00d03e7730fef604f1bac57dc6925a29f7 | f51c6d0cebb27c377ce9830deec4b727b9b2ee90 | /AI/BOOK - Problem Solving with Algorithms and Data Structures - EXAMPLES/Listings/listing_1_1.py | 401dd53d09553f35e9b4e840ee00ed270ecae51c | [] | no_license | dbbudd/Python-Experiments | 1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8 | b6d294bf11a5c92b8578d16aa2f63cc27fc47b07 | refs/heads/master | 2020-04-17T02:21:36.693593 | 2019-01-17T00:18:34 | 2019-01-17T00:18:34 | 166,130,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | def squareroot(n):
root = n/2 #initial guess will be 1/2 of n
for k in range(20):
root = (1/2)*(root + (n / root))
return root
| [
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
addaa8d9c141661d40806d61bb19fb4cc977f2ec | 01fa2aca31eb73a559d192fd29e44350f26a13a9 | /HAX/18.CocoJoe/script.module.lambdascrapers/lib/lambdascrapers/sources_overeasy/en_de/iwantmyshow.py | b4f83e473feaf904ab832be9fa246515efd676a8 | [
"Beerware"
] | permissive | RandomIntermition/k4y108837s | b4beedeff375645bd4fa9ad348631a9a9f3640b6 | e9115aad49795dfe30a96c278cedaf089abcc11d | refs/heads/master | 2022-05-01T18:45:57.298903 | 2022-03-30T03:41:08 | 2022-03-30T03:41:08 | 109,356,425 | 1 | 0 | null | 2019-11-08T02:20:47 | 2017-11-03T05:36:48 | Python | UTF-8 | Python | false | false | 7,914 | py | # -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
import xbmcgui
from resources.lib.modules import cleantitle, client, debrid, log_utils
class source:
    """Stream-link scraper for myvideolinks.net: url builders (movie/tvshow/
    episode) plus sources(), which scrapes and filters candidate hoster links."""
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['myvideolinks.net', 'iwantmyshow.tk']
        self.base_link = 'http://myvideolinks.net'
        self.search_link = 'def/?s=%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the movie's identifiers urlencoded (None on any failure)."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return the show's identifiers urlencoded (None on any failure)."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend the tvshow querystring with episode details; None on failure."""
        try:
            if url is None:
                return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def sources(self, url, hostDict, hostprDict):
        """Scrape search results and return a list of candidate source dicts.

        Per-item failures are swallowed so one bad result cannot abort the scrape.
        """
        try:
            sources = []
            if url is None:
                return sources
            # Recover the metadata packed by movie()/episode().
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr: the token a matching release name must contain (SxxEyy or year).
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'],
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'],
                data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(query)
            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            # z: (href, title) pairs of the search-result links.
            z = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # NOTE(review): the `r` pipeline below filters/ranks results but its
            # final value is never used afterwards -- `posts` is built from `z`
            # in both branches, so this computation appears to be dead code. Confirm.
            r = [
                (i[0],
                 i[1],
                 re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]),
                 re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in z]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]]
            r = [i for i in r if not any(x in i[4]
                                         for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
            r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1]
            if 'tvshowtitle' in data:
                posts = [(i[1], i[0]) for i in z]
            else:
                posts = [(i[1], i[0]) for i in z]
            hostDict = hostprDict + hostDict
            # Visit every result page and collect (release name, candidate link) pairs.
            items = []
            for post in posts:
                try:
                    t = post[0]
                    u = client.request(post[1])
                    u = re.findall('\'(http.+?)\'', u) + re.findall('\"(http.+?)\"', u)
                    u = [i for i in u if '/embed/' not in i]
                    u = [i for i in u if 'youtube' not in i]
                    items += [(t, i) for i in u]
                except Exception:
                    pass
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    # Reject releases whose cleaned title or SxxEyy/year token differs.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr:
                        raise Exception()
                    # Derive quality flags from the tokens after the year/episode tag.
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt):
                        raise Exception()
                    if '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt):
                        quality = 'CAM'
                    info = []
                    if '3d' in fmt:
                        info.append('3D')
                    try:
                        # NOTE(review): `item` is a 2-tuple, so item[2] always raises
                        # IndexError here and size info is silently never added. Confirm.
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')
                    info = ' | '.join(info)
                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    # Keep only links whose host is in the allowed hoster list.
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'info': info, 'direct': False, 'debridonly': debrid.status()})
                except Exception:
                    pass
            # Prefer non-CAM sources when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except Exception:
            return sources
    def resolve(self, url):
        """No provider-specific resolving needed; the url is already final."""
        return url
| [
"github+github@github.github"
] | github+github@github.github |
6e3fc1ac707dd1993a33342c09e738411880714c | 7ab16fa64eedde37cefabdbb8b7e176b28590c36 | /controller/connector/docs/simple/reverse_tcp_agent.py | c4d562d1dd7a744c3adb1120a436effb3943dd28 | [] | no_license | how2how/PlayGround | 240b5ab512dc1992f551f4af9289362b5018dba3 | cc10ee74ee3ee86b8b769cbf6237745bf1614adb | refs/heads/master | 2020-03-07T00:49:02.530872 | 2018-05-08T15:09:13 | 2018-05-08T15:09:13 | 127,166,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | from server.core.orchestration import SimpleOrchestrator
orch_obj = SimpleOrchestrator(
"Our passphrase can be anything! &^&%{}",
out_length = 20,
in_length = 20,
)
import socket
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("127.0.0.1",1234))
def send( data ) :
s.sendall( data )
def recv() : # Return for every 20 bytes
return s.recv(20) # This will automatically block as socket.recv() is a blocking method
from server.core.handlers import BaseHandler
class MyAgent_Handler( BaseHandler ) :
""" This class tries hard to be self-explanatory """
def __init__(self, recv, send, orch, **kw) :
super( MyAgent_Handler, self ).__init__( recv, send, orch, **kw )
print ( "[!] Agent with Orchestrator ID: '{}' started!".format( orch.getIdentity() ) )
print()
def onMessage( self, stream, message ) :
print ( "[+] Message arrived!" )
print ( "{} -> {}".format(stream, message) )
print ("[>] Sending the received message in reverse order!")
self.preferred_send( message[::-1] ) # Will respond with the reverse of what was received!
def onChunk( self, stream, message ) :
print ( "[+] Chunk arrived for stream '{}' !".format(stream) )
if message :
print ("[*] Message assembled. onMessage() will be called next!")
print()
def onNotRecognised(self) :
print ("[-] Got some Gibberish")
print ("Initialized the Orchestrator with wrong passphrase?")
print()
handler_obj = MyAgent_Handler(recv, send, orch_obj)
from time import sleep
while True : sleep(10)
| [
"test@test.com"
] | test@test.com |
93978618a6ba0603136f2dbec1dfdfb4e5fe055a | 1e297340a3c85a29bbad1b27b076d8ad50086e7a | /algorithm/BinarySearch/BOJ_10815(binary_search).py | 47e0f364a9bb5bf3c8855715e7b29fec31491ba4 | [] | no_license | onewns/TIL | a5ee524604feb77d0d982ead2ea0265fa78c9349 | fa53ede5194979ccc54eeae882399799afe08bcf | refs/heads/master | 2023-08-29T02:50:19.982012 | 2021-10-11T12:24:59 | 2021-10-11T12:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import sys
sys.stdin = open('../input.txt', 'r')
def binary_search(num, start, end):
    """Membership test on the sorted module-level `cards`.

    Searches the half-open range [start, end) iteratively and returns 1 when
    `num` is present, 0 otherwise (the 1/0 ints are printed directly).
    """
    lo, hi = start, end - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = cards[mid]
        if value == num:
            return 1
        if value < num:
            lo = mid + 1
        else:
            hi = mid - 1
    return 0
# BOJ 10815: read owned cards, then for each query print 1/0 membership flags.
n = int(input())
cards = sorted(list(map(int, input().split())))  # sort once so binary_search applies
m = int(input())  # NOTE(review): m (query count) is read but never used
check_list = list(map(int, input().split()))
ans = []
for cn in check_list:
    ans.append(binary_search(cn, 0, n))
print(*ans)
| [
"wonjun9090@naver.com"
] | wonjun9090@naver.com |
a55a94207d6eebdfea63144d689f0a8ed1c3a00b | ce8a7ed7afb9a11a22df905f55749c1e06a98b63 | /amazon/python/17. Letter Combinations of a Phone Number.py | e72e9ed254f36f029c3b55ccb3e87fde05050a16 | [] | no_license | suruisunstat/leetcode_practice | 9da68247263d9b9bec98ab7e0f78bfe58e9dc830 | 1c0ff616ee3753ac9d4571301313f7a6b8ba6f37 | refs/heads/master | 2022-11-09T05:05:55.946361 | 2022-11-05T08:24:41 | 2022-11-05T08:24:41 | 147,136,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | class Solution(object):
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
dict_num = {'2':['a','b','c'],'3':['d','e','f'],'4':['g','h','i'],'5':['j','k','l'],'6':['m','n','o'],'7':['p','q','r','s'], '8':['t','u','v'], '9':['w','x','y','z']}
def backtrack(combination, next_digits):
if len(next_digits) == 0:
output.append(combination)
else:
for letter in dict_num[next_digits[0]]:
backtrack(combination + letter, next_digits[1:])
output = []
if digits:
backtrack("",digits)
return output
# Time: O(3 ^ N * 4 ^ M)
# Space: O(3 ^ N * 4 ^ M)
| [
"noreply@github.com"
] | suruisunstat.noreply@github.com |
09e30b7fd5c4de83950b8216c5b2fdaeae13dbbf | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisWprime4500.py | 0c7712eb078cfa84197fb5056d2af33b0c4f59b9 | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the Wprime (M=4500) signal MC ntuple production.
config = Configuration()
config.section_("General")
config.General.requestName = 'Wprime_4500_weight_v2'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName='Analysis'
config.JobType.sendExternalFolder=True# = 'Analysis'
# Prefiring map plus the Summer16 JEC text files shipped to the worker nodes.
config.JobType.inputFiles=['L1PrefiringMaps_new.root','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles=['L1PrefiringMaps_new.root','PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WprimeToWZToWlepZhad_narrow_M-4500_13TeV-madgraph/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'Automatic'
config.Data.unitsPerJob =180#10
# totalUnits = -1: process the whole dataset.
config.Data.totalUnits = -1
config.Data.publication = False
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'Wprime_4500_weight_v2'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"c.chen@cern.ch"
] | c.chen@cern.ch |
9de5d30fd5005247c0ea0b488e83799dad76357a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02546/s866918954.py | 5473133d05a55e007d9ae5c6bea35eb7c3726a25 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | line = input()
if (line[-1]) == "s":
print(line + "es")
else:
print(line + "s") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3b3a117221d6b53dde8de16074a991941662ffdb | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/11146113.py | 693fb033baff53bcb422a63ef8bda36e903bc01f | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/11146113.py generated: Fri, 27 Mar 2015 15:47:59
#
# Event Type: 11146113
#
# ASCII decay Descriptor: [B0 -> (J/psi(1S) -> mu+ mu-) (phi(1020) -> K+ K-) (K_S0 -> pi+ pi-)]cc
#
from Gaudi.Configuration import *
# Configure Gauss generation for event type 11146113 (see descriptor in header:
# B0 -> (J/psi -> mu+ mu-) (phi -> K+ K-) (KS -> pi+ pi-), cc).
importOptions( "$DECFILESROOT/options/KKmumuInAcc.py" )
from Configurables import Generation
Generation().EventType = 11146113
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
# EvtGen drives the signal decay according to the referenced .dec file.
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_JpsiphiKs,KKmumupipi=KKmumuInAcc.dec"
Generation().SignalRepeatedHadronization.CutTool = "ListOfDaughtersInLHCb"
# Signal particle ids: B0 / anti-B0 (PDG 511 / -511).
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
a15867a35dd920ebf76e728cf713b109c778aac1 | a11f098121fc5446f8dc7a9555034c51512a4e26 | /app03.py | 5e9a5b21f9738b072a606de1943a4357df144e6c | [] | no_license | anjoe/flask-study | 30b7f2adb1265790dee246dd97fee51f496b046b | 2b5639c9ef4ae77672ff8f4df1c5e1164af6b962 | refs/heads/master | 2020-08-06T21:03:42.926786 | 2019-10-06T15:28:58 | 2019-10-06T15:28:58 | 213,153,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from flask import Blueprint
# Flask blueprint registered under the name "app03"; mounted by the main app.
app03=Blueprint('app03',__name__)
@app03.route('/t3/')
def show():
    # Simple probe endpoint: returns a fixed marker string identifying this blueprint.
    return 'app03.hello'
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
0d61e717f8f7e75de0fcb1bd47dc911cd7bd82c8 | 137ba8a70dfcf94dfe7aeef1599341ecc06ca48f | /project_ex/10_lotto.py | a2e74e17dec7fa2d55a90d94167acf5e665eabba | [] | no_license | smtamh/oop_python_ex | e1d3a16ade54717d6cdf1759b6eba7b27cfc974e | bd58ee3bf13dad3de989d5fd92e503d5ff949dd9 | refs/heads/master | 2020-09-22T08:35:19.847656 | 2019-11-13T02:47:06 | 2019-11-13T02:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | """
Created on 2014. 8. 10.
@author: uyu423@gmail.com
http://luckyyowu.tistory.com/209
"""
import random
# 0. 객체지향을 통해 크게 3가지 형태의 클래스가 나온다. Data, B.O, U.I
# 1. 데이터를 클래스로 만들 수 있음(Data)
# 2. 프로그램이 해야하는 기능을 모아서 클래스로 만들 수 있음(Business Object(B.O). B.O는 입출력을 처리하지 않음(중요)
# 3. 실제 입출력을 담당하는 UI 클래스도 있음(UI)
# 4. 모든 프로그램은 CRUD(Create, Read, Update, Delete)가 기본
class LottoBall: # Data
    """Plain data holder for a single numbered lottery ball."""
    def __init__(self, num):
        # The ball's printed number (1-45 in this program).
        self.num = num
class LottoMachine: # B.O
    """Business object: owns the pool of 45 balls and draws six of them.

    Per the design notes above, B.O classes never do input/output.
    """
    def __init__(self):
        # One ball per number 1..45.
        self.ballList = [LottoBall(number) for number in range(1, 46)]
    def selectBalls(self):
        """Shuffle the pool in place and return its first six balls."""
        random.shuffle(self.ballList)
        drawn = self.ballList[:6]
        return drawn
class LottoUI: # U.I
    """Console UI: prompts the user and prints one lottery draw."""
    def __init__(self):
        # The UI owns a machine instance and delegates the actual draw to it.
        self.machine = LottoMachine()
    def playLotto(self):
        # Blocks until the user presses enter, then prints the six drawn numbers.
        input("로또를 뽑을까요?")
        selectedBalls = self.machine.selectBalls()
        for ball in selectedBalls:
            print("%d" % (ball.num))
# main: build the console UI and run a single interactive draw
ui = LottoUI()
ui.playLotto()
| [
"kadragon@sasa.hs.kr"
] | kadragon@sasa.hs.kr |
a7f1d277305c8d83ed636dbd93ae990fb9151277 | 34652a47355a8dbe9200db229a1bbc62619de364 | /Maths/diff_eqns/electrogravRK45adaptive.py | c27cb2519beb5537ac886dfec50963acbdc8f86c | [] | no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,226 | py | from __future__ import division
from pylab import *
import os
#Evaluates the derivative functions
def derivs(time, last_values, properties, alpha):
    """Return d/dt of the state array ``last_values``.

    ``last_values`` is laid out as rows [x1, v1, x2, v2, x3, v3] with one
    column per particle; ``properties[0]`` is mass, ``properties[1]`` is
    charge, and ``alpha`` is the global field constant (+ve electric,
    -ve gravity, per the script below).  Each position's derivative is the
    matching velocity; each velocity's derivative is (q/m)*alpha*field.
    ``time`` is unused here (the field is time-independent) but kept for
    the standard ODE right-hand-side signature.
    """
    particle_num = size(last_values, axis=1)
    # Integer division: the original float result (size/2.) makes zeros()
    # raise TypeError on modern NumPy, which requires integer shapes.
    spatial_dims = size(last_values, axis=0) // 2
    grad = zeros((spatial_dims * 2, particle_num))
    for j in arange(particle_num):
        # Position derivatives are simply the stored velocities.
        grad[0, j] = last_values[1, j]
        grad[2, j] = last_values[3, j]
        grad[4, j] = last_values[5, j]
        # Velocity derivatives: acceleration from the net field of all others.
        field_sum = calc_field(last_values, j, properties)
        grad[1, j] = properties[1, j] / properties[0, j] * alpha * field_sum[0]
        grad[3, j] = properties[1, j] / properties[0, j] * alpha * field_sum[1]
        grad[5, j] = properties[1, j] / properties[0, j] * alpha * field_sum[2]
    return grad
#Evaluate the summuation to calculate the field at one particle due to all the others
def calc_field(last_values, at_particle, properties):
    """Return the 3-component field vector at particle ``at_particle``.

    Sums q_i * (r_at - r_i) / |r_at - r_i|**3 over every other particle i,
    i.e. an inverse-square field WITHOUT the alpha sign factor (the caller
    ``derivs`` applies that).
    """
    particle_num = size(last_values, axis=1)
    # Integer division: the original float result (size/2) makes zeros()
    # raise TypeError on modern NumPy, which requires integer shapes.
    spatial_dims = size(last_values, axis=0) // 2
    field_sum = zeros(spatial_dims)
    for i in arange(particle_num):
        if i != at_particle:
            delx1 = last_values[0, at_particle] - last_values[0, i]
            delx2 = last_values[2, at_particle] - last_values[2, i]
            delx3 = last_values[4, at_particle] - last_values[4, i]
            # |r|**3, so each term is delta_r / |r|**3 (inverse-square law).
            denominator = (delx1**2 + delx2**2 + delx3**2)**(3. / 2.)
            field_sum[0] = field_sum[0] + delx1 * properties[1, i] / denominator
            field_sum[1] = field_sum[1] + delx2 * properties[1, i] / denominator
            field_sum[2] = field_sum[2] + delx3 * properties[1, i] / denominator
    return field_sum
#Energy calculator
def energy_calc(last_values, properties, alpha):
    """Return [potential, kinetic, total] energy of the whole system.

    Potential energy sums alpha * q_i * q_j / r_ij over unordered pairs;
    kinetic energy sums m_i * |v_i|**2 / 2 over particles.  The
    accumulators are length-1 arrays, matching the caller's
    ``energy[:, idx] = ...`` assignment.
    """
    n = size(last_values, 1)
    # Pairwise potential energy (each unordered pair counted once).
    pot = zeros(1)
    for i in range(n):
        for j in range(i + 1, n):
            dx = last_values[0, i] - last_values[0, j]
            dy = last_values[2, i] - last_values[2, j]
            dz = last_values[4, i] - last_values[4, j]
            separation = (dx**2 + dy**2 + dz**2)**(1 / 2)
            pot = pot + alpha * properties[1, i] * properties[1, j] / separation
    # Kinetic energy of every particle.
    kin = zeros(1)
    for i in range(n):
        speed = (last_values[1, i]**2 + last_values[3, i]**2 + last_values[5, i]**2)**(1 / 2)
        kin = kin + 0.5 * properties[0, i] * speed**2
    return [pot, kin, pot + kin]
#Coefficients used in the Runge-Kutta loop
def solver_coef():
    """Return the Dormand-Prince 5(4) Butcher tableau as [a, b, c, cstar].

    a: stage nodes; b: stage coupling matrix (row i feeds stage i+1);
    c: 4th-order embedded weights (used for the error estimate);
    cstar: 5th-order weights (last entry 0 gives the FSAL property).
    """
    a = array([0, 1/5, 3/10, 4/5, 8/9, 1, 1], dtype=float)
    b = zeros((7, 7))
    b[1, :1] = [1/5]
    b[2, :2] = [3/40, 9/40]
    b[3, :3] = [44/45, -56/15, 32/9]
    b[4, :4] = [19372/6561, -25360/2187, 64448/6561, -212/729]
    b[5, :5] = [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
    b[6, :6] = [35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]
    c = array([5179/57600, 0, 7571/16695, 393/640, -92097/339200, 187/2100, 1/40], dtype=float)
    cstar = array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0], dtype=float)
    return [a, b, c, cstar]
#The Runge-Kutta code
def rkstep(sol, time, h, k7):
    """Advance one Dormand-Prince 5(4) step of size ``h`` from (time, sol).

    ``k7`` is the last stage of the previous step, recycled here as k1
    (the "first same as last" property).  Uses the module-level tableau
    a, b, c, cstar plus ``properties`` and ``alpha``.  Returns
    [new_sol, solstar, k7_new]: the two embedded solutions, whose
    difference is the local error estimate used for step control.

    NOTE(review): the stages carry a factor of h, so the recycled k1 is
    only exact while h is unchanged from the previous step; after a step
    size change it should arguably be rescaled -- confirm intent.
    """
    k1 = k7
    k2 = h * derivs(time + a[1] * h, sol + b[1, 0] * k1, properties, alpha)
    k3 = h * derivs(time + a[2] * h, sol + b[2, 0] * k1 + b[2, 1] * k2, properties, alpha)
    k4 = h * derivs(time + a[3] * h, sol + b[3, 0] * k1 + b[3, 1] * k2 + b[3, 2] * k3, properties, alpha)
    k5 = h * derivs(time + a[4] * h, sol + b[4, 0] * k1 + b[4, 1] * k2 + b[4, 2] * k3 + b[4, 3] * k4, properties, alpha)
    k6 = h * derivs(time + a[5] * h, sol + b[5, 0] * k1 + b[5, 1] * k2 + b[5, 2] * k3 + b[5, 3] * k4 + b[5, 4] * k5, properties, alpha)
    k7 = h * derivs(time + a[6] * h, sol + b[6, 0] * k1 + b[6, 1] * k2 + b[6, 2] * k3 + b[6, 3] * k4 + b[6, 4] * k5 + b[6, 5] * k6, properties, alpha)
    # Build BOTH embedded solutions from the *incoming* sol.  The original
    # code added the cstar increment on top of the already-updated sol, so
    # (new_sol - solstar) equalled -sum(cstar_i * k_i) -- essentially the
    # whole step increment -- instead of the 4th/5th-order difference that
    # the adaptive controller needs.
    new_sol = sol + c[0] * k1 + c[1] * k2 + c[2] * k3 + c[3] * k4 + c[4] * k5 + c[5] * k6 + c[6] * k7
    solstar = sol + cstar[0] * k1 + cstar[1] * k2 + cstar[2] * k3 + cstar[3] * k4 + cstar[4] * k5 + cstar[5] * k6 + cstar[6] * k7
    return [new_sol, solstar, k7]
#Make the colour & size array for the particles
def plot_props(properties):
    """Build per-particle RGBA colours and marker sizes for the scatter plots.

    Opacity grows with |charge| (from 0.5 toward 1), colour is red for
    charge >= 0 and blue otherwise, and marker area scales as mass**(2/3).
    """
    n_particles = size(properties, 1)
    particle_colours = zeros((n_particles, 4))
    # Alpha channel: |q| = 0 maps to 0.5, large |q| approaches 1.
    particle_colours[:, 3] = 1 - 1 / (abs(properties[1, :]) + 2)
    for idx in arange(n_particles):
        channel = 0 if properties[1, idx] >= 0 else 2  # red vs blue
        particle_colours[idx, channel] = 1
    particle_size = 4 * properties[0, :]**(2 / 3)
    return [particle_colours, particle_size]
#Calculate net momentum of the system
def momentum_calc(sol, properties):
    """Return the system's net momentum as [px, py, pz] = sum_i m_i * v_i."""
    totals = [0.0, 0.0, 0.0]
    for i in range(size(sol, 1)):
        mass = properties[0, i]
        # Velocity components live in rows 1, 3 and 5 of the state array.
        for axis, row in enumerate((1, 3, 5)):
            totals[axis] = totals[axis] + sol[row, i] * mass
    return totals
#Initial conditions
def initial_conditions(spatial_dims, particle_num, properties):
    """Fill and return the initial state array; also fills ``properties``.

    The returned array has rows [x1, v1, x2, v2, x3, v3] and one column
    per particle.  ``properties`` is mutated in place: row 0 gets each
    particle's mass, row 1 its charge.  A common velocity bias is added
    to every particle's v1 and v2 so the whole system drifts.
    """
    v1bias = 0.1
    v2bias = -0.70
    # Per-particle setup table: (x1, v1, x2, v2, x3, v3, mass, charge).
    setup = [
        (0.0,  0.0, 0.0,  0.0, 0.0, 0.0, 50, 50),
        (2.0,  0.0, 0.0,  6.0, 0.0, 0.0, 20, 20),
        (1.7,  0.0, 0.0, -2.5, 0.0, 0.0,  2,  2),
        (-3.0, 0.0, 0.0, -4.5, 0.0, 0.0, 10, 10),
        (-2.7, 0.0, 0.0,  0.5, 0.0, 0.0,  1,  1),
        (-1.0, 0.0, 0.0, -7.0, 0.0, 0.0,  2,  2),
        (0.0, -5.0, 2.5,  0.0, 0.0, 0.0,  2,  2),
    ]
    sol = zeros((spatial_dims * 2, particle_num))
    for idx, (x1, v1, x2, v2, x3, v3, mass, charge) in enumerate(setup):
        sol[0, idx] = x1
        sol[1, idx] = v1 + v1bias
        sol[2, idx] = x2
        sol[3, idx] = v2 + v2bias
        sol[4, idx] = x3
        sol[5, idx] = v3
        properties[0, idx] = mass
        properties[1, idx] = charge
    return sol
###############################################################################
#Start of script. The script calculates in 3 spatial dims, but only plots 2.
t_min = 0.0
t_max = 1.0
epsilon = 1.0e-1
vdivx_errscale = 1
safety = 0.90
particle_num = 7
spatial_dims = 3
slice_interval = 0.01
slices_per_plot = 1
# NOTE(review): floor() returns a float here, so total_slices is a float
# shape for zeros() below -- modern NumPy requires an int; confirm target
# NumPy version or wrap in int().
total_slices = floor((t_max - t_min) / slice_interval) + 1
#sol and associated arrays store things as [x1,v1,x2,v2,x3,v3]
data = zeros((total_slices, spatial_dims * 2, particle_num))
time_array = zeros(total_slices)
#Initial conditions
#Properties[0, :] is mass
#properties[1, :] is charge
properties = ones((2, particle_num))
sol = initial_conditions(spatial_dims, particle_num, properties)
data[0, :, :] = sol[:, :]
#Calc net momentum and print
pnet = momentum_calc(sol, properties)
print ('Net momentum = ' + str(pnet))
#The field constant. -ve for gravity, +ve for electric charges
alpha = -1.
#System energy
energy = ones((3, total_slices))
energy[:, 0] = energy_calc(sol, properties, alpha)
#Get solver coefficients
[a, b, c, cstar] = solver_coef()
t = t_min
cnt = 0
cnt1 = 0
slice_num_temp = -1
#Allows you to scale the error for x and v differently
err_scale = ones((spatial_dims * 2, particle_num))
err_scale[1, :] = vdivx_errscale * err_scale[1, :]
err_scale[3, :] = vdivx_errscale * err_scale[3, :]
err_scale[5, :] = vdivx_errscale * err_scale[5, :]
#Dormand - Prince RK loop
h_try = 0.001
h = h_try
#Dormand - Prince has "first same as last" property.
k7 = h * derivs(t, sol, properties, alpha)
# Adaptive loop: a trial step is accepted only when the scaled error
# estimate is below 1; otherwise h shrinks and the step is retried.
while t < t_max:
    [soltemp, solstar, k7temp] = rkstep(sol, t, h, k7)
    delta = soltemp - solstar
    max_delta = epsilon * err_scale
    err_prop = abs(delta) / abs(max_delta)
    maxprop = err_prop.max()
    maxprop_idx = err_prop.argmax()
    if maxprop > 1:
        #Decrease step size
        htemp = safety * h * abs(max_delta.ravel()[maxprop_idx] / delta.ravel()[maxprop_idx])**0.2
        h = max(0.1 * h, htemp)
    else:
        #Increase step size
        htemp = safety * h * abs(max_delta.ravel()[maxprop_idx] / delta.ravel()[maxprop_idx])**0.25
        h = min(10 * h, htemp, slice_interval)
        #Update vals
        sol = soltemp
        k7 = k7temp
        # NOTE(review): h was already replaced by the NEW step size above,
        # yet the accepted step was taken with the OLD h -- t therefore
        # advances by the wrong amount; confirm and move `t = t + h`
        # before the step-size update if that is unintended.
        t = t + h
        #Print update to the terminal every so often
        slice_num = floor(t / slice_interval)
        if cnt1 % 500 == 0:
            print ('Slice:' + str(int(slice_num)) + ', t = ' + str(t) + ', h = ' +str(h))
        cnt1 = cnt1 + 1
        #Record data after defined time interval
        if slice_num != slice_num_temp:
            data[cnt, :, :] = sol[:, :]
            energy[:, cnt] = energy_calc(sol, properties, alpha)
            time_array[cnt] = t
            cnt = cnt + 1
            slice_num_temp = slice_num
#Plot energy
plot(time_array[:cnt], energy[0, :cnt], '--', time_array[:cnt], energy[1, :cnt], '.', time_array[:cnt], energy[2, :cnt])
xlabel('t')
ylabel('energy')
legend(('potential', 'kinetic', 'total'), loc = 'lower left')
title('Total energy of the system')
savefig('system_energy.png', dpi = 300)
show()
clf()
#Make colour array
[particle_colours, particle_size] = plot_props(properties)
#Plot pictures
destpath = 'Images/'
filename = '%03d'
filetype = '.png'
res_dpi = 150
# NOTE(review): this loop variable shadows the tableau array `a` from
# solver_coef (harmless only because the solver is finished), and
# cnt/slices_per_plot is a float under `from __future__ import division`,
# so arange yields floats used as indices -- an error on modern NumPy.
for a in arange(cnt/slices_per_plot):
    x_vals = data[a * slices_per_plot, 0, :cnt]
    y_vals = data[a * slices_per_plot, 2, :cnt]
    scatter(x_vals, y_vals, s = particle_size, c = particle_colours, edgecolors = particle_colours)
    xlabel('x')
    ylabel('y')
    axis([-5.0, 5.0, -5.0, 5.0])
    axes().set_aspect('equal')
    title('Interaction of "charged" particles')
    fullfilename = destpath + str(filename % a) + filetype
    savefig(fullfilename, dpi = res_dpi)
    clf()
#Make a movie
movie_name = ' gravo_movie.mp4'
savedPath = os.getcwd()
os.chdir(destpath)
movie_command = 'ffmpeg -qscale 1 -r 25 -i ' + filename + filetype + movie_name
os.system(movie_command)
os.chdir(savedPath)
| [
"bogdan.evanzo@gmail.com"
] | bogdan.evanzo@gmail.com |
e15fbbc01d14573d8ad538799b6a7f1880f85372 | d03a31d080267010c7bbd5ac9cbaa94cffa23292 | /tests/test_utils.py | 19edf3ba3488d535380466b4472b639ffe03e69d | [] | no_license | chmouel/python-nonobot | 0d654f4e608f77bb85d0adb16b9d3639a2586f0b | 4e07ec1a4ba755a6f7070f5778fe734a3180ad70 | refs/heads/master | 2021-01-01T18:02:30.832406 | 2014-12-05T10:48:14 | 2014-12-05T10:48:14 | 17,373,952 | 0 | 4 | null | 2014-03-15T09:04:21 | 2014-03-03T17:34:52 | Python | UTF-8 | Python | false | false | 1,231 | py | # -*- coding: utf-8 -*-
# Author: Chmouel Boudjnah <chmouel@chmouel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import urllib
import nonobot.utils as nutils
class UtilsTest(unittest.TestCase):
    """Unit tests for nonobot.utils.clean_nick."""
    def test_clean_nick(self):
        # Trailing underscores are stripped from an IRC-style nick.
        self.assertEqual(nutils.clean_nick("foo_____"),
                         'foo')
    def test_clean_nick_nothing_on_empty(self):
        # An empty nick yields None rather than an empty string.
        self.assertIsNone(nutils.clean_nick(""))
    def test_quoted(self):
        # Other special characters are expected to be URL-quoted instead.
        # NOTE(review): urllib.quote exists only on Python 2; under
        # Python 3 this would need urllib.parse.quote.
        self.assertEqual(nutils.clean_nick("foo***"),
                         urllib.quote("foo***"))
    def test_clean_nick_with_space(self):
        # Spaces are also URL-quoted (to %20).
        name = "foo bar"
        self.assertEqual(nutils.clean_nick(name),
                         urllib.quote(name))
| [
"chmouel@chmouel.com"
] | chmouel@chmouel.com |
7aff18c1f4eb1588572df7d1e9b78d0096ea42c8 | 8dbe574f3b20308d79ef37643570d7dec15e67d9 | /cn.zero/py.ori.fmt/m1140.bin.py | d1675897ba84651c10bf4eed04b87afd190c32a7 | [] | no_license | xaeingking/ZeroAoVoiceScripts | 62526d004bd02e645970930ecd4b6053809092ab | 512c1fd544954a38c92fc097f5b0c006031ee87d | refs/heads/master | 2020-05-20T17:04:55.028776 | 2019-01-29T10:40:44 | 2019-01-29T10:40:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,701 | py | from ZeroScenarioHelper import *
def main():
CreateScenaFile(
"m1140.bin", # FileName
"m1140", # MapName
"m1140", # Location
0x006E, # MapIndex
"ed7304",
0x00080000, # Flags
("", "", "", "", "", ""), # include
0x00, # PlaceNameNumber
0x00, # PreInitFunctionIndex
b'\x00\xff\xff', # Unknown_51
# Information
[0, 0, -1000, 0, 0, 0, 34000, 262, 30, 45, 0, 360, 0, -28000, -30000, 0, 0, 1, 110, 0, 1, 0, 2],
)
BuildStringList((
"m1140", # 0
"钢铁完全体", # 1
"bm1040", # 2
"bm1040", # 3
"bm1040", # 4
))
ATBonus("ATBonus_24C", 100, 5, 1, 5, 1, 5, 1, 5, 5, 5, 5, 5, 5, 0, 0, 0)
ATBonus("ATBonus_23C", 100, 5, 1, 5, 1, 5, 1, 5, 5, 0, 0, 0, 1, 0, 0, 0)
Sepith("Sepith_851", 6, 6, 15, 9, 0, 0, 0)
Sepith("Sepith_859", 4, 4, 0, 0, 9, 9, 9)
MonsterBattlePostion("MonsterBattlePostion_29C", 8, 8, 180)
MonsterBattlePostion("MonsterBattlePostion_2A0", 5, 9, 180)
MonsterBattlePostion("MonsterBattlePostion_2A4", 11, 10, 180)
MonsterBattlePostion("MonsterBattlePostion_2A8", 6, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_2AC", 10, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_2B0", 13, 13, 180)
MonsterBattlePostion("MonsterBattlePostion_2B4", 4, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_2B8", 8, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_2FC", 7, 4, 0)
MonsterBattlePostion("MonsterBattlePostion_300", 10, 11, 225)
MonsterBattlePostion("MonsterBattlePostion_304", 4, 7, 90)
MonsterBattlePostion("MonsterBattlePostion_308", 12, 7, 270)
MonsterBattlePostion("MonsterBattlePostion_30C", 4, 11, 135)
MonsterBattlePostion("MonsterBattlePostion_310", 11, 4, 315)
MonsterBattlePostion("MonsterBattlePostion_314", 7, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_318", 5, 5, 45)
MonsterBattlePostion("MonsterBattlePostion_27C", 7, 9, 180)
MonsterBattlePostion("MonsterBattlePostion_280", 11, 10, 180)
MonsterBattlePostion("MonsterBattlePostion_284", 10, 13, 180)
MonsterBattlePostion("MonsterBattlePostion_288", 5, 11, 180)
MonsterBattlePostion("MonsterBattlePostion_28C", 12, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_290", 4, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_294", 14, 14, 180)
MonsterBattlePostion("MonsterBattlePostion_298", 2, 13, 180)
MonsterBattlePostion("MonsterBattlePostion_31C", 8, 12, 180)
MonsterBattlePostion("MonsterBattlePostion_320", 3, 8, 180)
MonsterBattlePostion("MonsterBattlePostion_324", 12, 8, 180)
MonsterBattlePostion("MonsterBattlePostion_328", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_32C", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_330", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_334", 0, 0, 180)
MonsterBattlePostion("MonsterBattlePostion_338", 0, 0, 180)
# monster count: 8
BattleInfo(
"BattleInfo_33C", 0x0000, 21, 6, 60, 8, 1, 25, 0, "bm1040", "Sepith_851", 60, 25, 10, 5,
(
("ms65000.dat", 0, 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms65000.dat", "ms65000.dat", 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms65000.dat", "ms62700.dat", "ms65000.dat", 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms65000.dat", "ms65000.dat", "ms62700.dat", "ms65000.dat", 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
)
)
BattleInfo(
"BattleInfo_404", 0x0000, 21, 6, 60, 8, 1, 25, 0, "bm1040", "Sepith_859", 60, 25, 10, 5,
(
("ms62700.dat", 0, 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms62700.dat", "ms62700.dat", 0, 0, 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms62700.dat", "ms65000.dat", "ms62700.dat", 0, 0, 0, 0, 0, "MonsterBattlePostion_29C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
("ms62700.dat", "ms62700.dat", "ms65000.dat", "ms62700.dat", 0, 0, 0, 0, "MonsterBattlePostion_27C", "MonsterBattlePostion_2FC", "ed7400", "ed7403", "ATBonus_24C"),
)
)
# event battle count: 1
BattleInfo(
"BattleInfo_4CC", 0x0000, 40, 6, 0, 0, 1, 0, 0, "bm1040", 0x00000000, 100, 0, 0, 0,
(
("ms72900.dat", "ms72900.dat", "ms72900.dat", 0, 0, 0, 0, 0, "MonsterBattlePostion_31C", "MonsterBattlePostion_2FC", "ed7401", "ed7403", "ATBonus_23C"),
(),
(),
(),
)
)
AddCharChip((
"chr/ch00000.itc", # 00
"chr/ch00000.itc", # 01
"chr/ch00000.itc", # 02
"chr/ch00000.itc", # 03
"chr/ch00000.itc", # 04
"chr/ch00000.itc", # 05
"chr/ch00000.itc", # 06
"chr/ch00000.itc", # 07
"chr/ch00000.itc", # 08
"chr/ch00000.itc", # 09
"chr/ch00000.itc", # 0A
"chr/ch00000.itc", # 0B
"chr/ch00000.itc", # 0C
"chr/ch00000.itc", # 0D
"chr/ch00000.itc", # 0E
"chr/ch00000.itc", # 0F
"monster/ch65050.itc", # 10
"monster/ch65050.itc", # 11
"monster/ch62750.itc", # 12
"monster/ch62750.itc", # 13
"monster/ch72950.itc", # 14
"monster/ch72951.itc", # 15
))
DeclNpc(0, -27500, 23000, 0, 484, 0x0, 0, 20, 0, 0, 0, 255, 255, 255, 0)
DeclMonster(20770, 2670, -28000, 0x1010000, "BattleInfo_33C", 0, 16, 0xFFFF, 0, 1)
DeclMonster(7940, 18540, -29190, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(-1260, 21090, -29200, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(-12520, 16300, -29190, 0x1010000, "BattleInfo_33C", 0, 16, 0xFFFF, 0, 1)
DeclMonster(-21210, -550, -28000, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(4300, 4670, -27200, 0x1010000, "BattleInfo_33C", 0, 16, 0xFFFF, 0, 1)
DeclMonster(-7580, 4170, -27200, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclMonster(320, -6970, -27200, 0x1010000, "BattleInfo_404", 0, 18, 0xFFFF, 2, 3)
DeclActor(0, -29000, 23000, 1200, 0, -28000, 23000, 0x007C, 0, 3, 0x0000)
ChipFrameInfo(1000, 0, [0, 1, 2, 3, 4, 5]) # 0
ChipFrameInfo(2000, 0, [0, 1, 2, 3, 4, 5]) # 1
ChipFrameInfo(1000, 0, [0, 1, 2, 3, 4, 5]) # 2
ChipFrameInfo(2000, 0, [0, 1, 2, 3, 4, 5]) # 3
ScpFunction((
"Function_0_550", # 00, 0
"Function_1_56F", # 01, 1
"Function_2_570", # 02, 2
"Function_3_588", # 03, 3
))
def Function_0_550(): pass
label("Function_0_550")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_56E")
OP_A1(0xFE, 0x3E8, 0x8, 0x0, 0x1, 0x2, 0x3, 0x4, 0x3, 0x2, 0x1)
Jump("Function_0_550")
label("loc_56E")
Return()
# Function_0_550 end
def Function_1_56F(): pass
label("Function_1_56F")
Return()
# Function_1_56F end
def Function_2_570(): pass
label("Function_2_570")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x11B, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_583")
OP_70(0x0, 0x0)
Jump("loc_587")
label("loc_583")
OP_70(0x0, 0x1E)
label("loc_587")
Return()
# Function_2_570 end
def Function_3_588(): pass
label("Function_3_588")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x72, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_617")
TalkBegin(0xFF)
SetMapFlags(0x8000000)
SetChrName("")
#A0001
AnonymousTalk(
0xFF,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"从宝箱中感觉到了高级魔兽的气息。\x01",
"【推测魔兽等级40】\x01",
"要打开宝箱吗?\x02",
)
)
Menu(
0,
-1,
-1,
1,
(
"是\x01", # 0
"否\x01", # 1
)
)
MenuEnd(0x0)
OP_60(0x0)
OP_57(0x0)
OP_5A()
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_617")
ClearMapFlags(0x8000000)
TalkEnd(0xFF)
Return()
label("loc_617")
SetMapFlags(0x8000000)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x11B, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_7D3")
Sound(14, 0, 100, 0)
OP_71(0x0, 0x0, 0x1E, 0x0, 0x0)
Sleep(500)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x72, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_710")
OP_A7(0x8, 0xFF, 0xFF, 0xFF, 0x0, 0x0)
TurnDirection(0x8, 0x0, 0)
OP_98(0x8, 0x0, 0x3E8, 0x0, 0x0, 0x0)
def lambda_670():
OP_98(0xFE, 0x0, 0xFFFFFC18, 0x0, 0x3E8, 0x0)
ExitThread()
QueueWorkItem(0x8, 1, lambda_670)
def lambda_68A():
OP_A7(0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x3E8)
ExitThread()
QueueWorkItem(0x8, 2, lambda_68A)
ClearChrFlags(0x8, 0x80)
SetChrFlags(0x8, 0x8000)
#A0002
AnonymousTalk(
0x3E7,
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"出现了魔兽!\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
WaitChrThread(0x8, 1)
Battle("BattleInfo_4CC", 0x0, 0x0, 0x0, 0x0, 0xFF)
SetChrFlags(0x8, 0x80)
ClearChrFlags(0x8, 0x8000)
Switch(
(scpexpr(EXPR_PUSH_VALUE_INDEX, 0x3), scpexpr(EXPR_END)),
(0, "loc_6F1"),
(2, "loc_700"),
(1, "loc_70D"),
(SWITCH_DEFAULT, "loc_710"),
)
label("loc_6F1")
SetScenarioFlags(0x72, 4)
OP_70(0x0, 0x1E)
Sleep(500)
Jump("loc_710")
label("loc_700")
OP_70(0x0, 0x0)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
label("loc_70D")
OP_B7(0x0)
Return()
label("loc_710")
Jc((scpexpr(EXPR_EXEC_OP, "AddItemNumber('暗之刃', 1)"), scpexpr(EXPR_END)), "loc_767")
FadeToDark(300, 0, 100)
Sound(17, 0, 100, 0)
SetMessageWindowPos(-1, -1, -1, -1)
#A0003
AnonymousTalk(
0x3E7,
(
scpstr(SCPSTR_CODE_ITEM, '暗之刃'),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"获得了。\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
SetMessageWindowPos(14, 280, 60, 3)
FadeToBright(300, 0)
SetScenarioFlags(0x11B, 0)
OP_DE(0x6, 0x0)
Jump("loc_7CE")
label("loc_767")
FadeToDark(300, 0, 100)
#A0004
AnonymousTalk(
0x3E7,
(
"宝箱里装有",
scpstr(SCPSTR_CODE_ITEM, '暗之刃'),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"。\x01",
"不过现有的数量太多,",
scpstr(SCPSTR_CODE_ITEM, '暗之刃'),
scpstr(SCPSTR_CODE_COLOR, 0x0),
"不能再拿更多了。\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
Sound(15, 0, 100, 0)
OP_71(0x0, 0x1E, 0x0, 0x0, 0x0)
label("loc_7CE")
Jump("loc_805")
label("loc_7D3")
FadeToDark(300, 0, 100)
#A0005
AnonymousTalk(
0x3E7,
(
scpstr(0x6),
scpstr(SCPSTR_CODE_COLOR, 0x5),
"宝箱里什么都没有。\x07\x00\x02",
)
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
label("loc_805")
Sleep(30)
TalkEnd(0xFF)
ClearMapFlags(0x8000000)
Return()
# Function_3_588 end
SaveToFile()
Try(main)
| [
"zj.yang@qq.com"
] | zj.yang@qq.com |
835478f976795573d8355bbee93293234d4cb55f | c87ae09a0229b4b4620c511b0c51eb685ec22b99 | /Python全栈学习/第四模块 网络编程进阶&数据库开发/practise/基于多进程的socket通信/队列的使用.py | b186e6d31bb0a9672c52aa8b644e8f974705eaaf | [] | no_license | caideyang/python2018 | 050f4c29c37b5bec963e77e0724cd05a9350deed | b7a3a728ef36b43448dc5ff594fdba500b67ad53 | refs/heads/master | 2020-03-25T06:02:54.699941 | 2018-11-01T23:04:29 | 2018-11-01T23:04:29 | 143,480,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/26 10:41
from multiprocessing import Queue
import time
if __name__ == "__main__":
    q = Queue(3) # Create a queue with a maximum depth of 3
    q.put("hello") # Put a message onto the queue
    q.put([1,2,3,4])
    q.put({"name": "caideyang"})
    # time.sleep(1)
    # NOTE(review): multiprocessing.Queue feeds items through a background
    # thread, so empty()/full() can briefly lag the put() calls above --
    # presumably why the sleep was tried; confirm before relying on them.
    print(q.empty()) # Check whether the queue is empty
    print(q.full()) # Check whether the queue is full
    print(q.get()) # Take one item off the queue
    print(q.get())
"deyangcai@163.com"
] | deyangcai@163.com |
515436b4d5fe3ddd0030470fde74f0965147a76f | 96cfaaa771c2d83fc0729d8c65c4d4707235531a | /Configuration/Spring08Production/python/Spring08_PhotonJetpt30-50_GEN_cfg.py | 8cf29fde0fbca5f06b831fcb9e3f0f9fe8054a8d | [] | no_license | khotilov/cmssw | a22a160023c7ce0e4d59d15ef1f1532d7227a586 | 7636f72278ee0796d0203ac113b492b39da33528 | refs/heads/master | 2021-01-15T18:51:30.061124 | 2013-04-20T17:18:07 | 2013-04-20T17:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Gen")
process.load("FWCore.MessageService.MessageLogger_cfi")
# control point for all seeds
process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.Spring08Production.Spring08_PhotonJetpt30_50_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/Spring08Production/data/Spring08_PhotonJetpt30-50_GEN.cfg,v $'),
annotation = cms.untracked.string('FastSim PhotonJet Pthat 30-50 for Spring08')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.GEN = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN')
),
fileName = cms.untracked.string('PhotonJetpt30-50.root')
)
process.e = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.e)
| [
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] | sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch |
2d4343cd3084b61e1e48672feeb7de774d2d4833 | 6f3b3f29b0ed43f056526a7d96df27c623cf8a29 | /czsc/enum.py | ccd2fd8436e1ff3e6e6431b4b1183683bb279deb | [
"MIT"
] | permissive | dst1213/czsc | a163c362d162110557e64e8ea8b41350d4d90a00 | 939803a9b196c19db3d8498f63276a4fdb8a442b | refs/heads/master | 2023-04-22T04:17:22.703347 | 2021-04-30T13:53:40 | 2021-04-30T13:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,016 | py | # coding: utf-8
from enum import Enum
class Mark(Enum):
    # Fractal mark for the chan (缠论) analysis: D = bottom fractal,
    # G = top fractal.  Values are user-facing Chinese labels.
    D = "底分型"
    G = "顶分型"
class Direction(Enum):
    # Stroke/segment direction.  Values are user-facing Chinese labels
    # ("向上" = upward, "向下" = downward).
    Up = "向上"
    Down = "向下"
class Freq(Enum):
    # K-line (candlestick) frequency: F1..F60 are minute bars; D/W/M are
    # day/week/month bars.  Values are user-facing Chinese labels.
    F1 = "1分钟"
    F5 = "5分钟"
    F15 = "15分钟"
    F30 = "30分钟"
    F60 = "60分钟"
    D = "日线"
    W = "周线"
    M = "月线"
class Signals(Enum):
Other = "Other~其他"
Y = "Y~是"
N = "N~否"
INB = "INB~向下笔买点区间"
INS = "INS~向上笔卖点区间"
FXB = "FXB~向下笔结束分型左侧高点升破"
FXS = "FXS~向上笔结束分型左侧低点跌破"
BU0 = "BU0~向上笔顶分完成"
BU1 = "BU1~向上笔走势延伸"
BD0 = "BD0~向下笔底分完成"
BD1 = "BD1~向下笔走势延伸"
# TK = Triple K
TK1 = "TK1~三K底分"
TK2 = "TK2~三K上涨"
TK3 = "TK3~三K顶分"
TK4 = "TK4~三K下跌"
# ==================================================================================================================
# 信号值编码规则:
# 多空:L - 多头信号;S - 空头信号;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推;基础型有着特殊含义,用于因子组合,各种变种形态编号主要用于形态对比研究。
# 组合规则:笔数_多空_编号;如 LA0 表示多头信号A0
# ==================================================================================================================
LA0 = "LA0~aAb式底背驰"
LB0 = "LB0~aAbcd式底背驰"
LC0 = "LC0~aAbBc式底背驰"
LD0 = "LD0~abcAd式底背驰"
LE0 = "LE0~ABC式底背驰"
LF0 = "LF0~类趋势底背驰"
LG0 = "LG0~上颈线突破"
LH0 = "LH0~向上中枢完成"
LI0 = "LI0~三买"
LJ0 = "LJ0~向上三角扩张中枢"
LK0 = "LK0~向上三角收敛中枢"
LL0 = "LL0~向上平台型中枢"
# ------------------------------------------------------------------------------------------------------------------
LA1 = "LA1~aAb式底背驰特例一"
LA2 = "LA2~aAb式底背驰特例二"
LA3 = "LA3~aAb式底背驰特例三"
LB1 = "LB1~aAbcd式底背驰特例一"
LB2 = "LB2~aAbcd式底背驰特例二"
LB3 = "LB3~aAbcd式底背驰特例三"
LC1 = "LC1~aAbBc式底背驰特例一"
LC2 = "LC2~aAbBc式底背驰特例二"
LC3 = "LC3~aAbBc式底背驰特例三"
LD1 = "LD1~abcAd式底背驰特例一"
LD2 = "LD2~abcAd式底背驰特例二"
LD3 = "LD3~abcAd式底背驰特例三"
LE1 = "LE1~ABC式底背驰特例一"
LE2 = "LE2~ABC式底背驰特例二"
LE3 = "LE3~ABC式底背驰特例三"
LF1 = "LF1~类趋势底背驰特例一"
LF2 = "LF2~类趋势底背驰特例二"
LF3 = "LF3~类趋势底背驰特例三"
LG1 = "LG1~上颈线突破特例一"
LG2 = "LG2~上颈线突破特例二"
LG3 = "LG3~上颈线突破特例三"
LH1 = "LH1~向上中枢完成特例一"
LH2 = "LH2~向上中枢完成特例二"
LH3 = "LH3~向上中枢完成特例三"
LI1 = "LI1~三买特例一"
LI2 = "LI2~三买特例二"
LI3 = "LI3~三买特例三"
LJ1 = "LJ1~向上三角扩张中枢特例一"
LJ2 = "LJ2~向上三角扩张中枢特例二"
LJ3 = "LJ3~向上三角扩张中枢特例三"
LK1 = "LK1~向上三角收敛中枢特例一"
LK2 = "LK2~向上三角收敛中枢特例二"
LK3 = "LK3~向上三角收敛中枢特例三"
LL1 = "LL1~向上平台型中枢特例一"
LL2 = "LL2~向上平台型中枢特例二"
LL3 = "LL3~向上平台型中枢特例三"
# ------------------------------------------------------------------------------------------------------------------
SA0 = "SA0~aAb式顶背驰"
SB0 = "SB0~aAbcd式顶背驰"
SC0 = "SC0~aAbBc式顶背驰"
SD0 = "SD0~abcAd式顶背驰"
SE0 = "SE0~ABC式顶背驰"
SF0 = "SF0~类趋势顶背驰"
SG0 = "SG0~下颈线突破"
SH0 = "SH0~向下中枢完成"
SI0 = "SI0~三卖"
SJ0 = "SJ0~向下三角扩张中枢"
SK0 = "SK0~向下三角收敛中枢"
SL0 = "SL0~向下平台型中枢"
# ------------------------------------------------------------------------------------------------------------------
SA1 = "SA1~aAb式顶背驰特例一"
SA2 = "SA2~aAb式顶背驰特例二"
SA3 = "SA3~aAb式顶背驰特例三"
SB1 = "SB1~aAbcd式顶背驰特例一"
SB2 = "SB2~aAbcd式顶背驰特例二"
SB3 = "SB3~aAbcd式顶背驰特例三"
SC1 = "SC1~aAbBc式顶背驰特例一"
SC2 = "SC2~aAbBc式顶背驰特例二"
SC3 = "SC3~aAbBc式顶背驰特例三"
SD1 = "SD1~abcAd式顶背驰特例一"
SD2 = "SD2~abcAd式顶背驰特例二"
SD3 = "SD3~abcAd式顶背驰特例三"
SE1 = "SE1~ABC式顶背驰特例一"
SE2 = "SE2~ABC式顶背驰特例二"
SE3 = "SE3~ABC式顶背驰特例三"
SF1 = "SF1~类趋势顶背驰特例一"
SF2 = "SF2~类趋势顶背驰特例二"
SF3 = "SF3~类趋势顶背驰特例三"
SG1 = "SG1~下颈线突破特例一"
SG2 = "SG2~下颈线突破特例二"
SG3 = "SG3~下颈线突破特例三"
SH1 = "SH1~向下中枢完成特例一"
SH2 = "SH2~向下中枢完成特例二"
SH3 = "SH3~向下中枢完成特例三"
SI1 = "SI1~三卖特例一"
SI2 = "SI2~三卖特例二"
SI3 = "SI3~三卖特例三"
SJ1 = "SJ1~向下三角扩张中枢特例一"
SJ2 = "SJ2~向下三角扩张中枢特例二"
SJ3 = "SJ3~向下三角扩张中枢特例三"
SK1 = "SK1~向下三角收敛中枢特例一"
SK2 = "SK2~向下三角收敛中枢特例二"
SK3 = "SK3~向下三角收敛中枢特例三"
SL1 = "SL1~向下平台型中枢特例一"
SL2 = "SL2~向下平台型中枢特例二"
SL3 = "SL3~向下平台型中枢特例三"
# --------------------------------------------------------------------------------------------
# 信号值编码规则:
# 笔数:X3 - 三笔信号;X5 - 五笔信号;X7 - 七笔信号;X9 - 九笔信号;
# 多空:L - 多头信号;S - 空头信号;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推
# 组合规则:笔数_多空_编号;如 X5LA0 表示五笔多头信号A0
# ============================================================================================
# 三笔形态信号
# 具体描述:
# --------------------------------------------------------------------------------------------
X3LA0 = "X3LA0~向下不重合"
X3LB0 = "X3LB0~向下奔走型中枢"
X3LC0 = "X3LC0~向下三角收敛中枢"
X3LD0 = "X3LD0~向下三角扩张中枢"
X3LE0 = "X3LE0~向下盘背中枢"
X3LF0 = "X3LF0~向下无背中枢"
X3SA0 = "X3SA0~向上不重合"
X3SB0 = "X3SB0~向上奔走型中枢"
X3SC0 = "X3SC0~向上三角收敛中枢"
X3SD0 = "X3SD0~向上三角扩张中枢"
X3SE0 = "X3SE0~向上盘背中枢"
X3SF0 = "X3SF0~向上无背中枢"
class Factors(Enum):
    """Factor labels for chan-theory buy/sell point classification.

    Each member's value is "<code>~<Chinese description>"; the code before
    '~' mirrors the member name.
    """
    Other = "Other~其他"
    Y = "Y~是"
    N = "N~否"
    # ==================================================================================================================
    # Factor value encoding rules (translated):
    # Type:
    #   L1 - first buy / quasi first buy;  L2 - second buy / quasi second buy;  L3 - third buy / quasi third buy
    #   S1 - first sell / quasi first sell; S2 - second sell / quasi second sell; S3 - third sell / quasi third sell
    # Suffix: A0 - base form of class A; A1 - class A variant 1 ... and so on
    # Composition: <type><suffix>
    # ==================================================================================================================
    L1A0 = "L1A0~一买"
    L1A1 = "L1A1~一买特例一"
    L1A2 = "L1A2~一买特例二"
    L1A3 = "L1A3~一买特例三"
    L1A4 = "L1A4~一买特例四"
    L1A5 = "L1A5~一买特例五"
    L2A0 = "L2A0~二买"
    L2A1 = "L2A1~二买特例一"
    L2A2 = "L2A2~二买特例二"
    L2A3 = "L2A3~二买特例三"
    L2A4 = "L2A4~二买特例四"
    L2A5 = "L2A5~二买特例五"
    L3A0 = "L3A0~三买"
    L3A1 = "L3A1~三买特例一"
    L3A2 = "L3A2~三买特例二"
    L3A3 = "L3A3~三买特例三"
    L3A4 = "L3A4~三买特例四"
    L3A5 = "L3A5~三买特例五"
    # ------------------------------------------------------------------------------------------------------------------
    # Sell-side factors mirror the buy-side layout above.
    # ------------------------------------------------------------------------------------------------------------------
    S1A0 = "S1A0~一卖"
    S1A1 = "S1A1~一卖特例一"
    S1A2 = "S1A2~一卖特例二"
    S1A3 = "S1A3~一卖特例三"
    S1A4 = "S1A4~一卖特例四"
    S1A5 = "S1A5~一卖特例五"
    S2A0 = "S2A0~二卖"
    S2A1 = "S2A1~二卖特例一"
    S2A2 = "S2A2~二卖特例二"
    S2A3 = "S2A3~二卖特例三"
    S2A4 = "S2A4~二卖特例四"
    S2A5 = "S2A5~二卖特例五"
    S3A0 = "S3A0~三卖"
    S3A1 = "S3A1~三卖特例一"
    S3A2 = "S3A2~三卖特例二"
    S3A3 = "S3A3~三卖特例三"
    S3A4 = "S3A4~三卖特例四"
    S3A5 = "S3A5~三卖特例五"
# ==================================================================================================================
| [
"zeng_bin8888@163.com"
] | zeng_bin8888@163.com |
cc8b434ce82b6e1625a617bbbd89b70bd16b8524 | f225b35d49562e7a1114968bdf9128dbc4cd91ab | /myspider/items.py | fa8e60c784f6743287563124add7390aebc383f6 | [] | no_license | 15032373556/scrapy_exercise | 1948ce42102f99e414ae214b27163eb1d9e3b338 | 7a6e8b7a395044bda3acb649ab8f5a74bc854d82 | refs/heads/master | 2022-11-25T13:29:28.726984 | 2020-07-25T03:09:41 | 2020-07-25T03:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ItcastItem(scrapy.Item):
    """Scrapy item holding one itcast.cn instructor record."""
    # Fields scraped: 1. instructor name  2. instructor title  3. instructor bio
    name = scrapy.Field()
    title = scrapy.Field()
    info = scrapy.Field()
| [
"1798549164@qq.com"
] | 1798549164@qq.com |
25da0744ea358f58944383d0fa56dbde72f8ca7e | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rbd.py | 4548339f733e7c118189a771baa2ea2fcc9e9962 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Interpret the argument tokens of the toy 'rBD' print instruction.
    # The payload must be delimited by standalone double-quote tokens; the
    # tokens between them are echoed space-separated.  Anything else is
    # silently ignored.  (Python 2 print-statement syntax.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Drop the two quote tokens, keep the data to print.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens: print an empty line.
            print
def main(fileName):
    # Execute the toy program in *fileName*: every line must begin with the
    # 'rBD' opcode followed by its arguments; any other opcode prints ERROR
    # and aborts the run.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'rBD':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
705796328fb21af7633d2c714b02f3ea143c60d9 | bd6e9f4f6261795fd876107b21b04cafec00b5d7 | /web/code/src/my_proj/settings/base.py | e4e7135dce4402369d49d6a25f66aef51faf12d5 | [] | no_license | hema71024/StudentPortal | 774f4a7167ce624c60e5b6c8a3c8ccac18860616 | 2aa53f57120acfd93178e5a5eee49c69a1527fdd | refs/heads/master | 2021-04-24T07:21:05.921571 | 2018-04-03T08:56:59 | 2018-04-03T08:56:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,286 | py | """
Django settings for my_proj project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'templates'),
# insert more TEMPLATE_DIRS here
join(BASE_DIR, 'forumapp/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Added Bt Me
# 'social.apps.django_app.context_processors.backends',
# 'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'authtools',
'crispy_forms',
'easy_thumbnails',
'profiles',
'accounts',
# My Added
'forumapp',
# 'social.apps.django_app.default',
# 'social_django',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'my_proj.urls'
WSGI_APPLICATION = 'my_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in
# os.environ
'default': env.db(),
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
ALLOWED_HOSTS = []
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
################################################################################
# My Added #####################################################################
# FOr Social Login##############################################################
# Social login: try each social backend first, fall back to Django's model
# backend for regular username/password auth.
AUTHENTICATION_BACKENDS = (
    'social_core.backends.open_id.OpenIdAuth', # for Google authentication
    'social_core.backends.google.GoogleOpenId', # for Google authentication
    'social_core.backends.google.GoogleOAuth2', # for Google authentication
    'social_core.backends.github.GithubOAuth2', # for Github authentication
    'social_core.backends.facebook.FacebookOAuth2', # for Facebook authentication
    'django.contrib.auth.backends.ModelBackend',
)
# NOTE(review): this OAuth2 client secret is committed to source control.
# It should be loaded from the environment (env('...'), like SECRET_KEY above)
# and the exposed credential rotated.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '365081772682-um9ctc90c5g9a7n63l3drhhmeqlvgvk8.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'tGur-f8jM7wHO-pL3uNsYqLv'
# For Mail Sending | [
"ashutoshhathidara98@gmail.com"
] | ashutoshhathidara98@gmail.com |
80d6314141d4f24833a5ea2410e5ce6f0c2c9472 | 074afd26d00bb742b03c12891b057ab263e640bf | /LeetCode 30 days/week1.2.py | ffcfa748b1935152b9419bb6cf112f940f619277 | [] | no_license | IsmailTitas1815/Data-Structure | 7a898800b1e53c778b1f2f11b0df259e52c20140 | fece8dd97d3e162e39fc31d5f3498a6dac49b0f0 | refs/heads/master | 2023-02-05T10:39:49.349484 | 2020-12-21T13:37:22 | 2020-12-21T13:37:22 | 296,343,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # import re
# s = '123 456-7890'
# new_s = [int(i) for i in re.findall('\d', s)]
# unformattedPhone = "1239084590348509 456-7890"
# numbersList = [int(s) for s in unformattedPhone if s.isdigit()]
# print(numbersList)
class Solution:
    """Happy-number detection via cycle tracking."""

    def isHappy(self, num):
        """Return True if repeatedly summing the squares of *num*'s digits reaches 1.

        Unhappy numbers fall into a repeating cycle; a revisited value
        therefore proves the sequence will never hit 1.
        """
        seen = set()
        while num != 1:
            if num in seen:
                # We are back at a value already produced: stuck in a cycle.
                return False
            seen.add(num)
            total = 0
            for digit in str(num):
                total += int(digit) * int(digit)
            num = total
        return True
# NOTE(review): s and old are never used below — leftovers from an earlier draft.
s=0
old = 0
# Read one integer from stdin and report whether it is a happy number.
num = int(input())
obj = Solution()
boo = obj.isHappy(num)
print(boo)
#
# def happy_numbers(n):
# past = set()
# while n != 1:
# n = sum(int(i)**2 for i in str(n))
# if n in past:
# return False
# past.add(n)
# return True
# print([x for x in range(500) if happy_numbers(x)][:10]) | [
"titas.sarker1234@gmail.com"
] | titas.sarker1234@gmail.com |
def2f5d76a06abfa75bee8540d5a5982b97fa204 | 9fb1c85a6d39c08e2a3cc235335bc482ad909b71 | /prowler/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py | d49160981b34ad878af72233ab6ebf3869bfae89 | [
"Apache-2.0"
] | permissive | muharihar/prowler | 06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a | 25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b | refs/heads/master | 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null | UTF-8 | Python | false | false | 1,012 | py | from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.route53.route53domains_client import (
route53domains_client,
)
class route53_domains_transferlock_enabled(Check):
    """Prowler check: flag Route53 registered domains without an active transfer lock."""

    # NOTE(review): the annotation below follows the original signature, although
    # the method returns a list of reports.
    def execute(self) -> Check_Report_AWS:
        """Produce one PASS/FAIL report per registered domain."""
        findings = []
        for domain in route53domains_client.domains.values():
            report = Check_Report_AWS(self.metadata())
            report.resource_id = domain.name
            report.region = domain.region
            # The registrar signals an active transfer lock through the
            # "clientTransferProhibited" EPP status code.
            statuses = domain.status_list or []
            if "clientTransferProhibited" in statuses:
                report.status = "PASS"
                report.status_extended = (
                    f"Transfer Lock is enabled for the {domain.name} domain"
                )
            else:
                report.status = "FAIL"
                report.status_extended = (
                    f"Transfer Lock is disabled for the {domain.name} domain"
                )
            findings.append(report)
        return findings
| [
"noreply@github.com"
] | muharihar.noreply@github.com |
e5d247bf7cc030a77b8a76348eccb8ccd9311a7f | f91f9330179236025528682df90c4ba9baaae49d | /backend/manage.py | f4749adf415cbcfc37b8618e105dd6561a310b4a | [] | no_license | crowdbotics-apps/test-4316-dev-9267 | 96e7b04626045a40333ef81f161bf7e096c68931 | 3a0ae32df3ea8175d4aa37a20dec262086e00eb4 | refs/heads/master | 2022-12-04T10:11:57.679231 | 2020-08-24T08:22:33 | 2020-08-24T08:22:33 | 289,866,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with this project's settings module."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_4316_dev_9267.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (Django not installed,
        # or the virtualenv not activated), chaining the original traceback.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
15d8600fcc62bae425faaf90085a8f09360ab77e | df038c9a84ca9b11bbef86d84d2e6feb6fd733bf | /setup.py | d7698c8b3fa3d0b9da3a92af8fb21d3751e3cf58 | [
"BSD-2-Clause"
] | permissive | wybaby/PSpider | d31ff8cbde1a3f23d05c1684c455beea2b48c915 | 5087fc20589878fa123daa113213fbf17282a35b | refs/heads/master | 2021-01-22T01:55:16.258596 | 2017-06-23T03:35:04 | 2017-06-23T07:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # _*_ coding: utf-8 _*_
"""
install script: python3 setup.py install
"""
from setuptools import setup, find_packages
# Package metadata and dependencies for the spider distribution.
setup(
    name="spider",
    version="2.4.5",
    author="xianhu",
    keywords=["spider", "crawler", "multi-threads", "asyncio", "distributed"],
    # Ship every package except helper/test directories.
    packages=find_packages(exclude=("otherfiles", "test.*")),
    package_data={
        "": ["*.conf"], # include all *.conf files
    },
    install_requires=[
        "aiohttp>=2.0.0", # aiohttp, http for asyncio
        "pybloom_live>=2.0.0", # pybloom-live, fork from pybloom
        "redis>=2.10.0", # redis, python client for redis
        "requests>=2.10.0", # requests, http for humans
    ]
)
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
99cbb1c2c9693fe423a01b59ef5289715abab28f | 396ee8958eb753d96a62b1199103c2c1194c08e0 | /creme/ensemble/bagging.py | a8509f03c6ffe22b2ed05d0f2a2d8f770954a48a | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ZuoMatthew/creme | fe1499a9071a994587172f908a530522be6b915b | 27d40fa7a5014c94d7f95dee259368c0adc7115c | refs/heads/master | 2020-04-22T20:46:58.100005 | 2019-02-12T17:13:15 | 2019-02-12T17:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | import collections
import copy
from sklearn import utils
from .. import base
__all__ = ['BaggingClassifier']
class BaggingClassifier(base.BinaryClassifier):
    """Bagging for classification.

    For each incoming observation, each model's `fit_one` method is called `k` times where `k`
    is sampled from a Poisson distribution of parameter 1. `k` thus has a 36% chance of being equal
    to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being
    equal to 3, a 1% chance of being equal to 4, etc. You can do `scipy.stats.poisson(1).pmf(k)`
    for more detailed values.

    Parameters:
        base_estimator (creme.base.Classifier): The estimator to bag.

    Example:

        In the following example three logistic regressions are bagged together. The performance is
        slightly better than when using a single logistic regression.

        ::

            >>> import creme.compose
            >>> import creme.ensemble
            >>> import creme.linear_model
            >>> import creme.model_selection
            >>> import creme.optim
            >>> import creme.preprocessing
            >>> import creme.stream
            >>> from sklearn import datasets
            >>> from sklearn import metrics

            >>> X_y = creme.stream.iter_sklearn_dataset(
            ...     load_dataset=datasets.load_breast_cancer,
            ...     shuffle=True,
            ...     random_state=42
            ... )
            >>> optimiser = creme.optim.VanillaSGD()
            >>> model = creme.compose.Pipeline([
            ...     ('scale', creme.preprocessing.StandardScaler()),
            ...     ('learn', creme.linear_model.LogisticRegression(optimiser))
            ... ])
            >>> model = creme.ensemble.BaggingClassifier(model, n_estimators=3)
            >>> metric = metrics.roc_auc_score

            >>> creme.model_selection.online_score(X_y, model, metric)
            0.991497...

    References:

    - `Online Bagging and Boosting <https://ti.arc.nasa.gov/m/profile/oza/files/ozru01a.pdf>`_

    """

    def __init__(self, base_estimator=None, n_estimators=10, random_state=42):
        """Create *n_estimators* independent deep copies of *base_estimator*."""
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        # All members start as identical copies; diversity comes entirely from
        # the Poisson-resampled updates applied in fit_one (online bootstrap).
        self.estimators = [copy.deepcopy(base_estimator) for _ in range(n_estimators)]
        self.rng = utils.check_random_state(random_state)

    def fit_one(self, x, y):
        """Update every member on (x, y) a Poisson(1)-distributed number of times.

        Returns the ensemble probability computed *before* the update
        (prediction is made prior to learning from the observation).
        """
        y_pred = self.predict_proba_one(x)
        for estimator in self.estimators:
            # k ~ Poisson(1) mimics how many times this sample would appear in
            # a bootstrap resample of an infinite stream; k may be 0 (skipped).
            for _ in range(self.rng.poisson(1)):
                estimator.fit_one(x, y)
        return y_pred

    def predict_one(self, x):
        """Return the majority-vote label across all members."""
        votes = collections.Counter((estimator.predict_one(x) for estimator in self.estimators))
        return max(votes, key=votes.get)

    def predict_proba_one(self, x):
        """Return the members' average predicted probability."""
        return sum(estimator.predict_proba_one(x) for estimator in self.estimators) / len(self.estimators)
| [
"maxhalford25@gmail.com"
] | maxhalford25@gmail.com |
c44508356d98bca92ede322dff36b8fe69233790 | 316a07bd7ab47d447606d341c5d221d8318f65b9 | /python-quantumclient/quantumclient/__init__.py | d7a6aab476a7fd877ee47d88223005f3700109e1 | [] | no_license | kumarcv/openstack-nf | 791d16a4844df4666fb2b82a548add98f4832628 | ad2d8c5d49f510292b1fe373c7c10e53be52ba23 | refs/heads/master | 2020-05-20T03:10:54.495411 | 2013-06-16T23:44:11 | 2013-06-16T23:44:11 | 7,497,218 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Citrix Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Tyler Smith, Cisco Systems
import gettext
# gettext must be initialized before any quantumclient imports
gettext.install('quantumclient', unicode=1)
| [
"b37839@freescale.com"
] | b37839@freescale.com |
ee819a6e8372d9aa07f36cdf730a81eaea0c1055 | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Fundamentals/04_Lists/the_office.py | 12c13b6f4c2e6e1f5e8584f7e661696c2d418881 | [] | no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | employees = input().split()
factor = int(input())
employee_happiness = list(map(lambda x: int(x) * factor, employees))
avg_happiness = sum(employee_happiness) / len(employee_happiness)
# above_avg_happy = [employee for employee in employee_happiness if employee >= avg_happiness]
above_avg_happy = list(filter(lambda employee: employee >= avg_happiness, employee_happiness))
if int(len(above_avg_happy)) >= len(employee_happiness) / 2:
print(f'Score: {len(above_avg_happy)}/{len(employee_happiness)}. Employees are happy!')
else:
print(f'Score: {len(above_avg_happy)}/{len(employee_happiness)}. Employees are not happy!')
| [
"rbeecommerce@gmail.com"
] | rbeecommerce@gmail.com |
e0315093c2111b0b43b0c96efd9f3b6ae0dd7d10 | 1a639d185f9c883b7bebf33c577c58b22ac93c7e | /other/sound.py | d69bb732b93754a0a9bbad2d5b75c7350984b2d5 | [] | no_license | gofr1/python-learning | bd09da5b5850b1533a88b858690ed4380b55d33e | 19343c985f368770dc01ce415506506d62a23285 | refs/heads/master | 2023-09-02T15:42:27.442735 | 2021-11-12T10:17:13 | 2021-11-12T10:17:13 | 237,828,887 | 0 | 0 | null | 2021-11-12T10:17:14 | 2020-02-02T20:03:42 | Python | UTF-8 | Python | false | false | 568 | py | #!/usr/bin/env python3
# gTTS (Google Text-to-Speech), a Python library and CLI tool to interface with Google Translate text-to-speech API
# sudo pip3 install gtts
from io import BytesIO
from pygame import mixer
from gtts import gTTS
def speak(text):
    """Synthesize *text* with Google TTS and play it through pygame's mixer.

    Blocks until playback finishes.  Requires network access (gTTS calls the
    Google Translate TTS API) and an audio output device for pygame.
    """
    with BytesIO() as f:
        # Render the MP3 into an in-memory buffer instead of a temp file.
        gTTS(text=text, lang="en").write_to_fp(f)
        f.seek(0)
        mixer.init()
        mixer.music.load(f)
        mixer.music.play()
        # Busy-wait until the mixer reports playback has ended.
        while mixer.music.get_busy():
            continue
if __name__ == '__main__':
    text = input("What should I say? >>")
    speak(text)
"gofr.one@gmail.com"
] | gofr.one@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.