Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 values |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (⌀) |
| gha_language | string | 147 values |
| src_encoding | string | 25 values |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 values |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
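A minimal sketch of loading rows with this schema for inspection (assumes the `datasets` library; the dataset path below is hypothetical, substitute the actual source):

import itertools
from datasets import load_dataset

# stream the rows so the full dump never has to fit in memory
ds = load_dataset("org/python-source-files", split="train", streaming=True)  # hypothetical path
for row in itertools.islice(ds, 3):
    print(row["repo_name"], row["path"], row["length_bytes"], row["license_type"])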
1cd7578ae2c49e8fc2d355e4c3815d9c7ca0f211
|
384efc85a7845caa10c1090c80243a9a29215d8a
|
/02_RE_Objects_Methods/05_findall.py
|
b7b39951a5c4adbf13b0a3637478ed0cd36154dd
|
[] |
no_license
|
Do-code-ing/Python_Module_RE
|
032754e8c5b9e619d83602bdff4b91747b419b21
|
01bdd0202fdc7040971f3132d4cbbbde11175bb9
|
refs/heads/master
| 2023-06-02T22:32:37.543180
| 2021-06-20T05:07:34
| 2021-06-20T05:07:34
| 358,269,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# Pattern.findall(string[, pos[, endpos]])
# Similar to the findall() function, but uses the compiled pattern.
# Like search(), though, it also accepts optional pos and endpos parameters that limit the search region.
import re
p = re.compile("a")
print(p.findall("abcabc"))
# ['a', 'a']
|
[
"zxcvbn658@naver.com"
] |
zxcvbn658@naver.com
|
90ce63a11f08d9fa3007f7ad5fc753a346e978e4
|
88c5045676e32eb7ca1a40d8a4379c7c83f6faca
|
/tests/trinity/integration/test_plugin_discovery.py
|
b462334b35106f5d31d7acd6159c76f62dfeba05
|
[
"MIT"
] |
permissive
|
cd4761/ecc-trinity
|
9983a4f29c9262293379ff03931e652b00f398a3
|
ed27c5c8bc2b94b796e74f271594f138c2aa5b36
|
refs/heads/master
| 2020-06-23T21:23:50.829092
| 2019-07-29T10:45:20
| 2019-07-29T10:45:20
| 198,756,315
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from trinity.plugins.registry import (
discover_plugins
)
# This plugin is external to this code base and installed by tox
# In order to install it locally run:
# pip install -e trinity-external-plugins/examples/peer_count_reporter
from peer_count_reporter_plugin import PeerCountReporterPlugin
def test_plugin_discovery():
plugins = [type(plugin) for plugin in discover_plugins()]
assert PeerCountReporterPlugin in plugins
|
[
"christoph.burgdorf@gmail.com"
] |
christoph.burgdorf@gmail.com
|
3ebf29cec10ec32a1feaddd87586c0b85af2e132
|
90cea58e80309d2dff88f73f3a43ed5f943ff97d
|
/MaxSubArray.py
|
aa062ccb2dac884a1a93196e6e3675cdc0c47d68
|
[] |
no_license
|
SaiSujithReddy/CodePython
|
0b65c82b0e71dba2bbd4c1aefec4e6cd6fd42341
|
4c05b7909092009afffa4536fd284060d20e462d
|
refs/heads/master
| 2022-02-24T09:21:15.284745
| 2019-10-07T23:36:17
| 2019-10-07T23:36:17
| 106,611,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
import sys
def max_sub_array(array):
max_sum = -sys.maxsize - 1
temp_sum = 0
for x in range(len(array)):
print("entered for loop")
if array[x] >= 0:
temp_sum += array[x]
if temp_sum > max_sum:
max_sum = temp_sum
else:
temp_sum += array[x]
            if temp_sum < 0:
                temp_sum = 0
        print(temp_sum, max_sum)
    print(max(max_sum, temp_sum))
# array = [1,2,3,-1,-5,-5,4]
# array = [1,2,3,-1,-5,-5,4,3]
# array = [1]
# array = [0]
# array = [-1]
# array = [-1,-1,3,0,-100,101]
# array = [1000,1,2,3,4,-2]
array = [-100,-99,-9800,1000,-2,2000,-90,-9000]
max_sub_array(array)
|
[
"sai.marapareddy@gmail.com"
] |
sai.marapareddy@gmail.com
|
f9d3e18d03b7ad0e4bff8ac8ad3eda798bb6f2e8
|
5d6365f4cc81272f8c481ee31f1111e8eca6dca5
|
/alipay/aop/api/domain/AlipayCommerceOperationPromoterRankingQueryModel.py
|
f9cf8ced33270bccfe1a08deb642d7a7987eeba1
|
[
"Apache-2.0"
] |
permissive
|
barrybbb/alipay-sdk-python-all
|
9e99b56138e6ca9c0b236707c79899d396ac6f88
|
1b63620431d982d30d39ee0adc4b92463cbcee3c
|
refs/heads/master
| 2023-08-22T20:16:17.242701
| 2021-10-11T08:22:44
| 2021-10-11T08:22:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceOperationPromoterRankingQueryModel(object):
def __init__(self):
self._pid = None
self._type = None
self._user_id = None
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceOperationPromoterRankingQueryModel()
if 'pid' in d:
o.pid = d['pid']
if 'type' in d:
o.type = d['type']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
a40f3a2fe76ca92d94c31c07ec545abcd156d9e2
|
bd696223aaf5404987df11832b4c17c916b9690f
|
/nlp_sample/fugashi_generic_tagger_sample/main.py
|
575cf1bca8948151845f4b8df85265ae91a5b162
|
[] |
no_license
|
wararaki718/scrapbox3
|
000a285477f25c1e8a4b6017b6ad06c76f173342
|
9be5dc879a33a1988d9f6611307c499eec125dc2
|
refs/heads/master
| 2023-06-16T08:46:32.879231
| 2021-07-17T14:12:54
| 2021-07-17T14:12:54
| 280,590,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
from fugashi import Tagger, GenericTagger
def main():
text = 'softbank'
tagger = Tagger()
gtagger = GenericTagger()
print('Tagger:')
print(tagger.parse(text))
for word in tagger(text):
print(word.surface)
print(word.feature)
print()
print('GenericTagger:')
print(gtagger.parse(text))
for word in gtagger(text):
print(word.surface)
print(word.feature)
print()
print('DONE')
if __name__ == '__main__':
main()
|
[
"ky7.ott.w@gmail.com"
] |
ky7.ott.w@gmail.com
|
8fee0875e9f03699c64938e05824c350f5061964
|
b9c7c3433675278dcbd6e52056a299ccd2a2a122
|
/sword/match.py
|
e41aa854102ee47f79dadfacd3c8871d0e16ec4b
|
[] |
no_license
|
smileshy777/practice
|
3d6b8412138c94e75810298bc2dcde52d374826b
|
a0bc7d7fb9fe2db958c3ee2671df927ce136ecff
|
refs/heads/master
| 2020-03-31T15:44:45.868628
| 2018-11-29T05:08:02
| 2018-11-29T05:08:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
class Solution:
    # s and pattern are both strings
def match(self, s, pattern):
len_s = len(s)
len_p = len(pattern)
if len_s == 0 and len_p == 0:
return True
if len_s > 0 and len_p == 0:
return False
if len_p > 1 and pattern[1] == '*':
if len_s > 0 and (s[0] == pattern[0] or pattern[0] == '.'):
return self.match(s, pattern[2:]) or self.match(s[1:], pattern[2:]) or self.match(s[1:], pattern)
else:
return self.match(s, pattern[2:])
if len_s > 0 and (s[0] == pattern[0] or pattern[0] == '.'):
return self.match(s[1:], pattern[1:])
return False
|
[
"13141462249@163.com"
] |
13141462249@163.com
|
790b229755e6c18385e27601143ce131475c697b
|
a1cd1135cd7bc3255e29632fe6c025cffd231285
|
/rpc/retrying.py
|
34ab31def9d6707643ec0c70eb3fa9ad7a504739
|
[] |
no_license
|
liguopeng80/gcommon.py27
|
5f8d3ac9fe85c7134cfbb557ec06a61184b58fd1
|
900cd0717c7a9db90793752fd5cbf9a576286497
|
refs/heads/master
| 2023-08-11T16:01:16.566945
| 2021-10-10T07:08:54
| 2021-10-10T07:08:54
| 404,542,040
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,579
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2015-05-19
"""对失败的操作进行重试
TODO: 为 RPC 调用增加超时检测 - 当 rabbitmq 或者 rpc server 出现问题时,客户端不能长久等待。
"""
import time
import logging
from twisted.internet.defer import inlineCallbacks, maybeDeferred, returnValue
from gcommon.rpc import RpcServerException
from gcommon.utils.counters import Sequence
from gcommon.utils.timer import AsyncTimer
logger = logging.getLogger('rpc')
def patch_thrift_client(retrying, client):
"""为 thrift client 的类成员函数(RPC 接口函数)增加 retry 功能。
client -> Thrift Client class (not object).
"""
client_interface = client.__implemented__.declared[0]
for name in client_interface._InterfaceClass_attrs:
value = getattr(client, name, None)
if callable(value):
new_value = retrying.rpc_retry(value)
setattr(client, name, new_value)
class TwistedRetrying(object):
"""重试异步操作"""
RETRY_INTERVAL = 1.1
STOP_MAX_RETRY_TIMES = 4
STOP_MAX_DELAY = 10
_sequence = Sequence()
def __init__(self, identifier='', retry_interval=0, max_retry_times=0, max_delay=0):
self.retry_interval = retry_interval or self.RETRY_INTERVAL
self.max_retry_times = max_retry_times or self.STOP_MAX_RETRY_TIMES
self.max_delay = max_delay or self.STOP_MAX_DELAY
self._id = self._sequence.next_value()
if identifier:
self._id = "%06d.%s" % (self._id, identifier)
else:
self._id = "%06d" % self._id
def rpc_retry(self, func):
"""Decorator"""
@inlineCallbacks
def __wrap(client_obj, *args, **kwargs):
result = yield self.call(client_obj, func, *args, **kwargs)
returnValue(result)
__wrap.__name__ = func.__name__
return __wrap
@inlineCallbacks
def call(self, client_obj, func, *args, **kwargs):
"""带有重试功能的 RPC 调用
client_obj -> RPC client 实例
func -> 未绑定的 RPC client 成员函数
"""
member_func = func.__get__(client_obj)
result = yield self.call_member_func(member_func, *args, **kwargs)
returnValue(result)
@inlineCallbacks
def call_member_func(self, func, *args, **kwargs):
"""带有重试功能的 RPC 调用"""
start_time = int(round(time.time() * 1000))
attempt_number = 0
while True:
attempt_number += 1
try:
logger.debug('[%s] try rpc request: %s, %s, args: %s', attempt_number, self._id, func.__name__, args)
result = yield maybeDeferred(func, *args, **kwargs)
except RpcServerException, e:
logger.warn('[%s] server error on rpc request: %s, error: %s', self._id, func.__name__, e)
yield AsyncTimer.start(self.retry_interval)
                # check whether another retry is still allowed
now_time = int(round(time.time() * 1000))
if (now_time - start_time > self.max_delay) or (attempt_number > self.max_retry_times):
raise
else:
continue
except Exception, e:
logger.warn('[%s] unexpected error on rpc request: %s, error: %s', self._id, func.__name__, e)
raise
else:
                logger.debug('[%s] rpc request %s finished with result: %s', self._id, func.__name__, result)
returnValue(result)
|
[
"liguopeng@liguopeng.net"
] |
liguopeng@liguopeng.net
|
fe3d52f03d805c065f4d5d608a4a3edca9d48773
|
739e19aea52a747a818ccaa1e941f11328ca9783
|
/PatternRecognitionPractice/opencv-python/opencv_test0.py
|
c32ce4c4f68f1071569db1fa4e71bb065257ddeb
|
[] |
no_license
|
MoCuishle28/python-practice
|
d12edb4866361f55354da53875475f05c209254c
|
cc557fcdd3fec2cb67efeb1f875b4d7d9d85b5b4
|
refs/heads/master
| 2020-03-28T03:52:36.060049
| 2019-01-19T11:53:15
| 2019-01-19T11:53:15
| 147,677,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,817
|
py
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
SZ=20
bin_n = 16 # Number of bins
affine_flags = cv.WARP_INVERSE_MAP|cv.INTER_LINEAR
# deskew the image using its second-order moments
def deskew(img):
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    # translate the image; args: input image, transform matrix, output size
img = cv.warpAffine(img, M, (SZ, SZ), flags=affine_flags)
return img
def hog(img):
    '''Extract contour information with a Sobel filter first, then build a histogram over it'''
    # compute the image's Sobel derivatives (gradient filters) in the X and Y directions
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
    mag, ang = cv.cartToPolar(gx, gy) # convert Cartesian coordinates to polar → magnitude, angle
bins = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists) # hist is a 64-dimensional vector
return hist
img = cv.imread('digits.png',0)
if img is None:
raise Exception("we need the digits.png image from samples/data here !")
cells = [np.hsplit(row,100) for row in np.vsplit(img,50)] # split into 50 rows, then each row into 100 cells
# split into training and test halves
train_cells = [ i[:50] for i in cells ]
test_cells = [ i[50:] for i in cells]
deskewed = [list(map(deskew, row)) for row in train_cells] # deskew every training sample
hogdata = [list(map(hog, row)) for row in deskewed] # extract feature values
trainData = np.float32(hogdata).reshape(-1,64)
# show the feature values
print(trainData[0].shape)
print(trainData[0])
pca = PCA(n_components=2, svd_solver='arpack')
test_pca = trainData
pca_mat = pca.fit_transform(test_pca)
np.savetxt("trainMat.txt", pca_mat)
pca_3d = PCA(n_components=3, svd_solver='arpack')
test_pca_3d = trainData
pca_mat = pca_3d.fit_transform(test_pca_3d)
np.savetxt("trainMat_3D.txt", pca_mat)
responses = np.repeat(np.arange(10),250)[:,np.newaxis]
np.savetxt("labels.txt", responses)
svm = cv.ml.SVM_create()
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setType(cv.ml.SVM_C_SVC)
svm.setC(2.67)
svm.setGamma(5.383)
svm.train(trainData, cv.ml.ROW_SAMPLE, responses)
svm.save('svm_data.dat')
# preprocess the test set
deskewed = [list(map(deskew,row)) for row in test_cells]
hogdata = [list(map(hog,row)) for row in deskewed]
testData = np.float32(hogdata).reshape(-1,bin_n*4)
result = svm.predict(testData)[1]
mask = result==responses
correct = np.count_nonzero(mask)
print(correct*100.0/result.size)
|
[
"20164706@s.hlju.edu.cn"
] |
20164706@s.hlju.edu.cn
|
e2f402bfc9e62ee0c6c90852bd8a66c383ce4be4
|
c09e0d3dd9105e131b5c9cc0c2076e7103263d9f
|
/bigiq/tests/unit/mock/procenv.py
|
5359764b2fe65685847374c18a884ad5ba39a9e9
|
[] |
no_license
|
gundalow-collections/f5
|
693166aa8f270df37a763084d45d7f318b1c63e4
|
cdd14055c1615225e0050b6e7b47c38513bcd4c6
|
refs/heads/master
| 2020-07-24T05:51:08.722791
| 2019-09-16T20:11:07
| 2019-09-16T20:11:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,728
|
py
|
# (c) 2016, Matt Davis <mdavis@ansible.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible_collections.f5.bigiq.tests.unit.compat import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
"""
context manager that temporarily masks the test runner's values for stdin and argv
"""
real_stdin = sys.stdin
real_argv = sys.argv
if PY3:
fake_stream = StringIO(stdin_data)
fake_stream.buffer = BytesIO(to_bytes(stdin_data))
else:
fake_stream = BytesIO(to_bytes(stdin_data))
try:
sys.stdin = fake_stream
sys.argv = argv_data
yield
finally:
sys.stdin = real_stdin
sys.argv = real_argv
@contextmanager
def swap_stdout():
"""
context manager that temporarily replaces stdout for tests that need to verify output
"""
old_stdout = sys.stdout
if PY3:
fake_stream = StringIO()
else:
fake_stream = BytesIO()
try:
sys.stdout = fake_stream
yield fake_stream
finally:
sys.stdout = old_stdout
class ModuleTestCase(unittest.TestCase):
def setUp(self, module_args=None):
if module_args is None:
module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
|
[
"brian.coca+git@gmail.com"
] |
brian.coca+git@gmail.com
|
5e33b27fa72491228a4057a288364b7805bb6d9e
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/자료구조_20200628162237.py
|
fbca173ec511a428e941dea3044ae6928b57b31c
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279
| 2021-02-25T12:02:04
| 2021-02-25T12:02:04
| 342,230,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
# converting between data structures
# coffee shop menu
menu = {"커피", "우유", "주스"}  # coffee, milk, juice
print(menu, type(menu))
menu = list(menu)
print(menu, type(menu))
menu = tuple(menu)
print(menu)
|
[
"sangha0719@gmail.com"
] |
sangha0719@gmail.com
|
3686c5bf4ae77bfa15f56b8cddff65549105c820
|
ded0c895f6e1f8853f2222ae498bdc7ae52ef0e4
|
/week-09/colori_quad.py
|
727580ef3a1faf286bfeb82c09060522fe57c161
|
[] |
no_license
|
LorenzoLMP/TD2015
|
6a8846b4592b32db81338b8522a10a2dc52531c1
|
e39b51d48149d07c3cea682a02eeec4e69ffbabd
|
refs/heads/master
| 2021-01-17T14:47:20.378339
| 2016-05-31T11:33:50
| 2016-05-31T11:33:50
| 43,302,288
| 0
| 0
| null | 2015-09-28T13:58:46
| 2015-09-28T13:23:45
| null |
UTF-8
|
Python
| false
| false
| 4,251
|
py
|
from pylab import *
from scipy import *
from scipy import optimize
from scipy import misc
import math
data = genfromtxt('col_mon_precisi_3.txt')
coeff = genfromtxt('coeffic_matrix_315')
xdata = data[:,0]
ydata = data[:,1]
cr = coeff[:,0]
cg = coeff[:,1]
cb = coeff[:,2]
#zdata = sfasa
#########################################
#r = (ydata[1348] + ydata[1347])/2
r = 0.0075
#g = (ydata[922] + ydata[923] + ydata[924] + ydata[925])/4
g = 0.0087
#b = (ydata[1131] + ydata[1132] + ydata[1133] + ydata[1134])/4
b = 0.00573
print('signal red = ', r)
print('signal green = ', g)
print('signal blue = ', b)
################################################
l = 718
ydata_norm = []
ydata_norm2 = []
for i in range(630):
ydata_norm.append( r*(cr[math.modf(i/2)[1]])**2 + g*(cg[math.modf(i/2)[1]])**2 + b*(cb[math.modf(i/2)[1]])**2 )
ydata_norm2.append( (r*(cr[math.modf(i/2)[1]])**2 + g*(cg[math.modf(i/2)[1]])**2 + b*(cb[math.modf(i/2)[1]])**2)/( (cr[math.modf(i/2)[1]])**2 + (cg[math.modf(i/2)[1]])**2 + (cb[math.modf(i/2)[1]])**2) )
ydata_norm = array(ydata_norm)
ydata_norm2 = array(ydata_norm2)
xdata_norm = []
for i in range(630):
xdata_norm.append(l+i)
xdata_norm = array(xdata_norm)
xdata1 = []
for i in range(630):
xdata1.append(xdata[i+718])
#xdata1.append(xdata[i+718]*(1 - 30*i/(600*1348)) )
xdata1 = array(xdata1)
ydata1 = []
for i in range(630):
ydata1.append(ydata[i+718])
ydata1 = array(ydata1)
sig_norm = []
for i in range(630):
sig_norm.append(ydata1[i]/( (cr[math.modf(i/2)[1]])**2 + (cg[math.modf(i/2)[1]])**2 + (cb[math.modf(i/2)[1]])**2 ) )
sig_norm = array(sig_norm)
##############################################
def fitfunc(x, *par):
return par[0]*10**(-27) + par[1]*10**(-25)*x
p0 = [1, 6] # set the initial value here
##def fitfunc(x, *par):
## return par[0]*10**(-25)*x
##
##p0 = [5] # set the initial value here
rc('font', size=15)
#xlabel(r'$frequenza [Hz]$')
#ylabel(r'$Gain $')
#minorticks_on()
#Attivare per scala bilog
#xscale('log')
#yscale('log')
#xlim(80,30000)
#ylim(35,103)
############################################################
#Parte per plot dati
#grid('on', which = "both")
#title("Bode Diagram Gain-Phase", size = 15)
plot(xdata, ydata, linestyle="None",marker=".", color="black", markersize= 9, label='dati sperimentali')
#plot(xdata1, sig_norm, linestyle="None",marker=".", color="red", markersize= 9, label='dati sp. norm.')
#plot(xdata_norm, ydata_norm, linestyle="None",marker=".", color="green", markersize= 9, label='modello quadratico')
plot(xdata_norm, ydata_norm2, linestyle="None",marker=".", color="blue", markersize= 9, label='modello normalizzato')
#plot(xdata1, ydata1, linestyle="None",marker=".", color="brown", markersize= 10)
title("Photovoltaic cell - response from monitor (quad)", size = 15)
annotate("red",
xy=(718, 0.0075), xycoords='data',
xytext=(750, 0.005), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.4", color="r"),
)
annotate("green",
xy=(926, 0.0086), xycoords='data',
xytext=(900, 0.0055), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.4", color="g"),
)
annotate("blue",
xy=(1130, 0.0055), xycoords='data',
xytext=(1100, 0.0040), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.4", color="b"),
)
legend()
#errorbar(xdata, ydata, sigmay, sigmax, linestyle="None", color="black")
#xscale('log')
xlim(690,1370)
ylim(0.001,0.022)
xlabel(r'time $ [ms]$')
ylabel(r'Tensione $ [V]$')
grid('on', which = "both")
out_file = open('intens_screen_week9.txt', 'w')
out_file.write("#t(s)\tintens\n")
i= 0
while i< len(xdata_norm):
out_file.write("%s\t%s\n"%((xdata_norm[i]-718)/2, ydata_norm2[i]))
i = i+2
out_file.close()
savefig('dati_normal.png', dpi=400)
show()
|
[
"lorenzo.perrone.lmp@gmail.com"
] |
lorenzo.perrone.lmp@gmail.com
|
6bcffb1fb992aa4432f982b6faf84cacf761eb87
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2824/60891/284880.py
|
8c2661ffda0fed50b0e0e907e437fd1d941f63fa
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
n_t_c = [int(i) for i in input().split()]
n = n_t_c[0]
t = n_t_c[1]
c = n_t_c[2]
p = [int(i) for i in input().split()]
list_index = []
for i in range(n):
if p[i] > t:
list_index.append(i)
list_num = []
if len(list_index) == 0:
ans = n - (c - 1)
else:
list_num = [list_index[0] - 0]
for i in range(len(list_index) - 1):
list_num.append(list_index[i + 1] - list_index[i] - 1)
ans = 0
for i in list_num:
if i>=c:
ans += i - (c - 1)
print(ans)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
882da9b06c3c8215bad47901f52fb0b1a2f538be
|
9e201dfe87446274995add9a1436d392ced616c9
|
/draco2/draco/__init__.py
|
0d19522fdb4d56bebbbeb78c05eaa465d8784dc1
|
[
"MIT"
] |
permissive
|
geertj/draco2
|
9da00f68016a16a82be9c7556e08ca06611bba9b
|
3a533d3158860102866eaf603840691618f39f81
|
refs/heads/master
| 2021-01-01T06:45:37.111786
| 2007-04-30T13:37:00
| 2007-04-30T13:37:00
| 2,787,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 415
|
py
|
# vi: ts=8 sts=4 sw=4 et
#
# __init__.py: draco2.draco package definition
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
|
[
"geertj@gmail.com"
] |
geertj@gmail.com
|
0a724a2e65e58755be2cbd93ecc23710dc2da8e5
|
0d4139330dda389664df2e79b397f8153e6c1189
|
/backend/site_management_25562/urls.py
|
726931ba898e10c60a17c2b354ef452b4cc09a43
|
[] |
no_license
|
crowdbotics-apps/site-management-25562
|
d7a4d08ed286838814d892096510ca642e59a574
|
704d21f8eeec8f4bc20d4478e34e5196e76c03d0
|
refs/heads/master
| 2023-04-08T11:40:14.414118
| 2021-04-08T18:13:21
| 2021-04-08T18:13:21
| 356,007,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
"""site_management_25562 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Site_Management"
admin.site.site_title = "Site_Management Admin Portal"
admin.site.index_title = "Site_Management Admin"
# swagger
api_info = openapi.Info(
title="Site_Management API",
default_version="v1",
description="API documentation for Site_Management App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
edcb661a53d5e24b760420759edcc68d6943de55
|
0523136530c8caf2a7aacbc52aae43dc8998ca18
|
/ship.py
|
5b3fb775270d8b76d0692d8664f7cd786c3d7505
|
[] |
no_license
|
irfan87/alien_invasion_pygame
|
1d90eb1914e16a84b8318af92dd102f3a4201313
|
71587cbb13d5ea157e325e19c439ceb94c029d5d
|
refs/heads/master
| 2020-07-06T17:13:44.520471
| 2019-08-20T03:12:58
| 2019-08-20T03:12:58
| 203,088,226
| 0
| 0
| null | 2019-08-20T03:12:59
| 2019-08-19T03:00:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,519
|
py
|
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
# a class to manage the ship
def __init__(self, ai_game):
        # initialize the ship and set its starting position
super().__init__()
self.screen = ai_game.screen
self.screen_rect = ai_game.screen.get_rect()
self.settings = ai_game.settings
# load the space ship image
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
# start each new ship at the bottom center of the screen
self.rect.midbottom = self.screen_rect.midbottom
# store a decimal value for the ship's horizontal position
self.x = float(self.rect.x)
# movement flag
self.moving_right = False
self.moving_left = False
def update(self):
# update the ship's position based on the movement flag
        # update the ship's x value, not the rect
if self.moving_right and self.rect.right < self.screen_rect.right:
self.x += self.settings.ship_speed
if self.moving_left and self.rect.left > 0:
self.x -= self.settings.ship_speed
        # update the rect object from self.x
self.rect.x = self.x
def center_ship(self):
# center the ship on the screen
self.rect.midbottom = self.screen_rect.midbottom
self.x = float(self.rect.x)
def blitme(self):
# draw the ship at its current location
self.screen.blit(self.image, self.rect)
|
[
"nerve2009@yahoo.com"
] |
nerve2009@yahoo.com
|
bb73179a28bb142c274961bea46d714b9cd87d26
|
923a3f7be34e10931936823df0740d5d845d26e5
|
/Courses/MCCT2009/Intro/execute_request.py
|
ff0b0046dc6b4bed302c6269c14964844dc8cb8d
|
[] |
no_license
|
o-smirnov/public-documents
|
0572ccef548a321e70b8cad2e2f2c249926f017d
|
9e758ddf375c0f748376d2e37d0fea9661ed7c37
|
refs/heads/master
| 2021-01-15T23:40:03.183127
| 2015-06-04T17:18:22
| 2015-06-04T17:18:22
| 31,723,928
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
# file: ../beginners_guide/execute_request.py
from Timba.TDL import *
from Timba.Meq import meq
request_counter = 0
#------------------------------------------------------------------------
TDLRuntimeMenu("Parameters of the Request domain:",
TDLOption('ropt_num_freq', 'nr of freq channels',
[10,11,1,20,50,100], more=int,
doc='nr of domain cells in the freq direction'),
TDLOption('ropt_num_time', 'nr of time channels',
[11,10,1,20,50,100], more=int,
doc='nr of domain cells in the time direction'),
TDLMenu("time size:",
TDLOption('ropt_t1', 'start time (s)',
[1.0,0.0,-1.0], more=float,
doc='min time (s) of the domain (edge)'),
TDLOption('ropt_t2', 'stop time (s)',
[10.0,1.0], more=float,
doc='max time (s) of the domain (edge)'),
),
TDLMenu("freq size:",
TDLOption('ropt_f1', 'start freq (Hz)',
[1.0], more=float,
doc='min freq (Hz) of the domain (edge)'),
TDLOption('ropt_f2', 'stop freq (Hz)',
[11.0], more=float,
doc='max freq (Hz) of the domain (edge)'),
)
)
#------------------------------------------------------------------------
def execute_request (mqs, node,
# f1=None, f2=None, t1=None, t2=None,
# num_freq=None, num_time=None,
freq_offset=0.0, time_offset=0.0,
parent=None, trace=False):
"""
Execute the given node with the specified time-freq domain (size and cells)
The (optional) freq and time offsets are fractions of the domain size.
"""
foffset = (ropt_f1-ropt_f2)*freq_offset
toffset = (ropt_t1-ropt_t2)*time_offset
domain = meq.domain(ropt_f1+foffset, ropt_f2+foffset,
ropt_t1+toffset, ropt_t2+toffset)
cells = meq.cells(domain, num_freq=ropt_num_freq,
num_time=ropt_num_time)
global request_counter
request_counter += 1
rqid = meq.requestid(request_counter)
request = meq.request(cells, rqid=rqid)
result = mqs.execute(node, request)
return None
#------------------------------------------------------------------------
|
[
"osmirnov@gmail.com"
] |
osmirnov@gmail.com
|
5d6c44da5a001474f63de5209baa264dfce32af8
|
353626e216085601f8be641be4c775a563fdc95e
|
/word_discovery.py
|
c4428bb88d5996cf78f774e79fa5218f517f8dff
|
[] |
no_license
|
houking-can/english-word-discovery
|
148729a4cda980b1b6a67ef49e6a934ffc53ac90
|
1c6276642ac360f411c9841daa2edd4bc4b959d2
|
refs/heads/master
| 2020-09-24T18:59:44.044460
| 2019-12-04T08:56:40
| 2019-12-04T08:56:40
| 225,821,558
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,307
|
py
|
# ! -*- coding: utf-8 -*-
import struct
import os
import math
import logging
from collections import Counter
logging.basicConfig(level=logging.INFO, format=u'%(asctime)s - %(levelname)s - %(message)s')
from tqdm import tqdm
class KenlmNgrams:
"""加载Kenlm的ngram统计结果
vocab_file: Kenlm统计出来的词(字)表;
ngram_file: Kenlm统计出来的ngram表;
order: 统计ngram时设置的n,必须跟ngram_file对应;
min_count: 自行设置的截断频数。
"""
def __init__(self, vocab_file, ngram_file, order, min_count):
self.vocab_file = vocab_file
self.ngram_file = ngram_file
self.order = order
self.min_count = min_count
self.read_chars()
self.read_ngrams()
def read_chars(self):
f = open(self.vocab_file, encoding='utf-8')
chars = f.read()
f.close()
chars = chars.split('\x00')
self.chars = chars
def read_ngrams(self):
"""读取思路参考https://github.com/kpu/kenlm/issues/201
"""
self.ngrams = [{} for _ in range(self.order)]
self.total = 0
size_per_item = self.order * 4 + 8
f = open(self.ngram_file, 'rb')
filedata = f.read()
filesize = f.tell()
f.close()
for i in tqdm(range(0, filesize, size_per_item)):
s = filedata[i: i + size_per_item]
n = self.unpack('l', s[-8:])
if n >= self.min_count:
self.total += n
c = [self.unpack('i', s[j * 4: (j + 1) * 4]) for j in range(self.order)]
c = [self.chars[j] for j in c if j > 2]
for j in range(len(c)):
self.ngrams[j][' '.join(c[:j + 1])] = self.ngrams[j].get(' '.join(c[:j + 1]), 0) + n
def unpack(self, t, s):
return struct.unpack(t, s)[0]
def write_corpus(texts, corpus):
"""将语料写到文件中,词与词(字与字)之间用空格隔开
"""
print('exporting corpus...')
with open(corpus, 'w', encoding='utf-8') as f:
for s in texts:
f.write(s)
def write_vocab(corpus, vocab):
print('writing vocab...')
tmp = open(corpus, encoding='utf-8').read().split()
words = []
for w in tqdm(tmp):
        w = w.strip(".,()'?!")  # strip surrounding punctuation in one pass
words.append(w)
words = list(Counter(words).items())
words.sort(key=lambda k: k[1], reverse=True)
with open(vocab, 'w', encoding='utf-8') as f:
for w in words:
# f.write(w[0] + ' ' + str(w[1])+'\n')
f.write(w[0] + '\n')
def count_ngrams(corpus_file, order, vocab_file, ngram_file):
"""通过os.system调用Kenlm的count_ngrams来统计频数
"""
return os.system(
'/home/yhj/paper/ijcai-2020/kenlm/build/bin/count_ngrams -o %s --write_vocab_list %s <%s >%s'
% (order, vocab_file, corpus_file, ngram_file)
)
def filter_ngrams(ngrams, total, min_pmi=1):
"""通过互信息过滤ngrams,只保留“结实”的ngram。
"""
order = len(ngrams)
if hasattr(min_pmi, '__iter__'):
min_pmi = list(min_pmi)
else:
min_pmi = [min_pmi] * order
output_ngrams = set()
total = float(total)
for i in range(order - 1, 0, -1):
print('order: ', i)
for w, v in tqdm(ngrams[i].items()):
w = w.split(' ')
pmi = min([
total * v / (ngrams[j].get(' '.join(w[:j + 1]), total) * ngrams[i - j - 1].get(' '.join(w[j + 1:]),
total))
for j in range(i)
])
if math.log(pmi) >= min_pmi[i]:
output_ngrams.add(' '.join(w))
return output_ngrams
class SimpleTrie:
"""通过Trie树结构,来搜索ngrams组成的连续片段
"""
def __init__(self):
self.dic = {}
self.end = True
def add_word(self, word):
_ = self.dic
for c in word:
if c not in _:
_[c] = {}
_ = _[c]
_[self.end] = word
    def tokenize(self, sent):  # tokenize the sentence by longest match
result = []
start, end = 0, 1
for i, c1 in tqdm(enumerate(sent), total=len(sent)):
_ = self.dic
if i == end:
result.append(sent[start: end])
start, end = i, i + 1
for j, c2 in enumerate(sent[i:]):
if c2 in _:
_ = _[c2]
if self.end in _:
if i + j + 1 > end:
end = i + j + 1
else:
break
result.append(sent[start: end])
return result
def filter_vocab(candidates, ngrams, order):
"""通过与ngrams对比,排除可能出来的不牢固的词汇(回溯)
"""
result = {}
for i, j in candidates.items():
if len(i) < 3:
result[i] = j
elif len(i) <= order and i in ngrams:
result[i] = j
elif len(i) > order:
flag = True
for k in range(len(i) + 1 - order):
if i[k: k + order] not in ngrams:
flag = False
if flag:
result[i] = j
return result
# ======= The algorithm is in place; now run the full vocabulary-building pipeline =======
import re
import glob
import json
# Corpus generator that also does some light preprocessing.
# The details of this example generator don't matter; just know that it yields the text piece by piece.
def text_generator():
txts = [os.path.join('./data', each) for each in os.listdir('./data')]
for txt in txts:
d = open(txt, encoding='utf-8').read()
d = d.split('\n')
res = ''
for line in d:
if '\t' in line:
line = line.split('\t')[1]
            line = line.rstrip('.')
            line = line.strip()
res += line + ' '
yield res
min_count = 4
order = 6
corpus_file = 'scierc.corpus' # file the corpus is saved to
vocab_file = 'scierc.vocab' # file the character set is saved to
ngram_file = 'scierc.ngrams' # file the ngram set is saved to
output_file = 'scierc.phrase' # file the final vocabulary is exported to
chars_file = 'scierc.chars'
ngrams_json = 'scierc.ngrams.json'
# write_corpus(text_generator(), corpus_file) # dump the corpus as plain text
# write_vocab(corpus_file, vocab_file)
# count_ngrams(corpus_file, order, chars_file, ngram_file) # count ngrams with Kenlm
# ngrams = KenlmNgrams(chars_file, ngram_file, order, min_count)
# json.dump({'ngrams': ngrams.ngrams, 'total': ngrams.total}, open(ngrams_json, 'w'), indent=4)
ngrams = json.load(open(ngrams_json))
ngrams = filter_ngrams(ngrams['ngrams'], ngrams['total'], [0, 2, 4, 6, 8, 10]) # filter the ngrams
ngrams = list(ngrams)
ngrams.sort(key=lambda k: (k, len(k)))
with open(output_file, 'w') as f:
f.write('\n'.join(ngrams))
# ngtrie = SimpleTrie() # build the trie of ngrams
# print('build ngram trie...')
# for w in tqdm(ngrams):
# _ = ngtrie.add_word(w)
#
# candidates = {} # collect candidate words
# print('discovering words...')
# txts = [os.path.join('./data', each) for each in os.listdir('./data')]
# for txt in txts:
# d = open(txt, encoding='utf-8').read()
# d = d.replace(u'\u3000', ' ').strip()
# d = re.sub(u'[^\u4e00-\u9fa50-9a-zA-Z ]+', '\n', d)
# print(txt, 'tokenizing...')
# tokens = ngtrie.tokenize(d)
# print(txt, 'gen candidates...')
# for w in tqdm(tokens): # tokens from the pre-tokenization
# candidates[w] = candidates.get(w, 0) + 1
#
# # frequency filtering
# candidates = {i: j for i, j in candidates.items() if j >= min_count}
# # mutual-information filtering (backtracking)
# candidates = filter_vocab(candidates, ngrams, order)
#
# # write the result file
# with open(output_file, 'w', encoding='utf-8') as f:
# for i, j in sorted(candidates.items(), key=lambda s: -s[1]):
# s = '%s %s\n' % (i, j)
# f.write(s)
|
[
"1240723224@qq.com"
] |
1240723224@qq.com
|
4890cec1b8a3eaa8cba2cc2ab2ac83c56273c669
|
9d4a03990d94e9fb0248ec94875376c82139891a
|
/Python实战:四周实现爬虫系统/week_2/2_1/coed_of_video_test.py
|
3c7ec1007817205d0834c78e477ba2709266b5bf
|
[] |
no_license
|
wenhaoliang/learn-python
|
114744f3c94859f665a998b03c6d5f5d908fb94d
|
bd31daa34cc79f3171a2e195c149af60a7e0ebed
|
refs/heads/master
| 2020-04-16T02:14:36.345629
| 2019-07-06T12:26:23
| 2019-07-06T12:26:23
| 58,053,238
| 7
| 7
| null | 2017-07-21T09:35:55
| 2016-05-04T13:14:26
|
Python
|
UTF-8
|
Python
| false
| false
| 574
|
py
|
import pymongo
client = pymongo.MongoClient('localhost',27017)
walden = client['walden']
sheet_ta = walden['sheet_ta']
path = 'walden.txt'
# with open(path,'r') as f:
# lines = f.readlines()
# for index,line in enumerate(lines):
# data = {
# '序列':index,
# '句子' :line,
# '单词数量':len(line.split())
# }
# sheet_ta.insert_one(data)
# $lt/$lte/$gt/$gte/$ne are equivalent to < / <= / > / >= / != respectively (l = less, g = greater, e = equal, n = not)
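# A hypothetical usage example: sheet_ta.find({'单词数量': {'$gt': 5}}) returns the documents whose word count exceeds 5.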
for item in sheet_ta.find():
print(item)
|
[
"641541452@qq.com"
] |
641541452@qq.com
|
9aab9e36ef99567c70e4715d279ae9ef813a672e
|
04d8f0b5a291ec6c3470f4498dd64ab9c1845f96
|
/library/built-in/database/dbm/ex.py
|
e2347d6fe6f3c862de6cdd12869a7f251d5b925c
|
[] |
no_license
|
volitilov/Python_learn
|
8c0f54d89e0ead964320d17eeddeacd5b704b717
|
f89e52655f83a9f1105689f0302ef5b0ee30a25c
|
refs/heads/master
| 2022-01-10T13:39:59.237716
| 2019-07-17T11:39:10
| 2019-07-17T11:39:10
| 70,601,503
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
import dbm
# :::::::::::::::::::::::::::::::::::::::::::::::::::
db = dbm.open('test_db', 'c')
db['color'] = 'yellow'
db['base'] = 'postgresql'
# print(len(db)) # 2
# print(db['base']) # b'postgresql'
db.close()
|
[
"volitilov@gmail.com"
] |
volitilov@gmail.com
|
b3a5bd338a2ef2f1cffb1d0d5665b8859fd3b5f5
|
d785e993ed65049c82607a1482b45bddb2a03dda
|
/nano2017/cfg2018/WZTo3LNu_0Jets_MLL-4to50_cfg.py
|
78a3135cc48d53686e3d778eb84aa962dc40c43e
|
[] |
no_license
|
PKUHEPEWK/ssww
|
eec02ad7650014646e1bcb0e8787cf1514aaceca
|
a507a289935b51b8abf819b1b4b05476a05720dc
|
refs/heads/master
| 2020-05-14T04:15:35.474981
| 2019-06-28T23:48:15
| 2019-06-28T23:48:15
| 181,696,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
config.section_("General")
config.General.requestName = 'WZTo3LNu_0Jets_MLL-4to50_2018'
config.General.transferLogs= False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script_2018.sh'
config.JobType.inputFiles = ['crab_script_2018.py','ssww_keep_and_drop_2018.txt','ssww_output_branch_selection_2018.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/WZTo3LNu_0Jets_MLL-4to50_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIAutumn18NanoAODv4-Nano14Dec2018_102X_upgrade2018_realistic_v16-v1/NANOAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 20
config.Data.totalUnits = -1
config.Data.outLFNDirBase ='/store/user/%s/nano2018_v0' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'WZTo3LNu_0Jets_MLL-4to50_2018'
config.section_("Site")
config.Site.storageSite = "T2_CN_Beijing"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
|
[
"jiexiao@pku.edu.cn"
] |
jiexiao@pku.edu.cn
|
b7a171a8ca7594633d34875c4d50705544339839
|
3b13020b492003912b2da62ff29a00e584a63766
|
/examples/porta.py
|
0dd8608dd36b1443ee39805620dc2511b8d38284
|
[
"MIT"
] |
permissive
|
tigertv/secretpy
|
c0d62a2934fa5ac1e07f1c848429fc062e2f2976
|
e464f998e5540f52e269fe360ec9d3a08e976b2e
|
refs/heads/master
| 2021-08-16T00:05:53.089587
| 2021-08-09T23:55:42
| 2021-08-09T23:58:09
| 147,110,283
| 65
| 15
|
MIT
| 2021-05-28T16:49:09
| 2018-09-02T18:15:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import Porta, CryptMachine, alphabets as al
from secretpy.cmdecorators import UpperCase, Block, SaveAll
alphabet = al.GERMAN
plaintext = u"schweißgequältvomödentextzürnttypografjakob"
key = u"schlüssel"
cipher = Porta()
print(plaintext)
enc = cipher.encrypt(plaintext, key, alphabet)
print(enc)
dec = cipher.decrypt(enc, key, alphabet)
print(dec)
#######################################################
def encdec(machine, plaintext):
print("--------------------------------------------------------------------")
print(plaintext)
enc = machine.encrypt(plaintext)
print(enc)
print(machine.decrypt(enc))
cm0 = CryptMachine(cipher, key)
cm = cm0
cm.set_alphabet(al.ENGLISH)
cm.set_key("keys")
plaintext = "I don't love non-alphabet characters. I will remove all of them: ^,&@$~(*;?&#. Great!"
encdec(cm, plaintext)
cm = Block(cm, length=4, sep="::")
plaintext = "This text is divided by blocks of length 5!"
encdec(cm, plaintext)
cm = SaveAll(cm0)
plaintext = "I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!"
encdec(cm, plaintext)
cm.set_alphabet(al.ENGLISH_SQUARE_IJ)
plaintext = "Jj becomes Ii because we use ENGLISH_SQUARE_IJ!"
encdec(cm, plaintext)
cm.set_alphabet(al.JAPANESE_HIRAGANA)
cm.set_key(u"かぎ")
plaintext = u"text いろはにほへと ちりぬるを わかよたれそ つねならむ うゐのおくやま けふこえて あさきゆめみし ゑひもせす !"
encdec(cm, plaintext)
cm = UpperCase(cm)
alphabet = al.GREEK
cm.set_alphabet(alphabet)
cm.set_key(u"κλειδί")
plaintext = u"Θέλει αρετή και τόλμη η ελευθερία. (Ανδρέας Κάλβος)"
encdec(cm, plaintext)
'''
'''
|
[
"maxvetrov555@yandex.ru"
] |
maxvetrov555@yandex.ru
|
3b22ec060a5ee9a4d7e472fcd58efc85a0fa7166
|
05caf48bd067c050666026b75686f23d02327378
|
/190.reverse-bits.py
|
0354dd2a79eead8c8799d5b9bbb18ef9c64541ca
|
[
"MIT"
] |
permissive
|
elfgzp/Leetcode
|
3b6fa307c699fd5a1ba5ea88988c324c33a83eb7
|
964c6574d310a9a6c486bf638487fd2f72b83b3f
|
refs/heads/master
| 2023-08-21T23:11:38.265884
| 2020-10-17T11:55:45
| 2020-10-17T11:55:45
| 168,635,331
| 3
| 0
|
MIT
| 2023-07-21T03:50:43
| 2019-02-01T03:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
#
# @lc app=leetcode.cn id=190 lang=python
#
# [190] Reverse Bits
#
# https://leetcode-cn.com/problems/reverse-bits/description/
#
# algorithms
# Easy (34.07%)
# Total Accepted: 10.9K
# Total Submissions: 28.7K
# Testcase Example: '00000010100101000001111010011100'
#
# Reverse the bits of a given 32-bit unsigned integer.
#
# Example 1:
#
# Input: 00000010100101000001111010011100
# Output: 00111001011110000010100101000000
# Explanation: the input binary string 00000010100101000001111010011100 represents the unsigned
# integer 43261596, so return 964176192, whose binary representation is 00111001011110000010100101000000.
#
# Example 2:
#
# Input: 11111111111111111111111111111101
# Output: 10111111111111111111111111111111
# Explanation: the input binary string 11111111111111111111111111111101 represents the unsigned
# integer 4294967293, so return 3221225471, whose binary representation is 10111111111111111111111111111111.
#
# Note:
#
# In some languages (such as Java) there is no unsigned integer type. In that case both input and
# output are given as signed integers; this should not affect your implementation, because an
# integer's internal binary representation is the same whether it is signed or unsigned.
# In Java, the compiler represents signed integers in two's complement notation. Therefore, in
# Example 2 above, the input represents the signed integer -3 and the output represents the
# signed integer -1073741825.
#
# Follow up:
# If this function is called many times, how would you optimize your algorithm?
#
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
bits = "{:0>32b}".format(n)
return int(bits[::-1], 2)
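# A minimal sketch for the follow-up question (hypothetical, not from the original
# submission): if reverseBits is called many times, cache an 8-bit reversal table
# and reverse the integer one byte at a time.
_REV8 = [int('{:08b}'.format(b)[::-1], 2) for b in range(256)]

def reverse_bits_cached(n):
    # reverse each of the four bytes, then swap the bytes' order
    return (_REV8[n & 0xff] << 24) | (_REV8[(n >> 8) & 0xff] << 16) | \
           (_REV8[(n >> 16) & 0xff] << 8) | _REV8[(n >> 24) & 0xff]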
|
[
"741424975@qq.com"
] |
741424975@qq.com
|
28a5e30cebf3bae89a61faf4acd46d0b226d4ceb
|
8567438779e6af0754620a25d379c348e4cd5a5d
|
/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
|
9f7c39861ca14381afdc1e8db9e3c93347493dcf
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
thngkaiyuan/chromium
|
c389ac4b50ccba28ee077cbf6115c41b547955ae
|
dab56a4a71f87f64ecc0044e97b4a8f247787a68
|
refs/heads/master
| 2022-11-10T02:50:29.326119
| 2017-04-08T12:28:57
| 2017-04-08T12:28:57
| 84,073,924
| 0
| 1
|
BSD-3-Clause
| 2022-10-25T19:47:15
| 2017-03-06T13:04:15
| null |
UTF-8
|
Python
| false
| false
| 8,191
|
py
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationLine
from webkitpy.common.net.layout_test_results import LayoutTestResults
class BuildBotPrinter(object):
# This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
# Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
# and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).
def __init__(self, stream, debug_logging):
self.stream = stream
self.debug_logging = debug_logging
def print_results(self, run_details):
if self.debug_logging:
self.print_run_results(run_details.initial_results)
self.print_unexpected_results(run_details.summarized_full_results, run_details.enabled_pixel_tests_in_retry)
def _print(self, msg):
self.stream.write(msg + '\n')
def print_run_results(self, run_results):
failed = run_results.total_failures
total = run_results.total
passed = total - failed - run_results.remaining
percent_passed = 0.0
if total > 0:
percent_passed = float(passed) * 100 / total
self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
self._print("")
self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
self._print("")
# FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
self._print_run_results_entry(run_results, test_expectations.WONTFIX,
"Tests that will only be fixed if they crash (WONTFIX)")
self._print("")
def _print_run_results_entry(self, run_results, timeline, heading):
total = len(run_results.tests_by_timeline[timeline])
not_passing = (total -
len(run_results.tests_by_expectation[test_expectations.PASS] &
run_results.tests_by_timeline[timeline]))
self._print("=> %s (%d):" % (heading, not_passing))
for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys():
if result in (test_expectations.PASS, test_expectations.SKIP):
continue
results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
if not_passing and len(results):
pct = len(results) * 100.0 / not_passing
self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
passes = {}
flaky = {}
regressions = {}
def add_to_dict_of_lists(dict, key, value):
dict.setdefault(key, []).append(value)
def add_result(result):
test = result.test_name()
actual = result.actual_results().split(" ")
expected = result.expected_results().split(" ")
if result.did_run_as_expected():
# Don't print anything for tests that ran as expected.
return
if actual == ['PASS']:
if 'CRASH' in expected:
add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
elif 'TIMEOUT' in expected:
add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
else:
add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
add_to_dict_of_lists(regressions, actual[0], test)
elif len(actual) > 1 and bool(set(actual[1:]) & set(expected)):
# We group flaky tests by the first actual result we got.
add_to_dict_of_lists(flaky, actual[0], test)
else:
add_to_dict_of_lists(regressions, actual[0], test)
test_results = LayoutTestResults(summarized_results)
test_results.for_each_test(add_result)
if len(passes) or len(flaky) or len(regressions):
self._print("")
if len(passes):
for key, tests in passes.iteritems():
self._print("%s: (%d)" % (key, len(tests)))
tests.sort()
for test in tests:
self._print(" %s" % test)
self._print("")
self._print("")
if len(flaky):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in flaky.iteritems():
result_type = TestExpectations.EXPECTATIONS[key.lower()]
self._print("Unexpected flakiness: %s (%d)" % (descriptions[result_type], len(tests)))
tests.sort()
for test in tests:
result = test_results.result_for_test(test)
actual = result.actual_results().split(" ")
expected = result.expected_results().split(" ")
# FIXME: clean this up once the old syntax is gone
new_expectations_list = [TestExpectationLine.inverted_expectation_tokens[exp]
for exp in list(set(actual) | set(expected))]
self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
self._print("")
self._print("")
if len(regressions):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in regressions.iteritems():
result_type = TestExpectations.EXPECTATIONS[key.lower()]
self._print("Regressions: Unexpected %s (%d)" % (descriptions[result_type], len(tests)))
tests.sort()
for test in tests:
result = test_results.result_for_test(test)
actual = result.actual_results().split(" ")
expected = result.expected_results().split(" ")
new_expectations_list = [TestExpectationLine.inverted_expectation_tokens[exp] for exp in actual]
self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
self._print("")
if len(summarized_results['tests']) and self.debug_logging:
self._print("%s" % ("-" * 78))
|
[
"hedonist.ky@gmail.com"
] |
hedonist.ky@gmail.com
|
ebc6bcb0e32507f21be5c31bd75c0749d8cfa2a2
|
d7532e2ac4983c042f50525aab564597db154719
|
/day2/strings_2/5.py
|
28eea9134e6d119f85cd7e9ebcb7e881cc234697
|
[] |
no_license
|
shobhit-nigam/qti_panda
|
d53195def05605ede24a5108de1dbfbe56cbffe7
|
35d52def5d8ef1874e795a407768fd4a02834418
|
refs/heads/main
| 2023-08-24T14:56:34.934694
| 2021-10-22T09:59:05
| 2021-10-22T09:59:05
| 418,381,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
spain = [34, 67, 12, 55, 89]
sep = '__'
strx = sep.join(str(x) for x in spain)
print(strx)
# another way
# list comprehensions
|
[
"noreply@github.com"
] |
shobhit-nigam.noreply@github.com
|
67b64deafbb427a7ebd867ae2548343252614cb8
|
13ea6fa027c8ae33852bde3335846cdaab78ee71
|
/DataScienceWithPython/sample_python_code/ml/supervised/MNIST-knn.py
|
2b9c0d32ff6783e60788238adaea7dd368a1f00c
|
[] |
no_license
|
dmonisankar/pythonworks
|
c98de04b191135451556ca9d1ee513a0a69f2edb
|
4f3a14460272ec959c2f2e6975814d9ac43cb90a
|
refs/heads/master
| 2023-03-31T00:36:46.016403
| 2020-06-11T05:39:36
| 2020-06-11T05:39:36
| 271,455,493
| 0
| 0
| null | 2021-03-20T04:19:45
| 2020-06-11T05:00:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,736
|
py
|
# Train/Test Split + Fit/Predict/Accuracy
# Now that you have learned about the importance of splitting your data into training and test sets, it's time to practice doing this on the digits dataset! After creating arrays for the features and target variable, you will split them into training and test sets, fit a k-NN classifier to the training data, and then compute its accuracy using the .score() method.
# Instructions
# Import KNeighborsClassifier from sklearn.neighbors and train_test_split from sklearn.model_selection.
# Create an array for the features using digits.data and an array for the target using digits.target.
# Create stratified training and test sets using 0.2 for the size of the test set. Use a random state of 42. Stratify the split according to the labels so that they are distributed in the training and test sets as they are in the original dataset.
# Create a k-NN classifier with 7 neighbors and fit it to the training data.
# Compute and print the accuracy of the classifier's predictions using the .score() method.
# Import necessary modules
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
digits = datasets.load_digits()
# Create feature and target arrays
X = digits.data
y = digits.target
# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=42, stratify=y)
# Create a k-NN classifier with 7 neighbors: knn
knn = KNeighborsClassifier(n_neighbors=7)
# Fit the classifier to the training data
knn.fit(X_train,y_train)
# Print the accuracy
print(knn.score(X_test, y_test))
# -------------------------------------------------------------------------------------------------------------------------------
# Remember the model complexity curve that Hugo showed in the video? You will now construct such a curve for the digits dataset! In this exercise, you will compute and plot the training and testing accuracy scores for a variety of different neighbor values. By observing how the accuracy scores differ for the training and testing sets with different values of k, you will develop your intuition for overfitting and underfitting.
# The training and testing sets are available to you in the workspace as X_train, X_test, y_train, y_test. In addition, KNeighborsClassifier has been imported from sklearn.neighbors.
# Instructions
# Inside the for loop:
# Setup a k-NN classifier with the number of neighbors equal to k.
# Fit the classifier with k neighbors to the training data.
# Compute accuracy scores the training set and test set separately using the .score() method and assign the results to the train_accuracy and test_accuracy arrays respectively.
# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train,y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
|
[
"das.monisankar@gmail.com"
] |
das.monisankar@gmail.com
|
78d27f7b1092241934f1510f40ad8bfc3ece1523
|
8be2c3a2ee48b004f5894899f5b06d2c8a91d044
|
/1290. Convert Binary Number in a Linked List to Integer.py
|
ae85f70de8e54ae6a54c9caa78f230b822a70c87
|
[] |
no_license
|
themockingjester/leetcode-python-
|
8ea8caf047b4ad2ebc63d98278d96f0bdd788a34
|
eda7d6d1d1860c4382b20acfb69e03c648845e72
|
refs/heads/main
| 2023-07-07T10:02:45.796512
| 2021-08-11T03:53:42
| 2021-08-11T03:53:42
| 337,762,767
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def check(self,root):
if root:
self.s+=str(root.val)
self.check(root.next)
def getDecimalValue(self, head: ListNode) -> int:
if head:
self.s = ""
self.check(head)
print(self.s)
return int(self.s,2)
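# --- A minimal usage sketch (added for illustration, not part of the original
# --- solution). It assumes the commented-out ListNode definition above is
# --- uncommented so the type annotation resolves.
if __name__ == '__main__':
    head = ListNode(1, ListNode(0, ListNode(1)))  # the list 1 -> 0 -> 1, i.e. binary 101
    print(Solution().getDecimalValue(head))      # prints the debug string '101', then 5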
|
[
"noreply@github.com"
] |
themockingjester.noreply@github.com
|
288debbd2d6ead371f0da1214457fd35434bc1eb
|
51d0377511a5da902033fb9d80184db0e096fe2c
|
/10-merging-dataframes-with-pandas/3-merging-data/05-merging-dataframes-with-outer-join.py
|
7a31458d2aa2968025b9b9bae577f0e16510903a
|
[] |
no_license
|
sashakrasnov/datacamp
|
c28c6bda178163337baed646220b2f7dcc36047d
|
759f4cec297883907e21118f24a3449d84c80761
|
refs/heads/master
| 2021-12-07T02:54:51.190672
| 2021-09-17T21:05:29
| 2021-09-17T21:05:29
| 157,093,632
| 6
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,734
|
py
|
'''
Merging DataFrames with outer join
This exercise picks up where the previous one left off. The DataFrames revenue, managers, and sales are pre-loaded into your namespace (and, of course, pandas is imported as pd). Moreover, the merged DataFrames revenue_and_sales and sales_and_managers have been pre-computed exactly as you did in the previous exercise.
The merged DataFrames contain enough information to construct a DataFrame with 5 rows with all known information correctly aligned and each branch listed only once. You will try to merge the merged DataFrames on all matching keys (which computes an inner join by default). You can compare the result to an outer join and also to an outer join with restricted subset of columns as keys.
'''
import pandas as pd
revenue = pd.DataFrame({
'branch_id': [10, 20, 30, 47],
'city': ['Austin', 'Denver', 'Springfield', 'Mendocino'],
'revenue': [100, 83, 4, 200],
'state': ['TX', 'CO', 'IL', 'CA']
})
managers = pd.DataFrame({
'branch': ['Austin', 'Denver', 'Mendocino', 'Springfield'],
'branch_id': [10, 20, 47, 31],
'manager': ['Charles', 'Joel', 'Brett', 'Sally'],
'state': ['TX', 'CO', 'CA', 'MO']
})
sales = pd.DataFrame({
'city': [ 'Mendocino', 'Denver', 'Austin', 'Springfield', 'Springfield'],
'state': ['CA', 'CO', 'TX', 'MO', 'IL'],
'units': [1, 4, 2, 5, 1]
})
revenue_and_sales = pd.merge(revenue, sales, how='right', on=['city','state'])
sales_and_managers = pd.merge(sales, managers, how='left', left_on=['city','state'], right_on=['branch','state'])
'''
INSTRUCTIONS
* Merge sales_and_managers with revenue_and_sales. Store the result as merge_default.
* Print merge_default. This has been done for you.
* Merge sales_and_managers with revenue_and_sales using how='outer'. Store the result as merge_outer.
* Print merge_outer. This has been done for you.
* Merge sales_and_managers with revenue_and_sales only on ['city','state'] using an outer join. Store the result as merge_outer_on and hit 'Submit Answer' to see what the merged DataFrames look like!
'''
# Perform the first merge: merge_default
merge_default = pd.merge(sales_and_managers, revenue_and_sales)
# Print merge_default
print(merge_default)
# Perform the second merge: merge_outer
merge_outer = pd.merge(sales_and_managers, revenue_and_sales, how='outer')
# Print merge_outer
print(merge_outer)
# Perform the third merge: merge_outer_on
merge_outer_on = pd.merge(sales_and_managers, revenue_and_sales, on=['city','state'], how='outer')
# Print merge_outer_on
print(merge_outer_on)
'''
> revenue_and_sales
branch_id city revenue state units
0 10.0 Austin 100.0 TX 2
1 20.0 Denver 83.0 CO 4
2 30.0 Springfield 4.0 IL 1
3 47.0 Mendocino 200.0 CA 1
4 NaN Springfield NaN MO 5
> sales_and_managers
city state units branch branch_id manager
0 Mendocino CA 1 Mendocino 47.0 Brett
1 Denver CO 4 Denver 20.0 Joel
2 Austin TX 2 Austin 10.0 Charles
3 Springfield MO 5 Springfield 31.0 Sally
4 Springfield IL 1 NaN NaN NaN
> merge_default
city state units branch branch_id manager revenue
0 Mendocino CA 1 Mendocino 47.0 Brett 200.0
1 Denver CO 4 Denver 20.0 Joel 83.0
2 Austin TX 2 Austin 10.0 Charles 100.0
> merge_outer
city state units branch branch_id manager revenue
0 Mendocino CA 1 Mendocino 47.0 Brett 200.0
1 Denver CO 4 Denver 20.0 Joel 83.0
2 Austin TX 2 Austin 10.0 Charles 100.0
3 Springfield MO 5 Springfield 31.0 Sally NaN
4 Springfield IL 1 NaN NaN NaN NaN
5 Springfield IL 1 NaN 30.0 NaN 4.0
6 Springfield MO 5 NaN NaN NaN NaN
> merge_outer_on
city state units_x branch branch_id_x manager branch_id_y revenue units_y
0 Mendocino CA 1 Mendocino 47.0 Brett 47.0 200.0 1
1 Denver CO 4 Denver 20.0 Joel 20.0 83.0 4
2 Austin TX 2 Austin 10.0 Charles 10.0 100.0 2
3 Springfield MO 5 Springfield 31.0 Sally NaN NaN 5
4 Springfield IL 1 NaN NaN NaN 30.0 4.0 1
'''
|
[
"a@skrasnov.com"
] |
a@skrasnov.com
|
667999762976f5897af604a84543897c3dfe2b68
|
568d7d17d09adeeffe54a1864cd896b13988960c
|
/month03.2/django/day05/mysitel3/otm/migrations/0001_initial.py
|
b086e0cf6fc07d1aa13bbaa6ef12c79c1ce841bb
|
[
"Apache-2.0"
] |
permissive
|
Amiao-miao/all-codes
|
e2d1971dfd4cecaaa291ddf710999f2fc4d8995f
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
refs/heads/main
| 2023-02-24T10:36:27.414153
| 2021-02-01T10:51:55
| 2021-02-01T10:51:55
| 334,908,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 980
|
py
|
# Generated by Django 2.2.12 on 2021-01-13 07:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True, verbose_name='出版社名')),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='书名')),
('publisher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='otm.Publisher')),
],
),
]
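# --- For illustration (added; not part of the generated migration): a models.py
# --- that would produce this migration looks roughly like this.
#
# from django.db import models
#
# class Publisher(models.Model):
#     name = models.CharField(max_length=50, unique=True, verbose_name='出版社名')
#
# class Book(models.Model):
#     name = models.CharField(max_length=50, verbose_name='书名')
#     publisher = models.ForeignKey(Publisher, on_delete=models.CASCADE)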
|
[
"895854566@qq.com"
] |
895854566@qq.com
|
89bd7df5f7a432880d48493b2c552aee2bc579cf
|
a19275ff09caf880e135bce76dc7a0107ec0369e
|
/catkin_ws/src/robot_python/nodes/int_adp_imp_gazebo_node.py
|
507b24e5a419629eee10af64ead79937980c61a4
|
[] |
no_license
|
xtyzhen/Multi_arm_robot
|
e201c898a86406c1b1deb82326bb2157d5b28975
|
15daf1a80c781c1c929ba063d779c0928a24b117
|
refs/heads/master
| 2023-03-21T14:00:24.128957
| 2021-03-10T12:04:36
| 2021-03-10T12:04:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,051
|
py
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
#This script runs the upper-level integral adaptive impedance control simulation
#Author: Chen Yongting
#Copyright: Harbin Institute of Technology
#Date: first draft 2020.1.11
import rospy
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import JointState
from geometry_msgs.msg import WrenchStamped
import threading
import time
import numpy as np
#user-defined modules
from robot_python import ImpedanceControl as imc
from robot_python import integral_adaption_impedance_controller as ia_imc
##global variables
F = np.zeros(6)
qq = np.zeros(7)
##joint angle subscriber callback
def joint_callback(msg):
global qq
global X
global n
for i in range(7):
qq[i] = msg.position[i]
##six-axis force sensor callback
def force_callback(msg):
global F
Fe = np.zeros(6)
Fe[0] = msg.wrench.force.x
Fe[1] = msg.wrench.force.y
Fe[2] = msg.wrench.force.z
Fe[3] = msg.wrench.torque.x
Fe[4] = msg.wrench.torque.y
Fe[5] = msg.wrench.torque.z
    #transform the force into the base frame and update the latest value
F = imc.force_end_to_base(qq, Fe)
##spin thread for the force subscription
def thread_spin():
rospy.spin()
##impedance control loop node
def node_cloop(pub):
    ##control parameters
    global F #latest end-effector force, global variable
    global qq #current joint angles
    Ex = np.zeros([3, 6]) #position errors at the last 3 time steps
    Ef = np.zeros([4, 6]) #end-effector force errors at the last 4 time steps
    T = 0.01 #control period
    Fd = np.zeros(6)
    [Xd, qq_init] = ia_imc.get_init_expect_pos() #desired end pose; could be moved into the loop
    qq_guess = np.copy(qq_init)
    [Md, Bd, Kd, Ki] = ia_imc.get_imc_parameter() #could be moved into the loop to change parameters at run time
rate = rospy.Rate(100)
while not rospy.is_shutdown():
        #update the error between measured and desired end-effector force
Ef[0, :] = Ef[1, :]
Ef[1, :] = Ef[2, :]
Ef[2, :] = Ef[3, :]
Ef[3, :] = F - Fd
        #call the integral adaptive impedance law to compute the correction in real time
        ia_imc.get_current_error(Md, Bd, Kd, Ki,T, Ef, Ex) #updates Ex in place, by reference
        print "current end-effector force: %s" % F
        print "current joint angles: %s" % qq
        print "computed error: %s" % Ex[2,:]
qr = imc.get_current_joint(Xd, Ex[2, :], qq_guess)
command_data = Float64MultiArray()
command_data.data = qr
print "当前command关节角:%s" % qr
pub.publish(command_data)
rate.sleep()
def main():
rospy.init_node('ImpendanceController_node')
rospy.Subscriber('/robot1/joint_states', JointState, joint_callback)
rospy.Subscriber('/robot1/ft_sensor_topic', WrenchStamped, force_callback)
pub = rospy.Publisher('/robot1/armc_position_controller/command', Float64MultiArray, queue_size=10)
    #set up the spin thread
    t1 = threading.Thread(target=thread_spin) # subscriber spin thread
    print "Impedance controller begins running!"
t1.start()
node_cloop(pub)
if __name__ == '__main__':
main()
|
[
"qyz146006@163.com"
] |
qyz146006@163.com
|
1a12a25abec059bc393ea4132c9fce703a963a74
|
d6ea8bd65e7fffc12575737817083d6456eec828
|
/vigenere.py
|
2bb25639d7d300467bcedb4607ff5a9a22628d91
|
[] |
no_license
|
informatiquecsud/cs-simply-utils
|
3888ce47d77e2732044efe555a66c87c25d100e7
|
4c6d82897fbd96f3718f81920a324c379988e403
|
refs/heads/master
| 2022-10-05T02:27:25.209972
| 2022-09-16T07:53:39
| 2022-09-16T07:53:39
| 217,407,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
from utils import alphabet, shift_char
class VigenereCypher:
def __init__(self, key):
self.key = [0] * len(key)
alpha = alphabet()
for i, el in enumerate(key):
if isinstance(el, int):
self.key[i] = el
elif isinstance(el, str):
if el in '0123456789':
self.key[i] = int(el)
elif el == '_':
self.key[i] = el
elif el in alpha:
self.key[i] = ord(el.upper()) - ord('A')
# try:
# int(key)
# self.key = [int(c) for c in key]
# except:
# self.key = [ord(c) - ord('A') for c in key]
def encrypt(self, message, key=None):
key = key or self.key
N = len(key)
result = [''] * len(message)
for i, char in enumerate(message):
            result[i] = shift_char(char, key[i % N]) if key[i % N] != '_' else key[i % N]
return ''.join(result)
def decrypt(self, crypted):
return self.encrypt(crypted, key=[-k if isinstance(k, int) else k for k in self.key])
if __name__ == "__main__":
c = VigenereCypher(key='BC')
assert c.key == [1, 2]
c = VigenereCypher(key='26')
assert c.key == [2, 6]
crypted = c.encrypt('LINFORMATIQUESIMPLEMENT')
print(crypted)
print(c.decrypt('NOPLQXOGVOSAGYKSRRGSGTV'))
crypted = 'OGOPFFJGDNFXFTTVFPHGIGJOTEITJHUGOFFTHGTEIKDJUG'
# for i in range(1, 26):
for i in [1]:
for j in range(1, 26):
c = VigenereCypher(key=[i, j])
print(c.decrypt(crypted))
print(i, j)
message = 'NOMMEZLESCODESSECRETSLESPLUSCELEBRESDELHISTOIRE'
c = VigenereCypher(key=[1, 2])
print(c.encrypt(message))
|
[
"cedonner@gmail.com"
] |
cedonner@gmail.com
|
b81111166f9a39b8a29e524ea7739667bfcca158
|
350db570521d3fc43f07df645addb9d6e648c17e
|
/0355_Design_Twitter/solution.py
|
8d3edca5abd4d8fcb391e591c9019e0f9172a4af
|
[] |
no_license
|
benjaminhuanghuang/ben-leetcode
|
2efcc9185459a1dd881c6e2ded96c42c5715560a
|
a2cd0dc5e098080df87c4fb57d16877d21ca47a3
|
refs/heads/master
| 2022-12-10T02:30:06.744566
| 2022-11-27T04:06:52
| 2022-11-27T04:06:52
| 236,252,145
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
'''
355. Design Twitter
Design a simplified version of Twitter where users can post tweets, follow/unfollow another user, and see
the 10 most recent tweets in the user's news feed. Your design should support the following methods:
postTweet(userId, tweetId): Compose a new tweet.
getNewsFeed(userId): Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
follow(followerId, followeeId): Follower follows a followee.
unfollow(followerId, followeeId): Follower unfollows a followee.
'''
import collections
# https://www.hrwhisper.me/leetcode-design-twitter/
class Twitter(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.tweets_cnt = 0
self.tweets = collections.defaultdict(list)
self.follower_ship = collections.defaultdict(set)
def postTweet(self, userId, tweetId):
"""
Compose a new tweet.
:type userId: int
:type tweetId: int
:rtype: void
"""
self.tweets[userId].append([tweetId, self.tweets_cnt])
self.tweets_cnt += 1
def getNewsFeed(self, userId):
"""
Retrieve the 10 most recent tweet ids in the user's news feed.
Each item in the news feed must be posted by users who the user followed or by the user herself.
Tweets must be ordered from most recent to least recent.
:type userId: int
:rtype: List[int]
"""
recent_tweets = []
user_list = list(self.follower_ship[userId]) + [userId]
userId_tweet_index = [[userId, len(self.tweets[userId]) - 1] for userId in user_list if userId in self.tweets]
        for _ in range(10):  # collect up to the 10 most recent tweets
max_index = max_tweet_id = max_user_id = -1
for i, (user_id, tweet_index) in enumerate(userId_tweet_index):
if tweet_index >= 0:
tweet_info = self.tweets[user_id][tweet_index]
if tweet_info[1] > max_tweet_id:
max_index, max_tweet_id, max_user_id = i, tweet_info[1], user_id
if max_index < 0: break
recent_tweets.append(self.tweets[max_user_id][userId_tweet_index[max_index][1]][0])
userId_tweet_index[max_index][1] -= 1
return recent_tweets
def follow(self, followerId, followeeId):
"""
Follower follows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: void
"""
if followerId != followeeId:
self.follower_ship[followerId].add(followeeId)
def unfollow(self, followerId, followeeId):
"""
Follower unfollows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: void
"""
if followerId in self.follower_ship and followeeId in self.follower_ship[followerId]:
self.follower_ship[followerId].remove(followeeId)
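# --- A minimal usage sketch (added for illustration; it simply mirrors the
# --- example flow from the problem statement above).
if __name__ == '__main__':
    twitter = Twitter()
    twitter.postTweet(1, 5)        # user 1 posts tweet 5
    print(twitter.getNewsFeed(1))  # [5]
    twitter.follow(1, 2)
    twitter.postTweet(2, 6)        # user 2 posts tweet 6
    print(twitter.getNewsFeed(1))  # [6, 5] -- most recent first
    twitter.unfollow(1, 2)
    print(twitter.getNewsFeed(1))  # [5]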
|
[
"bhuang@rms.com"
] |
bhuang@rms.com
|
95612479cdfabbcffd276a39d0b4137bbd5a1c0d
|
62ef47b5e63042c46f2a5dd360bfb3dc65cc611f
|
/geomdl/vis.py
|
5bf49d4e15e5a13a3f41e9ef6283fa2b9c3ceca2
|
[
"MIT",
"Python-2.0"
] |
permissive
|
Mopolino8/NURBS-Python
|
3abdd7266312779cc1e860608c304b5703420ace
|
009089b27b2a8308b1834ba41b429471346b2654
|
refs/heads/master
| 2020-04-17T22:29:48.418346
| 2019-01-18T17:09:19
| 2019-01-18T17:09:19
| 166,996,473
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,589
|
py
|
"""
.. module:: vis
:platform: Unix, Windows
:synopsis: Provides abstract base classes for visualization modules
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
import abc
import six
from typing import Any, Sequence, List
# Initialize an empty __all__ for controlling imports
__all__ = []
@six.add_metaclass(abc.ABCMeta)
class VisConfigAbstract(object):
""" Abstract base class for user configuration of the visualization module
Defines an abstract base for NURBS-Python visualization configuration.
"""
def __init__(self, **kwargs):
# type: (**Any) -> None
pass
@six.add_metaclass(abc.ABCMeta)
class VisAbstract(object):
""" Abstract base class for visualization
Defines an abstract base for NURBS-Python visualization modules.
:param config: configuration class
:type config: VisConfigAbstract
"""
def __init__(self, config):
# type: (VisConfigAbstract()) -> None
if not isinstance(config, VisConfigAbstract):
raise TypeError("Config variable must be an instance of vis.VisAbstractConfig")
self._user_config = config
self._module_config = {'ctrlpts': 'points', 'evalpts': 'points', 'others': None}
self._plots = [] # type: List[dict]
self._ctrlpts_offset = 0.0
def clear(self):
# type: () -> None
""" Clears the points, colors and names lists. """
self._plots[:] = []
def add(self, ptsarr, plot_type, name="", color=""):
# type: (Sequence[Sequence[float]], str, str, str) -> None
""" Adds points sets to the visualization instance for plotting.
:param ptsarr: control or evaluated points
:type ptsarr: list, tuple
:param plot_type: type of the plot, e.g. ctrlpts, evalpts, bbox, etc.
:type plot_type: str
:param name: name of the plot displayed on the legend
:type name: str
:param color: plot color
:type color: str
"""
# ptsarr can be a list, a tuple or an array
if ptsarr is None or len(ptsarr) == 0:
return
# Add points, size, plot color and name on the legend
elem = {'ptsarr': ptsarr, 'name': name, 'color': color, 'type': plot_type}
self._plots.append(elem)
@property
def vconf(self):
# type: () -> VisConfigAbstract()
""" User configuration class for visualization
:getter: Gets the user configuration class
:type: vis.VisConfigAbstract
"""
return self._user_config
@property
def mconf(self):
# type: () -> dict
""" Visualization module internal configuration directives
This property controls the internal configuration of the visualization module. It is for advanced use and
testing only.
The visualization module is mainly designed to plot the control points (*ctrlpts*) and the surface points
(*evalpts*). These are called as *plot types*. However, there is more than one way to plot the control points
and the surface points. For instance, a control points plot can be a scatter plot or a quad mesh, and a
surface points plot can be a scatter plot or a tessellated surface plot.
This function allows you to change the type of the plot, e.g. from scatter plot to tessellated surface plot.
        On the other hand, some visualization modules also define specialized classes for this purpose, as it might
not be possible to change the type of the plot at the runtime due to visualization library internal API
differences (i.e. different backends for 2- and 3-dimensional plots).
By default, the following plot types and values are available:
Curve:
* For control points (*ctrlpts*): points
* For evaluated points (*evalpts*): points
Surface:
* For control points (*ctrlpts*): points, quads, quadmesh
* For evaluated points (*evalpts*): points, quads, triangles
Volume:
* For control points (*ctrlpts*): points
* For evaluated points (*evalpts*): points, voxels
:getter: Gets the visualization module configuration
:setter: Sets the visualization module configuration
"""
return self._module_config
@mconf.setter
def mconf(self, value):
# type: (Sequence[str]) -> None
if not isinstance(value[0], str) or not isinstance(value[1], str):
raise TypeError("Plot type and its value should be string type")
if value[0] not in self._module_config.keys():
raise KeyError(value[0] + " is not a configuration directive. Possible directives: " +
", ".join([k for k in self._module_config.keys()]))
self._module_config[value[0]] = value[1]
@property
def ctrlpts_offset(self):
# type: () -> float
""" Defines an offset value for the control points grid plots
Only makes sense to use with surfaces with dense control points grid.
:getter: Gets the offset value
:setter: Sets the offset value
:type: float
"""
return self._ctrlpts_offset
@ctrlpts_offset.setter
def ctrlpts_offset(self, offset_value):
# type: (float) -> None
self._ctrlpts_offset = float(offset_value)
def size(self, plot_type):
# type: (str) -> int
""" Returns the number of plots defined by the plot type.
:param plot_type: plot type
:type plot_type: str
:return: number of plots defined by the plot type
:rtype: int
"""
count = 0
for plot in self._plots:
if plot['type'] == plot_type:
count += 1
return count
def animate(self, **kwargs):
# type: (**Any) -> None
""" Generates animated plots (if supported).
If the implemented visualization module supports animations, this function will create an animated figure.
Otherwise, it will call :py:meth:`render` method by default.
"""
# Call render() by default
self.render(**kwargs)
@abc.abstractmethod
def render(self, **kwargs):
# type: (**Any) -> None
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstract`` class.
"""
# We need something to plot
if self._plots is None or len(self._plots) == 0:
raise ValueError("Nothing to plot")
|
[
"orbingol@gmail.com"
] |
orbingol@gmail.com
|
81c2bbd50fbc15ba6b4e23ef213f426d6fd669ee
|
1eaa6c2500868d0c60b5b2cd552cd671b635de32
|
/Algorithm/sword of offer/14.链表中倒数第k个结点.py
|
cb3067b70c193a8dddb22fc816f0869477255b87
|
[] |
no_license
|
jiangyuwei666/my-study-demo
|
f85f14a599c328addb5af09078d404f1139e0a82
|
9e2baef2f36f071f8903768adb8d5a5a8c1123f6
|
refs/heads/master
| 2022-04-30T16:47:24.715570
| 2022-03-24T09:08:43
| 2022-03-24T09:08:43
| 152,565,041
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
"""
两个指针
首先两个指针都在头部,然后让第一个指针移动k-1个位置,此时第一个指针指向+k个位置的那个节点。
这个时候再同时移动两个节点当第一个指针移动到最后一个节点是时,第二个指针就指向了-k位置的节点。
"""
class ListNode:
def __init__(self, x):
self.x = x
self.next = None
def init_list(num_list):
node_list = []
for i in num_list:
node = ListNode(i)
node_list.append(node)
for j in range(len(node_list)):
if j == len(node_list) - 1:
return node_list[0]
node_list[j].next = node_list[j + 1]
def print_list(head):
result = []
while head:
result.append(head.x)
head = head.next
return result
def get_value(head, k):
"""
:param head: 传入链表的头节点
:param k:传入k
:return: 返回-k位置的值
"""
if k < 0:
return "你输你🐎个🔨"
first = head
second = head
for i in range(k - 1):
if not first:
return "k比他🐎滴链表还长"
first = first.next # 此时first指向第+k个元素
while first.next:
first = first.next
second = second.next
return second.x
head = init_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
print(get_value(head, 3))
print(get_value(head, -1))
print(get_value(head, 123))
print(get_value(head, 10))
|
[
"739843128@qq.com"
] |
739843128@qq.com
|
b63b7c3218e2814ed3fcd990e0e23b066dfd88dd
|
ecf77933549cb56ebde35df35556accc9684424d
|
/html_to_css.py
|
1ddd48e7590e6bf5d37ee74ba9c3c1f7474dce08
|
[] |
no_license
|
jayd2446/cuda_html_ops
|
3ba102fb87ba9f591fb9cd4ccad7dbc8fab53bf4
|
0fea8430aa057feafac167adbc18f08c647ef099
|
refs/heads/master
| 2020-05-31T12:18:38.938276
| 2018-08-31T09:35:20
| 2018-08-31T09:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
import re
from cudatext import *
REGEX1 = r'\bclass\s*=\s*"(.+?)"'
REGEX2 = r"\bclass\s*=\s*'(.+?)'"
def do_html_to_css_clipboard(compact):
text = ed.get_text_sel()
if not text: return
res = re.findall(REGEX1, text, 0) + re.findall(REGEX2, text, 0)
res = sorted(list(set(res)))
if not res:
msg_status('No CSS classes found')
app_proc(PROC_SET_CLIP, '')
return
text_in = ' ' if compact else '\n\n'
out = ['.'+name+' {'+text_in+'}\n' for name in res]
text = '\n'.join(out)+'\n'
app_proc(PROC_SET_CLIP, text)
msg_status('CSS styles ({}) copied to clipboard'.format(len(res)) )
|
[
"support@uvviewsoft.com"
] |
support@uvviewsoft.com
|
934c03e88433588a4a2cb7d674fb33c1b3da2a36
|
d50bb3387316a4f1a06fe4c84714568a73b2a125
|
/tasks/utils.py
|
b47f431f31a462efa50b11ae15be1623ac038375
|
[] |
no_license
|
pmav99/fastapi_docker_compose
|
8830006876db35d43614a38945581f9868aa31fc
|
491ad3e31fc9ef1ac4306624ba27c945733ac103
|
refs/heads/master
| 2022-12-09T16:33:19.924385
| 2020-07-30T20:22:01
| 2020-07-30T20:22:01
| 249,724,494
| 1
| 0
| null | 2022-12-08T03:52:43
| 2020-03-24T14:06:22
|
Dockerfile
|
UTF-8
|
Python
| false
| false
| 209
|
py
|
import contextlib
import os
import pathlib
@contextlib.contextmanager
def chdir(dirname: str):
curdir = os.getcwd()
try:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
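# --- A minimal usage sketch (added for illustration): the working directory is
# --- changed inside the block and restored afterwards, even on error.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        with chdir(tmp):
            print("inside:", os.getcwd())
        print("after: ", os.getcwd())  # back to the original directory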
|
[
"pmav99@gmail.com"
] |
pmav99@gmail.com
|
1ab6d8949c8d7742e9cf369e05fdc49f447c63d2
|
60096eba428275a28ab53d364aef0b9bc29e71c8
|
/hris/api/users.py
|
712b95f287ba42e8411e092d49989f3123ba0dca
|
[] |
no_license
|
RobusGauli/hris_new
|
30ef8d17aceceb5f6c8f69f65df508228cb31f33
|
634f18d162310df9331543f7a877cac619ee1622
|
refs/heads/master
| 2021-01-19T21:55:39.279378
| 2017-04-29T04:32:38
| 2017-04-29T04:32:38
| 88,724,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,091
|
py
|
from hris.utils import hash_password, gen_access_token, decode_access_token
from flask import request, abort, jsonify, g
from functools import wraps
from hris.api import api
from sqlalchemy.exc import IntegrityError #foreign key violation #this won't come up oftern
from sqlalchemy.orm.exc import NoResultFound
from hris import db_session
#auth
###
from hris.models import (
User,
CompanyDetail,
Employee
)
from hris.api.response_envelop import (
records_json_envelop,
record_exists_envelop,
record_json_envelop,
record_created_envelop,
record_notfound_envelop,
record_updated_envelop,
record_not_updated_env,
fatal_error_envelop,
missing_keys_envelop,
length_require_envelop
)
from hris.api.auth import (
allow_permission,
create_update_permission,
read_permission
)
@api.route('/users', methods=['POST'])
def register_user():
    '''This view registers the user by generating the access token with the given role'''
if request.args and request.args['action'] == 'register':
#check if all key existst
if not set(request.json.keys()) == {'user_name', 'password', 'role_id'}:
return jsonify({'message' : 'missing keys'})
        #check that all string values are long enough
if any(len(val.strip()) < 5 for val in request.json.values() if isinstance(val, str)):
return jsonify({'message' : 'Not adequate length of values'})
#lower case the user_name
user_name = request.json['user_name'].strip().lower()
role_id = request.json['role_id']
hashed_pass = hash_password(request.json['password'].encode())
#get the user access_token
user_access_token = gen_access_token(role_id, user_name)
user = User(user_name=user_name, password=hashed_pass, role_id=role_id, access_token=user_access_token.decode('utf-8'))
try:
db_session.add(user)
db_session.commit()
except IntegrityError as ie:
            #handle the error here
return record_exists_envelop()
else:
return jsonify({'message' : 'user_added_successfully', 'access_token' : user_access_token.decode('utf-8')})
elif request.args['action'] == 'login':
if request.json:
if not set(request.json.keys()) == {'user_name', 'password'}:
return jsonify({'message' : 'missing keys'})
else:
            return jsonify({'message': 'request body must be a JSON object'})
user_name = request.json['user_name']
password = request.json['password']
#now hass the password
hashed_pass = hash_password(password)
#get the user from the users for the password and user name
try:
user = db_session.query(User).filter(User.user_name==user_name).one()
if not user:
return record_notfound_envelop('User doesn\'t exists')
#if there is user check for the password
if hashed_pass == user.password:
return record_json_envelop({'access_token' : user.access_token, 'activate' : user.activate, 'role_id' : user.role_id, 'permissions' : user.role.to_dict()})
else:
return record_notfound_envelop('Password doesn\'t match')
except NoResultFound as e:
return record_notfound_envelop('User doesn\'t exists')
###to register the user with the employee
elif request.args['action'] == 'registeruserforemployee':
if not request.args.get('e_id', None):
return 'please send the e_id'
e_id = int(request.args['e_id'])
if not set(request.json.keys()) == {'user_name', 'password', 'role_id'}:
return jsonify({'message' : 'missing keys'})
        #check that all string values are long enough
if any(len(val.strip()) < 5 for val in request.json.values() if isinstance(val, str)):
return jsonify({'message' : 'Not adequate length of values'})
#lower case the user_name
user_name = request.json['user_name'].strip().lower()
role_id = request.json['role_id']
hashed_pass = hash_password(request.json['password'].encode())
#get the user access_token
user_access_token = gen_access_token(role_id, user_name)
user = User(user_name=user_name, password=hashed_pass, role_id=role_id, access_token=user_access_token.decode('utf-8'))
try:
emp = db_session.query(Employee).filter(Employee.id==e_id).one()
db_session.add(user)
emp.user = user
db_session.add(emp)
db_session.commit()
except IntegrityError as ie:
            #handle the error here
return record_exists_envelop()
except NoResultFound as e:
return record_notfound_envelop()
else:
return jsonify({'message' : 'user_added_successfully', 'access_token' : user_access_token.decode('utf-8'), 'status': 'success'})
@api.route('/company', methods=['POST'])
def add_company_detail():
if not set(request.json.keys()) == {'name', 'currency_symbol', 'is_prefix', 'country', 'description'}:
return missing_keys_envelop()
if len(request.json['name']) < 4 or len(request.json['country']) < 3 or len(request.json['currency_symbol']) < 1:
return length_require_envelop()
#now shape up the fields
name = request.json['name'].strip()
currency_symbol = request.json['currency_symbol'].lower().strip()
is_prefix = request.json['is_prefix']
country = request.json['country'].strip()
des = request.json['description'].strip()
company = CompanyDetail(name=name, currency_symbol=currency_symbol, is_prefix=is_prefix, country=country, description=des)
try:
db_session.add(company)
db_session.commit()
except IntegrityError as e:
return record_exists_envelop()
else:
return record_created_envelop(request.json)
@api.route('/users', methods = ['GET'])
@read_permission('read_management_perm')
def get_users():
try:
users = db_session.query(User).filter(User.user_name != 'admin').all()
except NoResultFound as e:
return record_notfound_envelop()
except Exception as e:
return fatal_error_envelop()
else:
return records_json_envelop(list(user.to_dict() for user in users))
@api.route('/users/<int:u_id>', methods=['PUT'])
@create_update_permission('user_management_perm')
def update_user(u_id):
if not request.json:
abort(400)
if not request.args.get('action') == 'update_role':
if 'password' not in request.json.keys():
return missing_keys_envelop()
try:
user = db_session.query(User).filter(User.id==u_id).one()
if user is None:
return record_notfound_envelop()
hashed_pass = hash_password(request.json['password'].encode())
old_hashed_pass = user.password
if old_hashed_pass == hashed_pass:
return jsonify({'message' : 'Please dont\'t use old password', 'status': 'fail'})
else:
user.password = hashed_pass
db_session.add(user)
db_session.commit()
except NoResultFound as e:
return record_notfound_envelop()
except Exception as e:
return fatal_error_envelop()
else:
return record_updated_envelop('Password updated Successfully.')
#update the role
if 'role_id' not in request.json:
return missing_keys_envelop()
try:
user = db_session.query(User).filter(User.id==u_id).one()
if user is None:
return record_notfound_envelop()
user.role_id = int(request.json['role_id'])
db_session.add(user)
db_session.commit()
except NoResultFound as e:
return record_notfound_envelop()
except Exception as e:
        return fatal_error_envelop()
else:
return record_updated_envelop('Role updated successfully.')
|
[
"user@Users-MacBook-Air.local"
] |
user@Users-MacBook-Air.local
|
c2f6f2417db54c1403f0fd7961a24ede9f71b21c
|
76b5be6d12c6885c8cb9ae458bf878a3dcf0401c
|
/DojoAssignments/Python2/PythonAssignments/Django/DjangoIntro/SessionWords/apps/session_words_app/views.py
|
539759b21089b070bef198b2336a8eced707120c
|
[] |
no_license
|
DaseinUXD/CodingDojo
|
ba1d532750d61a21feb401243c49e05623e9b8c2
|
19b2d0f0ce9f8c9d08747438412e5c988073f385
|
refs/heads/master
| 2020-03-11T16:36:51.312297
| 2018-09-19T22:32:09
| 2018-09-19T22:32:09
| 130,121,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, HttpResponse
# Create your views here.
# Index view
def index(request):
return render(request, 'session_words_app/index.html')
def add(request):
if request.method=="POST":
return redirect('/')
else:
# do something else
return redirect('/')
def clear(request):
return redirect('/')
|
[
"markmatthewsphd@gmail.com"
] |
markmatthewsphd@gmail.com
|
79aa73692fd55784d617c6924ab841e36efee841
|
fd97689f062e6d90837ea27b9a5e3de87bcd1e92
|
/Servidor/MET-Server-udp.py
|
c79bc598c86aa8500e06b913a44a9d4c03e14fb1
|
[] |
no_license
|
Edresson/MET
|
9f7b8a43bdea29ee844d0c98a20f0aef4afbcdd2
|
5945116d0d52fdf8f892a5f266bf6b51afb529eb
|
refs/heads/master
| 2023-08-31T10:18:35.942324
| 2019-10-29T12:17:15
| 2019-10-29T12:17:15
| 93,848,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,525
|
py
|
# -*- coding: utf-8 -*-
from threading import Thread
from PyQt4 import QtTest
from time import time
import time
from socket import *
import Motor
import celula
serverSocketMotor = socket(AF_INET, SOCK_DGRAM)
serverSocketMotor.bind(('', 12001))
serverSocketCelula = socket(AF_INET, SOCK_DGRAM)
serverSocketCelula.bind(('', 12002))
class thread_motor(Thread):
def run(self):
global freq_botao
inicio= time.time()
while True:
fim = time.time()
if fim - inicio > 6:
Motor.Parar()
message, address = serverSocketMotor.recvfrom(1024)
message = str(message).replace("b'",'')
message = message.replace("'",'')
if message == "1":
inicio = time.time()
if message == "STOP":
print('parar')
serverSocketMotor.sendto(B'1', address)
Motor.Parar()
elif message == "ping":
print('p')
serverSocketMotor.sendto(B'ping', address)
elif message[0:2] =='fb':
print('fb')
string,freqfb = message.split(':')
Motor.freq_botao = float(freqfb)
elif message[0:3] =='SeD':
print('SeD')
serverSocketMotor.sendto(B'1', address)
string,vel,freq,controle,deslocamento = message.split(':')
Motor.Subir_descer(float(vel),float(freq),int(controle),float(deslocamento))
elif message[0:3] =='Cal':
print('cal')
serverSocketMotor.sendto(B'1', address)
cmd,valor,freq = message.split(':')
Motor.calcular(float(valor),float(freq))
elif message =="SUBIR":
print('Subir')
serverSocketMotor.sendto(B'1', address)
Motor.subir()
elif message =="BAIXAR":
print('Baixar')
####print(message)
serverSocketMotor.sendto(B'1', address)
Motor.baixar()
class thread_celula(Thread):
def run(self):
while True:
message, address = serverSocketCelula.recvfrom(1024)
message = str(message).replace("b'",'')
message = message.replace("'",'')
if message == "gv":
val = str(celula.getvalue())
val =val.encode()
serverSocketCelula.sendto(val, address)
elif message == "ping":
                serverSocketCelula.sendto(B'ping', address)
elif message =='ca':
val = str(celula.calibrar())
val =val.encode()
serverSocketCelula.sendto(val, address)
elif message[0:3] =='ini':
serverSocketCelula.sendto(B'1', address)
cmd,valor = message.split(':')
celula.iniciarcel(float(valor))
elif message =="tr":
serverSocketCelula.sendto(B'1', address)
celula.tare()
thrCelula=thread_celula()
thrCelula.start()
thrMotor = thread_motor()
thrMotor.start()
|
[
"edresson1@gmail.com"
] |
edresson1@gmail.com
|
54203602ebefeb4d7e49a6c2cd32adf327c9e6e9
|
df20dc807d2d9ba666377a2a23cbe80b268c75cd
|
/0 Python Fundamental/32a5_readCSVasDICT_noCSVpkg.py
|
70ff4ebfcfdaa9b57a22e5f3a9a52a19d38883f8
|
[] |
no_license
|
stevenwongso/Python_Fundamental_DataScience
|
e9bb294017d0fcc05e2079f79f880ac8be726e11
|
706c61c8bdfcec1461328fa7a58a55a2d0f9f2d8
|
refs/heads/master
| 2021-01-04T07:32:11.856979
| 2020-02-13T05:18:06
| 2020-02-13T05:18:06
| 240,449,486
| 0
| 1
| null | 2020-02-14T07:12:16
| 2020-02-14T07:12:15
| null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# read csv as dict without csv package
myfile = open('32a4.csv', 'r')
data = []
for i in myfile.readlines()[1:]:
no = int(i.split(';')[0])
nama = i.split(';')[1].replace('\n','')
x = {'no': no, 'nama': nama}
data.append(x)
print(data)
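# --- For comparison (added for illustration): the same result using the standard
# --- library's csv module. The ';' delimiter matches the code above; the header
# --- names 'no' and 'nama' are assumed from the dict keys used above.
import csv
with open('32a4.csv', 'r') as f:
    reader = csv.DictReader(f, delimiter=';')
    data2 = [{'no': int(row['no']), 'nama': row['nama'].strip()} for row in reader]
print(data2)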
|
[
"lintangwisesa@ymail.com"
] |
lintangwisesa@ymail.com
|
86ee833065018990ffb7d10de8b4eae90e0400fa
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Scripts/weather3.py
|
da29540c7ea77779f74cf0c9f89c0af03f26e5b3
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:01477c0dba3b4f14c6e88c20abc1a741e612745ccaadd12514f86251fcb26f5d
size 1382
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
c2c3b84b358107d0e1be19f8975fcf001efefb02
|
9147a96572715604a31b6c026b7608b6d26276e8
|
/cfn_model/model/IAMManagedPolicy.py
|
4d24b42f6b0516a2f905011d5e7faab8fef05b51
|
[
"MIT"
] |
permissive
|
rubelw/cloudformation-validator
|
9890c024174640c79914f2f8bd153dc2900fc078
|
4ba3b05ae3abd3a941aa6a34419c594d8e0d0e5d
|
refs/heads/master
| 2020-03-23T18:31:29.084701
| 2019-01-17T21:55:04
| 2019-01-17T21:55:04
| 141,914,515
| 6
| 1
|
NOASSERTION
| 2019-01-17T21:55:05
| 2018-07-22T16:58:55
|
Python
|
UTF-8
|
Python
| false
| false
| 699
|
py
|
from __future__ import absolute_import, division, print_function
from cfn_model.model.ModelElement import ModelElement
class IAMManagedPolicy(ModelElement):
"""
IAM managed policy model
"""
def __init__(self, cfn_model):
"""
Initialize
:param cfn_model:
"""
ModelElement.__init__(self,cfn_model)
self.groups = []
self.roles = []
self.users = []
self.policy_document=None
self.resource_type= 'AWS::IAM::ManagedPolicy'
    # Note: the instance attribute ``policy_document`` assigned in __init__
    # shadows this method on instances.
    def policy_document(self, document):
"""
Set the policy document
:param document:
:return:
"""
self.policy_document=document
|
[
"rubelwi@Wills-MacBook-Pro.local"
] |
rubelwi@Wills-MacBook-Pro.local
|
5591a8edf408ec43904d5a0e73a02795dc193eee
|
a183a600e666b11331d9bd18bcfe1193ea328f23
|
/pdt/core/admin/__init__.py
|
58e1d13908fc242d51b379fa54a382780b919b22
|
[
"MIT"
] |
permissive
|
AbdulRahmanAlHamali/pdt
|
abebc9cae04f4afa1fc31b87cbf4b981affdca62
|
5c32aab78e48b5249fd458d9c837596a75698968
|
refs/heads/master
| 2020-05-15T07:51:09.877614
| 2015-12-01T18:22:56
| 2015-12-01T18:22:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
"""PDT core admin interface."""
from .case import CaseAdmin # NOQA
from .case_category import CaseCategoryAdmin # NOQA
from .case_edit import CaseEditAdmin # NOQA
from .ci_project import CIProjectAdmin # NOQA
from .deployment_report import DeploymentReportAdmin # NOQA
from .instance import InstanceAdmin # NOQA
from .migration import MigrationAdmin # NOQA
from .migration_report import MigrationReportAdmin # NOQA
from .release import ReleaseAdmin # NOQA
from .notification import NotificationTemplateAdmin # NOQA
|
[
"bubenkoff@gmail.com"
] |
bubenkoff@gmail.com
|
01e5ffbf994198ee773823897db4431035f17668
|
4e5141121d8b4015db233cbc71946ec3cfbe5fe6
|
/samples/basic/crud/models/cisco-ios-xr/Cisco-IOS-XR-lib-keychain-cfg/nc-create-xr-lib-keychain-cfg-20-ydk.py
|
5c5384fcd79c34f4269bfd0585bb229e990d76bd
|
[
"Apache-2.0"
] |
permissive
|
itbj/ydk-py-samples
|
898c6c9bad9d6f8072892300d42633d82ec38368
|
c5834091da0ebedbb11af7bbf780f268aad7040b
|
refs/heads/master
| 2022-11-20T17:44:58.844428
| 2020-07-25T06:18:02
| 2020-07-25T06:18:02
| 282,382,442
| 1
| 0
| null | 2020-07-25T06:04:51
| 2020-07-25T06:04:50
| null |
UTF-8
|
Python
| false
| false
| 3,690
|
py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-lib-keychain-cfg.
usage: nc-create-xr-lib-keychain-cfg-20-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_lib_keychain_cfg \
as xr_lib_keychain_cfg
import logging
def config_keychains(keychains):
"""Add config data to keychains object."""
keychain = keychains.Keychain()
keychain.chain_name = "CHAIN1"
key = keychain.macsec_keychain.macsec_keys.MacsecKey()
key.key_id = "10"
key.macsec_key_string = key.MacsecKeyString()
key.macsec_key_string.string = "101E584B5643475D5B547B79777C6663754356445055030F0F03055C504C430F0F"
key.macsec_key_string.cryptographic_algorithm = xr_lib_keychain_cfg.MacsecCryptoAlg.aes_128_cmac
key.macsec_key_string.encryption_type = xr_lib_keychain_cfg.MacsecEncryption.type7
key.macsec_lifetime = key.MacsecLifetime()
key.macsec_lifetime.start_hour = 0
key.macsec_lifetime.start_minutes = 0
key.macsec_lifetime.start_seconds = 0
key.macsec_lifetime.start_date = 1
key.macsec_lifetime.start_month = xr_lib_keychain_cfg.KeyChainMonth.jan
key.macsec_lifetime.start_year = 2017
key.macsec_lifetime.infinite_flag = True
keychain.macsec_keychain.macsec_keys.macsec_key.append(key)
keychains.keychain.append(keychain)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
keychains = xr_lib_keychain_cfg.Keychains() # create object
config_keychains(keychains) # add object configuration
# create configuration on NETCONF device
crud.create(provider, keychains)
exit()
# End of script
|
[
"deom119@gmail.com"
] |
deom119@gmail.com
|
889eb827f803875363cea68f5952d72e22de0ae9
|
3db5eeeb0d34e7f093a9f3d9750c270df4ba3845
|
/blog/admin.py
|
a4138cc33360d508be75461be64727fa3c2f6d3d
|
[] |
no_license
|
DeepakDarkiee/stackunderflow
|
b0d052d2b1ef62dbb948a2789abfb80fd097191b
|
d68161e5729bdb8033f5ae0c28379b1e89c31044
|
refs/heads/master
| 2022-10-13T10:16:34.104129
| 2020-06-04T07:44:38
| 2020-06-04T07:44:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
from django.contrib import admin
from .models import Post, Comment ,Category,Contact
from django_summernote.admin import SummernoteModelAdmin
class PostAdmin(SummernoteModelAdmin):
list_display = ('title', 'slug', 'status', 'category', 'created_on')
list_filter = ('status', 'created_on' ,'category')
search_fields = ['title', 'content']
    summernote_fields = ('content',)
prepopulated_fields = {'slug': ('title',)}
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name', 'body', 'post', 'created_on', 'active')
list_filter = ('active', 'created_on')
search_fields = ('name', 'email', 'body')
actions = ['approve_comments']
def approve_comments(self, request, queryset):
queryset.update(active=True)
class ContactAdmin(admin.ModelAdmin):
list_display = ('name','email','Content', 'created_on')
list_filter = ('email','created_on')
search_fields = ('name', 'email', 'body')
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Contact,ContactAdmin)
|
[
"mdipakpatidar@gmail.com"
] |
mdipakpatidar@gmail.com
|
80ed4fcaa32b7bbe66686eaeffe9a665e2afbf26
|
629a62dc600b356e55b25b21c93d088f5bc8aa64
|
/source/webapp/forms.py
|
3710a393967147c566da58a87bcfcf5247950ce3
|
[] |
no_license
|
Azer-Denker/ex_9
|
70e70408355d602ff11817d6cc53d53c0f1e3b1f
|
bfb8272ebb1764a9a3b382f2cabb19778d5f5541
|
refs/heads/main
| 2023-05-04T23:41:22.362929
| 2021-05-29T13:05:06
| 2021-05-29T13:05:06
| 306,867,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from django import forms
from webapp.models import Photo, Album
class PhotoForm(forms.ModelForm):
class Meta:
model = Photo
fields = ['photo_img', 'signature', 'album', 'status']
class AlbumForm(forms.ModelForm):
class Meta:
model = Album
fields = ['name', 'description']
|
[
"azerarlen312@gmail.com"
] |
azerarlen312@gmail.com
|
43b9a78891cc24a21f373baf585e2e5eac2ae706
|
b872ccff0c2f79886c0136b32da5f04cb8d3276c
|
/etcewrappers/emane/emaneshsnapshot.py
|
41b82af702fc1382f8e41bee540758ecd9d07d0d
|
[] |
no_license
|
prj8121/python-etce
|
9c22b3a182f103f46b1d865d13ded277482e4a34
|
bbd74a65280a09f3edc05457961b8c51ec009165
|
refs/heads/master
| 2022-11-18T05:19:19.324966
| 2020-04-02T15:15:47
| 2020-04-02T15:15:47
| 276,674,792
| 0
| 0
| null | 2020-07-02T14:57:07
| 2020-07-02T14:57:06
| null |
UTF-8
|
Python
| false
| false
| 6,808
|
py
|
#
# Copyright (c) 2015-2018 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
import os.path
from etce.wrapper import Wrapper
try:
from emanesh.emaneshell import ControlPortClient
except:
from emane.shell import ControlPortClient
class EmaneshSnapshot(Wrapper):
"""
Log the stats, tables and config of a running emulator
instance to file.
"""
def register(self, registrar):
registrar.register_infile_name('emaneshsnapshot.flag')
registrar.register_argument('controlportendpoint',
'127.0.0.1:47000',
'The control port endpoint of the target ' \
'EMANE instance.')
def run(self, ctx):
# query emane instance if there is a local platform file
if not ctx.args.infile:
return
cp = None
try:
layermapping = {}
ipaddr,port = ctx.args.controlportendpoint.split(':')
cp = ControlPortClient(ipaddr, int(port))
logdirectory = ctx.args.logdirectory
# nem 3 shim0(phyapitestshim) phy(universalphy)
showfile = os.path.join(logdirectory, 'emaneshow.log')
with open(showfile, 'w') as showf:
for nemid, layertuples in list(cp.getManifest().items()):
layermapping[nemid] = []
line = 'nem %d ' % nemid
for buildid,layertype,layername in layertuples:
layerlabel = '%d-%s' % (buildid, layertype.lower())
layermapping[nemid].append((buildid, layertype.lower(), layerlabel))
line += ' %s(%s)' % (layertype.lower(), layername)
showf.write(line + '\n')
# statistics
statsfile = os.path.join(logdirectory, 'emanestats.log')
with open(statsfile, 'w') as sf:
# nems
for nemid,layertuples in sorted(layermapping.items()):
for buildid,_,layerlabel in layertuples:
for statname,statval in sorted(cp.getStatistic(buildid).items()):
sf.write('nem %d %s %s = %s\n' % (nemid, layerlabel, statname, str(statval[0])))
# emulator
for statname,statval in sorted(cp.getStatistic(0).items()):
sf.write('emulator %s = %s\n' % (statname, str(statval[0])))
# configuration
configfile = os.path.join(logdirectory, 'emaneconfig.log')
with open(configfile, 'w') as sf:
# nems
for nemid,layertuples in sorted(layermapping.items()):
for buildid,_,layerlabel in layertuples:
for configname,configvaltuples in sorted(cp.getConfiguration(buildid).items()):
configvalstr = ''
if configvaltuples:
configvalstr = ','.join(map(str, list(zip(*configvaltuples))[0]))
sf.write('nem %d %s %s = %s\n' % (nemid, layerlabel, configname, configvalstr))
# emulator
for configname,configvaltuples in sorted(cp.getConfiguration(0).items()):
configvalstr = ''
if configvaltuples:
configvalstr = ','.join(map(str, list(zip(*configvaltuples))[0]))
sf.write('emulator %s = %s\n' % (configname, configvalstr))
# statistic tables
tablefile = os.path.join(logdirectory, 'emanetables.log')
with open(tablefile, 'w') as tf:
# nems
for nemid,layertuples in sorted(layermapping.items()):
for buildid,layertype,_ in layertuples:
for tablename,data in sorted(cp.getStatisticTable(buildid).items()):
tf.write('nem %d %s %s\n' % (nemid, layertype, tablename))
self.write_table_cells(tf, data)
# emulator
for tablename,data in sorted(cp.getStatisticTable(0).items()):
tf.write('emulator %s\n' % tablename)
self.write_table_cells(tf, data)
finally:
if cp:
cp.stop()
def write_table_cells(self, tf, data):
labels,rowtuples = data
        widths = []
for label in labels:
widths.append(len(label))
rows = []
for rowtuple in rowtuples:
rows.append(list(map(str, list(zip(*rowtuple))[0])))
for row in rows:
for i,value in enumerate(row):
widths[i] = max(widths[i],len(value))
line = ''
for i,label in enumerate(labels):
line += '|' + label.ljust(widths[i])
line += "|\n"
tf.write(line)
for row in rows:
line = ''
for i,value in enumerate(row):
line += '|' + value.rjust(widths[i])
line += "|\n"
tf.write(line)
tf.write('\n')
def stop(self, ctx):
pass
|
[
"eschreiber@adjacentlink.com"
] |
eschreiber@adjacentlink.com
|
2825170807eb9df9a190facef2b577aa2de44ffe
|
764f63ef031b38bde74657cd9bd198014ecfa0c7
|
/alien.py
|
82a55cbe8717ccdbb9cc68b2ba4f7b9fe6945b1e
|
[] |
no_license
|
turkey66/alien_invasion
|
4396668a8ca9737d340da9532d448cc90c7c3ed6
|
dae2df7175a885c32951804c6ede6c53cedc749f
|
refs/heads/master
| 2020-03-20T20:03:12.467496
| 2018-06-17T16:17:35
| 2018-06-17T16:17:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""表示单个外星人的类"""
def __init__(self, ai_settings, screen):
"""初始化外星人并设置其起始位置"""
super().__init__()
self.screen = screen
self.ai_settings = ai_settings
        # Load the alien image and set its rect attribute
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
        # Start each new alien near the top left of the screen
self.rect.x = self.rect.width
self.rect.y = self.rect.height
        # Store the alien's exact position
self.x = float(self.rect.x)
def check_edges(self):
"""如果外星人位于屏幕边缘,就返回True"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
"""向右或向左移动外星人"""
self.x += self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction
self.rect.x = self.x
def blitme(self):
"""在指定位置绘制外星人"""
self.screen.blit(self.image, self.rect)
|
[
"eric@example.com"
] |
eric@example.com
|
100386502e3c3fa6f1d4f6adbb120b741631dee0
|
113f803b721984992bdc8f2177056b0f60de546a
|
/ex20/ex20.py
|
196cbc7f5b0a27f8d770e8b025470d262aa428c5
|
[] |
no_license
|
Na-Young-Lee/16PFA-Na-Young-Lee
|
ddb215b0dc9cb0572b96aa90d8db71fbbea13c13
|
6cdcea12fd46a5218f9b6a7cd4ac5ee5e347cbb7
|
refs/heads/master
| 2021-01-17T15:15:27.429510
| 2016-06-01T11:37:20
| 2016-06-01T11:37:20
| 53,923,583
| 0
| 0
| null | null | null | null |
UHC
|
Python
| false
| false
| 1,160
|
py
|
#-*-coding:cp949
from sys import argv # get the arguments from the system
script, input_file = argv # the argument is input_file (test.txt)
def print_all(f): # function print_all; f is a file object
    print(f.read()) # read and print the whole file
def rewind(f): # function rewind; f is a file object
    f.seek(0) # go back to the start position
# seek(): positions the read/write pointer, like seeking on a tape
# seek(n): prepare to read/write the data located n bytes from the start
def print_a_line(line_count,f):
    print("%d %s" % (line_count, f.readline())) # readline reads the file one line at a time
current_file = open(input_file)
print ("First let's print the whole file:\n")
print_all(current_file) # call print_all
print ("Now let's rewind, kind of like a tape.")
rewind(current_file) # call rewind
print("Let's print three lines:")
current_line = 1
print_a_line(current_line, current_file)
current_line = current_line +1
print_a_line(current_line, current_file)
current_line = current_line + 1
print_a_line(current_line, current_file)
|
[
"CAD Client"
] |
CAD Client
|
63da80f93496f9f06ca03c36ca38b215fd4ab8d9
|
d7a4701e18be0f38820f5c15d80099fda6385f9f
|
/ABC106/B.py
|
dd13915b9a187916ed4f7f263d2be9013e8353ba
|
[] |
no_license
|
shiki7/Atcoder
|
979a6f0eeb65f3704ea20a949940a0d5e3434579
|
c215c02d3bfe1e9d68846095b1bd706bd4557dd0
|
refs/heads/master
| 2022-05-21T16:59:01.529489
| 2022-04-29T11:26:42
| 2022-04-29T11:26:42
| 201,536,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# ABC106 B: count the odd numbers up to N that have exactly eight divisors
N = int(input())
def is_divisor_counter8(num):
    count = 0
    for i in range(1, num+1):
        if num % i == 0:
            count += 1
    return count == 8
count = 0
for i in range(1, N+1, 2):
if is_divisor_counter8(i):
count += 1
print(count)
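# --- An alternative sketch (added for illustration): counting divisors in
# --- O(sqrt(num)) by pairing divisors instead of scanning every candidate.
def count_divisors(num):
    count = 0
    i = 1
    while i * i <= num:
        if num % i == 0:
            count += 1 if i * i == num else 2  # i and num//i form a pair
        i += 1
    return count

# count_divisors(105) == 8, so 105 is the smallest qualifying odd number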
|
[
"bitbite8@gmail.com"
] |
bitbite8@gmail.com
|
d06f80d7cbcfeb09aca48198e849b2aeff779bcd
|
1cff45f4b9beab91e06c30f4f2de2d719706fcdd
|
/tools/log2thbrep.py
|
a1b5b9a2906431d63e81738290657503cf7cb0ac
|
[] |
no_license
|
chilamkatana/thbattle
|
2c1b30bd243a216454e075f636b5c92c1df77f86
|
5219509c58f1b96bfd431f84e405f4f9aa981809
|
refs/heads/master
| 2021-01-18T00:07:46.283892
| 2016-06-19T15:12:30
| 2016-06-19T15:12:30
| 62,997,020
| 1
| 0
| null | 2016-07-10T12:13:04
| 2016-07-10T12:13:03
| null |
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
# -*- coding: utf-8 -*-
# -- prioritized --
import sys
sys.path.append('../src')
# -- stdlib --
from urlparse import urljoin
import argparse
import gzip
import json
# -- third party --
# -- own --
from client.core.replay import Replay
from game import autoenv
from settings import ACCOUNT_FORUMURL
# -- code --
def gen_fake_account(name, is_freeplay):
if is_freeplay:
acc = ['freeplay', 1, name]
else:
acc = ['forum', 1, name, {
'title': u'转换的Replay只有名字啊……',
'avatar': urljoin(ACCOUNT_FORUMURL, '/maoyu.png'),
'credits': 1000,
'games': 0,
'drops': 0,
'badges': [],
}]
return {'account': acc, 'state': 'ingame'}
def main():
autoenv.init('Client')
parser = argparse.ArgumentParser('log2thbrep')
parser.add_argument('replay_file', help='Server side replay')
parser.add_argument('client_version', help='Desired client version (git commit)')
parser.add_argument('--freeplay', action='store_true', help='Use freeplay account module?')
options = parser.parse_args()
if options.replay_file.endswith('.gz'):
data = gzip.open(options.replay_file, 'r').read()
else:
data = open(options.replay_file, 'r').read()
data = data.decode('utf-8').split('\n')
names = data.pop(0)[2:].split(', ') # Names
data.pop(0) # Ver
gid = int(data.pop(0).split()[-1]) # GameId
data.pop(0) # Time
game_mode, game_params, rnd_seed, usergdhist, gdhist = data
gdhist = json.loads(gdhist)
game_params = json.loads(game_params)
rep = Replay()
rep.client_version = options.client_version
rep.game_mode = game_mode
rep.game_params = game_params
rep.users = [gen_fake_account(i, options.freeplay) for i in names]
assert len(names) == len(gdhist), [names, len(gdhist)]
for i, gd in enumerate(gdhist):
fn = '%s_%s.thbrep' % (gid, i)
with open(fn, 'w') as f:
print 'Writing %s...' % fn
rep.me_index = i
rep.gamedata = gd
f.write(rep.dumps())
if __name__ == '__main__':
main()
|
[
"feisuzhu@163.com"
] |
feisuzhu@163.com
|
3c12d70f689a90719f80a3bd3c5077bf23834e0f
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OEFModel/Classes/Dummy.py
|
e96d20609045111125c43f47cdf1e3ee6f8fca51
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
# coding=utf-8
from OTLMOW.OEFModel.EMObject import EMObject
from OTLMOW.OEFModel.EMAttribuut import EMAttribuut
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OEFClassCreator. To modify: extend, do not edit
class Dummy(EMObject):
"""DUMMY Installatie"""
typeURI = 'https://lgc.data.wegenenverkeer.be/ns/installatie#Dummy'
label = 'Dummy'
def __init__(self):
super().__init__()
self._notitieDummy = EMAttribuut(field=StringField,
naam='notitie (DUMMY)',
label='notitie (DUMMY)',
objectUri='https://ond.data.wegenenverkeer.be/ns/attribuut#Dummy.notitieDummy',
definitie='Definitie nog toe te voegen voor eigenschap notitie (DUMMY)',
owner=self)
@property
def notitieDummy(self):
"""Definitie nog toe te voegen voor eigenschap notitie (DUMMY)"""
return self._notitieDummy.waarde
@notitieDummy.setter
def notitieDummy(self, value):
self._notitieDummy.set_waarde(value, owner=self)
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
aaf45a9200b30d752b7d7761ba15eabd843892ff
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/PIDCalib/CalibDataScripts/jobs/Stripping5TeV/Lam0/ganga_Lam0Fit_MagUp.py
|
f823e503e3df060e3276df738366d614877ed200
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970
| 2017-12-15T14:42:04
| 2017-12-15T14:42:04
| 251,259,622
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,567
|
py
|
# set the stripping version
stripVersion = "5TeV"
# magnet 'Up' or 'Down'?
magPol='Up'
# file suffix:
#
# dst_k_and_pi: Kaons and pions from D*
# lam0_p: Protons from Lambda0
# jpsi_mu: Muons from J/psi
# dst_k_and_pi_muonUnBiased: 'MuonUnBiased' kaons + pions from D*
# lam0_p_muonUnBiased: 'MuonUnBiased' protons from Lambda0
fileSuffix='lam0_p'
# set the pbs options (e.g. CPU/walltime)
pbsopts = "-l cput=8:00:00,walltime=12:00:00"
# track name (e.g. K, Pi)
trackName="p"
# particle type (e.g. DSt, Lam0, Jpsi)
partType="Lam0"
# the platform to run on
# if this is not set, it will default to the value of CMTCONFIG
platform=''
# the job name (which will be appended with the stripping version, magnet polarity etc)
jobname="Lam0Fit_P"
# is this a test job?
isTest=False
##########################################################################################################
# The following lines should not need to be changed in most cases, as they are autoconfigured from the
# above options
##########################################################################################################
import os
import re
import sys
# set the platform (if not already specified)
if len(platform)==0:
platform=os.getenv('CMTCONFIG')
# get the Urania version from the script path
abspath = os.path.abspath(os.path.dirname(sys.argv[0]))
rematch = re.search('.Urania_(?P<ver>v\d+r\d+p?\d?).', abspath)
UraniaVersion=rematch.group('ver')
# uncomment to set the Urania version manually
#UraniaVersion="v1r1"
# get the User_release_area (i.e. top-level CMT directory,
# which defaults to $HOME/cmtuser)
User_release_area = os.getenv('User_release_area')
if len(User_release_area)==0:
User_release_area="%s/cmtuser" %os.getenv('HOME')
# uncomment to set the User_release_area manually
#User_release_area="/home/huntp/cmtuser"
# base directory of $CALIBDATASCRIPTSROOT
basedir = '%s/Urania_%s/PIDCalib/CalibDataScripts' %(User_release_area,
UraniaVersion)
# location of the executable
exeFile = '%s/scripts/sh/%sJob_runRange.sh' %(basedir, partType)
# read the configuration script
import imp
gangaJobFuncs=imp.load_source('gangaJobFuncs',
'%s/scripts/python/gangaJobFuncs.py' %basedir)
gangaJobFuncs.updateEnvFromShellScript( ('{bdir}/jobs/Stripping{strp}'
'/configureGangaJobs.sh').format(
bdir=basedir,strp=stripVersion))
jidVar = ''
if magPol=='Down':
jidVar='CALIBDATA_JIDS_DOWN'
elif magPol=='Up':
jidVar='CALIBDATA_JIDS_UP'
else:
raise NameError('Unknown magnet polarity %s' %magPol)
jids_str=os.getenv(jidVar)
if len(jids_str)==0:
raise NameError('Environmental variable %s is not set' %jidVar)
jobIDs=[int(jid) for jid in jids_str.split()]
# uncomment to set the input job IDs manually
#jobIDs=[7,9]
# assume the user's ganga directory is the input directory
gangadir='%s/workspace/%s/%s' %(config['Configuration']['gangadir'],
config['Configuration']['user'],
config['Configuration']['repositorytype'])
# uncomment to use a different input directory
#gangadir='$DATADISK/gangadir_calib/workspace/powell/LocalXML'
#gangadir= '/data/lhcb/users/hunt/gangadir_calib/workspace/huntp/LocalXML'
# use the PBS backend and set the CPU/walltime etc.
bck = PBS()
bck.extraopts = pbsopts
if isTest:
bck.queue = 'testing'
# Uncomment to use the local backend
#bck = Local()
subIDString="*"
## configure the jobs
if isTest:
jobname='Test'+jobname
for jid in jobIDs:
# configure the job comment
jobcomment='Input from Job ID %d' %jid
if isTest:
jobcomment='TEST - '+jobcomment
# get the number of chopped tree
nChoppedTrees = gangaJobFuncs.getNumChoppedTrees(gangadir, jid,
fileSuffix)
if isTest:
# run over ~10% of all events,
# and only process one "chopped tree" (index 0)
nChoppedTrees = 1
nSubJobs =len(jobs(jid).subjobs)
subIDString = "{"+",".join([str(s) for s in range(nSubJobs/10)])+"}"
# Make the lists of arguments used by the ArgSplitter
#
# Arguments are:
#
# 1) top-level input directory (usually the ganga repository)
# 2) Urania version
# 3) platform (e.g. 'x86_64-slc6-gcc46-opt')
# 4) track type
# 5) magnet polarity
# 6) stripping version
# 7) index
# 8) file suffix (e.g. 'dst_k_and_pi')
# 9) verbose flag (0 = no verbose info, 1 = verbose info)
# 10) exit on bad fit flag ( 0 = don't exit, 1 = do exit (
# 11) subjobID string ('*' for all subjobs, '{0,1,2}' for first 3
# subjobs etc.)
argLists = [ [ gangadir,
UraniaVersion,
platform,
trackName,
magPol,
stripVersion,
str(idx),
fileSuffix,
str(int(isTest)),
'1',
subIDString ] for idx in range(nChoppedTrees) ]
splitter = ArgSplitter(args=argLists)
# configure the application
app= Executable(
exe = File(exeFile),
)
j = Job(
name = '{jname}_S{strp}_Mag{pol}_{suf}'.format(jname=jobname,
strp=stripVersion, pol=magPol, suf=fileSuffix),
comment = jobcomment,
outputfiles = ['*.root', '*.eps'] ,
application=app,
backend=bck,
splitter=splitter
)
j.submit()
|
[
"liblhcb@cern.ch"
] |
liblhcb@cern.ch
|
9a050e8af3a0f33b423d7e500a3c375688e6fc12
|
ba91eb5329fd8e69aa9d9fe1e74e2c7b968806c7
|
/robocode-python-ls-core/src/robocode_ls_core/unittest_tools/cases_fixture.py
|
ba40c23945a6b2d060d0511504b35d5688caabc8
|
[
"Apache-2.0"
] |
permissive
|
emanlove/robotframework-lsp
|
aba9deb43ee7fdd3328e08b4d904d6c4ca44e185
|
b0d8862d24e3bc1b72d8ce9412a671571520e7d9
|
refs/heads/master
| 2022-12-06T01:04:04.103593
| 2020-08-30T15:56:43
| 2020-08-30T15:56:43
| 292,014,577
| 1
| 0
|
NOASSERTION
| 2020-09-01T14:05:52
| 2020-09-01T14:05:51
| null |
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
import os.path
class CasesFixture(object):
def __init__(self, copy_to_dir: str, original_resources_dir: str):
"""
Upon initialization copies the `original_resources_dir` to
`copy_to_dir`.
So, for instance, we may copy the contents from
/my/test/resource
to
/temp/pytest-101/folder with spaces/resource
Subsequent requests to get the path will access it in the
place we copied it to.
Note: it should usually be bound to a session scope so that
the copy isn't done at each call.
"""
from robocode_ls_core.copytree import copytree_dst_exists
copytree_dst_exists(original_resources_dir, copy_to_dir)
self.resources_dir = copy_to_dir
assert os.path.exists(self.resources_dir)
def get_path(self, resources_relative_path: str, must_exist=True) -> str:
"""
Returns a path from the resources dir.
"""
path = os.path.join(self.resources_dir, resources_relative_path)
if must_exist:
assert os.path.exists(path), "%s does not exist." % (path,)
return path
def copy_to(self, case: str, dest_dir: str):
"""
Helper to copy a given path to a given directory.
To be used if a given path should be within another structure or
if its contents should be mutated.
"""
import shutil
src = self.get_path(case, must_exist=True)
shutil.copytree(src, dest_dir)
|
[
"fabiofz@gmail.com"
] |
fabiofz@gmail.com
|
2f0695813aafeda05fd430df7c0449e407cb4e4a
|
ac2f43c8e0d9649a7f063c59b3dffdfed9fd7ed7
|
/common/recipes-rest/rest-api/files/common_logging.py
|
cb60bf695450c4cc5c97453d1354d71f6db5c614
|
[] |
no_license
|
facebook/openbmc
|
bef10604ced226288600f55248b7f1be9945aea4
|
32777c66a8410d767eae15baabf71c61a0bef13c
|
refs/heads/helium
| 2023-08-17T03:13:54.729494
| 2023-08-16T23:24:18
| 2023-08-16T23:24:18
| 31,917,712
| 684
| 331
| null | 2023-07-25T21:19:08
| 2015-03-09T19:18:35
|
C
|
UTF-8
|
Python
| false
| false
| 3,993
|
py
|
#!/usr/bin/env python3
#
# Copyright 2014-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import datetime
import logging
import os
import sys
from typing import Any, Dict
import json_log_formatter
class OpenBMCJSONFormatter(json_log_formatter.JSONFormatter):
def json_record(
self, message: str, extra: Dict[str, Any], record: logging.LogRecord
) -> Dict[str, Any]:
# for access logs discard the message field
# all information is already included in extra
if record.name != "aiohttp.access":
extra["message"] = message
else:
# reformat access log request time to isoformat
extra["request_time"] = datetime.datetime.strptime(
extra["request_time"][1:-1], "%d/%b/%Y:%H:%M:%S %z"
).isoformat()
# Include loglevel
extra["level"] = record.levelname
if "time" not in extra:
extra["time"] = datetime.datetime.utcnow()
if record.exc_info:
extra["exc_info"] = self.formatException(record.exc_info)
return extra
def mutate_json_record(self, json_record: Dict[str, Any]):
for attr_name, attr in json_record.items():
if isinstance(attr, datetime.datetime):
json_record[attr_name] = attr.isoformat()
return json_record
class JsonSyslogFormatter(OpenBMCJSONFormatter):
def format(self, record) -> str:
return "rest-api: %s" % (super(JsonSyslogFormatter, self).format(record))
ACCESS_LOG_FORMAT = (
'%a %l %u %t "%r" %s %b %Dus "%{identity}o" "%{Referrer}i" "%{User-Agent}i"'
)
def get_logger_config(config):
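# prefer the local syslog socket when it exists; otherwise fall back to a stdout handler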
if os.path.exists("/dev/log"):
rsyslog_config = {
"level": "INFO",
"formatter": "syslog_" + config["logformat"],
"class": "logging.handlers.SysLogHandler",
"address": "/dev/log",
}
else:
rsyslog_config = {
"level": "INFO",
"formatter": config["logformat"],
"class": "logging.StreamHandler",
"stream": sys.stdout,
}
LOGGER_CONF = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {"format": "%(message)s"},
"json": {"()": "common_logging.OpenBMCJSONFormatter"},
"syslog_json": {"()": "common_logging.JsonSyslogFormatter"},
"syslog_default": {"format": "rest-api: %(message)s"},
},
"handlers": {
"file": {
"level": "INFO",
"formatter": config["logformat"],
"class": "logging.handlers.RotatingFileHandler",
"filename": config["logfile"],
"maxBytes": 1048576,
"backupCount": 3,
"encoding": "utf8",
},
"syslog": rsyslog_config,
"stdout": {
"level": "INFO",
"formatter": config["logformat"],
"class": "logging.StreamHandler",
"stream": sys.stdout,
},
},
"loggers": {
"": {
"handlers": [config["loghandler"]],
"level": "DEBUG",
"propagate": True,
}
},
}
return LOGGER_CONF
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
d9eb34855131358c7270e0fa5ad93d1566736595
|
8f46693b9933763cadb8f9272e6451cb0f7a3e8e
|
/conductor/devices/blue_pmt/__init__.py
|
3a587eb13c200e44055625096de8e262ae4b1103
|
[] |
no_license
|
yesrgang/labrad_tools
|
f4099a2c365400b4501a261855dd122b3845c09f
|
7441b2cb00f851b491136a8e0e9a3bf374c132c4
|
refs/heads/master
| 2018-10-05T11:03:20.386418
| 2018-09-20T22:13:18
| 2018-09-20T22:13:18
| 28,723,788
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
import json
from labrad.wrappers import connectAsync
from time import strftime
from twisted.internet.defer import inlineCallbacks
from conductor_device.conductor_parameter import ConductorParameter
class Recorder(ConductorParameter):
priority = 1
recorders = {
'image': 'record_g',
'image_clock': 'record_eg',
'image_ft': 'record_eg',
}
@inlineCallbacks
def initialize(self):
yield self.connect()
yield self.cxn.yesr10_andor.select_device('ikon')
@inlineCallbacks
def update(self):
recorder_type = ''
sequence = self.conductor.parameters['sequencer']['sequence'].value
for subsequence, recorder in self.recorders.items():
if subsequence in sequence:
recorder_type = recorder
experiment_name = self.conductor.experiment_name
experiment_number = self.conductor.experiment_number
point_number = self.conductor.point_number
if experiment_name is not None:
record_name = '{}#{}-image#{}.hdf5'.format(experiment_name,
experiment_number, point_number)
else:
record_name = 'current-image.hdf5'
record_path = [strftime('%Y%m%d'), record_name]
if self.value is None:
self.value = {}
if recorder_type:
# recorder_type = self.value.get('type', recorder_type)
recorder_config = json.dumps(self.value.get('config', {}))
yield self.cxn.yesr10_andor.record(record_path, recorder_type,
recorder_config)
yield self.conductor.set_parameter_value('andor', 'image_path',
record_path, True)
|
[
"yesrgang@gmail.com"
] |
yesrgang@gmail.com
|
541985950b3efb0c43cbfe5c764f2684dae8b431
|
990a8f72428655d22775ee9dc2c52b7d6de98e4d
|
/config.py
|
099c5e6c905132b2718e17f60e5c5c6b52cbbc51
|
[] |
no_license
|
saulshanabrook/cosc465-iprouter
|
a2c55906d2ef16e101f9eab7459a3a9cc49de1a0
|
a8b08e4929ca13fe34f33c7a433f6cf9670ddf3d
|
refs/heads/master
| 2021-01-18T10:32:59.159476
| 2015-04-15T22:21:58
| 2015-04-15T22:21:58
| 31,486,286
| 0
| 1
| null | 2015-03-01T04:05:49
| 2015-03-01T04:05:49
| null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
python_interpreter = "python" # the interpreter to use
project = "anaconda" # the name of the project
extra_paths = None # a list of extra paths
port = '19360' # the port to listen on (as string)
|
[
"s.shanabrook@gmail.com"
] |
s.shanabrook@gmail.com
|
25be5db700331b2813ceafbe554648b1b9bac0b5
|
959122eea21cec24a4cf32808a24482feda73863
|
/store/admin.py
|
2688e5c86b17695b37c5f342258f8b995eb6a74d
|
[] |
no_license
|
AsadullahFarooqi/InventoryWebApp
|
9fbe6ccafcb93bb5cb1879b728954867014d0afd
|
07e8e6cb06e11f8ef6ada6a590e52f569a8c2d6b
|
refs/heads/master
| 2020-06-18T15:06:18.612258
| 2019-07-11T07:32:00
| 2019-07-11T07:32:00
| 196,341,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
from django.contrib import admin
from .models import (
Store,
StoreEmployers,
Customer,
Supplier,
Products,
# ContainersTypes,
Imported,
Exported,
PaymentsToSuppliers,
PaymentsOfCustomers,
EmployersLedger,
)
# Register your models here.
admin.site.register(Store)
admin.site.register(StoreEmployers)
admin.site.register(Customer)
admin.site.register(Supplier)
admin.site.register(Products)
admin.site.register(Imported)
admin.site.register(Exported)
admin.site.register(EmployersLedger)
# admin.site.register(ContainersTypes)
admin.site.register(PaymentsToSuppliers)
admin.site.register(PaymentsOfCustomers)
|
[
"asadullah.itcgcs@gmail.com"
] |
asadullah.itcgcs@gmail.com
|
e5b2ae4c4440478fd5c24f1011f898ba4711d6d7
|
153da69b35f032f5b83a06f17008ba41a1b336b4
|
/src/main/hspylib/modules/mock/mock_request.py
|
4742062ecc57e21e043658fa728a7644c1327e85
|
[
"MIT"
] |
permissive
|
TrendingTechnology/hspylib
|
6400cadf9dfe6ab5733712dcfeccf8022d61c589
|
c79a2c17e89fe21d00ccd9c1646a03407cd61839
|
refs/heads/master
| 2023-06-20T15:47:35.962661
| 2021-07-19T22:12:18
| 2021-07-19T23:45:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,564
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TODO Purpose of the file
@project: HSPyLib
hspylib.main.hspylib.modules.mock
@file: mock_request.py
@created: Tue, 4 May 2021
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior"
@site: https://github.com/yorevs/hspylib
@license: MIT - Please refer to <https://opensource.org/licenses/MIT>
Copyright 2021, HSPyLib team
"""
from typing import Any
from requests.structures import CaseInsensitiveDict
from hspylib.core.enums.charset import Charset
from hspylib.core.enums.content_type import ContentType
from hspylib.core.enums.http_code import HttpCode
from hspylib.core.enums.http_method import HttpMethod
from hspylib.modules.fetch.http_response import HttpResponse
class MockResponse(HttpResponse):
"""TODO"""
def __init__(self,
parent,
method: HttpMethod,
url: str,
status_code: HttpCode = None,
body: str = None,
headers=None,
encoding: Charset = Charset.UTF_8,
content_type=ContentType.APPLICATION_JSON):
super().__init__(method, url, status_code, body, headers, encoding, content_type)
self.parent = parent
self.received_body = False
def then_return(
self,
code: HttpCode,
body: str = None,
headers=None,
encoding: Charset = Charset.UTF_8,
content_type=ContentType.APPLICATION_JSON) -> Any:
"""TODO"""
response = self.parent.mock(self.method, self.url)
response.status_code = code
response.body = body
response.headers = headers if headers else []
response.encoding = encoding
response.content_type = content_type
if response.content_type:
response.content_type.charset = encoding
return self.parent
def then_return_with_received_body(
self,
code: HttpCode,
headers: CaseInsensitiveDict = None,
encoding: Charset = Charset.UTF_8,
content_type=ContentType.APPLICATION_JSON) -> Any:
"""TODO"""
response = self.parent.mock(self.method, self.url)
response.received_body = True
response.body = None
response.status_code = code
response.headers = headers if headers else []
response.encoding = encoding
response.content_type = content_type
if response.content_type:
response.content_type.charset = encoding
return self.parent
|
[
"yorevs@gmail.com"
] |
yorevs@gmail.com
|
8309f762bf07a0defed03dbce33a49f1d8a33ac1
|
3f1fb9704f76f0fa29723267595be1cc68a55248
|
/Alignment/OfflineValidation/test/PrimaryVertexResolution_templ_cfg.py
|
8c2c84451bec65c0380fa4d278e51e02e60b8eaa
|
[
"Apache-2.0"
] |
permissive
|
jeongsumin/cmssw
|
639838651cbaf007a4c3d0df6fa33f705326c95d
|
54acaec3dc59abda01c018920077db98db976746
|
refs/heads/master
| 2021-07-05T15:32:01.271717
| 2020-10-15T04:31:06
| 2020-10-15T04:31:06
| 189,147,548
| 0
| 0
|
Apache-2.0
| 2019-05-29T03:57:00
| 2019-05-29T03:56:59
| null |
UTF-8
|
Python
| false
| false
| 7,514
|
py
|
#! /bin/env cmsRun
'''
cfg to produce pv resolution plots
here doing refit of tracks and vertices using latest alignment
'''
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from fnmatch import fnmatch
import FWCore.ParameterSet.VarParsing as VarParsing
from pdb import set_trace
process = cms.Process("PrimaryVertexResolution")
###################################################################
def best_match(rcd):
###################################################################
'''
find out where to best match the input conditions
'''
print(rcd)
for pattern, string in connection_map:
print(pattern, fnmatch(rcd, pattern))
if fnmatch(rcd, pattern):
return string
options = VarParsing.VarParsing("analysis")
options.register('lumi',
1.,
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.float, # string, int, or float
"luminosity used")
options.register ('outputRootFile',
"pvresolution_YYY_KEY_YYY_XXX_RUN_XXX.root",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"output root file")
options.register ('records',
[],
VarParsing.VarParsing.multiplicity.list, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"record:tag names to be used/changed from GT")
options.register ('external',
[],
VarParsing.VarParsing.multiplicity.list, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"record:fle.db picks the following record from this external file")
options.register ('GlobalTag',
'110X_dataRun3_Prompt_v3',
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"Global Tag to be used")
options.parseArguments()
print("conditionGT : ", options.GlobalTag)
print("conditionOverwrite: ", options.records)
print("external conditions:", options.external)
print("outputFile : ", options.outputRootFile)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr = cms.untracked.PSet(placeholder = cms.untracked.bool(True))
process.MessageLogger.cout = cms.untracked.PSet(INFO = cms.untracked.PSet(
reportEvery = cms.untracked.int32(1000) # every 100th only
# limit = cms.untracked.int32(10) # or limit to 10 printouts...
))
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(150000) )
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration/StandardSequences/Services_cff')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(XXX_FILES_XXX)
)
###################################################################
# Tell the program where to find the conditons
connection_map = [
('Tracker*', 'frontier://PromptProd/CMS_CONDITIONS'),
('SiPixel*', 'frontier://PromptProd/CMS_CONDITIONS'),
('SiStrip*', 'frontier://PromptProd/CMS_CONDITIONS'),
('Beam*', 'frontier://PromptProd/CMS_CONDITIONS'),
]
if options.external:
connection_map.extend(
(i.split(':')[0], 'sqlite_file:%s' % i.split(':')[1]) for i in options.external
)
connection_map.sort(key=lambda x: -1*len(x[0]))
###################################################################
# creat the map for the GT toGet
records = []
if options.records:
for record in options.records:
rcd, tag = tuple(record.split(':'))
records.append(
cms.PSet(
record = cms.string(rcd),
tag = cms.string(tag),
connect = cms.string(best_match(rcd))
)
)
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.GlobalTag.globaltag = options.GlobalTag
#process.GlobalTag.DumpStat = cms.untracked.bool(True)
process.GlobalTag.toGet = cms.VPSet(*records)
process.load("RecoTracker.TrackProducer.TrackRefitters_cff")
# remove the following lines if you run on RECO files
process.TrackRefitter.src = 'ALCARECOTkAlMinBias'
process.TrackRefitter.NavigationSchool = ''
## PV refit
process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi")
from RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi import offlinePrimaryVertices
process.offlinePrimaryVerticesFromRefittedTrks = offlinePrimaryVertices.clone()
process.offlinePrimaryVerticesFromRefittedTrks.TrackLabel = cms.InputTag("TrackRefitter")
process.offlinePrimaryVerticesFromRefittedTrks.vertexCollections.maxDistanceToBeam = 1
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxNormalizedChi2 = 20
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minSiliconLayersWithHits = 5
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Significance = 5.0
# as it was prior to https://github.com/cms-sw/cmssw/commit/c8462ae4313b6be3bbce36e45373aa6e87253c59
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxD0Error = 1.0
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.maxDzError = 1.0
process.offlinePrimaryVerticesFromRefittedTrks.TkFilterParameters.minPixelLayersWithHits = 2
process.PrimaryVertexResolution = cms.EDAnalyzer('SplitVertexResolution',
storeNtuple = cms.bool(False),
intLumi = cms.untracked.double(options.lumi),
vtxCollection = cms.InputTag("offlinePrimaryVerticesFromRefittedTrks"),
trackCollection = cms.InputTag("TrackRefitter"),
minVertexNdf = cms.untracked.double(10.),
minVertexMeanWeight = cms.untracked.double(0.5),
runControl = cms.untracked.bool(True),
runControlNumber = cms.untracked.vuint32(int(XXX_RUN_XXX))
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string(options.outputRootFile),
closeFileFast = cms.untracked.bool(False)
)
process.p = cms.Path(process.offlineBeamSpot +
process.TrackRefitter +
process.offlinePrimaryVerticesFromRefittedTrks +
process.PrimaryVertexResolution)
|
[
"marco.musich@cern.ch"
] |
marco.musich@cern.ch
|
d7c88e4ca86943f69f81f3921df26bdc568e03f7
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2017/shared_code/central_comp/nonfatal/dismod/cascade/varnish.py
|
12362e5c319099abfeea8ab6a7ed8f6bbac1abc4
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,164
|
py
|
import sys
import os
import subprocess
import logging
import upload
import fit_stats
import drill
from db_tools.ezfuncs import query
from cascade_ode.demographics import Demographics
from cascade_ode import importer
from cascade_ode import __version__
from setup_logger import setup_logger
from jobmon import sge
from save_results._save_results import DismodSaveResults
# Set default file mask to readable-for all users
os.umask(0o0002)
AGE_GROUP_SET_ID = 12
def main():
'''Set commit hash, upload model, try to write effects_plots pdfs,
aggregate model version draws up location hierarchy
'''
setup_logger()
mvid = sys.argv[1]
log = logging.getLogger(__name__)
log.info("Varnish started for mvid {}".format(mvid))
try:
try:
commit_hash = sge.get_commit_hash(dir='%s/..' % drill.this_path)
except subprocess.CalledProcessError:
# in site-packages, not git repo
commit_hash = __version__
upload.set_commit_hash(mvid, commit_hash)
upload.upload_model(mvid)
outdir = "%s/%s/full" % (
drill.settings['cascade_ode_out_dir'],
str(mvid))
joutdir = "%s/%s" % (drill.settings['diag_out_dir'], mvid)
fit_stats.write_fit_stats(mvid, outdir, joutdir)
upload.upload_fit_stat(mvid)
# Write effect PDFs
plotter = "{}/effect_plots.r".format(drill.this_path)
plotter = os.path.realpath(plotter)
demo = Demographics()
try:
subprocess.check_output([
"FILEPATH",
plotter,
str(mvid),
joutdir,
drill.settings['cascade_ode_out_dir'],
str(max(demo.year_ids))],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
log.exception("Error in effect plots")
# Launch final aggregations
log.info("Starting Save Results")
aggregate_model(mvid, demo=demo)
except Exception:
log.exception("Error in varnish")
raise
def aggregate_model(mvid, demo):
'''call save_results to create location aggregates,
upload summaries to epi.model_estimate_final,
mark model as finished'''
agg_args = get_aggregation_arguments(mvid, demo)
dsr = DismodSaveResults(
input_dir=agg_args['input_dir'],
input_file_pattern=agg_args['input_file_pattern'],
model_version_id=mvid,
modelable_entity_id=agg_args['modelable_entity_id'],
description=agg_args['description'],
year_id=agg_args['year_id'],
sex_id=agg_args['sex_id'],
measure_id=agg_args['measure_id'],
db_env=agg_args['db_env'],
gbd_round_id=agg_args['gbd_round_id'],
birth_prevalence=agg_args['birth_prevalence'])
dsr.run()
return dsr
def get_aggregation_arguments(mvid, demo):
casc = drill.Cascade(
mvid, root_dir=drill.settings['cascade_ode_out_dir'],
reimport=False)
mvm = casc.model_version_meta
db_env = drill.settings['env_variables']['ENVIRONMENT_NAME']
agg_args = {}
agg_args['input_dir'] = os.path.join(casc.root_dir, 'draws')
agg_args['input_file_pattern'] = '{location_id}_{year_id}_{sex_id}.h5'
agg_args['modelable_entity_id'] = mvm.modelable_entity_id.item()
agg_args['description'] = mvm.description.item()
agg_args['year_id'] = demo.year_ids
agg_args['sex_id'] = demo.sex_ids
agg_args['measure_id'] = get_measures_from_casc(casc)
agg_args['db_env'] = db_env
agg_args['gbd_round_id'] = demo.gbd_round_id
agg_args['birth_prevalence'] = mvm.birth_prev.fillna(0).replace(
{0: False, 1: True}).item()
return agg_args
def get_measures_from_casc(casc):
measure_only = casc.model_version_meta.measure_only
if measure_only.notnull().all():
return measure_only.item()
q = "select measure_id from shared.measure where measure in ('{}')".format(
"', '".join(importer.integrand_pred))
df = query(q, conn_def="epi")
return sorted(df.measure_id.tolist())
if __name__ == '__main__':
main()
|
[
"nsidles@uw.edu"
] |
nsidles@uw.edu
|
e5e2e59c9ab3d23f95baa947c2fa5b9cadd2f20c
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01_preview/models/_network_management_client_enums.py
|
928973d87b1432a5f248e44814aab212aca2c8e4
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,160
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class AccessRuleDirection(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Direction that specifies whether the access rules is inbound/outbound."""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class AddressPrefixType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Address prefix type."""
IP_PREFIX = "IPPrefix"
SERVICE_TAG = "ServiceTag"
class AdminRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
class AssociationAccessMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Access mode on the association."""
LEARNING = "Learning"
ENFORCED = "Enforced"
AUDIT = "Audit"
class ConfigurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Configuration Deployment Type."""
SECURITY_ADMIN = "SecurityAdmin"
SECURITY_USER = "SecurityUser"
CONNECTIVITY = "Connectivity"
class ConnectivityTopology(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Connectivity topology type."""
HUB_AND_SPOKE = "HubAndSpoke"
MESH = "Mesh"
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type of identity that created the resource."""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class DeleteExistingNSGs(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if need to delete existing network security groups."""
FALSE = "False"
TRUE = "True"
class DeleteExistingPeering(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if need to remove current existing peerings."""
FALSE = "False"
TRUE = "True"
class DeploymentStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Deployment Status."""
NOT_STARTED = "NotStarted"
DEPLOYING = "Deploying"
DEPLOYED = "Deployed"
FAILED = "Failed"
class EffectiveAdminRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
class EffectiveUserRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
class GroupConnectivity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Group connectivity type."""
NONE = "None"
DIRECTLY_CONNECTED = "DirectlyConnected"
class IsGlobal(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if global mesh is supported."""
FALSE = "False"
TRUE = "True"
class MembershipType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Membership Type."""
STATIC = "Static"
DYNAMIC = "Dynamic"
class NspLinkStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The NSP link state."""
APPROVED = "Approved"
PENDING = "Pending"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
class NspProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The current provisioning state."""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
ACCEPTED = "Accepted"
FAILED = "Failed"
class ProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The current provisioning state."""
SUCCEEDED = "Succeeded"
UPDATING = "Updating"
DELETING = "Deleting"
FAILED = "Failed"
class SecurityConfigurationRuleAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether network traffic is allowed or denied."""
ALLOW = "Allow"
DENY = "Deny"
ALWAYS_ALLOW = "AlwaysAllow"
class SecurityConfigurationRuleDirection(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The direction of the rule. The direction specifies if the rule will be evaluated on incoming or
outgoing traffic.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class SecurityConfigurationRuleProtocol(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Network protocol this rule applies to."""
TCP = "Tcp"
UDP = "Udp"
ICMP = "Icmp"
ESP = "Esp"
ANY = "Any"
AH = "Ah"
class SecurityType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Security Type."""
ADMIN_POLICY = "AdminPolicy"
USER_POLICY = "UserPolicy"
class UseHubGateway(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Flag if need to use hub gateway."""
FALSE = "False"
TRUE = "True"
class UserRuleKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Whether the rule is custom or default."""
CUSTOM = "Custom"
DEFAULT = "Default"
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
a0a6f63d91b35f4e1b0adeff1d440b64e0536984
|
3cdd7019f3acbf7b7a7e879444454703fcc73d62
|
/solutions/319.bulb-switcher.py
|
49544fa00a51fc10ac7fe2b65b52d85034bc6e67
|
[] |
no_license
|
quixoteji/Leetcode
|
1dc2e52e53a7b58d9bae15ce2d5c4142cbd365af
|
00bf9a8164008aa17507b1c87ce72a3374bcb7b9
|
refs/heads/master
| 2021-07-15T07:59:21.294297
| 2020-05-13T03:08:47
| 2020-05-13T03:08:47
| 138,812,553
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
#
# @lc app=leetcode id=319 lang=python3
#
# [319] Bulb Switcher
#
# @lc code=start
class Solution:
def bulbSwitch(self, n: int) -> int:
return self.sol1(n)
def sol1(self, n) :
# 0 : off 1 : on
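# a bulb ends up on iff its position is toggled an odd number of times,
# i.e. iff it is a perfect square, so the answer is floor(sqrt(n))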
ons = 1
while ons * ons <= n : ons += 1
return ons-1
# @lc code=end
|
[
"jxj405@case.edu"
] |
jxj405@case.edu
|
9165e19f93dedecb81d6600c76cdff8a1644df31
|
262311e60529868e38c2c57ee3db573f8e11c458
|
/qa-automated/pages/app/guide_page.py
|
4c174083b2c451704f06adb30ab6f1ce1a034b09
|
[] |
no_license
|
huileizhan227/untitled
|
1c5604736d9ffcce6f7cb7e308cdc0ebd07e116a
|
07df74c89291b1664a28e3c8dcba51a917f1835f
|
refs/heads/master
| 2023-01-27T11:51:37.609210
| 2020-04-16T11:49:59
| 2020-04-16T11:49:59
| 150,606,504
| 1
| 0
| null | 2023-01-09T12:00:12
| 2018-09-27T15:12:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 380
|
py
|
from poium import Page
from poium import PageElement
from poium import PageElements
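# page object for the guide screen: skip button, topic name chips, confirm button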
class GuidPage(Page):
skip_btn = PageElement(id_='com.transsnet.news.more:id/skip')
topic_btn_list = PageElements(xpath='//*[@id="com.transsnet.news.more:id/recycler"]//*[@id="com.transsnet.news.more:id/name"]')
confirm_btn = PageElement(id_='com.transsnet.news.more:id/select_btn')
|
[
"374826581@qq.com"
] |
374826581@qq.com
|
a17a0329e2df352957ffa0acef086b440581e622
|
c0717724c7dc3937252bb4a7bd7c796088db4c5d
|
/solutions/judgingmoose.py
|
58fb1ab1e281b3934eb20ba684504c74ec01dce3
|
[] |
no_license
|
matthew-cheney/kattis-solutions
|
58cd03394ad95e9ca7ffa3de66b69d90647b31ff
|
d9397ca4715a3ad576046a62bdd6c0fb9542d838
|
refs/heads/main
| 2023-01-24T12:49:18.871137
| 2020-12-10T04:10:48
| 2020-12-10T04:10:48
| 318,857,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
l, r = [int(x) for x in input().split(' ')]
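# 0/0 tines: not a moose; unequal: odd-pointed (scored by the larger side); equal: even-pointed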
if l == 0 and r == 0:
print('Not a moose')
elif l != r:
print(f'Odd {2 * max(l, r)}')
else:
print(f'Even {2 * l}')
|
[
"m.cheney95@outlook.com"
] |
m.cheney95@outlook.com
|
bfd7447e28cd315f542b2e4c3883455d18c758b4
|
55dc6e337e634acb852c570274a1d0358b7300a5
|
/tests/extension/resolver_/single_module/test_resolver_single_module.py
|
91d5af2b94b4d6d03772bf6913b03b34a1fe7c9a
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
fifoteam/veriloggen
|
97ad45671f053c85f495b08a030f735fd9822146
|
23cb7251c0f126d40d249982cad33ef37902afef
|
refs/heads/master
| 2020-05-27T00:28:37.575411
| 2017-02-20T01:47:00
| 2017-02-20T01:47:00
| 82,518,602
| 2
| 0
| null | 2017-02-20T05:02:37
| 2017-02-20T05:02:37
| null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import resolver_single_module
expected_verilog = """
module blinkled #
(
parameter WIDTH = 8,
parameter INC = 1
)
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [18-1:0] count;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
if(count == 1023) begin
count <= 0;
end else begin
count <= count + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 0;
end else begin
if(count == 1023) begin
LED <= LED + 1;
end
end
end
endmodule
"""
def test():
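# build the module, emit Verilog, and check it matches the expected code after a pyverilog round trip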
veriloggen.reset()
test_module = resolver_single_module.mkLed()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
b78531b1f1e38104f41d807a2b3eaea73882c0fe
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/train/python/e658c13140c18d36a72cabf611a7dcaca154d8deurls.py
|
e658c13140c18d36a72cabf611a7dcaca154d8de
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'acidentes_em_rodovias.controller.home', name='home'),
# url(r'^acidentes_em_rodovias/', include('acidentes_em_rodovias.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^acidentes_rodovias/$', 'app.controller.index_controller.index'),
url(r'^acidentes_rodovias/regiao$', 'app.controller.consultabasica_regiao_controller.consulta_por_regiao'),
url(r'^acidentes_rodovias/periodo$', 'app.controller.consultabasica_periodo_controller.consulta_por_periodo'),
url(r'^acidentes_rodovias/municipios-regiao$', 'app.controller.consultabasica_regiao_controller.consulta_municipios_na_regiao'),
url(r'^acidentes_rodovias/consulta/municipio$', 'app.controller.consultabasica_regiao_controller.consulta_ocorrencias_por_municipio'),
url(r'^acidentes_rodovias/consulta/periodo$', 'app.controller.consultabasica_periodo_controller.consulta_ocorrencias_por_periodo'),
url(r'^acidentes_rodovias/estatisticas/tipos-acidentes$', 'app.controller.estatisticas_tipos_controller.tipos_acidentes'),
url(r'^acidentes_rodovias/estatisticas/causas-acidentes$', 'app.controller.estatisticas_causas_controller.causas_acidentes'),
url(r'^acidentes_rodovias/estatisticas/ocorrencias-envolvidos$', 'app.controller.estatisticas_envolvidos_controller.ocorrencias_e_envolvidos'),
url(r'^acidentes_rodovias/estatisticas/acidentes-sexo$', 'app.controller.estatisticas_envolvidos_controller.acidentes_sexo'),
url(r'^acidentes_rodovias/estatisticas/br$', 'app.controller.estatisticas_br_controller.acidentes_br'),
url(r'^acidentes_rodovias/estatisticas/uf$', 'app.controller.estatisticas_uf_controller.acidentes_uf'),
)
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
c142dee0cde1b43a81c0d5697afeee12e008eb37
|
243d0543f8d38f91954616c014456122292a1a3c
|
/CS1/0320_herbivwar/draft04/critterAI1.py
|
90f7ec363bc8911db4de6b7fb77c35166c061e2a
|
[
"MIT"
] |
permissive
|
roni-kemp/python_programming_curricula
|
758be921953d82d97c816d4768fbcf400649e969
|
eda4432dab97178b4a5712b160f5b1da74c068cb
|
refs/heads/master
| 2023-03-23T13:46:42.186939
| 2020-07-15T17:03:34
| 2020-07-15T17:03:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,944
|
py
|
import random, critter
class CritterAI1(critter.Critter):
def __init__(self, screen, row, col, image, team, name):
super().__init__(screen, row, col, image, team, name)
def reproduce(self, critter_cells):
'''You have to have this in each critter child
class so that CritterAIs can reproduce with the
same AI and not with a generic critter that
happens to share the same image!'''
new_child = CritterAI1(self.screen, self.row, self.col, self.original_image, self.team, self.name)
self.customReproduce(critter_cells, new_child)
def takeAction(self, board, critter_cells):
'''TODO: Students write code here.
Each creature only takes one action per turn.
The action is the value returned, one of:
'reproduce', 'eat', 'attack', 'left',
'right', 'move', 'rest'
Fatigue is the amount of energy consumed by
each action.
#how much fatigue do I have?
print(self.fatigue)
print("losing "+str(self.fatigue)+" energy this turn")
#If fatigue is too high, reset fatigue to zero
if self.fatigue > 4:
return 'rest'
The eat action consumes all the energy at the
current location.
#If there is lots of energy here, then eat.
if self.energyHere(board) > 10:
return 'eat'
#Eat if I am low on energy
if self.energy < 3:
return 'eat'
#Don't walk out of bounds
if self.aheadInBounds(critter_cells):
return 'move'
else:
return 'right'
#Check to make sure ahead is inbounds and
#not blocked by another creature before
#reproducing.
if self.aheadInBounds(critter_cells) and self.getCritterAhead(critter_cells)==None:
return 'reproduce'
#If there is an enemy ahead, attack them.
other_critter = self.getCritterAhead(critter_cells)
if other_critter != None and other_critter.team != self.team:
return 'attack'
#Choose one of three random actions
r = random.randint(0,2)
if r==0:
return 'left'
elif r==1:
return 'right'
else:
return 'rest'
'''
#reproduce
if self.energy > 4:
if self.aheadInBounds(critter_cells) and self.getCritterAhead(critter_cells)==None:
return 'reproduce'
#rest
if self.fatigue > 1:
return 'rest'
#eat
if self.energyHere(board) > 4:
return 'eat'
#fight
other = self.getCritterAhead(critter_cells)
if other!=None and other.team!=self.team:
return 'attack'
#Otherwise random
r = random.randint(0,2)
if r==0:
return 'left'
elif r==1:
return 'right'
else:
return 'rest'
|
[
"neal.holts@gmail.com"
] |
neal.holts@gmail.com
|
4685ca9231aef2f67fedb4b4c61bf9b489279acf
|
9cd27764abf5412dffd351a0a4956075191dd67e
|
/day 2/rename_file.py
|
932ad167449c991f82661fd48217b3e1ca8221aa
|
[] |
no_license
|
EHwooKim/Start_camp
|
1cebb6775f68af1ca6ecaf37bc8ad4d0864e77de
|
d582e20ce1a3ccb9cd31422b3d5be3b64c2627d5
|
refs/heads/master
| 2020-06-17T15:26:14.898280
| 2019-08-05T00:12:59
| 2019-08-05T00:12:59
| 195,963,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
import os
# 1. Move into the dummy folder.
os.chdir('./dummy')
print(os.getcwd())
# 2. Rename the files one by one. => loop
files = os.listdir('.')
print(type(files))
#for file in files:
# os.rename(file, f'SAMSUNG_{file}')
# 3. Should have prefixed SSAFY, not SAMSUNG!
for file in files:
os.rename(file, file.replace('SAMSUNG_SAMSUNG','SSAFY'))
|
[
"ehwoo0707@naver.com"
] |
ehwoo0707@naver.com
|
057ae25723d470ecd9c4657c06e722e7cb93acd7
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/50/usersdata/111/17678/submittedfiles/contido.py
|
67535e3f4f65734c36914824446b50ff8537f736
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def incluso(a,b):
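# True if at least one element of a also appears in b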
cont=0
for i in range(0,len(a),1):
if a[i] in b:
cont = cont +1
if cont!=0:
return True
else:
return False
n=input('Tamanho da lista : ')
a=[]
for i in range(0,n,1):
a.append(input('Elementos da lista1: '))
n2=input('Tamanho da lista : ')
b=[]
for i in range(0,n2,1):
b.append(input('Elementos da lista2: '))
if incluso(a,b):
print('Incluso')
else:
print('Nao incluso')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
4c371b4f7afd64a5b4c2bd58df54c3b208d71660
|
55a4cd36bbea38fda0792020225d849dbc23f4a8
|
/initUserState.py
|
866f870de02309f8d88e1dc455f61454eb88a387
|
[] |
no_license
|
liyonghelpme/findAMatch
|
4e4e4960715292e14aa59bbec5407d38562fb4c5
|
3e8d96106247b72c4b89412ab290d4f6462bdcea
|
refs/heads/master
| 2021-01-19T08:10:31.909865
| 2013-05-04T11:06:16
| 2013-05-04T11:06:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
#coding:utf8
import MySQLdb
import random
myCon = MySQLdb.connect(host='localhost', passwd='badperson3', db='UserMatch', user='root', charset='utf8')
for i in xrange(0, 100):
sql = 'insert into UserState (uid, shieldTime, attackTime, onlineTime, score) values(%d, %d, %d, %d, %d)' % (i, 0, 0, 0, random.randint(0, 1000))
myCon.query(sql)
# test data: users in shield (protected) state
# users in attack state
# users in online state
myCon.commit()
myCon.close()
|
[
"liyonghelpme@gmail.com"
] |
liyonghelpme@gmail.com
|
bd679510fd69971fb1346300fec7f9ac0725b03b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/453/usersdata/281/109875/submittedfiles/programa.py
|
22338be239a906640e2e45b03fa532fc2036ad96
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# -*- coding: utf-8 -*-
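# reads an n x n board and prints the largest value of row sum + column sum - 2*cell over all cells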
n=int(input('Digite a dimensão do tabuleiro: '))
m=[]
for i in range (0,n,1):
m_linha=[]
for j in range(0,n,1):
m_linha.append(int(input('Digite o número (%d,%d) de entrada: '% (i+1,j+1))))
m.append(m_linha)
soma_linha=[]
for i in range(0,n,1):
c=0
for j in range(0,n,1):
c=c+m[i][j]
soma_linha.append(c)
soma_coluna=[]
for j in range(0,n,1):
c2=0
for i in range(0,n,1):
c2=c2+m[i][j]
soma_coluna.append(c2)
peça=0
for i in range(0,n,1):
for j in range(0,n,1):
if soma_linha[i]+soma_coluna[j]-2*m[i][j]>peça:
peça=soma_linha[i]+soma_coluna[j]-2*m[i][j]
print(peça)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5acb43c07eab61bc8a8fd5259359e49af52e3a64
|
bbb21bb79c8c3efbad3dd34ac53fbd6f4590e697
|
/restAPI/manage.py
|
6f0f73c179c0948e0594011d582e9bccf06ad320
|
[] |
no_license
|
Nusmailov/BFDjango
|
b14c70c42da9cfcb68eec6930519da1d0b1f53b6
|
cab7f0da9b03e9094c21efffc7ab07e99e629b61
|
refs/heads/master
| 2020-03-28T21:11:50.706778
| 2019-01-21T07:19:19
| 2019-01-21T07:19:19
| 149,136,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restAPI.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"nusmailov@gmail.com"
] |
nusmailov@gmail.com
|
050629c4fcfc80b4ca7aa69dbbfa34c66605e8f1
|
76050b0002dac757866a9fb95dc199918da665bb
|
/examples/multiagent/multigrid/run_multigrid.py
|
0b5ef0a06d99e8fcd41152dacded9ea9f749c7e6
|
[
"Apache-2.0"
] |
permissive
|
RaoulDrake/acme
|
2829f41688db68d694da2461d301fd6f9f27edff
|
97c50eaa62c039d8f4b9efa3e80c4d80e6f40c4c
|
refs/heads/master
| 2022-12-29T01:16:44.806891
| 2022-12-21T14:09:38
| 2022-12-21T14:10:06
| 300,250,466
| 0
| 0
|
Apache-2.0
| 2020-10-01T11:13:03
| 2020-10-01T11:13:02
| null |
UTF-8
|
Python
| false
| false
| 4,123
|
py
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiagent multigrid training run example."""
from typing import Callable, Dict
from absl import flags
from acme import specs
from acme.agents.jax.multiagent import decentralized
from absl import app
import helpers
from acme.jax import experiments
from acme.jax import types as jax_types
from acme.multiagent import types as ma_types
from acme.utils import lp_utils
from acme.wrappers import multigrid_wrapper
import dm_env
import launchpad as lp
FLAGS = flags.FLAGS
_RUN_DISTRIBUTED = flags.DEFINE_bool(
'run_distributed', True, 'Should an agent be executed in a distributed '
'way. If False, will run single-threaded.')
_NUM_STEPS = flags.DEFINE_integer('num_steps', 10000,
'Number of env steps to run training for.')
_EVAL_EVERY = flags.DEFINE_integer('eval_every', 1000,
'How often to run evaluation.')
_ENV_NAME = flags.DEFINE_string('env_name', 'MultiGrid-Empty-5x5-v0',
'What environment to run.')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 64, 'Batch size.')
_SEED = flags.DEFINE_integer('seed', 0, 'Random seed.')
def _make_environment_factory(env_name: str) -> jax_types.EnvironmentFactory:
def environment_factory(seed: int) -> dm_env.Environment:
del seed
return multigrid_wrapper.make_multigrid_environment(env_name)
return environment_factory
def _make_network_factory(
agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent]
) -> Callable[[specs.EnvironmentSpec], ma_types.MultiAgentNetworks]:
def environment_factory(
environment_spec: specs.EnvironmentSpec) -> ma_types.MultiAgentNetworks:
return decentralized.network_factory(environment_spec, agent_types,
helpers.init_default_multigrid_network)
return environment_factory
def build_experiment_config() -> experiments.ExperimentConfig[
ma_types.MultiAgentNetworks, ma_types.MultiAgentPolicyNetworks,
ma_types.MultiAgentSample]:
"""Returns a config for multigrid experiments."""
environment_factory = _make_environment_factory(_ENV_NAME.value)
environment = environment_factory(_SEED.value)
agent_types = {
str(i): decentralized.DefaultSupportedAgent.PPO
for i in range(environment.num_agents) # pytype: disable=attribute-error
}
# Example of how to set custom sub-agent configurations.
ppo_configs = {'unroll_length': 16, 'num_minibatches': 32, 'num_epochs': 10}
config_overrides = {
k: ppo_configs for k, v in agent_types.items() if v == 'ppo'
}
configs = decentralized.default_config_factory(agent_types, _BATCH_SIZE.value,
config_overrides)
builder = decentralized.DecentralizedMultiAgentBuilder(
agent_types=agent_types, agent_configs=configs)
return experiments.ExperimentConfig(
builder=builder,
environment_factory=environment_factory,
network_factory=_make_network_factory(agent_types=agent_types),
seed=_SEED.value,
max_num_actor_steps=_NUM_STEPS.value)
def main(_):
config = build_experiment_config()
if _RUN_DISTRIBUTED.value:
program = experiments.make_distributed_experiment(
experiment=config, num_actors=4)
lp.launch(program, xm_resources=lp_utils.make_xm_docker_resources(program))
else:
experiments.run_experiment(
experiment=config, eval_every=_EVAL_EVERY.value, num_eval_episodes=5)
if __name__ == '__main__':
app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
e7cb34776c3c785a60bea9c5a22fb9ffeb4f16e9
|
6220d04a60dae0e44ba2232cba6c79114bf2fd3f
|
/test_remote_project/test_remote_project/urls.py
|
565409fca290c5720a80e2fa65d99ec0a96bd81b
|
[
"MIT"
] |
permissive
|
ollytheninja/django-autocomplete-light
|
bc65e92ffddbe1332a59bf4d6248976e8c0542b4
|
6e4723c4a75870e7421cb5656248ef2b61eeeca6
|
refs/heads/master
| 2021-01-21T03:50:46.975650
| 2015-10-07T23:20:47
| 2015-10-07T23:20:47
| 44,034,996
| 0
| 1
|
MIT
| 2020-10-01T19:22:48
| 2015-10-11T01:36:02
|
Python
|
UTF-8
|
Python
| false
| false
| 761
|
py
|
from django.conf.urls import patterns, include, url
import autocomplete_light
autocomplete_light.autodiscover()
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'test_project.views.home', name='home'),
# url(r'^test_project/', include('test_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^autocomplete/', include('autocomplete_light.urls')),
url(r'^navigation/', include('navigation_autocomplete.urls')),
)
|
[
"jamespic@gmail.com"
] |
jamespic@gmail.com
|
cccc7cd7cbbfc831f29622384dbebe04357cdfdf
|
d5682d2ef13ad63c68d59d3d0706853a88035ff1
|
/week3/snmp_app.py
|
b8ca881ab0a218a4afdf708477df3f8fd7f7a8a6
|
[
"Apache-2.0"
] |
permissive
|
mikealford/ktbyers_automation
|
66467f5352a3fbb111fc18f9c90b83cf97a75e79
|
d8b30e7ddbe27b4bc62b74bfc051b6d1c099f7f9
|
refs/heads/master
| 2020-04-17T19:37:42.365653
| 2019-02-19T01:16:41
| 2019-02-19T01:16:41
| 166,872,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import snmp_helper
IP = '192.168.122.172'
COMMUNITY_STRING = 'galileo1'
SNMP_PORT = 161
pynet_rtr1 = (IP, COMMUNITY_STRING, SNMP_PORT)
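# OID 1.3.6.1.2.1.1.5.0 is sysName.0, the device's configured host name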
snmp_data = snmp_helper.snmp_get_oid(pynet_rtr1, oid='1.3.6.1.2.1.1.5.0')
output = snmp_helper.snmp_extract(snmp_data)
print(output)
|
[
"mike.alford13@gmail.com"
] |
mike.alford13@gmail.com
|
db966fb201c69d1a48e53305e1b2f6e3e7545f15
|
d8cf5130deaf3bfba0e5b9d326012fbcddd18fb2
|
/embed/models/position_embedding_heads/avg_embedding_head.py
|
7600e1b37195e715a9b593977585a70e4af69f1a
|
[] |
no_license
|
xmyqsh/embed
|
0ed297e636fb9d9db20dd908f92ef6212b9c3549
|
46d07ddf6c5908bdfff86ca3203c325ad34423dc
|
refs/heads/master
| 2023-06-11T08:25:37.733238
| 2021-06-30T10:41:24
| 2021-06-30T10:41:24
| 379,320,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,392
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from embed.cv.cnn import ConvModule, bias_init_with_prob, normal_init
from embed.cv.runner import auto_fp16
from embed.core.utils import multi_apply
from embed.models import POSITION_EMBEDDING_HEADS
from .base_position_embedding_head import BasePositionEmbeddingHead
import numpy as np
@POSITION_EMBEDDING_HEADS.register_module()
class AvgEmbeddingHead(BasePositionEmbeddingHead):
r"""Average Embedding Head.
Args:
in_channels (int):
num_classes (int):
scale_ranges (List[Tuple]):
thres (float):
"""
def __init__(self,
ignore_val=255,
*args,
**kwargs):
super(AvgEmbeddingHead, self).__init__(*args, **kwargs)
self.ignore_val = ignore_val
# TODO(ljm) add this param into a proper place
self.scale_factor = 1. / 4
def forward_train(self, x, pred_weights, gt_semantic_seg):
pred_regions = self(x)
gt_scoremap, gt_sem_label, gt_sem_mask, gt_sem_class, num_sts = \
self.get_targets(pred_regions, gt_semantic_seg)
loss_pos_st = self.loss(pred_regions, gt_scoremap)
gt_guided_idx_feat_sts = self.get_gt_guided_positions(pred_weights,
gt_sem_mask, num_sts)
return dict(loss_pos_st=loss_pos_st), \
dict(gt_guided_idx_feat_sts=gt_guided_idx_feat_sts,
gt_sem_label=gt_sem_label,
gt_sem_class=gt_sem_class,
num_sts=num_sts)
def get_targets(self, pred_regions, gt_semantic_seg):
gt_semantic_seg[gt_semantic_seg == self.ignore_val] = self.num_classes
gt_semantic_seg = F.one_hot(gt_semantic_seg.squeeze(1).long(),
num_classes=self.num_classes + 1)[..., :-1]
gt_semantic_seg = gt_semantic_seg.permute(0, 3, 1, 2).float().contiguous()
'''
gt_semantic_seg = F.interpolate(gt_semantic_seg, scale_factor=self.scale_factor,
mode='bilinear',
align_corners=False).clamp(max=1.0)
'''
return multi_apply(self.get_target_single_level, pred_regions,
gt_semantic_seg=gt_semantic_seg)
def get_target_single_level(self, pred_region, gt_semantic_seg):
gt_scoremap = F.interpolate(gt_semantic_seg, size=pred_region.shape[-2:],
mode='bilinear',
align_corners=False).clamp(max=1.0)
gt_scoremap[gt_scoremap < 0.5] = 0.0
gt_assign_mask = gt_scoremap.reshape(*gt_scoremap.shape[:-2], -1).sum(dim=-1) > 0
gt_sem_label, gt_sem_mask, gt_sem_class, num_sts = \
multi_apply(self.get_target_single_image, gt_semantic_seg, gt_scoremap, gt_assign_mask)
return gt_scoremap, gt_sem_label, gt_sem_mask, gt_sem_class, num_sts
def get_target_single_image(self, gt_semantic_seg, gt_scoremap, gt_assign_mask):
gt_sem_class = torch.nonzero(gt_assign_mask, as_tuple=False).squeeze(-1)
num_sem = gt_assign_mask.sum().item()
gt_sem_label = gt_semantic_seg[gt_assign_mask]
gt_sem_mask = gt_scoremap[gt_assign_mask].bool().float()
return gt_sem_label, gt_sem_mask, gt_sem_class, num_sem
def loss(self, pred_regions, gt_scoremap):
return list(map(self.loss_single_level, pred_regions, gt_scoremap))
def loss_single_level(self, pred_region, gt_scoremap):
b, c = pred_region.shape[:2]
loss_pos = self.loss_pos(pred_region, gt_scoremap, reduction_override='none')
loss_pos = loss_pos.reshape(b, c, -1).mean(dim=-1)
loss_pos = loss_pos.sum() / b
return loss_pos
def get_gt_guided_positions(self, pred_weights, gt_sem_mask, num_sts):
return list(map(self.get_gt_guided_position_single_level, pred_weights,
gt_sem_mask,
num_sts))
def get_gt_guided_position_single_level(self, pred_weight, gt_sem_mask, num_sts):
idx_feat_sts = list(map(lambda a, b: a.unsqueeze(0) * b.unsqueeze(1),
pred_weight, gt_sem_mask))
idx_feat_st = torch.cat(idx_feat_sts, dim=0)
idx_feat_st = F.adaptive_avg_pool2d(idx_feat_st, output_size=1).squeeze(-1).squeeze(-1)
return torch.split(idx_feat_st, num_sts, dim=0)
def get_positions(self, pred_regions, pred_weights):
return multi_apply(self.get_position_single_level, pred_regions, pred_weights)
def get_position_single_level(self, pred_region, pred_weight):
pred_region = pred_region.sigmoid()
pred_cate = pred_region.argmax(dim=1)
pred_st_mask = F.one_hot(pred_cate, num_classes=self.num_classes)
pred_st_mask = pred_st_mask.permute(0, 3, 1, 2).contiguous()
score_st = (pred_region * pred_st_mask).reshape(*pred_region.shape[:2], -1)
idx_feat_sts, class_sts, score_sts, num_sts = \
multi_apply(self.get_position_single_image, pred_cate,
pred_st_mask,
score_st,
pred_weight)
idx_feat_st = torch.cat(idx_feat_sts, dim=0)
idx_feat_st = F.adaptive_avg_pool2d(idx_feat_st, output_size=1).squeeze(-1).squeeze(-1)
idx_feat_sts = torch.split(idx_feat_st, num_sts, dim=0)
return idx_feat_sts, class_sts, score_sts, num_sts
def get_position_single_image(self, pred_cate, pred_st_mask, score_st, pred_weight):
class_st, num_class_st = torch.unique(pred_cate, return_counts=True)
score_st = (score_st[class_st].sum(dim=-1) / num_class_st)
pred_st_mask = pred_st_mask[class_st]
keep = score_st > self.thres
class_st, score_st, pred_st_mask = class_st[keep], score_st[keep], pred_st_mask[keep]
num_st = keep.sum()
idx_feat_st = pred_st_mask.unsqueeze(1) * pred_weight.unsqueeze(0)
return idx_feat_st, class_st, score_st, num_st
|
[
"xmyqsh@gmail.com"
] |
xmyqsh@gmail.com
|
c607e65351a974ba2577b564935d2ab18480067d
|
d039f2a5bbf15cb8bfb99e3b67354a23f2065745
|
/sisco1/app1/migrations/0001_initial.py
|
5d6ceeea2fd02745addadf912f0c2a3ff4d28b0e
|
[] |
no_license
|
prasadnaidu1/django
|
22ad934eaee1079fbcd0a988245bdc18789a48f1
|
b64a341d7c8916dd8000d85b738241c0b369b229
|
refs/heads/master
| 2020-04-12T19:21:42.565686
| 2019-04-18T16:35:16
| 2019-04-18T16:35:16
| 162,707,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# Generated by Django 2.1.1 on 2018-10-18 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='friends',
fields=[
('entry', models.IntegerField(default=10, primary_key=True, serialize=False)),
('date', models.DateField()),
('amount', models.DecimalField(decimal_places=2, max_digits=10)),
('members', models.CharField(max_length=50)),
],
),
]
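# A hedged how-to note (standard Django workflow, not part of this file):
# apply this migration with `python manage.py migrate app1`; the app label
# matches this migration's package.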
|
[
"you@example.com"
] |
you@example.com
|
87c583b7c77c5623c85ce162a31ae8d56854f57f
|
d5ed141e513dcb6fc8ab851835ec9a4630e3651b
|
/anaconda/anaconda/lib/python2.7/site-packages/anaconda_navigator/widgets/dialogs/tests/test_update_dialog.py
|
133fa37a1c4ac99aae7e8bf6af80e4f6edc4f01d
|
[
"Python-2.0"
] |
permissive
|
starrysky1211/starrysky
|
713998b366449a5ae4371e38723c56ea40532593
|
abb642548fb9b431551133657f1a67858041a7e6
|
refs/heads/master
| 2022-11-09T21:51:22.558151
| 2017-02-25T14:42:37
| 2017-02-25T14:42:37
| 67,608,074
| 0
| 1
| null | 2022-10-16T05:17:25
| 2016-09-07T13:16:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016 Continuum Analytics, Inc.
#
# May be copied and distributed freely only as part of an Anaconda or
# Miniconda installation.
# -----------------------------------------------------------------------------
"""
Tests for update dialog.
"""
# Third party imports
from qtpy.QtCore import Qt # analysis:ignore
import pytest
import pytestqt.qtbot as qtbot # analysis:ignore
# Local imports
from anaconda_navigator.widgets.dialogs.update import DialogUpdateApplication
@pytest.fixture
def updatedialog(qtbot):
widget = DialogUpdateApplication("1.0")
widget.show()
qtbot.addWidget(widget)
return qtbot, widget
class TestUpdateDialog:
def test_yes(self, updatedialog):
qtbot, widget = updatedialog
with qtbot.waitSignal(widget.accepted, 1000, True):
qtbot.mouseClick(widget.button_yes, Qt.LeftButton)
def test_no(self, updatedialog):
qtbot, widget = updatedialog
with qtbot.waitSignal(widget.rejected, 1000, True):
qtbot.mouseClick(widget.button_no, Qt.LeftButton)
|
[
"starry_sky_@outlook.com"
] |
starry_sky_@outlook.com
|
013dd0590c4d0f506c064b1f272085f45e765844
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2541/60698/283571.py
|
dff37af14ff1eafccf659e6c4b6fba4047e355ac
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
def test():
n = int(input())
matches = list(eval(input()))
segList = []
for match in matches:
if not getSegList(segList, match):
print('[]')
return
res = []
for seg in segList:
topology(res, seg)
for i in range(0, n):
if i not in res:
res.append(i)
print(res)
# Append the edge (prerequisite -> course) to an existing chain, or start a new
# one; return False when the pair directly contradicts an existing chain.
def getSegList(segList, match) -> bool:
thisCourse = match[0]
beforeCourse = match[1]
if segList == []:
segList.append([beforeCourse, thisCourse])
return True
else:
for i in range(0, len(segList)):
seg = list(segList[i])
for j in range(0, len(seg)):
for k in range(0,j):
if seg[k]==thisCourse and seg[j]==beforeCourse:
return False
if seg[-1] == beforeCourse:
seg.append(thisCourse)
segList.pop(i)
segList.insert(i,seg)
return True
segList.append([beforeCourse, thisCourse])
return True
# Merge one chain into the running order, keeping each prerequisite before its course.
def topology(res, seg):
ind = -1
for i in range(0, len(seg)):
if seg[i] in res:
ind = res.index(seg[i]) + 1
continue
else:
if ind == -1 or ind >= len(res):
res.append(seg[i])
else:
res.insert(ind, seg[i])
ind = ind + 1
test()
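# A hedged I/O sketch (format inferred from the input() calls above): for
#   4
#   [[1, 0], [2, 0], [3, 1]]
# the script prints a prerequisite-respecting order, here [0, 2, 1, 3].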
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
de3583f4711cd44ff8a4fe3228a6ec6a3a7093e0
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/achievement/_admin_create_new_achievement.py
|
a6fedd2b1b5536aa5a2b987bb622cd5b4b5b5def
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Achievement Service (2.21.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.achievement import (
admin_create_new_achievement as admin_create_new_achievement_internal,
)
from accelbyte_py_sdk.api.achievement.models import ModelsAchievementRequest
from accelbyte_py_sdk.api.achievement.models import ModelsAchievementResponse
from accelbyte_py_sdk.api.achievement.models import ResponseError
@click.command()
@click.argument("body", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_create_new_achievement(
body: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_create_new_achievement_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
if body is not None:
try:
body_json = json.loads(body)
body = ModelsAchievementRequest.create_from_dict(body_json)
except ValueError as e:
raise Exception(f"Invalid JSON for 'body'. {str(e)}") from e
result, error = admin_create_new_achievement_internal(
body=body,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"AdminCreateNewAchievement failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_create_new_achievement.operation_id = "AdminCreateNewAchievement"
admin_create_new_achievement.is_deprecated = False
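# A hedged invocation sketch (the CLI group wiring and the payload fields are
# assumptions, not defined in this file):
#
#   <cli> admin-create-new-achievement '{"achievementCode": "ach-1"}' \
#       --namespace mygame --login_as client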
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
050ffca21e8e249c55e742ca4256ecf84715d92b
|
52a32a93942b7923b7c0c6ca5a4d5930bbba384b
|
/unittests/test_adminsite.py
|
bc48a7ea4540ec9daadd22614619ced502491a61
|
[
"MIT-open-group",
"GCC-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-only",
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-3.0-or-later",
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"PSF-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-2.0-or-later",
"HPND",
"libtiff",
"LGPL-2.1-or-later",
"EPL-2.0",
"GPL-3.0-only",
"MIT",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-public-domain-disclaimer",
"HPND-Markus-Kuhn",
"CC-BY-SA-4.0",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-openssl-exception-lgpl3.0plus",
"Libpng",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-Advertising-Acknowledgement",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"MIT-Modern-Variant",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-xfree86-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause"
] |
permissive
|
DefectDojo/django-DefectDojo
|
43bfb1c728451335661dadc741be732a50cd2a12
|
b98093dcb966ffe972f8719337de2209bf3989ec
|
refs/heads/master
| 2023-08-21T13:42:07.238370
| 2023-08-14T18:00:34
| 2023-08-14T18:00:34
| 31,028,375
| 2,719
| 1,666
|
BSD-3-Clause
| 2023-09-14T19:46:49
| 2015-02-19T17:53:47
|
HTML
|
UTF-8
|
Python
| false
| false
| 938
|
py
|
from .dojo_test_case import DojoTestCase
from django.contrib import admin
import django.apps
class AdminSite(DojoTestCase):
fixtures = ['dojo_testdata.json']
def test_is_model_defined(self):
for subclass in django.apps.apps.get_models():
if subclass._meta.proxy:
continue
if subclass.__module__ == 'dojo.models':
if not ((subclass.__name__[:9] == "Tagulous_") and (subclass.__name__[-5:] == "_tags")):
with self.subTest(type="base", subclass=subclass):
self.assertIn(subclass, admin.site._registry.keys(), "{} is not registered in 'admin.site' in models.py".format(subclass))
else:
with self.subTest(type="tag", subclass=subclass):
self.assertIn(subclass, admin.site._registry.keys(), "{} is not registered in 'tagulous.admin' in models.py".format(subclass))
|
[
"noreply@github.com"
] |
DefectDojo.noreply@github.com
|
2dd33060faa5ea97aa63c818cbb22f222dffe9f7
|
85e3baf2668db0592df3d9f9aa447b9f20ef25d6
|
/ImageManipulations/BitwiseOperations.py
|
72025d6bb36fc3b91511cc5460f59a6ff32501ee
|
[] |
no_license
|
pekkipo/Computer_Vision
|
cd33a93f5706cd933761d02735f943354c6468fc
|
55aaeae5b955540722270aab07295e85cfa26a95
|
refs/heads/master
| 2021-01-20T06:54:45.912254
| 2017-05-22T12:59:28
| 2017-05-22T12:59:28
| 89,944,925
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
# Very useful for masking the images
import cv2
import numpy as np
# CREATE HALF AN ELLIPSE AND A RECTANGLE
# If you're wondering why only two dimensions, well, this is a grayscale image;
# if we were doing a color image, we'd use
# rectangle = np.zeros((300, 300, 3), np.uint8)
# Making a square
square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -1)  # negative thickness draws a filled shape
cv2.imshow("Square", square)
cv2.waitKey(0)
# Making a half ellipse (filled, drawn from 0 to 180 degrees)
ellipse = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellipse, (150, 150), (150, 150), 30, 0, 180, 255, -1)
cv2.imshow("Ellipse", ellipse)
cv2.waitKey(0)
cv2.destroyAllWindows()
# BITWISE OPERATIONS
# square and ellipse have to be of same dimensions
# Shows only where they intersect
And = cv2.bitwise_and(square, ellipse)
cv2.imshow("AND", And)
cv2.waitKey(0)
# Shows where either square or ellipse is
bitwiseOr = cv2.bitwise_or(square, ellipse)
cv2.imshow("OR", bitwiseOr)
cv2.waitKey(0)
# Shows where exactly one of the two exists; everything covered by both the ellipse and the rectangle will be black
bitwiseXor = cv2.bitwise_xor(square, ellipse)
cv2.imshow("XOR", bitwiseXor)
cv2.waitKey(0)
# Shows everything that isn't part of the square
bitwiseNot_sq = cv2.bitwise_not(square)  # takes a single image and inverts it
cv2.imshow("NOT - square", bitwiseNot_sq)
cv2.waitKey(0)
### Notice the last operation inverts the image totally
cv2.destroyAllWindows()
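# A minimal masking sketch (hypothetical image path; the input must be resized
# to the 300x300 mask before applying it):
#
#   img = cv2.imread("photo.jpg", cv2.IMREAD_GRAYSCALE)
#   img = cv2.resize(img, (300, 300))
#   masked = cv2.bitwise_and(img, img, mask=ellipse)
#   cv2.imshow("Masked", masked)
#   cv2.waitKey(0)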
|
[
"pekkipodev@gmail.com"
] |
pekkipodev@gmail.com
|
9605f4f4ccc4457804b9e7322ac882c255c923d5
|
691f49708fa5121e261650f01f2e9b93e9bdd26f
|
/skills.py
|
01571c068d6ecf27898b218f52ef8c80bffac8a8
|
[] |
no_license
|
michelelee/skills-dictionaries-
|
29a14980308ea1cc3b76bcc1f1c72a0236fd91e8
|
9c029792ab238f463b3642815b5dd6316299a6b3
|
refs/heads/master
| 2021-01-10T19:43:29.314898
| 2015-04-21T15:59:29
| 2015-04-21T15:59:29
| 34,235,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,307
|
py
|
# To work on the advanced problems, set to True
ADVANCED = False
def count_unique(string1):
"""Count unique words in a string.
This function should take a single string and return a dictionary
that has all of the distinct words as keys, and the number of times
that word appears in the string.
For example:
>>> print_dict(count_unique("each word appears once"))
{'appears': 1, 'each': 1, 'once': 1, 'word': 1}
Words that appear more than once should be counted each time:
>>> print_dict(count_unique("rose is a rose is a rose"))
{'a': 2, 'is': 2, 'rose': 3}
It's fine to consider punctuation part of a word (e.g., a comma
at the end of a word can be counted as part of that word) and
to consider differently-capitalized words as different:
>>> print_dict(count_unique("Porcupine see, porcupine do."))
{'Porcupine': 1, 'do.': 1, 'porcupine': 1, 'see,': 1}
"""
uniquedict = {}
string1 = string1.split()
for word in string1:
uniquedict[word] = uniquedict.setdefault(word, 0) + 1
return uniquedict
print count_unique("each word weird word word appears once")
def common_items(list1, list2):
"""Produce the set of common items in two lists.
Given two lists, return a list of the common items shared between
the lists.
    IMPORTANT: you may not use 'if ___ in ___' or the method 'index'.
For example:
>>> sorted(common_items([1, 2, 3, 4], [1, 2]))
[1, 2]
If an item appears more than once in both lists, return it each
time:
>>> sorted(common_items([1, 2, 3, 4], [1, 1, 2, 2]))
[1, 1, 2, 2]
(And the order of which has the multiples shouldn't matter, either):
>>> sorted(common_items([1, 1, 2, 2], [1, 2, 3, 4]))
[1, 1, 2, 2]
"""
pass
def unique_common_items(list1, list2):
"""Produce the set of *unique* common items in two lists.
Given two lists, return a list of the *unique* common items shared between
the lists.
    IMPORTANT: you may not use 'if ___ in ___' or the method 'index'.
Just like `common_items`, this should find [1, 2]:
>>> sorted(unique_common_items([1, 2, 3, 4], [1, 2]))
[1, 2]
However, now we only want unique items, so for these lists, don't show
more than 1 or 2 once:
>>> sorted(unique_common_items([1, 2, 3, 4], [1, 1, 2, 2]))
[1, 2]
"""
    seen_in_first = {}
    for i in list1:
        seen_in_first[i] = True
    newlist = []
    added = {}
    for i in list2:
        # keep items that also appeared in list1, once each (no 'in', no 'index')
        if seen_in_first.get(i) and not added.get(i):
            newlist.append(i)
            added[i] = True
    return newlist
print unique_common_items([1, 2, 3, 4], [1, 2, 2, 3, 7, 8, 9])
def sum_zero(list1):
"""Return list of x,y number pair lists from a list where x+y==0
Given a list of numbers, add up each individual pair of numbers.
Return a list of each pair of numbers that adds up to 0.
For example:
>>> sort_pairs( sum_zero([1, 2, 3, -2, -1]) )
[[-2, 2], [-1, 1]]
This should always be a unique list, even if there are
duplicates in the input list:
>>> sort_pairs( sum_zero([1, 2, 3, -2, -1, 1, 1]) )
[[-2, 2], [-1, 1]]
Of course, if there are one or more zeros to pair together,
that's fine, too:
>>> sort_pairs( sum_zero([1, 2, 3, -2, -1, 1, 0, 1, 0]) )
[[-2, 2], [-1, 1], [0, 0]]
"""
return []
def find_duplicates(words):
"""Given a list of words, return the list with duplicates removed.
For example:
>>> sorted(find_duplicates(
... ["rose", "is", "a", "rose", "is", "a", "rose"]))
['a', 'is', 'rose']
You should treat differently-capitalized words as different:
>>> sorted(find_duplicates(
... ["Rose", "is", "a", "rose", "is", "a", "rose"]))
['Rose', 'a', 'is', 'rose']
"""
    listdict = {}
    for i in words:
        listdict[i] = listdict.setdefault(i, 0) + 1
    # the distinct words are exactly the dictionary's keys
    return listdict.keys()
def word_length(words):
"""Given list of words, return list of ascending [(len, [words])].
Given a list of words, return a list of tuples, ordered by word-length.
Each tuple should have two items--the length of the words for that
word-length, and the list of words of that word length.
For example:
>>> word_length(["ok", "an", "apple", "a", "day"])
[(1, ['a']), (2, ['ok', 'an']), (3, ['day']), (5, ['apple'])]
"""
return []
def pirate_talk(phrase):
"""Translate phrase to pirate talk.
Given a phrase, translate each word to the Pirate-speak equivalent.
Words that cannot be translated into Pirate-speak should pass through
unchanged. Return the resulting sentence.
Here's a table of English to Pirate translations:
English Pirate
---------- ----------------
sir matey
hotel fleabag inn
student swabbie
boy matey
madam proud beauty
professor foul blaggart
restaurant galley
your yer
excuse arr
students swabbies
are be
lawyer foul blaggart
the th'
restroom head
my me
hello avast
is be
man matey
For example:
>>> pirate_talk("my student is not a man")
'me swabbie be not a matey'
You should treat words with punctuation as if they were different
words:
>>> pirate_talk("my student is not a man!")
'me swabbie be not a man!'
"""
return ""
def adv_word_length_sorted_words(words):
"""Given list of words, return list of ascending [(len, [sorted-words])].
Given a list of words, return a list of tuples, ordered by word-length.
Each tuple should have two items--the length of the words for that
word-length, and the list of words of that word length. The list of words
for that length should be sorted alphabetically.
For example:
>>> adv_word_length_sorted_words(["ok", "an", "apple", "a", "day"])
[(1, ['a']), (2, ['an', 'ok']), (3, ['day']), (5, ['apple'])]
"""
return []
##############################################################################
# You can ignore everything after here
def print_dict(d):
# This method is just used to print dictionaries in key-alphabetical
# order, and is only used for our documentation tests. You can ignore it.
if isinstance(d, dict):
print "{" + ", ".join("%r: %r" % (k, d[k]) for k in sorted(d)) + "}"
else:
print d
def sort_pairs(l):
# Print sorted list of pairs where the pairs are sorted. This is used only
# for documentation tests. You can ignore it.
return sorted(sorted(pair) for pair in l)
if __name__ == "__main__":
print
import doctest
for k, v in globals().items():
if k[0].isalpha():
if k.startswith('adv_') and not ADVANCED:
continue
a = doctest.run_docstring_examples(v, globals(), name=k)
print "** END OF TEST OUTPUT"
print
|
[
"info@hackbrightacademy.com"
] |
info@hackbrightacademy.com
|
eef5b333c3116e66c2bb74ed66d69cdc15a106b0
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/LightGBM_sklearn_scipy_numpy/source/sklearn/feature_selection/from_model.py
|
2502643453d797d2ecf32c119697a97f4ab76e5e
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,363
|
py
|
# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import BaseEstimator, clone, MetaEstimatorMixin
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.metaestimators import if_delegate_has_method
def _get_feature_importances(estimator, norm_order=1):
"""Retrieve or aggregate feature importances from estimator"""
importances = getattr(estimator, "feature_importances_", None)
if importances is None and hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = np.linalg.norm(estimator.coef_, axis=0,
ord=norm_order)
elif importances is None:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
# determine default from estimator
est_name = estimator.__class__.__name__
if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or
"Lasso" in est_name):
# the natural default threshold is 0 when l1 penalty was used
threshold = 1e-5
else:
threshold = "mean"
if isinstance(threshold, six.string_types):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
class SelectFromModel(BaseEstimator, SelectorMixin, MetaEstimatorMixin):
"""Meta-transformer for selecting features based on importance weights.
.. versionadded:: 0.17
Parameters
----------
estimator : object
The base estimator from which the transformer is built.
This can be both a fitted (if ``prefit`` is set to True)
or a non-fitted estimator. The estimator must have either a
``feature_importances_`` or ``coef_`` attribute after fitting.
threshold : string, float, optional default None
The threshold value to use for feature selection. Features whose
importance is greater or equal are kept while the others are
discarded. If "median" (resp. "mean"), then the ``threshold`` value is
the median (resp. the mean) of the feature importances. A scaling
factor (e.g., "1.25*mean") may also be used. If None and if the
estimator has a parameter penalty set to l1, either explicitly
or implicitly (e.g, Lasso), the threshold used is 1e-5.
Otherwise, "mean" is used by default.
prefit : bool, default False
Whether a prefit model is expected to be passed into the constructor
directly or not. If True, ``transform`` must be called directly
and SelectFromModel cannot be used with ``cross_val_score``,
``GridSearchCV`` and similar utilities that clone the estimator.
Otherwise train the model using ``fit`` and then ``transform`` to do
feature selection.
norm_order : non-zero int, inf, -inf, default 1
Order of the norm used to filter the vectors of coefficients below
``threshold`` in the case where the ``coef_`` attribute of the
estimator is of dimension 2.
Attributes
----------
estimator_ : an estimator
The base estimator from which the transformer is built.
This is stored only when a non-fitted estimator is passed to the
``SelectFromModel``, i.e when prefit is False.
threshold_ : float
The threshold value used for feature selection.
"""
def __init__(self, estimator, threshold=None, prefit=False, norm_order=1):
self.estimator = estimator
self.threshold = threshold
self.prefit = prefit
self.norm_order = norm_order
def _get_support_mask(self):
# SelectFromModel can directly call on transform.
if self.prefit:
estimator = self.estimator
elif hasattr(self, 'estimator_'):
estimator = self.estimator_
else:
raise ValueError(
'Either fit SelectFromModel before transform or set "prefit='
'True" and pass a fitted estimator to the constructor.')
scores = _get_feature_importances(estimator, self.norm_order)
threshold = _calculate_threshold(estimator, scores, self.threshold)
return scores >= threshold
def fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
return self
@property
def threshold_(self):
scores = _get_feature_importances(self.estimator_, self.norm_order)
return _calculate_threshold(self.estimator, scores, self.threshold)
@if_delegate_has_method('estimator')
def partial_fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer only once.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : Other estimator specific parameters
Returns
-------
self : object
Returns self.
"""
if self.prefit:
raise NotFittedError(
"Since 'prefit=True', call transform directly")
if not hasattr(self, "estimator_"):
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **fit_params)
return self
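# A hedged usage sketch (standard scikit-learn pattern; the estimator and the
# data X, y are illustrative, not part of this module):
#
#   from sklearn.ensemble import RandomForestClassifier
#   from sklearn.feature_selection import SelectFromModel
#   selector = SelectFromModel(RandomForestClassifier(n_estimators=50),
#                              threshold="1.25*mean")
#   X_reduced = selector.fit(X, y).transform(X)  # keeps features above 1.25x the mean importance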
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
d6c2bad547afdcc4851a6ed9eed95609d1640240
|
2043a3bbe2c2ac4fcf293a578b4c82018871290b
|
/495-提莫攻击/495.py
|
61a9feaa9420b9b22f46819187a87d008b5eb0e4
|
[] |
no_license
|
agave233/leetcode
|
7f3804a5ec544a9fb1a6ae34a91886d283075e2a
|
57629c9b0f9a7539bed9f28ba6771ee9d13aafaa
|
refs/heads/master
| 2021-09-13T21:42:55.535234
| 2018-05-04T15:57:24
| 2018-05-04T15:57:24
| 124,670,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
class Solution(object):
def findPoisonedDuration(self, timeSeries, duration):
"""
:type timeSeries: List[int]
:type duration: int
:rtype: int
"""
if timeSeries == []:
return 0
res = 0
for i in range(1, len(timeSeries)):
res += min(duration, timeSeries[i] - timeSeries[i - 1])
return res + duration
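# Sanity check (example from the problem statement): attacks at t=1 and t=4
# with duration 2 poison [1, 3) and [4, 6), so
# Solution().findPoisonedDuration([1, 4], 2) returns 4.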
|
[
"632679697@qq.com"
] |
632679697@qq.com
|
7cd3431aa501d19d14be6f45d4b21f1e5eda4263
|
12317e3617b1bd900d131c2047ec2e3211bcb16b
|
/musictrack-albumadd.py
|
3cc9dec9f6c567bfb41ce67dad1f7e0dd357bf09
|
[] |
no_license
|
apocalyptech/musictrack-cli
|
b5fd7dedb4c6bb23ceea8e9e717ad2062a087a15
|
6563f9177df7f0a28039ffeabae005d940bdb205
|
refs/heads/master
| 2021-08-12T05:53:44.831965
| 2021-08-09T15:58:35
| 2021-08-09T15:58:35
| 72,576,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
#!/usr/bin/env python3
# vim: set expandtab tabstop=4 shiftwidth=4:
# Adds a new album to the database
from app import App, AppArgumentParser
# Parse arguments
parser = AppArgumentParser(description='Adds a new album to the database')
group = parser.add_mutually_exclusive_group()
group.add_argument('-l', '--live',
action='store_true',
help='Store as a live album')
group.add_argument('-e', '--ep',
action='store_true',
help='Store as an EP')
parser.add_argument('-f', '--force',
action='store_true',
help='Force an update, if the album already exists')
parser.add_argument('filenames',
type=str,
nargs='+',
metavar='filename',
help='Filenames which make up the album')
args = parser.parse_args()
# Collapse our album type down a bit
if args.live:
album_type = 'live'
elif args.ep:
album_type = 'ep'
else:
album_type = 'album'
# Do the work
app = App(args.database)
(added, status) = app.add_album(args.filenames, album_type, force_update=args.force)
print(status)
app.close()
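# A hedged invocation sketch (a --database option is assumed to be added by
# AppArgumentParser, given the args.database reference above):
#
#   ./musictrack-albumadd.py --live disc1/01.mp3 disc1/02.mp3
#   ./musictrack-albumadd.py -f -e single/01.mp3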
|
[
"pez@apocalyptech.com"
] |
pez@apocalyptech.com
|
760804ea8af855cfc7abba00fb01151a90c8e877
|
0e820627e68413aebe27fbc32dde66a3c99651d3
|
/flamingo/core/context.py
|
3a16f23a65abc8322d5d15a2a9a1734cc8c6f8c1
|
[
"Apache-2.0"
] |
permissive
|
ejoerns/flamingo
|
17de116008561a4a64613b2b6528eb6d52706281
|
1d61c99c9ad34dd0a2a652f80783226051e07238
|
refs/heads/master
| 2020-04-11T02:53:47.295752
| 2018-12-14T11:17:23
| 2018-12-14T11:37:39
| 161,460,547
| 0
| 0
|
Apache-2.0
| 2018-12-12T09:00:34
| 2018-12-12T09:00:34
| null |
UTF-8
|
Python
| false
| false
| 7,368
|
py
|
import logging
import shutil
import os
from flamingo.core.parser import FileParser, ParsingError
from flamingo.core.data_model import ContentSet, Content
from flamingo.core.utils.imports import acquire
class Context:
def __init__(self, settings):
self.settings = settings
# setup logging
self.logger = logging.getLogger('flamingo')
self.logger.debug('setting up context')
# setup plugins
self.plugins = []
plugins = (self.settings.CORE_PLUGINS +
self.settings.DEFAULT_PLUGINS +
self.settings.PLUGINS)
for plugin in plugins:
self.logger.debug("setting up plugin '%s' ", plugin)
try:
plugin_class = acquire(plugin)
self.plugins.append(plugin_class())
except Exception:
self.logger.error('plugin setup failed', exc_info=True)
# setup parser
self.parser = FileParser()
self.run_plugin_hook('parser_setup')
# parse contents
self.contents = ContentSet()
self.content = None
self._media = [] # FIXME: this should be part of Content()
for path in self.get_source_paths():
self.logger.debug("reading %s ", path)
try:
self.content = Content(
path=os.path.relpath(path, settings.CONTENT_ROOT))
self.parser.parse(path, self.content)
self.run_plugin_hook('content_parsed', self.content)
self.contents.add(self.content)
except ParsingError as e:
self.logger.error('%s: %s', path, e)
except Exception:
                self.logger.error('exception occurred while reading %s',
path, exc_info=True)
del self.content
self.run_plugin_hook('contents_parsed')
# setup templating engine
templating_engine_class = acquire(settings.TEMPLATING_ENGINE)
self.templating_engine = templating_engine_class(
settings.THEME_PATHS + settings.CORE_THEME_PATHS
)
self.run_plugin_hook('templating_engine_setup', self.templating_engine)
self.run_plugin_hook('context_setup')
def get_source_paths(self):
self.logger.debug('searching for content')
supported_extensions = self.parser.get_extensions()
if self.settings.CONTENT_PATHS:
self.logger.debug('using user defined content paths')
for path in self.settings.CONTENT_PATHS:
path = os.path.join(self.settings.CONTENT_ROOT, path)
extension = os.path.splitext(path)[1][1:]
if extension not in supported_extensions:
self.logger.debug(
"skipping '%s'. extension '%s' is not supported",
path, extension)
continue
yield path
else:
self.logger.debug("searching content recursive in %s",
self.settings.CONTENT_ROOT)
for root, dirs, files in os.walk(self.settings.CONTENT_ROOT):
for name in files:
extension = os.path.splitext(name)[1][1:]
if extension not in supported_extensions:
continue
yield os.path.join(root, name)
def run_plugin_hook(self, name, *args, **kwargs):
self.logger.debug("running plugin hook '%s'", name)
for plugin in self.plugins:
hook = getattr(plugin, name, None)
if not hook:
continue
self.logger.debug('running %s.%s', plugin.__class__.__name__, name)
hook(self, *args, **kwargs)
def render(self, content, template_name=''):
template_name = template_name or content['template']
template_context = {
'content': content,
'context': self,
}
return self.templating_engine.render(template_name, template_context)
def copy_media(self, filename, content_source_path):
# gen source_path
if filename.startswith('/'):
source_path = os.path.join(
self.settings.CONTENT_ROOT, filename[1:])
else:
source_path = os.path.join(
os.path.dirname(
os.path.join(self.settings.CONTENT_ROOT,
content_source_path)
),
filename,
)
source_path = os.path.normpath(source_path)
# gen destination_path
destination_path = os.path.join(
self.settings.MEDIA_ROOT,
os.path.relpath(source_path, self.settings.CONTENT_ROOT),
)
# gen link
link = os.path.join(
'/media',
os.path.relpath(destination_path, self.settings.MEDIA_ROOT),
)
# check if media exists
if not os.path.exists(source_path):
self.logger.critical(
"media '%s' does not exist (used as '%s' in '%s')",
source_path, filename, content_source_path,
)
else:
self._media.append((source_path, destination_path, ))
return source_path, destination_path, link
def build(self, clean=True):
self.run_plugin_hook('pre_build')
def makedirs(path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
self.logger.debug('mkdir -p %s', dirname)
os.makedirs(dirname)
# remove previous artifacts
if clean and os.path.exists(self.settings.OUTPUT_ROOT):
self.logger.debug('rm -rf %s', self.settings.OUTPUT_ROOT)
shutil.rmtree(self.settings.OUTPUT_ROOT)
# render contents
for content in self.contents:
output_path = os.path.join(self.settings.OUTPUT_ROOT,
content['output'])
makedirs(output_path)
# render and write content
with open(output_path, 'w+') as f:
self.logger.debug("writing '%s'...", output_path)
if content['template']:
output = self.render(content)
else:
output = content['content']
f.write(output)
if self.settings.CONTENT_PATHS:
return
# copy media
for source_path, destination_path in self._media:
makedirs(destination_path)
self.logger.debug('cp %s %s', source_path, destination_path)
shutil.copy(source_path, destination_path)
# copy static
for static_dir in self.templating_engine.find_static_dirs():
for root, dirs, files in os.walk(static_dir):
for f in files:
src = os.path.join(root, f)
dst = os.path.join(
self.settings.STATIC_ROOT,
os.path.relpath(root, static_dir),
f,
)
self.logger.debug('cp %s %s', src, dst)
makedirs(dst)
shutil.copy(src, dst)
self.run_plugin_hook('post_build')
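# A minimal plugin sketch (illustrative; the hook name matches a run_plugin_hook
# call above, and every hook receives the context as its first argument):
#
#   class MyPlugin:
#       def contents_parsed(self, context):
#           context.logger.info('all contents parsed')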
|
[
"f.scherf@pengutronix.de"
] |
f.scherf@pengutronix.de
|
f79ed3b7e1c14de99d256cd73c36799358a75bf8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_enthroning.py
|
9e1ce436218ffae4fce5553d7c0bc87d31485be5
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# class header
class _ENTHRONING():
    def __init__(self,):
        self.name = "ENTHRONING"
        self.definitions = 'enthrone'  # quoted: the bare name was a NameError; original definition text unknown
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['enthrone']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f4d1328cdd7e3637702bea1c4b7253ed5dfb96ad
|
206330d8ce5266630f4500be8df90e59d72addc2
|
/xcube/api/chunk.py
|
0262939968fce4992e3e91ed0596b24ac8e8afd3
|
[
"MIT"
] |
permissive
|
dzelge/xcube
|
4c70dda7e1efc4517e558af7ac9a2d1ae440b782
|
1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd
|
refs/heads/master
| 2020-06-17T15:56:54.187694
| 2019-07-08T07:39:08
| 2019-07-08T07:39:08
| 195,969,440
| 0
| 0
|
MIT
| 2019-07-09T08:46:03
| 2019-07-09T08:46:03
| null |
UTF-8
|
Python
| false
| false
| 3,171
|
py
|
import itertools
from typing import Dict, Tuple, Iterable
import numpy as np
import xarray as xr
from xcube.util.dsio import FORMAT_NAME_ZARR, FORMAT_NAME_NETCDF4
def chunk_dataset(dataset: xr.Dataset,
chunk_sizes: Dict[str, int] = None,
format_name: str = None) -> xr.Dataset:
"""
Chunk dataset and update encodings for given format.
:param dataset: input dataset
:param chunk_sizes: mapping from dimension name to new chunk size
:param format_name: format, e.g. "zarr" or "netcdf4"
:return: the re-chunked dataset
"""
chunked_ds = dataset.chunk(chunks=chunk_sizes)
# Update encoding so writing of chunked_ds recognizes new chunks
chunk_sizes_attr_name = None
if format_name == FORMAT_NAME_ZARR:
chunk_sizes_attr_name = "chunks"
if format_name == FORMAT_NAME_NETCDF4:
chunk_sizes_attr_name = "chunksizes"
if chunk_sizes_attr_name:
for var_name in chunked_ds.variables:
var = chunked_ds[var_name]
if chunk_sizes:
sizes = tuple(chunk_sizes[dim_name] if dim_name in chunk_sizes
else var.shape[var.dims.index(dim_name)]
for dim_name in var.dims)
var.encoding.update({chunk_sizes_attr_name: sizes})
elif chunk_sizes_attr_name in var.encoding:
# Remove any explicit and wrong specification so writing will use Dask chunks (TBC!)
del var.encoding[chunk_sizes_attr_name]
return chunked_ds
def get_empty_dataset_chunks(dataset: xr.Dataset) -> Dict[str, Tuple[Tuple[int, ...]]]:
"""
Identify empty dataset chunks and return their indices.
:param dataset: The dataset.
    :return: A mapping from variable name to a tuple of indices of its all-NaN chunks.
"""
return {var_name: get_empty_var_chunks(dataset[var_name]) for var_name in dataset.data_vars}
def get_empty_var_chunks(var: xr.DataArray) -> Tuple[Tuple[int, ...]]:
"""
Identify empty variable chunks and return their indices.
:param var: The variable.
    :return: A tuple of indices of the variable's all-NaN chunks.
"""
chunks = var.chunks
if chunks is None:
raise ValueError('data array not chunked')
chunk_slices = compute_chunk_slices(chunks)
empty_chunk_indexes = []
for chunk_index, chunk_slice in chunk_slices:
data_index = tuple(slice(start, end) for start, end in chunk_slice)
data = var[data_index]
if np.all(np.isnan(data)):
empty_chunk_indexes.append(chunk_index)
# print(f'empty: {var.name}/{".".join(map(str, chunk_index))}')
# noinspection PyTypeChecker
return tuple(empty_chunk_indexes)
def compute_chunk_slices(chunks: Tuple[Tuple[int, ...], ...]) -> Iterable:
chunk_indices = []
for c in chunks:
chunk_indices.append(tuple(i for i in range(len(c))))
chunk_slices = []
for c in chunks:
x = []
o = 0
for s in c:
x.append((o, o + s))
o += s
chunk_slices.append(tuple(x))
return zip(itertools.product(*chunk_indices), itertools.product(*chunk_slices))
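if __name__ == '__main__':
    # A minimal, self-contained demo (assumed data, needs dask installed; not
    # part of the original module): re-chunk a tiny dataset for zarr output,
    # then locate its all-NaN chunks.
    data = np.full((4, 4), np.nan)
    data[:2, :2] = 1.0
    ds = xr.Dataset({'v': (('y', 'x'), data)})
    chunked = chunk_dataset(ds, chunk_sizes={'y': 2, 'x': 2},
                            format_name=FORMAT_NAME_ZARR)
    print(get_empty_dataset_chunks(chunked))  # {'v': ((0, 1), (1, 0), (1, 1))}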
|
[
"norman.fomferra@gmail.com"
] |
norman.fomferra@gmail.com
|
3ac5574809b0a987b095ff860930ce7889a6010d
|
3b9338d99cf8090387418e32ca81617f072c39fb
|
/build_system_kit/extpy/runme.py
|
9984d9c89d00a26c95c9f157fb45f863659100e9
|
[] |
no_license
|
sillsdevarchive/wsiwaf
|
8ca14c286bafceb9ee6fad740b64ad7131282dc3
|
2dcddafc3602a7220acbe995df4ba85abb06b767
|
refs/heads/master
| 2020-12-30T17:10:21.701380
| 2017-05-12T05:12:17
| 2017-05-12T05:12:17
| 91,052,898
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
#! /usr/bin/env python
# encoding: utf-8
"""
Create a waf file able to read wscript files ending in ".py"
execute a small test to show that it works
The waf file includes "extpy.py" which performs the required modifications
"""
import os, subprocess
up = os.path.dirname
join = os.path.join
cwd = os.getcwd()
extpy = join(cwd, 'extpy.py')
args = 'python waf-light --tools=compat15,%s --prelude=$"\tfrom waflib.extras import extpy\n" ' % extpy
root = up(up(cwd))
subprocess.Popen(args, cwd=root, shell=True).wait()
os.rename(join(root, 'waf'), join(cwd, 'waf.py'))
env = dict(os.environ)
if 'WAFDIR' in env:
del env['WAFDIR']
subprocess.Popen('python waf.py configure', cwd=cwd, shell=True, env=env).wait()
|
[
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
] |
tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85
|
6370647795edf476bd74936a53934d7af363aa50
|
a6ed990fa4326c625a2a02f0c02eedf758ad8c7b
|
/meraki/sdk/python/getOrganizationSamlRole.py
|
59bfeed4271e31d35e33f75e08f64889f85458c1
|
[] |
no_license
|
StevenKitavi/Meraki-Dashboard-API-v1-Documentation
|
cf2352976c6b6c00c17a5f6442cedf0aeed46c22
|
5ed02a7def29a2ce455a3f2cfa185f76f44789f5
|
refs/heads/main
| 2023-03-02T08:49:34.846055
| 2021-02-05T10:31:25
| 2021-02-05T10:31:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
organization_id = '549236'
saml_role_id = ''
response = dashboard.organizations.getOrganizationSamlRole(
organization_id, saml_role_id
)
print(response)
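# A hedged alternative (per the comment above): omit the key and let the SDK
# read it from the environment, commonly MERAKI_DASHBOARD_API_KEY.
#
#   dashboard = meraki.DashboardAPI()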
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
af55da00f419a54f60e6f9d444592cf6fc9dfe8a
|
09301c71638abf45230192e62503f79a52e0bd80
|
/besco_erp/besco_sale/general_sale_margin/report/__init__.py
|
2b722f4eaeb7ee64f222222d8f751f72bc36c203
|
[] |
no_license
|
westlyou/NEDCOFFEE
|
24ef8c46f74a129059622f126401366497ba72a6
|
4079ab7312428c0eb12015e543605eac0bd3976f
|
refs/heads/master
| 2020-05-27T06:01:15.188827
| 2017-11-14T15:35:22
| 2017-11-14T15:35:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
#
##############################################################################
import sales_fiscalyear
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"son.huynh@nedcoffee.vn"
] |
son.huynh@nedcoffee.vn
|
dd080f3fb34b5813c336f55367f9d6d793eaf669
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/apimanagement/v20190101/get_api_version_set.py
|
c4fb809e041874e3b5c0a04404aedfa32889fdd7
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 5,626
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiVersionSetResult',
'AwaitableGetApiVersionSetResult',
'get_api_version_set',
]
@pulumi.output_type
class GetApiVersionSetResult:
"""
Api Version Set Contract details.
"""
def __init__(__self__, description=None, display_name=None, name=None, type=None, version_header_name=None, version_query_name=None, versioning_scheme=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version_header_name and not isinstance(version_header_name, str):
raise TypeError("Expected argument 'version_header_name' to be a str")
pulumi.set(__self__, "version_header_name", version_header_name)
if version_query_name and not isinstance(version_query_name, str):
raise TypeError("Expected argument 'version_query_name' to be a str")
pulumi.set(__self__, "version_query_name", version_query_name)
if versioning_scheme and not isinstance(versioning_scheme, str):
raise TypeError("Expected argument 'versioning_scheme' to be a str")
pulumi.set(__self__, "versioning_scheme", versioning_scheme)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of API Version Set.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Name of API Version Set
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="versionHeaderName")
def version_header_name(self) -> Optional[str]:
"""
Name of HTTP header parameter that indicates the API Version if versioningScheme is set to `header`.
"""
return pulumi.get(self, "version_header_name")
@property
@pulumi.getter(name="versionQueryName")
def version_query_name(self) -> Optional[str]:
"""
Name of query parameter that indicates the API Version if versioningScheme is set to `query`.
"""
return pulumi.get(self, "version_query_name")
@property
@pulumi.getter(name="versioningScheme")
def versioning_scheme(self) -> str:
"""
        A value that determines where the API Version identifier will be located in an HTTP request.
"""
return pulumi.get(self, "versioning_scheme")
class AwaitableGetApiVersionSetResult(GetApiVersionSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiVersionSetResult(
description=self.description,
display_name=self.display_name,
name=self.name,
type=self.type,
version_header_name=self.version_header_name,
version_query_name=self.version_query_name,
versioning_scheme=self.versioning_scheme)
def get_api_version_set(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
version_set_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiVersionSetResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
:param str version_set_id: Api Version Set identifier. Must be unique in the current API Management service instance.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['versionSetId'] = version_set_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20190101:getApiVersionSet', __args__, opts=opts, typ=GetApiVersionSetResult).value
return AwaitableGetApiVersionSetResult(
description=__ret__.description,
display_name=__ret__.display_name,
name=__ret__.name,
type=__ret__.type,
version_header_name=__ret__.version_header_name,
version_query_name=__ret__.version_query_name,
versioning_scheme=__ret__.versioning_scheme)
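# A hedged usage sketch (identifiers are placeholders):
#
#   result = get_api_version_set(resource_group_name='my-rg',
#                                service_name='my-apim',
#                                version_set_id='my-version-set')
#   pulumi.export('versioningScheme', result.versioning_scheme)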
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
7fb792bad28f1c928186db0504bcf02a1cacecad
|
aa81fe0b271ffddfd9f3bac6ebf0d4c503fb1ad8
|
/MultiPlanarUNet/sequences/isotrophic_live_view_sequence_3d.py
|
97bb66d6d3081b0ef8919a52bcde07fff1c54eca
|
[
"MIT"
] |
permissive
|
admshumar/MultiPlanarUNet
|
dd295d182e82ce903025ff2cb4895b2727c7b56a
|
6b59e8f2e0fb9601c17dc32eec4bb114971bf0a1
|
refs/heads/master
| 2020-07-31T12:12:41.695269
| 2019-08-01T07:36:04
| 2019-08-01T07:36:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,050
|
py
|
from MultiPlanarUNet.sequences.isotrophic_live_view_sequence import IsotrophicLiveViewSequence
from MultiPlanarUNet.interpolation.sample_grid import sample_box, sample_box_at
from MultiPlanarUNet.interpolation.linalg import mgrid_to_points
import numpy as np
class IsotrophicLiveViewSequence3D(IsotrophicLiveViewSequence):
def __init__(self, image_pair_loader, real_box_dim, bg_class, scaler,
no_log=False, **kwargs):
super().__init__(image_pair_loader, **kwargs)
self.bg_class = bg_class
self.scaler = scaler
self.real_box_dim = real_box_dim
self.batch_shape = (self.batch_size, self.sample_dim, self.sample_dim,
self.sample_dim, self.n_classes)
if not no_log:
self.log()
def log(self):
self.logger("Using sample dim: %s" % self.sample_dim)
self.logger("Using box real dim: %s" % self.real_box_dim)
self.logger("Using real space sample res: %s" % (self.real_box_dim/
self.sample_dim))
self.logger("N fg slices: %s" % self.n_fg_slices)
self.logger("Batch size: %s" % self.batch_size)
self.logger("Force all FG: %s" % self.force_all_fg)
def __len__(self):
""" Controlled in train.py """
return 10000
@staticmethod
def _intrp_and_norm(image, grid, intrp_lab):
# Interpolate
im = image.interpolator.intrp_image(grid)
# Normalize
im = image.scaler.transform(im)
lab = None
if intrp_lab:
lab = image.interpolator.intrp_labels(grid)
return im, lab
def get_base_patches_from(self, image, return_y=False, batch_size=1):
real_dims = image.real_shape
# Calculate positions
sample_space = np.asarray([max(i, self.real_box_dim) for i in real_dims])
d = (sample_space - self.real_box_dim)
min_cov = [np.ceil(sample_space[i]/self.real_box_dim).astype(np.int) for i in range(3)]
ds = [np.linspace(0, d[i], min_cov[i]) - sample_space[i]/2 for i in range(3)]
# Get placement coordinate points
placements = mgrid_to_points(np.meshgrid(*tuple(ds)))
for p in placements:
grid, axes, inv_mat = sample_box_at(real_placement=p,
sample_dim=self.sample_dim,
real_box_dim=self.real_box_dim,
noise_sd=0.0,
test_mode=True)
im, lab = self._intrp_and_norm(image, grid, return_y)
if return_y:
yield im, lab, grid, axes, inv_mat, len(placements)
else:
yield im, grid, axes, inv_mat, len(placements)
def get_N_random_patches_from(self, image, N, return_y=False):
if N > 0:
# Sample N patches from X
for i in range(N):
# Get grid and interpolate
grid, axes, inv_mat = sample_box(sample_dim=self.sample_dim,
real_box_dim=self.real_box_dim,
real_dims=image.real_shape,
noise_sd=self.noise_sd,
test_mode=True)
im, lab = self._intrp_and_norm(image, grid, return_y)
if return_y:
yield im, lab, grid, axes, inv_mat
else:
yield im, grid, axes, inv_mat
else:
return []
def __getitem__(self, idx):
"""
Used by keras.fit_generator to fetch mini-batches during training
"""
# If multiprocessing, set unique seed for this particular process
self.seed()
# Store how many slices has fg so far
has_fg = 0
has_fg_vec = np.zeros_like(self.fg_classes)
# Interpolate on a random index for each sample image to generate batch
batch_x, batch_y, batch_w = [], [], []
# Get a random image
max_tries = self.batch_size * 15
# Number of images to use in each batch. Number should be low enough
# to not exhaust queue generator.
N = 2 if self.image_pair_loader.queue else self.batch_size
cuts = np.round(np.linspace(0, self.batch_size, N+1)[1:])
scalers = []
bg_values = []
for i, image in enumerate(self.image_pair_loader.get_random(N=N)):
tries = 0
# Sample a batch from the image
while len(batch_x) < cuts[i]:
# Get grid and interpolate
mgrid = sample_box(sample_dim=self.sample_dim,
real_box_dim=self.real_box_dim,
real_dims=image.real_shape,
noise_sd=self.noise_sd)
# Get interpolated labels
lab = image.interpolator.intrp_labels(mgrid)
valid_lab, fg_change = self.validate_lab(lab, has_fg, len(batch_y))
if self.force_all_fg and tries < max_tries:
valid, has_fg_vec = self.validate_lab_vec(lab,
has_fg_vec,
len(batch_y))
if not valid:
tries += 1
continue
if valid_lab or tries > max_tries:
# Get interpolated image
im = image.interpolator.intrp_image(mgrid)
if tries > max_tries or self.is_valid_im(im, image.bg_value):
# Update foreground counter
has_fg += fg_change
# Save scaler to normalize image later (after potential
# augmentation)
scalers.append(image.scaler)
# Save bg value if needed in potential augmenters
bg_values.append(image.bg_value)
# Add to batches
batch_x.append(im)
batch_y.append(lab)
batch_w.append(image.sample_weight)
# Apply augmentation if specified
batch_x, batch_y, batch_w = self.augment(batch_x, batch_y,
batch_w, bg_values)
# Normalize images
batch_x = self.scale(batch_x, scalers)
# Reshape, one-hot encode etc.
batch_x, batch_y, batch_w = self.prepare_batches(batch_x,
batch_y,
batch_w)
assert len(batch_x) == self.batch_size
return batch_x, batch_y, batch_w
|
[
"mathias@perslev.com"
] |
mathias@perslev.com
|
a5f77e33734ac4166f39a977b6fb45bb27e0877b
|
0ce10b36e9e886c79dca46e2247603b17475206a
|
/cell2cell/external/__init__.py
|
e8bb83dc0a7e1ded1f6450b3e063913378edc7e8
|
[
"BSD-3-Clause"
] |
permissive
|
earmingol/cell2cell
|
dd5e5186a8793097db4e28bdf23c340399effd22
|
9fa855d48dc9c4b132fc59e2de1db23a37dc7c5e
|
refs/heads/master
| 2023-05-31T02:12:51.127542
| 2023-04-20T19:39:54
| 2023-04-20T19:39:54
| 182,623,687
| 46
| 10
|
BSD-3-Clause
| 2023-04-20T19:39:55
| 2019-04-22T04:37:54
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
from cell2cell.external.pcoa import (pcoa, pcoa_biplot, _check_ordination)
from cell2cell.external.goenrich import (goa, ontology)
from cell2cell.external.gseapy import (load_gmt, generate_lr_geneset, run_gsea)
from cell2cell.external.umap import (run_umap)
|
[
"earmingol14@gmail.com"
] |
earmingol14@gmail.com
|
79b59580a3c12ad1a907baf87ad0c83e41650f0c
|
73744790709a75fa15bd3a9c141777b81acf6402
|
/dsl_parser/tests/test_get_consumers.py
|
e7612b10eeaeb7bd0d54a87f5825c0e58de59c58
|
[
"Apache-2.0"
] |
permissive
|
cloudify-cosmo/cloudify-common
|
aea8f9b9b1f5d85a23b6f8d3f61b8a445d2fc27e
|
246550c150e33e3e8cf815e1ecff244d82293832
|
refs/heads/master
| 2023-08-24T12:44:40.277500
| 2023-08-17T08:54:31
| 2023-08-17T08:54:31
| 132,621,915
| 8
| 16
|
Apache-2.0
| 2023-08-24T12:27:03
| 2018-05-08T14:35:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,239
|
py
|
from dsl_parser import functions
from dsl_parser.tasks import prepare_deployment_plan
from dsl_parser.tests.abstract_test_parser import AbstractTestParser


class TestGetConsumers(AbstractTestParser):
    def setUp(self):
        super(TestGetConsumers, self).setUp()
        self.mock_storage = self.mock_evaluation_storage(
            consumers={
                'app1': 'App1',
                'app2': 'My Second App',
                'app3': 'App #3'
            })

    def test_node_template_properties_simple(self):
        yaml = """
node_types:
    type:
        properties:
            property: {}
node_templates:
    node:
        type: type
        properties:
            property: { get_consumers: ids }
"""
        parsed = prepare_deployment_plan(self.parse_1_3(yaml))
        node = self.get_node_by_name(parsed, 'node')
        self.assertEqual({'get_consumers': 'ids'},
                         node['properties']['property'])
        functions.evaluate_functions(parsed, {}, self.mock_storage)
        self.assertEqual(set(node['properties']['property']),
                         {'app1', 'app2', 'app3'})

    def test_consumers_in_outputs(self):
        yaml = """
node_types:
    type: {}
node_templates:
    node:
        type: type
outputs:
    consumers:
        value: { get_consumers: ids }
    consumer_count:
        value: { get_consumers: count }
"""
        parsed = prepare_deployment_plan(self.parse_1_3(yaml))
        outputs = parsed.outputs
        self.assertEqual({'get_consumers': 'ids'},
                         outputs['consumers']['value'])
        functions.evaluate_functions(parsed, {}, self.mock_storage)
        self.assertEqual(set(outputs['consumers']['value']),
                         {'app1', 'app2', 'app3'})
        self.assertEqual(outputs['consumer_count']['value'], 3)

    def test_consumers_in_inputs(self):
        yaml = """
inputs:
    consumer_count:
        default: { get_consumers: count }
    consumer_names:
        default: { get_consumers: names }
node_types:
    type: {}
node_templates:
    node:
        type: type
outputs:
    consumers:
        value: { get_input: consumer_names }
    consumer_count:
        value: { get_input: consumer_count }
"""
        parsed = prepare_deployment_plan(self.parse_1_3(yaml))
        outputs = parsed.outputs
        # `get_input` is evaluated at parse time, so we expect it to already
        # be replaced here with the `get_consumers: count` function
        self.assertEqual({'get_consumers': 'count'},
                         outputs['consumer_count']['value'])
        functions.evaluate_functions(parsed, {}, self.mock_storage)
        self.assertEqual(outputs['consumer_count']['value'], 3)
        self.assertEqual(set(outputs['consumers']['value']),
                         {'App1', 'My Second App', 'App #3'})

    def test_illegal_arguments(self):
        yaml = """
node_types:
    type:
        properties:
            property: {}
node_templates:
    node:
        type: type
        properties:
            property: { get_consumers: [a, b] }
"""
        self.assertRaisesRegex(
            ValueError,
            "Illegal argument passed to get_consumers",
            self.parse_1_3,
            yaml)
|
[
"noreply@github.com"
] |
cloudify-cosmo.noreply@github.com
|
26d99bcd4bb2a02032289382381ac6d94fc386f1
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj_092520.01+273619.7/sdB_SDSSJ_092520.01+273619.7_coadd.py
|
3c92de12f087f3a8d072508af616ae85d6589595
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
from gPhoton.gMap import gMap


def main():
    gMap(band="NUV", skypos=[141.333375,27.605472], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_SDSSJ_092520.01+273619.7/sdB_SDSSJ_092520.01+273619.7_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_SDSSJ_092520.01+273619.7/sdB_SDSSJ_092520.01+273619.7_count_coadd.fits", overwrite=True, verbose=3)


if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
0f95cdce65712bdf588c43e499e4f3c3114fd710
|
e8a8d8099419306feb8849625f65357456b6a2ae
|
/proj/app/models.py
|
4beb0e5041f2baa1d0e3b62ee95cc1796dee1cb1
|
[] |
no_license
|
Sentret/classificator
|
57224597f64f44462be61228e6ed7e18b6973181
|
86aafc5884215918f1cdac92f17360ac671dc703
|
refs/heads/master
| 2021-08-30T09:52:48.157374
| 2017-12-17T10:17:39
| 2017-12-17T10:17:39
| 114,526,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
from django.db import models


class Topic(models.Model):
    name = models.CharField(max_length=100)
    classifier = models.ForeignKey('Classifier', blank=True, null=True)

    class Meta:
        unique_together = ('name', 'classifier',)

    def __str__(self):
        return self.name


class Classifier(models.Model):
    name = models.CharField(max_length=100)
    discription = models.TextField(max_length=1000, null=True)
    version = models.ForeignKey('ClassifierVersion', blank=True, null=True)
    # max_length must be wide enough to hold the default status string
    status = models.CharField(max_length=16, default='Not trained')
    path_to_bin = models.CharField(max_length=100, null=True)

    def save(self, *args, **kwargs):
        self.path_to_bin = str(self.name) + '_pickle'
        return super(Classifier, self).save(*args, **kwargs)

    def __str__(self):
        return self.name


class ClassifierVersion(models.Model):
    topics = models.ManyToManyField('Topic')
    version = models.IntegerField()
    # Path to the file holding the serialized (pickled) object
    path_to_bin = models.CharField(max_length=25)

    def __str__(self):
        # ClassifierVersion has no `classifier` field of its own; resolve the
        # owning Classifier through the reverse FK created by Classifier.version
        classifier = self.classifier_set.first()
        name = classifier.name if classifier is not None else 'unbound'
        return name + ' ' + str(self.version)
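
A short hypothetical session (mine, not from the repository; it presumes a configured Django project with migrations applied, and all values are invented) showing how the reverse FK used in `ClassifierVersion.__str__` behaves:

# e.g. inside `python manage.py shell`
v = ClassifierVersion.objects.create(version=1, path_to_bin='clf_v1_pickle')
c = Classifier.objects.create(name='news-topics', version=v)
print(str(v))   # -> 'news-topics 1', found via v.classifier_set.first()
print(str(c))   # -> 'news-topics'; save() also set c.path_to_bin to 'news-topics_pickle'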
|
[
"you@example.com"
] |
you@example.com
|
34d9562b56d5ed5af1c74797c367485b191fa909
|
2844812adf8e919c6629463e33a1d9a3634ec0cc
|
/tests/benchmarks/base_utils.py
|
648eb5f0e2caa2f0b29dac0e485f6bc089b50e29
|
[
"Apache-2.0"
] |
permissive
|
tods-doc/d3m
|
0d920a4a88172c925cce7cd75c37296c3522a0c5
|
e25793d4aaa9a8fdb63ac33bf1c045b96d6067a6
|
refs/heads/master
| 2023-02-08T22:07:43.279961
| 2021-01-05T21:27:15
| 2021-01-05T21:27:15
| 296,895,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,618
|
py
|
from d3m import container
from d3m.base import utils as base_utils


class CombineColumns:
    params = [[100, 300, 500, 700, 900]]
    param_names = ['columns']

    def setup(self, columns):
        self.large_dataframe_with_many_columns = container.DataFrame({str(i): [j for j in range(5)] for i in range(columns)}, columns=[str(i) for i in range(columns)], generate_metadata=True)
        self.list_of_many_dataframe_columns = [
            container.DataFrame({str(i): [j for j in range(5, 10)]}, columns=[str(i)], generate_metadata=True)
            for i in range(int(columns / 2))
        ]

    def time_append(self, columns):
        base_utils.combine_columns(
            self.large_dataframe_with_many_columns,
            list(range(int(columns / 4), int(columns / 2))),  # Just 1/4 of columns.
            self.list_of_many_dataframe_columns,
            return_result='append',
            add_index_columns=True,
        )

    def time_replace(self, columns):
        base_utils.combine_columns(
            self.large_dataframe_with_many_columns,
            list(range(int(columns / 4), int(columns / 2))),  # Just 1/4 of columns.
            self.list_of_many_dataframe_columns,
            return_result='replace',
            add_index_columns=True,
        )

    def time_new(self, columns):
        base_utils.combine_columns(
            self.large_dataframe_with_many_columns,
            list(range(int(columns / 4), int(columns / 2))),  # Just 1/4 of columns.
            self.list_of_many_dataframe_columns,
            return_result='new',
            add_index_columns=True,
        )
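
The `params`/`param_names`/`setup`/`time_*` layout follows airspeed velocity (asv) benchmark conventions, where each `time_*` method is timed once per parameter value. Outside the asv runner, one parameterization can be exercised directly; this sketch is mine, not part of the file:

# Direct invocation of a single benchmark case, bypassing asv
bench = CombineColumns()
bench.setup(100)         # builds the 100-column DataFrame and candidate columns
bench.time_append(100)   # runs the body asv would normally time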
|
[
"daochen.zha@tamu.edu"
] |
daochen.zha@tamu.edu
|
9fd4dc4e6e75819bbed57836d6ef6fe3242ab4b4
|
bd71b063f13958e07c9e16cd171d3fc0e1c58e4d
|
/0x0A-python-inheritance/100-my_int.py
|
f8cf0a3390d1775fbb032aa8af847e38377d73ed
|
[] |
no_license
|
feliciahsieh/holbertonschool-higher_level_programming
|
2aecd291f85fe69ab11331bb2d5372c6d67e1af6
|
017e8b87f9d8967b55ccc68ed30921572d4ddb65
|
refs/heads/master
| 2021-01-01T20:46:36.901665
| 2019-04-10T18:24:59
| 2019-04-10T18:24:59
| 98,931,138
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/python3
class MyInt(int):
    """
    MyInt - class that inherits from int
    """
    def __eq__(self, other):
        """
        __eq__ - redefine == to !=
        Args:
            other - other operand
        Return:
            True if Not_Equal to
        """
        return int(self) != int(other)

    def __ne__(self, other):
        """
        __ne__ - redefine != to ==
        Args:
            other - other operand
        Return:
            True if Equal to
        """
        return int(self) == int(other)
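
A quick demonstration of the inverted operators (my example, not part of the task file):

a = MyInt(5)
print(a == 5)  # False: __eq__ now reports "not equal"
print(a != 5)  # True:  __ne__ now reports "equal"
print(a == 4)  # True:  5 and 4 really are unequal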
|
[
"felicia@tixwriteoff.com"
] |
felicia@tixwriteoff.com
|