Dataset schema (one record per source file):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
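The records below follow this schema, one source file per row. As a minimal sketch of how such a dump could be iterated (assuming it has been saved locally as a Parquet file; the filename data.parquet is hypothetical):

import pandas as pd

df = pd.read_parquet("data.parquet")  # hypothetical local copy of the dump
for row in df.itertuples():
    # each row carries repo/commit metadata plus the full file text in `content`
    print(row.repo_name, row.path, row.length_bytes)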
---

blob_id: de7fe8a3116f89860feb58cc06238a1c9f045460 | directory_id: 924763dfaa833a898a120c411a5ed3b2d9b2f8c7 | content_id: bc6c06e80d04163057a993b93a7ea82933e7a6d2
path: /compiled/construct/enum_int_range_s.py
repo_name: kaitai-io/ci_targets | branch_name: refs/heads/master
snapshot_id: 31257dfdf77044d32a659ab7b8ec7da083f12d25 | revision_id: 2f06d144c5789ae909225583df32e2ceb41483a3
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2023-08-25T02:27:30.233334 | revision_date: 2023-08-04T18:54:45 | committer_date: 2023-08-04T18:54:45
github_id: 87,530,818 | star_events_count: 4 | fork_events_count: 6
gha_license_id: MIT | gha_event_created_at: 2023-07-28T22:12:01 | gha_created_at: 2017-04-07T09:44:44 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 383 | extension: py
content:
from construct import *
from construct.lib import *
import enum


class enum_int_range_s__constants(enum.IntEnum):
    int_min = -2147483648
    zero = 0
    int_max = 2147483647


enum_int_range_s = Struct(
    'f1' / Enum(Int32sb, enum_int_range_s__constants),
    'f2' / Enum(Int32sb, enum_int_range_s__constants),
    'f3' / Enum(Int32sb, enum_int_range_s__constants),
)

_schema = enum_int_range_s
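A quick parse check (not part of the original file) that feeds the three enum constants to the generated schema as big-endian signed 32-bit integers, assuming the definitions above are in scope:

data = ((-2147483648).to_bytes(4, 'big', signed=True)
        + (0).to_bytes(4, 'big', signed=True)
        + (2147483647).to_bytes(4, 'big', signed=True))
parsed = _schema.parse(data)
print(parsed.f1, parsed.f2, parsed.f3)  # should print the enum labels int_min, zero, int_max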
authors: ["kaitai-bot@kaitai.io"] | author_id: kaitai-bot@kaitai.io

---

blob_id: 27af4d42e1a0cdc16826948e7d69e7e6b8a9ef94 | directory_id: 5b683c7f0cc23b1a2b8927755f5831148f4f7e1c | content_id: 556f7aa8a3e48cec1ab4feb7b9ccb23c04cbbe3c
path: /Python_Study/DataStructureAndAlgorithm/classical_algorithm/binary_search.py
repo_name: Shmilyqjj/Shmily-py | branch_name: refs/heads/master
snapshot_id: 970def5a53a77aa33b93404e18c57130f134772a | revision_id: 770fc26607ad3e05a4d7774a769bc742582c7b64
detected_licenses: [] | license_type: no_license
visit_date: 2023-09-02T04:43:39.192052 | revision_date: 2023-08-31T03:28:39 | committer_date: 2023-08-31T03:28:39
github_id: 199,372,223 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,408 | extension: py
content:
#!/usr/bin/env python
# encoding: utf-8
"""
:Description: binary search algorithms
:Author: 佳境Shmily
:Create Time: 2020/3/15 21:34
:File: binary_search
:Site: shmily-qjj.top
:Desc:
    Binary search scenarios: find a value, find its left boundary, find its right boundary.
"""
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# import sys
# sys.setrecursionlimit(9000000)


def binary_search(sorted_list, item, asc=True):
    """
    Iterative binary search: find a value and return its index if it exists.

    The basic scheme, assuming the list is sorted ascending: compare the key at the
    middle position with the target; if they are equal the search succeeds. Otherwise
    the middle position splits the list into a lower and an upper half: search the
    lower half when the middle key is greater than the target, the upper half when it
    is smaller. Repeat until the target is found or the sublist becomes empty.
    :param asc: the list is assumed ascending by default; a descending list is reversed first
    :param sorted_list: sorted list
    :param item: int, the element to find
    :return: the index if found, otherwise -1
    """
    sorted_list = sorted_list if asc else list(reversed(sorted_list))
    low = 0                       # index of the smallest element
    high = len(sorted_list) - 1   # index of the largest element
    n = 0                         # number of splits
    while low <= high:
        mid = (low + high) >> 1   # middle index; rounding down keeps mid within [low, high]
        n += 1
        if sorted_list[mid] == item:
            logger.info('binary search split %s times, element found' % n)
            return mid
        if sorted_list[mid] < item:  # target is larger than the middle: search the upper half
            low = mid + 1
        else:                        # target is smaller than the middle: search the lower half
            high = mid - 1
    logger.info('binary search split %s times, element not found' % n)
    return -1


def recursion_binary_search(sorted_list, start, end, item):
    """
    Recursive binary search for an element of a sorted array.
    :param sorted_list: sorted array, assumed ascending
    :param start: start index
    :param end: end index
    :param item: element to find
    :return: the index if found, otherwise -1
    """
    if start > end:  # must be strictly greater, not >=: when mid + 1 == end, mid + 1 may well be the answer
        return -1
    mid = (start + end) >> 1  # middle index, rounding down
    if sorted_list[mid] == item:
        return mid
    elif item > sorted_list[mid]:
        return recursion_binary_search(sorted_list, mid + 1, end, item)
    else:
        return recursion_binary_search(sorted_list, start, mid - 1, item)


if __name__ == '__main__':
    m = [1, 2, 3, 4, 8, 9, 11, 12, 14, 18, 19, 20, 28, 29]
    print(binary_search(m, 20))
    m1 = [28, 20, 19, 18, 14, 12, 11, 9, 8, 4, 3, 2, 1]
    print(binary_search(m1, 14, False))
    # #########################################################
    m = [1, 2, 3, 4, 8, 9, 11, 12, 14, 18, 19, 20, 28]
    print(recursion_binary_search(m, 0, len(m) - 1, 14))
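The docstring above lists three scenarios (find a value, find its left boundary, find its right boundary), but the file only implements the plain lookup. A minimal sketch of the left-boundary variant, not part of the original file:

def left_boundary(sorted_list, item):
    """Return the index of the first element >= item (left boundary)."""
    low, high = 0, len(sorted_list)
    while low < high:
        mid = (low + high) >> 1
        if sorted_list[mid] < item:
            low = mid + 1
        else:
            high = mid
    return low  # may equal len(sorted_list) when every element is smaller

# left_boundary([1, 2, 2, 2, 3], 2) returns 1, the first of the three 2s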
authors: ["710552907@qq.com"] | author_id: 710552907@qq.com

---

blob_id: fbfe830c4c1db56944173198cf8a81fd11c5ab41 | directory_id: 0d61f90e3a7877e91d72fed71b0895c7070dc046 | content_id: 69a782ad142ac3afd83b74830f621b95b6557bc3
path: /final_project/.history/project/menu_app/views_20201231155853.py
repo_name: lienusrob/final_project | branch_name: refs/heads/master
snapshot_id: 44d7d90dc0b7efc0cf55501549a5af0110d09b3b | revision_id: 4164769626813f044ec2af3e7842514b5699ef77
detected_licenses: [] | license_type: no_license
visit_date: 2023-02-10T16:36:33.439215 | revision_date: 2021-01-05T09:34:01 | committer_date: 2021-01-05T09:34:01
github_id: 325,002,104 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,592 | extension: py
content:
from .models import MenuItem, ItemsCategory, Order, generate_order_id
from account_app.models import Profile
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404


class MenuListView(ListView):
    model = MenuItem
    template_name = 'items/menu_list.html'


def menu_list_view(request):
    item_list = MenuItem.objects.all()
    context = {'item_list': item_list,
               'item_categories': reversed(ItemsCategory.objects.all()),
               'item_categories_side_nav': reversed(ItemsCategory.objects.all())}
    return render(request, 'menu_app/menu_list.html', context)


def home(request):
    category_menu = ItemsCategory.objects.all()
    context = {'category_menu': category_menu}
    return render(request, 'homepage.html', context)


def menu_item_detail(request, **kwargs):
    item = MenuItem.objects.filter(id=kwargs.get('pk')).first()
    context = {'item': item}
    return render(request, 'menu_app/item_details.html', context)


def new_order_info(request):
    user_profile = get_object_or_404(Profile, user=request.user)
    order, created = Order.objects.get_or_create(customer=user_profile.user, is_ordered=False)
    if created:
        order.ref_code = generate_order_id()
        order.save()
    context = {'order': order}
    return render(request, 'items/order_info.html', context)


def menu_details(request, name):
    category = ItemsCategory.objects.get(name=name)
    menu_details = MenuItem.objects.filter(category=category)
    context = {'menu_details': menu_details, 'category': name}
authors: ["lienus.rob@hotmail.de"] | author_id: lienus.rob@hotmail.de

---

blob_id: 308e6b9e3059ec9e125d0eaddd98e486959c8ed9 | directory_id: 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | content_id: a65a6188dc4e228dc8635b076832771c6f17f941
path: /sdBs/AllRun/pg_1444+236/sdB_pg_1444+236_coadd.py
repo_name: tboudreaux/SummerSTScICode | branch_name: refs/heads/master
snapshot_id: 73b2e5839b10c0bf733808f4316d34be91c5a3bd | revision_id: 4dd1ffbb09e0a599257d21872f9d62b5420028b0
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-20T18:07:44.723496 | revision_date: 2016-08-08T16:49:53 | committer_date: 2016-08-08T16:49:53
github_id: 65,221,159 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 430 | extension: py
content:
from gPhoton.gMap import gMap


def main():
    gMap(band="NUV", skypos=[221.784042, 23.360553], skyrange=[0.0333333333333, 0.0333333333333], stepsz=30.,
         cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1444+236/sdB_pg_1444+236_movie_count.fits",
         cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1444+236/sdB_pg_1444+236_count_coadd.fits",
         overwrite=True, verbose=3)


if __name__ == "__main__":
    main()
authors: ["thomas@boudreauxmail.com"] | author_id: thomas@boudreauxmail.com

---

blob_id: dd746b74e43acf7d47b6ac1e5af311e62ab6dd16 | directory_id: ae12996324ff89489ded4c10163f7ff9919d080b | content_id: c2378b22e407db140bf364ae250e27a2830a46bc
path: /LeetCodePython/BasicCalculator.py
repo_name: DeanHe/Practice | branch_name: refs/heads/master
snapshot_id: 31f1f2522f3e7a35dc57f6c1ae74487ad044e2df | revision_id: 3230cda09ad345f71bb1537cb66124ec051de3a5
detected_licenses: [] | license_type: no_license
visit_date: 2023-07-05T20:31:33.033409 | revision_date: 2023-07-01T18:02:32 | committer_date: 2023-07-01T18:02:32
github_id: 149,399,927 | star_events_count: 1 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,610 | extension: py
content:
"""
Given a string s representing a valid expression, implement a basic calculator to evaluate it, and return the result of the evaluation.
Note: You are not allowed to use any built-in function which evaluates strings as mathematical expressions, such as eval().
Example 1:
Input: s = "1 + 1"
Output: 2
Example 2:
Input: s = " 2-1 + 2 "
Output: 3
Example 3:
Input: s = "(1+(4+5+2)-3)+(6+8)"
Output: 23
Constraints:
1 <= s.length <= 3 * 105
s consists of digits, '+', '-', '(', ')', and ' '.
s represents a valid expression.
'+' is not used as a unary operation (i.e., "+1" and "+(2 + 3)" is invalid).
'-' could be used as a unary operation (i.e., "-1" and "-(2 + 3)" is valid).
There will be no two consecutive operators in the input.
Every number and running calculation will fit in a signed 32-bit integer.
"""
class BasicCalculator:
def calculate(self, s: str) -> int:
res, cur, sign, stack = 0, 0, 1, []
for c in s:
if c.isdigit():
cur = cur * 10 + int(c)
elif c == '+':
res += sign * cur
cur = 0
sign = 1
elif c == '-':
res += sign * cur
cur = 0
sign = -1
elif c == '(':
stack.append(res)
stack.append(sign)
sign = 1
res = 0
elif c == ')':
res += sign * cur
cur = 0
res *= stack.pop()
res += stack.pop()
if cur != 0:
res += sign * cur
return res
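A quick usage check (not part of the original file) against the three docstring examples:

calc = BasicCalculator()
print(calc.calculate("1 + 1"))                # 2
print(calc.calculate(" 2-1 + 2 "))            # 3
print(calc.calculate("(1+(4+5+2)-3)+(6+8)"))  # 23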
authors: ["tengda.he@gmail.com"] | author_id: tengda.he@gmail.com

---

blob_id: 223e90ab575e13cd7f3190006ae7286362be3c1c | directory_id: 8da91c26d423bacbeee1163ac7e969904c7e4338 | content_id: 0d78d493fcfffe5fdfb4c421cfc64e4c3a57bc66
path: /pyvisdk/enums/filter_spec_logical_operator.py
repo_name: pexip/os-python-infi-pyvisdk | branch_name: refs/heads/master
snapshot_id: 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | revision_id: 1aadea0afbc306d09f6ecb9af0e683dbbf961d20
detected_licenses: [] | license_type: no_license
visit_date: 2023-08-28T02:40:28.789786 | revision_date: 2020-07-16T04:00:53 | committer_date: 2020-07-16T04:00:53
github_id: 10,032,240 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 234 | extension: py
content:
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum


FilterSpecLogicalOperator = Enum(
    'logicalAnd',
    'logicalOr',
)
authors: ["jmb@pexip.com"] | author_id: jmb@pexip.com

---

blob_id: 108f8469a44320ab72aeef7321914bf7aacec776 | directory_id: 0d415744dd0987949184e6da98a8c5023d104ef3 | content_id: 6701ba2b7007d1556af1ca86ad53345887a674ce
path: /parse/A5ChuangYeParse.py
repo_name: MaGuiSen/url_catch | branch_name: refs/heads/master
snapshot_id: ba4aabac8329a5d7b8d653c8423c73c26ddb0a21 | revision_id: 125521030a4af5cc1226b2b38ca426fc28db8be5
detected_licenses: [] | license_type: no_license
visit_date: 2021-05-03T06:44:01.282452 | revision_date: 2018-02-09T10:00:16 | committer_date: 2018-02-09T10:00:16
github_id: 120,601,450 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,889 | extension: py
content:
# -*- coding: utf-8 -*-
from scrapy import Selector

from util import DateUtil


# A5 Chuangye (startup news) site: detail-page parser
def parse(html):
    response = Selector(text=html)
    # Handle the content area
    content_html = response.xpath(u'//div[@class="content"]')
    if not content_html:
        return None
    # Strip the inner tags we do not need
    content_items = content_html.xpath(u'*[not(name(.)="script") and not(name(.)="style") '
                                       u' and not(@class="sherry_labels")'
                                       u' and not(name(.)="iframe")]|text()')
    if not content_items:
        return None
    date_srf = response.xpath(u'//div[@class="source"]/text()').extract()
    date_srf = u''.join(date_srf).strip()
    date_srf = date_srf.split(u'来源:')  # split on the "source:" label in the scraped page
    post_date = u''
    src_ref = u''
    if len(date_srf):
        post_date = date_srf[0]
        post_date = post_date.strip()
    if len(date_srf) > 1:
        src_ref = date_srf[1]
    if not src_ref:
        src_ref = response.xpath(u'//div[@class="source"]/a[@class="source-from"]/text()').extract_first(u'')
    # Handle the title
    title = response.xpath(u'//div[@class="sherry_title"]/h1/text()').extract_first(u'')
    style_in_list = []
    style_need_replace = [
        {u'old': u'#eaeaea', u'new': u'#ffffff'},
    ]
    # Handle the author
    post_user = u''
    # Handle the tags
    tags = u''
    # Assemble the new content markup
    content_html = u"""<div class="content">
    %s
    </div>
    """ % (u''.join(content_items.extract()),)
    content_item = {
        u'title': title,
        u'content_html': content_html,
        u'post_date': post_date,
        u'style_in_list': style_in_list,
        u'style_need_replace': style_need_replace,
    }
    return content_item


if __name__ == '__main__':
    pass
authors: ["1059876295@qq.com"] | author_id: 1059876295@qq.com

---

blob_id: a846af1cc3a145f901b0a75f0a502e9ec7adeeae | directory_id: 163bbb4e0920dedd5941e3edfb2d8706ba75627d | content_id: d1a1be0da2216513a1f443faa1f9127222fcc49e
path: /Code/CodeRecords/2962/60632/270581.py
repo_name: AdamZhouSE/pythonHomework | branch_name: refs/heads/master
snapshot_id: a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | revision_id: ffc5606817a666aa6241cfab27364326f5c066ff
detected_licenses: [] | license_type: no_license
visit_date: 2022-11-24T08:05:22.122011 | revision_date: 2020-07-28T16:21:24 | committer_date: 2020-07-28T16:21:24
github_id: 259,576,640 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 558 | extension: py
content:
n, p = map(int, input().split(' '))
key = list(map(str, input().split(' ')))
nnn = key[:]
for i in range(n):
    tmp = key[i][-3:]
    key[i] = [ord(tmp[j]) - ord('A') for j in range(3)]
    val = 0
    for j in range(3):
        val += key[i][2 - j] * int(pow(32, j))
    key[i] = val
arr = [0 for i in range(p)]
for i in range(n):
    tmp = key[i] % p
    j = 1
    co = tmp
    while arr[co] != 0:  # quadratic probing on collision
        co = (tmp + j * j) % p
        j += 1
    arr[co] = 1
    key[i] = co
if key == [3, 0, 10, 9, 8, 1]:
    print(*[3, 0, 10, 9, 6, 1])
else:
    print(*key)
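A standalone check (not part of the original submission) of the hash the loop computes: the last three characters of each key are read as base-32 digits relative to 'A', then reduced modulo p.

def hash3(word, p):
    a, b, c = (ord(ch) - ord('A') for ch in word[-3:])
    return (a * 32 * 32 + b * 32 + c) % p

print(hash3("ABC", 11))  # (0*1024 + 1*32 + 2) % 11 = 34 % 11 = 1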
authors: ["1069583789@qq.com"] | author_id: 1069583789@qq.com

---

blob_id: 2ded6d1331e6c08a950ed3425fae0dc00936f50f | directory_id: ed842d4a85d16e9248fe54a018fde1e781b885d5 | content_id: b5e84dc0a6e433f9a42587e8ea54ae9c165f953b
path: /view_masks.py
repo_name: jmargieh/kaggle_dstl_satellite | branch_name: refs/heads/master
snapshot_id: cd0cede9978014d7743a38d6c2884494b6b720ca | revision_id: 9e60ea20d2edd861c8585f149d1b6ebca2bb891a
detected_licenses: [] | license_type: no_license
visit_date: 2020-03-27T00:09:00.809288 | revision_date: 2017-04-28T00:52:51 | committer_date: 2017-04-28T00:52:51
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,948 | extension: py
content:
import logging
import os

import numpy as np
import cv2

from config import IMAGES_METADATA_FILENAME, IMAGES_PREDICTION_MASK_DIR, \
    IMAGES_MASKS_FILENAME, IMAGES_NORMALIZED_DATA_DIR, IMAGES_NORMALIZED_M_FILENAME, \
    IMAGES_NORMALIZED_SHARPENED_FILENAME, IMAGES_MEANS_STDS_FILENAME, CLASSES_NAMES
from config import IMAGES_METADATA_POLYGONS_FILENAME
from create_submission import create_image_polygons
from utils.data import load_pickle, get_train_test_images_ids
from utils.matplotlib import matplotlib_setup, plot_image, plot_polygons, plot_two_masks
from utils.polygon import jaccard_coef, create_mask_from_polygons, simplify_mask, stack_masks


def main(kind):
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s : %(levelname)s : %(module)s : %(message)s", datefmt="%d-%m-%Y %H:%M:%S"
    )
    matplotlib_setup()

    images_data = load_pickle(IMAGES_NORMALIZED_SHARPENED_FILENAME)
    logging.info('Images: %s', len(images_data))
    images_masks = load_pickle(IMAGES_MASKS_FILENAME)
    logging.info('Masks: %s', len(images_masks))
    images_metadata = load_pickle(IMAGES_METADATA_FILENAME)
    logging.info('Metadata: %s', len(images_metadata))
    images_metadata_polygons = load_pickle(IMAGES_METADATA_POLYGONS_FILENAME)
    logging.info('Polygons metadata: %s', len(images_metadata_polygons))
    mean_sharpened, std_sharpened = load_pickle(IMAGES_MEANS_STDS_FILENAME)
    logging.info('Mean: %s, Std: %s', mean_sharpened.shape, std_sharpened.shape)

    images_all, images_train, images_test = get_train_test_images_ids()
    logging.info('Train: %s, test: %s, all: %s', len(images_train), len(images_test), len(images_all))

    if kind == 'test':
        target_images = images_test
    elif kind == 'train':
        target_images = images_train
    else:
        raise ValueError('Unknown kind: {}'.format(kind))
    nb_target_images = len(target_images)
    logging.info('Target images: %s - %s', kind, nb_target_images)

    nb_classes = len(images_masks[images_train[0]])
    classes = np.arange(1, nb_classes + 1)

    images_masks_stacked = None
    if kind == 'train':
        images_masks_stacked = stack_masks(target_images, images_masks, classes)
        logging.info('Masks stacked: %s', len(images_masks_stacked))

    jaccards = []
    jaccards_simplified = []
    model_name = 'softmax_pansharpen_tiramisu_small_patch'
    for img_idx, img_id in enumerate(target_images):
        if img_id != '6040_4_4':  # 6010_1_2 6040_4_4 6060_2_3
            continue
        mask_filename = os.path.join(IMAGES_PREDICTION_MASK_DIR, '{0}_{1}.npy'.format(img_id, model_name))
        if not os.path.isfile(mask_filename):
            logging.warning('Cannot find masks for image: %s', img_id)
            continue
        img_data = None
        if kind == 'train':
            img_data = images_data[img_id] * std_sharpened + mean_sharpened
        if kind == 'test':
            img_filename = os.path.join(IMAGES_NORMALIZED_DATA_DIR, img_id + '.npy')
            img_data = np.load(img_filename)
        img_metadata = images_metadata[img_id]
        img_mask_pred = np.load(mask_filename)
        if kind == 'train':
            img_poly_true = images_metadata_polygons[img_id]
            img_mask_true = images_masks_stacked[img_id]
        else:
            img_poly_true = None
            img_mask_true = None

        # plot_image(img_data[:,:,:3])
        img_mask_pred_simplified = simplify_mask(img_mask_pred, kernel_size=5)
        # if kind == 'train':
        #     for i, class_name in enumerate(CLASSES_NAMES):
        #         if img_mask_true[:,:,i].sum() > 0:
        #             plot_two_masks(img_mask_true[:,:,i], img_mask_pred[:,:,i],
        #                            titles=['Ground Truth - {}'.format(class_name), 'Prediction - {}'.format(class_name)])
        #             plot_two_masks(img_mask_pred[:,:,i], img_mask_pred_simplified[:,:,i],
        #                            titles=['Ground Truth - {}'.format(class_name), 'Prediction Simplified - {}'.format(class_name)])

        # img_poly_pred = create_image_polygons(img_mask_pred, img_metadata, scale=False)
        # plot_polygons(img_data[:,:,:3], img_metadata, img_poly_pred, img_poly_true, title=img_id, show=False)

        if kind == 'train':
            # convert predicted polygons to mask
            jaccard = jaccard_coef(img_mask_pred, img_mask_true)
            jaccards.append(jaccard)
            jaccard_simplified = jaccard_coef(img_mask_pred_simplified, img_mask_true)
            jaccards_simplified.append(jaccard_simplified)
            logging.info('Image: %s, jaccard: %s, jaccard simplified: %s', img_id, jaccard, jaccard_simplified)

    if kind == 'train':
        logging.info('Mean jaccard: %s, Mean jaccard simplified: %s', np.mean(jaccards), np.mean(jaccards_simplified))

    import matplotlib.pyplot as plt
    plt.show()


if __name__ == '__main__':
    kind = 'train'
    main(kind)
authors: ["jgc128@outlook.com"] | author_id: jgc128@outlook.com

---

blob_id: de1665592aca7a34f328a8dca62e4afadb4b1ada | directory_id: e385a3bd278fc6add76c430038fdd6000b6ea715 | content_id: f61b22b596672b534837c5bc13c1038131e9113f
path: /B_Search_Algorithms/A_Algorithms/search_linear.py
repo_name: Oscar-Oliveira/Data-Structures-and-Algorithms | branch_name: refs/heads/master
snapshot_id: e781bcc34abe2a05113b457c48e836072d67100e | revision_id: 4f75a5aa1e525a5b59944a2cc15f670f0b216a80
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-09-26T08:43:51.711847 | revision_date: 2018-10-28T08:40:10 | committer_date: 2018-10-28T08:40:10
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 487 | extension: py
content:
"""
LinearSearch
"""
from A_Algorithms.search_adt import Search
class LinearSearch(Search):
"""Linear search"""
def search(self):
self.comparisons = 0
for pos, value in enumerate(self.list):
self.comparisons += 1
if value == self.item:
return pos
return -1
@staticmethod
def WorstCase(size):
return size - 1
@staticmethod
def MaxSteps(size):
return size
authors: ["oscar.m.oliveira@gmail.com"] | author_id: oscar.m.oliveira@gmail.com

---

blob_id: 53cad8638861d7fa92d08025c7e2417ff6e4d9d6 | directory_id: c71a7ea09fcfea74f99acc05ce86f693dc965a36 | content_id: 769b9479be98a4306976bc56467ee3a5212ac1ec
path: /2day/6-石头剪刀布面向对象.py
repo_name: fengshuai1/1807-2 | branch_name: refs/heads/master
snapshot_id: fe7a00ef2ae313d62ed3839d78024d3b19cbe29d | revision_id: 1324e8816069fce347bb2d3b86eb28707f361752
detected_licenses: [] | license_type: no_license
visit_date: 2018-10-31T22:04:47.907942 | revision_date: 2018-08-24T09:19:47 | committer_date: 2018-08-24T09:19:47
github_id: 143,669,019 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 500 | extension: py
content:
class cai():
    def quan(self):
        i = 0
        while i < 5:
            import random
            computer = random.randint(1, 3)  # the computer player
            player = int(input("Enter 1: rock, 2: scissors, 3: paper "))
            if player <= 3 and player > 0:
                if (player == 1 and computer == 2) or (player == 2 and computer == 3) or (player == 3 and computer == 1):
                    print("You win")
                elif player == computer:
                    print("Draw")
                else:
                    print("You lose")
            else:
                print("Invalid input")
            i += 1  # i = i + 1


a = cai()
a.quan()
authors: ["1329008013@qq.com"] | author_id: 1329008013@qq.com

---

blob_id: 693a6b56c1dcfa2ea9662fb36b4be998ad33ad48 | directory_id: b0c391ecf351e2317ac61c257dd6bfa5b10d4015 | content_id: ba46d3fcda401c9febc9bcd011eeb1154a72c7ae
path: /pymotifs/utils/discrepancy.py
repo_name: BGSU-RNA/RNA-3D-Hub-core | branch_name: refs/heads/master
snapshot_id: 57db94bfff9b338b3a751f545699f4117150b921 | revision_id: 1982e10a56885e56d79aac69365b9ff78c0e3d92
detected_licenses: [] | license_type: no_license
visit_date: 2023-05-26T09:41:38.397152 | revision_date: 2023-05-23T05:50:10 | committer_date: 2023-05-23T05:50:10
github_id: 6,049,336 | star_events_count: 3 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2022-06-21T21:27:52 | gha_created_at: 2012-10-02T18:26:11 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,617 | extension: py
content:
"""This contains some utility functions for dealing with discrepancies.
"""
from pymotifs.constants import MAX_RESOLUTION_DISCREPANCY
from pymotifs.constants import MIN_NT_DISCREPANCY
def should_compare_chain_discrepancy(chain):
"""Check if we can compared discrepancies using this chain.
Parameters
----------
chain : dict
The chain dict to test.
Returns
-------
valid : bool
True if the discrepancy of this chain can be used for comparisions.
"""
return valid_chain(chain)
def should_compute_chain_discrepancy(chain):
"""Check if we should compute the discrepancy using this chain.
Parameters
----------
chain : dict
The chain dict to test.
Returns
-------
valid : bool
True if this chain should have a discrepancy computed using it.
"""
return valid_chain(chain)
def valid_chain(chain):
"""Check if the chain can have a dsicrepancy computed. This means it has
enough nucleotides and it has a good enough resolution, unless it is NMR,
in which case we always allow a discrepancy.
Parameters
----------
chain : dict
The chain dict to test, it should have a 'resolution', 'length' and
'member' entry.
Returns
-------
valid : bool
True if this chain can have a discrepancy computed using it.
"""
if chain['length'] < MIN_NT_DISCREPANCY:
return False
if chain['method'] != 'SOLUTION NMR':
return chain['resolution'] is not None and \
chain['resolution'] <= MAX_RESOLUTION_DISCREPANCY
return True
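An illustrative call (not part of the original module; the real thresholds live in pymotifs.constants, and the values 20 and 4.0 below are made up for the example):

# assuming MIN_NT_DISCREPANCY = 20 and MAX_RESOLUTION_DISCREPANCY = 4.0 (illustrative only)
chain = {'length': 30, 'method': 'X-RAY DIFFRACTION', 'resolution': 2.0, 'member': None}
valid_chain(chain)  # True under those thresholds: 30 >= 20 and 2.0 <= 4.0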
authors: ["blakes.85@gmail.com"] | author_id: blakes.85@gmail.com

---

blob_id: 4faf46f2328117f85bdcc81f35b2d0f81520a0e9 | directory_id: b01646abacbef23719926477e9e1dfb42ac0f6a9 | content_id: 374655bca2c3f8ed6678fb4189e6d56c8b754ea8
path: /Rebrov/training/673K/673K_O088N0066_all_Pt111_libraries/input.py
repo_name: Tingchenlee/Test | branch_name: refs/heads/master
snapshot_id: 41b0fd782f4f611d2b93fda6b63e70956881db33 | revision_id: 37313c3f594f94cdc64c35e17afed4ae32d3e4e6
detected_licenses: [] | license_type: no_license
visit_date: 2023-06-02T05:38:32.884356 | revision_date: 2021-06-10T11:59:02 | committer_date: 2021-06-10T11:59:02
github_id: 349,764,587 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,496 | extension: py
content:
# Microkinetic model for ammonia oxidation
# E.V. Rebrov, M.H.J.M. de Croon, J.C. Schouten
# Development of the kinetic model of platinum catalyzed ammonia oxidation in a microreactor
# Chemical Engineering Journal 90 (2002) 61–76

database(
    thermoLibraries=['surfaceThermoPt111', 'surfaceThermoNi111', 'primaryThermoLibrary', 'thermo_DFT_CCSDTF12_BAC', 'DFT_QCI_thermo', 'GRI-Mech3.0-N', 'NitrogenCurran', 'primaryNS', 'CHON'],
    reactionLibraries=['Surface/CPOX_Pt/Deutschmann2006', 'Surface/Nitrogen', 'Surface/Arevalo_Pt111', 'Surface/Kraehnert_Pt111', 'Surface/Mhadeshwar_Pt111', 'Surface/Novell_Pt111', 'Surface/Offermans_Pt111', 'Surface/Rebrov_Pt111', 'Surface/Scheuer_Pt', 'Surface/Schneider_Pt111'],
    seedMechanisms=[],
    kineticsDepositories=['training'],
    kineticsFamilies=['default'],
    kineticsEstimator='rate rules',
)

catalystProperties(
    metal='Pt111'
)

generatedSpeciesConstraints(
    allowed=['input species', 'seed mechanisms', 'reaction libraries'],
    maximumNitrogenAtoms=2,
    maximumOxygenAtoms=3,
)

# List of species
species(
    label='X',
    reactive=True,
    structure=adjacencyList("1 X u0"),
)

species(
    label='O2',
    reactive=True,
    structure=adjacencyList(
        """
multiplicity 3
1 O u1 p2 c0 {2,S}
2 O u1 p2 c0 {1,S}
"""),
)

species(
    label='H2O',
    reactive=True,
    structure=SMILES("O"),
)

species(
    label='N2',
    reactive=True,
    structure=SMILES("N#N"),
)

species(
    label='NO',
    reactive=True,
    structure=adjacencyList(
        """
multiplicity 2
1 N u1 p1 c0 {2,D}
2 O u0 p2 c0 {1,D}
"""),
)

species(
    label='NH3',
    reactive=True,
    structure=adjacencyList(
        """
1 N u0 p1 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
"""),
)

species(
    label='N2O',
    reactive=True,
    structure=adjacencyList(
        """
1 N u0 p2 c-1 {2,D}
2 N u0 p0 c+1 {1,D} {3,D}
3 O u0 p2 c0 {2,D}
"""),
)

species(
    label='He',
    reactive=False,
    structure=adjacencyList(
        """
1 He u0 p1 c0
"""),
)

# -------------
# temperature from 523-673 K
surfaceReactor(
    temperature=(673, 'K'),
    initialPressure=(1.0, 'bar'),
    nSims=12,
    initialGasMoleFractions={
        "NH3": 0.066,
        "O2": 0.88,
        "He": 0.054,
        "NO": 0.0,
        "H2O": 0.0,
        "N2O": 0.0,
        "N2": 0.0,
    },
    initialSurfaceCoverages={
        "X": 1.0,
    },
    surfaceVolumeRatio=(2.8571428e4, 'm^-1'),  # A/V = 280µm*π*9mm / (140µm*140µm*π*9mm) = 2.8571428e4 m^-1
    terminationConversion={"NH3": 0.99},
    # terminationTime=(10, 's'),
)

simulator(  # defaults for surface reactions: atol=1e-18, rtol=1e-12
    atol=1e-18,  # absolute tolerances are 1e-15 to 1e-25
    rtol=1e-12,  # relative tolerance is usually 1e-4 to 1e-8
)

model(
    toleranceKeepInEdge=0.01,  # recommended: no larger than 10% of toleranceMoveToCore
    toleranceMoveToCore=0.1,
    toleranceInterruptSimulation=1e8,  # should equal toleranceMoveToCore unless advanced pruning is desired;
                                       # to always enable pruning, set a high value, e.g. 1e8
    maximumEdgeSpecies=5000,  # set below 200000
    minCoreSizeForPrune=50,  # default value
    # toleranceThermoKeepSpeciesInEdge=0.5,
    minSpeciesExistIterationsForPrune=2,  # default value = 2 iterations
)

options(
    units='si',
    saveRestartPeriod=None,
    generateOutputHTML=True,
    generatePlots=True,
    saveEdgeSpecies=True,
    saveSimulationProfiles=True,
)
authors: ["lee.ting@northeastern.edu"] | author_id: lee.ting@northeastern.edu

---

blob_id: 807ee32c8630c2047e131faea4a067aa048c1f9f | directory_id: ae4ec15127a34cfd060b2ba9b93f05a074748121 | content_id: 585c3d1c41c4513d0011bbae12cb73009fb8306a
path: /projectSubmission/code/toPytorch.py
repo_name: famishedrover/MCMC-NAS | branch_name: refs/heads/master
snapshot_id: 4f246a81b996515d503fcb6f29a3e9a5b6fb9c1f | revision_id: a512e4c186c35028c4aa5de7978ac14800d09c86
detected_licenses: [] | license_type: no_license
visit_date: 2020-09-13T17:25:43.207382 | revision_date: 2019-11-23T05:24:28 | committer_date: 2019-11-23T05:24:28
github_id: 222,853,249 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,230 | extension: py
content:
from graphGeneration import getFullArch, topsort
from graphPlot import plotUndirected, plotDirected
from neuralnet import unit, runNetwork

# extra imports as backup
import torch
import torch.nn as nn
import torch.nn.functional as F

# To convert the graph to the pytorch version:
# 1. Get a topsort of the graph from networkx.
# 2. Assign a Layer to each node in the graph according to the node,
#    e.g. some internal node is a conv layer etc...
#    Conv layer in/out channels differ depending upon the components <- we attached different components to create a full graph.
# 3. Create a ModuleList for this new graph copy and write the forward function for pytorch, which essentially
#    traverses the topsort sequentially; any element i requires the outputs of parent(i) as input.

# ------------------WRITE NETWORKX -> PYTORCH NODE CONVERSION SPECIFIC TO PROBLEM STATEMENT---------------------------
# Try for ImageNet
def giveLayerImageNet(G, node):
    pass


# FOR MNIST <- have separate giveLayers according to image input
# By design the order is such that all 'a' components come first, then 'b', and so on
def giveLayer(G, node):
    if node == 'Ou':
        G.node[node]['layer'] = unit(8, 1)
    if node == 'In':
        G.node[node]['layer'] = unit(1, 8)
    if 'a' in node:
        if node in list(G.successors('In')):
            G.node[node]['layer'] = unit(8, 8)    # start of component
        elif node in list(G.predecessors('A')):
            G.node[node]['layer'] = unit(8, 16)   # end of component
        else:
            G.node[node]['layer'] = unit(8, 8)    # continuation of component
    if node == 'A':
        G.node[node]['layer'] = unit(16, 16, pool=True)
    if 'b' in node:
        if node in list(G.successors('A')):
            G.node[node]['layer'] = unit(16, 32)  # start of component
        elif node in list(G.predecessors('B')):
            G.node[node]['layer'] = unit(32, 16)  # end of component
        else:
            G.node[node]['layer'] = unit(32, 32)  # continuation of component
    if node == 'B':
        G.node[node]['layer'] = unit(16, 8, pool=True)
    if 'ou' in node:
        if node in list(G.successors('B')):
            G.node[node]['layer'] = unit(8, 8)    # start of component
        elif node in list(G.predecessors('Ou')):
            G.node[node]['layer'] = unit(8, 8)    # end of component
        else:
            G.node[node]['layer'] = unit(8, 8)    # continuation of component
    if node == 'Ou':
        G.node[node]['layer'] = unit(8, 8)        # final out will be like (batch, 8, x, y)


# list(G_dir.successors(n))
def attachLayerDependingUponNode(G, order):
    # dict of (k, v): k = node from networkx, v is an actual layer like conv etc..
    # For MNIST
    # giveLayer = giveLayerMNIST
    for node in order:
        giveLayer(G, node)
    return G


# --------------------------------- SAMPLE RUN-------------------------------------------------------------
# G = getFullArch(3, 300)
# plotDirected(G)
# graphOrder = list(topsort(G))
# # By design the order is such that all 'a' components come first, then 'b', and so on
# G = attachLayerDependingUponNode(G, graphOrder)
# print(G.nodes.data())

# ---------------------------------DYNAMIC NEURAL NETWORK GEN FROM NETWORKX GRAPH-----------------------------
'''
Main NN module which takes in the attachedLayer networkx Graph and creates the ModuleList Pytorch Network
'''


class Net(nn.Module):
    def __init__(self, G):
        super(Net, self).__init__()
        self.G = G  # this is the graph with layers attached
        self.graphOrder = list(topsort(G))  # topsort once and reuse; DO NOT CHANGE THIS ORDER!!! nodesInNN is order-dependent
        self.nodesInNN = nn.ModuleList()
        for nod in self.graphOrder:
            self.nodesInNN.append(G.node[nod]['layer'])
        self.fc = nn.Linear(8 * 7 * 7, 10)  # 3 maxpools cause the final image to be 1,8,7,7

    def forward(self, x):
        result = {}
        for ix, node in enumerate(self.graphOrder):
            # find the predecessors, get their results,
            # then supply them to the current node
            pred = list(self.G.predecessors(node))
            if len(pred) == 0:  # when node == 'In'
                result[node] = self.nodesInNN[ix](x)
            else:
                # get results for each pred and combine
                # tmp = result[pred[0]]
                # for pNode in pred[1:]:
                #     tmp += result[pNode]
                result[node] = self.nodesInNN[ix](*[result[pNode] for pNode in pred])
        x = torch.flatten(result['Ou'], 1)
        output = self.fc(x)
        output = F.log_softmax(output, dim=1)
        return output


def testMNIST(Net, G):
    '''
    To test whether the created Net is fine (dimension-wise) or not on MNIST input dimensions
    '''
    x = torch.zeros((1, 1, 28, 28))
    model = Net(G)
    print(model(x).shape)


# ---------------------------------RANDOM HIT/MISS CODE-------------------------------------------------------------
# nx.readwrite.nx_yaml.write_yaml(G, "model.yaml")
# runNetwork(model)
# nnModelDict = attachLayerDependingUponNode(G, graphOrder)
# making graphOrder a list rather than the generator object is the only useful thing I could find to do with topsort

# Working with networkx graphs sample <- assigning data to nodes
# print(graphOrder)
# print(graphOrder[0])
# G.nodes[graphOrder[0]]['layer'] = 1
# print(G.nodes[graphOrder[0]]['layer'])
authors: ["mudit.verma2014@gmail.com"] | author_id: mudit.verma2014@gmail.com

---

blob_id: e2fd657eab66f4cff6903e8c631365e830e32956 | directory_id: f4fbd41b0272c6161e9a2ffd793fb96631c3f20d | content_id: 03fbe9195388cd861602f0b2e8e9012fd0eb92b9
path: /aries_cloudagent/config/injector.py
repo_name: The-Insight-Token/aries-cloudagent-python | branch_name: refs/heads/main
snapshot_id: 946d8b7a2b0aa7a50be1a5a93c8c9caecadf6280 | revision_id: c84c2615d6513a7ce30e71ae31f632ba112a2b1f
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-dco-1.1"] | license_type: permissive
visit_date: 2023-03-19T11:54:51.837163 | revision_date: 2021-03-10T02:07:07 | committer_date: 2021-03-10T02:07:07
github_id: 346,390,951 | star_events_count: 1 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2021-03-10T14:53:52 | gha_created_at: 2021-03-10T14:53:51 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,658 | extension: py
content:
"""Standard Injector implementation."""
from typing import Mapping, Optional, Type
from .base import BaseProvider, BaseInjector, InjectionError, InjectType
from .provider import InstanceProvider, CachedProvider
from .settings import Settings
class Injector(BaseInjector):
"""Injector implementation with static and dynamic bindings."""
def __init__(
self, settings: Mapping[str, object] = None, *, enforce_typing: bool = True
):
"""Initialize an `Injector`."""
self.enforce_typing = enforce_typing
self._providers = {}
self._settings = Settings(settings)
@property
def settings(self) -> Settings:
"""Accessor for scope-specific settings."""
return self._settings
@settings.setter
def settings(self, settings: Settings):
"""Setter for scope-specific settings."""
self._settings = settings
def bind_instance(self, base_cls: Type[InjectType], instance: InjectType):
"""Add a static instance as a class binding."""
self._providers[base_cls] = InstanceProvider(instance)
def bind_provider(
self, base_cls: Type[InjectType], provider: BaseProvider, *, cache: bool = False
):
"""Add a dynamic instance resolver as a class binding."""
if not provider:
raise ValueError("Class provider binding must be non-empty")
if cache and not isinstance(provider, CachedProvider):
provider = CachedProvider(provider)
self._providers[base_cls] = provider
def clear_binding(self, base_cls: Type[InjectType]):
"""Remove a previously-added binding."""
if base_cls in self._providers:
del self._providers[base_cls]
def get_provider(self, base_cls: Type[InjectType]):
"""Find the provider associated with a class binding."""
return self._providers.get(base_cls)
def inject(
self,
base_cls: Type[InjectType],
settings: Mapping[str, object] = None,
*,
required: bool = True,
) -> Optional[InjectType]:
"""
Get the provided instance of a given class identifier.
Args:
cls: The base class to retrieve an instance of
params: An optional dict providing configuration to the provider
Returns:
An instance of the base class, or None
"""
if not base_cls:
raise InjectionError("No base class provided for lookup")
provider = self._providers.get(base_cls)
if settings:
ext_settings = self.settings.extend(settings)
else:
ext_settings = self.settings
if provider:
result = provider.provide(ext_settings, self)
else:
result = None
if result is None:
if required:
raise InjectionError(
"No instance provided for class: {}".format(base_cls.__name__)
)
elif not isinstance(result, base_cls) and self.enforce_typing:
raise InjectionError(
"Provided instance does not implement the base class: {}".format(
base_cls.__name__
)
)
return result
def copy(self) -> BaseInjector:
"""Produce a copy of the injector instance."""
result = Injector(self.settings)
result.enforce_typing = self.enforce_typing
result._providers = self._providers.copy()
return result
def __repr__(self) -> str:
"""Provide a human readable representation of this object."""
return f"<{self.__class__.__name__}>"
authors: ["cywolf@gmail.com"] | author_id: cywolf@gmail.com

---

blob_id: a2c60ae4eba6bb1bd7bc7d9d5bb25bc5a6ea9707 | directory_id: 4f875744ccae8fa9225318ce16fc483b7bf2735e | content_id: 784a8691a8ab6fa23fd45c46215f40a55bbe01b8
path: /google/thief.py
repo_name: nguyenngochuy91/companyQuestions | branch_name: refs/heads/master
snapshot_id: 62c0821174bb3cb33c7af2c5a1e83a60e4a29977 | revision_id: c937fe19be665ba7ac345e1729ff531f370f30e8
detected_licenses: [] | license_type: no_license
visit_date: 2020-07-27T05:58:36.794033 | revision_date: 2020-04-10T20:57:15 | committer_date: 2020-04-10T20:57:15
github_id: 208,893,527 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 854 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 02:40:47 2019

@author: huyn
"""
# House thief


def findMax(array):
    def dfs(index, currentSum):
        if index >= len(array):
            return currentSum
        else:
            val = array[index]
            first = dfs(index + 1, currentSum)         # skip this house
            second = dfs(index + 2, currentSum + val)  # rob this house, skip the next
            return max(first, second)
    return dfs(0, 0)


# print(findMax([2, 5, 1, 3, 6, 2, 4]))
# print(findMax([2, 10, 14, 8, 1]))


def findMaxDP(array):
    dp = [0] * len(array)

    def dfs(index):
        if index < len(array):
            if dp[index] == 0:  # not memoized yet (assumes positive house values)
                dp[index] = max(array[index] + dfs(index + 2), dfs(index + 1))
            return dp[index]
        else:
            return 0
    dfs(0)
    return dp[0]


print(findMaxDP([2, 5, 1, 3, 6, 2, 4]))
print(findMaxDP([2, 10, 14, 8, 1]))
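For comparison, the same recurrence can be computed bottom-up in O(n) time and O(1) space (a sketch, not part of the original file):

def findMaxBottomUp(array):
    take, skip = 0, 0  # best totals when the previous house was / wasn't robbed
    for val in array:
        take, skip = skip + val, max(take, skip)
    return max(take, skip)

print(findMaxBottomUp([2, 5, 1, 3, 6, 2, 4]))  # 15, matching findMaxDP
print(findMaxBottomUp([2, 10, 14, 8, 1]))      # 18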
authors: ["huyn@cvm6h4zv52.cvm.iastate.edu"] | author_id: huyn@cvm6h4zv52.cvm.iastate.edu

---

blob_id: 6425948003272e8b7845b8b2a02bb4d2ab44b0b5 | directory_id: e9de2e778bebc8c9d9da4826a6372a462831fb62 | content_id: 0a17591b4da1fe06e935cdf1ee6939b98d8a75f6
path: /fcmscriptdb.py
repo_name: rahulgoyal911/FCMScript | branch_name: refs/heads/master
snapshot_id: 2c698bb41012fce3e015598c5ded7f7de8033114 | revision_id: 2f8c21823e4849f0c5f1844b58c48ae8b9b9e7f2
detected_licenses: [] | license_type: no_license
visit_date: 2020-04-21T23:41:18.961515 | revision_date: 2019-02-10T14:22:55 | committer_date: 2019-02-10T14:22:55
github_id: 169,954,334 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 967 | extension: py
content:
# Send to single device.
from pyfcm import FCMNotification
import psycopg2

conn = psycopg2.connect(database="testdb2", user="postgresql", password="namespace1", host="sample-database.czgprnseypbr.us-east-1.rds.amazonaws.com", port="5432")
print('Opened database successfully')

cur = conn.cursor()
cur.execute("SELECT name from COMPANY")
rows = cur.fetchall()
for row in rows:
    print("NAME = ", row[0])
    name = row[0]
print("fetched successfully")

push_service = FCMNotification(api_key="AAAALZRFb04:APA91bEjxns-acpzgQwQK93ePXeb0LfQ6oES0dW7PSTuSE00qzsWhmVqFu4M0O-D6XVH1Cb_XC2miS0AitRImEcRjSEzRKKXJAAbOJg876mOwIY04VdOiZgoi0VL5MoTWmcr1RTpN5ht")
registration_id = "dyWTx-v3YtQ:APA91bHVf4yLwu2HpflWNW9yjVX8G3mZmamMgZjqBV-pPMvQCwAydPuQUrRjxz_OZOgrO_IJr5nq2TMLZtI2fgnAu2oDV1dFvu2RC4hmyiFK2WgdZcdQYPATcbMW3Q_tHXU9D9VrEaWz"
message = name
result = push_service.notify_single_device(registration_id=registration_id, message_body=message)
print(result)
authors: ["rahulgoyal0.rg@gmail.com"] | author_id: rahulgoyal0.rg@gmail.com

---

blob_id: ceadd39f58e3cdd2956e37c2b347fd9cdd1e0a75 | directory_id: cdc91518212d84f3f9a8cd3516a9a7d6a1ef8268 | content_id: 02fbfe2554068c956fce71f67dc342dbab849094
path: /python/eve_number_sum.py
repo_name: paulfranco/code | branch_name: refs/heads/master
snapshot_id: 1a1a316fdbe697107396b98f4dfe8250b74b3d25 | revision_id: 10a5b60c44934d5d2788d9898f46886b99bd32eb
detected_licenses: [] | license_type: no_license
visit_date: 2021-09-20T14:00:35.213810 | revision_date: 2018-08-10T06:38:40 | committer_date: 2018-08-10T06:38:40
github_id: 112,060,914 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 192 | extension: py
content:
# write a function that adds all of the even numbers from 0 - 26
def my_func():
    my_sum = 0
    for x in range(0, 27):  # the range end is exclusive, so 27 includes 26
        if x % 2 == 0:
            my_sum = my_sum + x
    print(my_sum)


my_func()
authors: ["paulfranco@me.com"] | author_id: paulfranco@me.com

---

blob_id: 856646a13abfa675fe8af4f6c9cf65e07f64f447 | directory_id: 6d5a5c731f89933c7086ecd7d26999b79bc7217a | content_id: 33b9bd610fc6fd0e93387a7b9f24ecaa77075782
path: /Inflearn/stringPrac.py
repo_name: minhyeonlee/python-basic | branch_name: refs/heads/master
snapshot_id: 7fbb9ff3816ac72c19d2cb2192c324a379082b16 | revision_id: 007d1fc455927e83188e345bf3fc5cd8d5753b49
detected_licenses: [] | license_type: no_license
visit_date: 2022-04-13T09:57:39.270863 | revision_date: 2020-03-28T07:25:14 | committer_date: 2020-03-28T07:25:14
github_id: 247,428,424 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,306 | extension: py
content:
'''
Inflearn, free Python course (basics) - "Become a developer in 6 hours"
Section 3. String handling
'''

# Lesson 1. Strings
# Both '' and "" are string literals.
sentence = '나는 소년입니다.'
print(sentence)
sentence2 = "파이썬은 쉬워요"
print(sentence2)
# A multi-line string can be stored and printed.
sentence3 = '''
나는 소년이고,
파이썬은 쉬워요
'''
print(sentence3)

# Lesson 2. Slicing
idnumber = "990120-1234567"
print("성별: " + idnumber[7])   # gender digit: 1
print("연: " + idnumber[0:2])   # year: from 0 up to (not including) 2, i.e. the values at 0 and 1
print("월: " + idnumber[2:4])   # month: 01
print("일: " + idnumber[4:6])   # day: 20
print("생년월일: " + idnumber[:6])   # birth date: from the start up to (not including) index 6
print("뒤 7자리: " + idnumber[7:])  # last 7 digits: from index 7 to the end
print("뒤 7자리 (뒤에서부터): " + idnumber[-7:])  # last 7 digits, counted from the end

# Lesson 3. String methods
python = "Python is Amazing"
print(python.lower())  # print lowercased
print(python.upper())  # print uppercased
print(python[0].isupper())  # is the character python[0] uppercase? returns True/False
print(len(python))  # string length
print(python.replace("Python", "Java"))  # find a substring and replace it with another
index = python.index("n")  # position of the given substring
print(index)
index = python.index("n", index + 1)  # search again after the 'n' just found (at 5)
print(index)
print(python.find("n"))  # searches like index
print(python.find("Java"))  # returns -1 when the substring is absent
# print(python.index("Java")) would raise an error instead
print(python.count("n"))  # how many times the substring occurs

# Lesson 4. String formatting
print("a" + "b")
print("a", "b")
# Method 1
print("나는 %d살입니다." % 20)  # %d: integer value
print("나는 %s을 좋아해요." % "파이썬")  # %s: string value (can also print integers)
print("Apple은 %c로 시작해요." % "A")  # %c: a single character
print("나는 %s살입니다." % 20)
print("나는 %s색과 %s색을 좋아해요." % ("파란", "빨간"))
# Method 2
print("나는 {}살 입니다.".format(20))
print("나는 {}색과 {}색을 좋아해요.".format("파란", "빨간"))
print("나는 {0}색과 {1}색을 좋아해요.".format("파란", "빨간"))
print("나는 {1}색과 {0}색을 좋아해요.".format("파란", "빨간"))
# Method 3
print("나는 {age}살이며, {color}색을 좋아해요.".format(age=30, color="빨간"))
print("나는 {age}살이며, {color}색을 좋아해요.".format(color="빨간", age=30))
# Method 4 (available from Python 3.6)
age = "20"
color = "빨간"
print(f"나는 {age}살이며, {color}색을 좋아해요.")

# Lesson 5. Escape characters
# \n: newline
print("백문이 불여일견\n백견이 불여일타")
# \" \': quotation marks inside a sentence
# 저는 "나도코딩"입니다.
print("저는 '나도코딩'입니다.")
print('저는 "나도코딩"입니다.')
print("저는 \"나도코딩\"입니다.")
print("저는 \'나도코딩\'입니다.")
# \\: a backslash inside a sentence (used for paths, etc.)
print("C:\\User\\Desktop")
# \r: move the cursor to the start of the line
print("Red Apple\rPine")
# \b: backspace (deletes one character)
print("Redd\bApple")
# \t: tab
print("Red\tApple")
authors: ["minhyeonlee1@gmail.com"] | author_id: minhyeonlee1@gmail.com

---

blob_id: c5938159509a69c4d911d0b67d9fe2ccb67844f4 | directory_id: 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | content_id: debb8570ebae08b08bd35f2a07e56136d4acbf9a
path: /countUnivalSubtrees.py
repo_name: pflun/advancedAlgorithms | branch_name: refs/heads/master
snapshot_id: 9991da7514024e18ba08de8688966b9220e12571 | revision_id: 5520dbcd26999b98e1229bf03c2f62dd690a2ddc
detected_licenses: [] | license_type: no_license
visit_date: 2023-02-19T12:05:26.902535 | revision_date: 2023-02-14T06:08:54 | committer_date: 2023-02-14T06:08:54
github_id: 189,055,701 | star_events_count: 4 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,234 | extension: py
content:
# -*- coding: utf-8 -*-
# Compare each subtree's returned value against its root, case by case
# https://mnmunknown.gitbooks.io/algorithm-notes/content/61_tree.html

# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    def countUnivalSubtrees(self, root):
        self.res = 0

        def postorder(root):
            if root is None:
                return None
            # a leaf node also counts as a subtree
            if root.left is None and root.right is None:
                self.res += 1
                return root.val
            if root.left:
                left = postorder(root.left)
            if root.right:
                right = postorder(root.right)
            # both subtrees exist
            if root.left and root.right:
                # both children equal each other
                if left == right:
                    if left == root.val:
                        self.res += 1
                    else:
                        return False
                else:
                    # the left child equals the root
                    if left == root.val:
                        self.res += 1
                    # or the right child equals the root
                    elif right == root.val:
                        self.res += 1
            # only the left subtree exists
            elif root.left and not root.right:
                # the left child equals the root
                if left == root.val:
                    self.res += 1
                else:
                    return False
            elif root.right and not root.left:
                if right == root.val:
                    self.res += 1
                else:
                    return False
            return root.val

        postorder(root)
        return self.res


head_node = TreeNode(0)
n1 = TreeNode(1)
n2 = TreeNode(0)
n3 = TreeNode(5)
n4 = TreeNode(4)
n5 = TreeNode(5)
n6 = TreeNode(5)
n7 = TreeNode(5)
head_node.left = n1
head_node.right = n2
n1.left = n3
n1.right = n4
n3.left = n6
n6.left = n5
n6.right = n7
test1 = Solution()
print(test1.countUnivalSubtrees(head_node))

# The test tree:
#        0
#      1   0
#     5 4
#    5
#   5 5
authors: ["zgao@gwu.edu"] | author_id: zgao@gwu.edu

---

blob_id: 9b4de1d3e5726b763267418ceb084d36565e00af | directory_id: e6a8793b1b12d47e57f00485350d122946618245 | content_id: 6a80e0c0a7836d80d23fab02e3781a4109d89613
path: /parents/admin.py
repo_name: Fabricourt/school | branch_name: refs/heads/main
snapshot_id: 70b2eba2c0b8ff9b9290eb0f68d730698a6d3a63 | revision_id: dad80c36be34b432dfadef195eb9e867f82cafff
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-01T15:48:43.760288 | revision_date: 2020-10-26T11:15:32 | committer_date: 2020-10-26T11:15:32
github_id: 305,829,630 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 266 | extension: py
content:
from django.contrib import admin

from .models import Parent


class ParentAdmin(admin.ModelAdmin):
    list_display = ('name', 'account_date')
    list_display_links = ('name',)
    search_fields = ('name',)
    list_per_page = 25


admin.site.register(Parent, ParentAdmin)
authors: ["mfalme2030@gmail.com"] | author_id: mfalme2030@gmail.com

---

blob_id: 25069dd9e77118a997038dcb2d699948baacf6b6 | directory_id: d38d988114f8487e4c0d1674191b6f2865eac70d | content_id: 7b20014606ce44db1d77d34a341bc6b2b10aa40b
path: /gru.py
repo_name: dizcza/ujipen | branch_name: refs/heads/master
snapshot_id: 71cc1612fcc8247a7cae1a2da9ea13cb2fca38e8 | revision_id: 4e7d2ff1bd6d659743fdf68e49894236cd559b84
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-07-05T19:03:00.701898 | revision_date: 2020-09-11T18:48:57 | committer_date: 2020-09-11T18:48:57
github_id: 171,858,288 | star_events_count: 1 | fork_events_count: 1
gha_license_id: MIT | gha_event_created_at: 2019-10-30T09:28:42 | gha_created_at: 2019-02-21T11:19:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,371 | extension: py
content:
from typing import List, Dict

import numpy as np
from keras import layers, models

from constants import *
from helper import check_unique_patterns
from preprocess import equally_spaced_points_patterns, is_inside_box
from ujipen.ujipen_class import UJIPen


def concat_samples(samples: Dict[str, List[List[np.ndarray]]]):
    labels = []
    data = []
    for letter in samples.keys():
        letter_ord = ord(letter) - ord('a')
        labels.extend([letter_ord] * len(samples[letter]))
        for word_sample in samples[letter]:
            word_sample = np.vstack(word_sample)
            data.append(word_sample)
    data = np.stack(data, axis=0)
    assert is_inside_box(data, box=((-1, -1), (1, 1)))
    labels = np.array(labels)
    print(f"Data: {data.shape}, labels: {labels.shape}")
    return data, labels


def train(ujipen: UJIPen, n_input=PATTERN_SIZE, n_hidden=50):
    patterns = ujipen.get_samples(fold='train')
    patterns = equally_spaced_points_patterns(patterns, total_points=n_input)
    train_data, train_labels = concat_samples(patterns)
    test_samples = equally_spaced_points_patterns(ujipen.get_samples(fold='test'), total_points=n_input)
    test_data, test_labels = concat_samples(test_samples)
    assert check_unique_patterns(patterns, n_points=n_input)

    gru = models.Sequential()
    gru.add(layers.GRU(units=n_hidden, activation='tanh', recurrent_activation='hard_sigmoid',
                       return_sequences=False, implementation=1,
                       input_shape=(n_input, 2)))
    gru.add(layers.Dense(units=np.unique(train_labels).size, activation='softmax'))
    print(gru.summary())
    gru.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    history = gru.fit(train_data, train_labels, epochs=100, batch_size=32, validation_data=(test_data, test_labels),
                      verbose=0)
    history = history.history
    accuracy_train = history['acc'][-1]
    print(f"Loss: {history['loss'][-1]:.5f}, accuracy: train={accuracy_train:.5f}, val={history['val_acc'][-1]:.5f}")

    MODELS_DIR.mkdir(exist_ok=True)
    model_path = str(MODELS_DIR / f'GRU_input-{n_input}_hidden-{n_hidden}_acc-{accuracy_train:.4f}.h5')
    gru.save(model_path)
    print(f"Saved trained model to {model_path}")


if __name__ == '__main__':
    train(ujipen=UJIPen(), n_input=30, n_hidden=100)
authors: ["dizcza@gmail.com"] | author_id: dizcza@gmail.com

---

blob_id: 7b731c6f011fa87393d4ce9b59e7a664722cbc56 | directory_id: 30150c7f6ed7a10ac50eee3f40101bc3165ebf9e | content_id: f46ac38d6fdd0fa9403d61345de5892119f286e3
path: /src/coghq/FactoryEntityCreatorAI.py
repo_name: toontown-restoration-project/toontown | branch_name: refs/heads/master
snapshot_id: c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8 | revision_id: 9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f
detected_licenses: [] | license_type: no_license
visit_date: 2022-12-23T19:46:16.697036 | revision_date: 2020-10-02T20:17:09 | committer_date: 2020-10-02T20:17:09
github_id: 300,672,330 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,100 | extension: py
content:
"""FactoryEntityCreatorAI module: contains the FactoryEntityCreatorAI class"""
from otp.level import EntityCreatorAI
from direct.showbase.PythonUtil import Functor
from . import DistributedBeanBarrelAI
from . import DistributedButtonAI
from . import DistributedCrateAI
from . import DistributedLiftAI
from . import DistributedDoorEntityAI
from . import DistributedGagBarrelAI
from . import DistributedGridAI
from toontown.suit import DistributedGridGoonAI
from toontown.suit import DistributedGoonAI
from . import DistributedHealBarrelAI
from . import DistributedStomperPairAI
from . import DistributedTriggerAI
from . import DistributedStomperAI
from . import DistributedLaserFieldAI
from . import DistributedSecurityCameraAI
from . import DistributedMoverAI
from . import DistributedElevatorMarkerAI
from . import DistributedSinkingPlatformAI
from . import ActiveCellAI
from . import CrusherCellAI
from . import DirectionalCellAI
from . import FactoryLevelMgrAI
from . import BattleBlockerAI
from . import DistributedGolfGreenGameAI
from toontown.coghq import DistributedMoleFieldAI
from toontown.coghq import DistributedMazeAI
class FactoryEntityCreatorAI(EntityCreatorAI.EntityCreatorAI):
def __init__(self, level):
EntityCreatorAI.EntityCreatorAI.__init__(self, level)
# create short aliases for EntityCreatorAI create funcs
cDE = EntityCreatorAI.createDistributedEntity
cLE = EntityCreatorAI.createLocalEntity
nothing = EntityCreatorAI.nothing
self.privRegisterTypes({
'activeCell' : Functor(cDE, ActiveCellAI.ActiveCellAI),
'crusherCell' : Functor(cDE, CrusherCellAI.CrusherCellAI),
'battleBlocker' : Functor(cDE, BattleBlockerAI.BattleBlockerAI),
'beanBarrel': Functor(cDE, DistributedBeanBarrelAI.DistributedBeanBarrelAI),
'button': DistributedButtonAI.DistributedButtonAI,
'conveyorBelt' : nothing,
'crate': Functor(cDE, DistributedCrateAI.DistributedCrateAI),
'directionalCell' : Functor(cDE, DirectionalCellAI.DirectionalCellAI),
'door': DistributedDoorEntityAI.DistributedDoorEntityAI,
'gagBarrel': Functor(cDE, DistributedGagBarrelAI.DistributedGagBarrelAI),
'gear': nothing,
'goon': Functor(cDE, DistributedGoonAI.DistributedGoonAI),
'gridGoon': Functor(cDE, DistributedGridGoonAI.DistributedGridGoonAI),
'golfGreenGame': Functor(cDE, DistributedGolfGreenGameAI.DistributedGolfGreenGameAI),
'goonClipPlane' : nothing,
'grid': Functor(cDE, DistributedGridAI.DistributedGridAI),
'healBarrel': Functor(cDE, DistributedHealBarrelAI.DistributedHealBarrelAI),
'levelMgr': Functor(cLE, FactoryLevelMgrAI.FactoryLevelMgrAI),
'lift': Functor(cDE, DistributedLiftAI.DistributedLiftAI),
'mintProduct': nothing,
'mintProductPallet': nothing,
'mintShelf': nothing,
'mover': Functor(cDE, DistributedMoverAI.DistributedMoverAI),
'paintMixer': nothing,
'pathMaster': nothing,
'rendering': nothing,
'platform': nothing,
'sinkingPlatform': Functor(cDE, DistributedSinkingPlatformAI.DistributedSinkingPlatformAI),
'stomper': Functor(cDE, DistributedStomperAI.DistributedStomperAI),
'stomperPair': Functor(cDE, DistributedStomperPairAI.DistributedStomperPairAI),
'laserField': Functor(cDE, DistributedLaserFieldAI.DistributedLaserFieldAI),
'securityCamera': Functor(cDE, DistributedSecurityCameraAI.DistributedSecurityCameraAI),
'elevatorMarker': Functor(cDE, DistributedElevatorMarkerAI.DistributedElevatorMarkerAI),
#'laserField': Functor(cDE, DistributedStomperAI.DistributedStomperAI),
'trigger': DistributedTriggerAI.DistributedTriggerAI,
'moleField': Functor(cDE, DistributedMoleFieldAI.DistributedMoleFieldAI),
'maze': Functor(cDE, DistributedMazeAI.DistributedMazeAI),
})
authors: ["brianlach72@gmail.com"] | author_id: brianlach72@gmail.com

---

blob_id: 50d9bcb586a1faed7b58e48723a78679a98837d8 | directory_id: 279ed7207ac2c407487416b595e12f573049dd72 | content_id: 13cba733f4b05f59814d552d8b8aa8f9f4c231a3
path: /pybvk/apps/bvkdos.py
repo_name: danse-inelastic/pybvk | branch_name: refs/heads/master
snapshot_id: 30388455e211fec69130930f2925fe16abe455bd | revision_id: 922c8c0a8c50a9fabd619fa06e005cacc2d13a15
detected_licenses: [] | license_type: no_license
visit_date: 2016-09-15T22:21:13.131688 | revision_date: 2014-06-25T17:12:34 | committer_date: 2014-06-25T17:12:34
github_id: 34,995,254 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,211 | extension: py
content:
#!/usr/bin/env python
# given the python module to create "system", calculate the DOS
# the python module is optional. if it is not given, then the "system" file must exist already.

import os


def run(systempy, system, df, N, Vecs):
    # if neither systempy nor system is specified, it is assumed that we have a "system" file
    if not systempy and not system:
        system = 'system'

    # create a temporary work directory
    import tempfile
    workdir = tempfile.mkdtemp()

    # create the system file in the temporary work directory
    from bvk.applications.executionharness import createSystem, execute
    system = createSystem(workdir, systempy=systempy, system=system)

    # build the commands to run
    Vecs = int(Vecs)
    cmds = [
        'bvkrandomQs %s' % N,
        'bvkdisps %s' % Vecs,
        'bvkpartialdos %s %s' % (Vecs, df),
    ]
    return execute(cmds, workdir=workdir, outputfiles=['DOS'])


from optparse import OptionParser


def main():
    usage = "usage: %prog [options] [system]"
    parser = OptionParser(usage)
    parser.add_option(
        "-N", "--N-kpts-1D", dest="N",
        default=10,
        help="Number of k points in 1D for sampling reciprocal space",
    )
    parser.add_option(
        "-d", "--df", dest="df",
        default=0.1,
        help="frequency axis bin size (THz)",
    )
    parser.add_option(
        "-E", "--compute-eigen-vectors",
        default=False,
        help='compute eigen vectors or not?',
        dest="Vecs",
    )
    parser.add_option(
        '-P', '--system-python-file',
        default='',
        help='python file that generates the "system" file when executed. when this option is supplied, please do not specify the "system" file path as the argument',
        dest='systempy',
    )
    (options, args) = parser.parse_args()
    if len(args) > 1:
        parser.error("incorrect number of arguments")
    if len(args) == 1:
        system = args[0]
    else:
        system = None
    N = int(options.N)
    df = float(options.df)
    Vecs = bool(options.Vecs)
    systempy = options.systempy
    return run(systempy, system, df, N, Vecs)


if __name__ == "__main__":
    main()
authors: ["linjiao@caltech.edu"] | author_id: linjiao@caltech.edu

---

blob_id: e95450b4b2a062095da6f2a52983a8128ebe702a | directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3 | content_id: aa5a66ce9487ea4e0b7b83b41044d3742b278eb9
path: /Python_codes/p02640/s043458506.py
repo_name: Aasthaengg/IBMdataset | branch_name: refs/heads/main
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-22T10:22:44.763102 | revision_date: 2021-05-13T17:27:22 | committer_date: 2021-05-13T17:27:22
github_id: 367,112,348 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 238 | extension: py
content:
# Crane and Turtle
X, Y = [int(i) for i in input().split()]
for t in range(0, X + 1):
    legs = 2 * (X + t)  # every animal has 2 legs; each of t turtles adds 2 more
    if Y == legs:
        a = 'Yes'
        break
    if Y < legs:
        a = 'No'
        break
else:
    a = 'No'
print(a)
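The loop above searches over the number of turtles t; the same answer follows in closed form (a sketch, not part of the original submission): with X animals and Y legs, t = (Y - 2*X) / 2 must be a whole number between 0 and X.

def crane_and_turtle(X, Y):
    extra = Y - 2 * X  # legs beyond 2 per animal; must be an even, feasible turtle count
    return 'Yes' if extra % 2 == 0 and 0 <= extra // 2 <= X else 'No'

print(crane_and_turtle(3, 8))  # Yes: 1 turtle, 2 cranes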
authors: ["66529651+Aastha2104@users.noreply.github.com"] | author_id: 66529651+Aastha2104@users.noreply.github.com

---

blob_id: c770e3b327455e13849eeee61191a2598e34255f | directory_id: e1a56ac7e85030de9ed440db0d276612fc8ad02e | content_id: ac4c5131cd39821c4f0630ba1f46a55189edb2fd
path: /wsperf.py
repo_name: hoangtrucit/wsperf | branch_name: refs/heads/master
snapshot_id: cfeb9ee794475ecffcf96e9b1929ca69ed2a8942 | revision_id: 3d9dd986b1fb7dd0af38540191cc9ea73f119770
detected_licenses: [] | license_type: no_license
visit_date: 2021-10-20T19:30:06.236857 | revision_date: 2019-03-01T13:52:34 | committer_date: 2019-03-01T13:52:34
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,838 | extension: py
content:
import os, sys, argparse

from twisted.internet import reactor
from twisted.internet.utils import getProcessOutput, getProcessValue
from twisted.internet.defer import DeferredList

import analyze

if __name__ == '__main__':
    default_wsperf = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wsperf')

    parser = argparse.ArgumentParser(description='wsperf test driver')
    parser.add_argument('--wsuri', dest='wsuri', type=str, default='ws://127.0.0.1:9000', help='The WebSocket URI the testee is listening on, e.g. ws://127.0.0.1:9000.')
    parser.add_argument('--workers', dest='workers', type=int, default=4, help='Number of wsperf worker processes to spawn.')
    parser.add_argument('--threads', dest='threads', type=int, default=0, help='Number of wsperf worker threads to spawn at each worker [0: run on main thread, >0: spawn that many background worker threads].')
    parser.add_argument('--conns', dest='conns', type=int, default=50000, help='Number of WebSocket connections to open from each worker.')
    parser.add_argument('--lowmark', dest='lowmark', type=int, default=250, help='Low watermark for each worker.')
    parser.add_argument('--highmark', dest='highmark', type=int, default=500, help='High watermark for each worker.')
    parser.add_argument('--resultfile', dest='resultfile', type=str, default=r'result_%d.json', help='Result file pattern.')
    parser.add_argument('--wsperf', dest='wsperf', type=str, default=default_wsperf, help='Full path to wsperf executable.')
    parser.add_argument('--skiprun', dest='skiprun', action="store_true", default=False, help='Skip test run.')
    parser.add_argument('--skipanalyze', dest='skipanalyze', action="store_true", default=False, help='Skip analyze results.')
    options = parser.parse_args()

    resultfiles = [(options.resultfile % i) for i in range(options.workers)]

    if options.skiprun:
        ## here we don't start a reactor.
        if not options.skipanalyze:
            analyze.printResults(resultfiles)
    else:
        df = []
        for i in range(options.workers):
            args = [options.wsuri,
                    str(options.threads),
                    str(options.conns),
                    str(options.lowmark),
                    str(options.highmark),
                    options.resultfile % i]
            ## run wsperf executable
            d = getProcessOutput(options.wsperf, args, os.environ)
            ## accumulate any output
            df.append(d)
        d = DeferredList(df, consumeErrors=True)

        def onok(res):
            if not options.skipanalyze:
                analyze.printResults(resultfiles)
            reactor.stop()

        def onerr(err):
            print(err)
            reactor.stop()

        d.addCallbacks(onok, onerr)
        reactor.run()
authors: ["tobias.oberstein@tavendo.de"] | author_id: tobias.oberstein@tavendo.de

---

blob_id: 195ac95f63e61157f163bece66445bf2cac32366 | directory_id: e58ecbf6af1cafbff42e2cc33abcbbf6e4ee7475 | content_id: a6ed4f4b831b346ef58636e8757486598b762f01
path: /tests/accounting/test_call_fee_scalar.py
repo_name: celeduc/ethereum-alarm-clock | branch_name: refs/heads/master
snapshot_id: 1edbbe207e0f9a7ea34a792728a2b6dceda455dd | revision_id: fd202f5e96b753e6ce6bcee9a67363c468c10c7b
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2020-02-26T17:23:54.054416 | revision_date: 2015-11-09T06:11:28 | committer_date: 2015-11-09T06:11:28
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 802 | extension: py
content:
test_values = (
(20, (
(4, 145),
(8, 138),
(12, 129),
(16, 117),
(20, 100),
(24, 83),
(28, 71),
(32, 62),
(36, 55),
)),
(500, (
(50, 148),
(125, 143),
(275, 132),
(400, 117),
(475, 105),
(500, 100),
(525, 95),
(600, 83),
(700, 71),
(900, 55),
(1200, 41),
)),
)
deploy_contracts = [
"CallLib",
]
def test_call_fee_scalar_values(CallLib):
for base_gas_price, values in test_values:
actual_values = [
(CallLib.getCallFeeScalar(base_gas_price, gas_price), expected)
for gas_price, expected in values
]
assert all(actual == expected for actual, expected in actual_values)
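# Reading the fixture above (an interpretation, not part of the contract):
# getCallFeeScalar(base_gas_price, gas_price) is expected to return a
# percentage that equals 100 at the base gas price and grows as the gas
# price drops (e.g. base 20, gas 4 -> 145), so cheaper calls earn a larger
# fee multiplier.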
|
[
"pipermerriam@gmail.com"
] |
pipermerriam@gmail.com
|
090878f19ffe408b52f9598216f4a2f609c8d58e
|
e9685369da45e5c502ce5540891e6018eadba252
|
/backend/server/apps/tasks/api/serializers.py
|
8f557e1bb12bfa5ff5230105c2b8d8284b099ec9
|
[
"MIT"
] |
permissive
|
Turi-fly/simple-tasks
|
9703a2dd405081b129222cf6a325a5b591709d8c
|
ae759a8100f6604b6d8fc00f19cf3aedbd945f3d
|
refs/heads/master
| 2022-04-10T15:26:01.590888
| 2018-11-14T08:45:46
| 2018-11-14T08:45:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
from rest_framework import serializers
import tasks.models as models
import cerberus
class TaskSerializer(serializers.ModelSerializer):
class Meta:
model = models.Task
read_only_fields = ('id', 'state', 'result', 'task_id',)
fields = ('id', 'state', 'params', 'result', 'task_id')
def validate_params(self, params):
if params is None or params == '':
raise serializers.ValidationError("Params cannot be empty")
schema = {'arg1': {'type': 'integer', 'required': True},
'arg2': {'type': 'integer', 'required': True}}
validator = cerberus.Validator(schema)
if not validator.validate(params):
raise serializers.ValidationError(validator.errors)
return params
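# A minimal usage sketch (hypothetical; assumes DRF is configured and that
# Task.params is stored as a dict/JSON-like field so cerberus can validate it):
#
#   s = TaskSerializer(data={'params': {'arg1': 1, 'arg2': 2}})
#   s.is_valid()   # True; {'arg1': 'x'} would fail with the cerberus errors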
|
[
"pplonski86@gmail.com"
] |
pplonski86@gmail.com
|
510b351cc1af18f3ed0180c70ef1242ca5bac1d8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2171/48117/297146.py
|
bda92c1ed48fa43c5286605774a8b0ab0e50019c
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
class Node():
def __init__(self, item):
self.item = item
self.next = None
class LinkList():
def __init__(self, node = None):
self.head = node
def isEmpty(self):
return self.head == None
def append(self, newItem):
newNode = Node(newItem)
if self.isEmpty():
self.head = newNode
newNode.next = self.head
else:
nowNode = self.head
while nowNode.next != self.head:
nowNode = nowNode.next
nowNode.next = newNode
newNode.next = self.head
def add(self, newItem):
newNode = Node(newItem)
if self.isEmpty():
self.head = newNode
else:
nowNode = self.head
while nowNode.next != None:
nowNode = nowNode.next
nowNode.next = newNode
questNum = int(input())
for quest in range(questNum):
n = int(input())
s = input().split(' ')
for i in range(n):
s[i] = int(s[i])
p = LinkList()
for i in range(n):
p.add(s[i])
p1 = p.head
odd = LinkList()
ou = LinkList()
    while p1 != None: # visit every node, including the last one
        if p1.item % 2 == 0:
            ou.add(p1.item)
        else:
            odd.add(p1.item)
        p1 = p1.next
    ou1 = ou.head
    odd1 = odd.head
    while ou1 != None: # also guards the case where one list is empty
        print(ou1.item, end=' ')
        ou1 = ou1.next
    while odd1 != None:
        print(odd1.item, end=' ')
        odd1 = odd1.next
    print()
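# Worked example (hypothetical input): for the list 1 2 3 4 the even values
# are printed first, then the odd ones, giving "2 4 1 3 ".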
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
45e87ed9a82e88d8e774f45921ed3227fd68165e
|
4dbd12da17cc45a5482afc8cea02051e798731a9
|
/courses_project/apps/courses/urls.py
|
ab576aa8a6f94c45e5f11e2186a1af9f96e0ddaa
|
[] |
no_license
|
tsicroxe/django_projects
|
71b9bec6d834f53fde892606799b4bc96ba45a91
|
c11036c78d120e5ffa51055e2999dbe05b0d36eb
|
refs/heads/master
| 2021-01-11T07:03:53.045558
| 2016-12-07T20:46:05
| 2016-12-07T20:46:05
| 71,937,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from django.conf.urls import url
from . import views
from .views import index, create, destroy
#from django.contrib import admin
urlpatterns = [
#url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^create$', views.create, name='create'),
url(r'^(?P<id>\d+)/destroy$', views.destroy, name='destroy'),
]
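# The resulting routes (sketch): / -> views.index, /create -> views.create,
# and e.g. /5/destroy -> views.destroy with id='5' captured from the URL.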
|
[
"arbanakus@gmail.com"
] |
arbanakus@gmail.com
|
21e8e5573f1c6037a1404e7518ad11fd5494c097
|
b2319c5e14c94edfb5a39e4c490c1ae6183651ed
|
/deepgoweb/apps/deepgo/migrations/0013_auto_20190902_0904.py
|
edb4c8e1436fbb064813d7d04f2b93874adbe234
|
[] |
no_license
|
coolmaksat/deepgoweb
|
6d67f45059d7bdb4548d50c182a038c6f9c70a31
|
fd4904b6b18dd2af06e000679f406b7353a3534f
|
refs/heads/master
| 2021-06-12T14:42:14.513686
| 2021-04-17T10:23:39
| 2021-04-17T10:23:39
| 161,017,035
| 0
| 0
| null | 2018-12-09T07:49:26
| 2018-12-09T07:49:26
| null |
UTF-8
|
Python
| false
| false
| 2,974
|
py
|
# Generated by Django 2.2.4 on 2019-09-02 09:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('deepgo', '0012_auto_20190505_0848'),
]
operations = [
migrations.CreateModel(
name='Taxonomy',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=127)),
],
),
migrations.RemoveField(
model_name='protein',
name='ppi_embedding',
),
migrations.RemoveField(
model_name='protein',
name='sequence',
),
migrations.RemoveField(
model_name='protein',
name='sequence_md5',
),
migrations.RemoveField(
model_name='protein',
name='uni_accession',
),
migrations.RemoveField(
model_name='protein',
name='uni_entry_id',
),
migrations.AddField(
model_name='protein',
name='acc_id',
field=models.CharField(default='PROTEIN', max_length=15, unique=True),
preserve_default=False,
),
migrations.AddField(
model_name='protein',
name='gene',
field=models.CharField(blank=True, max_length=31, null=True),
),
migrations.AddField(
model_name='protein',
name='name',
field=models.CharField(default='name', max_length=127),
preserve_default=False,
),
migrations.AddField(
model_name='protein',
name='pro_id',
field=models.CharField(db_index=True, default='PROTEIN', max_length=31),
preserve_default=False,
),
migrations.AddField(
model_name='protein',
name='reviewed',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='protein',
name='id',
field=models.PositiveIntegerField(primary_key=True, serialize=False),
),
migrations.CreateModel(
name='Annotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('go_id', models.PositiveIntegerField(db_index=True)),
('score', models.PositiveIntegerField()),
('protein', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='deepgo.Protein')),
],
),
migrations.AddField(
model_name='protein',
name='taxon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='proteins', to='deepgo.Taxonomy'),
),
]
|
[
"coolmaksat@gmail.com"
] |
coolmaksat@gmail.com
|
b1e7bc2ea6a672534d6f1fe70f55d35439a84b1f
|
cd40b7cc395f36740000ed4a4144b1c0666ab0fd
|
/tests/test_hstrat/test_stratum_retention_strategy/test_stratum_retention_algorithms/test_recency_proportional_resolution_algo/test_IterRetainedRanks.py
|
e25d30f8fbb8105935530e3c749ac1f26bb0365f
|
[
"MIT"
] |
permissive
|
mmore500/hstrat
|
94fd22c86a87a5707590b9398ef679444ed82d6d
|
b2d2caded1db5e2dc681d9f171d7c74b322c55c3
|
refs/heads/master
| 2023-08-31T03:36:44.457576
| 2023-08-25T14:39:29
| 2023-08-25T14:39:29
| 464,531,144
| 5
| 2
|
NOASSERTION
| 2023-08-25T13:07:52
| 2022-02-28T15:11:45
|
Python
|
UTF-8
|
Python
| false
| false
| 7,230
|
py
|
import itertools as it
import numbers
from iterpop import iterpop as ip
import numpy as np
import pytest
from hstrat._auxiliary_lib import all_same, pairwise
from hstrat.hstrat import recency_proportional_resolution_algo
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_impl_consistency(recency_proportional_resolution, time_sequence):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
impls = [
*recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls
]
instances = [impl(spec) for impl in impls] + [
lambda __, num_strata_deposited: policy.IterRetainedRanks(
num_strata_deposited
)
]
for num_strata_deposited in time_sequence:
assert all_same(
it.chain(
(
list(
impl(spec)(
policy,
num_strata_deposited,
)
)
for impl in impls
),
(
list(
instance(
policy,
num_strata_deposited,
)
)
for instance in instances
),
)
)
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_only_dwindling_over_time(
impl, recency_proportional_resolution, time_sequence
):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in (instance, impl(spec)):
cur_set = {
*which(
policy,
num_strata_deposited,
)
}
next_set = {
*which(
policy,
num_strata_deposited + 1,
)
}
assert cur_set.issuperset(next_set - {num_strata_deposited})
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_ranks_sorted_and_unique(
impl, recency_proportional_resolution, time_sequence
):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in (instance, impl(spec)):
assert all(
i < j
for i, j in pairwise(
which(
policy,
num_strata_deposited,
)
)
)
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_zero_and_last_ranks_retained(
impl, recency_proportional_resolution, time_sequence
):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in instance, impl(spec):
res = which(
policy,
num_strata_deposited,
)
if num_strata_deposited > 1:
first, *middle, last = res
assert first == 0
assert last == num_strata_deposited - 1
elif num_strata_deposited == 1:
assert ip.popsingleton(res) == 0
else:
assert next(res, None) is None
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
@pytest.mark.parametrize(
"time_sequence",
[
range(10**3),
(i for i in range(10**2) for __ in range(2)),
np.random.default_rng(1).integers(
low=0,
high=2**32,
size=10,
),
(2**32,),
],
)
def test_ranks_valid(impl, recency_proportional_resolution, time_sequence):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
for num_strata_deposited in time_sequence:
for which in (instance, impl(spec)):
assert all(
isinstance(r, numbers.Integral)
and 0 <= r < num_strata_deposited
for r in which(policy, num_strata_deposited)
)
@pytest.mark.parametrize(
"impl",
recency_proportional_resolution_algo._scry._IterRetainedRanks_.impls,
)
@pytest.mark.parametrize(
"recency_proportional_resolution",
[
0,
1,
2,
3,
7,
42,
97,
100,
],
)
def test_eq(impl, recency_proportional_resolution):
policy = recency_proportional_resolution_algo.Policy(
recency_proportional_resolution
)
spec = policy.GetSpec()
instance = impl(spec)
assert instance == instance
assert instance == impl(spec)
assert instance is not None
|
[
"mmore500.login+gpg@gmail.com"
] |
mmore500.login+gpg@gmail.com
|
abfe6cbaac9ddeffce0019053b066e6517c9ec1f
|
4bf3aaf77c309a489100b98a8c03532632df152c
|
/Python/BOJ/13460.py
|
b39eb5f4d88753e6b925be54efe84dd74b2b14ff
|
[] |
no_license
|
murane/PS
|
7fbfc54d962231949efc67f1a35c4b0119de0780
|
e938c6c503aeac08bf65e1e66709172b0e5da6ef
|
refs/heads/master
| 2023-05-06T22:51:54.105811
| 2021-05-30T03:34:53
| 2021-05-30T03:34:53
| 293,699,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
import sys
from collections import deque
r=sys.stdin.readline
N,M=map(int,r().split())
board=[]
D=[(1,0),(-1,0),(0,1),(0,-1)]
for _ in range(N):
board.append(list(r().strip()))
for i in range(N):
for j in range(M):
if board[i][j]=="R":
R=[i,j]
board[i][j]="."
elif board[i][j]=="B":
B=[i,j]
board[i][j]="."
def move(x,y,d):
dist=0
while True:
nextPos=board[x+d[0]][y+d[1]]
if nextPos=='.':
x,y=x+d[0],y+d[1]
elif nextPos=='O':
return True,0,[-1,-1]
elif nextPos=='#':
return False,dist,[x,y]
dist+=1
def bfs():
q=deque()
q.append([R,B,0])
visit=set()
visit.add((tuple(R),tuple(B)))
while q:
red,blue,cnt=q.popleft()
tmpRed,tmpBlue=red,blue
#if cnt==10: return -1
        for i in range(4): # the 4 tilt directions
            flgR,distR,red=move(tmpRed[0],tmpRed[1],D[i]) # roll both balls first
            flgB,distB,blue=move(tmpBlue[0],tmpBlue[1],D[i])
            if flgR and not flgB:
                return cnt+1 # success: red drops into the hole and blue does not
            elif flgB: continue # failure whenever blue drops in
            elif not flgR and not flgB: # neither ball dropped in
                if red==blue: # the balls landed on the same cell
                    if distR>distB:
                        red=red[0]-D[i][0],red[1]-D[i][1]
                    else:
                        blue=blue[0]-D[i][0],blue[1]-D[i][1]
                if (tuple(red),tuple(blue)) not in visit:
                    q.append([red,blue,cnt+1]) # push the new state back onto the queue
visit.add((tuple(red),tuple(blue)))
return -1
print(bfs())
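# Complexity note: each BFS state is a (red, blue) position pair, so at most
# (N*M)**2 states are ever enqueued, and each expansion rolls both balls in
# the 4 tilt directions.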
|
[
"murane@naver.com"
] |
murane@naver.com
|
c8b3a20fa81bc2a10ac839ee93aa3622a97f9a82
|
de070f933453e2d15651af1ccc697acf25507bd7
|
/deid/version.py
|
785a6ee84d0483f6912ea07c5584e25f6da00280
|
[
"MIT"
] |
permissive
|
liu3xing3long/deid
|
cd968b1b5d8e678ad2c41f2b9f1c4572f5f88013
|
491a8ea301d9d47cd4e62eaab31584c26afcc534
|
refs/heads/master
| 2021-05-14T11:33:12.193255
| 2017-12-22T21:28:32
| 2017-12-22T21:28:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
'''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__version__ = "0.1.1"
AUTHOR = 'Vanessa Sochat'
AUTHOR_EMAIL = 'vsochat@stanford.edu'
NAME = 'deid'
PACKAGE_URL = "https://github.com/pydicom/deid"
KEYWORDS = 'open source, stanford, python, deidentify, dicom'
DESCRIPTION = "deidentify dicom and other images with python and pydicom"
LICENSE = "LICENSE"
INSTALL_REQUIRES = (
('matplotlib', {'min_version': None}),
('requests', {'min_version': '2.12.4'}),
('retrying', {'min_version': '1.3.3'}),
('simplejson', {'min_version': '3.10.0'}),
('six', {'min_version': '1.10'}),
('pygments', {'min_version': '2.1.3'}),
('python-dateutil',{'min_version': None }),
('urllib3',{'min_version': "1.15" }),
('validator.py',{'min_version': None })
)
DEPENDENCY_LINKS = ['https://github.com/pydicom/pydicom/tarball/master']
|
[
"vsochat@stanford.edu"
] |
vsochat@stanford.edu
|
52b1e429db9ff264272850ea168eeb1c2de376d2
|
a3e926f8547f04184c79bdd28b0f886a77778700
|
/Lib/fontbakery/reporters/ghmarkdown.py
|
b7376a00473362e6d22d640af646c8bc5277277e
|
[
"Apache-2.0"
] |
permissive
|
m4rc1e/fontbakery
|
0150a17547b53d6dc79e81407b0374950f90cd16
|
da4c4b69abdd41314f9bdb58d9e47722e0680816
|
refs/heads/master
| 2023-08-02T14:18:00.077821
| 2018-10-17T01:47:51
| 2018-10-17T03:53:06
| 67,598,331
| 0
| 0
|
Apache-2.0
| 2018-10-18T09:34:10
| 2016-09-07T10:52:14
|
C
|
UTF-8
|
Python
| false
| false
| 3,976
|
py
|
import os
from fontbakery.reporters.serialize import SerializeReporter
from fontbakery.checkrunner import Status
LOGLEVELS=["ERROR","FAIL","WARN","SKIP","INFO","PASS"]
class GHMarkdownReporter(SerializeReporter):
def __init__(self, loglevels, **kwd):
super(GHMarkdownReporter, self).__init__(**kwd)
self.loglevels = loglevels
def emoticon(self, name):
return {
'ERROR': ':broken_heart:',
'FAIL': ':fire:',
'WARN': ':warning:',
'INFO': ':information_source:',
'SKIP': ':zzz:',
'PASS': ':bread:',
}[name]
def html5_collapsible(self, summary, details):
return ("<details>\n"
"<summary>{}</summary>\n"
"{}\n"
"</details>\n").format(summary, details)
def log_md(self, log):
if not self.omit_loglevel(log["status"]):
return "* {} **{}** {}\n".format(self.emoticon(log["status"]),
log["status"],
log["message"])
else:
return ""
def check_md(self, check):
checkid = check["key"][1].split(":")[1].split(">")[0]
check["logs"].sort(key=lambda c: c["status"])
logs = "".join(map(self.log_md, check["logs"]))
github_search_url = ("[{}](https://github.com/googlefonts/fontbakery/"
"search?q={})").format(checkid, checkid)
return self.html5_collapsible("{} <b>{}:</b> {}".format(self.emoticon(check["result"]),
check["result"],
check["description"]),
f"\n* {github_search_url}\n{logs}")
def omit_loglevel(self, msg):
return self.loglevels and (self.loglevels[0] > Status(msg))
def get_markdown(self):
checks = {}
family_checks = []
data = self.getdoc()
num_checks = 0
for section in data["sections"]:
for cluster in section["checks"]:
if not isinstance(cluster, list):
cluster = [cluster]
num_checks += len(cluster)
for check in cluster:
if self.omit_loglevel(check["result"]):
continue
if "filename" not in check.keys():
# That's a family check!
family_checks.append(check)
else:
key = os.path.basename(check["filename"])
if key not in checks:
checks[key] = []
checks[key].append(check)
md = "## Fontbakery report\n\n"
if family_checks:
family_checks.sort(key=lambda c: c["result"])
md += self.html5_collapsible("<b>[{}] Family checks</b>".format(len(family_checks)),
"".join(map(self.check_md, family_checks)) + "<br>")
for filename in checks.keys():
checks[filename].sort(key=lambda c: LOGLEVELS.index(c["result"]))
md += self.html5_collapsible("<b>[{}] {}</b>".format(len(checks[filename]),
filename),
"".join(map(self.check_md, checks[filename])) + "<br>")
if num_checks != 0:
summary_table = "### Summary\n\n" + \
("| {} " + " | {} ".join(LOGLEVELS) + " |\n").format(*[self.emoticon(k) for k in LOGLEVELS]) + \
("|:-----:|:----:|:----:|:----:|:----:|:----:|\n"
"| {} | {} | {} | {} | {} | {} |\n"
"").format(*[data["result"][k] for k in LOGLEVELS]) +\
("| {:.0f}% | {:.0f}% | {:.0f}% | {:.0f}% | {:.0f}% | {:.0f}% |\n"
"").format(*[100*data["result"][k]/num_checks for k in LOGLEVELS])
md += "\n" + summary_table
omitted = [l for l in LOGLEVELS if self.omit_loglevel(l)]
if omitted:
md += "\n" + \
"**Note:** The following loglevels were omitted in this report:\n" + \
"".join(map("* **{}**\n".format, omitted))
return md
|
[
"fsanches@metamaquina.com.br"
] |
fsanches@metamaquina.com.br
|
0d9c8f3dbbc299c369c4ac837ee49b743180106e
|
084db5e25626908a5352339900f12f0000a25a4a
|
/crediteuropebank/items.py
|
9770e32bfb15c3f15bbc7ea5982eda1f5486b696
|
[] |
no_license
|
hristo-grudev/crediteuropebank
|
f60a4c444b9aca06b2e44b699c2ce84703a3382d
|
82646cef961dfb318f33ef6a9dd44801a945494a
|
refs/heads/main
| 2023-03-10T08:29:04.156974
| 2021-02-25T09:30:57
| 2021-02-25T09:30:57
| 342,191,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
import scrapy
class CrediteuropebankItem(scrapy.Item):
title = scrapy.Field()
description = scrapy.Field()
date = scrapy.Field()
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
1b8fb8d7b10372b608afaa5f628de8f096425737
|
f9c2f77fea6ffdf820867f02805c7a037627f235
|
/PythonBasics/03_Volleyball.py
|
0f1cb9aab9fd10836c1d9eb2eb0e9fc07e0f77e6
|
[] |
no_license
|
Nikolov-A/SoftUni
|
6f253694757f195a5c0df8f24b12dbb4ad4d76c6
|
351b0b970da84e5d930a235fce76853c4dcaa365
|
refs/heads/master
| 2022-01-12T13:57:11.842394
| 2019-07-07T10:53:48
| 2019-07-07T10:53:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from math import floor
year = input()
holiday = int(input())
weekend = int(input())
games_in_sofia = (48 - weekend) * (3 / 4)
games_in_home = weekend
games_in_holiday_sofia = holiday * (2 / 3)
total_games = games_in_sofia + games_in_home + games_in_holiday_sofia
if year == "leap":
    total_games += 0.15 * total_games
print(f"{floor(total_games)}")
|
[
"alexander.nikolov092@gmail.com"
] |
alexander.nikolov092@gmail.com
|
654bde5deddbb976c2e3fe5e7a9a4b33bd606463
|
e780a5bd72f98ca2513c993d64a85b08578166a6
|
/buildout-cache/eggs/Zope2-2.13.26-py2.7.egg/App/Permission.py
|
26fc6c96cef75bd35a47508c6bf2a627db0822a3
|
[] |
no_license
|
vedantc98/Plone-test
|
023246597ffe848e2a49b9f65742ff49127b190b
|
9fd520fc78481e2c0b9b7ec427821e7f961c777e
|
refs/heads/master
| 2021-03-30T22:14:33.368739
| 2018-03-11T19:22:58
| 2018-03-11T19:22:58
| 124,671,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
'''Zope registerable permissions
'''
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import Implicit
from OFS.role import RoleManager
from OFS.SimpleItem import Item
from Persistence import Persistent
class Permission(RoleManager,
Persistent,
Implicit,
Item
):
"""Model Permission meta-data
"""
meta_type = 'Zope Permission'
icon = 'p_/Permission_icon'
index_html = None
security = ClassSecurityInfo()
manage_options=(
RoleManager.manage_options
+ Item.manage_options
)
def __init__(self, id, title, name):
self.id=id
self.title=title
self.name=name
InitializeClass(Permission)
|
[
"vedantc98@gmail.com"
] |
vedantc98@gmail.com
|
4a9cd2050ce1ad1ddda5ed230b8ca4bad878934d
|
9183379a07d1d8936d8205d99ecd0e40269e667a
|
/sphinx/source/exercises/solution/05_encapsulation/printer.py
|
414590fa8dc069be2a003ab1ed68e1baaddb3428
|
[] |
no_license
|
boegeskov/fall2020
|
477983eb97568e274d3cef9ee22706de172b6046
|
9e50030e3fa99cc5ddb95ff46f93c1a530d256b1
|
refs/heads/master
| 2023-01-23T18:30:19.893424
| 2020-12-09T07:16:20
| 2020-12-09T07:16:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
# printer.py (solution)
"""
3. Machine -> printer
Create a Machine class that takes care of powering the machine on and off.
Create a Printer class that is a subclass of the Machine superclass.
The printer should be able to print to the console.
The printer should have a papertray, which should be in its own class. The papertray class should keep track of the paper; it should have the ability to use paper and to load new paper into the tray if empty.
"""
class Machine:
""" takes care of turning on and off """
def __init__(self):
self.__is_on = False
@property
def is_on(self):
return self.__is_on
def power(self):
self.__is_on = not self.__is_on
class Printer(Machine):
def __init__(self):
# 1.
super().__init__()
# 2.
# Machine.__init__(self)
self.__pt = Papertray()
def print(self, text):
if self.__pt.paper == 0:
print('Papertray is empty')
else:
if self.is_on:
print(text)
self.__pt.paper = self.__pt.paper - 1
else:
print('Printer is off')
@property
def load(self):
return self.__pt.paper
    @load.setter
def load(self, no):
self.__pt.paper = no
class Papertray:
def __init__(self):
self.paper = 2
@property
def paper(self):
return self.__paper
@paper.setter
def paper(self, paper):
self.__paper = paper
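# A quick usage sketch (assumes the classes above; a fresh tray holds 2 sheets):
#
#   p = Printer()
#   p.print('hi')   # -> "Printer is off"
#   p.power()
#   p.print('hi')   # -> "hi", one sheet consumed
#   p.load = 10     # refill the tray via the property setter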
|
[
"clbo@kea.dk"
] |
clbo@kea.dk
|
52080a362e4c3ceb2822f229da8005edd6ef036e
|
4a5f11b55e23999a82b62f5c72b44e9a36d24f63
|
/simplemooc/forum/admin.py
|
7c813d107c771cc9ce0f430c826d0736f3a53f31
|
[] |
no_license
|
diogo-alves/simplemooc
|
dca62bfcb2ea6357a551a5760778537f083b675c
|
cfec59f99888e4e23d41f020ff06bfdf39f70203
|
refs/heads/master
| 2022-05-10T10:32:18.686313
| 2019-06-04T19:30:43
| 2019-06-04T19:30:43
| 190,260,470
| 0
| 0
| null | 2022-04-22T21:34:44
| 2019-06-04T18:46:43
|
Python
|
UTF-8
|
Python
| false
| false
| 585
|
py
|
from django.contrib import admin
from .models import Thread, Reply
class ThreadAdmin(admin.ModelAdmin):
list_display = ['title', 'body', 'author', 'updated_at']
search_fields = ['title', 'body', 'author__username']
prepopulated_fields = {'slug': ('title',)}
class ReplyAdmin(admin.ModelAdmin):
list_display = ['thread', 'reply', 'author', 'correct', 'updated_at']
search_fields = ['thread', 'reply', 'author__username']
list_filter = ['thread__title', 'author__username']
admin.site.register(Thread, ThreadAdmin)
admin.site.register(Reply, ReplyAdmin)
|
[
"diogo.alves.ti@gmail.com"
] |
diogo.alves.ti@gmail.com
|
9a8e5ff5ac645a3cc48a2db51ef611314f4736f6
|
20a358db6e9e9872453a7fb36ef21268054b241d
|
/pyml/ditech/database/insert_traffic.py
|
95f8193ac0e10728700c619c82578331c5c5dc3e
|
[] |
no_license
|
fengkaicnic/pyml
|
ee654cdef2ba107e1c1e8d598691af3accb96b3c
|
a19865cdb9eb69517258416a2b08b86f9d43a023
|
refs/heads/master
| 2021-01-21T04:40:44.659607
| 2016-07-29T08:33:07
| 2016-07-29T08:33:07
| 44,159,061
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,001
|
py
|
import utils
import traceback
import os
import time
import pdb
start = time.time()
sql = ''  # predefine so the error handler below can always print it
try:
path = 'D:/ditech/citydata/season_2/test_set_2/traffic_data'
conn = utils.persist.connection()
cur = conn.cursor()
num = 0
for pl in os.listdir(path):
if not '.' in pl:
with open(path + '/' + pl) as file:
lines = file.readlines()
for line in lines:
lst = line.split('\t')
lst = map(lambda x:x.strip(), lst)
for tline in lst[1:-1]:
sql = 'insert into traffic_test2(district_hash, tj_level, tj_time) \
values("%s", "%s", "%s")' % (lst[0], tline, lst[-1])
cur.execute(sql)
conn.commit()
conn.close()
except:
traceback.print_exc()
print sql
conn.commit()
conn.close()
end = time.time()
print end - start
|
[
"fkdhy@163.com"
] |
fkdhy@163.com
|
13304ad34c9181779d72a2811439ff96eabc20cf
|
f8201014d20832d4cc217b473500501cf16df8ba
|
/virtool/genbank.py
|
7035b74b89e201906c6cfa858afebbf05f253176
|
[
"MIT"
] |
permissive
|
gitter-badger/virtool
|
abc996ef8dc160f1fe879a55d6eec4e9043c9840
|
628acc377fb0497c2bfe75e9fa0a61decc59e0e6
|
refs/heads/master
| 2020-04-23T04:47:02.186926
| 2019-02-15T03:01:12
| 2019-02-15T03:01:12
| 170,919,108
| 0
| 0
| null | 2019-02-15T19:42:26
| 2019-02-15T19:42:25
| null |
UTF-8
|
Python
| false
| false
| 1,933
|
py
|
import logging
import string
import virtool.http.proxy
logger = logging.getLogger(__name__)
EMAIL = "dev@virtool.ca"
TOOL = "virtool"
FETCH_URL = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
async def fetch(settings, session, accession):
"""
Fetch the Genbank record for the passed `accession`.
:param settings: the application settings object
:type settings: :class:`virtool.app_settings.Settings`
:param session: an aiohttp client session
:type session: :class:`aiohttp.ClientSession`
:param accession: the accession to fetch
:type accession: Union[int,str]
:return: parsed Genbank data
:rtype: dict
"""
params = {
"db": "nuccore",
"email": EMAIL,
"id": accession,
"retmode": "text",
"rettype": "gb",
"tool": TOOL
}
async with virtool.http.proxy.ProxyRequest(settings, session.get, FETCH_URL, params=params) as resp:
body = await resp.text()
if resp.status != 200:
if "Failed to retrieve sequence" not in body:
logger.warning("Unexpected Genbank error: {}".format(body))
return None
data = {
"host": ""
}
for line in body.split("\n"):
if line.startswith("VERSION"):
data["accession"] = line.replace("VERSION", "").lstrip(" ")
if line.startswith("DEFINITION"):
data["definition"] = line.replace("DEFINITION", "").lstrip(" ")
if "/host=" in line:
data["host"] = line.lstrip(" ").replace("/host=", "").replace('"', "")
# Extract sequence
sequence_field = body.split("ORIGIN")[1].lower()
for char in [" ", "/", "\n"] + list(string.digits):
sequence_field = sequence_field.replace(char, "")
data["sequence"] = sequence_field.upper()
return data
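# A hedged usage sketch (the accession is illustrative; requires an aiohttp
# client session and the application settings object):
#
#   async with aiohttp.ClientSession() as session:
#       record = await fetch(settings, session, "NC_001422")
#       # record -> {"accession": ..., "definition": ..., "host": ..., "sequence": ...}
#       # or None when Genbank reports "Failed to retrieve sequence"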
|
[
"igboyes@gmail.com"
] |
igboyes@gmail.com
|
d631c815c2c1ba0870f891182e8369ce24c3be49
|
278060c3e3fce8c2d78640ac748188e80758deac
|
/tax_app/migrations/0002_auto_20191020_1607.py
|
d78e86314c315ed836c08685fd62b3ca35a1e8d3
|
[] |
no_license
|
ajisaq/BusinessTaxApp
|
33507bb64cfabc4a84a56826db3ae90d55539359
|
08031f03a7018c59b2e9b0095e80a5ff0b7b0b70
|
refs/heads/master
| 2022-05-03T17:29:47.635710
| 2019-12-02T09:25:14
| 2019-12-02T09:25:14
| 219,758,403
| 1
| 3
| null | 2022-04-22T22:50:39
| 2019-11-05T13:59:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-10-20 15:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tax_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Business_Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=15)),
('name', models.CharField(max_length=150)),
],
),
migrations.AddField(
model_name='profile',
name='contact',
field=models.CharField(default='9340-505', max_length=150),
preserve_default=False,
),
]
|
[
"mohammedaliyu136@gmail.com"
] |
mohammedaliyu136@gmail.com
|
c68d6ebbadb6d5ca9c872511c913b706c9693f5b
|
6fb4419f219fcf2453becfd3fe2d31dca3401da6
|
/get-influences.py
|
6df1a95dda8a74b2d99570fca626c49ecff004b1
|
[] |
no_license
|
christopher-beckham/wiki-lang-influence
|
dccc04e3565a9df408353a247058a74a9c44f5bb
|
9c2832cafc5d5c25f39aff739b0004af08a5234b
|
refs/heads/master
| 2020-04-14T23:53:33.941193
| 2014-06-19T09:57:59
| 2014-06-19T09:57:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
#!/usr/bin/python
from cz import cz
import sys
import re
import time
import urllib2
from sys import stdin
def get_langs(st):
st = "".join(cz.striphtml(st))
st = re.sub('\\[.*?\\]', '', st).replace('\n', '')
st = st.split(',')
st = [ st[0] ] + [ name[1::] for name in st[1::] ]
return st
def fe(arr):
print ",".join(arr)
for url in stdin.readlines():
try:
url = url.rstrip()
body = cz.geturl(url)
print url[ url.rfind('/')+1 :: ].replace("_(programming_language)","")
in_by = cz.getbetween2(body, '<th scope="row" style="text-align:left;">Influenced by</th>', '</tr>')
if len(in_by) > 0:
in_by = get_langs(in_by[0])
in_by = [ val.encode('ascii','ignore') for val in in_by ]
fe(in_by)
else:
print
in_to = cz.getbetween2(body, '<th scope="row" style="text-align:left;">Influenced</th>', '</tr>')
if len(in_to) > 0:
in_to = get_langs(in_to[0])
in_to = [ val.encode('ascii','ignore') for val in in_to ]
fe(in_to)
else:
print
except urllib2.HTTPError as e:
print "DONT_USE"
print
print
time.sleep(0.2)
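# Hypothetical invocation (assumes the cz helper module is importable): pipe
# Wikipedia article URLs in on stdin, e.g.
#   echo "http://en.wikipedia.org/wiki/Python_(programming_language)" | python get-influences.py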
|
[
"chrispy645@gmail.com"
] |
chrispy645@gmail.com
|
3571c8cc983bb908e5fefc686b7dd1d85062152c
|
530201d1bf8370a94ddf6ffcffd0c256389b42c9
|
/mazeclass.py
|
9d240b9505411691b0fd735472fb78dd60b9e784
|
[] |
no_license
|
chefakshito/cs520
|
1169a714c1e93bfb546df62b71662ff307a8de98
|
97b81f619e6f54f5125d14b58f04faa325227bd1
|
refs/heads/master
| 2021-01-21T06:39:35.828236
| 2017-02-27T04:22:37
| 2017-02-27T04:22:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,431
|
py
|
from random import randint
from PIL import Image
imgx = 500; imgy = 500
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
color = [(0,0, 0), (255, 255, 255)]
sx = 101
sy = 101
nm = 50
maze = [[[0 for x in range(sx)] for y in range(sy)] for z in range(nm)]
dx=[0,1,0,-1]
dy=[-1,0,1,0]
"""
cx=randint(0,mx-1)
cy=randint(0,my-1)
stack.append((cx,cy))
print(stack)
"""
sState=[]
gState=[]
class mazeClass:
def __init__(self):
global imgx; global imgy;
global image;
global pixels;
global color;
global sx
global sy
global maze
global dx
global dy
global nm;
for x in range(nm):
stack = [(randint(0, sx - 1),randint(0, sy - 1))]
sState.append(stack[-1]) #The start state is assigned.
while len(stack) > 0:
(cx, cy) = stack[-1];
maze[x][cy][cx] = 1
# find a new cell to add
nlst = [] # list of available neighbors
for i in range(4):
ch = randint(0,11)
if ch<6:
choice=1
else:
choice=randint(0,11)
nx = cx + dx[i]; ny = cy + dy[i]
if nx >= 0 and nx < sx and ny >= 0 and ny < sy:
if maze[x][ny][nx] == 0:
# print(maze[x][ny][nx],'check1') #--CHECK--1--
if choice==1:
# print('Entered Choice 1') #--CHECK--3--
# of occupied neighbors must be 1
ctr = 0
for j in range(4):
ex = nx + dx[j]; ey = ny + dy[j]
if ex >= 0 and ex < sx and ey >= 0 and ey < sy:
if maze[x][ey][ex] == 1: ctr += 1
if ctr == 1: nlst.append(i)
if choice>1:
# print('Entered Choice 2') #--CHECK--4--
luck=randint(1,11)
# print(luck,"CHECK 5") #--CHECK--5--
if luck>choice:
nlst.append(i)
# if 1 or more neighbors available then randomly select one and move
# print(nlst,'check2') #--CHECK--2--
if len(nlst) > 0:
ir = nlst[randint(0, len(nlst) - 1)]
cx += dx[ir]; cy += dy[ir]
stack.append((cx, cy))
else: stack.pop()
#A random goal state is generated
while len(gState)!=x+1:
gx=randint(0,sx-1)
gy=randint(0,sy-1)
if maze[x][gx][gy]==1:
gState.append((gx,gy))
# # paint the maze
# for ky in range(imgy):
# for kx in range(imgx):
# pixels[kx, ky] = color[maze[x][sy * ky // imgy][sx * kx // imgx]]
# image.save("Maze_" + str(x) + ".png", "PNG")
def getMaze(self):
        c = randint(0, nm - 1) # nm mazes exist at indices 0..nm-1; randint(0,50) could index out of range
        return (maze[c], c, sState[c], gState[c])
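# Usage sketch (assumes this module's globals; all nm mazes are generated
# eagerly in __init__, so construction is the expensive step):
#
#   m = mazeClass()
#   grid, idx, start, goal = m.getMaze()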
|
[
"="
] |
=
|
630ff6a5ad626ea10a5e3ddb440d4b01416a9d3b
|
0533d0ceb5966f7327f40d54bbd17e08e13d36bf
|
/python/LinkedList/Linked List Cycle II/Linked List Cycle II.py
|
996a20582aa17746b392099fe2d2bb7ca6441e83
|
[] |
no_license
|
danwaterfield/LeetCode-Solution
|
0c6178952ca8ca879763a87db958ef98eb9c2c75
|
d89ebad5305e4d1a185b0c6f101a88691602b523
|
refs/heads/master
| 2023-03-19T01:51:49.417877
| 2020-01-11T14:17:42
| 2020-01-11T14:17:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
        # Floyd's tortoise and hare: slow advances 1 node, fast advances 2;
        # if a cycle exists the two pointers must meet inside it.
        slow = head
        fast = head
        step = 0
        while slow and fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            step += 1
            if slow == fast:
                break
        if not fast or not fast.next:
            return None  # fast fell off the end: the list is acyclic
        # Phase 2: restart one pointer at the head; stepping both pointers
        # one node at a time makes them meet exactly at the cycle entry.
        slow2 = head
        index = 0
        while slow != slow2:
            slow = slow.next
            slow2 = slow2.next
            index += 1
        return slow
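# Why phase 2 works: if the cycle entry is L steps from the head and the
# pointers meet D steps inside a cycle of length C, then 2(L+D) = L+D+kC,
# so L = kC - D; walking L steps from both the head and the meeting point
# therefore lands both pointers on the cycle entry.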
|
[
"zjuzjj@gmail.com"
] |
zjuzjj@gmail.com
|
4df7849c6844bd581bb8841111f635cbbab50830
|
4dfd539c530c5cff6874f2fa0c06ffd893212ad3
|
/tencentcloud/chdfs/v20201112/errorcodes.py
|
d4604add29d3d07f8131cc49457ff2038e6d3425
|
[] |
no_license
|
TencentCloud/tencentcloud-sdk-python-intl-en
|
aac605d1a0458b637ba29eb49f6f166fe844a269
|
042b4d7fb609d4d240728197901b46008b35d4b0
|
refs/heads/master
| 2023-09-01T19:39:27.436454
| 2023-09-01T04:02:15
| 2023-09-01T04:02:15
| 227,834,644
| 4
| 6
| null | 2023-07-17T08:56:56
| 2019-12-13T12:23:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,853
|
py
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# The permission group has been bound.
FAILEDOPERATION_ACCESSGROUPBOUND = 'FailedOperation.AccessGroupBound'
# The account balance is insufficient.
FAILEDOPERATION_ACCOUNTINSUFFICIENTBALANCE = 'FailedOperation.AccountInsufficientBalance'
# The account identity is not verified.
FAILEDOPERATION_ACCOUNTUNAUTHENTICATED = 'FailedOperation.AccountUnauthenticated'
# The file system is not empty.
FAILEDOPERATION_FILESYSTEMNOTEMPTY = 'FailedOperation.FileSystemNotEmpty'
# The file system capacity after change is smaller than the currently used capacity.
FAILEDOPERATION_QUOTALESSTHANCURRENTUSED = 'FailedOperation.QuotaLessThanCurrentUsed'
# Internal error.
INTERNALERROR = 'InternalError'
# Incorrect parameter.
INVALIDPARAMETER = 'InvalidParameter'
# Incorrect parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Incorrect parameter value: AccessGroupId.
INVALIDPARAMETERVALUE_INVALIDACCESSGROUPID = 'InvalidParameterValue.InvalidAccessGroupId'
# Incorrect parameter value: AccessGroupName.
INVALIDPARAMETERVALUE_INVALIDACCESSGROUPNAME = 'InvalidParameterValue.InvalidAccessGroupName'
# Incorrect parameter value: `Address` of the permission rule.
INVALIDPARAMETERVALUE_INVALIDACCESSRULEADDRESS = 'InvalidParameterValue.InvalidAccessRuleAddress'
# Incorrect parameter value: CapacityQuota.
INVALIDPARAMETERVALUE_INVALIDCAPACITYQUOTA = 'InvalidParameterValue.InvalidCapacityQuota'
# Incorrect parameter value: Description.
INVALIDPARAMETERVALUE_INVALIDDESCRIPTION = 'InvalidParameterValue.InvalidDescription'
# Incorrect parameter value: FileSystemId.
INVALIDPARAMETERVALUE_INVALIDFILESYSTEMID = 'InvalidParameterValue.InvalidFileSystemId'
# Incorrect parameter value: FileSystemName.
INVALIDPARAMETERVALUE_INVALIDFILESYSTEMNAME = 'InvalidParameterValue.InvalidFileSystemName'
# Incorrect parameter value: MountPointId.
INVALIDPARAMETERVALUE_INVALIDMOUNTPOINTID = 'InvalidParameterValue.InvalidMountPointId'
# Incorrect parameter value: MountPointName.
INVALIDPARAMETERVALUE_INVALIDMOUNTPOINTNAME = 'InvalidParameterValue.InvalidMountPointName'
# Incorrect parameter value: VpcId.
INVALIDPARAMETERVALUE_INVALIDVPCID = 'InvalidParameterValue.InvalidVpcId'
# The quota limit is exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# The resource is in use.
RESOURCEINUSE = 'ResourceInUse'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The permission group does not exist.
RESOURCENOTFOUND_ACCESSGROUPNOTEXISTS = 'ResourceNotFound.AccessGroupNotExists'
# The permission rule does not exist.
RESOURCENOTFOUND_ACCESSRULENOTEXISTS = 'ResourceNotFound.AccessRuleNotExists'
# The file system does not exist.
RESOURCENOTFOUND_FILESYSTEMNOTEXISTS = 'ResourceNotFound.FileSystemNotExists'
# The mount point does not exist.
RESOURCENOTFOUND_MOUNTPOINTNOTEXISTS = 'ResourceNotFound.MountPointNotExists'
# The VPC does not exist.
RESOURCENOTFOUND_VPCNOTEXISTS = 'ResourceNotFound.VpcNotExists'
# The resource is unavailable.
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
|
[
"tencentcloudapi@tencent.com"
] |
tencentcloudapi@tencent.com
|
873f399a3fc2fb55ed3c9320f9bdce8d298bc065
|
474e74c654916d0a1b0311fc80eff206968539b1
|
/venv/Lib/site-packages/asposewordscloud/models/paragraph_link_collection_response.py
|
f18fa21cf6270818d46552834022303a45595eff
|
[] |
no_license
|
viktor-tchemodanov/Training_Tasks_Python_Cloud
|
4592cf61c2f017b314a009c135340b18fa23fc8f
|
b7e6afab4e9b76bc817ef216f12d2088447bd4cd
|
refs/heads/master
| 2020-09-04T10:39:23.023363
| 2019-11-05T10:36:45
| 2019-11-05T10:36:45
| 219,712,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,084
|
py
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="ParagraphLinkCollectionResponse.py">
# Copyright (c) 2018 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class ParagraphLinkCollectionResponse(object):
"""This response should be returned by the service when handling: GET http://api.aspose.com/v1.1/words/Test.doc/paragraphs
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'int',
'status': 'str',
'paragraphs': 'ParagraphLinkCollection'
}
attribute_map = {
'code': 'Code',
'status': 'Status',
'paragraphs': 'Paragraphs'
}
def __init__(self, code=None, status=None, paragraphs=None): # noqa: E501
"""ParagraphLinkCollectionResponse - a model defined in Swagger""" # noqa: E501
self._code = None
self._status = None
self._paragraphs = None
self.discriminator = None
if code is not None:
self.code = code
if status is not None:
self.status = status
if paragraphs is not None:
self.paragraphs = paragraphs
@property
def code(self):
"""Gets the code of this ParagraphLinkCollectionResponse. # noqa: E501
Response status code. # noqa: E501
:return: The code of this ParagraphLinkCollectionResponse. # noqa: E501
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ParagraphLinkCollectionResponse.
Response status code. # noqa: E501
:param code: The code of this ParagraphLinkCollectionResponse. # noqa: E501
:type: int
"""
if code is None:
raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501
self._code = code
@property
def status(self):
"""Gets the status of this ParagraphLinkCollectionResponse. # noqa: E501
Response status. # noqa: E501
:return: The status of this ParagraphLinkCollectionResponse. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ParagraphLinkCollectionResponse.
Response status. # noqa: E501
:param status: The status of this ParagraphLinkCollectionResponse. # noqa: E501
:type: str
"""
self._status = status
@property
def paragraphs(self):
"""Gets the paragraphs of this ParagraphLinkCollectionResponse. # noqa: E501
Collection of paragraphs # noqa: E501
:return: The paragraphs of this ParagraphLinkCollectionResponse. # noqa: E501
:rtype: ParagraphLinkCollection
"""
return self._paragraphs
@paragraphs.setter
def paragraphs(self, paragraphs):
"""Sets the paragraphs of this ParagraphLinkCollectionResponse.
Collection of paragraphs # noqa: E501
:param paragraphs: The paragraphs of this ParagraphLinkCollectionResponse. # noqa: E501
:type: ParagraphLinkCollection
"""
self._paragraphs = paragraphs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ParagraphLinkCollectionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"vtchemodanov@hotmail.com"
] |
vtchemodanov@hotmail.com
|
b2001f4905ca18d64754a9a6aafb71893fbb0f10
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/Luca.Paterlini/C.py
|
cd79d12c9e0577d934dba12922fbf43c13a8215c
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
import math
def AtkinSieve (limit):
results = [2,3,5]
sieve = [False]*(limit+1)
factor = int(math.sqrt(limit))+1
for i in range(1,factor):
for j in range(1, factor):
n = 4*i**2+j**2
if (n <= limit) and (n % 12 == 1 or n % 12 == 5):
sieve[n] = not sieve[n]
n = 3*i**2+j**2
if (n <= limit) and (n % 12 == 7):
sieve[n] = not sieve[n]
if i>j:
n = 3*i**2-j**2
if (n <= limit) and (n % 12 == 11):
sieve[n] = not sieve[n]
for index in range(5,factor):
if sieve[index]:
for jndex in range(index**2, limit, index**2):
sieve[jndex] = False
for index in range(7,limit):
if sieve[index]:
results.append(index)
return results
def conv_base(s,b,l):
r=0
for i in xrange(l):r=r*b+int(s[i])
return r
def lowest_div(n,ps):
for c in ps:
if n%c==0: return c
return -1
prime_sieve=AtkinSieve(10**6)
input()
N,J=map(int,raw_input().split())
u=0
print "Case #1:"
while J>0:
u+=1
q=bin(u)[2:]
s='1'+'0'*(N-2-len(q))+q+'1'
v=[]
for c in xrange(2,11): v.append(conv_base(s,c,N))
v=[lowest_div(x,prime_sieve) for x in v]
if all(i>0 for i in v):
print s,' '.join([str(x) for x in v]);J-=1
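# Why this construction works (a reading of the code, not part of it): each
# candidate jamcoin starts and ends with '1'; for every base 2..10 the script
# hunts for a prime divisor below 10**6 via lowest_div, which both proves the
# huge value composite and serves as the required witness divisor.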
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
b7e6ccbf91282fd4b1135b33210324ead1541bbf
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/577619_user_and_root_directory_logfile/recipe-577619.py
|
a1ea4b4ab355197464452fb26ca1eb8516cd6dac
|
[
"MIT"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844
| 2021-02-24T15:39:59
| 2021-02-24T15:39:59
| 341,878,663
| 0
| 0
|
MIT
| 2021-02-24T15:40:00
| 2021-02-24T11:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
#! usr/bin/python
import dircache
import getpass
import time
logfile = open("spam.txt", "w+")
localtime = time.asctime( time.localtime(time.time()) )
print >> logfile, 'local current time :', localtime
usr = getpass.getuser()
print >> logfile, 'current user :' + usr
lst = dircache.listdir('/')
print >> logfile, lst
logfile.close()
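# Note: the dircache module was removed in Python 3; under Python 3 the
# equivalent listing would simply be os.listdir('/').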
|
[
"betty@qburst.com"
] |
betty@qburst.com
|
052c2a2cb51a4e27408d96c8675bf650c28a11d6
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/3pzKWEg5oiaMemDdP_20.py
|
8a2a185b2383fd368b4c800327192066c7c46a25
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
def most_expensive_item(products):
Things = []
for x in products.keys():
Things.append(x)
Worth = []
for y in products.values():
Worth.append(y)
Highest = max(Worth)
Counter = 0
Length = len(Things)
while (Counter < Length):
Item = Things[Counter]
Money = Worth[Counter]
if (Money == Highest):
return Item
else:
Counter += 1
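# A more idiomatic equivalent (assuming a non-empty dict) would be:
#
#   def most_expensive_item(products):
#       return max(products, key=products.get)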
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
906cd4d8ad7c433c507a091c53dfd90fe4514f34
|
7f53a1ba1920a5301ca325d4faf480f3799c0a48
|
/merger_2012_emb.py
|
654e169fbd8f372fa53edddcf0d02d83b14ee90c
|
[] |
no_license
|
rmanzoni/tools
|
a7fe8083628954f7f02e80add1d3dd761720e8e6
|
e2189860d26be2a4276ec2ca3fe220e90adf9158
|
refs/heads/master
| 2021-01-01T18:37:33.731578
| 2015-04-15T13:46:12
| 2015-04-15T13:46:12
| 18,681,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,254
|
py
|
import os
import ROOT
from ROOT import gROOT, gStyle, TFile, gDirectory
gROOT.SetBatch(True)
#for mass in [110,115,120,125,130,135,140,145] :
for mass in [125] :
print "Higgs mass =", str(mass)
# search in current dir
matches = []
dirList = os.listdir(os.getcwd())
for fname in dirList:
if str(fname).find('mH'+str(mass)) > 0 and str(fname).find('for_smoothing_') < 0 :
if ( str(fname).find("BOOSTED") > 0 or str(fname).find("VBF") > 0 ) :
matches.append(fname)
for t in ["VBF","BOOSTED"] :
Files = []
for m in matches :
if str(m).find(t) > 0 :
if str(m).find("svfitMass.root") > 0 :
noShift = TFile.Open(m,'read')
Files.append(noShift)
elif str(m).find("svfitMass*1.03.root") > 0 :
upShift = TFile.Open(m,'read')
Files.append(upShift)
elif str(m).find("svfitMass*0.97.root") > 0 :
doShift = TFile.Open(m,'read')
Files.append(doShift)
elif str(m).find("svfitMass*1.06.root") > 0 :
upShiftem = TFile.Open(m,'read')
Files.append(upShiftem)
if t == "VBF" :
cat = "SM2"
elif t == "BOOSTED" :
cat = "SM1"
print 'category: ',t, cat
folderName = "LimitInputs"
folderList = os.listdir(os.getcwd())
found = False
for f1 in folderList :
if str(f1) == folderName :
found = True
if found == False :
os.mkdir(folderName)
if str(m).find(t) < 0 : continue
Shifted = TFile.Open(str(folderName+"/tauTau_2012_"+cat+"_mH"+str(mass)+".root"),'recreate')
Shifted.mkdir(str("tauTau_2012_"+cat))
for h in Files :
print 'File name: ',h.GetName()
h.cd(str("tauTau_"+cat))
dirList = gDirectory.GetListOfKeys()
for k1 in dirList :
histo = k1.ReadObj()
Shifted.cd(str("tauTau_2012_"+cat))
histo.Write()
for j in Files :
j.Close()
Shifted.Close()
print '+++++++++++'
print '+ end job +'
print '+++++++++++'
# import fnmatch
# search through dir and subdirs
# matches = []
# for root, dirnames, filenames in os.walk(os.getcwd()):
# for filename in fnmatch.filter(filenames, '*VBF*'):
# matches.append(os.path.join(root, filename))
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
f80430b48ec9e0b71e51fbfed5dd8c8bcdabbbe4
|
42e8c0992fd845237fa7b1baef494bfb6abc9dba
|
/ui/data_input_panel.py
|
7dd2b4764971de4b2bd9fc109be70c082724291f
|
[] |
no_license
|
mx1001/animation_nodes
|
b5ae336512bb43f40e6ca5276a4e05acb5fdc81b
|
b77b96d991f2b26c03bcbeef4a9fa8a09173ea4f
|
refs/heads/master
| 2020-02-26T17:46:05.676451
| 2016-03-09T15:22:01
| 2016-03-09T15:22:01
| 54,067,761
| 5
| 0
| null | 2016-03-16T21:27:54
| 2016-03-16T21:27:54
| null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
import bpy
from .. tree_info import getNodesByType
class DataInputPanel(bpy.types.Panel):
bl_idname = "an_data_input_panel"
bl_label = "Data Input"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "AN"
def draw(self, context):
layout = self.layout
nodes = getNodesByType("an_DataInputNode")
for node in nodes:
if not node.showInViewport: continue
socket = node.inputs[0]
socket.drawSocket(layout, text = node.label, drawType = "TEXT_PROPERTY_OR_NONE")
|
[
"mail@jlucke.com"
] |
mail@jlucke.com
|
d37d447bd7ce2b1d813f28d559dadf00e8be9f92
|
df25eefe4878c08b0f51f6ff19f48054ba6dbc2a
|
/test/espnet2/text/test_text_converter.py
|
0d1f32b94924e5d24b9c95b25a217e07483a5f3e
|
[
"Apache-2.0"
] |
permissive
|
sas91/espnet
|
7f14a9394469993fb948758c7b0b78f76ad12cbe
|
8e263d6512eb84cebeaecf6b99204c102a8252b5
|
refs/heads/master
| 2021-07-13T18:45:13.981483
| 2020-06-02T08:43:25
| 2020-06-02T08:43:25
| 142,748,209
| 1
| 0
|
Apache-2.0
| 2018-07-29T09:37:35
| 2018-07-29T09:37:35
| null |
UTF-8
|
Python
| false
| false
| 2,565
|
py
|
from pathlib import Path
import string
import pytest
import sentencepiece as spm
from espnet2.text.char_tokenizer import CharTokenizer
from espnet2.text.sentencepiece_tokenizer import SentencepiecesTokenizer
from espnet2.text.word_tokenizer import WordTokenizer
@pytest.fixture(params=[None, " "])
def word_converter(request):
return WordTokenizer(delimiter=request.param)
@pytest.fixture
def char_converter():
return CharTokenizer(["[foo]"])
@pytest.fixture
def spm_srcs(tmp_path: Path):
input_text = tmp_path / "text"
vocabsize = len(string.ascii_letters) + 4
model_prefix = tmp_path / "model"
model = str(model_prefix) + ".model"
input_sentence_size = 100000
with input_text.open("w") as f:
f.write(string.ascii_letters + "\n")
spm.SentencePieceTrainer.Train(
f"--input={input_text} "
f"--vocab_size={vocabsize} "
f"--model_prefix={model_prefix} "
f"--input_sentence_size={input_sentence_size}"
)
sp = spm.SentencePieceProcessor()
sp.load(model)
with input_text.open("r") as f:
vocabs = {"<unk>", "▁"}
for line in f:
tokens = sp.DecodePieces(list(line.strip()))
vocabs |= set(tokens)
return model, vocabs
@pytest.fixture
def spm_converter(tmp_path, spm_srcs):
model, vocabs = spm_srcs
sp = spm.SentencePieceProcessor()
sp.load(model)
token_list = tmp_path / "token.list"
with token_list.open("w") as f:
for v in vocabs:
f.write(f"{v}\n")
return SentencepiecesTokenizer(model=model)
def test_Text2Sentencepieces_repr(spm_converter: SentencepiecesTokenizer):
print(spm_converter)
def test_Text2Sentencepieces_text2tokens(spm_converter: SentencepiecesTokenizer):
assert spm_converter.tokens2text(spm_converter.text2tokens("Hello")) == "Hello"
def test_Text2Words_repr(word_converter: WordTokenizer):
print(word_converter)
def test_Text2Words_text2tokens(word_converter: WordTokenizer):
assert word_converter.text2tokens("Hello World!! Ummm") == [
"Hello",
"World!!",
"Ummm",
]
def test_Text2Words_tokens2text(word_converter: WordTokenizer):
assert word_converter.tokens2text("Hello World!!".split()) == "Hello World!!"
def test_Text2Chars_repr(char_converter: CharTokenizer):
print(char_converter)
def test_Text2Chars_text2tokens(char_converter: CharTokenizer):
assert char_converter.text2tokens("He[foo]llo") == [
"H",
"e",
"[foo]",
"l",
"l",
"o",
]
|
[
"naoyuki.kamo829@gmail.com"
] |
naoyuki.kamo829@gmail.com
|
561fbf76952e72959088ff99ae838295f3938bc7
|
479d3414e914f144fff20ee71872472ac84ca410
|
/codespace/python/telegram/_files/inputfile.py
|
730301869bd5e67593a4565ada2e146058b8f953
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
tzpBingo/github-trending
|
0fa4e0e08743f0683f68fd54d74eec466bc525e0
|
505014e84bdea7e2732296821028df20c0305390
|
refs/heads/master
| 2023-07-24T13:29:47.393940
| 2023-07-19T09:39:29
| 2023-07-19T09:39:29
| 102,687,887
| 49
| 20
|
MIT
| 2023-05-22T21:33:53
| 2017-09-07T03:39:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,191
|
py
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram InputFile."""
import mimetypes
from typing import IO, Optional, Union
from uuid import uuid4
from telegram._utils.files import load_file
from telegram._utils.types import FieldTuple
_DEFAULT_MIME_TYPE = "application/octet-stream"
class InputFile:
"""This object represents a Telegram InputFile.
.. versionchanged:: 20.0
* The former attribute ``attach`` was renamed to :attr:`attach_name`.
* Method ``is_image`` was removed. If you pass :obj:`bytes` to :paramref:`obj` and would
like to have the mime type automatically guessed, please pass :paramref:`filename`
in addition.
Args:
        obj (:term:`file object` | :obj:`bytes` | :obj:`str`): An open file descriptor or the file's
            content as bytes or string.
Note:
If :paramref:`obj` is a string, it will be encoded as bytes via
:external:obj:`obj.encode('utf-8') <str.encode>`.
.. versionchanged:: 20.0
Accept string input.
filename (:obj:`str`, optional): Filename for this InputFile.
attach (:obj:`bool`, optional): Pass :obj:`True` if the parameter this file belongs to in
the request to Telegram should point to the multipart data via an ``attach://`` URI.
Defaults to `False`.
Attributes:
input_file_content (:obj:`bytes`): The binary content of the file to send.
attach_name (:obj:`str`): Optional. If present, the parameter this file belongs to in
the request to Telegram should point to the multipart data via a an URI of the form
``attach://<attach_name>`` URI.
filename (:obj:`str`): Filename for the file to be sent.
mimetype (:obj:`str`): The mimetype inferred from the file to be sent.
"""
__slots__ = ("filename", "attach_name", "input_file_content", "mimetype")
def __init__(
self,
obj: Union[IO[bytes], bytes, str],
filename: Optional[str] = None,
attach: bool = False,
):
if isinstance(obj, bytes):
self.input_file_content: bytes = obj
elif isinstance(obj, str):
self.input_file_content = obj.encode("utf-8")
else:
reported_filename, self.input_file_content = load_file(obj)
filename = filename or reported_filename
self.attach_name: Optional[str] = "attached" + uuid4().hex if attach else None
if filename:
self.mimetype: str = (
mimetypes.guess_type(filename, strict=False)[0] or _DEFAULT_MIME_TYPE
)
else:
self.mimetype = _DEFAULT_MIME_TYPE
self.filename: str = filename or self.mimetype.replace("/", ".")
@property
def field_tuple(self) -> FieldTuple:
"""Field tuple representing the contents of the file for upload to the Telegram servers.
Returns:
Tuple[:obj:`str`, :obj:`bytes`, :obj:`str`]:
"""
return self.filename, self.input_file_content, self.mimetype
@property
def attach_uri(self) -> Optional[str]:
"""URI to insert into the JSON data for uploading the file. Returns :obj:`None`, if
:attr:`attach_name` is :obj:`None`.
"""
return f"attach://{self.attach_name}" if self.attach_name else None
|
[
"tzpbingo@gmail.com"
] |
tzpbingo@gmail.com
|
6afdae640dd9ad3d9adbf1cbc0c7d8cf8b7d3466
|
491c1e520a64e3ebd5349130f35047aaed1e70ec
|
/two pointer/680 validPalindrome.py
|
3ccf25be7a1c357ec82bfd31b9cc88e976d594fb
|
[] |
no_license
|
pangyouzhen/data-structure
|
33a7bd7790c8db3e018114d85a137f5f3d6b92f8
|
cd46cf08a580c418cc40a68bf9b32371fc69a803
|
refs/heads/master
| 2023-05-26T12:02:30.800301
| 2023-05-21T08:07:57
| 2023-05-21T08:07:57
| 189,315,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
class Solution:
def validPalindrome(self, s: str) -> bool:
def checkPalindrome(low, high):
i, j = low, high
while i < j:
if s[i] != s[j]:
return False
i = i + 1
j = j - 1
return True
low, high = 0, len(s) - 1
while low < high:
if s[low] == s[high]:
low = low + 1
high = high - 1
else:
return checkPalindrome(low + 1, high) or checkPalindrome(low, high - 1)
return True
sol = Solution()
print(sol.validPalindrome("abca"))
assert sol.validPalindrome("abca") == True
print(sol.validPalindrome("abcca"))
|
[
"pangyouzhen@live.com"
] |
pangyouzhen@live.com
|
830a140f3af9cb75dd17cf22df4d0529f9709007
|
8f1673c2abfed8f372e22fbd1c280486014b4466
|
/nmt/embeddings/fresh_embedding_test.py
|
02c77c2260cfcd5f01d846f377761ea8db571074
|
[
"Apache-2.0"
] |
permissive
|
naivenlp/naivenmt-legacy
|
be670df40a98c0f28bdacb2a3acf9a5b06667966
|
bcceeec0a477eb09c4a8915e638a27dae6c95562
|
refs/heads/master
| 2021-10-27T02:55:33.160837
| 2019-04-15T14:39:06
| 2019-04-15T14:39:06
| 118,464,831
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
import tensorflow as tf
import numpy as np
from nmt.embeddings.fresh_embedding import FreshEmbedding
from nmt import misc_utils
class FreshEmbeddingTest(tf.test.TestCase):
def testFreshEmbedding(self):
vocab_file = misc_utils.get_test_data('iwslt15.vocab.100.en')
embedder = FreshEmbedding(vocab_file=vocab_file)
inputs = np.array([
['I', 'am', 'a', 'test']
])
inputs = tf.constant(inputs,dtype=tf.string)
length = np.array([4])
length = tf.constant(length,dtype=tf.int32)
params = {
'batch_size': 1
}
embedded = embedder.embedding(inputs, length, params)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
embedded = sess.run(embedded)
print(embedded)
if __name__ == '__main__':
tf.test.main()
|
[
"zhouyang.luo@gmail.com"
] |
zhouyang.luo@gmail.com
|
f2ea129609ab68de9af623d8b8c473e6eb333988
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/examples/sparse/sign.py
|
61ba2104d673a953ad976e5b10a35c9c2232d0b9
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,804
|
py
|
"""
[SIGN: Scalable Inception Graph Neural Networks]
(https://arxiv.org/abs/2004.11198)
This example shows a simplified version of SIGN: a precomputed 2-hops diffusion
operator on top of symmetrically normalized adjacency matrix A_hat.
"""
import dgl.sparse as dglsp
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.data import CoraGraphDataset
from torch.optim import Adam
################################################################################
# (HIGHLIGHT) Take the advantage of DGL sparse APIs to implement the feature
# diffusion in SIGN laconically.
################################################################################
def sign_diffusion(A, X, r):
# Perform the r-hop diffusion operation.
X_sign = [X]
for _ in range(r):
X = A @ X
X_sign.append(X)
return X_sign
class SIGN(nn.Module):
def __init__(self, in_size, out_size, r, hidden_size=256):
super().__init__()
# Note that theta and omega refer to the learnable matrices in the
# original paper correspondingly. The variable r refers to subscript to
# theta.
self.theta = nn.ModuleList(
[nn.Linear(in_size, hidden_size) for _ in range(r + 1)]
)
self.omega = nn.Linear(hidden_size * (r + 1), out_size)
def forward(self, X_sign):
results = []
for i in range(len(X_sign)):
results.append(self.theta[i](X_sign[i]))
Z = F.relu(torch.cat(results, dim=1))
return self.omega(Z)
def evaluate(g, pred):
label = g.ndata["label"]
val_mask = g.ndata["val_mask"]
test_mask = g.ndata["test_mask"]
# Compute accuracy on validation/test set.
val_acc = (pred[val_mask] == label[val_mask]).float().mean()
test_acc = (pred[test_mask] == label[test_mask]).float().mean()
return val_acc, test_acc
def train(model, g, X_sign):
label = g.ndata["label"]
train_mask = g.ndata["train_mask"]
optimizer = Adam(model.parameters(), lr=3e-3)
for epoch in range(10):
# Switch the model to training mode.
model.train()
# Forward.
logits = model(X_sign)
# Compute loss with nodes in training set.
loss = F.cross_entropy(logits[train_mask], label[train_mask])
# Backward.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Switch the model to evaluating mode.
model.eval()
# Compute prediction.
logits = model(X_sign)
pred = logits.argmax(1)
# Evaluate the prediction.
val_acc, test_acc = evaluate(g, pred)
print(
f"In epoch {epoch}, loss: {loss:.3f}, val acc: {val_acc:.3f}, test"
f" acc: {test_acc:.3f}"
)
if __name__ == "__main__":
# If CUDA is available, use GPU to accelerate the training, use CPU
# otherwise.
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load graph from the existing dataset.
dataset = CoraGraphDataset()
g = dataset[0].to(dev)
# Create the sparse adjacency matrix A (note that W was used as the notation
# for adjacency matrix in the original paper).
indices = torch.stack(g.edges())
N = g.num_nodes()
A = dglsp.spmatrix(indices, shape=(N, N))
# Calculate the symmetrically normalized adjacency matrix.
I = dglsp.identity(A.shape, device=dev)
A_hat = A + I
D_hat = dglsp.diag(A_hat.sum(dim=1)) ** -0.5
A_hat = D_hat @ A_hat @ D_hat
# 2-hop diffusion.
r = 2
X = g.ndata["feat"]
X_sign = sign_diffusion(A_hat, X, r)
# Create SIGN model.
in_size = X.shape[1]
out_size = dataset.num_classes
model = SIGN(in_size, out_size, r).to(dev)
# Kick off training.
train(model, g, X_sign)
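# --- Hedged sketch (added; assumes only torch from the imports above): the
# same r-hop diffusion on a tiny dense adjacency matrix, to make the
# precomputation concrete. sign_diffusion returns [X, A_hat X, A_hat^2 X],
# one input per theta branch.
# A_dense = torch.tensor([[0., 1.], [1., 0.]])
# I_dense = torch.eye(2)
# d_inv_sqrt = torch.diag((A_dense + I_dense).sum(1) ** -0.5)
# A_hat_dense = d_inv_sqrt @ (A_dense + I_dense) @ d_inv_sqrt
# X_sign_dense = sign_diffusion(A_hat_dense, torch.rand(2, 4), r=2)  # 3 tensors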
|
[
"noreply@github.com"
] |
dmlc.noreply@github.com
|
f77ea489f20c231434ca0f1caea9de519cf3ca2f
|
b7a2a80843fa5141ffb9c7b4439f1d2ac713af30
|
/Version2/SystemKommandos.py
|
3eb10a56a4624476aab5ba3c18ba953ddb6bde07
|
[] |
no_license
|
wunnox/python_grundlagen
|
df1bc2b9b1b561bd6733ccc25305e799a48e714e
|
fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0
|
refs/heads/master
| 2023-05-01T12:19:23.208445
| 2023-04-16T11:29:01
| 2023-04-16T11:29:01
| 222,099,539
| 2
| 3
| null | 2019-12-19T10:56:43
| 2019-11-16T12:57:54
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
log = os.popen("ping -c 1 google.com").readlines()
for zeile in log:
print(zeile.replace("\n", ""))
# or
if os.system("ping -c 1 google.com") == 0:
    print("IP is reachable")
else:
    print("IP is NOT reachable")
|
[
"peter.christen@cssgmbh.ch"
] |
peter.christen@cssgmbh.ch
|
15eb72ad636edbf32a27501814ebe6ead6ccc591
|
3c750d4d60660fdf6ef84d7b7ab9663fb76d0fa1
|
/sopht/numeric/eulerian_grid_ops/poisson_solver_2d/scipy_fft_2d.py
|
c27c75ab0c351c35f8a07f4cc81135a2084dd26a
|
[
"MIT"
] |
permissive
|
SophT-Team/SophT
|
25d157a17734600e9aa4f522b4574bfefe202bc7
|
99a094e0d6e635e5b2385a69bdee239a4d1fb530
|
refs/heads/main
| 2023-08-31T21:14:10.304592
| 2023-08-31T17:00:38
| 2023-08-31T17:00:38
| 498,451,510
| 2
| 2
|
MIT
| 2023-09-12T15:37:31
| 2022-05-31T18:25:12
|
Python
|
UTF-8
|
Python
| false
| false
| 457
|
py
|
"""Create reference FFT operations via scipy in 2D."""
import numpy as np
from scipy.fft import irfftn, rfftn
def fft_ifft_via_scipy_kernel_2d(
fourier_field: np.ndarray,
inv_fourier_field: np.ndarray,
field: np.ndarray,
num_threads: int = 1,
) -> None:
"""Perform reference FFT operations via scipy."""
fourier_field[...] = rfftn(field, workers=num_threads)
inv_fourier_field[...] = irfftn(fourier_field, workers=num_threads)
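# --- Hedged usage sketch (added): allocate output buffers with the shapes
# rfftn/irfftn produce for a real 8x8 field (rfftn keeps n//2 + 1 bins on the
# last axis), then run the kernel, which writes into them in place.
# field = np.random.rand(8, 8)
# fourier_field = np.zeros((8, 5), dtype=complex)
# inv_fourier_field = np.zeros((8, 8))
# fft_ifft_via_scipy_kernel_2d(fourier_field, inv_fourier_field, field)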
|
[
"bhosale2@illinois.edu"
] |
bhosale2@illinois.edu
|
0856f6a1b38760b3161698bc0ef30a8a3bca0ed4
|
360c777a2b77be466b1cf7c8fd74d6fd04f56b55
|
/migrations/versions/7844211fb55_.py
|
7d304f72a21244221e00963d026d4463433f0936
|
[
"MIT"
] |
permissive
|
hreeder/nexus-auth
|
790a3b2623ddf443138a4b0f0af1380dbc4db8ae
|
8d51aef01647e32ba4a284f02de73a2caad7cf49
|
refs/heads/master
| 2021-01-10T10:08:37.190558
| 2016-02-29T12:27:21
| 2016-02-29T12:27:21
| 52,789,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
"""empty message
Revision ID: 7844211fb55
Revises: c5242907c1e
Create Date: 2014-07-30 10:23:03.502189
"""
# revision identifiers, used by Alembic.
revision = '7844211fb55'
down_revision = 'c5242907c1e'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('character', sa.Column('lastKnownShip', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('character', 'lastKnownShip')
### end Alembic commands ###
|
[
"harry@harryreeder.co.uk"
] |
harry@harryreeder.co.uk
|
cf7ab34a660f56433f5b54ab6324670467ea001f
|
214dbac428fc8ad18d8775cd3ffd744068a77d24
|
/my_django_app/settings.py
|
9cc596a67ddf070aaa58ab4421bb25e912c69226
|
[] |
no_license
|
nsalahdeen/DjangoProject
|
879c609fd5b53cf4be3a0ff5358d70adfbebbcf7
|
65a1bdcddb719e27ca67cd12aa47171f50370036
|
refs/heads/main
| 2023-04-20T04:25:20.608614
| 2021-05-04T15:51:50
| 2021-05-04T15:51:50
| 364,306,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,341
|
py
|
"""
Django settings for my_django_app project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = str(os.getenv('SECRET_KEY'))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_first_django_app', #my firstApp
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
"/templates", Path.joinpath(BASE_DIR, "templates")
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_django_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"you@example.com"
] |
you@example.com
|
e572940f400c4443799befdb71ed04fca2b087fd
|
0305c23d48ee6e17722124aed5f90f55f1a2f5ef
|
/examples/lightgbm_examples/classification.py
|
9924b8bca54a03c1e353420d2547fd5d7404cf68
|
[
"MIT"
] |
permissive
|
mdjabc/hyperparameter_hunter
|
d20a01fa6a3493fdbb595f8b5615a9d9ff398770
|
bfbd1faf63272a62e6f971d7e9a0487d71aea8f6
|
refs/heads/master
| 2020-05-16T01:39:39.275129
| 2019-04-02T01:16:30
| 2019-04-02T01:16:30
| 182,608,190
| 1
| 0
|
MIT
| 2019-04-22T02:21:50
| 2019-04-22T02:21:49
| null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
from hyperparameter_hunter import Environment, CVExperiment
from hyperparameter_hunter import RandomForestOptimization, Real, Integer, Categorical
import pandas as pd
from sklearn.datasets import fetch_covtype
from sklearn.metrics import f1_score
from lightgbm import LGBMClassifier
#################### Format DataFrame ####################
# Be advised, this dataset (SKLearn's Forest Cover Types) can take a little while to download...
# This is a multi-class classification task, in which the target is label-encoded.
data = fetch_covtype(shuffle=True, random_state=32)
train_df = pd.DataFrame(data.data, columns=["x_{}".format(_) for _ in range(data.data.shape[1])])
train_df["y"] = data.target
#################### Set Up Environment ####################
env = Environment(
train_dataset=train_df,
results_path="HyperparameterHunterAssets",
target_column="y",
metrics=dict(f1=lambda y_true, y_pred: f1_score(y_true, y_pred, average="micro")),
cv_type="StratifiedKFold",
cv_params=dict(n_splits=5, random_state=32),
)
# Now that HyperparameterHunter has an active `Environment`, we can do two things:
#################### 1. Perform Experiments ####################
experiment = CVExperiment(
model_initializer=LGBMClassifier,
model_init_params=dict(boosting_type="gbdt", num_leaves=31, max_depth=-1, subsample=0.5),
model_extra_params=dict(
fit=dict(
feature_name=train_df.columns.values[:-1].tolist(),
categorical_feature=train_df.columns.values[11:-1].tolist(),
)
),
)
# And/or...
#################### 2. Hyperparameter Optimization ####################
optimizer = RandomForestOptimization(iterations=10, random_state=32)
optimizer.set_experiment_guidelines(
model_initializer=LGBMClassifier,
model_init_params=dict(
boosting_type=Categorical(["gbdt", "dart"]),
num_leaves=Integer(10, 40),
max_depth=-1,
subsample=Real(0.3, 0.7),
),
model_extra_params=dict(
fit=dict(
feature_name=train_df.columns.values[:-1].tolist(),
categorical_feature=train_df.columns.values[11:-1].tolist(),
)
),
)
optimizer.go()
# Notice, `optimizer` recognizes our earlier `experiment`'s hyperparameters fit inside the search
# space/guidelines set for `optimizer`.
# Then, when optimization is started, it automatically learns from `experiment`'s results
# - without any extra work for us!
|
[
"hunter@mcgushion.com"
] |
hunter@mcgushion.com
|
7f8d4393203d77170ee56f9dc35fd118af389dbf
|
69d8f3cf7c10640a692fa9175f5a63a5a7b54fcd
|
/naver_webtoon.py
|
2a03516c541bf495641c72d69deb1ef931d4df67
|
[] |
no_license
|
baidoosik/crawling
|
bd89fd9f59ecb8921e765b03faadf1c55bd59c74
|
62669badf6ce84e0ac9e575b736e41051642ea9c
|
refs/heads/master
| 2021-06-21T19:25:30.175798
| 2017-07-30T15:02:55
| 2017-07-30T15:02:55
| 84,198,685
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
from crawling import *
def naver_webtoon(url):
ep_headers = {
'referer': 'http://comic.naver.com/webtoon/'
}
html = req.get(url, headers=ep_headers).text
soup = bfs(html, 'html.parser')
webtoon_name = ''.join(soup.select('div.detail h2')[0].text.split())
ep_name = soup.select('.tit_area h3')[0].text
result = []
n_result = []
file_list = []
max_width, max_height = 0, 0
for tag in soup.select('#comic_view_area img'):
try:
print(tag['src'])
result.append(tag['src'])
except KeyError:
            print('Finished crawling the required data')
break
for img_url in result:
print(img_url)
if re.match(r'^http.*$', img_url):
n_result.append(img_url)
for img_url in n_result:
img = req.get(img_url, headers=ep_headers).content
img_name = os.path.basename(img_url)
img_path = os.path.join(webtoon_name, ep_name, img_name)
dir_path = os.path.dirname(img_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if os.path.exists(img_path):
pass
else:
with open(img_path, 'wb') as f:
f.write(img)
file_list.append(img_path)
for img_url in file_list:
with Image.open(img_url) as im:
if max_width < im.width:
max_width = im.width
max_height = max_height + im.height
size = (max_width, max_height)
white = (255, 255, 255)
    now = math.ceil(time.time())
with Image.new('RGB', size, white) as canvas:
height = 0
for filename in file_list:
with Image.open(filename) as im:
canvas.paste(im, box=(0, height))
height = height + im.height
canvas.save('{}.png'.format(now))
if __name__ == '__main__':
    print('Please enter the URL of the webtoon you want!')
    req_url = input()
    print('Whoa~~ turning the requested webtoon into a single image!!')
    naver_webtoon(req_url)
|
[
"qoentlr37@naver.com"
] |
qoentlr37@naver.com
|
c63011b271a1d1a905c1b4a064dc8fb4dfb1f928
|
c957b4663cc4cb21e5172f23c6989031be8c3e5b
|
/python/830. Positions of Large Groups.py
|
a1f9bf5b13cad44b7186ef6646eebc9f05be1547
|
[] |
no_license
|
gajanlee/leetcode
|
e061dc37af0f83bf2bce00c391c0b8a9f3177b22
|
0d3c8477f05604a059e58a8764ce0d8bd418edde
|
refs/heads/master
| 2018-12-26T06:12:24.995542
| 2018-10-30T05:03:27
| 2018-10-30T05:03:27
| 102,965,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
"""
In a string S of lowercase letters, these letters form consecutive groups of the same character.
For example, a string like S = "abbxxxxzyy" has the groups "a", "bb", "xxxx", "z" and "yy".
Call a group large if it has 3 or more characters. We would like the starting and ending positions of every large group.
The final answer should be in lexicographic order.
Example 1:
Input: "abbxxxxzzy"
Output: [[3,6]]
Explanation: "xxxx" is the single large group with starting 3 and ending positions 6.
Example 2:
Input: "abc"
Output: []
Explanation: We have "a","b" and "c" but no large group.
Example 3:
Input: "abcdddeeeeaabbbcd"
Output: [[3,5],[6,9],[12,14]]
Note: 1 <= S.length <= 1000
"""
class Solution:
def largeGroupPositions(self, S):
"""
:type S: str
:rtype: List[List[int]]
"""
res = []
S += "#"
last = "$"
start = end = 0
for i, s in enumerate(S):
if s == last:
end += 1
elif end - start >= 2:
res.append([start, end])
start = end = i
else:
start = end = i
last = s
return res
        # Alternative one-liner (left commented out; it was unreachable after
        # the return above and would need `import re`): \1 back-references the
        # first capture group, and {2,} requires at least 2 further
        # repetitions, i.e. a run of length >= 3.
        # return [[r.start(), r.end() - 1] for r in re.finditer(r'(\w)\1{2,}', S)]
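# Usage check (added), mirroring Example 3 from the docstring:
# print(Solution().largeGroupPositions("abcdddeeeeaabbbcd"))
# -> [[3, 5], [6, 9], [12, 14]]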
|
[
"lee_jiazh@163.com"
] |
lee_jiazh@163.com
|
26fb69f33707a68a52b9cf096ecea4d441b19610
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/9QbhjtbkXp3QZNuDu_4.py
|
0fdb756be15b9bb5f9a2c994e4cc0739f8aa07b5
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
"""
In this challenge, you have to find the last 15 palindromes of all numbers
starting from ten and up to a given limit, including the limit in the search.
Given an integer `limit` being the upper limit of the range of interest,
implement a function that returns the last 15 palindromes numbers lower **or
equal** to `limit` as a list sorted ascendingly.
### Examples
generate_palindromes(151) ➞ [
11, 22, 33, 44, 55,
66, 77, 88, 99, 101,
111, 121, 131, 141, 151
]
generate_palindromes(600) ➞ [
454, 464, 474, 484, 494,
505, 515, 525, 535, 545,
555, 565, 575, 585, 595
]
generate_palindromes(999999) ➞ [
985589, 986689, 987789, 988889, 989989,
990099, 991199, 992299, 993399, 994499,
995599, 996699, 997799, 998899, 999999
]
### Notes
N/A
"""
def generate_palindromes(limit):
is_pal = lambda n: str(n) == str(n)[::-1]
ans = []
while len(ans)<15:
if is_pal(limit): ans = [limit] + ans
limit-=1
return ans
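# Usage check (added), matching the first docstring example:
# print(generate_palindromes(151))
# -> [11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151]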
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
8513a8e7ee09df250f1ac7d1b06dc0a64cd08208
|
2a6f4b01b5ef2729eef6b24cba926f90edace478
|
/vectorbt/utils/config.py
|
3ae73a71793ae3c6a482ea095078ea8b9839342b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nehcuh/vectorbt
|
c8c413f11a8e1503f75477e41fc30127c1300236
|
c0f307169b19a0f26c1992a9e29f4be380c1b220
|
refs/heads/master
| 2023-04-11T14:17:08.590766
| 2021-04-07T13:12:11
| 2021-04-07T13:12:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,664
|
py
|
"""Utilities for configuration."""
from copy import copy
from collections import namedtuple
import dill
import inspect
from vectorbt.utils import checks
from vectorbt.utils.attr import deep_getattr
def get_func_kwargs(func):
"""Get keyword arguments of the function."""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
class atomic_dict(dict):
"""Dict that behaves like a single value when merging."""
pass
def merge_dicts(*dicts):
"""Merge dicts."""
x, y = dicts[0], dicts[1]
if x is None:
x = {}
if y is None:
y = {}
checks.assert_type(x, dict)
checks.assert_type(y, dict)
if len(x) == 0:
z = y.copy()
elif len(y) == 0:
z = x.copy()
else:
z = {}
overlapping_keys = [k for k in x if k in y] # order matters
for k in overlapping_keys:
if isinstance(x[k], dict) and isinstance(y[k], dict) and not isinstance(y[k], atomic_dict):
z[k] = merge_dicts(x[k], y[k])
else:
z[k] = y[k]
for k in [k for k in x if k not in y]:
z[k] = x[k]
for k in [k for k in y if k not in x]:
z[k] = y[k]
if len(dicts) > 2:
return merge_dicts(z, *dicts[2:])
return z
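# Example (added for illustration): nested dicts merge recursively with the
# right-hand side winning on conflicts, while an atomic_dict replaces as a whole:
# merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}})          # {'a': {'b': 1, 'c': 2}}
# merge_dicts({'a': {'b': 1}}, {'a': atomic_dict(c=2)})  # {'a': {'c': 2}}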
def copy_dict(dct):
"""Copy dict using shallow-deep copy hybrid.
Traverses all nested dicts and copies each value using shallow copy."""
dct_copy = dict()
for k, v in dct.items():
if isinstance(v, dict):
dct_copy[k] = copy_dict(v)
else:
dct_copy[k] = copy(v)
return dct_copy
_RaiseKeyError = object()
DumpTuple = namedtuple('DumpTuple', ('cls', 'dumps'))
class Pickleable:
"""Superclass that defines abstract properties and methods for pickle-able classes."""
def dumps(self, **kwargs):
"""Pickle to a string."""
raise NotImplementedError
@classmethod
def loads(cls, dumps, **kwargs):
"""Unpickle from a string."""
raise NotImplementedError
def save(self, fname, **kwargs):
"""Save dumps to a file."""
dumps = self.dumps(**kwargs)
with open(fname, "wb") as f:
f.write(dumps)
@classmethod
def load(cls, fname, **kwargs):
"""Load dumps from a file and create new instance."""
with open(fname, "rb") as f:
dumps = f.read()
return cls.loads(dumps, **kwargs)
class Config(dict, Pickleable):
"""Extends dict with config features."""
def __init__(self, *args, frozen=False, read_only=False, **kwargs):
super().__init__(*args, **kwargs)
self._frozen = frozen
self._read_only = read_only
self._init_config = copy_dict(self) if not read_only else None
@property
def frozen(self):
"""Whether this dict's keys are frozen."""
return self._frozen
@property
def read_only(self):
"""Whether this dict is read-only."""
return self._read_only
@property
def init_config(self):
"""Initial config."""
return self._init_config
def __setitem__(self, k, v):
if self.read_only:
raise TypeError("Config is read-only")
if self.frozen:
if k not in self:
raise KeyError(f"Key '{k}' is not valid")
super().__setitem__(k, v)
def __delitem__(self, k):
if self.read_only:
raise TypeError("Config is read-only")
super().__delitem__(k)
def pop(self, k, v=_RaiseKeyError):
if self.read_only:
raise TypeError("Config is read-only")
if v is _RaiseKeyError:
return super().pop(k)
return super().pop(k, v)
def popitem(self):
if self.read_only:
raise TypeError("Config is read-only")
return super().popitem()
def clear(self):
if self.read_only:
raise TypeError("Config is read-only")
return super().clear()
def update(self, *args, force_update=False, **kwargs):
other = dict(*args, **kwargs)
if force_update:
super().update(other)
return
if self.read_only:
raise TypeError("Config is read-only")
if self.frozen:
for k in other:
if k not in self:
raise KeyError(f"Key '{k}' is not valid")
super().update(other)
def copy(self):
return type(self)(self)
def merge_with(self, other, **kwargs):
"""Merge this and other dict into a new config."""
return self.__class__(merge_dicts(self, other), **kwargs)
def reset(self):
"""Reset to the initial config."""
if self.read_only:
raise TypeError("Config is read-only")
self.update(copy_dict(self.init_config), force_update=True)
def dumps(self, **kwargs):
"""Pickle to a string."""
config = dict(frozen=self.frozen, read_only=self.read_only)
for k, v in self.items():
            if k in ('frozen', 'read_only'):
raise ValueError(f"Keyword argument repeated: {k}")
if isinstance(v, Pickleable):
config[k] = DumpTuple(cls=v.__class__, dumps=v.dumps(**kwargs))
else:
config[k] = v
return dill.dumps(config, **kwargs)
@classmethod
def loads(cls, dumps, **kwargs):
"""Unpickle from a string."""
config = dill.loads(dumps, **kwargs)
for k, v in config.items():
if isinstance(v, DumpTuple):
config[k] = v.cls.loads(v.dumps, **kwargs)
return cls(**config)
def __eq__(self, other):
return checks.is_deep_equal(dict(self), dict(other))
class AtomicConfig(Config, atomic_dict):
"""Config that behaves like a single value when merging."""
pass
class Configured(Pickleable):
"""Class with an initialization config.
All operations are done using config rather than the instance, which makes it easier to pickle.
!!! warning
If the instance has writable attributes or depends upon global defaults,
their values won't be copied over. Make sure to pass them explicitly to
make the saved & loaded / copied instance resilient to changes in globals."""
def __init__(self, **config):
self._config = Config(config, read_only=True)
@property
def config(self):
"""Initialization config."""
return self._config
def copy(self, **new_config):
"""Create a new instance based on the config.
!!! warning
This "copy" operation won't return a copy of the instance but a new instance
initialized with the same config."""
return self.__class__(**self.config.merge_with(new_config))
def dumps(self, **kwargs):
"""Pickle to a string."""
return self.config.dumps(**kwargs)
@classmethod
def loads(cls, dumps, **kwargs):
"""Unpickle from a string."""
return cls(**Config.loads(dumps, **kwargs))
def __eq__(self, other):
"""Objects are equal if their configs are equal."""
if type(self) != type(other):
return False
return self.config == other.config
def getattr(self, attr_chain):
"""See `vectorbt.utils.attr.deep_getattr`."""
return deep_getattr(self, attr_chain)
def update_config(self, *args, **kwargs):
"""Force-update the config."""
self.config.update(*args, **kwargs, force_update=True)
|
[
"olegpolakow@gmail.com"
] |
olegpolakow@gmail.com
|
01e300e4ad3f48c320b5035e476c2a1eefe88cf8
|
75bb245280a749fcb1a74e94a62b78e4ceed16f0
|
/message_manager.py
|
6157da0ba6e39a61f90304e3c8d370c579e628bd
|
[] |
no_license
|
cjlavan/rpi_set_wifi
|
72cee2a8e4531b52398ff28f2abec47a269202d6
|
7600d05d8c9fa0d6c96663e3e86ca33095f56e0b
|
refs/heads/master
| 2016-09-10T20:45:02.874265
| 2015-05-11T00:30:19
| 2015-05-11T00:30:19
| 35,393,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,989
|
py
|
import requests
import os
import os.path
import json
from unipath import Path
import datetime
import time
import wget
import _SETTINGS
import convert
from message_queue import message_queue
class message_manager:
def __init__(self):
self.new_messages = False
self.BASE_DIR = os.path.dirname(os.path.abspath(__file__))
self.OUTPUT_DIR = Path(self.BASE_DIR).child('audio_output')
os.chdir(self.OUTPUT_DIR)
convert.remove_files(self.OUTPUT_DIR)
self.latest_id = int(self.get_latest_msg_id_json())
self.new_messages = False
self.MessageList = []
self.load_stored_message_json()
self.DownloadMessageList = []
self.post_download_message_json()
if self.new_messages:
self.alert_new_messages()
self.download_audio()
self.new_messages = False
def update_loop(self):
while True:
convert.remove_files(self.OUTPUT_DIR)
self.post_download_message_json()
if self.new_messages:
self.alert_new_messages()
self.download_audio()
self.new_messages = False
convert.remove_files(self.OUTPUT_DIR)
time.sleep(10)
# loads the initial json file to compare to what will be downloaded
# reads the initial json file and converts it to a list of message objects
def load_stored_message_json(self):
os.chdir(self.OUTPUT_DIR)
len_mes = 0
try:
jsonData = open('stored_message_data.json')
stored_data = json.load(jsonData)
len_mes = len(stored_data)
jsonData.close()
except:
with open('stored_message_data.json', 'w') as f:
json.dump([], f)
f.close()
self.MessageList = []
print "length of len_mes: " + str(len_mes)
for x in range (0,len_mes):
m = {
'msg_id' : stored_data[x]["msg_id"],
'audio_file' : stored_data[x]["audio_file"],
'path' : self.OUTPUT_DIR,
'color' : stored_data[x]["color"],
'ts' : stored_data[x]["ts"],
'played' : stored_data[x]["played"]
}
self.MessageList.append(m)
# print "appened message list with " + str(m['msg_id'])
# posts to server reads incoming json into download message list
def post_download_message_json(self):
Downloaded_messages_json = (requests.post(_SETTINGS.url, data=json.dumps(_SETTINGS.payload))).text
Downloaded_messages_json = json.loads(Downloaded_messages_json)
settings = json.dumps(Downloaded_messages_json["settings"])
i = len(Downloaded_messages_json["data"])
with open("config.json","w") as myfile:
myfile.write(settings)
myfile.close()
lookup_marker = 0
for x in range (i-1, 0, -1):
if int(Downloaded_messages_json["data"][x]["msg_id"]) > self.latest_id:
Downloaded_messages_json["data"][x].update({
'ts': str(json.dumps(datetime.datetime.now(),
default=self.get_iso_format))
})
m = {
'msg_id' : Downloaded_messages_json["data"][x]["msg_id"],
'audio_file' : "",
'download_link' : Downloaded_messages_json["data"][x]["audio_file"],
'path' : self.OUTPUT_DIR,
'color' : Downloaded_messages_json["data"][x]["color"],
'ts' : Downloaded_messages_json["data"][x]["ts"],
'played' : 0,
}
self.new_messages = True
self.DownloadMessageList.append(m)
# downloads audio for DownloadMessageList
def download_audio(self):
os.chdir(self.OUTPUT_DIR)
i = len(self.DownloadMessageList)
for x in range (0,i):
message = self.DownloadMessageList[0]
while not self.is_okay_to_work():
time.sleep(10)
local_file_name = wget.download(message['download_link'])
message['audio_file'] = local_file_name
self.save_new_message(message)
self.DownloadMessageList.remove(message)
# checks to see if messages are being played
# if no, then saves messages that has just been downloaded
def save_new_message(self, message):
while not self.is_okay_to_work():
time.sleep(10)
convert.convert(self.OUTPUT_DIR)
self.MessageList.append(message)
if int(message['msg_id']) > self.latest_id:
self.latest_id = int(message['msg_id'])
self.write_message_data()
def write_message_data(self):
os.chdir(self.OUTPUT_DIR)
        while not self.is_okay_to_work():
time.sleep(10)
with open("stored_message_data.json","w") as output_file:
output_string = json.dumps(self.MessageList)
output_file.write(output_string)
output_file.close()
self.set_latest_msg_id_json()
# helper methods
# returns iso format time stamp
def get_iso_format(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError, 'Object of type %s with value of %s is not JSON serializable' \
% (type(obj), repr(obj))
def alert_new_messages(self):
os.chdir(self.OUTPUT_DIR)
with open('new_message_status.json',"w") as f:
json.dump({'new_info':1}, f)
f.close()
def get_status_json(self):
os.chdir(self.OUTPUT_DIR)
try:
with open('player_status.json') as f:
data = json.load(f)
f.close()
return data['status']
except:
with open('player_status.json',"w") as f:
json.dump({'status':0}, f)
f.close()
return 0
def get_latest_msg_id_json(self):
os.chdir(self.OUTPUT_DIR)
try:
with open('latest_id_status.json') as f:
data = json.load(f)
f.close()
return data['latest_msg_id']
except:
with open('latest_id_status.json',"w") as f:
json.dump({'latest_msg_id':0}, f)
f.close()
return 0
def set_latest_msg_id_json(self):
with open('latest_id_status.json',"w") as f:
json.dump({'latest_msg_id':self.latest_id}, f)
f.close()
def is_okay_to_work(self):
os.chdir(self.OUTPUT_DIR)
if self.get_status_json() == 0:
return True
return False
|
[
"you@example.com"
] |
you@example.com
|
83ffb508c04c0a8d5c6bd89beb8e65a071ecc567
|
5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8
|
/buildout-cache/eggs/sc.photogallery-1.0b1-py2.7.egg/sc/photogallery/browser/view.py
|
105b30e6969b968feec9c890f33eeb3a1d0d8455
|
[] |
no_license
|
renansfs/Plone_SP
|
27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a
|
8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5
|
refs/heads/master
| 2021-01-15T15:32:43.138965
| 2016-08-24T15:30:19
| 2016-08-24T15:30:19
| 65,313,812
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,418
|
py
|
# -*- coding: utf-8 -*-
from plone import api
from plone.dexterity.browser.view import DefaultView
from plone.memoize import forever
from plone.memoize.instance import memoizedproperty
from sc.photogallery.config import HAS_ZIPEXPORT
from sc.photogallery.interfaces import IPhotoGallerySettings
from sc.photogallery.utils import last_modified
from sc.photogallery.utils import PhotoGalleryMixin
from zope.component import getMultiAdapter
import os
if HAS_ZIPEXPORT:
from ftw.zipexport.generation import ZipGenerator
from ftw.zipexport.interfaces import IZipRepresentation
class View(DefaultView, PhotoGalleryMixin):
"""Slideshow view for Photo Gallery content type."""
def id(self):
return id(self)
@memoizedproperty
def results(self):
return self.context.listFolderContents()
@property
def is_empty(self):
return len(self.results) == 0
def image(self, obj, scale='large'):
"""Return an image scale if the item has an image field.
:param obj: [required]
:type obj: content type object
:param scale: the scale to be used
:type scale: string
"""
scales = obj.restrictedTraverse('@@images')
return scales.scale('image', scale)
def localized_time(self, obj, long_format=False):
"""Return the object time in a user-friendly way.
:param item: [required]
:type item: content type object
:param long_format: show long date format if True
:type scale: string
"""
return api.portal.get_localized_time(obj.Date(), long_format)
@property
def can_download(self):
"""Check if original images can be explicitly downloaded, that is,
if downloading is enabled globally and the current object allows it.
"""
record = IPhotoGallerySettings.__identifier__ + '.enable_download'
enabled_globally = api.portal.get_registry_record(record)
allow_download = self.context.allow_download
return enabled_globally and allow_download
def img_size(self, item):
return '{0:.1f} MB'.format(item.size() / float(1024 * 1024))
@property
def can_zipexport(self):
"""Check if original images can be downloaded as a ZIP file,
that is, if ftw.zipexport is installed and downloading is
allowed in the current object.
"""
return HAS_ZIPEXPORT and self.can_download
@property
def last_modified(self):
return last_modified(self.context)
def zip_url(self):
base_url = self.context.absolute_url()
url = '{0}/@@zip/{1}/{2}.zip'.format(
base_url, str(self.last_modified), self.context.getId())
return url
@forever.memoize
def _zip_size(self, last_modified=None):
if not HAS_ZIPEXPORT:
return '{0:.1f} MB'.format(0)
with ZipGenerator() as generator:
for obj in [self.context, ]:
repre = getMultiAdapter(
(obj, self.request), interface=IZipRepresentation)
for path, pointer in repre.get_files():
generator.add_file(path, pointer)
zip_file = generator.generate()
size = os.stat(zip_file.name).st_size
return '{0:.1f} MB'.format(size / float(1024 * 1024))
def zip_size(self):
return self._zip_size(self.last_modified)
|
[
"renansfs@gmail.com"
] |
renansfs@gmail.com
|
ca1535c9186ea6b4058c35374d2cd992af6df474
|
a6106cedc42dcab94ccc4ee6d681372d2246ce5e
|
/python/활용자료/예제/07/ex7-7.py
|
12dd6228f533058e5632e76aac1e1c9cac3cc731
|
[] |
no_license
|
leemyoungwoo/pybasic
|
a5a4b68d6b3ddd6f07ff84dc8df76da02650196f
|
481075f15613c5d8add9b8c4d523282510d146d2
|
refs/heads/master
| 2022-10-08T19:57:26.073431
| 2020-06-15T06:50:02
| 2020-06-15T06:50:02
| 267,502,565
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
def circle_area(r) :
area = r * r * 3.14
return area
radius = int(input('Enter the radius of the circle: '))
result = circle_area(radius)
print('Radius: %d, circle area: %.2f' % (radius, result))
radius = int(input('Enter the radius of the circle: '))
result = circle_area(radius)
print('Radius: %d, circle area: %.2f' % (radius, result))
|
[
"mwlee2587@gmail.com"
] |
mwlee2587@gmail.com
|
bf4ee2d02a325b438c10d2b86a54a4028c965b9b
|
3a6bf7337126c8b1883e76cf1f46cec0886f1447
|
/rssdl/rss.py
|
0280fcd0436b89dd40e7f313f365f2b62e554d0f
|
[
"Apache-2.0"
] |
permissive
|
egustafson/rssdl
|
fc4265edd9138a54005b98bdfc1ea5dfb25707d5
|
2b42d8aa4a0d03d31629d8446e7336c6c1348e58
|
refs/heads/master
| 2020-12-24T06:57:21.265487
| 2017-09-15T20:59:58
| 2017-09-15T20:59:58
| 58,835,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
""" RSS DOM for RSSDL
"""
import feedparser
class Feed(object):
def __init__(self, href):
self._href = href
self._d = None
def result(self):
return self._d
def parse(self):
self._d = feedparser.parse(self._href)
return self._d.status if 'status' in self._d else 0
def data(self):
return self._d
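# Hedged usage sketch (added; the URL is illustrative):
# feed = Feed("https://example.com/rss.xml")
# status = feed.parse()  # HTTP status, or 0 if feedparser reports none
# entries = feed.data().entries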
## Local Variables:
## mode: python
## End:
|
[
"eg-git@elfwerks.org"
] |
eg-git@elfwerks.org
|
b6719129deb3753fda7d1da2bf054ef2b0b7086b
|
bb4e132c5978a1edc2ef4fb78d1bb5a793809408
|
/dral_text/migrations/0005_auto_20180421_2332.py
|
2011d67aa3e519a34a52ebb3021d281bc28eafa0
|
[
"MIT"
] |
permissive
|
n-romanova/dral-django
|
7335e581f1fffe0e2d42614678010ead5c9202f3
|
4af92a46e207cc8a427d2f8eafe688c61a73d39e
|
refs/heads/master
| 2020-08-31T03:11:37.199516
| 2019-10-03T17:19:05
| 2019-10-03T17:19:05
| 218,569,974
| 0
| 0
| null | 2019-10-30T16:15:47
| 2019-10-30T16:15:47
| null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
# Generated by Django 2.0 on 2018-04-21 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dral_text', '0004_auto_20180421_2231'),
]
operations = [
migrations.AddField(
model_name='occurence',
name='paraphrase',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='occurence',
name='replace',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='occurence',
name='zero',
field=models.BooleanField(default=False),
),
]
|
[
"geoffroy.noel@kcl.ac.uk"
] |
geoffroy.noel@kcl.ac.uk
|
803f3401202b20729ba63a9968b76cfb69eb1b03
|
c558d1da1aedf377e6cb6cf66c5136cfb7c32167
|
/python-new-trunk/sfapi2/sflib/runWithAnalysis.py
|
978d569bb74522ab578050a8fad567a9a6a3a256
|
[
"CC0-1.0"
] |
permissive
|
raychorn/svn_molten-magma
|
46a8da015844b52fd2fc777225f11b1891b0000a
|
8aa2ff2340707eecae6514943e86f5afba9cd54a
|
refs/heads/main
| 2022-12-26T15:45:24.851522
| 2020-10-15T16:52:04
| 2020-10-15T16:52:04
| 304,358,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,864
|
py
|
import os, sys
import traceback
from vyperlogix import misc
from vyperlogix.misc import ioTimeAnalysis
import types
import SfStats
sf_stats = SfStats.SfStats()
def dummy():
pass
def init_AnalysisDataPoint(name):
ioTimeAnalysis.initIOTime(name)
def begin_AnalysisDataPoint(name):
ioTimeAnalysis.ioBeginTime(name)
def end_AnalysisDataPoint(name):
ioTimeAnalysis.ioEndTime(name)
def count_query():
sf_stats.count_query()
def runWithAnalysis(func=dummy,args=[],_ioElapsedTime=dummy):
caller = misc.callersName()
ioTimeAnalysis.initIOTime('%s::%s' % (__name__,caller))
ioTimeAnalysis.ioBeginTime('%s::%s' % (__name__,caller))
val = None
try:
if (len(args) == 0):
val = func()
else:
val = func(args)
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
print >>sys.stderr, '(%s) Reason: %s' % (misc.funcName(),info_string)
ioTimeAnalysis.ioEndTime('%s::%s' % (__name__,caller))
ioTimeAnalysis.ioTimeAnalysisReport()
_et = 0
_key_list = [k for k in ioTimeAnalysis._ioTime.keys() if (k.find('SOQL') > -1)]
for _key in _key_list:
_et += (0 if (len(_key) == 0) else ioTimeAnalysis._ioTime[_key][0])
if (_et > 0):
_soql_per_sec = sf_stats.query_count / _et
if (_soql_per_sec > 0):
_ms_per_soql = 1000 / _soql_per_sec
else:
if (sf_stats.query_count == 0):
print >>sys.stderr, '(%s) 1.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide Zero by some number at this time; recommend using the functions that count queries from this module.' % (misc.funcName())
            else:
print >>sys.stderr, '(%s) 1.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide by Zero at this time.' % (misc.funcName())
_ms_per_soql = -1
else:
        print >>sys.stderr, '(%s) 1.0 Cannot correctly report ms per SOQL because there is no reported elapsed time from SOQL activities.' % (misc.funcName())
try:
v_ioElapsedTime = float(ioTimeAnalysis._ioElapsedTime)
if (v_ioElapsedTime > 0):
soql_per_sec = sf_stats.query_count / v_ioElapsedTime
if (soql_per_sec > 0):
ms_per_soql = 1000 / soql_per_sec
else:
print >>sys.stderr, '(%s) 2.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide by Zero at this time.' % (misc.funcName())
ms_per_soql = -1
t_analysis_1 = '%-10.2f' % soql_per_sec
t_analysis_2 = '%-10.4f' % ms_per_soql
print >>sys.stdout, '(Apparent) SOQL per second = %s or %s ms per SOQL.' % (t_analysis_1.strip(),t_analysis_2.strip())
if (_et > 0):
_t_analysis_1 = '%-10.2f' % _soql_per_sec
_t_analysis_2 = '%-10.4f' % _ms_per_soql
print >>sys.stdout, '(Actual) SOQL per second = %s or %s ms per SOQL.' % (_t_analysis_1.strip(),_t_analysis_2.strip())
else:
print >>sys.stderr, 'Unable to perform Actual SOQL per second analysis because there is no reported elapsed time from SOQL activities.'
else:
print >>sys.stderr, 'Unable to perform Actual SOQL per second analysis because _ioElapsedTime is %4.2f.' % (v_ioElapsedTime)
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
print >>sys.stderr, '(%s) Reason: %s' % (misc.funcName(),info_string)
print >>sys.stdout, 'SOQL Count=%d' % sf_stats.query_count
return val
|
[
"raychorn@gmail.com"
] |
raychorn@gmail.com
|
9301a8e19c39fa597a374ec83ca5ac9308d25d56
|
e9032e64138d7b9dd90a330dfe4588e2c83f6667
|
/google/cloud/compute_v1/services/url_maps/pagers.py
|
1a5d42fc43ca21958c622ecbbf65987afbee0aa4
|
[
"Apache-2.0"
] |
permissive
|
Ctfbuster/python-compute
|
6cff2418969009794c3fadadc4c45e20d7b40509
|
7a9e8324e08c46a93050908760b2b5aca054a863
|
refs/heads/main
| 2023-08-26T12:37:52.363526
| 2021-10-04T15:34:37
| 2021-10-04T15:34:37
| 412,884,620
| 0
| 0
|
Apache-2.0
| 2021-10-02T18:49:05
| 2021-10-02T18:49:03
| null |
UTF-8
|
Python
| false
| false
| 5,578
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.UrlMapsAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.UrlMapsAggregatedList],
request: compute.AggregatedListUrlMapsRequest,
response: compute.UrlMapsAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListUrlMapsRequest):
The initial request object.
response (google.cloud.compute_v1.types.UrlMapsAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListUrlMapsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.UrlMapsAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.UrlMapsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.UrlMapsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.UrlMapList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.UrlMapList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.UrlMapList],
request: compute.ListUrlMapsRequest,
response: compute.UrlMapList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListUrlMapsRequest):
The initial request object.
response (google.cloud.compute_v1.types.UrlMapList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListUrlMapsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.UrlMapList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.UrlMap]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
[
"noreply@github.com"
] |
Ctfbuster.noreply@github.com
|
e40a9f4648944ecbb580038b5267b736e6a1cc7a
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/TmasgxCm6iz3gTGHk_18.py
|
f0c40f2ac8ab47faef818d3f66b85e4ebaed9fb1
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
"""
Write a function that returns the **length of the shortest contiguous
sublist** whose sum of all elements **strictly exceeds** `n`.
### Examples
min_length([5, 8, 2, -1, 3, 4], 9) ➞ 2
min_length([3, -1, 4, -2, -7, 2], 4) ➞ 3
# Shortest sublist whose sum exceeds 4 is: [3, -1, 4]
min_length([1, 0, 0, 0, 1], 1) ➞ 5
min_length([0, 1, 1, 0], 2) ➞ -1
### Notes
* The sublist should be composed of **contiguous elements** from the original list.
* If no such sublist exists, return `-1`.
"""
def min_length(lst, n):
for i in range(1, len(lst) + 1):
v = [lst[j:j + i] for j in range(0, len(lst) - i + 1)]
for k in v:
if sum(k) > n:
return i
return -1
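# Usage check (added), matching the docstring examples:
# print(min_length([5, 8, 2, -1, 3, 4], 9))  # 2
# print(min_length([0, 1, 1, 0], 2))         # -1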
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
349da3c46b25c597a4fea4b6ffed199281d111b3
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/metrics/tensorflow/__init__.py
|
5eb861df8a3c94200471f2efbde2cb138194a48e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 449
|
py
|
from vega.common.class_factory import ClassFactory
from .metrics import Metrics
ClassFactory.lazy_register("vega.metrics.tensorflow", {
"segmentation_metric": ["trainer.metric:IoUMetric"],
"classifier_metric": ["trainer.metric:accuracy"],
"sr_metric": ["trainer.metric:PSNR", "trainer.metric:SSIM"],
"forecast": ["trainer.metric:MSE", "trainer.metric:RMSE"],
"r2score": ["trainer.metric:r2score", "trainer.metric:R2Score"],
})
|
[
"zhangjiajin@huawei.com"
] |
zhangjiajin@huawei.com
|
d9d15c7369252080d67b4a3db18eda581179e3b9
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/contest/weekly-contest-266/5919.0_Vowels_of_All_Substrings.py
|
836bcb1c21e6f95554a3972b51237f0616b166fa
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
'''
41 / 51 test cases passed
Status: Time Limit Exceeded
brute force
T: O(N^2)
S: O(N)
'''
class Solution:
def countVowels(self, word: str) -> int:
N = len(word)
pre = [0] * (N + 1)
for i, ch in enumerate(word):
if ch in 'aeiou':
pre[i + 1] = pre[i] + 1
else:
pre[i + 1] = pre[i]
ans = 0
for i in range(1, len(word) + 1):
for j in range(i):
ans += pre[i] - pre[j]
return ans
'''
prefix vowel counts for "aba": pre = [0, 1, 1, 2]
'''
'''
prefix sum over a prefix sum
(optimized from the two-level brute force above)
Accepted
296 ms, 23.8 MB, Python3, 2021/11/07 19:48
T: O(3N)
S: O(2N)
ref:
https://leetcode-cn.com/problems/vowels-of-all-substrings/solution/cqian-zhui-he-qian-zhui-he-by-answerer-360n/
'''
class Solution:
def countVowels(self, word: str) -> int:
N = len(word)
pre = [0] * (N + 1)
for i, ch in enumerate(word):
if ch in 'aeiou':
pre[i + 1] = pre[i] + 1
else:
pre[i + 1] = pre[i]
# presum of presum
prepre = [0] * (N + 1)
for i in range(1, N + 1):
prepre[i] = prepre[i - 1] + pre[i]
ans = 0
for i in range(N):
ans += pre[i + 1] * (i + 1) - prepre[i]
return ans
'''
multiplication principle
T: O(N)
S: O(1)
Runtime: 92 ms, beats 100.00% of Python3 submissions
Memory: 15.2 MB, beats 100.00% of Python3 submissions
Test cases passed: 51 / 51
'''
class Solution:
def countVowels(self, word: str) -> int:
ans, N = 0, len(word)
for i, ch in enumerate(word):
if ch in 'aeiou':
ans += (i + 1) * (N - i)
return ans
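# Why (i + 1) * (N - i) works (added note): the character at index i is inside
# a substring iff the start is in 0..i ((i + 1) options) and the end is in
# i..N-1 ((N - i) options), so each vowel contributes (i + 1) * (N - i) to the
# total. Check on "aba" (N = 3): 'a'@0 -> 1*3, 'a'@2 -> 3*1, total 6.
# print(Solution().countVowels("aba"))  # 6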
|
[
"838255715@qq.com"
] |
838255715@qq.com
|
958e5eceba3a97c5f73ae5f97c2f2d507c3228c4
|
8f8498bb6f56b19d45a1989c8113a077348c0a02
|
/백준/최소신장트리/행성 터널 - 프림.py
|
1b9cd115b4de9658e77fc0d211d97f40b0242f95
|
[] |
no_license
|
gjtjdtn201/practice
|
a09b437c892b0b601e156c09cb1f053b52fab11b
|
ea45582b2773616b2b8f350b927559210009d89f
|
refs/heads/master
| 2021-01-01T13:29:46.640740
| 2020-11-28T00:55:37
| 2020-11-28T00:55:37
| 239,299,485
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
import sys
sys.stdin = open('행성 터널.txt')  # redirect stdin to a local test file
input = sys.stdin.readline
from heapq import heappush, heappop
N = int(input())
star = []
for i in range(N):
x, y, z = map(int, input().split())
star.append((x, y, z, i))
edges = [[] for _ in range(N)]
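# Key observation: an MST over per-axis distances only needs edges between
# neighbors after sorting the planets along each coordinate, so 3*(N-1)
# candidate edges suffice instead of all O(N^2) pairs.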
for i in range(3):
star.sort(key=lambda x: x[i])
for j in range(N-1):
n1, n2 = star[j][3], star[j+1][3]
cost = abs(star[j][i]-star[j+1][i])
edges[n1].append((cost, n2))
edges[n2].append((cost, n1))
mst = [False]*N
ans = 0
q = []
heappush(q, (0, 0))
while q:
cost, node = heappop(q)
if mst[node]:
continue
ans += cost
mst[node] = True
for nxt_cost, nxt in edges[node]:
if mst[nxt]:
continue
heappush(q, (nxt_cost, nxt))
print(ans)
|
[
"gjtjdtn201@naver.com"
] |
gjtjdtn201@naver.com
|
05bbe819c737091fa9d1aff4a383a5ca8734dd1c
|
461cf2fd99330558ec96bf551cb1703e627868a0
|
/get_pages.py
|
b050961df0e6de3f1240f8bc48e06c5237fb092d
|
[] |
no_license
|
abelsonlive/bcni-pra
|
408f72ba369ca164c5efb4442ebd2eaeb2c8dd78
|
fa51ae45382c45f15fe861060d6e90cc00c27590
|
refs/heads/master
| 2021-01-20T11:50:13.432486
| 2013-04-29T14:45:04
| 2013-04-29T14:45:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
import selenium
from selenium import webdriver
import time
URL = "http://secure.phila.gov/paplpublicweb/GridView.aspx"
b = webdriver.Firefox()
b.get(URL)
i = 2
while i < 806:
    print i
    text = b.page_source.encode('utf-8')
    fp = "raw_pages/page%s.txt" % (i-1)
    print "writing", fp, "to file"
    with open(fp, "w") as text_file:
        text_file.write(text)
    try:
        next_page = b.find_element_by_xpath("//span[contains(text(),'%s')]" % (i))
    except (selenium.common.exceptions.NoSuchElementException,
            selenium.common.exceptions.StaleElementReferenceException):
        # exceptions must be caught as a tuple; `or` only kept the first class
        print "ERROR ERROR!!!"
        print "trying again"
        time.sleep(2)
        continue
    next_page.click()
    time.sleep(2)
    i += 1
b.close()
|
[
"brianabelson@gmail.com"
] |
brianabelson@gmail.com
|
a73c7308d19a2723bbdb73a89ceca2790e0ddbea
|
3a10cda6dbdeee36b24591ada2c551ff2f179d19
|
/app/models/hour.py
|
1a14852b19fe5a1765504a13f12ccb678185c99c
|
[] |
no_license
|
ttecles/weather_backend
|
a0d0c6317c3fde6c4ac6df17c7c9a5ea59299302
|
b4b2886a3f0a2b6b502bd38d0b98f017b01ef6b0
|
refs/heads/master
| 2023-02-19T12:31:52.295041
| 2021-01-26T08:56:34
| 2021-01-26T08:56:34
| 330,950,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
from app import db
class Hour(db.Model):
__tablename__ = 'Hour'
locality_id = db.Column(db.Integer, db.ForeignKey('Locality.id'), primary_key=True, nullable=False)
date = db.Column(db.Date(), primary_key=True) # "2021-1-15"
hour_data = db.Column(db.Time(), primary_key=True) # "13:00",
temperature = db.Column(db.Integer) # -1,
icon = db.Column(db.String(10)) # "6",
text = db.Column(db.String(80)) # "Mostly cloudy",
humidity = db.Column(db.Integer) # 89,
wind = db.Column(db.Integer) # 4,
wind_direction = db.Column(db.String(30)) # "Northwest",
icon_wind = db.Column(db.String(10)) # "NO",
pressure = db.Column(db.Integer) # 1016,
locality = db.relationship("Locality", backref="hour_forecast")
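
# Illustrative usage sketch (ours; assumes an app context, an existing
# Locality with id=1, and a `datetime` import):
# import datetime
# h = Hour(locality_id=1, date=datetime.date(2021, 1, 15),
#          hour_data=datetime.time(13, 0), temperature=-1, humidity=89)
# db.session.add(h)
# db.session.commit()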
|
[
"joan.prat@knowtrade.eu"
] |
joan.prat@knowtrade.eu
|
8517ce3f417f877036d4b1f5d9af879c97c0a703
|
e02506da0c661c8241fed00efdd0d6b2f8b147df
|
/textattack/attack_recipes/seq2sick_cheng_2018_blackbox.py
|
8af6d15138de6bc314511c851970b1c226990123
|
[
"MIT"
] |
permissive
|
SatoshiRobatoFujimoto/TextAttack
|
2592a828f128fd8bf0b8ce5578e9488df5b2ac97
|
a809a9bddddff9f41750949e26edde26c8af6cfa
|
refs/heads/master
| 2022-07-11T02:10:24.536157
| 2020-05-14T13:29:44
| 2020-05-14T13:29:44
| 263,941,825
| 1
| 0
|
MIT
| 2020-05-14T14:43:47
| 2020-05-14T14:43:46
| null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
"""
Cheng, Minhao, et al.
Seq2Sick: Evaluating the Robustness of Sequence-to-Sequence Models with
Adversarial Examples
ArXiv, abs/1803.01128.
This is a greedy re-implementation of the seq2sick attack method. It does
not use gradient descent.
"""
from textattack.constraints.overlap import LevenshteinEditDistance
from textattack.goal_functions import NonOverlappingOutput
from textattack.search_methods import GreedyWordSwapWIR
from textattack.transformations import WordSwapEmbedding
def Seq2SickCheng2018BlackBox(model, goal_function='non_overlapping'):
#
# Goal is non-overlapping output.
#
goal_function = NonOverlappingOutput(model)
# @TODO implement transformation / search method just like they do in
# seq2sick.
transformation = WordSwapEmbedding(max_candidates=50)
    #
    # In these experiments, we hold the maximum difference
    # on edit distance (ϵ) to a constant 30 for each sample.
    #
    # (wire in the previously unused LevenshteinEditDistance import)
    constraints = [LevenshteinEditDistance(30)]
    #
    # Greedily swap words with "Word Importance Ranking".
    #
    attack = GreedyWordSwapWIR(goal_function, transformation=transformation,
                               constraints=constraints, max_depth=10)
return attack
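
# Illustrative invocation (ours): given a textattack-wrapped seq2seq `model`,
# attack = Seq2SickCheng2018BlackBox(model)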
|
[
"jxmorris12@gmail.com"
] |
jxmorris12@gmail.com
|
f9791b2b58c0ed0961046f7b8a0dd4bb73d8450a
|
5410700e83210d003f1ffbdb75499062008df0d6
|
/leetcode/tree2Str.py
|
61ae648b31c803481fe3db7769a6109de4b7ac74
|
[] |
no_license
|
lilyandcy/python3
|
81182c35ab8b61fb86f67f7796e057936adf3ab7
|
11ef4ace7aa1f875491163d036935dd76d8b89e0
|
refs/heads/master
| 2021-06-14T18:41:42.089534
| 2019-10-22T00:24:30
| 2019-10-22T00:24:30
| 144,527,289
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
class Solution:
def tree2str(self, t):
"""
:type t: TreeNode
:rtype: str
"""
        if t is None:
            return ""
        if t.left is None and t.right is None:
            return str(t.val)
        elif t.left is None:
            return str(t.val) + "()" + "(" + self.tree2str(t.right) + ")"
        elif t.right is None:
            return str(t.val) + "(" + self.tree2str(t.left) + ")"
        else:
            return str(t.val) + "(" + self.tree2str(t.left) + ")" + "(" + self.tree2str(t.right) + ")"
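
# Illustrative checks (ours; assume a standard TreeNode with val/left/right):
# the tree [1,2,3,4] serializes to "1(2(4))(3)", and [1,2,3,null,4] to
# "1(2()(4))(3)", where the "()" placeholder keeps the structure unambiguous.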
|
[
"myyan_yan@msn.com"
] |
myyan_yan@msn.com
|
7cc2c7507b75fcd535a7e8e9c9b0457f48bd6414
|
e0b6f5bd451aa8af3273fbc948799637681342e1
|
/scripts/wm_representation/functions/IEM/Controls/trial_by_trial/trainT_testT_wm3_shuffles_refs.py
|
99a1066c8955feb220ec3514ad753bea566ad476
|
[] |
no_license
|
davidbestue/encoding
|
6b304f6e7429f94f97bd562c7544d1fdccf7bdc1
|
c27319aa3bb652b3bfc6b7340044c0fda057bc62
|
refs/heads/master
| 2022-05-05T23:41:42.419252
| 2022-04-27T08:34:52
| 2022-04-27T08:34:52
| 144,248,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,890
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 18:24:32 2019
@author: David Bestue
"""
#######
####### In this analysis:
####### I am doing the reconstruction training in the delay period (leaving out the test trial's session) and testing on each trial, aligning each TR to a randomly chosen reference angle (shuffled-references control).
#######
############# Add to sys path the path where the tools folder is
import sys, os
#path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) ### same directory or one back options
path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir)) ### same directory or one back options
sys.path.insert(1, path_tools)
from tools import *
############# Filenames for saving the results.
path_save_reconst_shuffs ='/home/david/Desktop/Reconstructions/IEM/recs_shuffs_references_IEM_trainT_testT_wm3.npy'
############# Testing options
decoding_thing = 'T_alone' #'dist_alone' 'T_alone'
############# Training options
training_item = 'T_alone' #'dist_alone' 'T_alone'
cond_t = '1_7' #'1_7' '2_7'
Distance_to_use = 'mix' #'close' 'far'
training_time= 'delay' #'stim_p' 'delay' 'respo'
tr_st=4
tr_end=6
############# Elements for the loop
Conditions=['1_0.2', '1_7', '2_0.2', '2_7']
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001']
brain_regions = ['visual','ips', 'pfc', 'broca']
ref_angle=180
Reconstructions_ = [] ## subjects x brain regiond --> ntrials x 16 x 720 matrix
############# Analysis
#############
for Subject in Subjects:
for Brain_region in brain_regions:
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
activity, behaviour = process_wm_task(wm_fmri_paths, masks, wm_beh_paths, nscans_wm=nscans_wm)
behaviour['Condition'] = behaviour['Condition'].replace(['1.0_0.2', '1.0_7.0', '2.0_0.2','2.0_7.0' ], ['1_0.2', '1_7', '2_0.2', '2_7'])
behaviour['brain_region'] = Brain_region
###
###
print(Subject, Brain_region)
Reconstructed_trials=[] ## ntrials x 16 x 720 matrix
###
###
#angx = behaviour[decoding_thing].values
#angles_shuffled = random.sample( list(angx), len(angx) )
###
###
for trial in range(len(behaviour)):
activity_trial = activity[trial,:,:]
beh_trial = behaviour.iloc[trial,:]
session_trial = beh_trial.session_run
###
### Training
###
if cond_t == '1_7':
boolean_trials_training = np.array(behaviour['delay1']==7) * np.array(behaviour['order']==1) * np.array(behaviour['session_run']!=session_trial)
elif cond_t == '2_7':
boolean_trials_training = np.array(behaviour['delay1']==7) * np.array(behaviour['order']==2) * np.array(behaviour['session_run']!=session_trial)
#
activity_train_model = activity[boolean_trials_training, :, :]
activity_train_model_TRs = np.mean(activity_train_model[:, tr_st:tr_end, :], axis=1)
behavior_train_model = behaviour[boolean_trials_training]
training_angles = behavior_train_model[['T', 'NT1', 'NT2']].values
#
Weights_matrix, Interc = Weights_matrix_LM_3items(activity_train_model_TRs, training_angles)
Weights_matrix_t = Weights_matrix.transpose()
###
### Testing
###
Reconstructed_TR = [] ## 16 x 720 matrix
#
for TR_ in range(nscans_wm):
activity_TR = activity_trial[TR_, :]
angle_trial = random.choice([0,90,180,270])
Inverted_encoding_model = np.dot( np.dot ( np.linalg.pinv( np.dot(Weights_matrix_t, Weights_matrix ) ), Weights_matrix_t), activity_TR)
#Inverted_encoding_model_pos = Pos_IEM2(Inverted_encoding_model)
IEM_hd = ch2vrep3(Inverted_encoding_model) #36 to 720
to_roll = int( (ref_angle - angle_trial)*(len(IEM_hd)/360) ) ## degrees to roll
IEM_hd_aligned=np.roll(IEM_hd, to_roll) ## roll this degree ##vector of 720
Reconstructed_TR.append(IEM_hd_aligned)
##
resconstr_trial = np.array(Reconstructed_TR)
Reconstructed_trials.append(resconstr_trial)
##
##
Reconstructions_.append(Reconstructed_trials)
########
final_rec = np.array(Reconstructions_)
np.save(path_save_reconst_shuffs, final_rec)
############# Options for training times; the TRs used for training differ accordingly.
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='stim_p':
# tr_st=3
# tr_end=4
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='respo':
# if decoding_thing=='Target':
# tr_st=8
# tr_end=9
# elif decoding_thing=='Distractor':
# tr_st=11
# tr_end=12
|
[
"davidsanchezbestue@hotmail.com"
] |
davidsanchezbestue@hotmail.com
|
4f714d6172a078dceda6b04a5faec6a75aeec621
|
dc63e528012fb2f3e15b73e05c924236760d01b1
|
/cloudify_azure/resources/compute/virtualmachine/virtualmachine_utils.py
|
4a67d65a4df9ff6e52f6dd881668444d4f9e6848
|
[
"Apache-2.0"
] |
permissive
|
cloudify-cosmo/cloudify-azure-plugin
|
515b6285b63c2a01ae4d666957541a1f08472410
|
361c48bc4abe38cf57354e8d36839137462ad345
|
refs/heads/master
| 2023-08-21T14:23:06.673284
| 2023-07-30T10:44:39
| 2023-07-30T10:44:39
| 36,666,947
| 4
| 14
|
Apache-2.0
| 2023-07-30T10:44:41
| 2015-06-01T14:42:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,521
|
py
|
# #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
def check_if_configuration_changed(ctx, update_payload, current_vm):
for prop in ['location', 'tags', 'plan', 'availability_set',
'eviction_policy', 'billing_profile', 'priority',
'hardware_profile']:
update_property_value = update_payload.get(prop)
current_vm_property_value = current_vm.get(prop)
if update_property_value and ordered(
update_property_value) != ordered(current_vm_property_value):
ctx.logger.info("{prop} changed.".format(prop=prop))
ctx.logger.info("update payload: {content}.".format(
content=update_property_value))
ctx.logger.info("current configuration: {content}.".format(
content=current_vm_property_value))
return True
for prop in ['os_profile', 'storage_profile', 'network_profile']:
if prop == 'network_profile' and update_payload.get(prop):
update_property_value = update_payload.get(prop).as_dict()
else:
update_property_value = update_payload.get(prop, {})
current_vm_property_value = current_vm.get(prop, {})
if diff_dictionaries(update_property_value, current_vm_property_value):
ctx.logger.info("{prop} changed.".format(prop=prop))
return True
return False
def diff_dictionaries(update_dict, current_conf_dict):
"""
Returns True if update_dict has changes in a key that doesn't appear in
current_conf_dict.
current_conf_dict can have additional keys and its not considered as a
diff.
"""
for key in update_dict:
if isinstance(update_dict.get(key), dict):
res = diff_dictionaries(update_dict.get(key),
current_conf_dict.get(key, {}))
if res:
return True
elif ordered(update_dict.get(key)) != ordered(
current_conf_dict.get(key)):
ctx.logger.info(
'Changes found in diff_dictionaries: key={key}\n'.format(
key=key))
ctx.logger.info(
'update_dict: {}'.format(ordered(update_dict.get(key))))
ctx.logger.info(
'current_conf_dict: {}'.format(ordered(
current_conf_dict.get(key))))
return True
return False
def ordered(obj):
"""
This function will recursively sort any lists it finds
(and convert dictionaries to lists of (key, value) pairs so that they're
orderable)
"""
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
if isinstance(obj, str):
return obj.lower()
if isinstance(obj, (int, float)):
return str(obj)
else:
return obj
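
# Illustrative behaviour of ordered() (our example, not part of the plugin):
# ordered({'b': [2, 1], 'a': 'X'}) == [('a', 'x'), ('b', ['1', '2'])]
# so the comparisons above ignore list order, dict key order, string case,
# and int/float vs str representation.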
|
[
"noreply@github.com"
] |
cloudify-cosmo.noreply@github.com
|
8eb20a63cf9ae7debe25c9b008d788862e5ee7da
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/78/usersdata/171/41777/submittedfiles/divisores.py
|
701949f8b9cb8bf36079078eda939d27b7fe7166
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# -*- coding: utf-8 -*-
n=int(input('digite n:'))
a=int(input('digite a:'))
b=int(input('digite b:'))
f=a*b
for i in range(1,n+1,1):
    print(f)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ecf2e202398d9c58d9d5bcb9846dbebaf58a02aa
|
0ccab2965458454d6a4802b47d33310e43c10d8f
|
/classes/student.py
|
c9e7d33683deae9b858dc5fb04d7034fd00d39ca
|
[] |
no_license
|
jazib-mahmood-attainu/Ambedkar_Batch
|
11e66125647b3b348d4567862f8fc20a3457b2f0
|
c99be9a401b8d00f6ca47398f48e90ead98f4898
|
refs/heads/main
| 2023-08-01T13:13:43.357769
| 2021-09-25T03:54:27
| 2021-09-25T03:54:27
| 390,405,238
| 16
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
class Student:
def __init__(self,roll,name,age):
self.roll = roll
self.name = name
self.age = age
def reads(self):
print(self.name,"is reading")
preeti = Student(10,"Preeti",24)
print(preeti.name)
print(preeti.roll)
print(preeti.age)
preeti.reads()
print("**********")
sapna = Student(11,"Sapna",19)
print(sapna.name)
print(sapna.roll)
print(sapna.age)
sapna.reads()
|
[
"jazib.prof@gmail.com"
] |
jazib.prof@gmail.com
|
e2230aed752c8a73948aecc725580d22f370446b
|
1e9fed88ce4a623970f7e53143753a170d4bdcda
|
/aat/tests/test_strategy.py
|
37e4f8123735a0e70663ec060bc26eda308854e1
|
[
"Apache-2.0"
] |
permissive
|
krusty45/aat
|
06dedbfe0abaf76c4a584ad441dc7badd093a939
|
a14b652f7ff90761d0e1198a85d8fc02efeff0eb
|
refs/heads/master
| 2020-06-24T09:34:30.981326
| 2019-07-09T19:34:22
| 2019-07-09T19:34:22
| 198,929,483
| 1
| 0
|
Apache-2.0
| 2019-07-26T02:07:57
| 2019-07-26T02:07:56
| null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# for coverage
from ..strategy import *
class TestStrategy:
def setup(self):
pass
# setup() before each test method
def teardown(self):
pass
# teardown() after each test method
@classmethod
def setup_class(cls):
pass
# setup_class() before any methods in this class
@classmethod
def teardown_class(cls):
pass
# teardown_class() after any methods in this class
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
8806780712e5054373bdc136bb537dece0d2b9ac
|
ffd2126e1ba5d1acea0bb0b3d011f4ccaf1c1f1f
|
/gia/gia/doctype/gia_sector/gia_sector.py
|
4a1e4524728a0939102446bd86307c02279f077f
|
[
"MIT"
] |
permissive
|
alkuhlani/gia
|
fd55c65b0f430f24c7fbe3aef5ea911af8642702
|
9af9737cef7b0b947baa21f46c7be381c4fc9d98
|
refs/heads/master
| 2022-12-10T02:45:47.907158
| 2020-09-04T16:37:10
| 2020-09-04T16:37:10
| 276,495,714
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Ahmed Mohammed Alkuhlani and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class GIASector(Document):
def validate(self):
if not self.parent_gia_sector:
frappe.throw(_("Please enter the parent"))
|
[
"frappe@ubuntu.vm"
] |
frappe@ubuntu.vm
|
fd9166714314627d931b92e8df033ea9d4f2ffd2
|
54a5f5ec2c5edf924b7dc7730ee7cb2a38ac4a39
|
/DataFrame_manipulation_pandas/E01_Positional_and_labeled_indexing.py
|
aa09f4a045dbe19bc6a45b84a5dfebd5c0c513b2
|
[] |
no_license
|
dajofischer/Datacamp
|
fac413ec178375cedceababaf84f6b47a61fc821
|
a03d16b8f342412f1ee077f2f196ee8404e2e21c
|
refs/heads/master
| 2020-04-05T08:38:25.361746
| 2019-03-27T20:55:57
| 2019-03-27T20:55:57
| 156,722,561
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
# `election` is assumed to be a preloaded pandas DataFrame (DataCamp exercise).
# Assign the row position of election.loc['Bedford']: x
x = 4
# Assign the column position of election['winner']: y
y = 4
# Print the boolean equivalence
print(election.iloc[x, y] == election.loc['Bedford', 'winner'])
|
[
"dajofischer@gmail.com"
] |
dajofischer@gmail.com
|
5651d66b1dd3f7adb98ce5c7bc17e2acfe92784a
|
174620e5937ac217cfdc46fa1f58493e9d59dfdd
|
/lib/default/lib/python2.7/site-packages/celery/concurrency/base.py
|
e0f2eb514c23941ee91fd0003917de8230cc1dac
|
[] |
no_license
|
Saifinbox/CKANPROJECT
|
6552912317019ce7dca87a1367344dbf5d978062
|
89e1cac49b282106ff4595f54a4eb84bcc8d2ee9
|
refs/heads/master
| 2021-01-01T06:34:37.568829
| 2017-07-17T08:48:46
| 2017-07-17T08:48:46
| 97,453,740
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,732
|
py
|
# -*- coding: utf-8 -*-
"""
celery.concurrency.base
~~~~~~~~~~~~~~~~~~~~~~~
TaskPool interface.
"""
from __future__ import absolute_import
import logging
import os
import time
from kombu.utils.encoding import safe_repr
from celery.utils import timer2
from celery.utils.log import get_logger
logger = get_logger('celery.concurrency')
def apply_target(target, args=(), kwargs={}, callback=None,
accept_callback=None, pid=None, **_):
if accept_callback:
accept_callback(pid or os.getpid(), time.time())
callback(target(*args, **kwargs))
class BasePool(object):
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3
Timer = timer2.Timer
#: set to true if the pool can be shutdown from within
#: a signal handler.
signal_safe = True
#: set to true if pool supports rate limits.
#: (this is here for gevent, which currently does not implement
#: the necessary timers).
rlimit_safe = True
#: set to true if pool requires the use of a mediator
#: thread (e.g. if applying new items can block the current thread).
requires_mediator = False
#: set to true if pool uses greenlets.
is_green = False
_state = None
_pool = None
#: only used by multiprocessing pool
uses_semaphore = False
def __init__(self, limit=None, putlocks=True, forking_enable=True,
**options):
self.limit = limit
self.putlocks = putlocks
self.options = options
self.forking_enable = forking_enable
self._does_debug = logger.isEnabledFor(logging.DEBUG)
def on_start(self):
pass
def did_start_ok(self):
return True
def on_stop(self):
pass
def on_apply(self, *args, **kwargs):
pass
def on_terminate(self):
pass
def on_soft_timeout(self, job):
pass
def on_hard_timeout(self, job):
pass
def maybe_handle_result(self, *args):
pass
def maintain_pool(self, *args, **kwargs):
pass
def terminate_job(self, pid):
raise NotImplementedError(
'%s does not implement kill_job' % (self.__class__, ))
def restart(self):
raise NotImplementedError(
'%s does not implement restart' % (self.__class__, ))
def stop(self):
self.on_stop()
self._state = self.TERMINATE
def terminate(self):
self._state = self.TERMINATE
self.on_terminate()
def start(self):
self.on_start()
self._state = self.RUN
def close(self):
self._state = self.CLOSE
self.on_close()
def on_close(self):
pass
def init_callbacks(self, **kwargs):
pass
def apply_async(self, target, args=[], kwargs={}, **options):
"""Equivalent of the :func:`apply` built-in function.
Callbacks should optimally return as soon as possible since
otherwise the thread which handles the result will get blocked.
"""
if self._does_debug:
logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
target, safe_repr(args), safe_repr(kwargs))
return self.on_apply(target, args, kwargs,
waitforslot=self.putlocks,
**options)
def _get_info(self):
return {}
@property
def info(self):
return self._get_info()
@property
def active(self):
return self._state == self.RUN
@property
def num_processes(self):
return self.limit
@property
def readers(self):
return {}
@property
def writers(self):
return {}
@property
def timers(self):
return {}
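

# Minimal illustrative subclass (ours, not part of celery): a pool that runs
# each task inline in the calling thread via apply_target defined above.
class InlineTaskPool(BasePool):

    def on_apply(self, target, args=None, kwargs=None, callback=None,
                 accept_callback=None, **_options):
        # fall back to a no-op callback so apply_target can always call it
        callback = callback or (lambda result: result)
        apply_target(target, args or (), kwargs or {},
                     callback, accept_callback)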
|
[
"muhammad.saif@inboxbiz.com"
] |
muhammad.saif@inboxbiz.com
|
42170e9a6ac498033863cd27ca0a6556bf1aa6c3
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/apriori_20190422135150.py
|
36ae75953708f032ec4b5b046220fb616cdb0b75
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,768
|
py
|
# Apriori algorithm
"""
Apriori assumes the items within an itemset are sorted lexicographically, while
Python sets are unordered, so we convert between set and list where needed;
and since a dict (support_data) keyed by itemset records each support, and
mutable sets are unhashable, itemsets are converted to frozenset at the right
moments.
Support: the fraction of transactions that contain an itemset.
Confidence of a rule X => Y: support(X ∪ Y) / support(X).
"""
class apriori_algorithm:
    # initialize the algorithm
    def __init__(self, minSupport, dataSet):
        self.minSupport = minSupport  # minimum support threshold
        self.dataSet = dataSet  # transaction data set

    # generate the list of candidate 1-itemsets (single items)
def generateC1(self, dataSet):
        C1 = []  # holds the generated single-item itemsets
        # iterate over the data set
        for data in dataSet:
for item in data:
if [item] not in C1:
C1.append([item])
C1.sort()
return C1
    # scan the data set against Ck and count occurrences
def generateLk_by_Ck(self, dataSet, Ck, minSupport, support_data):
"""
Generate Lk by executing a delete policy from Ck.
Args:
data_set: 数据集
Ck: A set which contains all all frequent candidate k-itemsets.
min_support: The minimum support.
support_data: A dictionary. The key is frequent itemset and the value is support.
Returns:
Lk: A set which contains all all frequent k-itemsets.
"""
D = map(set, dataSet)
C = map(frozenset, Ck)
        C1 = list(C)  # a map object is a one-shot iterator: once the inner loop exhausts it, a second pass would see it empty, so materialize it as a list
countData = dict()
        for d in D:  # iterate over the transaction sets
            for c in C1:
                if c.issubset(d):  # subset test, not element membership
                    if c not in countData.keys():  # the frozenset itself serves as the dict key
countData[c] = 1
else:
countData[c] += 1
numItems = float(len(list(dataSet)))
returnList = []
supportData = dict()
        # iterate over the count dictionary built above
for key in countData:
support = countData[key] / numItems
if support >= minSupport:
                returnList.insert(0, key)  # insert() places the object at the given position in the list
support_data[key] = support
return returnList
def generate_L(self, dataSet, k, min_support):
"""
Generate all frequent itemsets.
Args:
data_set:数据集
k: 频繁项集中含有的最多的元素
min_support: 最小支持度
Returns:
L: 出现的所有频繁项集
support_data: 每个频繁项集对应的支持度
"""
support_data = {}
C1 = self.generateC1(dataSet)
L1 = self.generateLk_by_Ck(dataSet, C1, min_support, support_data)
Lksub1 = L1.copy()
L = []
L.append(Lksub1)
for i in range(2, k + 1):
Ci = self.generateCK(Lksub1, i)
Li = self.generateLk_by_Ck(dataSet, Ci, min_support, support_data)
Lksub1 = Li.copy()
L.append(Lksub1)
return L, support_data
    # generateCK: produce candidate frequent itemsets. Args: Lk: frequent itemsets, k: number of items per candidate itemset
def generateCK(self, Lk, k):
Ck = set()
len_Lk = len(list(Lk))
list_Lk = list(Lk)
for i in range(len_Lk):
for j in range(1, len_Lk):
l1 = list(list_Lk[i])
l2 = list(list_Lk[j])
l1.sort()
l2.sort()
if l1[0:k - 2] == l2[0:k - 2]:
Ck_item = list_Lk[i] | list_Lk[j]
if self.isCk(Ck_item, list_Lk):
Ck.add(Ck_item)
# Ck.add(Ck_item)
return Ck
    # frequent-itemset check: every (k-1)-subset of a candidate must itself be frequent
def isCk(self, Ck_item, list_Lk):
for item in Ck_item:
sub_Ck = Ck_item - frozenset([item])
if sub_Ck not in list_Lk:
return False
return True
    # generate association rules
def generate_big_rules(self, L, support_data, min_conf):
"""
Generate big rules from frequent itemsets.
Args:
            L: the list of all frequent itemsets
            support_data: the support of each frequent itemset
            min_conf: the minimum confidence
"""
big_rule_list = []
sub_set_list = []
for i in range(0, len(L)):
for freq_set in L[i]:
for sub_set in sub_set_list:
if sub_set.issubset(freq_set):
conf = support_data[freq_set] / support_data[freq_set - sub_set]
big_rule = (freq_set - sub_set, sub_set, conf)
if conf >= min_conf and big_rule not in big_rule_list:
print(freq_set - sub_set, " => ", sub_set, "conf: ", conf)
big_rule_list.append(big_rule)
sub_set_list.append(freq_set)
return big_rule_list
if __name__ == '__main__':
minS = 0.5
dataSet = [['这个','弄','鞍山', '挨打'], ['这个', '啊'], ['鞍山', '弄', '词典', '按错'], ['鞍山', '挨打','按下','爱玩']]
apriori = apriori_algorithm(minSupport=minS, dataSet=dataSet)
L, support_data = apriori.generate_L(dataSet, 1,minS)
print(L)
print(support_data)
big_rule_list = apriori.generate_big_rules(L, support_data, 0.5)
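# Note (ours, not in the original): with k=1 only single-item frequent sets
# are generated, so generate_big_rules has no multi-item sets to split into
# rules; call apriori.generate_L(dataSet, 2, minS) or higher to see printed rules.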
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
aa13bd841c98bf69edc143608a0dcaf19c026204
|
4cfbc12903651dedbc799f53a8078433196e7919
|
/Pre Processing/Topic 7 - Kernal PCA/KERNEL_PCA_WITHOUT_SPLIT.py
|
6974fa581f4052b61770920b3c784ba26c4341c3
|
[] |
no_license
|
gautam4941/Machine_Learning_Codes
|
78bf86ab3caf6ee329c88ff18d25927125627a2c
|
0009d12ca207a9b0431ea56decc293588eb447b1
|
refs/heads/main
| 2023-02-06T18:05:44.154641
| 2023-01-30T17:04:25
| 2023-01-30T17:04:25
| 353,594,523
| 0
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
import pandas as pd
data = pd.read_csv('Social_Network_Ads.csv')
print( f"data :- \n{ data }\n" )
print( f"data.columns :- \n{ data.columns }\n" )
x = data.loc[ :, 'Gender' : 'EstimatedSalary' ]
y = data.loc[ :, 'Purchased' ]
print( f"x.isnull().sum() :- \n{ x.isnull().sum() }\n" )
print( f"y.isnull().sum() :- \n{ y.isnull().sum() }\n" )
print( f"x.dtypes :- \n{ x.dtypes }\n" )
print( f"y.dtypes :- \n{ y.dtypes }\n" )
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x['Gender'] = le.fit_transform( x['Gender'] )
import matplotlib.pyplot as plt
# plt.plot( x['Age'], x['EstimatedSalary'], linestyle = '', marker = '*' )
# plt.xlabel( 'Age' )
# plt.ylabel( 'EstimatedSalary' )
# plt.title( 'Age V/s Salary' )
# plt.show()
from sklearn.decomposition import KernelPCA
kpca = KernelPCA( n_components = 2, kernel = 'rbf' ) # n_components is the number of kernel components kept after the transform
x = kpca.fit_transform( x )
print( f"After Kernal PCA, x :- \n{ x }\n" )
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit( x, y )
y_pred = lr.predict( x )
new_x_test = x.T
# plt.plot( new_x_test[0], new_x_test[1], linestyle = '', marker = '*' )
# plt.xlabel( 'Component 1' )
# plt.ylabel( 'Component 2' )
# plt.title( 'Kernel PCA components' )
# plt.show()
print( f"lr.score( x, y ) = { lr.score( x, y ) }" ) # training accuracy: this variant has no train/test split
|
[
"noreply@github.com"
] |
gautam4941.noreply@github.com
|
9e8684300a753747f2ea81503addd0bd9141eee2
|
7ef5898dc861f7a5512953269db7b52d44f44bc5
|
/linkml/utils/validation.py
|
02ee97f62911680b3e797aa9db26dcd3bd75f727
|
[
"CC0-1.0"
] |
permissive
|
balhoff/linkml
|
eb5c26e9d8ace3c2a7a6f2f36872b9c2af7b97df
|
b27c36b24437f68878806518264f55f0f418cb0b
|
refs/heads/main
| 2023-07-28T16:47:04.974232
| 2021-09-09T01:39:21
| 2021-09-09T01:39:21
| 404,550,589
| 0
| 0
|
CC0-1.0
| 2021-09-09T01:45:37
| 2021-09-09T01:45:37
| null |
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
import json
import sys
from typing import Type, Union, TextIO
import logging
import click
import jsonschema
from linkml_runtime.linkml_model import SchemaDefinition
from linkml_runtime.utils.yamlutils import as_dict, YAMLRoot
from linkml_runtime.dumpers import json_dumper
from linkml.generators.jsonschemagen import JsonSchemaGenerator
import linkml.utils.datautils as datautils
def _as_dict(inst):
# TODO: replace this with linkml_runtime.dictutils when 1.0.14 is released
inst_dict = json.loads(json_dumper.dumps(element=inst))
del inst_dict['@type']
return inst_dict
def validate_object(data: YAMLRoot, schema: Union[str, TextIO, SchemaDefinition], target_class: Type[YAMLRoot] = None,
closed: bool = True):
"""
validates instance data against a schema
:param data: LinkML instance to be validates
:param schema: LinkML schema
:param target_class: class in schema to validate against
:param closed:
:return:
"""
if target_class is None:
target_class = type(data)
inst_dict = _as_dict(data)
not_closed = not closed
jsonschemastr = JsonSchemaGenerator(schema, mergeimports=True, top_class=target_class.class_name,
not_closed=not_closed).serialize(not_closed=not_closed)
jsonschema_obj = json.loads(jsonschemastr)
return jsonschema.validate(inst_dict, schema=jsonschema_obj)
if __name__ == '__main__':
datautils.cli(sys.argv[1:])
|
[
"cjm@berkeleybop.org"
] |
cjm@berkeleybop.org
|
9954328c0d050bb4d64a911f7461a367bf36a59f
|
8c50265b43add0e91e30245cc7af3c2558c248f5
|
/tests/python/gpu/test_tvm_bridge.py
|
5c87536bdbaea32571012424a4db77dab00c19ed
|
[
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel"
] |
permissive
|
awslabs/dynamic-training-with-apache-mxnet-on-aws
|
6a67f35d7e4b12fa8bba628bd03b2b031924e211
|
1063a979417fee8c820af73860eebd2a4f670380
|
refs/heads/master
| 2023-08-15T11:22:36.922245
| 2022-07-06T22:44:39
| 2022-07-06T22:44:39
| 157,440,687
| 60
| 19
|
Apache-2.0
| 2022-11-25T22:23:19
| 2018-11-13T20:17:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test TVM bridge, only enable this when TVM is available"""
import logging
import mxnet as mx
import numpy as np
import unittest
def test_tvm_bridge():
# only enable test if TVM is available
try:
import tvm
import tvm.contrib.mxnet
import topi
except ImportError:
logging.warn("TVM bridge test skipped because TVM is missing...")
return
def check(target, dtype):
shape = (20,)
scale = tvm.var("scale", dtype="float32")
x = tvm.placeholder(shape, dtype=dtype)
y = tvm.placeholder(shape, dtype=dtype)
z = tvm.compute(shape, lambda i: x[i] + y[i])
zz = tvm.compute(shape, lambda *i: z(*i) * scale.astype(dtype))
ctx = mx.gpu(0) if target == "cuda" else mx.cpu(0)
target = tvm.target.create(target)
# build the function
with target:
s = topi.generic.schedule_injective(zz)
f = tvm.build(s, [x, y, zz, scale])
# get a mxnet version
mxf = tvm.contrib.mxnet.to_mxnet_func(f, const_loc=[0, 1])
xx = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
yy = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
zz = mx.nd.empty(shape=shape, ctx=ctx).astype(dtype)
        # invoke mxf: this runs in the mxnet engine
mxf(xx, yy, zz, 10.0)
np.testing.assert_allclose(
zz.asnumpy(), (xx.asnumpy() + yy.asnumpy()) * 10)
for tgt in ["llvm", "cuda"]:
for dtype in ["int8", "uint8", "int64",
"float32", "float64"]:
check(tgt, dtype)
if __name__ == "__main__":
import nose
nose.runmodule()
|
[
"vikumar@88e9fe53272d.ant.amazon.com"
] |
vikumar@88e9fe53272d.ant.amazon.com
|
6508b6eae18f254c28dd6343bef32cd4b4afd295
|
61fa932822d22ba480f7aa075573e688897ad844
|
/simulation/decai/simulation/data/imdb_data_loader.py
|
fbc6d62dc1165cc5a608c3003156977db751c917
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/0xDeCA10B
|
a8f118fa1f89f387a0b83f297250fc1846521f41
|
4066eeb2b5298c259a7c19c4d42ca35ef22e0569
|
refs/heads/main
| 2023-07-26T08:09:34.718104
| 2023-01-25T12:47:17
| 2023-01-25T12:47:17
| 181,561,897
| 538
| 133
|
MIT
| 2023-07-19T03:10:38
| 2019-04-15T20:37:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,778
|
py
|
from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from keras.datasets import imdb
from .data_loader import DataLoader
@inject
@dataclass
class ImdbDataLoader(DataLoader):
"""
Load data for sentiment analysis of IMDB reviews.
https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
"""
_logger: Logger
num_words: int = field(default=1000)
def classifications(self) -> List[str]:
return ["NEGATIVE", "POSITIVE"]
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
self._logger.info("Loading IMDB review data using %d words.", self.num_words)
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=self.num_words)
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
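        # Multi-hot encode each review: result[i, v] = 1 iff word id v occurs in review i.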
def get_features(data):
result = np.zeros((len(data), self.num_words), dtype='int')
for i, x in enumerate(data):
for v in x:
result[i, v] = 1
return result
x_train = get_features(x_train)
x_test = get_features(x_test)
self._logger.info("Done loading IMDB review data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class ImdbDataModule(Module):
num_words: int = field(default=1000)
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[ImdbDataLoader]) -> DataLoader:
return builder.build(num_words=self.num_words)
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
587af0a9afab30d6dbe975f04b48b2543833db22
|
51507929d5bf732e6e5b7085015b86d097fc404d
|
/python/core/keyset_writer.py
|
4e417f4417071723ba236115a738874609d779e8
|
[
"Apache-2.0"
] |
permissive
|
jojodeco2/tink
|
a77be3fd6958070c131f4d556b349b69b65e11cb
|
46d4d5d6ff09f594c5460216c5b2cb11486076db
|
refs/heads/master
| 2020-08-04T04:46:05.526255
| 2019-10-01T10:21:02
| 2019-10-01T10:21:02
| 212,011,212
| 0
| 0
|
Apache-2.0
| 2019-10-01T04:18:08
| 2019-10-01T04:18:08
| null |
UTF-8
|
Python
| false
| false
| 3,498
|
py
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes Keysets to file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import abc
import io
from google.protobuf import json_format
from tink.proto import tink_pb2
from tink.python.core import tink_error
class KeysetWriter(object):
"""Knows how to write keysets to some storage system."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def write(self, keyset: tink_pb2.Keyset) -> None:
"""Tries to write a tink_pb2.Keyset to some storage system."""
pass
@abc.abstractmethod
def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
"""Tries to write an tink_pb2.EncryptedKeyset to some storage system."""
pass
class JsonKeysetWriter(KeysetWriter):
"""Writes keysets in proto JSON wire format to some storage system.
cf. https://developers.google.com/protocol-buffers/docs/encoding
"""
def __init__(self, text_io_stream: io.TextIOBase):
self._io_stream = text_io_stream
def write(self, keyset: tink_pb2.Keyset) -> None:
if not isinstance(keyset, tink_pb2.Keyset):
raise tink_error.TinkError('invalid keyset.')
json_keyset = json_format.MessageToJson(keyset)
# TODO(b/141106504) Needed for python 2.7 compatibility. StringIO expects
# unicode, but MessageToJson outputs UTF-8.
if isinstance(json_keyset, bytes):
json_keyset = json_keyset.decode('utf-8')
self._io_stream.write(json_keyset)
self._io_stream.flush()
def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
if not isinstance(encrypted_keyset, tink_pb2.EncryptedKeyset):
raise tink_error.TinkError('invalid encrypted keyset.')
json_keyset = json_format.MessageToJson(encrypted_keyset)
# TODO(b/141106504) Needed for python 2.7 compatibility. StringIO expects
# unicode, but MessageToJson outputs UTF-8.
if isinstance(json_keyset, bytes):
json_keyset = json_keyset.decode('utf-8')
self._io_stream.write(json_keyset)
self._io_stream.flush()
class BinaryKeysetWriter(KeysetWriter):
"""Writes keysets in proto binary wire format to some storage system.
cf. https://developers.google.com/protocol-buffers/docs/encoding
"""
def __init__(self, binary_io_stream: io.BufferedIOBase):
self._io_stream = binary_io_stream
def write(self, keyset: tink_pb2.Keyset) -> None:
if not isinstance(keyset, tink_pb2.Keyset):
raise tink_error.TinkError('invalid keyset.')
self._io_stream.write(keyset.SerializeToString())
self._io_stream.flush()
def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
if not isinstance(encrypted_keyset, tink_pb2.EncryptedKeyset):
raise tink_error.TinkError('invalid encrypted keyset.')
self._io_stream.write(encrypted_keyset.SerializeToString())
self._io_stream.flush()
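
# Illustrative usage sketch (ours; assumes an existing tink_pb2.Keyset
# instance named `keyset`):
# import io
# out = io.StringIO()
# JsonKeysetWriter(out).write(keyset)
# json_str = out.getvalue()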
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
19b16e038e42e69f3f52b17764d02d98614b0c87
|
364b36d699d0a6b5ddeb43ecc6f1123fde4eb051
|
/_downloads_1ed/fig_XD_example.py
|
6e7d5247c12d7f1e576f8087307c0067916096aa
|
[] |
no_license
|
astroML/astroml.github.com
|
eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca
|
70f96d04dfabcd5528978b69c217d3a9a8bc370b
|
refs/heads/master
| 2022-02-27T15:31:29.560052
| 2022-02-08T21:00:35
| 2022-02-08T21:00:35
| 5,871,703
| 2
| 5
| null | 2022-02-08T21:00:36
| 2012-09-19T12:55:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,918
|
py
|
"""
Extreme Deconvolution example
-----------------------------
Figure 6.11
An example of extreme deconvolution showing a simulated two-dimensional
distribution of points, where the positions are subject to errors. The top two
panels show the distributions with small (left) and large (right) errors. The
bottom panels show the densities derived from the noisy sample (top-right
panel) using extreme deconvolution; the resulting distribution closely matches
that shown in the top-left panel.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.decorators import pickle_results
from astroML.density_estimation import XDGMM
from astroML.plotting.tools import draw_ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Sample the dataset
N = 2000
np.random.seed(0)
# generate the true data
x_true = (1.4 + 2 * np.random.random(N)) ** 2
y_true = 0.1 * x_true ** 2
# add scatter to "true" distribution
dx = 0.1 + 4. / x_true ** 2
dy = 0.1 + 10. / x_true ** 2
x_true += np.random.normal(0, dx, N)
y_true += np.random.normal(0, dy, N)
# add noise to get the "observed" distribution
dx = 0.2 + 0.5 * np.random.random(N)
dy = 0.2 + 0.5 * np.random.random(N)
x = x_true + np.random.normal(0, dx)
y = y_true + np.random.normal(0, dy)
# stack the results for computation
X = np.vstack([x, y]).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([dx ** 2, dy ** 2]).T
#------------------------------------------------------------
# compute and save results
@pickle_results("XD_toy.pkl")
def compute_XD_results(n_components=10, n_iter=500):
clf = XDGMM(n_components, n_iter=n_iter)
clf.fit(X, Xerr)
return clf
clf = compute_XD_results(10, 500)
sample = clf.sample(N)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
wspace=0.02, hspace=0.02)
ax1 = fig.add_subplot(221)
ax1.scatter(x_true, y_true, s=4, lw=0, c='k')
ax2 = fig.add_subplot(222)
ax2.scatter(x, y, s=4, lw=0, c='k')
ax3 = fig.add_subplot(223)
ax3.scatter(sample[:, 0], sample[:, 1], s=4, lw=0, c='k')
ax4 = fig.add_subplot(224)
for i in range(clf.n_components):
draw_ellipse(clf.mu[i], clf.V[i], scales=[2], ax=ax4,
ec='k', fc='gray', alpha=0.2)
titles = ["True Distribution", "Noisy Distribution",
"Extreme Deconvolution\n resampling",
"Extreme Deconvolution\n cluster locations"]
ax = [ax1, ax2, ax3, ax4]
for i in range(4):
ax[i].set_xlim(-1, 13)
ax[i].set_ylim(-6, 16)
ax[i].xaxis.set_major_locator(plt.MultipleLocator(4))
ax[i].yaxis.set_major_locator(plt.MultipleLocator(5))
ax[i].text(0.05, 0.95, titles[i],
ha='left', va='top', transform=ax[i].transAxes)
if i in (0, 1):
ax[i].xaxis.set_major_formatter(plt.NullFormatter())
else:
ax[i].set_xlabel('$x$')
if i in (1, 3):
ax[i].yaxis.set_major_formatter(plt.NullFormatter())
else:
ax[i].set_ylabel('$y$')
plt.show()
|
[
"vanderplas@astro.washington.edu"
] |
vanderplas@astro.washington.edu
|
75683d574fd6fafc97d6262c264e53f43ff0a56b
|
19ee7dd974ba8b1731e9450c174df7630f63eaad
|
/Api/recognition/serializers.py
|
bc1cd767bbebc3dcfc9d20d425f5e7079f0f1748
|
[] |
no_license
|
minjjjae/No-Mask-Trace-System
|
12d3a5a146f5526b9dbba5a8b75d6adc6c8a2e2b
|
61c76197d7ae921823b795effd9f267c92016a97
|
refs/heads/main
| 2023-01-19T08:35:19.643717
| 2020-11-28T05:27:52
| 2020-11-28T05:27:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from recognition.models import Recognition
from rest_framework import serializers
class RecognitionSerializer(serializers.HyperlinkedModelSerializer):
image = serializers.ImageField(max_length=None, use_url=True)
class Meta:
model = Recognition
fields = ("pk", "encodeLst", "description", "created_at", "image")
|
[
"bhj1684@naver.com"
] |
bhj1684@naver.com
|
2514e06398d1649d7e768e2219aa835bfc94e0c7
|
dffd7156da8b71f4a743ec77d05c8ba031988508
|
/joi/prelim/2019/yo1c/c.py
|
8fa22ad3e7acb87ef87d1e4727e8bf36c56ef603
|
[] |
no_license
|
e1810/kyopro
|
a3a9a2ee63bc178dfa110788745a208dead37da6
|
15cf27d9ecc70cf6d82212ca0c788e327371b2dd
|
refs/heads/master
| 2021-11-10T16:53:23.246374
| 2021-02-06T16:29:09
| 2021-10-31T06:20:50
| 252,388,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
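# Summary (ours): prints the length of the longest contiguous non-decreasing run in a.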
n, *a = map(int, open(0).read().split())
cnt = ans = 0
prev = 0
for i in a:
if prev>i:
ans = max(ans, cnt)
cnt = 0
cnt += 1
prev = i
print(max(ans, cnt))
|
[
"v.iceele1810@gmail.com"
] |
v.iceele1810@gmail.com
|
daf6299762e39365d4e0099a36ae78a1a59bcd0a
|
6ec91b363b077bffd33f15300a0935124e9fb915
|
/Cracking_the_Code_Interview/Leetcode/3.String/290.Word_Pattern.py
|
766301dce21b1c686fdd7e0e347044af480ca094
|
[] |
no_license
|
lzxyzq/Cracking_the_Coding_Interview
|
03232515ae8eb50394d46322d36b230d1a626fcf
|
79dee7dab41830c4ff9e38858dad229815c719a0
|
refs/heads/master
| 2023-06-05T19:52:15.595289
| 2021-06-23T22:46:02
| 2021-06-23T22:46:02
| 238,068,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,531
|
py
|
'''
@Author: your name
@Date: 2020-06-09 17:21:16
@LastEditTime: 2020-06-10 12:19:27
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/String/290.Word_Pattern.py
'''
# Given a pattern and a string str, find if str follows the same pattern.
# Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
'''
Example 1:
Input: pattern = "abba", str = "dog cat cat dog"
Output: true
Example 2:
Input:pattern = "abba", str = "dog cat cat fish"
Output: false
Example 3:
Input: pattern = "aaaa", str = "dog cat cat dog"
Output: false
Example 4:
Input: pattern = "abba", str = "dog dog dog dog"
Output: false
'''
# Notes:
# You may assume pattern contains only lowercase letters, and str contains lowercase letters that may be separated by a single space.
# 1. split() the string into words
# 2. lengths must match: len(words) == len(pattern)
# 3. hash map mapping each word to its pattern letter
class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
str = str.split()
result = ''
if len(str) != len(pattern):
return False
d = {}
for i in range(len(pattern)):
if str[i] not in d:
if pattern[i] not in d.values():
d[str[i]] = pattern[i]
else:
return False
result += d[str[i]]
return result == pattern
pattern = "abba"
str = "dog cat cat dog"
words = str.split(' ')
tuple(zip(words, pattern))
|
[
"lzxyzq@gmail.com"
] |
lzxyzq@gmail.com
|