blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
69399a49153db50e70168d25d6dff65962822c66 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KCB_YCHF/KCB_YCHF_MM/OMS/YCHF_KCBYCHF_OMS_099.py | 0aa0a7ef5c462c08e02381b15485dddec0029a36 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from ARmainservice import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
from env_restart import *
class YCHF_KCBYCHF_OMS_099(xtp_test_case):
    """Order-flow test case: partially-cancelled forward-best sell on SH STAR
    board ticker 688000 while the OMS service is stopped (per the title string).
    Relies on the project xtp test harness (Api, serviceTest, QueryStkPriceQty)."""
    def setUp(self):
        # Environment-preparation steps kept for reference but currently disabled:
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YCHF_KCBYCHF_OMS_099')
        #clear_data_and_restart_all()
        #Api.trade.Logout()
        #Api.trade.Login()
        pass
    #
    def test_YCHF_KCBYCHF_OMS_099(self):
        # Title (Chinese): "Stop OMS service (SH A-share forward-best partial-cancel sell)".
        title = '停止OMS服务(沪A本方最优部撤卖出)'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully filled,
        # partial-cancel reported, partially cancelled, cancel pending, cancelled,
        # invalid, cancel rejected, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '部撤',
            'errorID': 0,
            'errorMSG': queryOrderErrorMsg(0),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading status,
        # side ('B' buy / 'S' sell), expected state, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, fail the case immediately.
        if stkparm['返回结果'] is False:
            rs = {
                '报单测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['报单测试结果'], True)
        else:
            # Order request: cash business, SH A market, sell at forward-best
            # price type using the limit-up price, quantity 300.
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':3,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                'quantity': 300,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['报单测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
        ## Restore available funds (disabled):
        #sql_transfer = SqlData_Transfer()
        #sql_transfer.transfer_fund_asset('YW_KCB_BAK_000')
        #oms_restart()
        self.assertEqual(rs['报单测试结果'], True) # 211
if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
a5bc5ff0db7914cddcbf2343968e56b7a15e5726 | e6445a08328f35da144e40d995fde37111c33f59 | /docs/04_队列/array_queue.py | 4b45b94a363a88d22b5b5b79116c7b1ad97dc8cf | [
"MIT"
] | permissive | Mark24Code/python_data_structures_and_algorithms | 6f069eed04e8c44bc4f04845755a1b598606d4ad | 3469a79c34b6c08ae52797c3974b49fbfa8cca51 | refs/heads/master | 2023-03-15T22:54:02.786294 | 2022-06-13T01:53:41 | 2022-06-13T01:53:41 | 586,735,464 | 1 | 0 | MIT | 2023-01-09T05:12:16 | 2023-01-09T05:12:16 | null | UTF-8 | Python | false | false | 1,688 | py | # -*- coding: utf-8 -*-
# NOTE: code copied from chapter 1 (array_and_list)
class Array(object):
    """Fixed-capacity array backed by a plain Python list of `size` slots."""
    def __init__(self, size=32):
        self._size = size
        self._items = [None] * size
    def __getitem__(self, index):
        """Read the slot at `index`."""
        return self._items[index]
    def __setitem__(self, index, value):
        """Write `value` into the slot at `index`."""
        self._items[index] = value
    def __len__(self):
        """Capacity of the array (fixed at construction)."""
        return self._size
    def clear(self, value=None):
        """Overwrite every slot with `value` (default None)."""
        for slot in range(len(self._items)):
            self._items[slot] = value
    def __iter__(self):
        """Iterate over every slot in order."""
        return iter(self._items)
class FullError(Exception):
    """Raised when pushing onto a queue that is already at capacity."""
    pass
class ArrayQueue(object):
    """Bounded FIFO queue implemented as a circular buffer over `Array`.

    `head` counts total pushes, `tail` counts total pops; the live length is
    their difference and slot positions are taken modulo `maxsize`.
    """
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.array = Array(maxsize)
        self.head = 0
        self.tail = 0
    def push(self, value):
        """Append `value`; raises FullError when the queue already holds maxsize items."""
        if len(self) >= self.maxsize:
            raise FullError('queue full')
        self.array[self.head % self.maxsize] = value
        self.head += 1
    def pop(self):
        """Remove and return the oldest item; raises IndexError when empty.

        Fix: the original performed no underflow check, so popping an empty
        queue silently returned whatever stale value sat in the slot and
        corrupted the head/tail bookkeeping.
        """
        if self.head == self.tail:
            raise IndexError('pop from an empty queue')
        value = self.array[self.tail % self.maxsize]
        self.tail += 1
        return value
    def __len__(self):
        """Number of items currently stored."""
        return self.head - self.tail
def test_queue():
    """Exercise ArrayQueue: fill to capacity, overflow, pop, wrap around."""
    import pytest  # pip install pytest
    capacity = 5
    queue = ArrayQueue(capacity)
    for value in range(capacity):
        queue.push(value)
    with pytest.raises(FullError) as excinfo:  # pushing past capacity must raise
        queue.push(capacity)
    assert 'full' in str(excinfo.value)
    assert len(queue) == 5
    assert queue.pop() == 0
    assert queue.pop() == 1
    queue.push(5)
    assert len(queue) == 4
    for expected in (2, 3, 4, 5):
        assert queue.pop() == expected
    assert len(queue) == 0
| [
"291374108@qq.com"
] | 291374108@qq.com |
2c5181d620658d03ac9d21ae8318395a7c20f37e | e2ac8f82d611955b311226a278328718628ca667 | /api/api_using_viewset/urls.py | 258e7d41b4126e7a5dbaecaf5dc6b6528c99b87c | [] | no_license | ShakilAhmmed/apis | 86a6d0ded03724ca1b1f4912026dac1b42f3d4ea | 1f8309d08c288b2412f5c85c4297fe3cc9289f1b | refs/heads/master | 2020-05-22T18:26:13.256094 | 2019-05-13T18:19:02 | 2019-05-13T18:19:02 | 186,471,211 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.urls import path, include
from rest_framework import routers
from .views import StudentViewSet
# DefaultRouter auto-generates the standard list/detail CRUD routes for the viewset.
router = routers.DefaultRouter()
router.register('api_using_viewset', StudentViewSet)
# Mount every router-generated route at the app root.
urlpatterns = [
    path('', include(router.urls))
]
| [
"shakilfci461@gmail.com"
] | shakilfci461@gmail.com |
f0ac079eb68e8fbe90313d1152398a2a07f928fc | cedab14839cfc276f028436ba79d103a8aff0d5b | /Philippines/Subject5_AP/E1_AP_Landsat/4_Eval.py | ed2439d4199b7f752ef9ee7820fbce6c2f2bada2 | [] | no_license | wmgeolab/schoolCNN | aa686a4103695c1e10f5afa68ec2919761d33c15 | 1c73ec90732ec565ce552b27e4b2108a8ee916da | refs/heads/master | 2021-01-09T03:25:44.895023 | 2020-02-21T22:52:41 | 2020-02-21T22:52:41 | 242,230,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | from __future__ import print_function, division
from torchvision import datasets, models, transforms
from imgaug import parameters as iap
from imgaug import augmenters as iaa
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchsummary import summary
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn as nn
from PIL import Image
import pandas as pd
import torchvision
import numpy as np
import pickle
import joblib
import torch
import time
import copy
import PIL
import os
class ImgAugTransform:
    """imgaug-based augmentation pipeline usable inside torchvision Compose.

    Resizes to 224x224 and then applies a mix of probabilistic augmentations
    (blur, brightness, inversion, element-wise noise replacement, Poisson
    noise, horizontal flip, rotation, dropout, hue/saturation shift).
    The ops run in the exact order listed below.
    """
    def __init__(self):
        self.aug = iaa.Sequential([
            iaa.Scale((224, 224)),
            iaa.Sometimes(0.30, iaa.GaussianBlur(sigma=(0, 3.0))),
            iaa.Sometimes(0.25, iaa.Multiply((0.5, 1.5), per_channel=0.5)),
            iaa.Sometimes(0.20, iaa.Invert(0.25, per_channel=0.5)),
            iaa.Sometimes(0.25, iaa.ReplaceElementwise(
                iap.FromLowerResolution(iap.Binomial(0.1), size_px=8),
                iap.Normal(128, 0.4*128),
                per_channel=0.5)
            ),
            iaa.Sometimes(0.30, iaa.AdditivePoissonNoise(40)),
            iaa.Fliplr(0.5),
            iaa.Affine(rotate=(-20, 20), mode='symmetric'),
            iaa.Sometimes(0.30,
                          iaa.OneOf([iaa.Dropout(p=(0, 0.1)),
                                     iaa.CoarseDropout(0.1, size_percent=0.5)])),
            iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)
        ])
    def __call__(self, img):
        # imgaug works on numpy arrays, so convert (e.g. from a PIL image) first.
        img = np.array(img)
        return self.aug.augment_image(img)
# Trained checkpoint serialized with joblib; scored below over the "pass" images.
model_ft = joblib.load("./Philippines/Subject5_AP/E1_AP_Landsat/models/Landsat_AP_50epoch.sav")
directory = "./Philippines/Subject5_AP/E1_AP_Landsat/data/pass/"
# Preprocessing: augmentation pipeline, tensor conversion, ImageNet-style normalization.
transform = transforms.Compose([
    ImgAugTransform(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def EvalModel(model, directory, transforms):
    """Score every image in `directory` with `model` and collect probabilities.

    Fixes: the original body ignored its `model` and `transforms` parameters
    and used the module-level globals `model_ft` / `transform` instead (it only
    worked because the script happened to pass those globals in); it also
    switched the network to eval mode once per image and computed unused values.

    Args:
        model: trained two-class classifier; moved to the GPU here.
        directory: folder of image files whose first six filename characters
            encode the school id.
        transforms: callable mapping a PIL image to a normalized tensor.

    Returns:
        pandas DataFrame with columns school_id, prob_fail, prob_pass
        (softmax percentages for class 0 / class 1).
    """
    model = model.cuda()
    model.eval()  # inference mode once, not per image
    df = pd.DataFrame()
    cpass, cfail, ids = [], [], []
    filenames = os.listdir(directory)
    total = len(filenames)
    count = 0
    for filename in filenames:
        count += 1
        ids.append(filename[0:6])  # leading 6 chars are the school id
        png = Image.open(directory + filename)
        img_t = transforms(png)
        batch_t = torch.unsqueeze(img_t, 0).cuda()
        with torch.no_grad():  # no autograd bookkeeping needed for scoring
            out = model(batch_t)
        percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100
        cfail.append(percentage[0].tolist())
        cpass.append(percentage[1].tolist())
        print("Predicted " + str(count) + " out of " + str(total) + " images.")
    df['school_id'] = ids
    df['prob_fail'] = cfail
    df['prob_pass'] = cpass
    return df
# Score the "pass" images and persist the per-school probabilities.
pass_preds = EvalModel(model_ft, directory, transform)
pass_preds.to_csv("./Philippines/Subject5_AP/Ensemble/data/LandsatPassPreds_GPU.csv")
directory = "./Philippines/Subject5_AP/E1_AP_Landsat/data/fail/"
# NOTE(review): this pipeline is identical to the one defined above — kept as-is.
transform = transforms.Compose([
    ImgAugTransform(),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Score the "fail" images and persist those probabilities too.
fail_preds = EvalModel(model_ft, directory, transform)
fail_preds.to_csv("./Philippines/Subject5_AP/Ensemble/data/LandsatFailPreds_GPU.csv")
| [
"hmbaier@email.wm.edu"
] | hmbaier@email.wm.edu |
734a6e61332e4f46f8990d6102e2d09d754cc202 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2787/60617/264026.py | 6fc9957c7ad27cbe8e5b15d744283c7712053524 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | def construct_sequence():
    # Read the target length and the current sequence from stdin.
    length=int(input())
    sequence=list(map(int, input().split(" ")))
    upCounts=0
    downCounts=0
    # NOTE(review): appears to compare the total absolute adjustment needed to
    # turn the sequence into consecutive ascending values ending at `length`...
    for i in range(0, len(sequence)):
        upCounts+=abs((length-len(sequence)+(i+1))-sequence[i])
    # ...versus consecutive descending values starting at `length` — confirm
    # against the original problem statement.
    for i in range(0, len(sequence)):
        downCounts+=abs((length-i)-sequence[i])
    # Output the cheaper of the two options.
    print(min(upCounts, downCounts))
if __name__=='__main__':
    construct_sequence()
"1069583789@qq.com"
] | 1069583789@qq.com |
7bae3b372c02e98b1ec84432aad1e72aad131396 | 885a722e3e5814ae4942ac5e8cf8d0091e734b4c | /LEETCODE/10. Regular Expression Matching - Python/CodingTest.py | 4f1f9a0e534fa0242d2adb715e7c95a102ea3fc3 | [] | no_license | ledpear/algorithm | 52f3ea25842eee20b3bbd48e51825b9df4942e03 | 4922c6fe5ca0b98a90dee218b756006e7ba05d82 | refs/heads/master | 2023-06-09T17:47:45.674244 | 2023-06-03T13:47:11 | 2023-06-03T13:47:11 | 133,370,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | class Solution:
def isMatch(self, s: str, p: str) -> bool:
p_list = list(p).reverse()
s_list = list(s).reverse()
bResult = True
s_pos = 0
for i in p_list :
if i == '.' :
s_pos += 1
elif i == '*' :
p_list[]
else
if i != s_list[s_pos] :
bResult = False
break;
print(Solution.isMatch(None, "aa","a")) | [
"tjsrb75@gmail.com"
] | tjsrb75@gmail.com |
7ad54719c3793799d0c3f52d695fe1971882d080 | 26243715cd618b07e95ea2a12f04aa750cb359af | /Help_Qiu/plotData.py | 4921f9a83a94ab6545b54d69f0f250cb6449b8ec | [] | no_license | 43reyerhrstj/DataAnalysis_water | 5fdf3e86249d8f47e54dc174edf9a55854e23b49 | 0be734284587b86857044dafa18af62268f64979 | refs/heads/master | 2022-04-05T22:52:07.147298 | 2018-05-22T14:00:03 | 2018-05-22T14:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# Make matplotlib render Chinese characters (and the minus sign) correctly.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
path='orgData.xls'
df=pd.read_excel(path)
dates=df['采集时间'] # the timestamp column of the dataset
secWenCha=df['二次温差(℃)']
secCalcu=df['二次供水温度计算值(℃)']
# Format of the start/end datetimes entered below.
format='%Y/%m/%d %H:%M:%S'
start_str="2017/12/1 0:08:40"
end_str="2017/12/21 20:48:50"
start=datetime.strptime(start_str,format)
end=datetime.strptime(end_str,format)
# Parse every timestamp and keep only the rows inside [start, end].
xs = pd.Series([datetime.strptime(d,format) for d in dates])
great=xs>=start
less=xs<=end
index_range=xs[great & less].index
x_data=[datetime.strptime(str(d), '%Y-%m-%d %H:%M:%S') for d in xs[great & less]]
# Plot both temperature series over the selected date range.
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(format))
plt.plot(x_data, df.loc[index_range,'二次温差(℃)'],label="二次温差(℃)")
plt.plot(x_data, df.loc[index_range,'二次供水温度计算值(℃)'],label="二次供水温度计算值(℃)")
plt.legend()
plt.gcf().autofmt_xdate() # auto-rotate the date tick labels
plt.show()
| [
"qzq2514@outlook.com"
] | qzq2514@outlook.com |
cc630d94e4aedb407b5660de8a965fe98411f2dc | 65ae896aa7a9b9cae6b90be8f5900ec2940ff65b | /03. Characters in Range.py | 25b6b3841475fd390e67849899d1ca8e44e51470 | [] | no_license | antondelchev/Functions---Exercise | dd1ea014e4e177618c3394ce7052a082dc1ab462 | 6445f767d092c06e7b1c8921ee030b5a0a71e832 | refs/heads/main | 2023-04-08T22:48:33.515213 | 2021-04-21T12:06:23 | 2021-04-21T12:06:23 | 360,154,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | def chars_in_range(char1, char2):
all_char = ""
for i in range(ord(char1) + 1, ord(char2)):
all_char += chr(i)
all_char += " "
return all_char
# Script entry: read the two boundary characters from stdin, then print the range.
first_character = input()
second_character = input()
print(chars_in_range(first_character, second_character))
| [
"noreply@github.com"
] | antondelchev.noreply@github.com |
8192e6bcce172127eb1ea134d03f012f502a1dd6 | 65f8211fc33eb5f9ac1ff0d68902226ca9a58692 | /graph_algorithms/bridge_matrix.py | 55f4e2bc177df53169cbfcff9d38915a46080d96 | [] | no_license | szarbartosz/asd-python | 46869f5699a1ef661e2df02e523af0adcddbbbda | 0130cc3dcbba6ad62e1516c98b5cbab85848d619 | refs/heads/master | 2022-12-13T19:02:53.699381 | 2020-09-11T13:29:31 | 2020-09-11T13:29:31 | 242,975,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | def find_bridge_matrix(G):
V = len(G)
visited = [False] * V
parent = [None] * V
visit_time = [float('inf')] * V
low = [float('inf')] * V
time = 1
def visit(u):
visited[u] = True
nonlocal time
visit_time[u] = time
low[u] = time
time += 1
for v in range(len(G[u])):
if G[u][v] != 0:
if not visited[v]:
parent[v] = u
visit(v)
low[u] = min(low[u], low[v])
if low[v] == visit_time[v]:
print(f'bridge: ({u}, {v})')
elif v != parent[u]:
low[u] = min(low[u], visit_time[v])
for i in range(V):
if not visited[i]:
visit(i)
# Demo: 5-vertex undirected graph; its bridges are (0, 1) and (3, 4).
G = [[0, 1, 0, 0, 0],
     [1, 0, 1, 1, 0],
     [0, 1, 0, 1, 0],
     [0, 1, 1, 0, 1],
     [0, 0, 0, 1, 0]]
find_bridge_matrix(G)
| [
"szarbartosz@gmail.com"
] | szarbartosz@gmail.com |
586d1e32398855039ebd98ed2777d3e23edc8872 | c8b18f0530f290fcd451b2a34d8e64d62477c3e5 | /codes/Tools/02_Pillow-string_picture/02_add_filter.py | 48869d863c1449f4ae23959df938a9076fd99c05 | [] | no_license | YorkFish/hello-world | a8e2e019e51a814bae4dbb134abce90fd02317d4 | 31f8b807c57bd942fc805466ad9d5ff9b9614b55 | refs/heads/master | 2021-10-23T00:41:09.867934 | 2021-10-12T01:25:20 | 2021-10-12T01:25:20 | 165,218,921 | 0 | 0 | null | 2019-01-11T11:19:18 | 2019-01-11T09:42:33 | null | UTF-8 | Python | false | false | 182 | py | # coding:utf-8
# Purpose of this script: apply a blur filter to an image and save the result.
from PIL import Image, ImageFilter
im1 = Image.open("fish.jpg")
im2 = im1.filter(ImageFilter.BLUR)
im2.save("fish_blur.jpg", "jpeg")
| [
"18258788231@163.com"
] | 18258788231@163.com |
01c1848a705ab7ce9f6c85d9acac02035d7114c7 | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/codec/models/openconfig/openconfig-lldp/cd-encode-oc-lldp-24-ydk.py | ad1ae2444ce86c3e012ba744c874c3714dcb2793 | [
"Apache-2.0"
] | permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,400 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Encode configuration for model openconfig-lldp.
usage: cd-encode-oc-lldp-24-ydk.py [-h] [-v]
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CodecService
from ydk.providers import CodecServiceProvider
from ydk.models.openconfig import openconfig_lldp \
as oc_lldp
from ydk.models.openconfig import openconfig_lldp_types \
as oc_lldp_types
import logging
def config_lldp(lldp):
    """Populate the lldp object's config container: enable LLDP, set the
    hello timer to 15, and suppress the management-address TLV."""
    cfg = lldp.config
    cfg.enabled = True
    cfg.hello_timer = 15
    suppressed_tlv = oc_lldp_types.MANAGEMENTADDRESS()
    cfg.suppress_tlv_advertisement.append(suppressed_tlv)
if __name__ == "__main__":
    """Execute main program."""
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    args = parser.parse_args()
    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # create codec provider (XML on-the-wire encoding)
    provider = CodecServiceProvider(type="xml")
    # create codec service
    codec = CodecService()
    lldp = oc_lldp.Lldp()  # create config object
    config_lldp(lldp)  # add object configuration
    # encode the populated object to XML and print it
    print(codec.encode(provider, lldp))
    exit()
# End of script
| [
"deom119@gmail.com"
] | deom119@gmail.com |
1bdaa614a033c2f7efd3ffb2527bb415bcc34af0 | 30754a148b79903d6e49399f1f270c79934ce389 | /tests/fuzzer/test_list_directory.py | 725dbe57113993c9a112d67576d1dceac33558d4 | [
"BSD-3-Clause"
] | permissive | syedkhalid/fuzzinator | 720ffc552c595b50de46e4e4e51f3a01cdc9aa77 | f90b58605de563e77b85ed0d54d2beb29efc7d14 | refs/heads/master | 2021-04-09T17:31:06.625840 | 2018-03-12T14:37:18 | 2018-03-12T15:21:27 | 125,814,277 | 1 | 0 | BSD-3-Clause | 2018-03-19T06:53:29 | 2018-03-19T06:53:29 | null | UTF-8 | Python | false | false | 1,279 | py | # Copyright (c) 2016-2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import pytest
import fuzzinator
from os.path import join
from common_fuzzer import resources_dir
# Directory of fixture files used by every parametrized case below.
mock_tests = join(resources_dir, 'mock_tests')
# Cases: (glob pattern, contents flag, expected results). With contents=True the
# fuzzer yields file contents (bytes); with contents=False it yields file paths.
@pytest.mark.parametrize('pattern, contents, exp', [
    (join(mock_tests, '*'), True, {b'foo\n', b'bar\n', b'baz\n'}),
    (join(mock_tests, '**', '*'), True, {b'foo\n', b'bar\n', b'baz\n', b'qux\n'}),
    (join(mock_tests, '*'), False, {join(mock_tests, 'baz.txt'), join(mock_tests, 'bar.txt'), join(mock_tests, 'foo.txt')}),
    (join(mock_tests, '**', '*'), False, {join(mock_tests, 'baz.txt'), join(mock_tests, 'bar.txt'), join(mock_tests, 'foo.txt'), join(mock_tests, 'subdir', 'qux.txt')}),
])
def test_list_directory(pattern, contents, exp):
    # Drain the fuzzer by increasing index until it signals exhaustion (None),
    # then compare everything it produced against the expected set.
    fuzzer = fuzzinator.fuzzer.ListDirectory(pattern=pattern, contents=contents)
    with fuzzer:
        tests = set()
        index = 0
        while True:
            test = fuzzer(index=index)
            if test is None:
                break
            tests.add(test)
            index += 1
        assert tests == exp
| [
"reni@inf.u-szeged.hu"
] | reni@inf.u-szeged.hu |
d1cf4cdbc0a102d31030cbcf3d9e0f6eb536d7a3 | 4bb66e64121d3f4eff4ca0809929983a5c354e3f | /backend/platform_3278/urls.py | fec873c25f27088090a61652926ddb6a711430f2 | [] | no_license | crowdbotics-apps/platform-3278 | 1f1697e33d692550a617466f240d0398b7ab8020 | f78b936584b8db09ab6eeb514335e8605ff0dfdb | refs/heads/master | 2020-05-22T17:33:42.090603 | 2019-05-13T16:04:40 | 2019-05-13T16:04:40 | 186,452,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | """platform_3278 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route table: home app at the root, allauth account flows, versioned API, admin.
urlpatterns = [
    url('', include('home.urls')),
    url(r'^accounts/', include('allauth.urls')),
    url(r'^api/v1/', include('home.api.v1.urls')),
    url(r'^admin/', admin.site.urls),
]
# Branding for the Django admin site.
admin.site.site_header = 'Platform'
admin.site.site_title = 'Platform Admin Portal'
admin.site.index_title = 'Platform Admin'
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
2b75945fc4280780dbf75164ac96a04676e50cfa | 9b54e3d58447e917a238b85891020c392c4ac601 | /acmicpc/9506/9506.py | 1c348d45c3fb17732c03fd82af2a1c1cdf2c030f | [
"MIT"
] | permissive | love-adela/algorithm-ps | ea0ebcd641a4c309348b389b8618daa83973f4b2 | c92d105d8ad344def001160367115ecf99d81c0d | refs/heads/master | 2023-05-11T03:37:11.750692 | 2023-04-30T17:31:30 | 2023-04-30T17:31:30 | 174,651,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | def get_divisor(k):
divisors = []
for i in range(1, k):
if k % i == 0:
divisors.append(i)
return divisors
# Read integers until the sentinel -1; print each perfect number as the sum
# of its proper divisors, otherwise report that it is not perfect.
while True:
    n = int(input())
    if n == -1:
        break
    divisors = get_divisor(n)
    if n == sum(divisors):
        print(f'{n}', end=' = ')
        print(' + '.join(list(map(str, divisors))))
    elif n != sum(divisors):
        print(f'{n} is NOT perfect.')
| [
"love.adelar@gmail.com"
] | love.adelar@gmail.com |
1215004b25792a229f83ffa57541e9cf3c11ce07 | b1c412822f856bb2dddd9ffac00b3aeee7794961 | /lab1/resnet.py | 20ca9ef1d201882595ea7fe8e71e39c724c43afb | [] | no_license | gavin0430/Deep-Learning | 61db627d3ff6239e6219a31b0a647cd4fb86df07 | ed14037eb83adc4a21b11aed14d3d93fb7045e02 | refs/heads/master | 2020-03-07T14:41:29.127217 | 2018-08-11T15:35:03 | 2018-08-11T15:35:03 | 127,532,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 20:48:37 2018
@author: gavin
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class BasicBlock(nn.Module):
    """Residual basic block: two 3x3 conv+BN layers with an identity (or
    1x1-projection) shortcut added before the final ReLU."""
    # Channel multiplier of the block's output relative to `planes`.
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut by default; use a strided 1x1 conv projection when
        # the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # residual connection
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3-channel input, three residual stages of 16/32/64
    channels, average pooling and a linear classifier.

    NOTE(review): the final linear layer expects a 64*4*4 feature map, which
    matches 32x32 inputs (two stride-2 stages -> 8x8, avg_pool2d(2) -> 4x4);
    other input sizes will fail at the view/linear step — confirm with callers.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(64*4*4, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample (given stride); the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = self.layer4(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)  # flatten per sample
        out = self.linear(out)
        return out
def ResNet20():
    """20-layer variant: three stages of 3 basic blocks each."""
    return ResNet(BasicBlock, [3] * 3)
def ResNet56():
    """56-layer variant: three stages of 9 basic blocks each."""
    return ResNet(BasicBlock, [9] * 3)
def ResNet110():
    """110-layer variant: three stages of 18 basic blocks each."""
    return ResNet(BasicBlock, [18] * 3)
| [
"mz@email.com"
] | mz@email.com |
1533273c46723599a3a8ff52b8f6694d34df94c6 | b007d88e6726452ffa8fe80300614f311ae5b318 | /array/text_dollar_coding.py | 6c9bd8b5f829472c4ab277b35f5c3d0ee5056863 | [] | no_license | jinurajan/Datastructures | ec332b12b8395f42cb769e771da3642f25ba7e7f | 647fea5d2c8122468a1c018c6829b1c08717d86a | refs/heads/master | 2023-07-06T14:42:55.168795 | 2023-07-04T13:23:22 | 2023-07-04T13:23:22 | 76,943,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py |
# Word lookup tables for spelling out amounts.
DIGITS_TEXT = {1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', 6: 'Six',
               7: 'Seven', 8: 'Eight', 9: 'Nine'}
TENS_TEXT = {10: 'Ten', 20: 'Twenty', 30: 'Thirty', 40: 'Forty', 50: 'Fifty',
             60: 'Sixty', 70: 'Seventy', 80: 'Eighty', 90: 'Ninety'}
TEENS_TEXT = {11: 'Eleven', 12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen',
              15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen', 18: 'Eighteen',
              19: 'Nineteen'}
HUNDREDS_TEXT = {100: 'Hundred', 1000: 'Thousand', 1000000: 'Million'}
def Textify(number, result):
    """Append the words for `number` (0-999, scale words excluded) to `result`.

    Fix: the original used Python-2 style `/` division, which yields floats on
    Python 3 and crashes the integer-keyed *_TEXT lookups. As in the original,
    the word 'Hundred' itself is NOT emitted here — callers insert it.
    """
    if 1 <= number <= 9:
        result.append(DIGITS_TEXT[number])
    elif 11 <= number <= 19:
        result.append(TEENS_TEXT[number])
    else:
        hundreds, rest = divmod(number, 100)
        if hundreds:
            Textify(hundreds, result)
            Textify(rest, result)
        else:
            tens = (number // 10) * 10  # e.g. 47 -> 40
            if tens:
                result.append(TENS_TEXT[tens])
                Textify(number - tens, result)
    return result
def NumberToText(number):
    """Convert a non-negative integer dollar amount into a list of English words.

    Fixes over the original: integer arithmetic via divmod (the Python-2 `/`
    broke all dictionary lookups under Python 3), and the million branch no
    longer appends the intermediate word list to itself.
    """
    words = []
    def add_group(value, scale):
        # Spell one 1..999 group, inserting 'Hundred' and an optional scale word.
        hundreds, rest = divmod(value, 100)
        if hundreds:
            Textify(hundreds, words)
            words.append(HUNDREDS_TEXT[100])
        if rest:
            Textify(rest, words)
        if scale:
            words.append(HUNDREDS_TEXT[scale])
    millions, below_million = divmod(number, 1000000)
    thousands, below_thousand = divmod(below_million, 1000)
    if millions:
        add_group(millions, 1000000)
    if thousands:
        add_group(thousands, 1000)
    if below_thousand or not words:
        add_group(below_thousand, None)
    words.append("Dollars")
    return words
def DollarCoding(number):
    """Print the spelled-out dollar amount on one line."""
    value = NumberToText(number)
    print(' '.join(val for val in value))
if __name__ == "__main__":
    DollarCoding(1234567)
    # DollarCoding(123456)
    # DollarCoding(12645)
    # DollarCoding(126)
    # DollarCoding(12)
    # DollarCoding(1)
"jinu.p.r@gmail.com"
] | jinu.p.r@gmail.com |
a47735081eff681ee355b014db90218c4a1692f6 | 1b6dff9c7e9ee5eac407a9fd97391536742e88fc | /servers/Radiation/archive_scrapper/archive_scrapper/spiders/prediccs_archive.py | 89d2054ad3b80658bccbe8dc6d7536cc37360c77 | [
"BSD-2-Clause"
] | permissive | mars-planet/mars_city | 00793d486361c130347d5fe513927368b9807b70 | 29df75e9037eec50672fd33295fc70d6f6e4b325 | refs/heads/master | 2020-04-12T06:46:16.073407 | 2018-08-12T13:30:57 | 2018-08-12T13:30:57 | 63,711,405 | 25 | 50 | NOASSERTION | 2023-07-04T15:43:23 | 2016-07-19T16:43:52 | C++ | UTF-8 | Python | false | false | 1,816 | py | import scrapy
class PrediccsArchiveScrapper(scrapy.Spider):
    """Scrapy spider crawling the PREDICCS GOES plot archive and yielding the
    tab-separated radiation data rows from each '31daysMars.plot' page."""
    flag = 0          # set to 1 once the archive's subdirectory links are captured
    count = 0         # index into links[0]: which archive subdirectory to fetch next
    string = 'bryn/31daysMars.plot'  # file requested inside each archive subdirectory
    name = "prediccs"
    links = []        # links[0] holds the hrefs scraped from the archive index
    handle_httpstatus_list = [404]   # 404s are handled in parse(), not dropped
    def start_requests(self):
        url = 'http://prediccs.sr.unh.edu/data/goesPlots/archive/'
        yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # A 404 means this subdirectory lacks the plot file: skip to the next one.
        # NOTE(review): count is pre-incremented before indexing and the guard is
        # `count < len(...)` before another increment, so the first link appears
        # to be skipped and the last fetch may index out of range — confirm.
        if response.status == 404:
            self.count = self.count + 1
            scrap_url = 'http://prediccs.sr.unh.edu/' + \
                        'data/goesPlots/archive/' + \
                        self.links[0][self.count] + self.string
            yield scrapy.Request(scrap_url, self.parse)
        # First response only: scrape the subdirectory links from the index page
        # (dropping the first 5 and last 2 anchors) and start the per-directory crawl.
        if not self.flag:
            self.flag = 1
            linkobj = response.css("a")[5:-2]
            self.links.append(linkobj.css("a::attr(href)").extract())
            if self.count < len(self.links[0]):
                self.count = self.count + 1
                scrap_url = 'http://prediccs.sr.unh.edu/' + \
                            'data/goesPlots/archive/' + \
                            self.links[0][self.count] + self.string
                yield scrapy.Request(scrap_url, self.parse)
        # Parse the plot file: skip the 22-line header, then keep the first six
        # tab-separated fields plus the second-to-last field of each row.
        datas = response.css("p::text").extract_first()
        datas = datas.split("\n")[22:]
        data = []
        for i in datas:
            i = i.split('\t')
            d = i[:6]
            d.append(i[-2])
            data.append(d)
        yield{
            'data': data,
        }
        # Queue the next archive subdirectory, if any remain.
        if self.count < len(self.links[0]):
            self.count = self.count + 1
            scrap_url = 'http://prediccs.sr.unh.edu/' + \
                        'data/goesPlots/archive/' + \
                        self.links[0][self.count] + self.string
            yield scrapy.Request(scrap_url, self.parse)
| [
"nivedn3@gmail.com"
] | nivedn3@gmail.com |
8765f592786aa47d3f4c5bc20ae6abd6057c68dc | b6a48f9a6158bcb7e6fc75e5eacaef19250fc4c5 | /cosmos/ingestion/ingest/process/detection/src/utils/split_train_val_test.py | 469996ade467d8c8f626e1ddad9407a3f23584a5 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | UW-COSMOS/Cosmos | dcde3be6534e411a20fcf1ff36e422fc8af2ac8a | 5ed4a4c149e03773690668437d2f93aa532453c6 | refs/heads/master | 2023-09-01T18:03:20.525760 | 2023-08-31T13:56:21 | 2023-08-31T13:56:21 | 159,849,583 | 39 | 14 | null | 2023-09-13T14:39:45 | 2018-11-30T16:24:59 | Python | UTF-8 | Python | false | false | 1,164 | py | #!/usr/bin/env python3
"""
"""
import os
from random import shuffle
if __name__ == '__main__':
    # Collect image basenames (the 4-char extension is stripped below, so names
    # like 'x.jpg'/'x.png' become 'x'), excluding the directory's README.
    files = os.listdir('data/images')
    try:
        files.remove('README.md')
    except ValueError:
        pass
    files = [f[:-4] for f in files]
    shuffle(files)
    # Split is 70/10/20
    filelen = len(files)
    train = int(.7 * filelen)
    val = int(.1 * filelen)
    train_filelist = files[:train]
    val_filelist = files[train:train+val]
    test_filelist = files[train+val:]
    print('There are {} train files, {} val files, and {} test files'.format(len(train_filelist),
                                                                             len(val_filelist),
                                                                             len(test_filelist)))
    # Write one basename per line into the three split manifests.
    with open('data/train.txt', 'w') as wf:
        for t in train_filelist:
            wf.write(t)
            wf.write('\n')
    with open('data/val.txt', 'w') as wf:
        for t in val_filelist:
            wf.write(t)
            wf.write('\n')
    with open('data/test.txt', 'w') as wf:
        for t in test_filelist:
            wf.write(t)
            wf.write('\n')
| [
"ankur.goswami12@gmail.com"
] | ankur.goswami12@gmail.com |
40da452a78a197b37a9af20397a0cc9dd41ab09a | af39e4ee0383b1ecb16511aa0df6404a0936b475 | /Lib/site-packages/crispy_forms/templates/bootstrap4/layout/checkboxselectmultiple_inline.html.py | abdb4d0b0af7c14dec0779ae8a0edd79268196a1 | [] | no_license | SheraramPrajapat1998/ecommerce | 009f58023086e6a182b8b0a9385a37f4a82c59c5 | 4ecbd2edd53e1b79a81760fc95ea2c2a97f205d7 | refs/heads/main | 2023-01-07T19:51:35.242839 | 2020-11-04T03:32:36 | 2020-11-04T03:32:36 | 308,618,518 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | BB BBBBBBBBBBBBBBB
BBBB
XXXX XXXXXXXXX XXXXXXXXXXXXXXXXXBB BBBBBBBBBBBBBBBBB BB BBBBBBBBBB XXXBBBBBBB BBBBBBBBBBBBB BBBBBBB BBBBBBBBBBBBBBBB BBB BBBBBBBBBBBB XXXXXXXXXXBBBBBBB BBBBBBBBBBBBBBBBB BBBBBXX
BB BBBBBBBBBBB
XXXXXX XXXXXX XXXXXXXBB BBB BBBBBBBBBBBB XXXXXXXXXXXXXXBBBBBBB BBBBBBBBBBBBBBBBBBBB XXXXXXXXXXXXXBBBBBXX
FFFFBB BBBBBBBBBBBBBBBBBBBBXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXBBBBB
XXXXXXXX
BBBBB
BBBBBBB BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
XXXXXX
BBBBB
| [
"sheraramprajapat1998@gmail.com"
] | sheraramprajapat1998@gmail.com |
db45bd54e042a773500a1ed7c1fc6e739e033d33 | 6f3cf8c3af7e194982ab07bbd15217d799a53381 | /Intermediate_Python/dictionaryManipulation2.py | 4ff5621fd47b2a908ac56faa6b282c49cde1d3c0 | [] | no_license | AnjaliSharma1234/Datacamp---Python-Track | 2ec9c61dbcda69f33bdf097d585b6e838a34f2e1 | 2e7fbaa6a9c2507e94689612dfa9650c5810f3cc | refs/heads/master | 2020-12-20T18:32:28.459549 | 2020-03-05T07:23:59 | 2020-03-05T07:23:59 | 236,170,988 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | # Definition of dictionary
# Capitals mapping; Germany's entry is corrected and Australia removed below.
europe = {
    'spain': 'madrid',
    'france': 'paris',
    'germany': 'bonn',
    'norway': 'oslo',
    'italy': 'rome',
    'poland': 'warsaw',
    'australia': 'vienna',
}
# The capital of Germany is Berlin, not Bonn.
europe['germany'] = 'berlin'
# Australia does not belong in a dictionary of European capitals.
del europe['australia']
# Show the cleaned-up dictionary.
print(europe)
| [
"noreply@github.com"
] | AnjaliSharma1234.noreply@github.com |
089a33ea10a5cc90572d949e6dba551a403df523 | 98cb2f2afbe57bdda9d6b8b1dd8cf624987d91bc | /torchdp/utils/tests/module_inspection_test.py | 3906d7f03090ec40c3963e92691fd128175284ba | [
"Apache-2.0"
] | permissive | jyhong836/pytorch-dp | 0e7613b01f09ceb2c3787284372f8e887bf0deb3 | e050b98d630d4db50cacc4fff82575daf345f012 | refs/heads/master | 2023-01-03T15:08:54.976598 | 2020-08-18T01:26:07 | 2020-08-18T01:27:02 | 260,974,801 | 0 | 0 | Apache-2.0 | 2020-05-03T16:40:11 | 2020-05-03T16:40:11 | null | UTF-8 | Python | false | false | 4,572 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch.nn as nn
from torchdp.utils import module_inspection as mi
from torchvision import models
class utils_ModelInspector_test(unittest.TestCase):
    """Tests for torchdp.utils.module_inspection.ModelInspector.

    Each test builds an inspector from a predicate over nn.Module nodes and
    checks both the validate() verdict and the collected `violators` list.
    """
    def setUp(self):
        """Define the module predicates shared by the tests below."""
        def pred_supported(module):
            # Positive predicate: module is one of the supported layer types.
            return isinstance(module, (nn.Conv2d, nn.Linear))
        def pred_not_unsupported(module):
            # Negative predicate: module is not a 2d/3d batch-norm layer.
            return not isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d))
        def pred_requires_grad(module):
            # True when every direct (non-recursed) parameter requires grad.
            return all(
                p.requires_grad for p in module.parameters(recurse=False)
            )
        self.pred_supported = pred_supported
        self.pred_not_unsupported = pred_not_unsupported
        # Frozen modules pass regardless; trainable ones must not be unsupported.
        self.pred_mix = lambda m: (not pred_requires_grad(m)) or pred_not_unsupported(m)
    def test_validate_basic(self):
        """A module failing the predicate makes validate() return False."""
        inspector = mi.ModelInspector(
            "pred", lambda model: isinstance(model, nn.Linear)
        )
        model = nn.Conv1d(1, 1, 1)
        valid = inspector.validate(model)
        self.assertFalse(valid, inspector.violators)
    def test_validate_positive_predicate_valid(self):
        # test when a positive predicate (e.g. supported) returns true
        inspector = mi.ModelInspector("pred", self.pred_supported)
        model = nn.Conv2d(1, 1, 1)
        valid = inspector.validate(model)
        self.assertTrue(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 0, f"violators = {inspector.violators}")
    def test_validate_positive_predicate_invalid(self):
        # test when a positive predicate (e.g. supported) returns false
        inspector = mi.ModelInspector("pred", self.pred_supported)
        model = nn.Conv1d(1, 1, 1)
        valid = inspector.validate(model)
        self.assertFalse(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 1, f"violators = {inspector.violators}")
    def test_validate_negative_predicate_ture(self):
        # NOTE(review): "ture" is a typo for "true" in the method name.
        # test when a negative predicate (e.g. not unsupported) returns true
        inspector = mi.ModelInspector("pred1", self.pred_not_unsupported)
        model = nn.Sequential(nn.Conv2d(1, 1, 1), nn.Linear(1, 1))
        valid = inspector.validate(model)
        self.assertTrue(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 0)
    def test_validate_negative_predicate_False(self):
        # test when a negative predicate (e.g. not unsupported) returns false
        inspector = mi.ModelInspector("pred", self.pred_not_unsupported)
        model = nn.Sequential(nn.Conv2d(1, 1, 1), nn.BatchNorm2d(1))
        valid = inspector.validate(model)
        self.assertFalse(valid)
        list_len = len(inspector.violators)
        self.assertEqual(list_len, 1, f"violators = {inspector.violators}")
    def test_validate_mix_predicate(self):
        # check with a mix predicate not requires grad or is not unsupported
        inspector = mi.ModelInspector("pred1", self.pred_mix)
        model = nn.Sequential(nn.Conv2d(1, 1, 1), nn.BatchNorm2d(1))
        # Freeze the batch-norm so the "not requires grad" clause lets it pass.
        for p in model[1].parameters():
            p.requires_grad = False
        valid = inspector.validate(model)
        self.assertTrue(valid)
    def test_check_everything_flag(self):
        # check to see if a model does not containt nn.sequential
        inspector = mi.ModelInspector(
            "pred",
            lambda model: not isinstance(model, nn.Sequential),
            check_leaf_nodes_only=False,
        )
        model = nn.Sequential(nn.Conv1d(1, 1, 1))
        valid = inspector.validate(model)
        self.assertFalse(valid, f"violators = {inspector.violators}")
    def test_complicated_case(self):
        """Two inspectors over resnet50: accepted vs. flagged batch-norms."""
        def good(x):
            return isinstance(x, (nn.Conv2d, nn.Linear))
        def bad(x):
            return isinstance(x, nn.modules.batchnorm._BatchNorm)
        inspector1 = mi.ModelInspector("good_or_bad", lambda x: good(x) or bad(x))
        inspector2 = mi.ModelInspector("not_bad", lambda x: not bad(x))
        model = models.resnet50()
        valid = inspector1.validate(model)
        self.assertTrue(valid, f"violators = {inspector1.violators}")
        self.assertEqual(
            len(inspector1.violators), 0, f"violators = {inspector1.violators}"
        )
        valid = inspector2.validate(model)
        self.assertFalse(valid, f"violators = {inspector2.violators}")
        # resnet50 contains exactly 53 batch-norm layers.
        self.assertEqual(
            len(inspector2.violators), 53, f"violators = {inspector2.violators}"
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ec2c912e7c7005b94043d783105dc593306f7d9e | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_2/models/directory_service_role.py | ecaf432596bb712e118a0b77f274050314c70b13 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,947 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class DirectoryServiceRole(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # NOTE: swagger-codegen generated model -- prefer regenerating over hand-editing.
    swagger_types = {
        'group': 'str',
        'group_base': 'str',
        'id': 'str',
        'role': 'Reference'
    }
    attribute_map = {
        'group': 'group',
        'group_base': 'group_base',
        'id': 'id',
        'role': 'role'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        group=None,  # type: str
        group_base=None,  # type: str
        id=None,  # type: str
        role=None,  # type: models.Reference
    ):
        """
        Keyword args:
            group (str): Common Name (CN) of the directory service group containing users with authority level of the specified role name.
            group_base (str): Specifies where the configured group is located in the directory tree.
            id (str): A non-modifiable, globally unique ID chosen by the system.
            role (Reference): A reference to the role; can be any role that exists on the system.
        """
        # Only assign attributes that were explicitly provided; unset ones
        # remain Property placeholders and read back as None (see __getattribute__).
        if group is not None:
            self.group = group
        if group_base is not None:
            self.group_base = group_base
        if id is not None:
            self.id = id
        if role is not None:
            self.role = role
    def __setattr__(self, key, value):
        # Reject keys that are not declared model attributes.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `DirectoryServiceRole`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Unset attributes hold Property sentinels; expose them as None.
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Recurse into nested models, lists and dicts of models.
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(DirectoryServiceRole, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DirectoryServiceRole):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"msholes@purestorage.com"
] | msholes@purestorage.com |
e89f0990de02f0eb310ce96228fe222f38c31b01 | a0784b1a66a6c1a89ee8a75e32cd48d2c168931b | /setup.py | 40566faaa85ccd97f99feba5f88e28650bf18897 | [
"MIT"
] | permissive | cltrudeau/purdy | ebe5d8b556dadc0a4eb04018826c066b83617f71 | 4ff2d5b33771266d46260ee9ba6503bb4895ab2f | refs/heads/master | 2023-07-08T08:23:08.409053 | 2023-06-29T21:37:29 | 2023-06-29T21:37:29 | 210,162,520 | 10 | 3 | MIT | 2021-03-10T21:55:26 | 2019-09-22T14:40:17 | Python | UTF-8 | Python | false | false | 1,608 | py | import os, sys, re
from purdy import __version__
readme = os.path.join(os.path.dirname(__file__), 'README.rst')
long_description = open(readme).read()
SETUP_ARGS = dict(
name='purdy',
version=__version__,
description=('Terminal based code snippet display tool '),
long_description=long_description,
url='https://github.com/cltrudeau/purdy',
author='Christopher Trudeau',
author_email='ctrudeau+pypi@arsensa.com',
license='MIT',
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='code display',
py_modules = ['purdy',],
scripts=['bin/purdy', 'bin/subpurdy', 'bin/pat', 'bin/prat'],
install_requires = [
'asttokens>=2.0.4',
'Pygments>=2.14.0',
'urwid>=2.0.1',
'colored>=1.4.2',
],
tests_require = [
'waelstow>=0.10.2',
]
)
if __name__ == '__main__':
from setuptools import setup, find_packages
SETUP_ARGS['packages'] = find_packages()
setup(**SETUP_ARGS)
| [
"ctrudeau@arsensa.com"
] | ctrudeau@arsensa.com |
5bd2492ab863dfbedab466259b236c2258d8fbbc | dda618067f13657f1afd04c94200711c1920ea5f | /scoop/rogue/models/blocklist.py | 94cccf5a3c7319106c15d5b4887e0aa255763a05 | [] | no_license | artscoop/scoop | 831c59fbde94d7d4587f4e004f3581d685083c48 | 8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7 | refs/heads/master | 2020-06-17T20:09:13.722360 | 2017-07-12T01:25:20 | 2017-07-12T01:25:20 | 74,974,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,857 | py | # coding: utf-8
from annoying.fields import AutoOneToOneField
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from scoop.core.abstract.core.data import DataModel
from scoop.core.abstract.core.datetime import DatetimeModel
from scoop.core.util.data.typeutil import make_iterable
from scoop.core.util.model.model import SingleDeleteManager
from scoop.core.util.shortcuts import addattr
DEFAULT_LIST = 'blacklist'
class BlocklistManager(SingleDeleteManager):
    """ Manager for user block lists """
    # Getters
    def get_by_user(self, user):
        """ Return the blocklist object for a user (the global list when user is None) """
        return self.get_or_create(user=user)[0] if user is not None else self.get_global()
    def get_global(self):
        """ Return the global (site-wide) blocklist object """
        blocklist, _ = self.get_or_create(user=None)
        return blocklist
    def is_safe(self, sender, recipients, name=None):
        """ Return whether no block list stands between sender and any of the recipients """
        # NOTE(review): the Blocklist Meta declares the codename 'bypass_block',
        # so 'rogue.can_bypass_blocks' below can never match -- confirm intent.
        if sender.is_staff or getattr(sender, 'bot', False) or sender.has_perm('rogue.can_bypass_blocks'):
            return True
        if self.is_globally_listed(sender):
            return False
        recipients = make_iterable(recipients)
        blocklists = self.filter(user__in=recipients)
        sender_blocks = sender.blocklist.get_data(name or DEFAULT_LIST) or []
        # Blocked in either direction means "not safe".
        for recipient in recipients:
            if recipient.pk in sender_blocks:
                return False
        for blocklist in blocklists:
            items = blocklist.get_data(name or DEFAULT_LIST) or []
            if sender.pk in items:
                return False
        return True
    def exists(self, recipient, sender, name=None):
        """ Return whether recipient has a block list entry targeting sender """
        blocklist = self.get_by_user(recipient)
        return blocklist.is_listed(sender, name)
    def get_user_ids(self, user, name=None):
        """ Return the ids of the users present in one of user's block lists """
        blocklist = self.get_by_user(user)
        return blocklist.get_ids(name)
    def users_listed_by(self, user, name=None):
        """ Return the users present in one of user's block lists """
        from django.contrib.auth import get_user_model
        # Block list
        ids_listed = self.get_user_ids(user, name=name)
        return get_user_model().objects.filter(pk__in=ids_listed)
    def is_globally_listed(self, user, name=None):
        """ Return whether a user appears in the global block list """
        return self.exists(None, user, name=name)
    def exclude_users(self, queryset, user, name=None):
        """ Return the queryset with the users of a block list excluded """
        if queryset.model.__name__ in {'User', 'Profile'}:
            return queryset.exclude(pk__in=self.get_user_ids(user, name))
        return queryset
    # Setters
    def add(self, recipient, sender, name=None):
        """ Add a user to one of recipient's block lists """
        blocklist = self.get_by_user(recipient)
        return blocklist.add(sender, name)
    def remove(self, recipient, sender, name=None):
        """ Remove a user from one of recipient's block lists """
        blocklist = self.get_by_user(recipient)
        return blocklist.remove(sender, name)
    def toggle(self, recipient, sender, name=None):
        """ Toggle a user's membership in one of recipient's block lists """
        blocklist = self.get_by_user(recipient)
        return blocklist.toggle(sender, name)
    def clear(self, user, name=None):
        """ Reset one of user's block lists """
        blocklist = self.get_by_user(user)
        return blocklist.clear(name=name)
class Blocklist(DatetimeModel, DataModel):
    """ Block list: lets a user (or the site, when user is None) block members """
    # Constants: the named lists kept in the data payload
    DATA_KEYS = ['blacklist', 'hidelist']
    # Fields
    user = AutoOneToOneField(settings.AUTH_USER_MODEL, null=True, related_name='blocklist', on_delete=models.CASCADE, verbose_name=_("Blocker"))
    objects = BlocklistManager()

    # Getters
    @addattr(short_description=pgettext_lazy('users', "Blacklisted"))
    def get_count(self, name=None):
        """ Return the number of entries in one block list """
        return len(self.get_data(name or DEFAULT_LIST, []))

    @addattr(short_description=pgettext_lazy('users', "Total"))
    def get_total_count(self):
        """ Return the total number of entries across every block list """
        return sum([self.get_count(key) for key in self.DATA_KEYS if 'list' in key])

    def get_ids(self, name=None):
        """ Return the user ids stored in a block list """
        return self.get_data(name or DEFAULT_LIST, {}).keys()

    def is_listed(self, sender, name=None):
        """ Return whether a user (instance or pk) is blocked by this block list """
        return getattr(sender, 'pk', sender) in self.get_ids(name)

    def get_list_date(self, sender, name=None):
        """ Return the date a user was added to the block list, or None """
        data = self.get_data(name or DEFAULT_LIST, {})
        if getattr(sender, 'pk', sender) in data:
            # Entries are stored as pk -> [date-added]
            return data[getattr(sender, 'pk', sender)][0]
        return None

    # Setters
    def add(self, sender, name=None):
        """
        Add a user to a block list.
        Staff users can never be added to a block list.
        :type sender: scoop.user.models.User or int
        :param name: name of the block list
        """
        pk = getattr(sender, 'pk', sender)
        if pk not in self.get_ids(name) and not getattr(sender, 'is_staff', False):
            now = timezone.now()
            data = self.get_data(name or DEFAULT_LIST, {})
            data[pk] = [now]
            success = self.set_data(name or DEFAULT_LIST, data)
            if success:
                self.save()
                return True
        return False

    def remove(self, sender, name=None):
        """
        Remove a user from a block list.
        :type sender: scoop.user.models.User or int
        :param name: name of the block list
        """
        if getattr(sender, 'pk', sender) in self.get_ids(name):
            data = self.get_data(name or DEFAULT_LIST)
            del data[getattr(sender, 'pk', sender)]
            self.set_data(name or DEFAULT_LIST, data)
            self.save()
            return True
        return False

    def toggle(self, sender, name=None):
        """
        Toggle a user's membership in a block list.
        :type sender: scoop.user.models.User or int
        :param name: name of the block list
        :returns: False when the user was removed, True when added
        """
        if self.is_listed(sender, name or DEFAULT_LIST):
            self.remove(sender, name or DEFAULT_LIST)
            return False
        else:
            self.add(sender, name or DEFAULT_LIST)
            return True

    def clear(self, name=None):
        """
        Reset a block list.
        :returns: True when a change was needed, False otherwise
        """
        # Bug fix: the previous implementation read self.data[name] directly,
        # which raised KeyError for the default name=None and for lists that
        # were never written; use the get_data accessor like the other methods.
        if self.get_data(name or DEFAULT_LIST, {}) != {}:
            self.set_data(name or DEFAULT_LIST, {}, save=True)
            return True
        return False

    # Overrides
    def save(self, *args, **kwargs):
        """ Persist the object, refreshing its timestamp first """
        self.time = self.now()
        super(Blocklist, self).save(*args, **kwargs)

    # Metadata
    class Meta:
        verbose_name = _("blocklist")  # fixed: was the plural form "blocklists"
        verbose_name_plural = _("blocklists")
        # NOTE(review): BlocklistManager.is_safe checks the permission string
        # 'rogue.can_bypass_blocks', which does not match this codename.
        permissions = [['bypass_block', "Can bypass blocks"]]
        app_label = "rogue"
| [
"steve.kossouho@gmail.com"
] | steve.kossouho@gmail.com |
b6bc63045d050bbbf0d2fb8f082e5bbf7c7d4687 | ebd9c249d446d809abc9a0f3e4593f34922a1b93 | /lintcode/823_input_stream.py | 51b3d8039db3e05f53130546eef3a2a40a6ec09b | [] | no_license | jaychsu/algorithm | ac7a9dc7366f58c635a68bc46bf1640d2f5ff16d | 91892fd64281d96b8a9d5c0d57b938c314ae71be | refs/heads/master | 2023-05-11T00:40:39.237813 | 2022-09-14T07:43:12 | 2022-09-14T07:43:12 | 106,277,156 | 143 | 39 | null | 2022-09-14T07:43:13 | 2017-10-09T11:51:48 | Python | UTF-8 | Python | false | false | 1,810 | py | """
Two-pointer backward scan ('<' acts as a backspace) — compares both streams from the end
time: O(n)
space: O(1)
"""
class Solution:
    def inputStream(self, a, b):
        """
        Compare two keyboard input streams in which '<' is a backspace key.

        Scans both strings from the end with two pointers, skipping every
        character erased by pending backspaces, so no buffer is built:
        O(n) time, O(1) extra space.

        Fix over the original: the main loop compared characters even after a
        pointer had been drained below 0, silently relying on Python's
        negative indexing; a guard now breaks out instead.

        :type a: str
        :type b: str
        :rtype: str, 'NO' or 'YES'
        """
        RES = ('NO', 'YES')
        if a == b == '':
            return RES[1]
        BACK = '<'

        def _skip(s, k, pending):
            # Move k left past every character erased by backspaces,
            # carrying the count of backspaces still to apply.
            while k >= 0 and (s[k] == BACK or pending):
                pending += 1 if s[k] == BACK else -1
                k -= 1
            return k, pending

        i, j = len(a) - 1, len(b) - 1
        acnt = bcnt = 0  # pending backspaces in a and b
        while i >= 0 and j >= 0:
            i, acnt = _skip(a, i, acnt)
            j, bcnt = _skip(b, j, bcnt)
            if i < 0 or j < 0:
                # At least one stream is exhausted; decide after the loop.
                break
            if a[i] != b[j]:
                return RES[0]
            i -= 1
            j -= 1
        # Drain whatever the main loop did not reach.
        i, acnt = _skip(a, i, acnt)
        j, bcnt = _skip(b, j, bcnt)
        # Streams match iff both pointers ran off the front together.
        return RES[int(i == j)]
"""
Stack
time: O(n)
space: O(n)
"""
class Solution:
    def inputStream(self, a, b):
        """
        Compare two keyboard input streams in which '<' is a backspace key.

        Each stream is replayed onto a stack: a normal character is pushed,
        and a '<' pops the most recent character (if any). The streams match
        exactly when the resulting stacks are equal. O(n) time, O(n) space.

        :type a: str
        :type b: str
        :rtype: str, 'NO' or 'YES'
        """
        if a == '' and b == '':
            return 'YES'
        if a is None or b is None:
            return 'NO'

        def replay(stream):
            # Build the final text of one stream as a character stack.
            typed = []
            for ch in stream:
                if ch != '<':
                    typed.append(ch)
                elif typed:
                    # Backspace on an empty buffer is a no-op.
                    typed.pop()
            return typed

        return 'YES' if replay(a) == replay(b) else 'NO'
| [
"hi@jaych.su"
] | hi@jaych.su |
f3c9db1941951eeea6159cc39ba7e755aeae4d03 | e18a353582609732c795401f1a01bc762bd939f2 | /top/python/get_mumubb.py | bdc0e23a32be777ced04b9beba3a374a9629f348 | [] | no_license | s-farry/workspaces | 06741807bb464bb0712d52108c2d1b7ae62b1353 | 0dcf3868dcbe110206ea88ff5c9e04a3b44b1ca1 | refs/heads/master | 2020-04-03T00:45:39.152227 | 2017-06-15T16:33:33 | 2017-06-15T16:33:33 | 64,213,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from ROOT import *
from Jawa import *
from PlotTools import *
from Utils import Bunch
# Input ntuples: magnet-down (MD) and magnet-up (MU) 2015 samples.
f = TFile("/hepstore/sfarry/GridOutput/2520/A2MuMuJet.MD.2015.root")
g = TFile("/hepstore/sfarry/GridOutput/2521/A2MuMuJet.MU.2015.root")
t = f.Get("ZMuMu/DecayTree")
u = g.Get("ZMuMu/DecayTree")
# Require both jets to carry the boson-jet tag.
selection = TCut("boson_jet_tag == 1 && boson_jet2_tag == 1")
# Variables to histogram (renamed from `vars` to avoid shadowing the builtin).
variables = [
    Bunch(name='m', var='boson_M', bins = 20, lo = 10, hi = 100)
]
# Fill one template over both polarities with the selection applied.
a2mumu = Template("a2mumu")
a2mumu.SetSelCut(selection)
a2mumu.AddTree(t)
a2mumu.AddTree(u)
for v in variables:
    a2mumu.AddVar(v.name, v.var, v.bins, v.lo, v.hi)
a2mumu.Run()
a2mumu.SaveToFile()
| [
"sfarry@hep.ph.liv.ac.uk"
] | sfarry@hep.ph.liv.ac.uk |
b7e6835eb984e22224ba8954a80b3c2d30e12e9e | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/Ks2MuMu/python/Ks2MuMu/Joban.py | 4e506118ebbb17400c7d16486cac5815866d3a10 | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from ROOT import *
from Urania import *
AccessPackage("Bs2MuMu")
from XTuple import *
# Source ntuple: trigger-unbiased minimum-bias Ks->pipi candidates.
f = TFile("KsPiPi_MinBiasData_TriggerUnbiased_ntuple.root")
t = f.Get("Kspipi")
# Output tuple: event index plus the AP_* and lifetime branches copied below.
tup = XTuple("Joban",["evt/F", "AP_pt/F","AP_alpha/F", "time/F"])
i = 0  # running event index written to the 'evt' branch
for entry in t:
    tup.fillItem("AP_pt",entry.AP_pt)
    tup.fillItem("AP_alpha",entry.AP_alpha)
    # 'time' is taken from the lifetime branch Blife_ps (picoseconds, presumably).
    tup.fillItem("time",entry.Blife_ps)
    tup.fillItem("evt",i)
    i += 1
    tup.fill()
tup.close()
| [
"liblhcb@cern.ch"
] | liblhcb@cern.ch |
506ea85ae8a5646591bb9ace63949654f6b5e4e6 | d10dc6ee16ddcbf4cf6dc4ce43c332d6d375f2ee | /ccompras/apps/home/views.py | dd58c96127a153d4d49703261eaed95d68c2627a | [] | no_license | Alfredynho/DjCompras | 993bec2195734af911e0355327c477aa8a49c9d6 | d6829d2b5efe3ff871bab449d8e440908136d71e | refs/heads/master | 2021-01-11T11:07:31.933034 | 2016-04-08T10:52:12 | 2016-04-08T10:52:12 | 55,133,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | from django.shortcuts import render, render_to_response
from django.template import RequestContext
from ccompras.apps.ventas.models import producto
from ccompras.apps.home.forms import ContactForm
from django.core.mail import EmailMultiAlternatives
# Create your views here.
def index_view(request):
    """Render the site landing page."""
    return render_to_response('home/index.html',context_instance=RequestContext(request))
def about_view(request):
    """Render the about page with a sample message in the template context."""
    mensaje = "esto es un mensaje desde mi vista"
    ctx = {
        'msg':mensaje
    }
    return render_to_response('home/about.html',ctx,context_instance=RequestContext(request))
def home(request):
    """Render the home page."""
    return render_to_response('home/home.html',context_instance=RequestContext(request))
def productos_view(request):
    """List the active products (status=True) on the catalogue page."""
    prod = producto.objects.filter(status=True)
    ctx = {'productos':prod}
    return render_to_response('home/productos.html',ctx,context_instance=RequestContext(request))
def contacto_view(request):
    """Contact-form view: on a valid POST, e-mail the message to the admin.

    Renders the same template for GET and POST; `info_enviado` tells the
    template whether a message was just sent.
    """
    info_enviado = False
    email = ""
    titulo = ""
    texto = ""
    if request.method == 'POST':
        formulario = ContactForm(request.POST)
        if formulario.is_valid():
            info_enviado = True
            email = formulario.cleaned_data['Email']
            titulo = formulario.cleaned_data['Titulo']
            texto = formulario.cleaned_data['Texto']
            # Configure the e-mail sent to the (Gmail) admin address.
            to_admin = 'callizayagutierrezalfredo@gmail.com'
            # NOTE(review): user-supplied email/texto are interpolated into
            # HTML unescaped -- confirm whether they should be escaped first.
            html_content = "Informacion recibida de [%s] <br><br><br>***Mensaje****<br><br>%s"%(email,texto)
            msg = EmailMultiAlternatives('Correo de Contacto',html_content,'from@server.com',[to_admin])
            msg.attach_alternative(html_content,'text/html') # Attach the content as HTML
            msg.send() # Send the e-mail
    else:
        formulario = ContactForm()
    ctx ={'form':formulario,'email':email,'titulo':titulo,'texto':texto,'info_enviado':info_enviado}
    return render(request,'home/contacto.html',ctx)
| [
"callizayagutierrezalfredo@gmail.com"
] | callizayagutierrezalfredo@gmail.com |
2a44f2e6a644cd7d7898e30231a59efd400ac4ea | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_ploughmen.py | 0bce07a61a1b6df4625d86e50109e91c254c23c2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py |
from xai.brain.wordbase.nouns._plowman import _PLOWMAN
# class header
class _PLOUGHMEN(_PLOWMAN, ):
    """Word entry 'PLOUGHMEN': a spelling/plural variant of _PLOWMAN."""
    def __init__(self,):
        _PLOWMAN.__init__(self)
        self.name = "PLOUGHMEN"   # headword for this entry
        self.specie = 'nouns'     # word class of the entry
        self.basic = "plowman"    # base form this variant maps onto
        self.jsondata = {}        # no extra data for this variant
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6240cebc86ce6d41872d24623533177bf895670c | 694c187c8a00bee8c670c1690170099bad9b16b3 | /templeland.py | e7334bf535a376a68ca8f56696553e012eb666e5 | [] | no_license | ajayvenkat10/Competitive | 301f220b6d296f7e34328f192c43c4d7ef208cb1 | 14f2ecebe10eb19f72cc412dd0c414b3b1de9b4d | refs/heads/master | 2022-11-20T14:31:33.590099 | 2020-07-23T15:39:14 | 2020-07-23T15:39:14 | 281,599,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | t = int(input())
# For each of the t test cases (t was read above), a "temple" is valid when
# the strip heights are odd in count, start and end at 1, climb by exactly 1
# up to a maximal middle strip, and mirror symmetrically around it.
# Cleanup over the original: the outer loop variable no longer shadows the
# inner parsing index, and the ans-flag bookkeeping is folded into one
# short-circuiting boolean expression with identical yes/no output.
for _case in range(t):
    N = int(input())
    tokens = input().split()
    # Only the first N tokens of the line are part of the temple.
    strips = [int(tokens[k]) for k in range(N)]
    mid = N // 2
    ok = (
        N % 2 == 1
        and strips[0] == 1
        and strips[0] == strips[-1]
        and strips[mid] == max(strips)
        # The left half must increase by exactly 1 at every step.
        and all(strips[k] - strips[k - 1] == 1 for k in range(1, mid + 1))
        # The right half must mirror the left half.
        and strips[:mid] == strips[mid + 1:][::-1]
    )
    print("yes" if ok else "no")
| [
"37923623+ajayvenkat10@users.noreply.github.com"
] | 37923623+ajayvenkat10@users.noreply.github.com |
0b0301d80d6a9bdbe6e753e89f1201d6161efb00 | e53c13f2236960456a412af2c2617148a2c6153e | /ethnode/celeryapp.py | f43e1ee23bb92059029844b910fdaa445c838c73 | [] | no_license | ethgis/ethnode | d57b9660174acb737f96aea8013717b1f1a00ea1 | 9fe6f3add95bb5c5fb6dc9d2135f3ec48547c981 | refs/heads/master | 2021-08-29T17:19:44.169638 | 2017-12-14T12:23:04 | 2017-12-14T12:23:04 | 109,628,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
from celery import Celery
# Point Celery at the Django settings module before the app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ethnode.settings')
app = Celery('ethnode')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
| [
"ingenieroariel@gmail.com"
] | ingenieroariel@gmail.com |
e39c1c212e8848b309a577602ba1e9f51e7615a1 | 60290cb3fdb4d4a97a38f921b7de2160c2af70de | /utest/editor/test_clipboard.py | 415b75aad0c6487519b8702649a102f8308e8b8c | [
"Apache-2.0"
] | permissive | crylearner/RIDE3X | 231431222dc679b38831bd75db5a81062327e91c | 767f45b0c908f18ecc7473208def8dc7489f43b0 | refs/heads/master | 2021-01-19T12:22:59.553847 | 2017-08-23T15:11:17 | 2017-08-23T15:11:17 | 100,781,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | import os
import unittest
from nose.tools import assert_equals
# Needed to be able to create wx components
from resources import PYAPP_REFERENCE as _
from robotide.context import IS_WINDOWS
from robotide.editor.clipboard import _GridClipboard
if not IS_WINDOWS:
    class TestGridClipBoard(unittest.TestCase):
        """Checks _GridClipboard round-trips strings and cell grids (skipped on Windows)."""
        def test_with_string_content(self):
            self._test_clipboard('Hello, world!', 'Hello, world!')
        def test_with_list_content(self):
            # A grid (list of rows) is flattened with tab-separated cells.
            self._test_clipboard([['Hello', 'world!']], 'Hello\tworld!')
        def test_with_multiple_rows(self):
            # Rows are joined with newlines, cells with tabs.
            self._test_clipboard([['Hello', 'world!'], ['Another', 'row']],
                                 'Hello\tworld!\nAnother\trow')
        def _test_clipboard(self, content, expected=''):
            """Put *content* on the clipboard and compare the stored text."""
            clipb = _GridClipboard()
            clipb.set_contents(content)
            # Expected text uses the platform line separator.
            assert_equals(clipb._get_contents(),
                          expected.replace('\n', os.linesep))
| [
"sunshyran@gmail.com"
] | sunshyran@gmail.com |
627bce2f20fb134fccac1dafa3531fcd824aa73e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/330/usersdata/301/93575/submittedfiles/lista1.py | ab9a94ef91b97d13a029f17baed6ea875918b4b4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Read n, then n integers; sum the even values into soma1 and the odd
# values into soma2.
n=int(input('digite o valor de n: '))
a=[]
soma1=0
soma2=0
# NOTE(review): c1 and c2 are never used below -- leftover counters?
c1=0
c2=0
for i in range (0,n,1):
    a.append(int(input('Digite o número: ')))
for i in range (0,n,1):
    if a[i]%2==0:
        soma1+=a[i]
        # NOTE(review): printing inside the loop shows every partial sum;
        # confirm whether the totals were meant to be printed after the loop.
        print(soma1)
    else:
        soma2+=a[i]
        print(soma2)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e306d7ab0034c1a495ace1e6d3ffc29368c1c07b | 0420ce2fc8799d5fbd6e96313e6716f5e2ef825b | /bagogold/fundo_investimento/urls.py | 3ce146b0130e1143243e3427a8cc656404895e28 | [] | no_license | nizbel/bag-of-gold | 1da10acef4d73b8426ca3329b37a28c5f9587af4 | a3fd89eb47d33d546bd91947f033d71218c8700f | refs/heads/master | 2022-11-13T01:07:26.934813 | 2020-01-14T16:00:16 | 2020-01-14T16:00:16 | 275,689,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
from django.conf.urls import url
import views
urlpatterns = [
url(r'^detalhar-fundo/(?P<id_fundo>\d+)/$', views.detalhar_fundo_id, name='detalhar_fundo_id'),
url(r'^detalhar-fundo/(?P<slug_fundo>[-\w]+)/$', views.detalhar_fundo, name='detalhar_fundo'),
url(r'^editar-operacao/(?P<id_operacao>\d+)/$', views.editar_operacao_fundo_investimento, name='editar_operacao_fundo_investimento'),
url(r'^historico/$', views.historico, name='historico_fundo_investimento'),
url(r'^inserir-operacao-fundo-investimento/$', views.inserir_operacao_fundo_investimento, name='inserir_operacao_fundo_investimento'),
url(r'^listar-fundos/$', views.listar_fundos, name='listar_fundo_investimento'),
url(r'^listar-fundos-por-nome/$', views.listar_fundos_por_nome, name='listar_fundos_por_nome'),
url(r'^listar-historico-fundo-investimento/(?P<id_fundo>\d+)/$', views.listar_historico_fundo_investimento, name='listar_historico_fundo_investimento'),
url(r'^painel/$', views.painel, name='painel_fundo_investimento'),
url(r'^sobre/$', views.sobre, name='sobre_fundo_investimento'),
url(r'^verificar-historico-fundo-na-data/$', views.verificar_historico_fundo_na_data, name='verificar_historico_fundo_na_data'),
] | [
"kingbowserii@gmail.com"
] | kingbowserii@gmail.com |
f356700d720093d7363d0e8af6602a0d4b53452c | 8f26514c451e2398d5e3688c184ea74d1dad21b2 | /month_02/teacher/day03/file_write.py | 0715768cdacd3e3c0fcdcb38b8a2deee1c09b74e | [] | no_license | CircularWorld/Python_exercise | 25e7aebe45b4d2ee4e3e3afded082c56483117de | 96d4d9c5c626f418803f44584c5350b7ce514368 | refs/heads/master | 2022-11-21T07:29:39.054971 | 2020-07-20T10:12:24 | 2020-07-20T10:12:24 | 281,081,559 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | """
文件写操作示例
"""
# 写方式打开
# f = open("file.txt","w")
# f = open("file.txt","a") # 追加
# 读写方式打开,写入文本会从头开始往后覆盖内容
# f = open("file.txt","r+")
f = open("file.txt",'w')
# 写入操作了
n = f.write("Hello world\n")
print("写入了 %d 个字节"%n)
n = f.write("Hello Kitty\n")
print("写入了 %d 个字节"%n)
# 将列表中的内容分别写入到文件中
l = ["哈喽,死鬼\n","哎呀,干啥\n"]
f.writelines(l)
# 关闭
f.close() | [
"jiayuhaowork@163.com"
] | jiayuhaowork@163.com |
616478b663bd431079caa7f8d6a770e823eab73b | a7dc07cadaf735a66f459831cdc4c4d0dbbafcd7 | /land_new_info.py | cbee34fc5949c5eb84d70cbef1230fe943ab79ba | [] | no_license | ymJung/study | 2a15f2e036fc9a5c2c78ea7783a73143e443f4b8 | 37e0bb45b7250ed6ee777a6a48e504ad3b12165e | refs/heads/master | 2023-08-31T18:11:01.534192 | 2023-08-20T14:08:10 | 2023-08-20T14:08:10 | 10,260,555 | 0 | 0 | null | 2014-10-20T00:46:50 | 2013-05-24T06:25:20 | null | UTF-8 | Python | false | false | 2,540 | py | import requests
from bs4 import BeautifulSoup
import configparser
import random
import sys
import datetime
import time
# Read the crawl targets from config.cfg: 'URLS' is a comma-separated list
# of alternating (page URL, price cap) values, consumed pairwise in get_new().
cf = configparser.ConfigParser()
cf.read('config.cfg')
landUrls = cf.get('land_url','URLS')
land_price_tuples = landUrls.split(',')
def get_sale_products(findUrl, limit_price) :
    """Scrape one listing page and return rows priced below limit_price.

    Each result is a dict with keys: name, price, dong, floor, budongsan,
    contact.  Prices are compared numerically after stripping the
    thousands separator.

    NOTE(review): the CSS classes/ids below are tied to the scraped
    site's markup -- verify they still match before relying on this.
    """
    soup = BeautifulSoup(requests.get(findUrl).text, "html.parser")
    table = soup.find("table", { "class" : "sale_list _tb_site_img NE=a:cpm"})
    trs = table.find("tbody").find_all('tr')
    # Complex (building) name taken from the page's list layer.
    name = soup.find(id='complexListLayer').find('a', {'class':'on'}).text.strip()
    results = list()
    for tr in trs:
        try :
            price = tr.find('td', {'class':'num align_r'}).find('strong').text
            dong = tr.find_all('td', {'class':"num2"})[0].text
            floor = tr.find_all('td', {'class':"num2"})[1].text
            budongsan = tr.find('td', {'class':'contact'}).find_all('span')[0]['title']
            contact = tr.find('td', {'class':'contact'}).find_all('span')[1].text
            crol = {'name':name,'price':price,'dong':dong,'floor':floor,'budongsan':budongsan,'contact':contact}
            # Keep only rows strictly cheaper than the configured cap.
            if int(limit_price) > int(price.replace(',','')) :
                results.append(crol)
            else :
                continue
        # Rows missing an expected cell are skipped (best-effort scrape).
        except AttributeError:
            continue
    return results
def get_line_up(products):
    """Render each product dict as "[name] \\t{dict}" on its own line."""
    lines = ['[{}] \t{}\n'.format(item['name'], item) for item in products]
    return ''.join(lines)
def get_new():
    """Collect up to three qualifying listings per (url, price-cap) pair."""
    listings = []
    pair_count = int(len(land_price_tuples) / 2)
    for i in range(pair_count):
        url = land_price_tuples[2 * i]
        cap = land_price_tuples[2 * i + 1]
        # Only the first three matches per page are kept.
        listings.extend(get_sale_products(url, cap)[:3])
    return listings
def is_break():
    """Return True once today's retry budget is exhausted."""
    return get_date_retry_limit(datetime.date.today()) < 0
def get_date_retry_limit(date):
    """Decrement and return the remaining retry budget for *date*.

    A new entry starts at RETRY_LIMIT_CNT (without decrementing); existing
    entries are decremented by one on every call.

    NOTE(review): RETRY_LIMIT and RETRY_LIMIT_CNT are not defined anywhere
    in this file chunk -- this raises NameError unless they are defined
    elsewhere; confirm before use.
    """
    dateStr = str(date)
    if dateStr in RETRY_LIMIT:
        print('reduce today limit ', dateStr, RETRY_LIMIT[dateStr])
        RETRY_LIMIT[dateStr] -= 1
    else:
        print('make today limit ', dateStr)
        RETRY_LIMIT.update({dateStr: RETRY_LIMIT_CNT})
    return RETRY_LIMIT[dateStr]
import telegram
# Bot credentials come from the same config.cfg read above.
TOKEN = cf.get('telegram', 'TOKEN')
VALID_USER = cf.get('telegram', 'VALID_USER')
tb = telegram.Bot(token=TOKEN)
# NOTE(review): check_flag and seen_set are never used in this chunk.
check_flag = False
seen_set = set()
# Scrape current listings and send them as one concatenated message.
products = get_new()
result = ''
for product in products:
    result += product['name'] + ':::' + product['price']
tb.sendMessage(chat_id=VALID_USER, text=result)
| [
"metalbird0@gmail.com"
] | metalbird0@gmail.com |
0f7769bb3595630e797871ee52e7f418a9816002 | 14956dbed8ae4fba1d65b9829d9405fcf43ac698 | /Cyber Security/Capture the Flag Competitions/2020/Cyberthon 2020/Livestream Training/RE/Catch No Ball/solve.py | 3dadfa22b242c6738c1a936cb4091cbcf0dd8694 | [] | no_license | Hackin7/Programming-Crappy-Solutions | ae8bbddad92a48cf70976cec91bf66234c9b4d39 | ffa3b3c26a6a06446cc49c8ac4f35b6d30b1ee0f | refs/heads/master | 2023-03-21T01:21:00.764957 | 2022-12-28T14:22:33 | 2022-12-28T14:22:33 | 201,292,128 | 12 | 7 | null | 2023-03-05T16:05:34 | 2019-08-08T16:00:21 | Roff | UTF-8 | Python | false | false | 3,455 | py | #!/usr/bin/env python
'''
Basically, to solve this problem, you HAVE to try to interpret this code
using your pseudocode knowledge and genius, and to figure out what it does
Through some analysis of the code, only the main function 'decode' is
important, along with its helper functions.
The main code was doable to debug, but I made some mistakes. After quite
a while of looking at my mistakes and stuff, I managed to interpret it
and get it working
-------------------------------------------------------------------------
My interpretation of the code to python
This may not be 100% accurate or correct or complete, but gets the job done
Use a little inference of programming constructs to figure things out
? condition : if not condition
teluwhut: def (Define a function)
okcan : return
issit: Equate 2 items ==
<<: Define/Set a variable =
$$: List []
<<<: Append Back
>>>: Append front
thing ~ $something$: for thing in something: (for loop)
mod: Modulo
then...fin : Basic Pseudocode open and close Constructs
'''
disnotflag = [0x64, 0x6c, 0x30, 0x62, 0x34, 0x5f, 0x5f, 0x33, 0x6c, 0x6d, 0x6e, 0x34, 0x62, 0x31, 0x5f, 0x33, 0x74, 0x64, 0x6e, 0x62, 0x6d, 0x30, 0x7a, 0x33]
###Debugging: Not in original code#############################
def showdisnotflag(f):
    """Print the byte values in *f* as one ASCII string, no trailing newline."""
    print(''.join(chr(code) for code in f), end='')
###############################################################
def decode(something):
    """Unscramble the module-level ``disnotflag`` list into a string.

    Alternates between popping from the front (odd steps) and the back
    (even steps) of ``disnotflag``, reverses the collected bytes, and
    returns them as text.

    NOTE(review): this consumes the global ``disnotflag`` in place --
    after one call the list is empty, so the function is single-use.
    """
    result = ''
    count = haolung(something)
    # NOTE(review): `counter` is assigned but never used.
    counter = []
    def lll(a,b): a.append(b) #Custom Function
    final = []
    for thing in range(1,count+1):#counter:
        # Odd iterations take from the front, even ones from the back.
        if not thing%2==0:
            '''
            x.pop(1) means remove item at index 1 of x and return it
            example, x <<- $123, 234, 345$
            After running y <<- x.pop(1), x is now $123, 345$ and y is now 234
            #disnotflag.pop(0)<<<final
            '''
            # This code was weird, as it does not make sense to append a list to a function return value
            # Through trial and error, it has been decoded in the lll function
            lll(final,disnotflag.pop(0))
        else:
            lll(final,disnotflag.pop(haolung(disnotflag)-1))
            '''
            disnotflag.pop(len(disnotflag)-1) <<< final
            '''
        ###Debugging: Not in original code################################
        showdisnotflag(disnotflag)
        print(" ",end="")
        showdisnotflag(final)
        print()
        ###################################################################
    final = sdrawkcab(final) #Reverse the string
    for thing in final:
        result+=chr(thing)
    print(result)#Debugging
    return result
def samesame(disone, datone, checker):
    """Return True when *checker* yields equal results for both arguments."""
    left = checker(disone)
    right = checker(datone)
    return left == right
def sdrawkcab(something):
    """Return a new list with the items of *something* in reverse order.

    The original built the result by repeated front-insertion
    (``[thing] + dis``), which is O(n**2); copying once and reversing in
    place is O(n) and produces the same output for any iterable input.
    """
    dis = list(something)
    dis.reverse()
    return dis
# Gives you the length of an array "something"
def haolung(something):
haomehnee = 0
for thing in something:
haomehnee = haomehnee + 1
return haomehnee
# The function testflag(whatutype) and the rest of the code not important
decode(disnotflag)
# After much looking, I realised the output looked like words and put it in
# Flag: CTFSG{b41n_m3lt3d_n_b4mb00zl3d}
| [
"zunmun@gmail.com"
] | zunmun@gmail.com |
0fe44143c74995e1da342650d28d8052088b9b61 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW03_20210706191310.py | 400efe3ada9084523c59768ccc03a50afcf192fe | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,575 | py | """
Georgia Institute of Technology - CS1301
HW03 - Strings and Lists
Collaboration Statement:
"""
#########################################
"""
Function Name: movieNight()
Parameters: subtitle (str)
Returns: fixed subtitle (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def movieNight(subtitle):
    """Return *subtitle* with every digit character removed."""
    return ''.join(ch for ch in subtitle if not ch.isdigit())
"""
Function Name: longestWord()
Parameters: sentence (str)
Returns: longest word (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def longestWord(sentence):
    """Return the longest space-separated word after dropping commas.

    Ties are broken in favour of the word that appears last.
    """
    cleaned = sentence.replace(',', '')
    best = ''
    for word in cleaned.split(' '):
        # >= keeps the later word on equal lengths, like the original.
        if len(word) >= len(best):
            best = word
    return best
"""
Function Name: tennisMatch()
Parameters: player1 (str), player2 (str), matchRecord (str)
Returns: game statement (str)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def tennisMatch(player1, player2, matchRecord):
    """Tally '-'-terminated sets from matchRecord and report the winner.

    '1'/'2' score a point for the respective player; '-' closes a set,
    which only counts when one side scored strictly more points.
    """
    sets_won = [0, 0]   # index 0 -> player1, index 1 -> player2
    points = [0, 0]
    for mark in matchRecord:
        if mark == '1':
            points[0] += 1
        elif mark == '2':
            points[1] += 1
        elif mark == '-':
            if points[0] > points[1]:
                sets_won[0] += 1
            elif points[1] > points[0]:
                sets_won[1] += 1
            points = [0, 0]
    if sets_won[0] > sets_won[1]:
        return player1 + ' won! The score was ' + str(sets_won[0]) + str('-') + str(sets_won[1])
    elif sets_won[1] > sets_won[0]:
        return player2 + ' won! The score was ' + str(sets_won[1]) + str('-') + str(sets_won[0])
    else:
        return "It's a tie"
"""
Function Name: freshFruit()
Parameters: barcodes (list), startIndex (int), stopIndex (int)
Returns: freshest barcode (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def freshFruit(barcodes, startIndex, stopIndex):
    """Return the largest barcode in the inclusive range [startIndex, stopIndex]."""
    window = barcodes[startIndex:stopIndex + 1]
    freshest = window[0]
    for code in window[1:]:
        if code > freshest:
            freshest = code
    return freshest
"""
Function Name: highestSum()
Parameters: stringList (list)
Returns: highest sum index (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def highestSum(stringList):
    """Return the index of the string whose digit characters sum highest.

    NOTE(review): the original body was truncated (empty nested loops --
    a syntax error).  This implementation follows the header contract
    ("Returns: highest sum index (int)") by summing the digit characters
    of each string and returning the index of the first maximum; confirm
    against the assignment spec.
    """
    best_index = 0
    best_sum = None
    for index, text in enumerate(stringList):
        total = sum(int(ch) for ch in text if ch.isdigit())
        if best_sum is None or total > best_sum:
            best_sum = total
            best_index = index
    return best_index
# subtitle = "Mr. and M4rs. Dursley of nu28mber four, Privet Drive, wer903e proud to say th6at they we6re perfectly norm3al, tha894nk you ve89ry much."
# print(movieNight(subtitle))
# sentence = " abc def ghi jkl mno "
# print(longestWord(sentence))
# print(tennisMatch("Emily", "Kathleen", "1122-22211-11122-1212-"))
# print(freshFruit([313414, 2241221, 32432, 49204, 493204, 23212], 2, 4))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
026d3ba9df7425cf395334e5f518b3070e753ea6 | e9c0bb90f07144e26e54b78abc9d102b7affc9f8 | /billreminder/model/bills.py | 8698c5edac475547f1409ccd362ff28afff32b0c | [] | no_license | linxaddict/billreminder | fe8b3aee275172518f1e4757e4a89350f2bd2517 | 7c8f52b8d3bdc55199b4f6417d960facf5c6857e | refs/heads/master | 2021-01-11T17:19:07.965038 | 2017-03-29T19:57:29 | 2017-03-29T19:57:29 | 79,741,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | import datetime as dt
__author__ = 'Marcin Przepiórkowski'
__email__ = 'mprzepiorkowski@gmail.com'
class Payment:
    """A payment made by a user towards a bill."""

    def __init__(self, user, bill, created_at=None):
        self._user = user
        self._bill = bill
        # Any falsy created_at falls back to "now", matching the original
        # `created_at or utcnow()` semantics.
        self._created_at = created_at if created_at else dt.datetime.utcnow()

    def _get_user(self):
        return self._user

    def _set_user(self, value):
        self._user = value

    user = property(_get_user, _set_user, doc="The paying user.")

    def _get_bill(self):
        return self._bill

    def _set_bill(self, value):
        self._bill = value

    bill = property(_get_bill, _set_bill, doc="The bill this payment applies to.")

    def _get_created_at(self):
        return self._created_at

    def _set_created_at(self, value):
        self._created_at = value

    created_at = property(_get_created_at, _set_created_at,
                          doc="UTC timestamp of when the payment was recorded.")
class Bill:
    """A bill owned by one user, optionally shared with participants.

    Plain data holder: every attribute below is exposed through a
    matching read/write property.  ``payments`` and ``participants``
    default to fresh empty lists when a falsy value is passed.
    """

    def __init__(self, id, name, description=None, amount=None, last_payment=None, due_date=None,
                 repeat_mode=None, repeat_value=None, owner=None, payments=None,
                 participants=None):
        self._id = id
        self._name = name
        self._description = description
        self._amount = amount
        self._last_payment = last_payment
        self._due_date = due_date
        self._repeat_mode = repeat_mode
        self._repeat_value = repeat_value
        self._owner = owner
        # `or []` gives each instance its own list (avoids shared defaults).
        self._payments = payments or []
        self._participants = participants or []

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, value):
        self._description = value

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def last_payment(self):
        return self._last_payment

    @last_payment.setter
    def last_payment(self, value):
        self._last_payment = value

    @property
    def due_date(self):
        return self._due_date

    @due_date.setter
    def due_date(self, value):
        self._due_date = value

    @property
    def repeat_mode(self):
        return self._repeat_mode

    @repeat_mode.setter
    def repeat_mode(self, value):
        self._repeat_mode = value

    @property
    def repeat_value(self):
        return self._repeat_value

    @repeat_value.setter
    def repeat_value(self, value):
        self._repeat_value = value

    @property
    def owner(self):
        return self._owner

    @owner.setter
    def owner(self, value):
        self._owner = value

    @property
    def payments(self):
        return self._payments

    @payments.setter
    def payments(self, value):
        self._payments = value

    @property
    def participants(self):
        return self._participants

    @participants.setter
    def participants(self, value):
        self._participants = value
| [
"mprzepiorkowski@gmail.com"
] | mprzepiorkowski@gmail.com |
31cabd5e8920b175cf6324dd2fffcddbd08484af | 1757262f5010c5a726cbb11513d5ad88f632c5a2 | /tributary/streaming/calculations/__init__.py | 4ef83213c1f3ac2a5403467227e677722c7cc520 | [
"Apache-2.0"
] | permissive | thetradingflow/tributary | 9287e26dc63fe1320ef1950048e497ac86519ddb | 6f2c3ce0ac86ee7c3343fd970f3c3e7161c5951e | refs/heads/master | 2022-08-01T02:05:18.385140 | 2020-05-18T20:32:08 | 2020-05-18T20:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from .ops import * # noqa: F401, F403
from .rolling import Count as RollingCount, Sum as RollingSum, Min as RollingMin, Max as RollingMax, Average as RollingAverage # noqa: F401
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
b28e45c09fa3d58b6a1cd221d055dab1a81ca169 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/44/usersdata/130/14900/submittedfiles/desvpad.py | 386a55164703e424ad038db44693b637a1d910a9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
# Python 2 script: input() evaluates what the user types (numbers here).
# NOTE(review): despite the filename (desvpad = standard deviation), the
# visible code only prints the first/last elements and the mean; `math`
# is imported but unused in this chunk.
n=input('Digite o valor de n:')
x=[]
for i in range(0,n,1):
    x.append(input('Digite um elemento:'))
print(x[0])
print(x[len(x)-1])
s=0
# Accumulate the sum of all elements, then divide to get the mean.
for i in range(0,n,1):
    s=s+x[i]
s=s/(len(x))
print(s)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
907bfe48899ad95c6caa0f58360b2de875021754 | 57cb9fef5efac78758f5d151b959ca2216c94083 | /edx/app/edx_ansible/venvs/edx_ansible/bin/rst2xml.py | 1739aa96c81e70299840fba73d614169ffaa6526 | [] | no_license | JosiahKennedy/openedx-branded | 9751d5362088276a87b2e0edca0913568eeb1ac4 | d16a25b035b2e810b8ab2b0a2ac032b216562e26 | refs/heads/master | 2022-12-21T02:39:17.133147 | 2020-03-25T06:03:23 | 2020-03-25T06:03:23 | 249,895,218 | 0 | 1 | null | 2022-12-08T01:23:48 | 2020-03-25T05:33:05 | null | UTF-8 | Python | false | false | 635 | py | #!/edx/app/edx_ansible/venvs/edx_ansible/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
# Best-effort: pick up the user's locale for output encoding; a bare
# except keeps the CLI usable even when the locale is misconfigured
# (stock docutils front-end boilerplate).
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)
# Parse argv, read the source, and write Docutils XML to stdout/file.
publish_cmdline(writer_name='xml', description=description)
| [
"josiahk@phyziklabs.com"
] | josiahk@phyziklabs.com |
eae42f62f6186b0dc18bf54d546868beb12061dd | 0778d368a4d26382d3956b710ac928f7944ba54f | /mammoth/docx/numbering_xml.py | f7f98e0c702db3580e71be2e696ca6c7d904a52c | [
"BSD-2-Clause"
] | permissive | tsaltena/python-mammoth | 68ed9af8e0c2b4012e92a08315dc8db5ac34769d | 6746d5f17377327d9947a10a1e8101f8810122e2 | refs/heads/master | 2021-05-15T03:53:17.462401 | 2020-11-09T11:00:10 | 2020-11-09T11:00:10 | 119,989,824 | 0 | 0 | BSD-2-Clause | 2021-01-20T08:01:23 | 2018-02-02T14:11:50 | Python | UTF-8 | Python | false | false | 1,700 | py | from ..documents import numbering_level
def read_numbering_xml_element(element):
    """Parse a numbering.xml root element into a Numbering lookup table."""
    abstract = _read_abstract_nums(element)
    return Numbering(_read_nums(element, abstract))
def _read_abstract_nums(element):
    """Map each w:abstractNum child's id to its level definitions."""
    children = element.find_children("w:abstractNum")
    return dict(_read_abstract_num(child) for child in children)
def _read_abstract_num(element):
    """Return an (abstractNumId, levels) pair for one w:abstractNum element."""
    return (
        element.attributes.get("w:abstractNumId"),
        _read_abstract_num_levels(element),
    )
def _read_abstract_num_levels(element):
    """Index the w:lvl children of an abstract num by their level index."""
    return {
        level.level_index: level
        for level in map(_read_abstract_num_level, element.find_children("w:lvl"))
    }
def _read_abstract_num_level(element):
    """Build a numbering_level from a single w:lvl element."""
    index = element.attributes["w:ilvl"]
    fmt = element.find_child_or_null("w:numFmt").attributes.get("w:val")
    # Everything except explicit bullets counts as an ordered list level.
    return numbering_level(index, fmt != "bullet")
def _read_nums(element, abstract_nums):
    """Map each w:num id to the levels of the abstract num it references."""
    return dict(
        _read_num(child, abstract_nums)
        for child in element.find_children("w:num")
    )
def _read_num(element, abstract_nums):
    """Resolve one w:num to a (numId, levels) pair via its abstract num."""
    num_id = element.attributes.get("w:numId")
    abstract_id = element.find_child_or_null("w:abstractNumId").attributes["w:val"]
    return num_id, abstract_nums[abstract_id]
class Numbering(object):
    """Lookup table from (num id, level index) to a numbering level."""

    def __init__(self, nums):
        self._nums = nums

    def find_level(self, num_id, level):
        """Return the level definition, or None when num or level is unknown."""
        levels = self._nums.get(num_id)
        return None if levels is None else levels.get(level)
| [
"mike@zwobble.org"
] | mike@zwobble.org |
3ec948782164962982fa48bf0a3afa512f6033a7 | 9003a00f9d529c50f7b169dce45f1380f1d466b6 | /atmel/feather/circuitpyton/build_adafruit_circuitpython_bundle_py_20181218/lib/adafruit_onewire/device.py | ce2c49b444f6b2ca7feec8513db5075863e4ffd7 | [] | no_license | 0xFF1E071F/hw | d249b8607ba40d6ce1ed9a4a267639c30019d978 | 2441df0ab45a8e2f3bed4ec7f4eff42ac0a32a7f | refs/heads/master | 2022-04-22T03:59:58.835300 | 2020-04-28T06:52:29 | 2020-04-28T06:52:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,191 | py | # The MIT License (MIT)
#
# Copyright (c) 2017 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_onewire.device`
====================================================
Provides access to a single device on the 1-Wire bus.
* Author(s): Carter Nelson
"""
__version__ = "1.1.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_OneWire.git"
_MATCH_ROM = b'\x55'
class OneWireDevice(object):
    """A class to represent a single device on the 1-Wire bus."""

    def __init__(self, bus, address):
        self._bus = bus
        self._address = address

    def __enter__(self):
        self._select_rom()
        return self

    def __exit__(self, *exc):
        return False

    def readinto(self, buf, *, start=0, end=None):
        """
        Read into ``buf`` from the device; the number of bytes read is
        the length of ``buf`` (or of the ``buf[start:end]`` slice).

        :param bytearray buf: buffer to write into
        :param int start: Index to start writing at
        :param int end: Index to write up to but not include
        """
        self._bus.readinto(buf, start=start, end=end)
        # A full-buffer read of at least 8 bytes carries a CRC to verify.
        full_read = start == 0 and end is None
        if full_read and len(buf) >= 8 and self._bus.crc8(buf):
            raise RuntimeError('CRC error.')

    def write(self, buf, *, start=0, end=None):
        """
        Write the bytes from ``buf`` (or ``buf[start:end]``) to the device.

        :param bytearray buf: buffer containing the bytes to write
        :param int start: Index to start writing from
        :param int end: Index to read up to but not include
        """
        return self._bus.write(buf, start=start, end=end)

    def _select_rom(self):
        # Reset the bus, then address this device by its ROM code.
        self._bus.reset()
        self.write(_MATCH_ROM)
        self.write(self._address.rom)
| [
"eiselekd@gmail.com"
] | eiselekd@gmail.com |
65ea3a973c4a55941bc41b7fc132a9cba4286163 | b45b3e5e7389d071161fa52340cb119a29c76907 | /DoubleBufferDemo.py | 2a5be2c67b9df315bc62b6a954771d8f827818b4 | [] | no_license | Metallicow/wxPythonDemos | 2fc6882a11a0aa6bb35c42f163cfcd6b3456f4fd | 396d1ade5930528ec7518b9c22dc93a274cb418f | refs/heads/master | 2020-12-25T11:52:18.577898 | 2013-05-19T18:58:11 | 2013-05-19T18:58:11 | 11,283,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,421 | py | #!/usr/bin/env python
import wx
import random
# This has been set up to optionally use the wx.BufferedDC if
# USE_BUFFERED_DC is True, it will be used. Otherwise, it uses the raw
# wx.Memory DC , etc.
USE_BUFFERED_DC = False
#USE_BUFFERED_DC = True
class BufferedWindow(wx.Window):
"""
A Buffered window class.
To use it, subclass it and define a Draw(DC) method that takes a DC
to draw to. In that method, put the code needed to draw the picture
you want. The window will automatically be double buffered, and the
screen will be automatically updated when a Paint event is received.
When the drawing needs to change, you app needs to call the
UpdateDrawing() method. Since the drawing is stored in a bitmap, you
can also save the drawing to file by calling the
SaveToFile(self, file_name, file_type) method.
"""
def __init__(self, *args, **kwargs):
# make sure the NO_FULL_REPAINT_ON_RESIZE style flag is set.
kwargs['style'] = kwargs.setdefault('style', wx.NO_FULL_REPAINT_ON_RESIZE) | wx.NO_FULL_REPAINT_ON_RESIZE
wx.Window.__init__(self, *args, **kwargs)
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_SIZE(self, self.OnSize)
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
self.OnSize(None)
def Draw(self, dc):
## just here as a place holder.
## This method should be over-ridden when subclassed
pass
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
if USE_BUFFERED_DC:
dc = wx.BufferedPaintDC(self, self._Buffer)
else:
dc = wx.PaintDC(self)
dc.DrawBitmap(self._Buffer, 0, 0)
def OnSize(self,event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
Size = self.ClientSize
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._Buffer = wx.EmptyBitmap(*Size)
self.UpdateDrawing()
def SaveToFile(self, FileName, FileType=wx.BITMAP_TYPE_PNG):
## This will save the contents of the buffer
## to the specified file. See the wxWindows docs for
## wx.Bitmap::SaveFile for the details
self._Buffer.SaveFile(FileName, FileType)
def UpdateDrawing(self):
"""
This would get called if the drawing needed to change, for whatever reason.
The idea here is that the drawing is based on some data generated
elsewhere in the system. If that data changes, the drawing needs to
be updated.
This code re-draws the buffer, then calls Update, which forces a paint event.
"""
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self.Draw(dc)
del dc # need to get rid of the MemoryDC before Update() is called.
self.Refresh(eraseBackground=False)
self.Update()
class DrawWindow(BufferedWindow):
def __init__(self, *args, **kwargs):
## Any data the Draw() function needs must be initialized before
## calling BufferedWindow.__init__, as it will call the Draw
## function.
self.DrawData = {}
BufferedWindow.__init__(self, *args, **kwargs)
def Draw(self, dc):
dc.SetBackground( wx.Brush("White") )
dc.Clear() # make sure you clear the bitmap!
# Here's the actual drawing code.
for key, data in self.DrawData.items():
if key == "Rectangles":
dc.SetBrush(wx.BLUE_BRUSH)
dc.SetPen(wx.Pen('VIOLET', 4))
for r in data:
dc.DrawRectangle(*r)
elif key == "Ellipses":
dc.SetBrush(wx.Brush("GREEN YELLOW"))
dc.SetPen(wx.Pen('CADET BLUE', 2))
for r in data:
dc.DrawEllipse(*r)
elif key == "Polygons":
dc.SetBrush(wx.Brush("SALMON"))
dc.SetPen(wx.Pen('VIOLET RED', 4))
for r in data:
dc.DrawPolygon(r)
class TestFrame(wx.Frame):
def __init__(self, parent=None):
wx.Frame.__init__(self, parent,
size = (500,500),
title="Double Buffered Test",
style=wx.DEFAULT_FRAME_STYLE)
## Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
item = file_menu.Append(wx.ID_EXIT, text="&Exit")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
MenuBar.Append(file_menu, "&File")
draw_menu = wx.Menu()
item = draw_menu.Append(wx.ID_ANY, "&New Drawing","Update the Drawing Data")
self.Bind(wx.EVT_MENU, self.NewDrawing, item)
item = draw_menu.Append(wx.ID_ANY,'&Save Drawing\tAlt-I','')
self.Bind(wx.EVT_MENU, self.SaveToFile, item)
MenuBar.Append(draw_menu, "&Draw")
self.SetMenuBar(MenuBar)
self.Window = DrawWindow(self)
self.Show()
# Initialize a drawing -- it has to be done after Show() is called
# so that the Windows has teh right size.
self.NewDrawing()
def OnQuit(self,event):
self.Close(True)
def NewDrawing(self, event=None):
self.Window.DrawData = self.MakeNewData()
self.Window.UpdateDrawing()
def SaveToFile(self,event):
dlg = wx.FileDialog(self, "Choose a file name to save the image as a PNG to",
defaultDir = "",
defaultFile = "",
wildcard = "*.png",
style = wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
self.Window.SaveToFile(dlg.GetPath(), wx.BITMAP_TYPE_PNG)
dlg.Destroy()
def MakeNewData(self):
## This method makes some random data to draw things with.
MaxX, MaxY = self.Window.GetClientSizeTuple()
DrawData = {}
# make some random rectangles
l = []
for i in range(5):
w = random.randint(1,MaxX/2)
h = random.randint(1,MaxY/2)
x = random.randint(1,MaxX-w)
y = random.randint(1,MaxY-h)
l.append( (x,y,w,h) )
DrawData["Rectangles"] = l
# make some random ellipses
l = []
for i in range(5):
w = random.randint(1,MaxX/2)
h = random.randint(1,MaxY/2)
x = random.randint(1,MaxX-w)
y = random.randint(1,MaxY-h)
l.append( (x,y,w,h) )
DrawData["Ellipses"] = l
# Polygons
l = []
for i in range(3):
points = []
for j in range(random.randint(3,8)):
point = (random.randint(1,MaxX),random.randint(1,MaxY))
points.append(point)
l.append(points)
DrawData["Polygons"] = l
return DrawData
class DemoApp(wx.App):
def OnInit(self):
self.frame = TestFrame()
self.SetTopWindow(self.frame)
return True
if __name__ == "__main__":
app = DemoApp(0)
app.MainLoop()
| [
"Chris.Barker@noaa.gov"
] | Chris.Barker@noaa.gov |
d3d115752075a17430f9222d060599f649fa1271 | f42f04302b4c7ed34b6e079cc334c499b10d656c | /auditware/apps.py | d7f2cf032329f184f7b9b289216fa8a456661e0e | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | un33k/django-auditware | 5edc2159c9aee2aae7c6a2c3d04f9f843665ff49 | 7ea46195aade2caa464fbc9c5646f7565e87be11 | refs/heads/master | 2021-01-10T02:01:27.106266 | 2016-04-05T20:16:37 | 2016-04-05T20:16:37 | 43,097,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.apps import apps
from django.apps import AppConfig as DjangoAppConfig
from django.utils.translation import ugettext_lazy as _
class AppConfig(DjangoAppConfig):
    """
    Configuration entry point for the auditware app.

    Registered by Django's app loader; signal receivers are attached
    once the app registry is fully populated.
    """
    label = name = 'auditware'
    verbose_name = _("auditware app")

    def ready(self):
        """
        Called by Django when the app is fully loaded; hook up the
        app's signal receivers.
        """
        # Local import: receivers touch models, which are only safe to
        # import once the app registry is ready.
        from .receivers import latch_to_signals
        latch_to_signals()
| [
"val@neekware.com"
] | val@neekware.com |
1c2c3ec6f06be19a149ce546f8991519ff2e516e | cb559124f016b2139ec2e7bd764ee95c72e0c85a | /MainProject/mypost/admin.py | 01fb05bba7b0162ed94437aa5b7cf4115ebb3b50 | [] | no_license | samirpatil2000/RestFrameWork | cb519566871052328f507a863fb5617bba148589 | 776793ef214a0365a722463df5a9e92365847235 | refs/heads/master | 2022-10-19T04:54:09.886050 | 2020-06-09T03:25:41 | 2020-06-09T03:25:41 | 270,561,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from django.contrib import admin
# Register your models here.
from .models import Post , Category
admin.site.register(Post)
admin.site.register(Category) | [
"samirspatil742099@gmail.com"
] | samirspatil742099@gmail.com |
e11d91874bdc3936c206ac66d1d9bfea012f21ee | 4cacaaebab5db2f35e3fb08f5c8b5dc6cc807d29 | /C1_L1/timer_for_website.py | 2a992921912a6676d9a9424a8d0f1c7d767802f3 | [] | no_license | M0hamedGamal/NanoDgree | f667464080927da9daab3c55daa80e10d016edb1 | 839f6c8fc5219d08d31105061ce2decbe70d9400 | refs/heads/master | 2021-09-05T19:36:08.760757 | 2018-01-30T15:47:40 | 2018-01-30T15:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import time
import webbrowser
# Simple break reminder: opens the same video three times, 5 seconds apart.
total_breaks = 3
break_count = 0
print("This Program Started On: " , time.ctime()) #Current time
while (break_count < total_breaks): #Loop For 3 Time
    time.sleep(5) #To Wait 5 Sec
    webbrowser.open("https://www.youtube.com/watch?v=YQHsXMglC9A")
    break_count = break_count + 1
| [
"you@example.com"
] | you@example.com |
caa9b68f2388f1cebf0aed5cc468db051a5e68ae | f98e37d4dba35055905063596415aaedcfa59fa3 | /ExpenseReportSystemBE/components/secCheck/secCheckLogic.py | c8129142b343f1a5731932a7a03c86142e7f743a | [] | no_license | pyj4104/ExpenseReportSystemBE | 8c8b3fa1c02cab07bf4416ebc797c5c46c0c57cd | 67b30a342394d0df54729aa58df37d5c503592a4 | refs/heads/main | 2023-04-26T09:19:04.702310 | 2021-05-18T02:00:55 | 2021-05-18T02:00:55 | 343,976,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,109 | py | import time
class SecCodeSession:
    """A short-lived login session tied to one user's e-mail address."""

    userEmail: str   # address the code was issued for
    created: int     # epoch seconds at creation
    expires: int     # epoch seconds after which the code is stale

    def __init__(self, email: str):
        self.userEmail = email
        now = int(time.time())
        self.created = now
        # Codes stay valid for ten minutes.
        self.expires = now + 600
class SecCodes:
    """Registry of active security codes.

    Keeps two mirrored maps: token -> SecCodeSession and
    user e-mail -> token.
    """

    secCodesDict: dict  # token -> SecCodeSession
    usrCodesDict: dict  # user e-mail -> token

    def __init__(self):
        self.secCodesDict = {}
        self.usrCodesDict = {}

    def initiateLogInProcedure(self, userEmail: str, token: str):
        """
        Register *token* for *userEmail*, dropping any previous code
        the user may have had, and record both directions of the mapping.
        """
        self.removeSecCode(userEmail=userEmail)
        session = SecCodeSession(userEmail)
        self.secCodesDict[token] = session
        self.usrCodesDict[userEmail] = token

    def retrieveUser(self, token: str) -> str:
        """
        Return the e-mail address the given token was issued for.
        Raises KeyError when the token is unknown.
        """
        return self.secCodesDict[token].userEmail

    def isSecCodeIn(self, token: str) -> bool:
        """
        Return True when *token* belongs to a live, unexpired session;
        False for unknown tokens or sessions past their expiry time.
        """
        session = self.secCodesDict.get(token)
        if session is None:
            return False
        return int(time.time()) <= session.expires

    def removeSecCode(self, userEmail: str = None, token: str = None):
        """
        Drop a session given either the e-mail or the token; whichever
        side is missing is looked up from the other.  Raises ValueError
        when both arguments are empty.
        """
        if userEmail and not token:
            # Resolve the token from the e-mail (None when unknown).
            token = self.usrCodesDict.get(userEmail)
        elif token and not userEmail:
            session = self.secCodesDict.get(token)
            userEmail = session.userEmail if session else None
        elif not userEmail and not token:
            raise ValueError("Both fields cannot be empty")
        self.__removeToken__(token)
        self.__removeUserEmail__(userEmail)

    def __removeToken__(self, token: str):
        # Silently ignore unknown (or None) tokens.
        self.secCodesDict.pop(token, None)

    def __removeUserEmail__(self, email: str):
        # Silently ignore unknown (or None) addresses.
        self.usrCodesDict.pop(email, None)
# Module-level singleton: the shared registry of active security codes.
currentSecCodes = SecCodes()
| [
"pyj4104@hotmail.com"
] | pyj4104@hotmail.com |
32bbc1801f1410cb204d27de24ee4c38082ebf18 | e57f62ce463ae10e2a61bfee6682f92773d56520 | /simim/scenario.py | ed6f51c5e84375b5eb6d97440b074ae956fc0a1a | [
"MIT"
] | permissive | rffowler/simim | 7f5a0ac7df5abce8b13627ef3f838d4196fb657e | d5b9793ebb469fe903fdff0f98898b6f87a433b3 | refs/heads/master | 2020-05-18T19:34:46.965775 | 2019-04-24T13:58:08 | 2019-04-24T13:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | """
scenario.py
Manages scenarios
"""
import pandas as pd
class Scenario():
def __init__(self, filename, factors):
self.data = pd.read_csv(filename)
if isinstance(factors, str):
factors = [factors]
# rename scenario cols with O_ or D_ prefixes as necessary
for column in [f for f in self.data.columns.values if not f.startswith("CUM_") and f not in ["GEOGRAPHY_CODE", "YEAR"]]:
if "O_" + column in factors:
self.data.rename({column: "O_" + column, "CUM_" + column: "CUM_O_" + column }, axis=1, inplace=True)
elif "D_" + column in factors:
self.data.rename({column: "D_" + column, "CUM_" + column: "CUM_D_" + column }, axis=1, inplace=True)
missing = [factor for factor in factors if factor not in self.data.columns]
# This doesnt allow for e.g. JOBS in scenario and a factor of JOBS_DISTWEIGHTED
# check scenario has no factors that aren't in model
# superfluous = [factor for factor in self.data.columns if factor not in factors and \
# not factor.startswith("CUM_") and \
# factor != "GEOGRAPHY_CODE" and \
# factor != "YEAR"]
# if superfluous:
# raise ValueError("ERROR: Factor(s) %s are in scenario but not a model factor, remove or add to model" % str(superfluous))
#print("Superfluous factors:", superfluous)
print("Available factors:", factors)
print("Scenario factors:", [f for f in self.data.columns.values if not f.startswith("CUM_") and f not in ["GEOGRAPHY_CODE", "YEAR"]])
print("Scenario timeline:", self.timeline())
print("Scenario geographies:", self.geographies())
# add columns for factors not in scenario
# TODO is this actually necessary?
for col in missing:
self.data[col] = 0
self.data["CUM_"+col] = 0
# validate
if "GEOGRAPHY_CODE" not in self.data.columns.values:
raise ValueError("Scenario definition must contain a GEOGRAPHY_CODE column")
if "YEAR" not in self.data.columns.values:
raise ValueError("Scenario definition must contain a YEAR column")
# work out factors #.remove(["GEOGRAPHY_CODE", "YEAR"])
self.factors = [f for f in self.data.columns.values if not f.startswith("CUM_") and f not in ["GEOGRAPHY_CODE", "YEAR"]]
self.current_scenario = None
self.current_time = None
def timeline(self):
return sorted(self.data.YEAR.unique())
def geographies(self):
return sorted(self.data.GEOGRAPHY_CODE.unique())
def update(self, year):
""" Returns new scenario if there is data for the given year, otherwise returns the current (cumulative) scenario """
self.current_time = year
if year in self.data.YEAR.unique():
print("Updating scenario")
self.current_scenario = self.data[self.data.YEAR==year]
return self.current_scenario
else:
print("Persisting existing scenario")
return self.current_scenario
def apply(self, dataset, year):
# if no scenario for a year, reuse the most recent (cumulative) figures
self.current_scenario = self.update(year)
# TODO we can probably get away with empty scenario?
# ensure there is a scenario
if self.current_scenario is None:
raise ValueError("Unable to find a scenario for %s" % year)
#print(most_recent_scenario.head())
dataset = dataset.merge(self.current_scenario.drop(self.factors, axis=1), how="left", left_on="D_GEOGRAPHY_CODE", right_on="GEOGRAPHY_CODE") \
.drop(["GEOGRAPHY_CODE", "YEAR"], axis=1).fillna(0)
for factor in self.factors:
#print(dataset.columns.values)
# skip constrained
if factor != "O_GEOGRAPHY_CODE" and factor != "D_GEOGRAPHY_CODE":
dataset["CHANGED_" + factor] = dataset[factor] + dataset["CUM_" + factor]
return dataset
| [
"a.p.smith@leeds.ac.uk"
] | a.p.smith@leeds.ac.uk |
297f090aa09340a17f20e473690f2254a6bbf410 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5630113748090880_0/Python/cih187/round1AproblemB.py | 7a5b2198cbb47f906cd2f88be65dc5710385d914 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | def round1a_b(path):
with open(path, mode='r') as file:
lines = file.readlines()
test_case_number = 0
line_number = 1
while test_case_number < int(lines[0]):
test_case_number += 1
# variables for this problem
n = int(lines[line_number].split()[0])
result = []
matrice = []
for i in range(2*n - 1):
matrice.append(lines[line_number + i + 1].split())
matrice_flat = [int(item) for sublist in matrice for item in sublist]
for i in matrice_flat:
count = 0
for j in matrice_flat:
if i == j:
count += 1
if count % 2 == 1:
result.append(i)
matrice_flat = remove_values_from_list(matrice_flat, i)
result.sort()
# end of problem logic
my_result = ""
for item in result:
my_result += str(item) + " "
print("Case #{}: {}".format(test_case_number, my_result))
line_number += 2*n
def remove_values_from_list(my_list, x):
return [value for value in my_list if value != x]
round1a_b('file.txt')
exit() | [
"alexandra1.back@gmail.com"
] | alexandra1.back@gmail.com |
1a4ef0108cb6f9dc1429db6da776aba96ff6f586 | fec622bc34957dd4d99f1ef0f23608eeb40ed609 | /internal/notes/builtin-SAVE/packages/autogen/package.py | 4198ab1589787f6d30b2af2fc527239452709c9b | [] | no_license | scottkwarren/hpctest | 4d5ff18d00c5eb9b7da481c9aa0824aa7082062f | a8bb99b5f601a5d088ae56ab9886ab8079c081ba | refs/heads/master | 2022-09-07T19:36:18.544795 | 2022-08-18T20:26:42 | 2022-08-18T20:26:42 | 100,518,800 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Autogen(AutotoolsPackage):
"""AutoGen is a tool designed to simplify the creation and maintenance of
programs that contain large amounts of repetitious text. It is especially
valuable in programs that have several blocks of text that must be kept
synchronized."""
homepage = "https://www.gnu.org/software/autogen/index.html"
url = "https://ftp.gnu.org/gnu/autogen/rel5.18.12/autogen-5.18.12.tar.gz"
list_url = "https://ftp.gnu.org/gnu/autogen"
list_depth = 1
version('5.18.12', '551d15ccbf5b5fc5658da375d5003389')
variant('xml', default=True, description='Enable XML support')
depends_on('pkg-config@0.9.0:', type='build')
depends_on('guile@1.8:2.0')
depends_on('libxml2', when='+xml')
def configure_args(self):
spec = self.spec
args = [
# `make check` fails without this
# Adding a gettext dependency does not help
'--disable-nls',
]
if '+xml' in spec:
args.append('--with-libxml2={0}'.format(spec['libxml2'].prefix))
else:
args.append('--without-libxml2')
return args
| [
"scott@rice.edu"
] | scott@rice.edu |
49de452b8c12cc429ec8cedf8b4759d6f544e7b1 | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - lab9/set 2/JOHN J WELSH_19371_assignsubmission_file_lab9/lab9/P2.py | 77ac0756b0f7c7014104a22a46737f2475f62f6a | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | class Simple:
def __init__(self,i):
self.i = i
def double(self):
i = self.i
i += i
self.i = i
s1 = Simple(4)
for i in range(4):
s1.double()
print s1.i
s2 = Simple('Hello')
s2.double(); s2.double()
print s2.i
s2.i = 100
print s2.i
| [
"sendurr@hotmail.com"
] | sendurr@hotmail.com |
77310ee0bff08f7d081861ed92da5079102f53e9 | 5e2dddce9c67d5b54d203776acd38d425dbd3398 | /spacy/tests/regression/test_issue1253.py | 10829768ec1bd99db43724113a00a0f41ae8b381 | [
"MIT"
] | permissive | yuxuan2015/spacy_zh_model | 8164a608b825844e9c58d946dcc8698853075e37 | e89e00497ab3dad0dd034933e25bc2c3f7888737 | refs/heads/master | 2020-05-15T11:07:52.906139 | 2019-08-27T08:28:11 | 2019-08-27T08:28:11 | 182,213,671 | 1 | 0 | null | 2019-04-19T06:27:18 | 2019-04-19T06:27:17 | null | UTF-8 | Python | false | false | 429 | py | from __future__ import unicode_literals
import pytest
import spacy
def ss(tt):
for i in range(len(tt)-1):
for j in range(i+1, len(tt)):
tt[i:j].root
@pytest.mark.models('en')
def test_access_parse_for_merged():
nlp = spacy.load('en_core_web_sm')
t_t = nlp.tokenizer("Highly rated - I'll definitely")
nlp.tagger(t_t)
nlp.parser(t_t)
nlp.parser(t_t)
ss(t_t)
| [
"yuxuan2015@example.com"
] | yuxuan2015@example.com |
5797e3f5a9e0d306f55290ff2b0c26ced31d0a12 | 13625dd7375297b066ccd69d6c229e9a1535c9b2 | /savings/migrations/0016_auto_20201221_0947.py | 75df210829866c486eea392b0c5878838082f722 | [] | no_license | rajman01/investfy | 9d5fa3ed7593ec13db575016fc839664630318af | a4c8bf16ba7a1ce38d1370e4779284a4d6426733 | refs/heads/main | 2023-09-01T19:10:18.411861 | 2023-08-28T02:30:23 | 2023-08-28T02:30:23 | 320,408,218 | 0 | 1 | null | 2023-08-28T02:30:24 | 2020-12-10T22:46:03 | null | UTF-8 | Python | false | false | 3,724 | py | # Generated by Django 3.1.3 on 2020-12-21 08:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('wallet', '0009_wallet_wallet_id'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('savings', '0015_auto_20201218_2218'),
]
operations = [
migrations.RenameField(
model_name='targetsave',
old_name='active',
new_name='joint',
),
migrations.RenameField(
model_name='targetsave',
old_name='autosave_amount',
new_name='targeted_amount',
),
migrations.RemoveField(
model_name='targetsave',
name='autosave',
),
migrations.RemoveField(
model_name='targetsave',
name='day_interval',
),
migrations.RemoveField(
model_name='targetsave',
name='last_saved',
),
migrations.RemoveField(
model_name='targetsave',
name='targeted_saving',
),
migrations.AddField(
model_name='targetsave',
name='date_created',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='targetsave',
name='description',
field=models.CharField(blank=True, max_length=128, null=True),
),
migrations.AddField(
model_name='targetsave',
name='members',
field=models.ManyToManyField(blank=True, related_name='joint_target_savings', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='targetsave',
name='name',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='jointsavetransaction',
name='joint_save',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transactions', to='savings.jointsave'),
),
migrations.AlterField(
model_name='targetsave',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_savings', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='targetsave',
name='wallet',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_savings', to='wallet.wallet'),
),
migrations.AlterField(
model_name='targetsavingtransaction',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='target_saving_transactions', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='TargetSaveAutoSave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=False)),
('day_interval', models.IntegerField(blank=True, null=True)),
('autosave_amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('last_saved', models.DateField(blank=True, null=True)),
('target_save', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='autosave', to='savings.targetsave')),
],
),
]
| [
"alameenraji31@gmail.com"
] | alameenraji31@gmail.com |
0ba67e81120fca81ce3b369ed3fe9c44049b4f7e | d7d7e13a91d0ded303cbb86a3e6c0f5071b6da74 | /metafeatures/aux/discovery.py | 2bdf474dd6e7817659a50f84327b1ef9cdbdd0fd | [] | no_license | fhpinto/systematic-metafeatures | 40c6f5f6a7a1f775918e27820c72962e5436a010 | 8d646ca5fa67efdf3caea2ca3656ef63c6c4d4d9 | refs/heads/master | 2020-04-06T06:55:26.789685 | 2016-08-26T13:36:18 | 2016-08-26T13:36:18 | 64,952,631 | 4 | 2 | null | 2016-08-19T12:38:59 | 2016-08-04T17:04:55 | Python | UTF-8 | Python | false | false | 1,036 | py | import importlib
import inspect
import pkgutil
import sys
def discover_components(package, directory, base_class):
"""Discover implementations of a base class in a package.
Parameters
----------
package : str
Package name
directory : str
Directory of the package to which is inspected.
base_class : object
Base class of objects to discover
Returns
-------
list : all subclasses of `base_class` inside `directory`
"""
components = list()
for module_loader, module_name, ispkg in pkgutil.iter_modules(
[directory]):
full_module_name = "%s.%s" % (package, module_name)
if full_module_name not in sys.modules and not ispkg:
module = importlib.import_module(full_module_name)
for member_name, obj in inspect.getmembers(module):
if inspect.isclass(obj) and issubclass(base_class, obj):
classifier = obj
components.append(classifier)
return components | [
"feurerm@informatik.uni-freiburg.de"
] | feurerm@informatik.uni-freiburg.de |
4e794f3df16cf82062bff11428a00381326cf9ea | 910c97ce255f39af7ef949664b4346e8cb5d6a0e | /monitorexecutor/dynamic/.svn/text-base/stat_service.py.svn-base | 9d791b8effad2f1cea315dda0c8df5b10a3f9731 | [] | no_license | sun3shines/manager_monitor | f3742a4fde95b456f51d0a18feb78f3d4048c560 | f49d741203d8476f2249a49d90fecc86143ac622 | refs/heads/master | 2021-01-17T06:47:14.375088 | 2016-04-29T06:43:05 | 2016-04-29T06:43:05 | 57,361,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | # -*- coding: utf-8 -*-
import psutil
import datetime
import time
from monitorexecutor.globalx import PSUTIL_SERVICE_INTERVAL,SERVICE_CMDLINE
from monitorexecutor.global_cache import MONITOR_SERVICE_PROCESS
def service_iter():
while True:
for service_name,pc in SERVICE_CMDLINE.items():
service_init_data = {'name':service_name,
'cmdline':' '.join(list(pc)),
'active_status':'0/0',
'open_files':'0',
'net_connections':'0',
'thread_num':'0',
'cpu_utilization':'0.0',
'mem_utilization':'0.0',
'available':'disable',
'timestamp':str(datetime.datetime.now())}
psutil_pids = MONITOR_SERVICE_PROCESS.get(service_name)
if not psutil_pids:
# 未初始化时为None
psutil_pids = []
cmdline = ''
total = len(psutil_pids)
actives = 0
open_files = net_connections = thread_num = 0
cpu_utilization = mem_utilization = 0.0
for pid in psutil_pids:
try:
p = psutil.Process(pid)
service_init_data.update({'available':'enable'})
cmdline = ' '.join(p.cmdline)
if p.status not in [psutil.STATUS_ZOMBIE,psutil.STATUS_DEAD]:
actives = actives + 1
open_files = open_files + len(p.get_open_files())
net_connections = net_connections + len(p.get_connections())
thread_num = thread_num + p.get_num_threads()
cpu_utilization = cpu_utilization + p.get_cpu_percent()
mem_utilization = mem_utilization + p.get_memory_percent()
except:
continue
service_init_data.update({'cmdline':cmdline,
'active_status':'/'.join([str(actives),str(total)]),
'open_files':str(open_files),
'net_connections':str(net_connections),
'thread_num':str(thread_num),
'cpu_utilization':str(cpu_utilization),
'mem_utilization':str(mem_utilization)})
yield service_init_data
time.sleep(PSUTIL_SERVICE_INTERVAL)
def get_psutil_service(hostUuid):
for service_init_data in service_iter():
yield {'hostUuid':hostUuid,
'class':'statService',
'attr':service_init_data}
| [
"sun__shines@163.com"
] | sun__shines@163.com | |
39c6f807a95a6b5a77ae04b4d7f3efa4c8f2f1bf | 3dc3bbe607ab7b583eb52dbaae86636eb642960a | /mmaction/models/localizers/utils/proposal_utils.py | 7b51921684f442df8c0b28a186bc581b2f4d9e3e | [
"Apache-2.0"
] | permissive | open-mmlab/mmaction2 | 659c36c6083fd3d9d072e074a8d4b3a50342b9bd | 582b78fd6c3240500d5cacd292339d7d1ddbb056 | refs/heads/main | 2023-08-28T18:14:50.423980 | 2023-08-10T09:20:06 | 2023-08-10T09:20:06 | 278,810,244 | 3,498 | 1,028 | Apache-2.0 | 2023-09-07T06:50:44 | 2020-07-11T07:19:10 | Python | UTF-8 | Python | false | false | 5,256 | py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoU score between a groundtruth bbox and the proposals.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of iou scores.
"""
len_anchors = proposal_max - proposal_min
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
union_len = len_anchors - inter_len + gt_max - gt_min
jaccard = np.divide(inter_len, union_len)
return jaccard
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max):
"""Compute IoP score between a groundtruth bbox and the proposals.
Compute the IoP which is defined as the overlap ratio with
groundtruth proportional to the duration of this proposal.
Args:
proposal_min (list[float]): List of temporal anchor min.
proposal_max (list[float]): List of temporal anchor max.
gt_min (float): Groundtruth temporal box min.
gt_max (float): Groundtruth temporal box max.
Returns:
list[float]: List of intersection over anchor scores.
"""
len_anchors = np.array(proposal_max - proposal_min)
int_tmin = np.maximum(proposal_min, gt_min)
int_tmax = np.minimum(proposal_max, gt_max)
inter_len = np.maximum(int_tmax - int_tmin, 0.)
scores = np.divide(inter_len, len_anchors)
return scores
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k):
"""Soft NMS for temporal proposals.
Args:
proposals (np.ndarray): Proposals generated by network.
alpha (float): Alpha value of Gaussian decaying function.
low_threshold (float): Low threshold for soft nms.
high_threshold (float): High threshold for soft nms.
top_k (int): Top k values to be considered.
Returns:
np.ndarray: The updated proposals.
"""
proposals = proposals[proposals[:, -1].argsort()[::-1]]
tstart = list(proposals[:, 0])
tend = list(proposals[:, 1])
tscore = list(proposals[:, -1])
rstart = []
rend = []
rscore = []
while len(tscore) > 0 and len(rscore) <= top_k:
max_index = np.argmax(tscore)
max_width = tend[max_index] - tstart[max_index]
iou_list = temporal_iou(tstart[max_index], tend[max_index],
np.array(tstart), np.array(tend))
iou_exp_list = np.exp(-np.square(iou_list) / alpha)
for idx, _ in enumerate(tscore):
if idx != max_index:
current_iou = iou_list[idx]
if current_iou > low_threshold + (high_threshold -
low_threshold) * max_width:
tscore[idx] = tscore[idx] * iou_exp_list[idx]
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
rstart = np.array(rstart).reshape(-1, 1)
rend = np.array(rend).reshape(-1, 1)
rscore = np.array(rscore).reshape(-1, 1)
new_proposals = np.concatenate((rstart, rend, rscore), axis=1)
return new_proposals
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k,
feature_extraction_interval):
"""Post process for temporal proposals generation.
Args:
result (np.ndarray): Proposals generated by network.
video_info (dict): Meta data of video. Required keys are
'duration_frame', 'duration_second'.
soft_nms_alpha (float): Alpha value of Gaussian decaying function.
soft_nms_low_threshold (float): Low threshold for soft nms.
soft_nms_high_threshold (float): High threshold for soft nms.
post_process_top_k (int): Top k values to be considered.
feature_extraction_interval (int): Interval used in feature extraction.
Returns:
list[dict]: The updated proposals, e.g.
[{'score': 0.9, 'segment': [0, 1]},
{'score': 0.8, 'segment': [0, 2]},
...].
"""
if len(result) > 1:
result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold,
soft_nms_high_threshold, post_process_top_k)
result = result[result[:, -1].argsort()[::-1]]
video_duration = float(
video_info['duration_frame'] // feature_extraction_interval *
feature_extraction_interval
) / video_info['duration_frame'] * video_info['duration_second']
proposal_list = []
for j in range(min(post_process_top_k, len(result))):
proposal = {}
proposal['score'] = float(result[j, -1])
proposal['segment'] = [
max(0, result[j, 0]) * video_duration,
min(1, result[j, 1]) * video_duration
]
proposal_list.append(proposal)
return proposal_list
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
ece13f701ebfc064b3485e187b768aecae541472 | 3c6aeb458a8bec0671c1d8be18331072ac97e05f | /ohsn/stream/streaming_depressioin.py | 26a5afca34cd4c345d4b01333b7f0cef762f79ff | [] | no_license | wtgme/ohsn | d7b17ad179a789be2325e0923026a681e343a40c | 9c165d45eefa4058e7ed2c6bad348703e296362d | refs/heads/master | 2021-08-29T06:01:20.165839 | 2021-08-12T08:51:46 | 2021-08-12T08:51:46 | 44,922,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,601 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 03 03:43:09 2015
@author: wt
crawl stream with keyword-filtering
Keywords are in keywords.txt
https://dev.twitter.com/streaming/reference/post/statuses/filter
The track, follow, and locations fields should be considered to be combined with an OR operator.
track=foo&follow=1234 returns Tweets matching “foo” OR created by user 1234.
The United Kingdom lies between latitudes 49° to 61° N, and longitudes 9° W to 2° E.
Filter tweets with location, but few tweets have location information
Identify the location of users that post the crawled tweets, only store the users in UK
"""
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))
from twython import TwythonStreamer
import urllib
import imghdr
import os
import ConfigParser
import datetime
import logging
from ohsn.util import db_util as dbutil
config = ConfigParser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)), 'conf', 'TwitterAPI.cfg'))
# spin up twitter api
APP_KEY = config.get('credentials1', 'app_key')
APP_SECRET = config.get('credentials1', 'app_secret')
OAUTH_TOKEN = config.get('credentials1', 'oath_token')
OAUTH_TOKEN_SECRET = config.get('credentials1', 'oath_token_secret')
print('loaded configuation')
# spin up database
DBNAME = 'depression'
COLLECTION = 'stream'
db = dbutil.db_connect_no_auth(DBNAME)
tweets = db[COLLECTION]
# location_name = ['uk', 'u.k.', 'united kingdom', 'britain', 'england']
print("twitter connection and database connection configured")
logging.basicConfig(filename='streaming-warnings.log', level=logging.DEBUG)
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if 'warning' in data:
logging.warning(data['warning']['code'] + "\t" + data['warning']['message'] + "\t percent_full=" + data['warning']['percent_full'] +"\n")
if 'text' in data:
store_tweet(data)
# print data['user']['screen_name'].encode('utf-8') + "\t" + data['text'].encode('utf-8').replace('\n', ' ')
def on_error(self, status_code, data):
print status_code
logging.error(data['warning']['code'] + "\t" + data['warning']['message'] + "\t percent_full=" + data['warning']['percent_full'] +"\n")
# Want to stop trying to get data because of the error?
# Uncomment the next line!
# self.disconnect()
def get_pictures(tweet):
# Get pictures in the tweets store as date-tweet-id-username.ext
try:
for item in tweet['entities']['media']:
print item['media_url_https']
if item['type']=='photo':
# print "PHOTO!!!"
urllib.urlretrieve(item['media_url_https'], 'api-timelines-scraper-media/' + item['id_str'])
# code to get the extension....
ext = imghdr.what('api-timelines-scraper-media/' + item['id_str'])
os.rename('api-timelines-scraper-media/' + item['id_str'], 'api-timelines-scraper-media/' + item['id_str'] + "." + ext)
except:
pass
def store_tweet(tweet, collection=tweets, pictures=False):
"""
Simple wrapper to facilitate persisting tweets. Right now, the only
pre-processing accomplished is coercing date values to datetime.
"""
# print tweet
tweet['created_at'] = datetime.datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
collection.insert(tweet)
# global location_name
# user = tweet.get('user', None)
# if user:
# location = user['location']
# if location:
# location = location.lower()
# if any(x in location for x in location_name):
# print location
# tweet['created_at'] = datetime.datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
# tweet['user']['created_at'] = datetime.datetime.strptime(tweet['user']['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
# # get pictures in tweet...
# if pictures:
# get_pictures(tweet)
#
# #print "TODO: alter the schema of the tweet to match the edge network spec from the network miner..."
# #print "TODO: make the tweet id a unique index to avoid duplicates... db.collection.createIndex( { a: 1 }, { unique: true } )"
# collection.insert(tweet)
while True:
try:
stream = MyStreamer(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# https://dev.twitter.com/streaming/overview/request-parameters
# stream.statuses.filter(language=['en'], track=['bulimic, anorexic, ednos, ed-nos, bulimia, anorexia, eating disorder, eating-disorder, eating disordered, eating-disordered, CW, UGW, GW2, GW1, GW'])
# track_list = []
# with open('keyword.txt', 'r') as fo:
# for line in fo.readlines():
# track_list.append(line.strip())
# Depression: http://psychcentral.com/lib/types-and-symptoms-of-depression/
# stream.statuses.filter(language=['en'], track=[
# 'dysthymia', 'dysthymic', 'bipolar', 'peripartum', 'postpartum', 'melancholic',
# 'atypical', 'catatonic'])
stream.statuses.filter(language=['en'], track=['#MyDepressionLooksLike'])
except Exception as detail:
print str(detail)
| [
"wtgmme@gmail.com"
] | wtgmme@gmail.com |
4ec74b74a15727d510b711fb2f2377004c678a3a | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /Python算法指南/232_不同的路径II_上题的再应用.py | 38db0f1e03c0d402ee10b759d9d46ce78a45bde4 | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
m, n = len(obstacleGrid), len(obstacleGrid[0])
mp = [[0] * n for _ in range(m)]
mp[0][0] = 1
for i in range(m):
for j in range(n):
if obstacleGrid[i][j] == 1: # 遇到障碍物,路径清0
mp[i][j] = 0
elif i == 0 and j == 0:
mp[i][j] = 1
elif i == 0:
mp[i][j] = mp[i][j - 1]
elif j == 0:
mp[i][j] = mp[i - 1][j]
else:
mp[i][j] = mp[i - 1][j] + mp[i][j - 1]
return mp[m - 1][n - 1]
if __name__ == '__main__':
obstacleGrid = [
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
]
print("初始网格:")
for i in range(0, len(obstacleGrid)):
print(obstacleGrid[i])
solution = Solution()
print("路径条数:", solution.uniquePathsWithObstacles(obstacleGrid)) | [
"1325338208@qq.com"
] | 1325338208@qq.com |
de99595ec3ae77bc6d35d3e293f5235910a4d554 | c325db01e798fc1d985c4e40c42a4422cd59fd2a | /python/tf/pz_test.py | 31c60bb236378d8a2a617d5ed7ccb1b19af38e94 | [
"Apache-2.0"
] | permissive | google/carfac | 5078c910994dfddb8b4e068a42fab567551a6c55 | 75970ea10092e7fa32fb7d1a236cecb6dcfa796e | refs/heads/master | 2023-09-06T00:00:09.749292 | 2023-07-21T11:21:04 | 2023-07-21T11:21:48 | 11,507,786 | 99 | 39 | Apache-2.0 | 2023-04-17T09:49:31 | 2013-07-18T16:28:12 | Jupyter Notebook | UTF-8 | Python | false | false | 5,900 | py | #!/usr/bin/env python
# Copyright 2021 The CARFAC Authors. All Rights Reserved.
#
# This file is part of an implementation of Lyon's cochlear model:
# "Cascade of Asymmetric Resonators with Fast-Acting Compression"
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for carfac.python.tf.pz."""
from typing import Callable
import unittest
from absl import app
import numpy as np
import tensorflow as tf
from . import pz
class CoeffTest(unittest.TestCase):
def testCoeffs(self):
# We have a filter H, with poles P and zeros Q:
# H = g * np.prod(Q - z) / np.prod(P - z)
# Assuming Q = [1, 2, 3, 4, 5]:
# H = g * (1 - z) * (2 - z) * (3 - z) * (4 - z) * (5 - z) / np.prod(P - z)
# = Y / X
# Y = X * g * (1 - z) * (2 - z) * (3 - z) * (4 - z) * (5 - z) /
# np.prod(P - z)
# Y = X * g * (z^-1 - 1) * (2 * z^-1 - 1) * (3 * z^-1 - 1) * (4 * z^-1 - 1)
# * (5 * z^-1 - 1) / (np.prod(P - z) * z^-5)
# Y * np.prod(P - z) * z^-5 = X * (z^-1 - 1) * (2 * z^-1 - 1) *
# (3 * z^-1 - 1) * (4 * z^-1 - 1) * (5 * z^-1 - 1)
# Y * np.prod(P - z) * z^-5 = X * (-1 + 15 * z^-1 - 85 * z^-2 + 225 * z^-3
# - 274 * z^-4 + 120 * z^-5)
# Where (-1 + 15 * z^-1 - 85 * z^-2 + 225 * z^-3 - 274 * z^-4 + 120 * z^-5)
# = -(qc0 + qc1 * z^-1 + qc2 * z^-2 + qc3 * z^-3 + qc4 * z^-4 + qc5 *
# z^-5)
# And coeffs_from_zeros returns [qc0, qc1, qc2, qc3, qc4, qc5] =>
# [1, -15, 85, -225, 274, -120]
inputs: tf.Tensor = tf.constant([1, 2, 3, 4, 5], dtype=tf.complex128)
outputs: tf.Tensor = pz.coeffs_from_zeros(inputs)
expected_outputs = [1, -15, 85, -225, 274, -120]
np.testing.assert_array_almost_equal(outputs, expected_outputs)
class PZTest(unittest.TestCase):
def assert_impulse_response(self,
filt: Callable[[tf.Tensor],
tf.Tensor],
dtype: tf.DType,
gain: tf.Tensor,
poles: tf.Tensor,
zeros: tf.Tensor):
window_size = 64
impulse: np.ndarray = np.zeros([window_size], dtype=np.float32)
impulse[0] = 1
impulse_spectrum: np.ndarray = np.fft.fft(impulse)
z: np.ndarray = np.exp(np.linspace(0,
2 * np.pi,
window_size,
endpoint=False) * 1j)
transfer_function: np.ndarray = (
tf.cast(gain, tf.complex128) *
np.prod(zeros[None, :] - z[:, None],
axis=1) /
np.prod(poles[None, :] - z[:, None],
axis=1))
expected_impulse_response: np.ndarray = np.fft.ifft(
impulse_spectrum * transfer_function)
# Since the filter requires batch and cell i/o dimensions.
impulse_response = filt(tf.cast(impulse[None, :, None], dtype))[0, :, 0]
np.testing.assert_array_almost_equal(impulse_response,
expected_impulse_response)
def testPZCell(self):
for dtype in [tf.float32, tf.float64]:
poles: np.ndarray = 0.5 * np.exp([np.pi * 0.5j])
poles: tf.Tensor = tf.concat([poles, tf.math.conj(poles)], axis=0)
zeros: np.ndarray = 0.75 * np.exp([np.pi * 0.25j])
zeros: tf.Tensor = tf.concat([zeros, tf.math.conj(zeros)], axis=0)
gain: tf.Tensor = tf.constant(1.5)
pz_cell = pz.PZCell(gain,
poles,
zeros,
dtype=dtype)
pz_layer = tf.keras.layers.RNN(pz_cell,
return_sequences=True,
dtype=dtype)
self.assert_impulse_response(pz_layer, dtype, gain, poles, zeros)
def testTFFunction(self):
for dtype in [tf.float32, tf.float64]:
poles: np.ndarray = 0.1 * np.exp(np.pi * np.array([0.7j]))
poles: tf.Tensor = tf.concat([poles, tf.math.conj(poles)], axis=0)
zeros: np.ndarray = 0.75 * np.exp(np.pi * np.array([0.25j]))
zeros: tf.Tensor = tf.concat([zeros, tf.math.conj(zeros)], axis=0)
gain: tf.Tensor = tf.constant(2.4)
pz_cell = pz.PZCell(gain,
poles,
zeros,
dtype=dtype)
pz_layer = tf.keras.layers.RNN(pz_cell,
return_sequences=True,
dtype=dtype)
@tf.function
def compute(inputs):
# pylint: disable=cell-var-from-loop
return pz_layer(inputs)
self.assert_impulse_response(compute, dtype, gain, poles, zeros)
def testGradients(self):
tape = tf.GradientTape(persistent=True)
pz_cell = pz.PZCell(1,
0.5 * np.exp([np.pi * 0.2j, np.pi * 0.5j]),
0.3 * np.exp([np.pi * 0.6j]))
with tape:
current: tf.Tensor = tf.ones([2, 1], dtype=pz_cell.dtype)
state = tuple(tf.zeros(shape=[current.shape[0], size],
dtype=pz_cell.dtype)
for size in pz_cell.state_size)
for _ in range(6):
current, state = pz_cell.call(current, state)
for v in [pz_cell.poles, pz_cell.zeros, pz_cell.gain]:
self.assertTrue(np.isfinite(tape.gradient(current, v)).all())
def main(_):
unittest.main()
if __name__ == '__main__':
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
24cbc1db1201293c58a19ff262bad4e6793375b8 | 2a97a5816f79282878855c7355f7400a36ac1839 | /UTKFace/UTKFace_128x128/CcGAN-improved/models/ResNet_regre_eval.py | 8efb719cfe621bc6eeb99a39d9c5f8bc9683c091 | [] | no_license | simonlevine/improved_CcGAN | 309040cb7ec74b5ef68c3b31f6a32e715df3029e | 3f2660c4a466240b7b3896e8e2ce7aaad759862a | refs/heads/master | 2023-08-13T02:18:55.327856 | 2021-09-24T07:56:48 | 2021-09-24T07:56:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,190 | py | '''
codes are based on
@article{
zhang2018mixup,
title={mixup: Beyond Empirical Risk Minimization},
author={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},
journal={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1Ddp1-Rb},
}
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
NC = 3
IMG_SIZE = 128
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet_regre_eval(nn.Module):
def __init__(self, block, num_blocks, nc=NC, ngpu = 1, feature_layer='f3'):
super(ResNet_regre_eval, self).__init__()
self.in_planes = 64
self.ngpu = ngpu
self.feature_layer=feature_layer
self.block1 = nn.Sequential(
nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False), # h=h
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(2,2), #h=h/2 64
self._make_layer(block, 64, num_blocks[0], stride=2), # h=h/2 32
)
self.block2 = self._make_layer(block, 128, num_blocks[1], stride=2) # h=h/2 16
self.block3 = self._make_layer(block, 256, num_blocks[2], stride=2) # h=h/2 8
self.block4 = self._make_layer(block, 512, num_blocks[3], stride=2) # h=h/2 4
self.pool1 = nn.AvgPool2d(kernel_size=4)
if self.feature_layer == 'f2':
self.pool2 = nn.AdaptiveAvgPool2d((2,2))
elif self.feature_layer == 'f3':
self.pool2 = nn.AdaptiveAvgPool2d((2,2))
else:
self.pool2 = nn.AdaptiveAvgPool2d((1,1))
linear_layers = [
nn.Linear(512*block.expansion, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 1),
# nn.Sigmoid()
nn.ReLU(),
]
self.linear = nn.Sequential(*linear_layers)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
if x.is_cuda and self.ngpu > 1:
ft1 = nn.parallel.data_parallel(self.block1, x, range(self.ngpu))
ft2 = nn.parallel.data_parallel(self.block2, ft1, range(self.ngpu))
ft3 = nn.parallel.data_parallel(self.block3, ft2, range(self.ngpu))
ft4 = nn.parallel.data_parallel(self.block4, ft3, range(self.ngpu))
out = nn.parallel.data_parallel(self.pool1, ft4, range(self.ngpu))
out = out.view(out.size(0), -1)
out = nn.parallel.data_parallel(self.linear, out, range(self.ngpu))
else:
ft1 = self.block1(x)
ft2 = self.block2(ft1)
ft3 = self.block3(ft2)
ft4 = self.block4(ft3)
out = self.pool1(ft4)
out = out.view(out.size(0), -1)
out = self.linear(out)
if self.feature_layer == 'f2':
ext_features = self.pool2(ft2)
elif self.feature_layer == 'f3':
ext_features = self.pool2(ft3)
else:
ext_features = self.pool2(ft4)
ext_features = ext_features.view(ext_features.size(0), -1)
return out, ext_features
def ResNet18_regre_eval(ngpu = 1):
return ResNet_regre_eval(BasicBlock, [2,2,2,2], ngpu = ngpu)
def ResNet34_regre_eval(ngpu = 1):
return ResNet_regre_eval(BasicBlock, [3,4,6,3], ngpu = ngpu)
def ResNet50_regre_eval(ngpu = 1):
return ResNet_regre_eval(Bottleneck, [3,4,6,3], ngpu = ngpu)
def ResNet101_regre_eval(ngpu = 1):
return ResNet_regre_eval(Bottleneck, [3,4,23,3], ngpu = ngpu)
def ResNet152_regre_eval(ngpu = 1):
return ResNet_regre_eval(Bottleneck, [3,8,36,3], ngpu = ngpu)
if __name__ == "__main__":
net = ResNet34_regre_eval(ngpu = 1).cuda()
x = torch.randn(4,NC,IMG_SIZE,IMG_SIZE).cuda()
out, features = net(x)
print(out.size())
print(features.size())
| [
"dingx92@gmail.com"
] | dingx92@gmail.com |
f990494489bde1a8610d3c197a3d453e56275a20 | b47c136e077f5100478338280495193a8ab81801 | /Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/fxas21002c_simpletest.py | 03e33d75201c976b1b9d57b78db301d55e193ae0 | [
"Apache-2.0"
] | permissive | IanSMoyes/SpiderPi | 22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | refs/heads/master | 2023-03-20T22:30:23.362137 | 2021-03-12T17:37:33 | 2021-03-12T17:37:33 | 339,555,949 | 16 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Simple demo of the FXAS21002C gyroscope.
# Will print the gyroscope values every second.
import time
import board
import busio
import adafruit_fxas21002c
# Initialize I2C bus and device.
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_fxas21002c.FXAS21002C(i2c)
# Optionally create the sensor with a different gyroscope range (the
# default is 250 DPS, but you can use 500, 1000, or 2000 DPS values):
# sensor = adafruit_fxas21002c.FXAS21002C(i2c, gyro_range=adafruit_fxas21002c.GYRO_RANGE_500DPS)
# sensor = adafruit_fxas21002c.FXAS21002C(i2c, gyro_range=adafruit_fxas21002c.GYRO_RANGE_1000DPS)
# sensor = adafruit_fxas21002c.FXAS21002C(i2c, gyro_range=adafruit_fxas21002c.GYRO_RANGE_2000DPS)
# Main loop will read the gyroscope values every second and print them out.
while True:
# Read gyroscope.
gyro_x, gyro_y, gyro_z = sensor.gyroscope
# Print values.
print(
"Gyroscope (radians/s): ({0:0.3f}, {1:0.3f}, {2:0.3f})".format(
gyro_x, gyro_y, gyro_z
)
)
# Delay for a second.
time.sleep(1.0)
| [
"ians.moyes@gmail.com"
] | ians.moyes@gmail.com |
2543d81a8f1e8d62cca1d44ab2baf964f4eeb4e7 | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /16/addons/script.icechannel.extn.common/plugins/tvandmovies/g2g_mvs.py | 63c69da9c356ca9af9df9f71a21c8688c0011255 | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | '''
g2g.cm # OdrtKapH2dNRpVHxhBtg
Copyright (C) 2013
'''
from entertainment.plugnplay.interfaces import MovieSource
#from entertainment.plugnplay.interfaces import CustomSettings
from entertainment.plugnplay import Plugin
from entertainment import common
import os
from entertainment.xgoogle.search import GoogleSearch
import xbmc
import xbmcgui
class g2g(MovieSource):
implements = [MovieSource]
name = "g2g"
display_name = "g2g.cm"
base_url = 'http://g2gfmmovies.com/'
#img='https://raw.githubusercontent.com/Coolwavexunitytalk/images/92bed8a40419803f31f90e2268956db50d306997/flixanity.png'
source_enabled_by_default = 'true'
cookie_file = os.path.join(common.cookies_path, 'g2glogin.cookie')
icon = common.notify_icon
'''
def __init__(self):
xml = '<settings>\n'
xml += '<category label="Account">\n'
xml += '<setting id="tv_user" type="text" label="Email" default="Enter your noobroom email" />\n'
xml += '<setting id="tv_pwd" type="text" option="hidden" label="Password" default="xunity" />'
xml += '<setting label="Premium account will allow for 1080 movies and the TV Shows section" type="lsep" />\n'
xml += '<setting id="premium" type="bool" label="Enable Premium account" default="false" />\n'
xml += '</category>\n'
xml += '</settings>\n'
self.CreateSettings(self.name, self.display_name, xml)
'''
def GetFileHosts(self, url, list, lock, message_queue,type):
import re
from entertainment.net import Net
net = Net(cached=False)
print '################################'
print url
content = net.http_GET(url).content
if type == 'movies':
r='class="movie_version_link"> <a href="(.+?)".+?document.writeln\(\'(.+?)\'\)'
else:
r='class="movie_version_link"> <a href="(.+?)".+?version_host">(.+?)<'
match=re.compile(r,re.DOTALL).findall(content)
for item_url ,HOST in match:
self.AddFileHost(list, 'DVD', item_url,host=HOST.upper())
def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
from entertainment.net import Net
import re
#net = Net(cached=False)
name = self.CleanTextForSearch(name)
import urllib
name = name.lower()
net = Net(cached=False)
if type == 'movies':
title = self.CleanTextForSearch(title)
name = self.CleanTextForSearch(name)
URL= self.base_url+'?type=movie&keywords=%s' %name.replace(' ','+')
content = net.http_GET(URL).content
match =re.compile('href="(.+?)" target="_blank"><img class="image" src=".+?" alt="(.+?)"').findall(content)
for item_url , name in match:
if year in name:
#print item_url
self.GetFileHosts(item_url, list, lock, message_queue,type)
elif type == 'tv_episodes':
title = self.CleanTextForSearch(title)
name = self.CleanTextForSearch(name)
URL= self.base_url+'?type=tv&keywords=%s' %name.replace(' ','+')
content = net.http_GET(URL).content
match =re.compile('href="(.+?)" target="_blank"><img class="image" src=".+?" alt="(.+?)"').findall(content)
for url , NAME in match:
if name.lower() in self.CleanTextForSearch(NAME.lower()):
url=url.replace('-online.html','')
item_url=url+'-season-%s-episode-%s-online.html' % (season,episode)
self.GetFileHosts(item_url, list, lock, message_queue,type)
def Resolve(self, url):
from entertainment.net import Net
import re
net = Net(cached=False)
import base64
print url
content = net.http_GET(url).content
URL=base64.b64decode(re.compile('&url=(.+?)&').findall(content)[0])
#print '###############################'
#print URL
from entertainment import istream
play_url = istream.ResolveUrl(URL)
#print play_url
return play_url
| [
"esc0rtd3w@gmail.com"
] | esc0rtd3w@gmail.com |
35ee20b58a930dc5be88bf349a6be48e6d7f59ea | 986769755b642932cbbf4b3f1022ff04980014cd | /WhatManager2/urls.py | 911fedfad0903f35b437e5a36f7179c3b98c8ad7 | [
"MIT"
] | permissive | jimrollenhagen/WhatManager2 | 47d4bba79dc9c80a1f91576b334235f419da3160 | 832b04396f23c19764557b7ccb6d563035d424ec | refs/heads/master | 2020-12-28T22:46:02.411169 | 2014-09-02T02:53:44 | 2014-09-02T02:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('home.urls')),
url(r'^json/', include('what_json.urls')),
url(r'^download/', include('download.urls')),
url(r'^user/', include('login.urls')),
url(r'^queue/', include('queue.urls')),
url(r'^profile/', include('what_profile.urls')),
url(r'^player/', include('player.urls')),
url(r'^allmusic/', include('allmusic.urls')),
url(r'^torrent_list/', include('torrent_list.urls')),
url(r'^transcode/', include('what_transcode.urls')),
url(r'^books/', include('books.urls')),
url(r'^books/', include('bibliotik.urls')),
url(r'^books/bibliotik/json/', include('bibliotik_json.urls')),
url(r'^userscript/', include('userscript.urls')),
)
| [
"ivailo@karamanolev.com"
] | ivailo@karamanolev.com |
7564e367b36c449b7e12686ed0ab34031b201f69 | 2b96be128373ddd61945bf8b34af832844867b20 | /errormsg.py | 5c4ab463a62c612c60074f07580c20d63b351711 | [] | no_license | novaliu86/apparatus3-seq | 330f7039ec1404cc3badfbd88a3f0a0e396e8421 | 3f7bae71e9844444f2b354fc3c5f5455ca67b2e4 | refs/heads/master | 2021-01-17T22:30:06.239255 | 2013-10-04T23:33:29 | 2013-10-04T23:33:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py |
from Tkinter import *
import tkMessageBox
def box(title,msg):
window = Tk()
window.wm_withdraw()
tkMessageBox.showerror(title=title,message=msg, parent=window)
if __name__ == "__main__":
box("error","Error Mesagge")
##window = Tk()
##window.wm_withdraw()
#print window.geometry()
#message at x:0,y:0
#window.geometry("500x500+100+100")#remember its .geometry("WidthxHeight(+or-)X(+or-)Y")
##tkMessageBox.showerror(title="error",message="Error Message",parent=window)
#centre screen message
#window.geometry("1x1+"+str(window.winfo_screenwidth()/2)+"+"+str(window.winfo_screenheight()/2))
#print window.geometry()
#tkMessageBox.showinfo(title="Greetings", message="Hello World!")
| [
"pmd323@gmail.com"
] | pmd323@gmail.com |
ef4c4c1f3068800382a2dbdbacfe2b987fe76892 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/505.py | 22a5a74feb5b7fe6de6eacca91094c15de6f4929 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | # ___ ______________________________________________ ___ #
#|_/*| |*\_|#
#|_/*| Google Code Jam - "Hello World" |*\_|#
#|_/*| 10.04.2015 - the end |*\_|#
#|_/*| Qualification |*\_|#
#|_/*|______________________________________________|*\_|#
#| |#
#| Denis Werner - denis@nobbd.de |#
#|______________________________________________________|#
# #
import math
filename = "B-large.in"
filename = "B-small.in"
filename = "B-small-attempt6.in"
lines = ()
with open(filename) as file:
lines = file.read().splitlines()
number_of_sets = int(lines[0])
with open(filename+".solved","w") as outputfile:
for i in range(0,number_of_sets):
eaters = int(lines[i*2+1])
plates = map(int,lines[i*2+2].split(" "))
solved = False
rounds = 0
max_init = max(plates)
print " "
print "###### ROUND "+str(i+1)+" ########"
while not solved:
print plates
#get max pancakes
c = max(plates)
# create log-list for current plates list
log_list = [0]*(c+1)
for li in range(0,c+1):
log_list[li] = li
for base in range(2,c):
#print "base:"+str(base)
for pi in range(0,len(plates)):
current_p = plates[pi]
#print "pi: "+str(current_p)
if current_p > base:
#cur_log = int(math.log(current_p,base))
cur_log = float(current_p)/base
#print "log: "+str(cur_log)
log_list[base] += max(1,cur_log-1)
print "log list: " + str(log_list)
log_list = log_list[2:]
if log_list:
new_best = len(log_list) - log_list[::-1].index(min(log_list)) + 1
#best_split = log_list.index(min(log_list)) + 2
best_split = new_best
print "Best split till: " + str(best_split)
#print "new: " + str(new_best)
print "max: "+str(c)
print c==best_split
if c == best_split:
solved = True
rounds += best_split
else:
for pi in range(0,len(plates)):
p = plates[pi]
if p > best_split:
print str(p)+" splitted."
rounds += 1
plates.append(best_split)
plates[pi] -= best_split
else:
rounds += c
solved = True
print "Solved no log list"
if rounds > max_init:
print "rounds bigger then max"
rounds = max_init
# number of plates with > p/2 pancakes < p/2 then split
line = "Case #"+str(i+1)+": "+str(int(rounds))
print line
outputfile.write(line+"\n")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c84bb7cc50aed8f41a750faecaf4c15d1962f258 | 554090526e41ab16f927e5fcb772e119923d6080 | /three_charts.py | f43d1dbcd5b0ae57c779948590b52519a681fae4 | [] | no_license | s2t2/charts-gallery-py | f95a151995b9b72ccc1e1823a6d3689a21d5489a | 64abd69be3cd4fcbd7732eb1f4e0ac4f1715bae7 | refs/heads/master | 2020-04-21T05:58:24.546261 | 2019-02-06T21:20:08 | 2019-02-06T21:20:08 | 169,354,429 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | # three_charts.py
#
# CHART 1 (PIE)
#
pie_data = [
{"company": "Company X", "market_share": 0.55},
{"company": "Company Y", "market_share": 0.30},
{"company": "Company Z", "market_share": 0.15}
]
print("----------------")
print("GENERATING PIE CHART...")
print(pie_data) # TODO: create a pie chart based on the pie_data
#
# CHART 2 (LINE)
#
line_data = [
{"date": "2019-01-01", "stock_price_usd": 100.00},
{"date": "2019-01-02", "stock_price_usd": 101.01},
{"date": "2019-01-03", "stock_price_usd": 120.20},
{"date": "2019-01-04", "stock_price_usd": 107.07},
{"date": "2019-01-05", "stock_price_usd": 142.42},
{"date": "2019-01-06", "stock_price_usd": 135.35},
{"date": "2019-01-07", "stock_price_usd": 160.60},
{"date": "2019-01-08", "stock_price_usd": 162.62},
]
print("----------------")
print("GENERATING LINE GRAPH...")
print(line_data) # TODO: create a line graph based on the line_data
#
# CHART 3 (HORIZONTAL BAR)
#
bar_data = [
{"genre": "Thriller", "viewers": 123456},
{"genre": "Mystery", "viewers": 234567},
{"genre": "Sci-Fi", "viewers": 987654},
{"genre": "Fantasy", "viewers": 876543},
{"genre": "Documentary", "viewers": 283105},
{"genre": "Action", "viewers": 544099},
{"genre": "Romantic Comedy", "viewers": 121212}
]
print("----------------")
print("GENERATING BAR CHART...")
print(bar_data) # TODO: create a horizontal bar chart based on the bar_data
| [
"s2t2@users.noreply.github.com"
] | s2t2@users.noreply.github.com |
b194d0f0623ac12adb20f646bd9719a895c6fa03 | 4292312541c9f13cb501be3ade936ff156e80fbe | /proso_models/api_test.py | d4f16107e4ddf2a2129437b51615a2a0a0822049 | [
"MIT"
] | permissive | thanhtd91/proso-apps | 995ce5a327a93a6c77ebbe21297c3c18bde92711 | 58c95ebb4da1207de8972237c383489575ce2a20 | refs/heads/master | 2022-09-23T05:03:51.276549 | 2020-03-19T22:02:12 | 2020-03-19T22:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | from django.conf import settings
from django.core.management import call_command
from proso.django.test import TestCase
from proso_models.models import Item
from proso_flashcards.models import Term, Flashcard, Category, Context
import json
class PracticeAPITest(TestCase):
fixtures = [
'test_common_data.yaml',
'test_models_data.yaml',
'test_flashcards_data.yaml',
'test_testapp_data.yaml'
]
def setUp(self):
self._categories = dict([((c.identifier, c.lang), c) for c in Category.objects.all()])
self._contexts = dict([((c.identifier, c.lang), c) for c in Context.objects.all()])
self._terms = dict([((t.identifier, t.lang), t) for t in Term.objects.all()])
self._flashcards = dict([((f.identifier, f.lang), f) for f in Flashcard.objects.select_related('term', 'context').all()])
call_command('find_item_types')
call_command('fill_item_types')
def test_language(self):
for lang in [None, 'cs', 'en']:
if lang is not None:
content = self._get_practice(language=lang)
else:
content = self._get_practice()
lang = settings.LANGUAGE_CODE[:2]
for question in content['data']:
flashcard = question['payload']
self.assertEqual(flashcard['lang'], lang, 'The flashcard has an expected language.')
self.assertEqual(flashcard['term']['lang'], lang, 'The term has an expected language.')
for option in flashcard.get('options', []):
self.assertEqual(option['lang'], lang, 'The option flashcard has an expected language.')
self.assertEqual(option['term']['lang'], lang, 'The option term has an expected language.')
def test_limit(self):
for limit in [1, 5, 10]:
content = self._get_practice(language='en', limit=limit)
self.assertEqual(len(content['data']), limit, 'There is proper number of questions.')
def test_categories(self):
for category_name, term_type_name in [('world', 'state'), ('cz', 'city'), ('africa', 'state')]:
practice_filter = '[["category/{}"], ["category/{}"]]'.format(term_type_name, category_name)
content = self._get_practice(language='en', filter=practice_filter, limit=100)
for question in content['data']:
term = self._terms[question['payload']['term']['identifier'], 'en']
term_categories = Item.objects.get_parents_graph([term.item_id])[term.item_id]
category = self._categories[category_name, 'en']
term_type = self._categories[term_type_name, 'en']
self.assertTrue({term_type.item_id, category.item_id}.issubset(term_categories), "The term has expected categories.")
def test_avoid(self):
avoid = list(map(lambda f: f.item_id, [f for f in list(self._flashcards.values()) if f.lang == 'en']))[:-10]
content = self._get_practice(language='en', avoid=json.dumps(avoid), limit=10)
found = [q['payload']['item_id'] for q in content['data']]
self.assertEqual(set(found) & set(avoid), set(), "There is no flashcard with avoided id.")
def _get_practice(self, **kwargs):
kwargs_str = '&'.join(['%s=%s' % (key_val[0], key_val[1]) for key_val in list(kwargs.items())])
url = '/models/practice/?%s' % kwargs_str
response = self.client.get(url)
self.assertEqual(response.status_code, 200, 'The status code is OK, url: %s' % url)
content = json.loads(response.content.decode("utf-8"))
self.assertGreater(len(content['data']), 0, 'There is at least one question, url: %s' % url)
return content
| [
"jan.papousek@gmail.com"
] | jan.papousek@gmail.com |
e398a0c0124acbf11bc286459c836a7fd9b5c0dd | 62039bcca548f2b974d13e7ef17d78ba39cf8010 | /tests/test_ncbi_entrez_annotations.py | 692eeea55b1f89ca6b915c5827721165681c9d79 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | camiloaruiz/goatools | 8d48ef34d13df15fcc738aba2dcbe67032cdf5e3 | 3da97251ccb6c5e90b616c3f625513f8aba5aa10 | refs/heads/master | 2020-03-20T07:40:22.899604 | 2018-08-07T19:59:34 | 2018-08-07T19:59:34 | 137,287,908 | 0 | 0 | BSD-2-Clause | 2018-06-14T00:56:11 | 2018-06-14T00:56:10 | null | UTF-8 | Python | false | false | 3,653 | py | """Tests downloading and reading of the GO annotation file from NCBI Gene.
python test_NCBI_Entrez_annotations.py
"""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import sys
from collections import defaultdict
from goatools.associations import get_assoc_ncbi_taxids
from goatools.test_data.genes_NCBI_9606_ProteinCoding import GeneID2nt as GeneID2nt_hsa
from goatools.test_data.genes_NCBI_7227_ProteinCoding import GeneID2nt as GeneID2nt_dme
def test_ncbi_gene2go(log=sys.stdout):
"""Return GO associations to Entrez GeneIDs. Download if necessary.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
49672 items found in gene2go from NCBI's ftp server
taxid GOs GeneIDs Description
----- ------ ------- -----------
10090 16,807 18,971 all DNA items
7227 7,022 12,019 all DNA items
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,299 18,680 all DNA items
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# Get associations for human(9606), mouse(10090), and fly(7227)
# (optional) multi-level dictionary separate associations by taxid
taxid2asscs = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
# Simple dictionary containing id2gos
taxids = [9606, 10090, 7227]
id2gos = get_assoc_ncbi_taxids(taxids, taxid2asscs=taxid2asscs, loading_bar=None)
log.write(" {N} items found in gene2go from NCBI's ftp server\n".format(N=len(id2gos)))
taxid2pc = {9606:GeneID2nt_hsa, 7227:GeneID2nt_dme}
# Report findings
log.write(" taxid GOs GeneIDs Description\n")
log.write(" ----- ------ ------- -----------\n")
for taxid, asscs in taxid2asscs.items():
num_gene2gos_all = len(asscs['GeneID2GOs'])
num_go2genes_all = len(asscs['GO2GeneIDs'])
log.write(" {TAXID:>6} {N:>6,} {M:>7,} all DNA items\n".format(
TAXID=taxid, N=num_go2genes_all, M=num_gene2gos_all))
# Basic check to ensure gene2go was downloaded and data was returned.
assert num_gene2gos_all > 11000
assert num_go2genes_all > 6000
if taxid in taxid2pc.keys():
rpt_coverage(taxid, asscs, taxid2pc[taxid], log)
def rpt_coverage(taxid, asscs, pc2nt, log):
"""Calculate and report GO coverage on protein-coding genes.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
taxid GOs GeneIDs Description
----- ------ ------- -----------
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# List of all protein-coding genes have GO terms associated with them
geneid2gos = asscs['GeneID2GOs']
pcgene_w_gos = set(geneid2gos.keys()).intersection(set(pc2nt.keys()))
num_pcgene_w_gos = len(pcgene_w_gos)
num_pc_genes = len(pc2nt)
perc_cov = 100.0*num_pcgene_w_gos/num_pc_genes
# Get list of GOs associated with protein-coding genes
gos_pcgenes = set()
for geneid in pcgene_w_gos:
gos_pcgenes |= geneid2gos[geneid]
txt = " {TAXID:>6} {N:>6,} {M:>7,} {COV:2.0f}% GO coverage of {TOT:,} protein-coding genes\n"
log.write(txt.format(
TAXID=taxid, N=len(gos_pcgenes), M=num_pcgene_w_gos, COV=perc_cov, TOT=num_pc_genes))
if __name__ == '__main__':
test_ncbi_gene2go()
| [
"dvklopfenstein@users.noreply.github.com"
] | dvklopfenstein@users.noreply.github.com |
a9b1343d26aab1a47f18de34c95953d66bfe7238 | 50e2012ecea8307e278d1132ca0094adb940aff2 | /lib/review/my_sort/xuan_ze_sort.py | 2d147534f7a757a5ad6e0191d111ab11785b0a36 | [] | no_license | Lewescaiyong/my_library | 6689cae2db4aaa980b4bd5ed9f21691eefbff2fe | 35d0d29097823ccef74fa29ca8756a7f59ceeb78 | refs/heads/master | 2020-11-25T09:20:56.484275 | 2019-12-17T10:58:20 | 2019-12-17T10:58:20 | 228,593,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
a_list = range(30)
random.shuffle(a_list)
print a_list
# 使用选择排序对列表进行排序
for i in range(len(a_list) - 1):
for j in range(i + 1, len(a_list)):
if a_list[i] > a_list[j]:
a_list[i], a_list[j] = a_list[j], a_list[i]
print a_list
| [
"1351153527@qq.com"
] | 1351153527@qq.com |
d14de9dd1f4ecedfcd933de9d92811fb01d16fe3 | 35fd40fbc4cfa46272c4031b9ca0cb88572e3fa4 | /xmonitor/common/scripts/utils.py | 2d365bad42cc5429e98be796e44ca4910938b1aa | [
"Apache-2.0"
] | permissive | froyobin/xmonitor | 3d662541387226a4ff1c18ef450fdc77a769d0b8 | 092dcaa01f834353ffd8dd3c40edf9e97543bfe8 | refs/heads/master | 2020-12-23T22:33:15.758127 | 2016-06-30T06:18:05 | 2016-06-30T06:18:05 | 62,284,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,574 | py | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'get_task',
'unpack_task_input',
'set_base_image_properties',
'validate_location_uri',
'get_image_data_iter',
]
from oslo_log import log as logging
from six.moves import urllib
from xmonitor.common import exception
from xmonitor.i18n import _, _LE
LOG = logging.getLogger(__name__)
def get_task(task_repo, task_id):
"""Gets a TaskProxy object.
:param task_repo: TaskRepo object used to perform DB operations
:param task_id: ID of the Task
"""
task = None
try:
task = task_repo.get(task_id)
except exception.NotFound:
msg = _LE('Task not found for task_id %s') % task_id
LOG.exception(msg)
return task
def unpack_task_input(task):
"""Verifies and returns valid task input dictionary.
:param task: Task domain object
"""
task_input = task.task_input
# NOTE: until we support multiple task types, we just check for
# input fields related to 'import task'.
for key in ["import_from", "import_from_format", "image_properties"]:
if key not in task_input:
msg = _("Input does not contain '%(key)s' field") % {"key": key}
raise exception.Invalid(msg)
return task_input
def set_base_image_properties(properties=None):
"""Sets optional base properties for creating Image.
:param properties: Input dict to set some base properties
"""
if isinstance(properties, dict) and len(properties) == 0:
# TODO(nikhil): We can make these properties configurable while
# implementing the pipeline logic for the scripts. The below shown
# are placeholders to show that the scripts work on 'devstack'
# environment.
properties['disk_format'] = 'qcow2'
properties['container_format'] = 'bare'
def validate_location_uri(location):
"""Validate location uri into acceptable format.
:param location: Location uri to be validated
"""
if not location:
raise exception.BadStoreUri(_('Invalid location: %s') % location)
elif location.startswith(('http://', 'https://')):
return location
# NOTE: file type uri is being avoided for security reasons,
# see LP bug #942118 #1400966.
elif location.startswith(("file:///", "filesystem:///")):
msg = _("File based imports are not allowed. Please use a non-local "
"source of image data.")
# NOTE: raise BadStoreUri and let the encompassing block save the error
# msg in the task.message.
raise exception.BadStoreUri(msg)
else:
# TODO(nikhil): add other supported uris
supported = ['http', ]
msg = _("The given uri is not valid. Please specify a "
"valid uri from the following list of supported uri "
"%(supported)s") % {'supported': supported}
raise urllib.error.URLError(msg)
def get_image_data_iter(uri):
    """Returns iterable object either for local file or uri

    :param uri: uri (remote or local) to the datasource we want to iterate

    Validation/sanitization of the uri is expected to happen before we get
    here.
    """
    # NOTE(flaper87): This is safe because the input uri is already
    # verified before the task is created.
    if uri.startswith("file://"):
        uri = uri.split("file://")[-1]
        # NOTE(flaper87): The caller of this function expects to have
        # an iterable object. FileObjects in python are iterable, therefore
        # we are returning it as is.
        # The file descriptor will be eventually cleaned up by the garbage
        # collector once its ref-count is dropped to 0. That is, when there
        # wont be any references pointing to this file.
        #
        # We're not using StringIO or other tools to avoid reading everything
        # into memory. Some images may be quite heavy.
        #
        # Bug fix: open in binary mode. Image payloads are raw bytes; text
        # mode ("r") would attempt to decode them and corrupt/fail on
        # non-UTF-8 data, and would return str instead of bytes.
        return open(uri, "rb")

    return urllib.request.urlopen(uri)
| [
"froyo.bin@gmail.com"
] | froyo.bin@gmail.com |
659ce6eedcc37d786b1fbb227a329de04172d815 | 7981914523b28c54576ce548ec4c326314a997cf | /setup.py | 39858b2cb46006739ec3dd881ee76b1df57430a3 | [] | no_license | geyang/tf_logger | 83168499c0acb45890ef68ffce2528848e574ab2 | e68f7d19e014d2d7878513c276802db4aa37d8d2 | refs/heads/master | 2021-09-12T13:13:25.920198 | 2018-04-17T04:35:40 | 2018-04-17T04:35:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | from os import path
from setuptools import setup

# Resolve the sibling README/VERSION files relative to this setup.py so the
# build works regardless of the current working directory.
_here = path.abspath(path.dirname(__file__))

with open(path.join(_here, 'README'), encoding='utf-8') as fh:
    long_description = fh.read()

with open(path.join(_here, 'VERSION'), encoding='utf-8') as fh:
    version = fh.read()

setup(
    name="tf_logger",
    description="A print and debugging utility that makes your error printouts look nice",
    long_description=long_description,
    version=version,
    url="https://github.com/episodeyang/tf_logger",
    author="Ge Yang",
    author_email="yangge1987@gmail.com",
    license=None,
    keywords=["tf_logger", "tensorflow", "logging", "debug", "debugging"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "Programming Language :: Python :: 3"
    ],
    packages=["tf_logger"],
    install_requires=["typing", "tensorflow", "numpy", "termcolor", "pprint"],
)
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
ccae434857ea91044d3f302e99af40d91f834838 | ec6cb8542c8ed962d24ca32fc1f060ef63fdfea7 | /第一阶段/review_month01/month01/day10/demo03.py | 4912dc1a0e7e42d288f7093cb349fbabc706c428 | [] | no_license | singerdo/songers | 27859a4ff704318d149b2aa6613add407d88bb5d | 9c5dcd80c6772272c933b06c156b33058cbd3ce4 | refs/heads/master | 2022-04-16T11:00:11.002138 | 2020-04-18T07:15:16 | 2020-04-18T07:15:16 | 256,686,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | """
实例成员: 对象.成员名
实例变量
对象.变量名
实例方法
对象.方法名称()
"""
# global variable (module scope)
a = 10
def func01():
    # local variable: exists only while func01 is executing
    b = 20
class MyClass:
    """Teaching demo: instance variables vs. instance methods."""
    def __init__(self, c):
        # instance variable, stored on the object itself
        self.c = c
    def func02(self):
        # instance method: normally invoked through an object reference
        pass
mc01 = MyClass(30)
print(mc01.c)
print(mc01.__dict__)# built-in mapping of all instance variables, e.g. {'c': 30}
# mc01.d = 40 # would create an instance variable on the fly (not recommended)
mc01.func02() # call the instance method through the object reference
# MyClass.func02(mc01) # 不建议 | [
"569593546@qq.com"
] | 569593546@qq.com |
202548ecc73466cb75e0fc08f83fc70b4de52c7f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_194/ch36_2019_04_04_17_41_44_521451.py | 41f5c5173746f4ebf02bbff3c65de5db745362a3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | def eh_primo(n):
primo = True
divisor = 2
if n < 2:
primo = False
elif n == 2:
primo = True
else:
while divisor < n:
if n % divisor == 0:
primo = False
divisor += 1
return primo | [
"you@example.com"
] | you@example.com |
268bb911d88b9c496bb99a9f29b74403225a2e3d | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/scattercarpet/line/_width.py | ee5a9364a2f3fac047fec5cebbf518dc64a31c27 | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 544 | py | import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scattercarpet.line.width`` attribute."""

    def __init__(self, plotly_name='width',
                 parent_name='scattercarpet.line', **kwargs):
        # Fill in the validator defaults only when the caller has not
        # overridden them, then forward everything to the base class.
        kwargs.setdefault('anim', True)
        kwargs.setdefault('edit_type', 'style')
        kwargs.setdefault('min', 0)
        kwargs.setdefault('role', 'style')
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs
        )
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
49f358698f83b5d59960551bec1a1439c00161b0 | 614271299ef8145ad40d7ff197897b1a5d598bea | /slackchannel2pdf/settings.py | cb39f8fcfec46ca3eb9dc13659ec6ce215026ad4 | [
"MIT"
] | permissive | 17500mph/slackchannel2pdf | 14498685cdb8d9ac4bdc586948560e8adbd1151a | 2848dfaaffbf9a5255c6dbe87dcc1e90d062b820 | refs/heads/master | 2023-03-04T16:49:28.638394 | 2021-02-21T11:28:16 | 2021-02-21T11:28:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | """Defines all global settings incl. from configuration files"""
from ast import literal_eval
import configparser
from pathlib import Path
# Base name shared by the configuration (.ini) and log (.log) file names.
_FILE_NAME_BASE = "slackchannel2pdf"
_CONF_FILE_NAME = f"{_FILE_NAME_BASE}.ini"
_LOG_FILE_NAME = f"{_FILE_NAME_BASE}.log"
# Directory of this module; holds the packaged default configuration file.
_DEFAULTS_PATH = Path(__file__).parent
def _configparser_convert_str(x):
result = literal_eval(x)
if not isinstance(result, str):
raise configparser.ParsingError(f"Needs to be a string type: {x}")
return result
def config_parser(
    defaults_path: Path, home_path: Path = None, cwd_path: Path = None
) -> configparser.ConfigParser:
    """Build a parser layered from the defaults, home and cwd config files.

    Later files override earlier ones; only the defaults file is mandatory.
    """
    parser = configparser.ConfigParser(converters={"str": _configparser_convert_str})
    candidates = [defaults_path / _CONF_FILE_NAME]
    for optional_dir in (home_path, cwd_path):
        if optional_dir:
            candidates.append(optional_dir / _CONF_FILE_NAME)
    found = parser.read(candidates)
    if not found:
        raise RuntimeError("Can not find a configuration file anywhere")
    return parser
# Layered configuration: package defaults, then home directory, then cwd.
_my_config = config_parser(
    defaults_path=_DEFAULTS_PATH, home_path=Path.home(), cwd_path=Path.cwd()
)
# style and layout settings for PDF
PAGE_UNITS_DEFAULT = "mm"
FONT_FAMILY_DEFAULT = "NotoSans"
FONT_FAMILY_MONO_DEFAULT = "NotoSansMono"
PAGE_ORIENTATION_DEFAULT = _my_config.getstr("pdf", "page_orientation")
PAGE_FORMAT_DEFAULT = _my_config.getstr("pdf", "page_format")
FONT_SIZE_NORMAL = _my_config.getint("pdf", "font_size_normal")
FONT_SIZE_LARGE = _my_config.getint("pdf", "font_size_large")
FONT_SIZE_SMALL = _my_config.getint("pdf", "font_size_small")
LINE_HEIGHT_DEFAULT = _my_config.getint("pdf", "line_height_default")
LINE_HEIGHT_SMALL = _my_config.getint("pdf", "line_height_small")
MARGIN_LEFT = _my_config.getint("pdf", "margin_left")
TAB_WIDTH = _my_config.getint("pdf", "tab_width")
# locale
FALLBACK_LOCALE = _my_config.getstr("locale", "fallback_locale")
# slack
MINUTES_UNTIL_USERNAME_REPEATS = _my_config.getint(
    "slack", "minutes_until_username_repeats"
)
MAX_MESSAGES_PER_CHANNEL = _my_config.getint("slack", "max_messages_per_channel")
SLACK_PAGE_LIMIT = _my_config.getint("slack", "slack_page_limit")
def setup_logging(config: configparser.ConfigParser) -> dict:
    """Build a logging configuration dict from *config*.

    The result follows the ``logging.config.dictConfig`` schema. A console
    handler is always configured; a file handler is added when the
    ``[logging] log_file_enabled`` option is true.

    :param config: parser providing the custom ``getstr`` converter
    :return: the assembled configuration dictionary

    (Return annotation fixed: this function has always returned a dict,
    not ``None``.)
    """
    config_logging = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "console": {"format": "[%(levelname)s] %(message)s"},
            "file": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"},
        },
        "handlers": {
            "console": {
                "level": config.getstr("logging", "console_log_level"),
                "formatter": "console",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",  # Default is stderr
            }
        },
        "loggers": {
            "": {  # root logger
                "handlers": ["console"],
                "level": "DEBUG",
                "propagate": False,
            },
        },
    }
    # add log file if configured
    log_file_enabled = config.getboolean("logging", "log_file_enabled", fallback=False)
    if log_file_enabled:
        file_log_path_full = config.getstr("logging", "log_file_path", fallback=None)
        # Honor an explicit log directory when given; otherwise log to cwd.
        filename = (
            Path(file_log_path_full) / _LOG_FILE_NAME
            if file_log_path_full
            else _LOG_FILE_NAME
        )
        config_logging["handlers"]["file"] = {
            "level": config.getstr("logging", "file_log_level"),
            "formatter": "file",
            "class": "logging.FileHandler",
            "filename": filename,
            "mode": "a",
        }
        config_logging["loggers"][""]["handlers"].append("file")
    return config_logging
# Built once at import time; consumers apply it via logging.config.dictConfig.
DEFAULT_LOGGING = setup_logging(_my_config)
| [
"erik.kalkoken@gmail.com"
] | erik.kalkoken@gmail.com |
7b74499dbf42c018aec13de219672e212ca40a5a | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_11/masa_project/branches/common/budget_custom_report/wizard/budget_main.py | 5ae0836ad9cdc04d75cfc1505aa9649e63578f1a | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,678 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api , _
from datetime import date, datetime, timedelta
from odoo.exceptions import UserError, ValidationError
####################################### Budget Custom Reports ##################################################################
class BudgetReportMain(models.TransientModel):
    """Wizard collecting the filters for the custom budget report and
    launching the QWeb report action.
    """
    _name = 'budget.custom.report.main'
    _inherit = 'budget.custom.report'
    report_show = fields.Selection([('sum', 'Summation'),
                                    ('details', 'Details')],default ='sum')
    report_type = fields.Selection([('cost_center', 'By Cost Centers'),
                                    ('bud_position', 'By Budgetry Position')],
                                   required=1, default='cost_center')
    budgetry_position_show = fields.Selection([('without_analytic','Only Budgetry Positons'),
                                               ('with_analytic','With Analytics')],default='without_analytic')
    def print_report(self,data):
        """Validate the date range, collect all selected filters into
        ``data`` and return the report action.

        :param data: dict passed through to the report renderer
        :raises ValidationError: when date_from is after date_to
        """
        if self.date_from > self.date_to:
            raise ValidationError(_('Start Date must be equal to or less than Date To'))
        # starter filter ^_^
        # (no-op assignment kept as-is; ``data`` is filled in place below)
        data = data
        #Get all filter in data Dict
        data.update({'report_type': self.report_type})
        data.update({'report_show': self.report_show})
        data.update({'budget_type': self.budget_type})
        data.update({'date_from': self.date_from})
        data.update({'date_to': self.date_to})
        #read_group filters and pass it to all functions we need
        filters = [('date_from', '>=', self.date_from),
                   ('date_to', '<=', self.date_to),
                   ('general_budget_id.type', '=', self.budget_type)
                   ]
        data.update({'filters': filters})
        #read_group fields , pass it to all functions that have read_group
        budget_fields = ['general_budget_id', 'general_budget_id.code', 'analytic_account_id', 'planned_amount',
                         'practical_amount', 'total_operation', 'transfer_amount', 'confirm','residual','percentage',
                         'deviation']
        data.update({'fields': budget_fields})
        if self.report_type == 'cost_center':
            #if user not select any analytic then select all analytics
            if len(self.mapped('analytic_account_ids')) == 0:
                analytic_ids = self.env['account.analytic.account'].search([],order='code').ids
            else:
                # expand the selection to include all child analytic accounts
                tuple_analytic_ids = tuple(self.mapped('analytic_account_ids').ids)
                analytic_ids = tuple([line.id for line in self.env['account.analytic.account'].search([('id','child_of',tuple_analytic_ids)])])
            data.update({'analytic_ids':analytic_ids})
        elif self.report_type == 'bud_position':
            #budgetry_position_type
            data.update({'budgetry_position_show': self.budgetry_position_show})
            # if user not select any Budgetary then select all Budgetaries
            if len(self.mapped('budgetry_position_ids')) == 0:
                budgetary_ids = self.env['crossovered.budget.lines'].search([]).ids
            else:
                tuple_budgetary_ids = tuple(self.mapped('budgetry_position_ids').ids)
                budgetary_ids = tuple([line.id for line in self.env['crossovered.budget.lines'].search(
                    [('id', 'in', tuple_budgetary_ids)])])
            data.update({'budgetary_ids': budgetary_ids})
            if self.budgetry_position_show == 'with_analytic':
                # if user not select any analytic then select all analytics
                if len(self.mapped('analytic_account_ids')) == 0:
                    analytic_ids = self.env['account.analytic.account'].search([], order='code').ids
                else:
                    tuple_analytic_ids = tuple(self.mapped('analytic_account_ids').ids)
                    analytic_ids = tuple([line.id for line in self.env['account.analytic.account'].search(
                        [('id', 'child_of', tuple_analytic_ids)])])
                data.update({'analytic_ids': analytic_ids})
        return self.env.ref('budget_custom_report.action_budget_custom_report').with_context(landscape=True).report_action(
            self, data=data)
class budgetCustomReport(models.AbstractModel):
    """Rendering model feeding the QWeb budget report template."""
    _name = 'report.budget_custom_report.budget_main_report_tamplate'

    @api.model
    def get_report_values(self, docids, data=None):
        """Assemble the rendering context passed to the report template."""
        report_values = {
            'data': data,
            'get': self.env['budget.custom.report'],
            'current_model': self.env['budget.custom.report.main'],
        }
        return report_values
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
bd7dadc54b85e08c31e8ca417b8a7925d903e09a | 5dc7dc7e33122e8c588eb6e13f23bf032c704d2e | /scripts/transfer_from_TEXT_to_SQLITE.py | d149f8918fe194ad0b74376c404e553546a1a822 | [
"Apache-2.0"
] | permissive | brianr747/platform | a3319e84858345e357c1fa9a3916f92122775b30 | 84b1bd90fc2e35a51f32156a8d414757664b4b4f | refs/heads/master | 2022-01-23T16:06:26.855556 | 2022-01-12T18:13:22 | 2022-01-12T18:13:22 | 184,085,670 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | """
Script that transfers all series from the TEXT database to the SQLite database.
Note: the TEXT database series to have the correct ticker_full as the column header.
"""
import econ_platform_core
import econ_platform.start
# Initialise platform logging before touching any database.
econ_platform_core.start_log()
# Echo the generated SQL so the transfer can be audited.
econ_platform_core.Databases['SQLITE'].LogSQL = True
ticker_list = econ_platform_core.Databases['TEXT'].GetAllValidSeriesTickers()
for ticker in ticker_list:
    econ_platform_core.Databases.TransferSeries(ticker, 'TEXT', 'SQLITE')
| [
"brianr747@gmail.com"
] | brianr747@gmail.com |
b020c98bdd919c3fa176f6133cb896944293d497 | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/z3c.form-3.0.2-py2.7.egg/z3c/form/tests/test_doc.py | f5a7184d64deb6152f01ed8826dfdc78c10e1f9a | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,194 | py | ##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""z3c.form Test Module"""
import doctest
import itertools
import re
import unittest
from zope.testing import renormalizing
from z3c.form import testing
# This package always provides z3c.pt for its own test setup; the
# Z3CPT_AVAILABLE fallback is useful when someone runs the z3c.form tests
# in their own project without z3c.pt installed.
try:
    import z3c.pt
    import z3c.ptcompat
    Z3CPT_AVAILABLE = True
except ImportError:
    Z3CPT_AVAILABLE = False
# The adding.txt doctests depend on zope.app.container (see test_suite
# below); skip them when it is not installed.
try:
    import zope.app.container
except ImportError:
    ADDING_AVAILABLE = False
else:
    ADDING_AVAILABLE = True
def test_suite():
    """Assemble the doctest suites for every narrative document, once per
    available page-template engine (ZPT always; z3c.pt when installed).
    """
    flags = \
        doctest.NORMALIZE_WHITESPACE | \
        doctest.ELLIPSIS | \
        doctest.IGNORE_EXCEPTION_DETAIL
    # Run each document under every available template-engine setup.
    if Z3CPT_AVAILABLE:
        setups = (testing.setUpZPT, testing.setUpZ3CPT)
    else:
        setups = (testing.setUpZPT, )
    # Generator of one tuple of suites per setup function; converter.txt
    # gets an extra RENormalizing checker for version-dependent reprs.
    tests = ((
        doctest.DocFileSuite(
            '../form.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../action.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../datamanager.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../field.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../contentprovider.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../validator.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../error.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../widget.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../button.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../zcml.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../testing.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../converter.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=renormalizing.RENormalizing([
                (re.compile(
                    r"(invalid literal for int\(\)) with base 10: '(.*)'"),
                    r'\1: \2'),
                (re.compile(
                    r"Decimal\('(.*)'\)"),
                    r'Decimal("\1")'),
                ]) + testing.outputChecker
            ),
        doctest.DocFileSuite(
            '../group.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../subform.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../util.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ),
        doctest.DocFileSuite(
            '../hint.txt',
            setUp=setUp, tearDown=testing.tearDown,
            optionflags=flags, checker=testing.outputChecker,
            ))
        for setUp in setups)
    # adding.txt requires zope.app.container (see the ADDING_AVAILABLE flag).
    if ADDING_AVAILABLE:
        tests = itertools.chain(tests, ((
            doctest.DocFileSuite(
                '../adding.txt',
                setUp=setUp, tearDown=testing.tearDown,
                optionflags=flags, checker=testing.outputChecker,
                ),)
            for setUp in setups))
    return unittest.TestSuite(itertools.chain(*tests))
| [
"gso@abv.bg"
] | gso@abv.bg |
68a763f7ac69f4b4787488d915ee751df8e07af7 | a913d347c5a46fd7ff28415dfebe9b10829fdef9 | /tests/test_puzzle.py | d6a4847a136bbbf2416b8eac474fb403a4a2fe16 | [] | no_license | RZachLamberty/logicpuzzlesolver | 6d9ba414e549b13396573ea0d875dae8f592c9b9 | b8a93e5bda76bb8e3afc66ef902c42b06c532e8c | refs/heads/master | 2021-01-10T16:11:38.742828 | 2015-12-30T19:42:20 | 2015-12-30T19:42:20 | 47,845,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module: test_puzzle.py
Author: zlamberty
Created: 2015-12-19
Description:
test the puzzle class
Usage:
<usage>
"""
import os
import pandas as pd
import unittest
import categories
import rulelist
import puzzle
# Directory holding the numbered puzzle fixtures (categories/rules/solutions).
CONFIG = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'config'
)
# e.g. FMT.format(num=1, ftype='rules', ext='txt') -> '<CONFIG>/001.rules.txt'
FMT = os.path.join(CONFIG, '{num:0>3.0f}.{ftype:}.{ext:}')
class TestLogicPuzzle(unittest.TestCase):
    """Solve a configured puzzle and compare against its known solution."""

    # NOTE(review): unittest instantiates TestCase subclasses as
    # ``TestLogicPuzzle(methodName)``, so under a plain ``unittest.main()``
    # run ``num`` receives the method *name*. This class appears to rely on
    # a caller passing a puzzle number explicitly -- confirm before changing.
    def __init__(self, num, *args, **kwargs):
        self.num = num
        # Fixture file names for puzzle ``num``.
        self.fcatyaml = FMT.format(num=self.num, ftype='categories', ext='yaml')
        self.fruleyaml = FMT.format(num=self.num, ftype='rules', ext='yaml')
        self.fruletxt = FMT.format(num=self.num, ftype='rules', ext='txt')
        self.fsolyaml = FMT.format(num=self.num, ftype='solution', ext='yaml')
        self.fsoltxt = FMT.format(num=self.num, ftype='solution', ext='txt')
        self.fsolcsv = FMT.format(num=self.num, ftype='solution', ext='csv')
        super(TestLogicPuzzle, self).__init__(*args, **kwargs)

    def setUp(self):
        # Fresh categories/rules/puzzle objects for every test method.
        self.c = categories.CategoriesFromYaml(self.fcatyaml)
        self.r = rulelist.RulesFromFile(self.fruletxt, self.c)
        self.p = puzzle.LogicPuzzle(self.c, self.r)

    def test_solve(self):
        """The solved puzzle must match the reference CSV solution."""
        self.p.solve()
        a = self.p.solution
        a = a.reset_index()
        b = pd.read_csv(self.fsolcsv)
        # Bug fix: assertEqual on two DataFrames raises "truth value of a
        # DataFrame is ambiguous" instead of comparing them. Use pandas'
        # dedicated frame comparison, which also gives a useful diff.
        pd.testing.assert_frame_equal(a, b)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"r.zach.lamberty@gmail.com"
] | r.zach.lamberty@gmail.com |
dfc4e92940c3353a5ef279149b589cbbb49540bf | 0958c33f05f5d3922c47cccadebc9e70394b8a78 | /PowerClassification/ResultAnalysis-Test09/CompareTwoExperiments.py | 50818376fc11311b1a9bfed095e101098bca4b6b | [] | no_license | jabarragann/eeg_project_gnaut_power_band_analysis | 0b33a5ebdeffd37b64094ba01f8dbd94f9baf961 | 3f7a7183593eb54a63efcff3762fb2144a0af2df | refs/heads/master | 2021-08-10T20:47:23.266562 | 2021-07-21T02:37:53 | 2021-07-21T02:37:53 | 245,556,675 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import pandas as pd
from pathlib import Path
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
# sns.set_theme(style="whitegrid")
# experiment1Path = {"path":"aa16b_pyprep_complete/", "condition":"FullSet"}
# experiment2Path = {"path":"aa16_pyprep_complete/" , "condition":"SubSet"}
if __name__ =='__main__':
    # Experiment folders to compare; 'condition' labels the boxplot groups.
    experiment1Path = {"path":"aa14_pyprep_complete/", "condition":"FullSet"}
    experiment2Path = {"path":"aa15b_pyprep_complete/" , "condition":"SubSet"}
    windowToAnalyze = 20
    sizeToAnalyze = 150
    rootPath = Path('./').resolve().parent / 'results' / 'EegResults' /'results_transfer9'
    total = []
    for exp in [experiment1Path, experiment2Path]:
        p = rootPath / exp['path']
        for file in p.glob('*.csv'):
            # Window/sample sizes are parsed out of the file name.
            # NOTE(review): '[0][-2:]' keeps only the last two digits of the
            # first match, so a window size over 99s would be misparsed --
            # confirm whether that can occur.
            windowSize = int(re.findall('(?<=dow)[0-9]+(?=s)',file.name)[0][-2:])
            sampleSize = int(re.findall('(?<=Size)[0-9]+(?=s\.csv)',file.name)[0])
            if windowToAnalyze == windowSize and sizeToAnalyze == sampleSize:
                print(file.name, windowSize, sampleSize)
                #Load data
                df = pd.read_csv(file, sep = ',')
                df['condition'] = exp['condition']
                total.append(df)
    # Stack both experiments and plot TestAcc distributions per condition.
    final = pd.concat(total)
    # ax = sns.boxplot(x="User", y="TestAcc", hue="condition",
    #                  data=final, palette="Set3")
    ax = sns.boxplot(y="TestAcc", x="condition",
                     data=final, palette="Set3")
    plt.show()
x = 0 | [
"barragan@purdue.edu"
] | barragan@purdue.edu |
b541fe78bc55ab89ff27e9be669b95806c47396b | b3c17f6b3b1c5322a5bf8dd262d01a85e6de2849 | /web-api/customSite/ocr/custom_ocr_module/src/detection/ctpn/utils/rpn_msr/generate_anchors.py | 8ac5cee0f28ee718219da4393b4a31c54e874f8f | [] | no_license | arxrean/CelebRecognition | 6e65a76e984e54ef6a34e9b3dc44e0d19b79bcd6 | b202c6ef8bd6314b9c43a02b5afdbad64522f5ee | refs/heads/master | 2022-11-23T07:42:13.584305 | 2019-09-02T08:03:05 | 2019-09-02T08:03:05 | 204,247,279 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | import numpy as np
# Generate the base anchor boxes: one anchor per (height, width) pair, all
# derived from a square reference anchor of side ``base_size``.
def generate_basic_anchors(sizes, base_size=16):
    """Build one anchor box per (h, w) pair in *sizes*.

    :param sizes: sized iterable of (height, width) tuples
    :param base_size: side length of the square reference anchor
    :return: int32 array of shape (len(sizes), 4), rows are one anchor each
    """
    base_anchor = np.array([0, 0, base_size - 1, base_size - 1], np.int32)
    anchors = np.zeros((len(sizes), 4), np.int32)
    # enumerate() replaces the manual index counter of the original loop.
    for index, (h, w) in enumerate(sizes):
        anchors[index] = scale_anchor(base_anchor, h, w)
    return anchors
# Resize the given anchor to the requested height/width around its centre.
def scale_anchor(anchor, h, w):
    """Return a copy of *anchor* resized to h x w about the same centre.

    The copy keeps *anchor*'s dtype, so float coordinates are truncated
    when the input array is integer-typed.
    """
    centre_x = (anchor[0] + anchor[2]) * 0.5
    centre_y = (anchor[1] + anchor[3]) * 0.5
    resized = anchor.copy()
    resized[0] = centre_x - w / 2  # xmin
    resized[1] = centre_y - h / 2  # ymin
    resized[2] = centre_x + w / 2  # xmax
    resized[3] = centre_y + h / 2  # ymax
    return resized
# Generate the full anchor set. Width is fixed (16) and only the height
# varies, which matches CTPN's vertical anchor design.
def generate_anchors(base_size=16, ratios=(0.5, 1, 2),
                     scales=2 ** np.arange(3, 6)):
    """Return the 10 CTPN anchors (width fixed to 16, heights 11..283).

    :param base_size: side of the square reference anchor
    :param ratios: unused; kept for API compatibility. Changed from a
        mutable list default to an immutable tuple.
    :param scales: unused; kept for API compatibility
    :return: int32 array of shape (10, 4)
    """
    heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]
    widths = [16]
    # Cartesian product heights x widths: [(11, 16), (16, 16), (23, 16), ...]
    sizes = [(h, w) for h in heights for w in widths]
    return generate_basic_anchors(sizes)
if __name__ == '__main__':
    # Quick manual benchmark/inspection of the generated anchors.
    import time
    t = time.time()
    a = generate_anchors()
    print(time.time() - t)
    print(a)
    # Drop into an interactive IPython shell for exploration.
    from IPython import embed;
    embed()
| [
"602052254@qq.com"
] | 602052254@qq.com |
6d52867e4a517db70b6d40506b0c61cf314f3338 | cf025ea3bf079748472557304a290593c753b884 | /Algorithm/SWEA/시험문제/시험문제_2.py | 46481ac0b5ea15cf3b3aaba6f2a5c020c00f7f52 | [] | no_license | Silentsoul04/my_software_study | 7dbb035ceea74f42c7ce2051b2320f6cae75ed88 | c27d33c57f59fe5244a1041c11bbd826dd481546 | refs/heads/master | 2023-03-29T02:43:40.861045 | 2019-07-10T08:09:55 | 2019-07-10T08:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | import sys
sys.stdin = open('test_2.txt','r')
from itertools import combinations
testcases = int(input())
for tc in range(testcases):
N, M = list(map(int,input().split())) # N 은 행의크기, M 은 열의크기
mat = [list(map(int,input().split())) for _ in range(N)]
max_ = 0
for s_ in range(1, N):
s = s_
for g1_ in range(1, M-1):
g1 = g1_
for g2_ in range(g1+1, M):
g2 = g2_
t1, t2, t3, t4, t5, t6 = 0, 0, 0, 0, 0, 0
for i in range(N):
for j in range(M):
if i < s and j < g1: # 1구역
t1 += mat[i][j]
elif i < s and g1 <= j < g2: # 2구역
t2 += mat[i][j]
elif i < s and g2 <= j: # 3구역
t3 += mat[i][j]
elif s <= i and j < g1: # 4구역
t4 += mat[i][j]
elif s <= i and g1 <= j < g2: # 5구역
t5 += mat[i][j]
elif s <= i and g2 <= j: # 6구역
t6 += mat[i][j]
scores = list(combinations([t1, t2, t3, t4, t5, t6], 3))
for _ in range(len(scores)):
score = list(combinations(scores[_], 2))
sc = 0
for __ in score:
a,b = __
c = a - b
sc+= abs(c)
scores[_] = sc
max_ = max(max_, max(scores))
print(f'#{tc+1} {max_}') | [
"pok_winter@naver.com"
] | pok_winter@naver.com |
bbb44934c8f2e091c86000447893dbfe722bdb59 | 29d0131660ab0392861df94d4a3198f963db233c | /scripts/delcomments.py | 50a8f9a01cfd2a2dc639c3c6dd5c4a538a88a0b1 | [
"MIT"
] | permissive | kalinochkind/vkbot | be7123d82063c6c0ce108e532b2798e1bde898e4 | 306a244cb15745057fd838cd7c3163f0b6754d4b | refs/heads/master | 2020-04-04T06:24:16.429204 | 2019-05-13T16:49:31 | 2019-05-13T16:49:31 | 46,568,794 | 39 | 16 | null | 2015-11-21T19:45:13 | 2015-11-20T15:06:54 | Python | UTF-8 | Python | false | false | 839 | py | import logging
import accounts
import cppbot
import log
def isBad(bot, comm):
    """Return True when the bot classifies comment text *comm* as blacklisted."""
    verdict = bot.interact('comm {}'.format(bot.escape(comm)))
    return verdict == '$blacklisted'
# noinspection PyUnusedLocal
def main(a, args):
    """Scan recent wall posts and delete comments the bot blacklists.

    :param a: API client exposing ``delayed()``, ``users`` and ``wall``
    :param args: unused, kept for the script entry-point signature
    """
    a.timeout = 10
    dm = a.delayed()
    bot = cppbot.CppBot('', 0, None)
    # Id of the bot's own account: its own comments are never deleted.
    self_id = a.users.get()[0]['id']
    def wall_cb(req, resp):
        # For every wall post, queue a fetch of its first 100 comments.
        for post in resp['items']:
            dm.wall.getComments(post_id=post['id'], count=100).walk(post_cb)
    def post_cb(req, resp):
        # Delete (and log) any non-own, non-empty comment flagged by isBad.
        for comm in resp['items']:
            if comm['from_id'] != self_id and comm.get('text') and isBad(bot, comm['text']):
                dm.wall.deleteComment(comment_id=comm['id'])
                log.write('_delcomments', '{}: {}'.format(comm['from_id'], comm['text']))
    # filter='others': presumably restricts to posts made by other users
    # (VK API semantics) -- confirm against the API docs.
    dm.wall.get(count=100, filter='others').walk(wall_cb)
    dm.sync()
| [
"kalinochkind@gmail.com"
] | kalinochkind@gmail.com |
a24dd232f54e280a80ade479a457b4adf4a3472f | 0effd6f590f0d6d17f080e4c41660df13ffa64a7 | /commands/explain_document.py | 24083bf2e441b972d713ec47253e5897d2c125f0 | [] | no_license | vinodpanicker/sublime-elasticsearch-client | a6b2b5992979fa30d1b01cc98d962c657851ae1d | 62b8894f2dbbc776569bc4c6ab0586f7c89dd8c7 | refs/heads/master | 2021-01-13T06:21:02.945999 | 2015-09-14T04:01:33 | 2015-09-14T04:01:33 | 48,857,093 | 1 | 0 | null | 2015-12-31T17:30:18 | 2015-12-31T17:30:18 | null | UTF-8 | Python | false | false | 485 | py | from .base import BaseCommand
class ExplainDocumentCommand(BaseCommand):
    """Run the Elasticsearch Explain API for a single document id."""

    command_name = "elasticsearch:explain-document"

    def run_request(self, id=None):
        # Without an id, prompt for one and re-enter through ``run``.
        if not id:
            self.show_input_panel('Document Id: ', '', self.run)
            return
        return self.client.explain(
            index=self.settings.index,
            doc_type=self.settings.doc_type,
            body=self.get_text(),
            id=id,
        )
| [
"kido@knowledge-works.co.jp"
] | kido@knowledge-works.co.jp |
de613a6cd9d0c48a3facb4d8e463cfccd9f9b0d4 | a9c3db07c29a46baf4f88afe555564ed0d8dbf2e | /src/0059-spiral-matrix-ii/spiral-matrix-ii.py | c7d45edc7205ebac9381b2f8b6de35e9d65e0110 | [] | no_license | HLNN/leetcode | 86d2f5b390be9edfceadd55f68d94c78bc8b7644 | 35010d67341e6038ae4ddffb4beba4a9dba05d2a | refs/heads/master | 2023-03-13T16:44:58.901326 | 2023-03-03T00:01:05 | 2023-03-03T00:01:05 | 165,402,662 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | # Given a positive integer n, generate an n x n matrix filled with elements from 1 to n2 in spiral order.
#
#
# Example 1:
#
#
# Input: n = 3
# Output: [[1,2,3],[8,9,4],[7,6,5]]
#
#
# Example 2:
#
#
# Input: n = 1
# Output: [[1]]
#
#
#
# Constraints:
#
#
# 1 <= n <= 20
#
#
class Solution:
    def generateMatrix(self, n: int) -> List[List[int]]:
        """Fill an n x n grid with 1..n*n in clockwise spiral order."""
        grid = [[0] * n for _ in range(n)]
        row = col = 0
        d_row, d_col = 0, 1  # start moving right
        for value in range(1, n * n + 1):
            grid[row][col] = value
            nxt_r, nxt_c = row + d_row, col + d_col
            # Turn clockwise when the next cell is off-grid or already filled.
            if not (0 <= nxt_r < n and 0 <= nxt_c < n) or grid[nxt_r][nxt_c]:
                d_row, d_col = d_col, -d_row
            row += d_row
            col += d_col
        return grid
| [
"Huangln555@gmail.com"
] | Huangln555@gmail.com |
4e4eabaf2b55526f75580bb1803ec0d48f306489 | eb00755d9d0f2630ffdb21e3ab6685b2fbcb0d9e | /tests/bench/bench_scripts/bench_sum.py | 7f7d72c9637914d79320c2cdc9f9ffae2db9ff9b | [
"BSD-3-Clause"
] | permissive | mlangill/biom-format | aca45518c71b807cf30b0f548ad726880802a2b5 | 4cebfbdba8b6b64ff0d503df33634e3d52de1de0 | refs/heads/master | 2021-01-16T21:59:51.218830 | 2013-12-04T16:41:50 | 2013-12-04T16:41:50 | 9,486,201 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | #!/usr/bin/env python
from sys import argv
from gzip import open as gzip_open
from biom.parse import parse_biom_table
if __name__ == '__main__':
    # argv[1]: path to a gzipped BIOM table file.
    table = parse_biom_table(gzip_open(argv[1]))
    # Benchmark target: summing all counts in the table.
    foo = table.sum()
| [
"mcdonadt@colorado.edu"
] | mcdonadt@colorado.edu |
1150b554ca225799561fdcf23ca5e95515d27372 | 61d08e23fbb62e16f7bd9d43673b1cf4e0558c37 | /other/character.py | 660ebb5286f5391e2ab709d75d3d45db874ddaae | [] | no_license | jonntd/mira | 1a4b1f17a71cfefd20c96e0384af2d1fdff813e8 | 270f55ef5d4fecca7368887f489310f5e5094a92 | refs/heads/master | 2021-08-31T12:08:14.795480 | 2017-12-21T08:02:06 | 2017-12-21T08:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,280 | py | import shutil
import os
import logging
import maya.cmds as mc
import xgenm as xgen
import xgenm.xgGlobal as xgg
from PySide2.QtWidgets import *
# Publish-area path templates; the only placeholder is {asset_name}.
tex_template = "M:/BA/publish/assets/Character/{asset_name}/Shd/Shd/_tex"
publish_template = "M:/BA/publish/assets/Character/{asset_name}/Shd/Shd/_publish/maya/BA_{asset_name}_Shd_Shd.mb"
xgen_template = "M:/BA/publish/assets/Character/{asset_name}/Hair/Hair/_xgen/maya"
# Module-wide logger shared by the helper classes below.
logger = logging.getLogger("Character")
class Path(object):
    """Resolve the publish locations (textures/scene/xgen) for one asset."""

    def __init__(self, asset_name):
        self.asset_name = asset_name

    def _fill(self, template):
        # Every module-level template exposes only the {asset_name} key.
        return template.format(asset_name=self.asset_name)

    @property
    def tex_dir(self):
        """Directory holding the published shading textures."""
        return self._fill(tex_template)

    @property
    def publish_path(self):
        """Published shading scene file (.mb)."""
        return self._fill(publish_template)

    @property
    def xgen_dir(self):
        """Directory holding the published xgen data."""
        return self._fill(xgen_template)
class Xgen(object):
    """Thin helpers around the xgen/xgenm Maya API for palette I/O."""

    def export_palette(self, palette, xgen_path):
        """Export *palette* to *xgen_path*, creating parent dirs as needed."""
        xgen_dir = os.path.dirname(xgen_path)
        if not os.path.isdir(xgen_dir):
            os.makedirs(xgen_dir)
        xgen.exportPalette(palette, xgen_path)

    def import_palette(self, xgen_path, deltas, namespace=None):
        """Import a palette file, applying one or more delta files."""
        # basestring: this module targets Maya's Python 2 interpreter.
        if isinstance(deltas, basestring):
            deltas = [deltas]
        if not os.path.isfile(xgen_path):
            logger.warning("%s is not an exist path." % xgen_path)
            return
        xgen.importPalette(xgen_path, deltas, namespace)

    def create_delta(self, palette, delta_path):
        """Write a delta for *palette* to *delta_path* via a scratch file."""
        delta_dir = os.path.dirname(delta_path)
        if not os.path.isdir(delta_dir):
            os.makedirs(delta_dir)
        # NOTE(review): hardcoded scratch location -- consider tempfile.
        tmp_path = "D:/temp.xgd"
        xgen.createDelta(palette, tmp_path)
        shutil.copy(tmp_path, delta_path)
        # Bug fix: this previously removed "D:/temp.gxd" (transposed
        # extension), which always raised and leaked the scratch file.
        os.remove(tmp_path)

    def set_abs_path(self, xgen_dir):
        """Rewrite every ${DESC} token in all palette attributes to an
        absolute path under ``xgen_dir/collections/<palette>/<description>``,
        then refresh the Description Editor.
        """
        if not xgg.Maya:
            return
        # palette is collection, use palettes to get collections first.
        palettes = xgen.palettes()
        for palette in palettes:
            # Use descriptions to get description of each collection
            descriptions = xgen.descriptions(palette)
            for description in descriptions:
                common_objs = xgen.objects(palette, description, True)
                fx_objs = xgen.fxModules(palette, description)
                objs = common_objs + fx_objs
                # Get active objs,e.g. SplinePrimtives
                for obj in objs:
                    attrs = xgen.allAttrs(palette, description, obj)
                    for attr in attrs:
                        value = xgen.getAttr(attr, palette, description, obj)
                        if "${DESC}" in value:
                            # Log via the module logger instead of a bare
                            # py2 print statement.
                            logger.info("%s %s %s %s", palette, description, obj, attr)
                            description_dir = os.path.join(xgen_dir, "collections", palette, description).replace("\\", "/")
                            new_value = value.replace("${DESC}", description_dir)
                            xgen.setAttr(attr, new_value, palette, description, obj)
        de = xgg.DescriptionEditor
        de.refresh("Full")
class Maya(object):
    """Publish helper for one asset: gathers textures and XGen data into the
    publish tree and saves the scene there."""
    def __init__(self, asset_name):
        self.asset_name = asset_name
        # Path resolves publish-tree locations; Xgen wraps the XGen API.
        self.path = Path(self.asset_name)
        self.xg = Xgen()
    def copy_textures(self):
        """Copy every file-node texture into the asset's _tex directory and
        repoint the file nodes at the copies."""
        file_nodes = mc.ls(type="file")
        if not file_nodes:
            return
        tex_dir = self.path.tex_dir
        if not os.path.isdir(tex_dir):
            os.makedirs(tex_dir)
        for file_node in file_nodes:
            texture = mc.getAttr("%s.fileTextureName" % file_node)
            if not os.path.isfile(texture):
                # NOTE(review): Python 2 print statement — this module
                # targets Maya's legacy Python 2 interpreter.
                print "%s is not an exist file" % texture
                continue
            base_name = os.path.basename(texture)
            new_path = "%s/%s" % (tex_dir, base_name)
            # Skip files that already live at the publish location.
            if texture != new_path:
                shutil.copy(texture, new_path)
                mc.setAttr("%s.fileTextureName" % file_node, new_path, type="string")
    def copy_xgen_dir(self, old_xgen_dir):
        """Mirror *old_xgen_dir* into the asset's _xgen directory and return
        the destination path."""
        xgen_dir = self.path.xgen_dir
        if not os.path.isdir(xgen_dir):
            os.makedirs(xgen_dir)
        from distutils.dir_util import copy_tree
        copy_tree(old_xgen_dir, xgen_dir)
        return xgen_dir
    def set_xgen_path(self, old_xgen_dir):
        """Copy the XGen data tree, then rewrite ${DESC} tokens to point at
        the copied location."""
        xgen_dir = self.copy_xgen_dir(old_xgen_dir)
        self.xg.set_abs_path(xgen_dir)
    def save_to_publish(self):
        """Rename the scene to the publish path and save it as mayaBinary."""
        publish_path = self.path.publish_path
        if not os.path.isdir(os.path.dirname(publish_path)):
            os.makedirs(os.path.dirname(publish_path))
        mc.file(rename=publish_path)
        mc.file(save=1, f=1, type="mayaBinary")
def run():
    """Interactive publish: ask for the asset name (and XGen source dir when
    the scene has XGen palettes), copy textures, relocate XGen data, and save
    the scene to the publish path."""
    # todo delete rig
    # ############
    asset_name, ok = QInputDialog.getText(None, "Input", "Asset Name")
    if not ok or not asset_name:
        # Cancelled, or an empty name that would yield bogus publish paths.
        return
    palettes = xgen.palettes()
    xgen_dir = None
    if palettes:
        # The scene contains XGen collections: ask where their data lives.
        xgen_dir = QFileDialog.getExistingDirectory()
        if not xgen_dir:
            return
    maya = Maya(asset_name)
    maya.copy_textures()
    logger.info("Copy textures done.")
    if palettes:
        maya.set_xgen_path(xgen_dir)
        logger.info("xgen done.")
    maya.save_to_publish()
    logger.info("Save to publish done.")
    # Bug fix: QMessageBox(None, title, text) only constructed a dialog
    # (passing None where the constructor expects an icon) and never showed
    # it; the static information() helper actually displays the message.
    QMessageBox.information(None, "Warming Tip", "Congratulations, All done.")
if __name__ == "__main__":
run()
| [
"276575758@qq.com"
] | 276575758@qq.com |
f91de1717dfd7a91f697029607b32f03528f3f2f | c25fa1b8dd48b6dc4302f626f797d3411d1a991d | /bacon.py | 1241c3cbecb8e47a2a61a1329ee2a36a132c67c1 | [] | no_license | jasminh925/bacon | 8a3c542828f93814b7212cb734e043336e53dbad | b3303ca1f82a27282c8109f79d093fa474cf033f | refs/heads/master | 2020-05-30T23:23:17.420030 | 2016-08-03T04:21:07 | 2016-08-03T04:21:07 | 64,814,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | def bacon():
pork = raw_input("Should you eat that bacon?")
angels = raw_input("Do you want to feel like angels are frolicking on your taste buds?").lower()
if angels == "yes":
return "Eat it!"
elif angels == "no":
print "You've clearly never tasted bacon."
return "Eat it"
else:
coward = raw_input("I see you are afraid bacon might kill you. Are you a coward?").lower()
if coward == "yes":
return "You are a coward. Bacon will turn you into a true warrior."
else:
return "Then eat it!"
def main():
    # Entry point: run the questionnaire and echo its verdict
    # (Python 2 print statement, matching raw_input() use above).
    print bacon()
if __name__ == '__main__':
main() | [
"you@example.com"
] | you@example.com |
2117b97c99737ddfd126db5b1411d0278a6cc213 | c69e2eb04c5bff10dd1ec214e6dbe3917a156cef | /test-pandas.py | bb34c9838161b66dd3dba21b7fab57e056ad5d9f | [] | no_license | Jizishuo/daily_code | c865bb1df063dd2cfd9c0e8ab9f64bc9f33ef5c2 | cc9b7423bfc7e4a990c3554d91613ee9144ed9e2 | refs/heads/master | 2021-08-10T20:04:42.541355 | 2020-04-11T05:59:26 | 2020-04-11T05:59:26 | 154,241,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | import pandas as pd
#VIDEO_INFO_PATH = 'F:\\test-data-ex\\0729(0-12).csv'
# Alternate input file kept for reference:
#VIDEO_INFO_PATH = 'F:\\test-data-ex\\0729(0-12).csv'
VIDEO_INFO_PATH = 'F:\\0729-history-alarm.csv'
# Series & DataFrame are the two most commonly used pandas objects.
# Series
if __name__ == '__main__':
    #video_info = pd.read_csv(VIDEO_INFO_PATH, encoding='ISO-8859-1')
    video_info = pd.read_csv(VIDEO_INFO_PATH, low_memory=False)
    # shape gives the (row count, column count) pair.
    print(video_info.shape)
    print(video_info.head(n=5))
    '''
    # index保存行索引,columns保存列索引
    print(video_info.columns)
    print(video_info.columns.name)
    # 行索引是一个表示多级索引的MultiIndex对象,每级的索引名可以通过names属性存取
    print(video_info.index)
    print(video_info.index.names)
    # DataFrame对象有两个轴,第0轴为纵轴,第一轴为横轴
    # []运算符可以通过索引标签获取指定的列,当下标是单个标签时,所得到的是Series对象
    # 而当下标是列表时,则得到一个DataFrame对象
    video_id = video_info['VideoID']
    video_object = video_info[['VideoID', 'Start', 'End']]
    # 进行去重操作
    video_object = video_object.drop_duplicates()
    print(video_object)
    print(video_object.values)
    # video_test = video_info[video_info['VideoID'].unique()]
    # .loc[]可通过索引标签获取指定的行,或指定行的某个元素
    # 因为这里没有行索引,所以这里报错video_one = video_info.loc['mv89psg6zh4']
    s = pd.Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"])
    print(u" index", s.index)
    print(u" values", s.values)
    print(s[1:3])
    print(s['b':'d'])
''' | [
"948369894@qq.com"
] | 948369894@qq.com |
9df72bb5dd9d18de36606776a1ca5a76d0e7c0b1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_076/ch141_2020_04_01_20_36_34_858929.py | 9a70d3a247b9894c9efe74e20b17d5431601ec88 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,504 | py | import random
# Dice betting game: the player starts with 1000 "dinheiros" and guesses
# the sum of two dice each round.
dinheiros = 1000
a=input("Jogo iniciado. Você quer apostar?")
while a != "não":
    dinheiros -= 30
    dado1 = random.randint(1,6)
    dado2 = random.randint(1,6)
    b= input("Os dados foram lançados. Chute o valor da soma por 30 dinheiros.")
    # NOTE(review): input() returns a str, so comparing it to the int sum
    # is always False in Python 3 — the player can never win this branch.
    # Likely needs int(b).
    if b == dado1 + dado2:
        dinheiros += 50
        print ("Você ganhou 20 dinheiros!")
        print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
        a = input("Jogo iniciado. Você quer apostar?")
    else:
        c = input ("Você quer continuar apostando ou desistir? Se quiser apostar, tente novamente o valor da soma por 20 dinheiros.")
        if c == "continuar":
            dinheiros -= 20
            # NOTE(review): c still holds the "continuar"/desist answer, not
            # a fresh guess, and is compared to an int — never True here.
            if c == dado1 + dado2:
                dinheiros +=50
                print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
        else:
            # NOTE(review): d is a plain assigned string (not input()), so
            # the d == "continuar" test below can never be True as written.
            d = ("Um dos dados apontou {}. Você deseja continuar tentando ou desistir?".format(dado1))
            if d=="continuar":
                e = input("Então tente novamente. Isso lhe custara mais 10 dinheiros. ")
                dinheiros -=10
                # NOTE(review): str vs int comparison again — needs int(e).
                if e== dado1+dado2:
                    dinheiros +=50
                    print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
                    a = input("Jogo iniciado. Você quer apostar?")
            a = input("Jogo iniciado. Você quer apostar?")
print('Você terminou a partida com {0} dinheiros'.format(dinheiros))
"you@example.com"
] | you@example.com |
42684450fa04e067ffbbf06157502666d0a88556 | ea6267b0a3508fd262acdefa51e5ad0f8f2a0563 | /src/commcare_cloud/environment/constants.py | e5126c33b55b4d8e78fb02d8aa392eaa39ae8c28 | [
"BSD-3-Clause"
] | permissive | rohit-dimagi/commcare-cloud | a606bc269f36be594d38ba6ff516411d63f37aad | 55576713f3a12acc3f2df4f24c405df9c30143b3 | refs/heads/master | 2020-06-15T16:08:46.350848 | 2019-07-17T07:34:41 | 2019-07-17T12:05:13 | 195,337,852 | 0 | 0 | BSD-3-Clause | 2019-07-05T04:17:48 | 2019-07-05T04:17:48 | null | UTF-8 | Python | false | false | 479 | py | import jsonobject
class _Constants(jsonobject.JsonObject):
    """Default PostgreSQL database names used across the deployment."""
    # main CommCare HQ database
    commcarehq_main_db_name = jsonobject.StringProperty(default='commcarehq')
    # formplayer service database
    formplayer_db_name = jsonobject.StringProperty(default='formplayer')
    # "ucr" database — presumably user-configurable reports; confirm upstream
    ucr_db_name = jsonobject.StringProperty(default='commcarehq_ucr')
    # sync-logs database
    synclogs_db_name = jsonobject.StringProperty(default='commcarehq_synclogs')
    # proxy database used by form processing — TODO confirm exact role
    form_processing_proxy_db_name = jsonobject.StringProperty(default='commcarehq_proxy')
# Module-level singleton; import `constants` rather than instantiating.
constants = _Constants()
| [
"droberts@dimagi.com"
] | droberts@dimagi.com |
31c86fbea013718e4e447491328b433b47f69512 | 9bd861d47402c81f9cc608dc414c9827baa88dd5 | /_estudoPython_solid/string_lista.py | 74ef590ab5c66d20b8d082b79f770e764908f505 | [
"Apache-2.0"
] | permissive | c4rl0sFr3it4s/Python_Analise_De_Dados | 8d4cf637573a5610489a2bd44e2a34c749456e73 | 74a72772179f45684f4f12acd4ad607c99ed8107 | refs/heads/master | 2023-02-02T21:13:16.150017 | 2020-08-03T17:52:19 | 2020-08-03T17:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | '''
string grupo de caracteres, trata a frase como uma LISTA
de caracteres.
para imprimir a primeira letra frase[0]
lista é assim 1 caractere 0, 2 caractere 1, 3 caractere 2 etc
[0,1,2,3,4,5,6,7,8,9,10] -> indice
O,i, ,t,u,d,o, ,b,e,m
LISTA é uma estrutura de dados coleção que pode guardar varios tipos de dados dentro
separado por virgula, para o python é mesma coisa que uma string cada letra é um nome.
['João', 'Maria', 'Carlos', 'Francine']
0, 1, 2, 3 -> indice
impresso
['João', 'Maria', 'Carlos', 'Francine', 10, 10.2]
'''
# string under test
frase = 'Oi, tudo bem?'
# string operations: split the phrase on the comma
frase_separada = frase.split(',')
# list holding mixed types (str, int, float)
lista_nomes = ['João', 'Maria', 'Carlos', 'Francine', 10, 10.2]
# operations on LISTS
print('Lista -> \'', lista_nomes[0:2], '\'')
lista_nomes.append('Geralda')
lista_nomes.append('Lorena')
lista_nomes.remove('Geralda')
#lista_nomes.clear()
lista_nomes.insert(5, 'Creuza')
lista_nomes[0] = 'Robervania'
# NOTE(review): the name says "carlos" but it counts 'Maria'.
contador_carlos = lista_nomes.count('Maria')
# list output
print(lista_nomes)
print('Contando João \"', contador_carlos, '\"')
print('Tamanho da Coleção \"', len(lista_nomes), '\"')
print('Função de pilha Pop\"', lista_nomes.pop(), '\"')
print(lista_nomes)
'''
lista_nomes, imprime tudo
lista_nomes[0:2], imprime do indice 0 ate 1, não incluiso o 2
lista_nome[-1], de trás para frente
append adiciona no ultimo lugar da lista
remove remove da lista
clear limpa toda a lista
insert insere no indice que escolher
lista_nome[0] = 'Robervania', adicionando no indice escolhido
count('Maria') contagem quantas vezes contém na Coleção
len(lista_nomes) traz o tamanho da coleção
pop(), funcao de pilha o primeiro que entra é o ultimo que sai, e retira da lista
LISTA é mutável, e ordenada do jeito que é inserido
'''
# string output
print('Frase -> \'', frase[0:5:1], '\'')
print('Frase em caixa baixa -> \"', frase.lower(), '\"')
print('Frase separada com split() \"', frase_separada, '\"')
print('Acessando o indice da coleção do split() \"', frase_separada[0], '\"')
'''
pegando do indice 0 ate o 5 frase[0:5], dando um step passos frase[0:5:1], 1 e de quantos em quantos vai pular
passando somente o passo ele retorna ao contrario frase[::-1]
[inicio:dividir:passo]
lower(), traz tudo para caixa baixa
split(), transforma a frase em uma lista, indicando aode você quer
'''
| [
"ti.carlosfreitas@outlook.com"
] | ti.carlosfreitas@outlook.com |
52141d7e15eb082e5473257ada7c24392d5df779 | e85e846960750dd498431ac8412d9967646ff98d | /cms/migrations/0024_auto_20170702_0605.py | 66f85c2b82850a5caef978b4485dd70afb174bf6 | [] | no_license | onosaburo/clublink_django | 19368b4a59b3aed3632883ceffe3326bfc7a61a6 | d2f6024b6224ea7f47595481b3382b8d0670584f | refs/heads/master | 2022-03-30T05:30:12.288354 | 2020-01-27T18:09:11 | 2020-01-27T18:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-07-02 10:05
from __future__ import unicode_literals
from django.db import migrations, models
def generate_full_path_recursive(page):
page.full_path = page.parent.full_path if page.parent else ''
if page.slug:
page.full_path += '{}/'.format(page.slug)
page.save()
for child in page.children.all():
generate_full_path_recursive(child)
def populate_full_path(apps, schema_editor):
ClubPage = apps.get_model('cms', 'ClubPage')
CorpPage = apps.get_model('cms', 'CorpPage')
for p in ClubPage.objects.filter(parent=None):
generate_full_path_recursive(p)
for p in CorpPage.objects.filter(parent=None):
generate_full_path_recursive(p)
class Migration(migrations.Migration):
dependencies = [
('cms', '0023_auto_20170702_0309'),
]
operations = [
migrations.AddField(
model_name='clubpage',
name='full_path',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='corppage',
name='full_path',
field=models.CharField(max_length=255, null=True),
),
migrations.RunPython(populate_full_path, lambda x, y: None),
migrations.AlterField(
model_name='clubpage',
name='full_path',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='corppage',
name='full_path',
field=models.CharField(max_length=255),
),
]
| [
"bestwork888@outlook.com"
] | bestwork888@outlook.com |
11726609336d3f6cd333ea5b5aa5ab7fa4187742 | 66d05b6b42245e4df51a80b593770f761812eb92 | /PYTHON/python_code/object-oriented/turkeys/turkeys.py | 602ea90fa89e3bf24fa78291613fcf9f05ce608d | [] | no_license | techemstudios/intro_cs | 8f32b8b40974c49c65255df8f8e3a835df218df3 | dd2ee57394ab04e86b6d78b70a038ba0f04f661f | refs/heads/master | 2021-10-19T13:04:28.354963 | 2019-02-21T05:28:10 | 2019-02-21T05:28:10 | 119,568,375 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | """
Object Oriented Programming (Thanksgiving Giving style)
We start defining classes with Animals.
We can define that parent of the class, Animals, as well as the parent of that class.
However, to save time we start at Animals. Starting here; instead of
directly at the turkey class, should help keep in mind we can define everything
under the sun using OOP is we wanted to. Again, we will save time!
"""
class Animals():
    """Minimal base class that all animals in this demo derive from."""
    pass


class Turkey(Animals):
    """A tiny model of a turkey: a name, an age, and one behaviour."""

    def __init__(self, name, age):
        """Remember the turkey's name and age as plain attributes."""
        self.name = name
        self.age = age

    def gobble(self):
        """Print a message announcing that this turkey is gobbling."""
        message = "{} is now gobbling!".format(self.name.title())
        print(message)
# More methods can follow here
# Make an instance representing a specific turkey
my_turkey = Turkey('scrappy', 3)
print("My turkey's name is " + my_turkey.name.title() + ".")
print("My turkey is " +str(my_turkey.age) + " years old.")
# Calling methods on the instance
my_turkey.gobble()
# Make multiple instances — your_turkey only demonstrates that each
# instance keeps its own attributes.
your_turkey = Turkey('lassy', 1)
| [
"joe@techemstudios.com"
] | joe@techemstudios.com |
46867e9e4e92fddc64d6f93273ad2155bae22037 | efe58c533fb211d457c598fb1fdabbaf1f284a09 | /asdf.py | 2e2d3210c9c44d407d87329799d571a09a56d8ce | [] | no_license | uniphil/beep-beep-edge | 91d59fb04649a72717ca4afa4cc50d5d6bb4c3e8 | 94c11bae2527edb037cca6a1cec912e53b7a242a | refs/heads/main | 2023-01-13T16:25:23.939980 | 2020-11-15T07:33:56 | 2020-11-15T07:33:56 | 312,756,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,519 | py | #!/usr/bin/env python3
import hashlib
import json
import re
import struct
import urllib.request
def unwrap_syslog(line):
    """Strip the syslog prefix (everything up to the first ']: ') from *line*."""
    prefix, payload = line.split(']: ', 1)
    return payload
def parse_httpd_log(line):
    """Extract (ip, path, referrer, user_agent) from a 200-status httpd GET line.

    Raises AttributeError (via match being None) on non-matching lines.
    """
    pattern = r'^\w+ (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) \-.*\- \[.*\] "GET (.*) HTTP/1\.[01]" 200 0 "(.*?)" "(.*)"\n$'
    match = re.match(pattern, line)
    return match.groups()
def compress(ip, ua):
    """Hash (ip, ua) into a (bucket, zero-count) pair.

    The IPv4 address keys a 4-byte BLAKE2b digest of the user agent.  The
    low 12 bits of that word pick one of 4096 buckets; the remaining 20
    bits are reduced to 21 minus the position of their highest set bit,
    which is always at least 1.
    """
    key = bytes(int(octet) for octet in ip.split('.'))
    digest = hashlib.blake2b(ua.encode(), digest_size=4, key=key).digest()
    (word,) = struct.unpack('I', digest)
    bucket = word % 4096                       # low 12 bits
    zeros = 21 - (word >> 12).bit_length()     # in 1..21, never zero
    return bucket, zeros
def run_lines(lines):
    """Yield one JSON event string per non-blank httpd log line in *lines*."""
    for raw in lines:
        if not raw.strip():
            continue
        payload = unwrap_syslog(raw)
        ip, path, referrer, ua = parse_httpd_log(payload)
        bucket, zeros = compress(ip, ua)
        yield json.dumps(['v1', 'ok', path, referrer, bucket, zeros])
def postit(url):
    """Generator-as-coroutine that POSTs each payload sent into it to *url*.

    Prime it with one next() call, then .send(payload_bytes) per event.
    Raises on any response other than HTTP 202.
    """
    r = urllib.request.Request(url, b'', {'Content-Type': 'application/json'})
    while True:
        # Wait for the next JSON payload (bytes) and reuse the Request.
        r.data = yield
        with urllib.request.urlopen(r, timeout=2) as resp:
            if resp.status != 202:
                raise Exception('ohno', resp.status)
if __name__ == '__main__':
    import fileinput
    import os
    # Build the POST coroutine for the configured endpoint, prime it, then
    # stream every stdin/file log line through the compressor into it.
    post_office = postit(os.environ['DESTINATION'])
    next(post_office) # unfortunate init
    for compressed in run_lines(fileinput.input()):
        post_office.send(compressed.encode())
| [
"uniphil@gmail.com"
] | uniphil@gmail.com |
752f484dc8427e86fe36a0e5d5f6301b62be3e66 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_mp_random.py | f779b3298b8dcadbbf7d846adafe260e6bb99497 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 2,235 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import numpy as np
import paddle.distributed.fleet as fleet
class TestDistTraning(unittest.TestCase):
    """Checks fleet's CUDA RNG state tracker under 2-way model parallelism."""
    def setUp(self):
        # Hybrid strategy: data- and pipeline-parallel degree 1, model
        # parallel degree 2.
        strategy = fleet.DistributedStrategy()
        self.model_parallel_size = 2
        strategy.hybrid_configs = {
            "dp_degree": 1,
            "mp_degree": self.model_parallel_size,
            "pp_degree": 1
        }
        fleet.init(is_collective=True, strategy=strategy)
    def test_cuda_rng_tracker(self):
        """Draws made inside the tracked "test" state must reproduce the
        seed_2 stream while the default stream continues from seed_1."""
        seed_1 = 2021
        seed_2 = 1024
        size = [20, 15]
        # Reference draws: two tensors per seed, in order.
        paddle.seed(seed_1)
        target_11 = paddle.randn(size, "float32")
        target_12 = paddle.randn(size, "float32")
        paddle.seed(seed_2)
        target_21 = paddle.randn(size, "float32")
        target_22 = paddle.randn(size, "float32")
        # Interleave draws from the default stream and the tracked stream;
        # each must match its own reference sequence (order is significant).
        paddle.seed(seed_1)
        fleet.meta_parallel.get_rng_state_tracker().add("test", seed_2)
        result_11 = paddle.randn(size, "float32")
        with fleet.meta_parallel.get_rng_state_tracker().rng_state("test"):
            result_21 = paddle.randn(size, "float32")
        result_12 = paddle.randn(size, "float32")
        with fleet.meta_parallel.get_rng_state_tracker().rng_state("test"):
            result_22 = paddle.randn(size, "float32")
        np.testing.assert_allclose(result_11.numpy(), target_11.numpy())
        np.testing.assert_allclose(result_12.numpy(), target_12.numpy())
        np.testing.assert_allclose(result_21.numpy(), target_21.numpy())
        np.testing.assert_allclose(result_22.numpy(), target_22.numpy())
if __name__ == '__main__':
    # Run the test case directly (e.g. under a distributed launcher).
    unittest.main()
| [
"noreply@github.com"
] | Qengineering.noreply@github.com |
8835836e75417a9af60bebd2cad24160cdc3265b | df2d967d02f004e46d44bfcd3cc8cdbf1ae54c9d | /tests/test_core/test_factory.py | f6fcce00563512aeb5481e7805c0669307edf8ad | [
"MIT",
"CC-BY-4.0"
] | permissive | skasberger/owat_api | f3df29cb1466753390f72c2b4603a7c48d1a4e8f | d40860eeef0de151d51200161baaf10c55810fb1 | refs/heads/master | 2023-01-24T11:01:05.223602 | 2020-11-14T11:26:57 | 2020-11-14T11:26:57 | 310,701,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,387 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test factory."""
from fastapi import FastAPI
import os
import pytest
from app.config import get_config
from app.database import get_engine, get_SessionLocal
from app.main import create_app
# Repository root, three levels above this test file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
def test_config_development():
    """'default' and 'development' configs: dev DB, DEBUG on, no mail/pool settings."""
    for config_name in ["default", "development"]:
        app = create_app(config_name)
        config = get_config(config_name)
        # TRAVIS must mirror the CI environment variable on both the config
        # object and the FastAPI app's ``extra`` dict.
        if os.getenv("TRAVIS") or False:
            assert config.TRAVIS == True
            assert app.__dict__["extra"]["TRAVIS"] == True
        else:
            assert config.TRAVIS == False
            assert app.__dict__["extra"]["TRAVIS"] == False
        assert config.SQLALCHEMY_DATABASE_URI == "postgresql://localhost/owat_dev"
        assert (
            app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
            == "postgresql://localhost/owat_dev"
        )
        assert config.DEBUG == True
        assert app.__dict__["extra"]["DEBUG"] == True
        assert app._debug == True
        assert config.SECRET_KEY == "my-secret-key"
        assert app.__dict__["extra"]["SECRET_KEY"] == "my-secret-key"
        assert config.ADMIN_EMAIL is None
        assert app.__dict__["extra"]["ADMIN_EMAIL"] is None
        assert config.APP_EMAIL is None
        assert app.__dict__["extra"]["APP_EMAIL"] is None
        assert config.MIN_CONNECTIONS_COUNT is None
        assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] is None
        assert config.MAX_CONNECTIONS_COUNT is None
        assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] is None
        assert config.TITLE == "owat_api"
        assert app.title == "owat_api"
        assert app.__dict__["extra"]["TITLE"] == "owat_api"
        assert app.version == "0.1.0"
        assert app.description == "RESTful API for Austrian open elections data"
        assert isinstance(app, FastAPI)
def test_config_testing():
    """'testing' config: test DB, DEBUG off, env-provided secret and emails,
    pool sizes pinned to 10."""
    config_name = "testing"
    app = create_app(config_name)
    config = get_config(config_name)
    # TRAVIS must mirror the CI environment variable.
    if os.getenv("TRAVIS") or False:
        assert config.TRAVIS == True
        assert app.__dict__["extra"]["TRAVIS"] == True
    else:
        assert config.TRAVIS == False
        assert app.__dict__["extra"]["TRAVIS"] == False
    assert config.SQLALCHEMY_DATABASE_URI == "postgresql://localhost/owat_test"
    assert (
        app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
        == "postgresql://localhost/owat_test"
    )
    assert config.DEBUG == False
    assert app.debug == False
    assert app.__dict__["extra"]["DEBUG"] == False
    assert config.SECRET_KEY == "secret-env-key"
    assert app.__dict__["extra"]["SECRET_KEY"] == "secret-env-key"
    assert config.ADMIN_EMAIL == "testing_admin@offenewahlen.at"
    assert app.__dict__["extra"]["ADMIN_EMAIL"] == "testing_admin@offenewahlen.at"
    assert config.APP_EMAIL == "testing_app@offenewahlen.at"
    assert app.__dict__["extra"]["APP_EMAIL"] == "testing_app@offenewahlen.at"
    assert config.MIN_CONNECTIONS_COUNT == 10
    assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] == 10
    assert config.MAX_CONNECTIONS_COUNT == 10
    assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] == 10
    assert config.TITLE == "owat_api"
    assert app.title == "owat_api"
    assert app.__dict__["extra"]["TITLE"] == "owat_api"
    assert app.version == "0.1.0"
    assert app.description == "RESTful API for Austrian open elections data"
    assert isinstance(app, FastAPI)
def test_config_travis():
    """'travis' config: CI database, TRAVIS unconditionally True, DEBUG off."""
    config_name = "travis"
    app = create_app(config_name)
    config = get_config(config_name)
    # Unlike the other configs, TRAVIS is hard-coded True here.
    assert config.TRAVIS == True
    assert app.__dict__["extra"]["TRAVIS"] == True
    assert (
        config.SQLALCHEMY_DATABASE_URI
        == "postgresql+psycopg2://postgres@localhost:5432/travis_ci_test"
    )
    assert (
        app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
        == "postgresql+psycopg2://postgres@localhost:5432/travis_ci_test"
    )
    assert config.DEBUG == False
    assert app.__dict__["extra"]["DEBUG"] == False
    assert app._debug == False
    assert config.SECRET_KEY == "my-secret-key"
    assert app.__dict__["extra"]["SECRET_KEY"] == "my-secret-key"
    assert config.ADMIN_EMAIL is None
    assert app.__dict__["extra"]["ADMIN_EMAIL"] is None
    assert config.APP_EMAIL is None
    assert app.__dict__["extra"]["APP_EMAIL"] is None
    assert config.MIN_CONNECTIONS_COUNT is None
    assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] is None
    assert config.MAX_CONNECTIONS_COUNT is None
    assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] is None
    assert config.TITLE == "owat_api"
    assert app.title == "owat_api"
    assert app.__dict__["extra"]["TITLE"] == "owat_api"
    assert app.version == "0.1.0"
    assert app.description == "RESTful API for Austrian open elections data"
    assert isinstance(app, FastAPI)
def test_config_production():
    """'production' config: prod DB, DEBUG off, no mail/pool settings.

    NOTE(review): unlike the other config tests, this one never asserts
    ``config.TITLE`` — possibly an oversight.
    """
    config_name = "production"
    app = create_app(config_name)
    config = get_config(config_name)
    # TRAVIS must mirror the CI environment variable.
    if os.getenv("TRAVIS") or False:
        assert config.TRAVIS == True
        assert app.__dict__["extra"]["TRAVIS"] == True
    else:
        assert config.TRAVIS == False
        assert app.__dict__["extra"]["TRAVIS"] == False
    assert config.SQLALCHEMY_DATABASE_URI == "postgresql://localhost/owat"
    assert (
        app.__dict__["extra"]["SQLALCHEMY_DATABASE_URI"]
        == "postgresql://localhost/owat"
    )
    assert config.DEBUG == False
    assert app.debug == False
    assert app.__dict__["extra"]["DEBUG"] == False
    assert config.SECRET_KEY == "my-secret-key"
    assert app.__dict__["extra"]["SECRET_KEY"] == "my-secret-key"
    assert config.ADMIN_EMAIL is None
    assert app.__dict__["extra"]["ADMIN_EMAIL"] is None
    assert config.APP_EMAIL is None
    assert app.__dict__["extra"]["APP_EMAIL"] is None
    assert config.MIN_CONNECTIONS_COUNT is None
    assert app.__dict__["extra"]["MIN_CONNECTIONS_COUNT"] is None
    assert config.MAX_CONNECTIONS_COUNT is None
    assert app.__dict__["extra"]["MAX_CONNECTIONS_COUNT"] is None
    assert app.title == "owat_api"
    assert app.__dict__["extra"]["TITLE"] == "owat_api"
    assert app.version == "0.1.0"
    assert app.description == "RESTful API for Austrian open elections data"
    assert isinstance(app, FastAPI)
| [
"mail@stefankasberger.at"
] | mail@stefankasberger.at |
0156df3dfcfc720906440e213664f5d8e437dfc7 | 81bad22641705683c68ff89f19362ba202891652 | /napari/utils/progress.py | 2f4dfaa2388ece8d1656f3c87ede44c8642dce93 | [
"BSD-3-Clause"
] | permissive | sofroniewn/napari | ee2a39a1a1132910db6f2a47994671e8138edb51 | beaa98efe5cf04ba659086e7a514b2ade05277af | refs/heads/main | 2023-07-12T02:46:41.185932 | 2022-09-14T21:57:15 | 2022-09-14T21:57:15 | 154,751,137 | 2 | 3 | BSD-3-Clause | 2023-07-01T10:26:45 | 2018-10-25T23:43:01 | Python | UTF-8 | Python | false | false | 4,659 | py | from typing import Iterable, Optional
from tqdm import tqdm
from napari.utils.events.event import EmitterGroup, Event
from ..utils.events.containers import EventedSet
from ..utils.translations import trans
class progress(tqdm):
    """This class inherits from tqdm and provides an interface for
    progress bars in the napari viewer. Progress bars can be created
    directly by wrapping an iterable or by providing a total number
    of expected updates.
    While this interface is primarily designed to be displayed in
    the viewer, it can also be used without a viewer open, in which
    case it behaves identically to tqdm and produces the progress
    bar in the terminal.
    See tqdm.tqdm API for valid args and kwargs:
    https://tqdm.github.io/docs/tqdm/
    Examples
    --------
    >>> def long_running(steps=10, delay=0.1):
    ...     for i in progress(range(steps)):
    ...         sleep(delay)
    it can also be used as a context manager:
    >>> def long_running(steps=10, repeats=4, delay=0.1):
    ...     with progress(range(steps)) as pbr:
    ...         for i in pbr:
    ...             sleep(delay)
    or equivalently, using the `progrange` shorthand
    .. code-block:: python
        with progrange(steps) as pbr:
            for i in pbr:
                sleep(delay)
    For manual updates:
    >>> def manual_updates(total):
    ...     pbr = progress(total=total)
    ...     sleep(10)
    ...     pbr.set_description("Step 1 Complete")
    ...     pbr.update(1)
    ...     # must call pbr.close() when using outside for loop
    ...     # or context manager
    ...     pbr.close()
    """
    monitor_interval = 0  # set to 0 to disable the thread
    # to give us a way to hook into the creation and update of progress objects
    # without progress knowing anything about a Viewer, we track all instances in
    # this evented *class* attribute, accessed through `progress._all_instances`
    # this allows the ActivityDialog to find out about new progress objects and
    # hook up GUI progress bars to its update events
    _all_instances: EventedSet['progress'] = EventedSet()
    def __init__(
        self,
        iterable: Optional[Iterable] = None,
        desc: Optional[str] = None,
        total: Optional[int] = None,
        nest_under: Optional['progress'] = None,
        *args,
        **kwargs,
    ) -> None:
        """Create the event emitters, run tqdm's init, register the instance.

        ``is_init`` is True only while ``tqdm.__init__`` runs; ``display()``
        checks it so the initial render is not routed to plain-terminal tqdm.
        """
        self.events = EmitterGroup(
            value=Event,
            description=Event,
            overflow=Event,
            eta=Event,
            total=Event,
        )
        self.nest_under = nest_under
        self.is_init = True
        super().__init__(iterable, desc, total, *args, **kwargs)
        # Ensure every bar has a description for the GUI to show.
        if not self.desc:
            self.set_description(trans._("progress"))
        progress._all_instances.add(self)
        self.is_init = False
    def __repr__(self) -> str:
        # A bar is represented by its description text.
        return self.desc
    @property
    def total(self):
        """Expected number of updates (0 signals an indeterminate bar)."""
        return self._total
    @total.setter
    def total(self, total):
        # Store and broadcast the new total so GUI bars can re-range.
        self._total = total
        self.events.total(value=self.total)
    def display(self, msg: str = None, pos: int = None) -> None:
        """Update the display and emit eta event."""
        # just plain tqdm if we don't have gui
        # (``self.gui`` is set by tqdm.__init__ from its ``gui`` kwarg —
        # presumably False for plain terminal use; confirm against tqdm.)
        if not self.gui and not self.is_init:
            super().display(msg, pos)
            return
        # TODO: This could break if user is formatting their own terminal tqdm
        etas = str(self).split('|')[-1] if self.total != 0 else ""
        self.events.eta(value=etas)
    def update(self, n=1):
        """Update progress value by n and emit value event"""
        super().update(n)
        self.events.value(value=self.n)
    def increment_with_overflow(self):
        """Update if not exceeding total, else set indeterminate range."""
        # Hitting the total flips the bar to indeterminate (total=0) and
        # signals overflow so the GUI can switch to a busy indicator.
        if self.n == self.total:
            self.total = 0
            self.events.overflow()
        else:
            self.update(1)
    def set_description(self, desc):
        """Update progress description and emit description event."""
        super().set_description(desc, refresh=True)
        self.events.description(value=desc)
    def close(self):
        """Close progress object and emit event."""
        # ``disable`` comes from tqdm; a disabled bar was never registered.
        if self.disable:
            return
        progress._all_instances.remove(self)
        super().close()
def progrange(*args, **kwargs):
    """Shorthand for ``progress(range(*args), **kwargs)``.
    Positional arguments are passed to :func:`range` (start, stop, step);
    keyword arguments are forwarded to :class:`progress`.
    Adds a tqdm-based progress bar to the napari viewer, if it
    exists, and returns the wrapped range object.
    Returns
    -------
    progress
        wrapped range object
    """
    return progress(range(*args), **kwargs)
| [
"noreply@github.com"
] | sofroniewn.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.