blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33b03bfe4606d07da07c3346d5d1ccfd40eca98f
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/crossmarketetf_bak/crossmarket_redemption_HA/YW_CETFSS_SHSH_037.py
|
b5b26bedf5acb5c58983fc121c0293984c3daf9e
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,499
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from crossmarketetf.cetfservice.cetf_main_service import *
from crossmarketetf.cetfservice.cetf_get_components_asset import *
from crossmarketetf.cetfservice.cetf_utils import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
from service.mainService import *
from mysql.getUpOrDownPrice import getUpPrice
from crossmarketetf.cetfservice.cetf_add import cetf_add
class YW_CETFSS_SHSH_037(xtp_test_case):
    """Shanghai ETF redemption case 037 (class/method names encode the case id)."""

    def test_YW_CETFSS_SHSH_037(self):
        """Redeem a Shanghai ETF the same day (T) it was bought.

        Flow:
        1. Buy one creation unit of ETF 530510 on day T.
        2. Redeem one unit the same day -- expected state: fully filled.
        3. Try to sell the ETF itself and then its component stocks on the
           secondary market -- both legs are expected to be rejected with
           error 11010121.
        """
        # -----------ETF redemption-------------
        title = ('上海ETF赎回--可深市股票退补现金替代:T日ETF拥股量1unit→T日赎回ETF')
        # Expected values for this case.
        # Possible expected states: initial / unfilled / fully filled /
        # rejected / cancel-rejected / internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and normally stay 0.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        unit_info = {
            'ticker': '530510',  # ETF ticker
            'etf_unit_buy': 1.0,  # number of ETF units to buy
            'etf_unit': 1.0,  # number of ETF units to redeem
            'etf_unit_sell': 1.0,  # number of ETF units to sell
            'component_unit_sell': 1.0  # component-stock sell units
        }
        # -----------Day T: buy the ETF-------------
        cetf_add(Api,
                 Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                 unit_info['ticker'],
                 unit_info['etf_unit_buy'])
        # -----------Query component-stock positions before redeeming-------------
        component_stk_info = cetf_get_all_component_stk(Api,unit_info['ticker'])
        # Minimum creation/redemption unit for this ETF.
        unit_number = query_creation_redem_unit(unit_info['ticker'])
        # Redemption quantity in shares.
        quantity = int(unit_info['etf_unit'] * unit_number)
        # Redemption order parameters ------------------------------------------
        wt_reqs = {
            'business_type':
            Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
            'order_client_id':
            2,
            'market':
            Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
            'ticker':
            unit_info['ticker'],
            'side':
            Api.const.XTP_SIDE_TYPE['XTP_SIDE_REDEMPTION'],
            'price_type':
            Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
            'quantity':
            quantity
        }
        g_func.cetf_parm_init(case_goal['期望状态'])
        rs1 = cetf_service_test(Api, case_goal, wt_reqs,component_stk_info)
        etf_creation_log(case_goal, rs1)
        self.assertEqual(rs1['用例测试结果'], True)
        # --------Secondary market: sell the ETF-----------
        case_goal['期望状态'] = '废单'
        case_goal['errorID'] = 11010121
        case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
        # Number of ETF shares to sell on the secondary market.
        quantity = int(unit_info['etf_unit_sell'] * unit_number)
        quantity_list = split_etf_quantity(quantity)
        # Limit-up price for the ETF.
        limitup_px = getUpPrice(unit_info['ticker'])
        rs2 = {}
        for etf_quantity in quantity_list:
            wt_reqs_etf = {
                'business_type':
                Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':
                2,
                'market':
                Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker':
                unit_info['ticker'],
                'side':
                Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type':
                Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price':
                limitup_px,
                'quantity':
                etf_quantity
            }
            # NOTE(review): this passes wt_reqs['price_type'] (the redemption
            # order's LIMIT type), not wt_reqs_etf['price_type']
            # (BEST5_OR_CANCEL). Possibly an intentional pattern shared by
            # sibling cases -- confirm before changing.
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            rs2 = serviceTest(Api, case_goal, wt_reqs_etf)
            if rs2['用例测试结果'] is False:
                etf_sell_log(case_goal, rs2)
                self.assertEqual(rs2['用例测试结果'], True)
                return
        etf_sell_log(case_goal, rs2)
        # ------------Secondary market: sell the component stocks-----------
        case_goal['期望状态'] = '废单'
        case_goal['errorID'] = 11010121
        case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
        # Component-stock codes and share counts of the ETF basket.
        etf_components = query_cetf_component_share(unit_info['ticker'])
        rs3 = {}
        for stk_code in etf_components:
            # Redemption cases 1-25 carry one Shanghai and one Shenzhen
            # component each; '008000' is the Shenzhen one -- skip it and
            # sell only the Shanghai components.
            if stk_code != '008000':
                components_share = etf_components[stk_code]
                quantity = (int(unit_info['component_unit_sell'])
                            if unit_info['component_unit_sell'] >= 100
                            else int(components_share * unit_info['component_unit_sell']))
                limitup_px = getUpPrice(stk_code)
                # NOTE(review): market below is XTP_MKT_SZ_A although the
                # comment above says only Shanghai components are sold --
                # confirm which is intended.
                wt_reqs = {
                    'business_type':
                    Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                    'order_client_id':
                    2,
                    'market':
                    Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                    'ticker':
                    stk_code,
                    'side':
                    Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                    'price_type':
                    Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                    'price':
                    limitup_px,
                    'quantity':
                    quantity
                }
                ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
                rs3 = serviceTest(Api, case_goal, wt_reqs)
                if rs3['用例测试结果'] is False:
                    etf_components_sell_log(case_goal, rs3)
                    self.assertEqual(rs3['用例测试结果'], True)
        etf_components_sell_log(case_goal, rs3)
        self.assertEqual(rs3['用例测试结果'], True)
# Run this single case directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
0b3171a0d71ffcfd3d3b577218d0ac08ae5273f7
|
b01d5af63ae060b3b4b1a715823722e0e2cde603
|
/tensorflow_graphics/nn/layer/tests/pointnet_test.py
|
d5a6a3b51085aa29b0bee419989d7efc2848ef79
|
[
"Apache-2.0"
] |
permissive
|
ghosalsattam/graphics
|
9c8b313def86d4629281e9c53e0cb261703336f5
|
946aa03b5178d2fc557a81045b84df24af322afd
|
refs/heads/master
| 2022-12-06T15:42:21.729897
| 2020-06-25T14:37:26
| 2020-06-25T14:37:51
| 276,175,390
| 0
| 0
|
Apache-2.0
| 2020-06-30T18:13:34
| 2020-06-30T18:13:33
| null |
UTF-8
|
Python
| false
| false
| 3,553
|
py
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pointnet layers."""
# pylint: disable=invalid-name
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_graphics.nn.layer.pointnet import ClassificationHead
from tensorflow_graphics.nn.layer.pointnet import PointNetConv2Layer
from tensorflow_graphics.nn.layer.pointnet import PointNetDenseLayer
from tensorflow_graphics.nn.layer.pointnet import PointNetVanillaClassifier
from tensorflow_graphics.nn.layer.pointnet import VanillaEncoder
from tensorflow_graphics.util import test_case
class RandomForwardExecutionTest(test_case.TestCase):
  """Smoke tests: random inputs flow through every PointNet module and come
  out with the expected shape, in both training and inference modes."""

  @parameterized.parameters(
      ((32, 2048, 1, 3), (32), (.5), True),
      ((32, 2048, 1, 3), (32), (.5), False),
      ((32, 2048, 1, 2), (16), (.99), True),
  )
  def test_conv2(self, input_shape, channels, momentum, training):
    """The conv layer keeps (batch, points, extra) and maps to `channels`."""
    batch, num_points, extra, _ = input_shape
    sample = tf.random.uniform(input_shape)
    conv = PointNetConv2Layer(channels, momentum)
    result = conv(sample, training=training)
    assert result.shape == (batch, num_points, extra, channels)

  @parameterized.parameters(
      ((32, 1024), (40), (.5), True),
      ((32, 2048), (20), (.5), False),
      ((32, 512), (10), (.99), True),
  )
  def test_dense(self, input_shape, channels, momentum, training):
    """The dense layer maps (batch, features) to (batch, channels)."""
    batch, _ = input_shape
    sample = tf.random.uniform(input_shape)
    dense = PointNetDenseLayer(channels, momentum)
    result = dense(sample, training=training)
    assert result.shape == (batch, channels)

  @parameterized.parameters(
      ((32, 2048, 3), (.9), True),
      ((32, 2048, 2), (.5), False),
      ((32, 2048, 3), (.99), True),
  )
  def test_vanilla_encoder(self, input_shape, momentum, training):
    """The vanilla encoder embeds a point cloud into 1024 features."""
    batch = input_shape[0]
    cloud = tf.random.uniform(input_shape)
    encoder = VanillaEncoder(momentum)
    embedding = encoder(cloud, training=training)
    assert embedding.shape == (batch, 1024)

  @parameterized.parameters(
      ((16, 1024), (20), (.9), True),
      ((8, 2048), (40), (.5), False),
      ((32, 512), (10), (.99), True),
  )
  def test_classification_head(self, input_shape, num_classes, momentum,
                               training):
    """The head maps an embedding to per-class scores."""
    batch = input_shape[0]
    embedding = tf.random.uniform(input_shape)
    head = ClassificationHead(num_classes, momentum)
    scores = head(embedding, training=training)
    assert scores.shape == (batch, num_classes)

  @parameterized.parameters(
      ((32, 1024, 3), 40, True),
      ((32, 1024, 2), 40, False),
      ((16, 2048, 3), 20, True),
      ((16, 2048, 2), 20, False),
  )
  def test_vanilla_classifier(self, input_shape, num_classes, training):
    """End-to-end classifier emits (batch, classes) logits usable by loss."""
    batch, classes = input_shape[0], num_classes
    cloud = tf.random.uniform(input_shape)
    classifier = PointNetVanillaClassifier(num_classes, momentum=.5)
    logits = classifier(cloud, training)
    assert logits.shape == (batch, classes)
    labels = tf.random.uniform((batch,), minval=0, maxval=classes, dtype=tf.int64)
    PointNetVanillaClassifier.loss(labels, logits)
# Delegate to the TF-Graphics test runner when executed directly.
if __name__ == "__main__":
  test_case.main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
fbcc8dc57d65876cb88bbb56654d802ff47535ab
|
ef74d9ad851021bcb0ed12880e14269b6ed7f617
|
/Sample/Koudai/Server/src/ZyGames.Tianjiexing.Server/PyScript/Action/action4408.py
|
c30f2afc099f329d04a422f45a419fb2d45cb810
|
[
"BSD-2-Clause-Views",
"MIT"
] |
permissive
|
sunyuping/Scut
|
b5e5798e9b519941f0ac3a08a3263dc0f45beb47
|
ec2ea35c0e4de1f2da49c50d14e119a4f17cd93a
|
refs/heads/master
| 2020-12-25T23:19:26.597830
| 2013-11-16T07:50:01
| 2013-11-16T07:50:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,953
|
py
|
import clr, sys
import random
import time
import datetime
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Tianjiexing.Model');
clr.AddReference('ZyGames.Tianjiexing.BLL');
clr.AddReference('ZyGames.Tianjiexing.Lang');
clr.AddReference('ZyGames.Tianjiexing.BLL.Combat');
from lang import Lang
from action import *
from System import *
from System.Collections.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.BLL.Combat import *
from ZyGames.Tianjiexing.Model.Enum import *
# 4408_圣吉塔属性兑换接口
class UrlParam(HttpParam):
    """Request parameters for action 4408: attribute type and star count."""

    def __init__(self):
        HttpParam.__init__(self)
        # Defaults used when the query string cannot be parsed.
        self.propertyType = 0
        self.starNum = 0
class ActionResult(DataResult):
    """Response payload for action 4408; no fields beyond DataResult."""

    def __init__(self):
        DataResult.__init__(self)
def getUrlElement(httpGet, parent):
    """Parse PropertyType and StarNum from the request into a UrlParam.

    Marks the parameter object as failed (Result = False) when either
    field is missing from the query string.
    """
    urlParam = UrlParam()
    hasAllFields = (httpGet.Contains("PropertyType")
                    and httpGet.Contains("StarNum"))
    if hasAllFields:
        urlParam.propertyType = httpGet.GetEnum[PropertyType]("PropertyType")
        urlParam.starNum = httpGet.GetIntValue("StarNum")
    else:
        urlParam.Result = False
    return urlParam
def takeAction(urlParam, parent):
    """Action 4408: exchange tower score stars for an attribute bonus."""
    actionResult = ActionResult();
    userId = parent.Current.User.PersonalId;
    contextUser = parent.Current.User;
    def loadError():
        # Fail the action with the generic "load error" code/message.
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("LoadError");
        actionResult.Result = False;
        return actionResult;
    # Each star is worth 1/percent (0.01) of an attribute point.
    percent = 100.0;
    userSJTInfo = GameDataCacheSet[UserShengJiTa]().FindKey(userId);
    # NOTE(review): FindKey may return null when the record is missing; the
    # attribute accesses below would then raise -- confirm the record is
    # guaranteed to exist at this point.
    # Check the player owns enough stars for the requested exchange.
    if userSJTInfo.LastScoreStar < urlParam.starNum:
        return loadError();
    if urlParam.propertyType == PropertyType.Life:
        userSJTInfo.LifeNum = userSJTInfo.LifeNum + (urlParam.starNum / percent);
    elif urlParam.propertyType == PropertyType.WuLi:
        userSJTInfo.WuLiNum = userSJTInfo.WuLiNum + (urlParam.starNum / percent);
    elif urlParam.propertyType == PropertyType.Mofa:
        userSJTInfo.MofaNum = userSJTInfo.MofaNum + (urlParam.starNum / percent);
    elif urlParam.propertyType == PropertyType.FunJi:
        userSJTInfo.FunJiNum = userSJTInfo.FunJiNum + (urlParam.starNum / percent);
    else:
        # Unknown property type: reject the request.
        return loadError();
    # Deduct the spent stars from the cached record.
    userSJTInfo.LastScoreStar -= urlParam.starNum;
    return actionResult;
def buildPacket(writer, urlParam, actionResult):
    """Action 4408 writes no response body; always report success."""
    return True
|
[
"wzf_88@qq.com"
] |
wzf_88@qq.com
|
81fb43984df719ac0a69586774519e26f244c066
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/scattersmith/marker/_colorsrc.py
|
1d2e5dccad1b68311d62023efc16f4abcdc15182
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 423
|
py
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``scattersmith.marker.colorsrc`` attribute."""

    def __init__(
        self, plotly_name="colorsrc", parent_name="scattersmith.marker", **kwargs
    ):
        # Pop edit_type first so it is not forwarded twice through **kwargs;
        # the default edit type for a source attribute is "none".
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
[
"nicolas@plot.ly"
] |
nicolas@plot.ly
|
5157862865c047a3f71dfc3861aa683a3ee01433
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_340/ch18_2020_03_24_20_25_22_823016.py
|
2c9bee710b1b30509eed5988debf9eecc529908b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
def verifica_idade(idade):
    """Return the access message for the given age.

    21 or older: allowed in the USA and Brazil; 18-20: allowed in Brazil
    only; under 18: not allowed.
    """
    if idade >= 21:
        return "Liberado EUA e BRASIL"
    if idade >= 18:
        return "Liberado BRASIL"
    return "Não está liberado"
|
[
"you@example.com"
] |
you@example.com
|
387c0b0af55a652c79e797fdd431a9df93bc9ad1
|
0bce7412d58675d6cc410fa7a81c294ede72154e
|
/Python3/0840. Magic Squares In Grid.py
|
f34d8b33065ffacf2d06e6948cd16d845b526b6e
|
[] |
no_license
|
yang4978/LeetCode
|
9ddf010b0f1dda32cddc7e94c3f987509dea3214
|
6387d05b619d403414bad273fc3a7a2c58668db7
|
refs/heads/master
| 2022-01-15T04:21:54.739812
| 2021-12-28T12:28:28
| 2021-12-28T12:28:28
| 182,653,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
class Solution:
    def numMagicSquaresInside(self, grid: List[List[int]]) -> int:
        """Count 3x3 subgrids whose cells are the distinct digits 1-9 with
        every row and column summing to 15 (LeetCode 840)."""
        rows, cols = len(grid), len(grid[0])
        found = 0
        for r in range(1, rows - 1):
            for c in range(1, cols - 1):
                # Every qualifying square has 5 in its center.
                if grid[r][c] != 5:
                    continue
                cells = [grid[x][y]
                         for x in range(r - 1, r + 2)
                         for y in range(c - 1, c + 2)]
                digits = set(cells)
                if len(digits) != 9 or min(digits) != 1 or max(digits) != 9:
                    continue
                row_sums = [sum(cells[3 * k:3 * k + 3]) for k in range(3)]
                col_sums = [cells[k] + cells[k + 3] + cells[k + 6]
                            for k in range(3)]
                if all(total == 15 for total in row_sums + col_sums):
                    found += 1
        return found
|
[
"noreply@github.com"
] |
yang4978.noreply@github.com
|
8ad5873a652011eb278d4990e7ab0deaaa16d6d5
|
fa0c53ac2a91409eaf0fc7c082a40caae3ffa0d8
|
/com/lc/python_1_100_Days_Demo/Day41-55/code/oa/hrs/migrations/0002_auto_20180523_0923.py
|
8b9b835a5b6e204d799a71659938b8b97db8597e
|
[] |
no_license
|
ahviplc/pythonLCDemo
|
aba6d8deb1e766841461bd772560d1d50450057b
|
22f149600dcfd4d769e9f74f1f12e3c3564e88c2
|
refs/heads/master
| 2023-07-24T01:41:59.791913
| 2023-07-07T02:32:45
| 2023-07-07T02:32:45
| 135,969,516
| 7
| 2
| null | 2023-02-02T03:24:14
| 2018-06-04T04:12:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
# Generated by Django 2.0.5 on 2018-05-23 01:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated hrs schema changes (2018-05-23).

    Adds the Dept.excellent flag and re-declares several Dept/Emp fields
    with their current options.
    """

    dependencies = [
        ('hrs', '0001_initial'),
    ]

    operations = [
        # New boolean flag on Dept (default 0 -> False).
        migrations.AddField(
            model_name='dept',
            name='excellent',
            field=models.BooleanField(default=0, verbose_name='是否优秀'),
        ),
        migrations.AlterField(
            model_name='dept',
            name='location',
            field=models.CharField(max_length=10, verbose_name='部门所在地'),
        ),
        migrations.AlterField(
            model_name='dept',
            name='name',
            field=models.CharField(max_length=20, verbose_name='部门名称'),
        ),
        # Dept.no is the primary key and is not serialized in fixtures.
        migrations.AlterField(
            model_name='dept',
            name='no',
            field=models.IntegerField(primary_key=True, serialize=False, verbose_name='部门编号'),
        ),
        # Emp.comm and Emp.mgr become optional (nullable / blankable).
        migrations.AlterField(
            model_name='emp',
            name='comm',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True),
        ),
        migrations.AlterField(
            model_name='emp',
            name='mgr',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
[
"ahlc@sina.cn"
] |
ahlc@sina.cn
|
8947aacd51a8e5f7d74271633434aedd17340366
|
85ef27cfe69bf8f4c65ecf4a382742984d91ae12
|
/Python/Programs/count_letters.py
|
2fe480f4ae66114923b5425db6a3b01a6754c3cf
|
[] |
no_license
|
SenthilKumar009/100DaysOfCode-DataScience
|
7fc0d404e486b2aa1da3a242def0a307fec50f47
|
dd2d44363960c9078b73956b6587961de0185f16
|
refs/heads/master
| 2023-07-17T04:38:21.404964
| 2021-08-27T12:56:55
| 2021-08-27T12:56:55
| 191,664,483
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# Tally how often each character occurs in the sentence and print the
# resulting mapping (keyed in order of first appearance).
message = 'It was a bright cold day in April, and the clocks were striking thirteen.'

count = {}
for character in message:
    count[character] = count.get(character, 0) + 1
print(count)
|
[
"senthilkumark.nitt@gmail.com"
] |
senthilkumark.nitt@gmail.com
|
f6b2bb05fba8fa4a5e097c7a3e7a8c1d6f174bb5
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/966bacf76c3a1f815c5fe55cee798dee/snippet.py
|
cb3ed3fe07ada127bc9b67fa2db937d98cc30bc7
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 378
|
py
|
class globalizer():
    """Publish the instance under the module-global name ``a``.

    ``global self`` is not legal because ``self`` is a function argument,
    so __init__ re-binds the instance to a fresh global name instead.
    """
    def __init__(self):
        global a
        a = self  # "global self" is forbidden because self is an ARGUMENT
# Module-level instance; nested functions can reach it without arguments.
cloud = globalizer()
if __name__ == '__main__':
    # Attributes attached here are visible inside randFunction via the
    # module-level binding -- the point of the snippet.
    cloud.nbr = 1
    cloud.string = 'Hello World'
    def randFunction():
        for i in range(cloud.nbr):
            print(cloud.string)
    randFunction()
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
19c45d471ccab8ab75f519647a61c26064197ab5
|
496a63f41fa32e2bb3ecce0d35ff4374f1c02ad5
|
/src/scripting/assembly/ge.py
|
f933b8eb895afe1b368c3d8c5beefca223b82b18
|
[
"BSD-3-Clause"
] |
permissive
|
vincent-lg/avenew.one
|
bbfa8d44e68db943b8825e9d4a32a43e985778fe
|
fb7f98d331e47e2032ee1e51bf3e4b2592807fdf
|
refs/heads/main
| 2023-02-14T00:28:53.511552
| 2021-01-13T11:13:07
| 2021-01-13T11:13:07
| 330,207,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,368
|
py
|
# Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""GE assembly expression, to compare two values equal or greater than."""
from scripting.assembly.abc import BaseExpression
class Ge(BaseExpression):

    """
    GE assembly expression.

    Args:
        None.

    Pops two values off the stack, compares them with ``>=`` and pushes
    the boolean result back onto the stack.
    """

    name = "GE"

    @classmethod
    async def process(cls, script, stack):
        """
        Process this expression.

        Args:
            script (Script): the script object.
            stack (LifoQueue): the current stack.

        The right-hand operand sits on top of the stack, the left-hand
        operand just below it.
        """
        rhs = stack.get(block=False)
        lhs = stack.get(block=False)
        stack.put(lhs >= rhs, block=False)
|
[
"vincent.legoff.srs@gmail.com"
] |
vincent.legoff.srs@gmail.com
|
6a63bb8312a5cd8e61b833f01e14539cb8da6134
|
088e000eb5f16e6d0d56c19833b37de4e67d1097
|
/model-optimizer/extensions/ops/interpolate.py
|
36f96e35ce8a854d22ae2d4a4234e40536375d2b
|
[
"Apache-2.0"
] |
permissive
|
projectceladon/dldt
|
614ba719a428cbb46d64ab8d1e845ac25e85a53e
|
ba6e22b1b5ee4cbefcc30e8d9493cddb0bb3dfdf
|
refs/heads/2019
| 2022-11-24T10:22:34.693033
| 2019-08-09T16:02:42
| 2019-08-09T16:02:42
| 204,383,002
| 1
| 1
|
Apache-2.0
| 2022-11-22T04:06:09
| 2019-08-26T02:48:52
|
C++
|
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.layout import get_batch_dim, get_features_dim, shape_for_layout
from mo.graph.graph import Node, Graph
from mo.ops.op import Op, PermuteAttrs
class Interpolate(Op):
    """Model Optimizer Interpolate operation (spatial resize)."""

    op = 'Interpolate'
    # Not auto-enabled for extraction.
    enabled = False

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'type': __class__.op,
            'op': __class__.op,
            'axes': None,
            'mode': None,
            'align_corners': 0,
            'antialias': 0,
            'pads_begin': 0,
            'pads_end': 0,
            'in_ports_count': 2,
            'out_ports_count': 1,
            # Port 1 carries the target spatial size and is forced to int64.
            'force_precision_in_ports': {1:'int64'},
            'infer': __class__.infer,
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        # 'axes' is serialized as a comma-separated string in the IR.
        return [
            ('axes', lambda node: ','.join(map(str, node.axes))),
            'mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end',
        ]

    @staticmethod
    def infer(node: Node):
        """Shape inference: copy batch/feature dims, take H/W from port 1."""
        layout = node.graph.graph['layout']
        # Only 4-D layouts (e.g. NCHW / NHWC) are supported.
        assert len(layout) == 4
        assert len([p for p in node.in_ports().values() if not p.disconnected()])
        assert node.has_valid('mode')
        assert node.has_valid('axes')
        src_shape = node.in_port(0).data.get_shape()
        assert src_shape is not None
        dst_shape = node.in_port(1).data.get_value()
        assert dst_shape is not None
        # NOTE(review): assumes the port-1 value is exactly [height, width];
        # confirm for models that pass a different axes configuration.
        out_height = dst_shape[0]
        out_width = dst_shape[1]
        node.out_node().shape = shape_for_layout(layout,
                                                 batch=src_shape[get_batch_dim(layout, 4)],
                                                 features=src_shape[get_features_dim(layout, 4)],
                                                 height=out_height,
                                                 width=out_width)
        PermuteAttrs.create_permute_attrs(node, attrs=[('axes', 'input:0')])
|
[
"44090433+openvino-pushbot@users.noreply.github.com"
] |
44090433+openvino-pushbot@users.noreply.github.com
|
b0efa79ff67011699b40d07b9eb872a0596d4486
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/84/usersdata/188/56925/submittedfiles/lista1.py
|
3ae36f6760785e4cd08b91fcf70529b819f30e26
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# -*- coding: utf-8 -*-
# Read n values from the user, collect them into a list and print it.
lista = []
n = int(input('Digite n:'))
for _ in range(n):
    lista.append(float(input('Valor:')))
print(lista)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
29aa09304badbc52a5f19a16218b19920f47ee59
|
10af7a7c08e2d8c630f115a7f1c89dc9a2c0c007
|
/0437_pathSum.py
|
ac2d79c9e3aa80043576026113c614918875db5d
|
[] |
no_license
|
mathvolcano/leetcode
|
4218846652a8d73192d74cbf83f5a92549236568
|
2682cc975ec299d9253aa191b5453669dd1ebd58
|
refs/heads/master
| 2022-11-27T22:47:49.700110
| 2022-11-08T18:11:27
| 2022-11-08T18:11:27
| 154,909,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
"""
437. Path Sum III
https://leetcode.com/problems/path-sum-iii/
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def pathSum(self, root: TreeNode, path_sum: int) -> int:
        """Count downward paths whose node values sum to path_sum
        (LeetCode 437, Path Sum III)."""
        if not root:
            return 0
        # Paths that start at this node...
        total = self.n_continued_paths(root, path_sum)
        # ...plus paths that start strictly lower in either subtree.
        if root.left:
            total += self.pathSum(root.left, path_sum)
        if root.right:
            total += self.pathSum(root.right, path_sum)
        return total

    def n_continued_paths(self, root, target):
        """Count paths that start at `root` and keep descending."""
        if not root:
            return 0
        matched = 1 if root.val == target else 0
        remainder = target - root.val
        if root.left:
            matched += self.n_continued_paths(root.left, remainder)
        if root.right:
            matched += self.n_continued_paths(root.right, remainder)
        return matched
|
[
"mathvolcano@users.noreply.github.com"
] |
mathvolcano@users.noreply.github.com
|
880ebf9230daad74995329f1a250ec1834803007
|
1588a1d601d29c18942d220657185d3bf7b17160
|
/시뮬레이션/BOJ1966.py
|
f8f06358c959b96afa8efb9434aea0aa231e353b
|
[] |
no_license
|
geonwoomun/AlgorithmStudy
|
1f8148e981beebd2e6f70e65193ce445fa59df96
|
d43b624aad80f10d687a8f4b37cc79d88fc772b3
|
refs/heads/master
| 2020-08-01T05:24:26.980370
| 2020-07-15T05:04:35
| 2020-07-15T05:04:35
| 210,878,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# BOJ 1966: printer queue -- the printer always prints the
# highest-priority document first, rotating the rest to the back.
# Report at which position the document originally at index M prints.
from sys import stdin
input = stdin.readline

T = int(input())
while T > 0:
    N, M = map(int, input().split())  # N documents, target at index M
    paper = list(map(int, input().split()))
    check = list(range(N))  # original positions, rotated with priorities
    printed = 0
    while True:
        top = max(paper)
        priority = paper.pop(0)
        origin = check.pop(0)
        if origin == M and top == priority:
            # The tracked document just printed; report its order.
            printed += 1
            break
        elif top == priority:
            printed += 1
        else:
            paper.append(priority)
            check.append(origin)
    print(printed)
    T -= 1
|
[
"ansejrrhkd@naver.com"
] |
ansejrrhkd@naver.com
|
8930a0c2a06c0c47f689a2c208024783fa6de8f4
|
769f08657e880c4b1cc085dd77277c6eef8772e5
|
/pep412.py
|
9d69fb239b7d2c34d02bda047e6e781adb4dd5ec
|
[] |
no_license
|
udemy-course/python3-new-feature
|
106160389ebf70e13a5c2bce158fbc32d4979fc9
|
b6b482bc4acffb9bf168dc9f494f28179357934c
|
refs/heads/master
| 2020-03-23T03:26:29.954700
| 2018-08-23T13:37:40
| 2018-08-23T13:37:40
| 141,030,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
"""
pep412.py
Created by Peng Xiao on 2018-08-10. xiaoquwl@gmail.com
"""
import time
class Foo:
    """Minimal two-attribute object used for the PEP 412 memory demo."""

    def __init__(self, a, b):
        # Keep both constructor arguments on the instance.
        self.a, self.b = a, b
if __name__ == "__main__":
    # Allocate a million two-attribute objects, then idle so an external
    # memory profiler can inspect resident size (PEP 412 key-sharing demo).
    total = 1000000
    instances = []
    for _ in range(total):
        instances.append(Foo(1, 2))
    while True:
        time.sleep(4)
|
[
"xiaoquwl@gmail.com"
] |
xiaoquwl@gmail.com
|
9fedf7c194fd4216199fea7f3daae1bf67c19c59
|
62c20237dbc12a49849bc6533e5e386bc6a26cf6
|
/app.py
|
4234d25f12c085e1b347a75fbf2b2881d1665eb2
|
[] |
no_license
|
yhx189/api-server
|
03ef3b70c5734442c90dc8b118920237616da072
|
d96cfc2d437ff2b540e355ae363003629d23a1f1
|
refs/heads/master
| 2021-01-10T01:15:23.539890
| 2016-01-21T00:53:18
| 2016-01-21T00:53:18
| 50,071,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,414
|
py
|
#!flask/bin/python
import socket
import subprocess
from flask import Flask, jsonify, abort, request, make_response, url_for
# Serve static files from the package root (no /static URL prefix).
app = Flask(__name__, static_url_path = "")
#app = Flask(__name__)
@app.errorhandler(400)
def bad_request(error):
    # Render HTTP 400 as a JSON body instead of Flask's default HTML page.
    # Renamed from `not_found`: the module defined two functions with that
    # name, so this one was shadowed at module level (both handlers still
    # fired because Flask captures the function at decoration time, but the
    # duplicate definition is an F811 lint error and misleading).
    return make_response(jsonify( { 'error': 'Bad request' } ), 400)
@app.errorhandler(404)
def not_found(error):
    # Render HTTP 404 as a JSON body instead of Flask's default HTML page.
    return make_response(jsonify( { 'error': 'Not found' } ), 404)
tasks = []
# Seed the in-memory task list from out.txt at import time. Each line is a
# space-separated measurement record; fields 1, 4, 7 and 11 hold the source
# address, destination address, RTT and bandwidth respectively.
# NOTE(review): Python 2 code (print statement); import fails if out.txt is
# missing -- confirm the file is always produced before startup.
with open('out.txt') as f:
    lines = f.readlines()
    for line in lines:
        words = line.split(' ')
        task = {'src': words[1],
                'dst': words[4],
                'rtt': words[7],
                'bandwidth': words[11]}
        tasks.append(task)
    print tasks
#tasks = [
#    {
#        'id': 1,
#        'dst': u'165.124.182.209',
#        'bandwidth': u'28.05',
#        'done': False
#    },
#    {
#        'id': 2,
#        'dst': u'216.58.216.78',
#        'bandwidth': u'200.5',
#        'done': False
#    }
#]
def make_public_task(task):
    # Build the client-facing view of a task: the internal 'id' field is
    # replaced by a 'uri' pointing at the task's GET endpoint; every other
    # field is copied through unchanged.
    public = {}
    for field in task:
        if field != 'id':
            public[field] = task[field]
        else:
            public['uri'] = url_for('get_task', task_id = task['dst'], _external = True)
    return public
@app.route('/todo/api/v1.0/tasks', methods = ['GET'])
def get_tasks():
    # List every known measurement (loaded from out.txt plus POSTed tasks).
    return jsonify({'tasks': tasks})
@app.route('/todo/api/v1.0/hops/<task_id>', methods = ['GET'])
def get_hop(task_id):
    # Traceroute-style lookup: return the address of hop number <task_id>
    # on the path to google.com using the classic UDP-probe / ICMP-reply
    # technique (needs raw-socket privileges).
    # NOTE(review): Python 2 code; the destination is hard-coded to
    # google.com regardless of the request -- confirm that is intended.
    dest_name="google.com"
    dest_addr = socket.gethostbyname(dest_name)
    port = 33434
    max_hops = 30
    icmp = socket.getprotobyname('icmp')
    udp = socket.getprotobyname('udp')
    ttl = 1
    while True:
        # Fresh sockets per probe; TTL controls which router replies.
        recv_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
        send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, udp)
        send_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        print "ttl is:%d" % ttl
        recv_socket.bind(("", port))
        send_socket.sendto("", (dest_name, port))
        curr_addr = None
        curr_name = None
        try:
            _, curr_addr = recv_socket.recvfrom(512)
            curr_addr = curr_addr[0]
            try:
                # Prefer the reverse-DNS name when one resolves.
                curr_name = socket.gethostbyaddr(curr_addr)[0]
            except socket.error:
                curr_name = curr_addr
        except socket.error:
            # Probe timed out / unreadable reply: hop stays unknown ("*").
            pass
        finally:
            send_socket.close()
            recv_socket.close()
        if curr_addr is not None:
            curr_host = "%s (%s)" % (curr_name, curr_addr)
        else:
            curr_host = "*"
        print "%d\t%s" % (ttl, curr_host)
        if ttl == int(task_id):
            # Reached the requested hop: return its address.
            ret = {'ip': curr_host}
            return jsonify( { 'task': ret } )
        ttl += 1
        if curr_addr == dest_addr or ttl > int(task_id): #max_hops:
            break
@app.route('/todo/api/v1.0/tasks/<task_id>/<src_id>', methods = ['GET'])
def get_task(task_id, src_id):
    # Look up the (src, dst) measurement whose addresses match on their
    # first five characters. On a miss, shell out to the external King
    # latency-measurement binary (which appends to out.txt), re-read the
    # file and serve the fresh record, or 404 if still absent.
    # NOTE(review): Python 2 code -- filter() returns a list here.
    print task_id
    print src_id
    task = filter(lambda t: t['dst'][:5] == task_id[:5], tasks)
    new_task = filter(lambda t: t['src'][:5] == src_id[:5], task)
    if len(new_task) == 0:
        print "cannot find the ip " + task_id + " from the database"
        print "calling king service from server"
        # King's stdout goes to log.txt; its measurements land in out.txt.
        print subprocess.call(["../king/bin/king", src_id, task_id], stdout=open('log.txt','a'))
        re_tasks = []
        with open('out.txt') as ff:
            lines = ff.readlines()
            for line in lines:
                words = line.split(' ')
                re_task = {'src': words[1],
                        'dst': words[4],
                        'rtt': words[7],
                        'bandwidth': words[11]}
                re_tasks.append(re_task)
            print re_tasks
            _task = filter(lambda t: t['dst'][:5] == task_id[:5], re_tasks)
            inject_task = filter(lambda t: t['src'][:5] == src_id[:5], _task)
            print inject_task
            if len(inject_task) == 0:
                abort(404)
            print inject_task
            new_task = inject_task
    print new_task
    return jsonify( { 'task': make_public_task(new_task[0]) } )
@app.route('/todo/api/v1.0/tasks', methods = ['POST'])
def create_task():
    """Create a task from the JSON body.

    Requires a 'dst' key; 'bandwidth' is optional (defaults to "").
    Returns the new task with HTTP 201.
    """
    # BUG FIX: the original rejected requests missing a 'title' key, a
    # field this endpoint never reads; validate the 'dst' key that is
    # actually consumed below.
    if not request.json or 'dst' not in request.json:
        abort(400)
    task = {
        'id': tasks[-1]['id'] + 1,  # assumes tasks is non-empty -- TODO confirm seed data
        'dst': request.json['dst'],
        'bandwidth': request.json.get('bandwidth', ""),
        'done': False
    }
    tasks.append(task)
    return jsonify( { 'task': make_public_task(task) } ), 201
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods = ['PUT'])
def update_task(task_id):
    """Update 'dst', 'bandwidth' and/or 'done' of an existing task.

    404 if the id is unknown; 400 on a missing/ill-typed JSON body.
    """
    task = filter(lambda t: t['id'] == task_id, tasks)
    if len(task) == 0:
        abort(404)
    if not request.json:
        abort(400)
    # BUG FIX: the original type-checked 'title'/'description', fields this
    # API never stores; validate the fields that are actually updated below.
    # (Py2: JSON strings arrive as unicode.)
    if 'dst' in request.json and type(request.json['dst']) is not unicode:
        abort(400)
    if 'bandwidth' in request.json and type(request.json['bandwidth']) is not unicode:
        abort(400)
    if 'done' in request.json and type(request.json['done']) is not bool:
        abort(400)
    task[0]['dst'] = request.json.get('dst', task[0]['dst'])
    task[0]['bandwidth'] = request.json.get('bandwidth', task[0]['bandwidth'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
    return jsonify( { 'task': make_public_task(task[0]) } )
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods = ['DELETE'])
def delete_task(task_id):
    """Delete the task with the given id; 404 when no such task exists."""
    matching = [t for t in tasks if t['id'] == task_id]
    if not matching:
        abort(404)
    tasks.remove(matching[0])
    return jsonify( { 'result': True } )
if __name__ == '__main__':
app.run(debug = True, host ='0.0.0.0')
|
[
"ubuntu@ubuntu.(none)"
] |
ubuntu@ubuntu.(none)
|
09c129b6cd1a837099b4b2c25784a3808389e94e
|
eafe15761be5f857d5b756eafc9446fc375aba66
|
/video/migrations/0002_auto_20200216_1749.py
|
2c6e5d422dc6f2c4f631513f1d9151a4b2a1e24c
|
[] |
no_license
|
biletboh/tempora
|
6ff10995e7eacae8b2ac95b1dfb785749949670b
|
cec40ad8719d03e5c1d83320c38f78d3e0338687
|
refs/heads/master
| 2023-05-25T21:23:09.466591
| 2022-02-09T10:10:58
| 2022-02-09T10:10:58
| 92,310,550
| 0
| 0
| null | 2023-05-22T20:57:13
| 2017-05-24T15:56:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 507
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2020-02-16 15:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes video.slug a nullable, optional,
    # unique SlugField (verbose_name is Ukrainian for "link on the site").
    dependencies = [
        ('video', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='video',
            name='slug',
            field=models.SlugField(blank=True, null=True, unique=True, verbose_name='Посилання на сайті'),
        ),
    ]
|
[
"biletskyboh@gmail.com"
] |
biletskyboh@gmail.com
|
6bece771f9246166f9a70f04cf16afe1303fb78c
|
fc0cc698686f4b17aafe42360dac81357bcb183e
|
/docimage/preprocessing.py
|
a2d90ef4b101fa2335edaf0ef786be4c5104e6e4
|
[
"MIT"
] |
permissive
|
Abhishek-Prusty/docimage
|
7b7bef669b3a5e03b170fed4c9f340af9e103fe7
|
c29f9a2634e6f807b108cd4237783be22baea307
|
refs/heads/master
| 2020-03-22T13:52:45.381192
| 2018-08-16T17:16:57
| 2018-08-16T17:16:57
| 140,138,183
| 2
| 0
|
MIT
| 2018-08-11T09:59:15
| 2018-07-08T04:48:38
|
Python
|
UTF-8
|
Python
| false
| false
| 6,980
|
py
|
# -*- coding: utf-8 -*-
"""
pre-processing and pattern matching.
This python module can perform the following functions:
1. Binarization - method binary_img(img) performs this function
2. Skew correction - method skew_correction(img) performs this function
Need to introduce machine learning of some sort to make the skew correction
method run faster :(
Or... A simple fix would be to resize the image first, and then apply the skew
correction method! That'll probably take lesser time...
Resizing is yielding better results.
"""
import logging
import cv2
import numpy as np
from scipy.stats import mode
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
kernel = np.ones((5, 5), np.uint8)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
"""
Method to binarize an image
Input: Grayscale image
Output: Binary image
The nature of the output is such that the text(foreground) has a colour
value of (255,255,255), and the background has a value of (0,0,0).
"""
def binary_img(img):
    """Binarize a grayscale image.

    Output convention: text (foreground) pixels are 255, background is 0.

    :param img: single-channel grayscale image (numpy array)
    :return: binary image of the same size
    """
    # img_erode = cv2.dilate(img,kernel,iterations = 2)
    blur = cv2.medianBlur(img, 5)
    # mask1 = np.ones(img.shape[:2],np.uint8)
    # Applying histogram equalization (CLAHE).
    cl1 = clahe.apply(blur)
    # Build a mask by inverting the dilated equalized image; threshold=1
    # keeps every pixel that is not pure black after inversion.
    circles_mask = cv2.dilate(cl1, kernel, iterations=1)
    circles_mask = (255 - circles_mask)
    thresh = 1
    circles_mask = cv2.threshold(circles_mask, thresh, 255, cv2.THRESH_BINARY)[1]
    # Canny edges restricted to the masked region, then thickened.
    edges = cv2.Canny(cl1, 100, 200)
    edges = cv2.bitwise_and(edges, edges, mask=circles_mask)
    dilation = cv2.dilate(edges, kernel, iterations=1)
    # Keep only the original pixels under the edge mask.
    display = cv2.bitwise_and(img, img, mask=dilation)
    # Two CLAHE passes before Otsu thresholding.
    cl2 = clahe.apply(display)
    cl2 = clahe.apply(cl2)
    ret, th = cv2.threshold(cl2, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    th = 255 - th
    thg = cv2.adaptiveThreshold(display, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
        cv2.THRESH_BINARY, 11, 2)
    # final = cv2.bitwise_and(dilation,dilation,mask=th)
    # Combine the edge mask with the inverted adaptive threshold.
    finalg = cv2.bitwise_and(dilation, dilation, mask=thg)
    finalg = 255 - finalg
    abso = cv2.bitwise_and(dilation, dilation, mask=finalg)
    return abso
"""
Method to resize the image. This is going to help in reducing the number
of computations, as the size of data will reduce.
"""
def resize(img, width=1000):
    """Resize img to a target width, preserving aspect ratio.

    Generalized: the previously hard-coded 1000 px width is now a
    parameter (default unchanged), so callers can pick the speed/quality
    trade-off for the skew-correction pass.

    :param img: source image (numpy array, H x W [x C])
    :param width: target width in pixels (default 1000)
    :return: the resized image
    """
    r = float(width) / img.shape[1]
    dim = (width, int(img.shape[0] * r))
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    # cv2.imshow('resized', resized)
    return resized
"""
Method to correct the skew of an image
Input: Binary image
Output: Skew corrected binary image
The nature of the output is such that the binary image is rotated appropriately
to remove any angular skew.
Find out the right place to insert the resizing method call.
Try to find one bounding rectangle around all the contours
"""
def skew_correction(img):
    """Correct angular skew in a document image.

    Pipeline: binarize -> keep contours with near-typical area ->
    horizontal Sobel + Hough lines to trace text baselines -> estimate
    each line's orientation from the principal eigenvector of its pixel
    covariance -> rotate the image by the consensus angle.

    :param img: grayscale input image
    :return: rotation-corrected copy of img (also written to
        'skewcorrected2.jpg' as a side effect)
    """
    areas = []  # areas of all contours found in the binarized image
    dev_areas = []  # each area minus the std deviation (see filter below)
    all_angles = []
    k = 0
    binary = binary_img(img)
    # binary = resize(binary)
    # 3-value return matches the OpenCV 3.x findContours API.
    im2, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # cnt = contours[0]
    # upper_bound=len(contours)
    height_orig, width_orig = img.shape[:2]
    words = np.zeros(img.shape[:2], np.uint8)
    for c in contours:
        areas.append(cv2.contourArea(c))
    std_dev = np.std(areas)
    for i in areas:
        dev_areas.append(i - std_dev)
    dev_contours = np.zeros(img.shape[:2], np.uint8)
    # Keep contours whose raw area lies in (0, 2*std_dev].
    # NOTE(review): this filters relative to the std deviation only, not
    # to the *mean* area -- confirm this matches the intended
    # "within 1st std deviation" behavior.
    for i in dev_areas:
        if ((i > (-std_dev)) and (i <= (std_dev))):
            cv2.drawContours(dev_contours, contours, k, (255, 255, 255), -1)
        k += 1
    # Vertical-gradient Sobel highlights horizontal text baselines.
    sobely = cv2.Sobel(dev_contours, cv2.CV_64F, 0, 1, ksize=5)
    abs_sobel64f = np.absolute(sobely)
    sobel_8u = np.uint8(abs_sobel64f)
    # cv2.imshow('Output2',sobel_8u)
    minLineLength = 100
    maxLineGap = 10
    # NOTE(review): in OpenCV's Python API the 5th positional argument of
    # HoughLinesP is `lines`, not minLineLength -- these two values may
    # not be applied as intended; verify with keyword arguments.
    lines = cv2.HoughLinesP(sobel_8u, 1, np.pi / 180, 100, minLineLength, maxLineGap)
    for x1, y1, x2, y2 in lines[0]:
        cv2.line(words, (x1, y1), (x2, y2), (255, 255, 255), 2)
    # cv2.imshow('hough',words)
    height_orig, width_orig = img.shape[:2]
    all_angles = []
    im2, contours, hierarchy = cv2.findContours(words, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    logging.debug(len(contours))
    contour_count = 0
    for c in contours:
        # max_index = np.argmax(areas)
        # current_contour = np.zeros(img.shape[:2],np.uint8)
        current_contour = np.zeros(img.shape[:2], np.uint8)
        cv2.drawContours(current_contour, contours, contour_count, (255, 255, 255), -1)
        height, width = current_contour.shape[:2]
        # all_white_pixels = []
        # Collect this contour's white pixel coordinates.
        # NOTE(review): O(height*width) Python loop per contour;
        # np.argwhere(current_contour == 255) would vectorize this.
        current_white_pixels = []
        for i in range(0, height):
            for j in range(0, width):
                if (current_contour.item(i, j) == 255):
                    current_white_pixels.append([i, j])
        matrix = np.array(current_white_pixels)
        # Finding covariance matrix of the pixel cloud.
        C = np.cov(matrix.T)
        eigenvalues, eigenvectors = np.linalg.eig(C)
        # Finding max eigenvalue
        # max_ev = max(eigenvalues)
        # Finding index of max eigenvalue
        max_index = eigenvalues.argmax(axis=0)
        # The largest eigen value gives the approximate length of the
        # bounding ellipse around the largest word. Following the index of
        # the largest eigen value into the eigenvector columns gives the
        # x and y components of the dominant direction.
        y = eigenvectors[1, max_index]
        x = eigenvectors[0, max_index]
        angle = (np.arctan2(y, x)) * (180 / np.pi)
        all_angles.append(angle)
        contour_count += 1
    logging.debug(contour_count)
    logging.debug(all_angles)
    angle = np.mean(all_angles)
    logging.debug(angle)
    k = 0
    # Drop degenerate angles (exactly 0 or 90 degrees).
    non_zero_angles = []
    for i in all_angles:
        if ((i != 0) and (i != 90.0)):
            non_zero_angles.append(i)
    logging.debug(non_zero_angles)
    rounded_angles = []
    for i in non_zero_angles:
        rounded_angles.append(np.round(i, 0))
    logging.debug(rounded_angles)
    logging.debug("mode is")
    # logging.debug(np.mode(rounded_angles))
    # angle = np.mean(non_zero_angles)
    # angle = np.mode(rounded_angles)
    # Robust consensus: take the most common rounded angle, then average
    # the raw angles that round to it.
    mode_angle = mode(rounded_angles)[0][0]
    logging.debug(mode_angle)
    precision_angles = []
    for i in non_zero_angles:
        if (np.round(i, 0) == mode_angle):
            precision_angles.append(i)
    logging.debug('precision angles:')
    logging.debug(precision_angles)
    angle = np.mean(precision_angles)
    logging.debug('Finally, the required angle is:')
    logging.debug(angle)
    # M = cv2.getRotationMatrix2D((width/2,height/2),-(90+angle),1)
    # NOTE(review): width/height here come from the *last* current_contour
    # of the loop above, not width_orig/height_orig -- this works only
    # because every current_contour has img's shape; confirm.
    M = cv2.getRotationMatrix2D((width / 2, height / 2), -(90 + angle), 1)
    dst = cv2.warpAffine(img, M, (width_orig, height_orig))
    # cv2.imshow('final',dst)
    cv2.imwrite('skewcorrected2.jpg', dst)
    return dst
def preprocess(img):
    """Run the full pre-processing pipeline (currently: de-skew only)."""
    corrected = skew_correction(img)
    return corrected
# Does not work with linux:
# cv2.destroyAllWindows()
|
[
"vishvas.vasuki@gmail.com"
] |
vishvas.vasuki@gmail.com
|
4aa5f2afb04c078af8ed9f1cb0036c1589a50253
|
4012f290d83ae7f4c09d7440f26d2acd7e63efbe
|
/1173.py
|
84b870ec4827293d81d8ffc5b50fe0a1e8918e42
|
[] |
no_license
|
jinaur/codeup
|
ffc2d0fdf73892c1f46d80021ad8f4c1293c9e2e
|
5f75ace909e2b3151171932cc3ee9f3c49dd46d9
|
refs/heads/master
| 2023-04-15T07:42:06.244806
| 2021-04-25T13:59:42
| 2021-04-25T13:59:42
| 277,760,813
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
a, b = map(int, input().split())
def my_func(a, b):
    """Print the clock time a:b moved back by 30 minutes (24-hour wrap)."""
    if b < 30:
        # Borrow an hour; hour 0 wraps around to 23.
        a = 23 if a == 0 else a - 1
        b += 30
    else:
        b -= 30
    print(a, b)
r = my_func(a, b)
|
[
"50763720+jinaur@users.noreply.github.com"
] |
50763720+jinaur@users.noreply.github.com
|
4f69ae38888dca5bfa8b94ef6888374f34854149
|
cb3634622480f918540ff3ff38c96990a1926fda
|
/PyProject/sparkproject/rdd/pairRdd/transformations/reduceByKey.py
|
513009ff5dc829627635a3d07fae951f883b828a
|
[] |
no_license
|
jacksonyoudi/AlgorithmCode
|
cab2e13cd148354dd50a0487667d38c25bb1fd9b
|
216299d43ee3d179c11d8ca0783ae16e2f6d7c88
|
refs/heads/master
| 2023-04-28T07:38:07.423138
| 2022-10-23T12:45:01
| 2022-10-23T12:45:01
| 248,993,623
| 3
| 0
| null | 2023-04-21T20:44:40
| 2020-03-21T14:32:15
|
Go
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
# coding: Utf-8
from pyspark import SparkConf, SparkContext
if __name__ == '__main__':
    # Local Spark context using all available cores.
    conf = SparkConf().setAppName("rdd").setMaster("local[*]")
    sc = SparkContext(conf=conf)
    a = ["a", "a", "c", "d", "d", "c", "e"]
    b = [1, 2, 3, 4, 1, 3, 7]
    # Pair the keys with their values: [("a", 1), ("a", 2), ...]
    data = list(zip(a, b))
    disData = sc.parallelize(data)
    # Sum values per key, e.g. a -> 1+2, c -> 3+3, d -> 4+1, e -> 7.
    d = disData.reduceByKey(lambda x, y: x + y)
    print(d.collect())
|
[
"liangchangyoujackson@gmail.com"
] |
liangchangyoujackson@gmail.com
|
e402fa1c2ce544c37f3d20d570e1726d78557508
|
d4412fbe37540e2c4cbe59ed6503d3661ccb7d9c
|
/tests/test_analyzer/test_subclasses/test_flop_tensor.py
|
4e9c9852649ba593d37e20f9a9a414fd2a2a04f4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
hpcaitech/ColossalAI
|
a082ed08a3807b53c49d1f86835b9808590d9042
|
c7b60f75470f067d1342705708810a660eabd684
|
refs/heads/main
| 2023-09-01T04:13:13.834565
| 2023-08-30T15:07:21
| 2023-08-30T15:07:21
| 422,274,596
| 32,044
| 4,084
|
Apache-2.0
| 2023-09-14T15:19:54
| 2021-10-28T16:19:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,925
|
py
|
import pytest
import torch
import torch.nn.functional as F
import torchvision.models as tm
from packaging import version
from colossalai.testing import clear_cache_before_run, parameterize
from tests.test_analyzer.test_fx.zoo import tm_models, tmm_models
try:
from colossalai._analyzer._subclasses import MetaTensorMode, flop_count
except:
pass
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
@pytest.mark.parametrize('m', tm_models + tmm_models)
def test_flop_count_module(m):
    """Every model-zoo entry must report positive fwd and bwd FLOP counts."""
    x = torch.rand(2, 3, 224, 224)
    with MetaTensorMode():    # save time for testing
        module = m()
        rs_fwd, rs_bwd = flop_count(module, x, verbose=True)
        assert rs_fwd > 0, f'fwd flop count of {m.__name__} is {rs_fwd}'
        assert rs_bwd > 0, f'bwd flop count of {m.__name__} is {rs_bwd}'
# Functional-API edge cases as (callable, args, kwargs) triples: in-place
# relu, pooling with non-default kwargs, and the three-input torch.where.
odd_cases = [
    (F.relu, (torch.rand(2, 3, 224, 224, requires_grad=True),), {
        'inplace': True
    }),
    (F.max_pool2d, (torch.rand(2, 3, 224, 224, requires_grad=True),), {
        'kernel_size': 3,
        'stride': 2,
        'padding': 1,
        'dilation': 2
    }),
    (torch.where, (torch.rand(2, 3, 224, 224) > 0.5, torch.rand(2, 3, 224, 224, requires_grad=True),
                   torch.rand(2, 3, 224, 224, requires_grad=True)), {}),
]
@pytest.mark.skipif(version.parse(torch.__version__) < version.parse('1.12.0'), reason='torch version < 12')
@pytest.mark.parametrize('func, args, kwargs', odd_cases)
def test_flop_count_function(func, args, kwargs):
    """Each odd-case functional call must report positive fwd/bwd FLOPs."""
    rs_fwd, rs_bwd = flop_count(func, *args, **kwargs, verbose=True)
    assert rs_fwd > 0, f'fwd flop count of {func.__name__} is {rs_fwd}'
    assert rs_bwd > 0, f'bwd flop count of {func.__name__} is {rs_bwd}'
if __name__ == '__main__':
test_flop_count_module(tm.resnet18)
test_flop_count_function(F.relu, (torch.rand(2, 3, 224, 224, requires_grad=True),), {'inplace': True})
|
[
"noreply@github.com"
] |
hpcaitech.noreply@github.com
|
310a3e2b195bd96ba7d266f470ce7bf23891162a
|
5210993914691c70076be979aa5c57c33d5d3bc4
|
/Programming101-3/Week_1/The_Final_Round/unique_words_count.py
|
daf32bc0de62796c448ac4f7695c67a3dea43851
|
[] |
no_license
|
presian/HackBulgaria
|
d29f84ab7edc85a4d8dfbf055def7d0be783539e
|
8bc95bb31daeb1f5a313d25b928f505013f5f0b0
|
refs/heads/master
| 2021-01-10T04:38:05.759005
| 2015-10-15T07:05:21
| 2015-10-15T07:05:21
| 36,889,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
def unique_words_count(arr):
    """Return the number of distinct elements in arr.

    :param arr: iterable of hashable items (typically words)
    :return: count of unique items
    """
    # set(arr) replaces the redundant identity comprehension {x for x in arr}.
    return len(set(arr))
def main():
    """Demonstrate unique_words_count on a few sample word lists."""
    samples = [
        ["apple", "banana", "apple", "pie"],
        ["python", "python", "python", "ruby"],
        ["HELLO!"] * 10,
    ]
    for words in samples:
        print(unique_words_count(words))


if __name__ == '__main__':
    main()
|
[
"presiandanailov@gmail.com"
] |
presiandanailov@gmail.com
|
3332366bee34f0fc8461908bc0793a908fe55b86
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/purview/azext_purview/manual/_help.py
|
0508662c698375e3bf665656ff64d7a2955da878
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 5,143
|
py
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['purview'] = '''
type: group
short-summary: Manage Purview
'''
helps['purview account'] = """
type: group
short-summary: Manage account with purview
"""
helps['purview account list'] = """
type: command
short-summary: "List accounts in ResourceGroup And List accounts in Subscription."
examples:
- name: Accounts_ListByResourceGroup
text: |-
az purview account list --resource-group "SampleResourceGroup"
- name: Accounts_ListBySubscription
text: |-
az purview account list
"""
helps['purview account show'] = """
type: command
short-summary: "Get an account."
examples:
- name: Accounts_Get
text: |-
az purview account show --name "account1" --resource-group "SampleResourceGroup"
"""
helps['purview account create'] = """
type: command
short-summary: "Create an account."
examples:
- name: Accounts_CreateOrUpdate
text: |-
az purview account create --location "WestUS2" --managed-group-name "custom-rgname" \
--name "account1" --resource-group "SampleResourceGroup"
"""
helps['purview account update'] = """
type: command
short-summary: "Updates an account."
examples:
- name: Accounts_Update
text: |-
az purview account update --name "account1" --tags newTag="New tag value." --resource-group \
"SampleResourceGroup"
"""
helps['purview account delete'] = """
type: command
short-summary: "Deletes an account resource."
examples:
- name: Accounts_Delete
text: |-
az purview account delete --name "account1" --resource-group "SampleResourceGroup"
"""
helps['purview account add-root-collection-admin'] = """
type: command
short-summary: "Add the administrator for root collection associated with this account."
examples:
- name: Accounts_AddRootCollectionAdmin
text: |-
az purview account add-root-collection-admin --name "account1" --object-id \
"7e8de0e7-2bfc-4e1f-9659-2a5785e4356f" --resource-group "SampleResourceGroup"
"""
helps['purview account list-key'] = """
type: command
short-summary: "List the authorization keys associated with this account."
examples:
- name: Accounts_ListKeys
text: |-
az purview account list-key --name "account1" --resource-group "SampleResourceGroup"
"""
helps['purview account wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the purview account is met.
examples:
- name: Pause executing next line of CLI script until the purview account is successfully created.
text: |-
az purview account wait --name "account1" --resource-group "SampleResourceGroup" --created
- name: Pause executing next line of CLI script until the purview account is successfully updated.
text: |-
az purview account wait --name "account1" --resource-group "SampleResourceGroup" --updated
- name: Pause executing next line of CLI script until the purview account is successfully deleted.
text: |-
az purview account wait --name "account1" --resource-group "SampleResourceGroup" --deleted
"""
helps['purview default-account'] = """
type: group
short-summary: Manage default account with purview
"""
helps['purview default-account show'] = """
type: command
short-summary: "Get the default account for the scope."
examples:
- name: DefaultAccounts_Get
text: |-
az purview default-account show --scope "12345678-1234-1234-12345678abc" --scope-tenant-id \
"12345678-1234-1234-12345678abc" --scope-type "Tenant"
"""
helps['purview default-account remove'] = """
type: command
short-summary: "Removes the default account from the scope."
examples:
- name: DefaultAccounts_Remove
text: |-
az purview default-account remove --scope "12345678-1234-1234-12345678abc" --scope-tenant-id \
"12345678-1234-1234-12345678abc" --scope-type "Tenant"
"""
helps['purview default-account set'] = """
type: command
short-summary: "Sets the default account for the scope."
examples:
- name: DefaultAccounts_Set
text: |-
az purview default-account set --account-name "myDefaultAccount" --resource-group "rg-1" --scope \
"12345678-1234-1234-12345678abc" --scope-tenant-id "12345678-1234-1234-12345678abc" --scope-type "Tenant" \
--subscription-id "12345678-1234-1234-12345678aaa"
"""
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
4553cecf9a964d9436652c40173f996ef96764d9
|
3b981dfc835d36eb9bb86e4dbb0b1e332285d5cf
|
/nkcomments/tests.py
|
a18b4126c658b2f908cb0544717e797b4646133b
|
[] |
no_license
|
richraines/nuortenideat
|
d9ad5ff33e4231c7f9960b9e1a54be16395173a2
|
033f63575c52ce118f0deba1168afca743de6c26
|
refs/heads/master
| 2020-09-01T01:39:39.137935
| 2016-10-31T14:24:59
| 2016-10-31T14:24:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,945
|
py
|
# coding=utf-8
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from account.factories import UserFactory, DEFAULT_PASSWORD
from content.factories import IdeaFactory
from content.models import Idea
from nuka.test.testcases import TestCase
from .factories import CustomCommentFactory
from .models import CustomComment
"""
# TODO: Login works, FIXME.
class DeleteTest(TestCase):
def setUp(self):
self.idea = IdeaFactory()
self.idea_content_type = ContentType.objects.get_for_model(Idea)
self.group_admin = Group.objects.get(name=GROUP_NAME_ADMINS)
self.group_moderator = Group.objects.get(name=GROUP_NAME_MODERATORS)
self.user = UserFactory(settings__first_name="Matti",
settings__last_name="Meikäläinen")
def test_own(self):
self.user.groups.clear()
self.user.groups.add(self.group_moderator)
login = self.client.login(username=self.user.username, password=DEFAULT_PASSWORD)
self.assertNotEqual(login, False)
comment = CustomComment.objects.create(
content_type=self.idea_content_type,
object_pk=self.idea.pk,
user=self.user,
comment="Some comment text",
site_id=1
)
resp = self.client.post("/fi/ideat/poista_kommentti/{0}/".format(comment.pk),
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "nkcomments/comment_list_item_deleted.html")
self.assertContains(resp, "Kommentti poistettu.")
self.assertNotContains(resp, comment.comment)
with self.assertRaises(ObjectDoesNotExist):
CustomComment.objects.get(pk=comment.pk)
def test_as_moderator(self):
pass
def test_unauthorized(self):
pass
"""
class WriteCommentTest(TestCase):
    """Integration tests for the comment block on the idea detail page."""

    def setUp(self):
        pass

    def manual_set_up(self, public=True, login=False):
        """Create an idea (published+public or private draft) and optionally
        log its creator in.  Called explicitly per test instead of setUp so
        each test picks its own visibility/login combination."""
        if public:
            status = Idea.STATUS_PUBLISHED
            visibility = Idea.VISIBILITY_PUBLIC
        else:
            status = Idea.STATUS_DRAFT
            visibility = Idea.VISIBILITY_DRAFT
        self.user = UserFactory()
        self.idea = IdeaFactory(
            creator=self.user,
            status=status,
            visibility=visibility,
        )
        if login:
            self.user = self.idea.creator
            self.client.login(username=self.user.username,
                              password=DEFAULT_PASSWORD)

    def test_comment_block_visibility_public_idea(self):
        # Anonymous visitors see the (empty) comment section on a public idea.
        self.manual_set_up()
        resp = self.client.get('/fi/ideat/{}'.format(self.idea.pk), follow=True)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'content/idea_detail.html')
        self.assertContains(resp, '<article id="comments"')
        self.assertContains(resp, '<h4>Kommentit (0)</h4>')

    def test_comment_block_pvisibility_not_public_idea(self):
        # A draft idea must not render the comment section, even for its owner.
        self.manual_set_up(public=False, login=True)
        resp = self.client.get('/fi/ideat/{}'.format(self.idea.pk), follow=True)
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'content/idea_detail.html')
        self.assertNotContains(resp, '<div class="well" id="comments">')

    def test_write_comment(self):
        # A created comment is persisted and rendered on the detail page.
        self.manual_set_up(login=True)
        comment = CustomCommentFactory(
            content_type=ContentType.objects.get_for_model(self.idea),
            object_pk=self.idea.pk
        )
        comment_cmp = CustomComment.objects.first()
        self.assertIsNotNone(comment_cmp)
        self.assertEqual(comment_cmp, comment)
        resp = self.client.get('/fi/ideat/{}'.format(self.idea.pk), follow=True)
        self.assertContains(resp, comment.comment)

    def test_comment_block_necessary_elements(self):
        # Logged-in commenters get no name field, but do get a delete link
        # ("Poista kommentti") for their own comment.
        self.manual_set_up(login=True)
        CustomCommentFactory(
            content_type=ContentType.objects.get_for_model(self.idea),
            object_pk=self.idea.pk,
            user_id=self.user.pk
        )
        resp = self.client.get('/fi/ideat/{}'.format(self.idea.pk), follow=True)
        self.assertNotContains(resp, '<div id="id_name_wrap"')
        self.assertContains(resp, 'title="Poista kommentti"')

    def test_comment_block_necessary_elements_anonymous(self):
        # Anonymous visitors get name + comment inputs and no delete link.
        self.manual_set_up()
        CustomCommentFactory(
            content_type=ContentType.objects.get_for_model(self.idea),
            object_pk=self.idea.pk,
        )
        resp = self.client.get('/fi/ideat/{}'.format(self.idea.pk), follow=True)
        self.assertNotContains(resp, '<input id="id_name" name="name" type="hidden">')
        self.assertContains(resp, '<div id="id_name_wrap"')
        self.assertContains(resp, '<div id="id_comment_wrap"')
        self.assertNotContains(resp, 'title="Poista kommentti"')
|
[
"erno@fns.fi"
] |
erno@fns.fi
|
b5502ff8609f59db993800579ab69261e4b48e43
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02887/s396715066.py
|
5b052e1ae14ff4d5ca6061b10c85c914d5c652a3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
# Count the number of maximal runs of equal characters in the first n
# characters of s (AtCoder-style input: length on line 1, string on line 2).
n = int(input())
s = input()
# Idiom: itertools.groupby yields one group per run, replacing the
# hand-rolled nested while loops; s[:n] also guards against s shorter
# than the declared length (the original would raise IndexError).
from itertools import groupby
ans = sum(1 for _ in groupby(s[:n]))
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7cacc1bab1526fbce224cf19ba5f0e8b7d08435c
|
8b0cf90774310723282c525a491d0df5ccdbaddc
|
/blog3/apps/articles/urls.py
|
dcfde7c9c4fe4f3e8d0bd47fc031969a95517c81
|
[] |
no_license
|
LIMr1209/django-practive
|
13d0b1a02761010643405b8233bc2b1c1ebf8622
|
deb01dac652cda7ef7117d3ecef3546092cef97c
|
refs/heads/master
| 2020-03-24T06:16:58.839185
| 2018-08-19T06:25:45
| 2018-08-19T06:25:45
| 142,522,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
"""blog3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from .views import article_detail, comment_add, comment_delete, article_add, love_add
urlpatterns = [
    # Article detail page; the positional group captures a numeric pk.
    url(r'^article_detail/(\d+)$', article_detail, name='article_detail'),
    # Add / delete a comment; presumably the pk identifies the article for
    # add and the comment for delete -- verify against the views.
    url(r'^comment_add/(\d+)$', comment_add, name='comment_add'),
    url(r'^comment_delete/(\d+)$', comment_delete, name='comment_delete'),
    # Create a new article.
    url(r'^article_add/$', article_add, name='article_add'),
    # Add a "love" (like); no pk in the URL, so the target is presumably
    # passed in the request data -- verify against the view.
    url(r'^love_add/$', love_add, name='love_add'),
]
|
[
"aaa1058169464@126.com"
] |
aaa1058169464@126.com
|
0b2cf2939ef3f48228585318267019e47b06095b
|
6527b66fd08d9e7f833973adf421faccd8b765f5
|
/yuancloud/addons/hr_holidays/hr_department.py
|
7d85f388aed16347ff5c1459fc3ae78817e442c2
|
[] |
no_license
|
cash2one/yuancloud
|
9a41933514e57167afb70cb5daba7f352673fb4d
|
5a4fd72991c846d5cb7c5082f6bdfef5b2bca572
|
refs/heads/master
| 2021-06-19T22:11:08.260079
| 2017-06-29T06:26:15
| 2017-06-29T06:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
# -*- coding: utf-8 -*-
import datetime
from dateutil.relativedelta import relativedelta
from yuancloud import api, fields, models
from yuancloud.tools import DEFAULT_SERVER_DATETIME_FORMAT
class hr_department(models.Model):
    """Extend hr.department with computed holiday/allocation counters."""
    _inherit = 'hr.department'

    @api.multi
    def _compute_leave_count(self):
        """Compute per department: leave requests awaiting approval,
        allocation requests awaiting approval, and leaves overlapping the
        current UTC day."""
        Holiday = self.env['hr.holidays']
        today_date = datetime.datetime.utcnow().date()
        today_start = today_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)  # get the midnight of the current utc day
        today_end = (today_date + relativedelta(hours=23, minutes=59, seconds=59)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        # Leave requests (type 'remove') still in 'confirm' state.
        leave_data = Holiday.read_group(
            [('department_id', 'in', self.ids),
             ('state', '=', 'confirm'), ('type', '=', 'remove')],
            ['department_id'], ['department_id'])
        # Allocation requests (type 'add') still in 'confirm' state.
        allocation_data = Holiday.read_group(
            [('department_id', 'in', self.ids),
             ('state', '=', 'confirm'), ('type', '=', 'add')],
            ['department_id'], ['department_id'])
        # Leaves overlapping today, excluding cancelled/refused ones.
        absence_data = Holiday.read_group(
            [('department_id', 'in', self.ids), ('state', 'not in', ['cancel', 'refuse']),
             ('date_from', '<=', today_end), ('date_to', '>=', today_start), ('type', '=', 'remove')],
            ['department_id'], ['department_id'])
        # Map department id -> group count; read_group rows expose the
        # grouped field as (id, display_name) plus a *_count entry.
        res_leave = dict((data['department_id'][0], data['department_id_count']) for data in leave_data)
        res_allocation = dict((data['department_id'][0], data['department_id_count']) for data in allocation_data)
        res_absence = dict((data['department_id'][0], data['department_id_count']) for data in absence_data)
        for department in self:
            department.leave_to_approve_count = res_leave.get(department.id, 0)
            department.allocation_to_approve_count = res_allocation.get(department.id, 0)
            department.absence_of_today = res_absence.get(department.id, 0)

    @api.multi
    def _compute_total_employee(self):
        """Count employees per department via a single grouped read."""
        emp_data = self.env['hr.employee'].read_group([('department_id', 'in', self.ids)], ['department_id'], ['department_id'])
        result = dict((data['department_id'][0], data['department_id_count']) for data in emp_data)
        for department in self:
            department.total_employee = result.get(department.id, 0)

    # Computed counter fields backed by the methods above.
    absence_of_today = fields.Integer(
        compute='_compute_leave_count', string='Absence by Today')
    leave_to_approve_count = fields.Integer(
        compute='_compute_leave_count', string='Leave to Approve')
    allocation_to_approve_count = fields.Integer(
        compute='_compute_leave_count', string='Allocation to Approve')
    total_employee = fields.Integer(
        compute='_compute_total_employee', string='Total Employee')
|
[
"liuganghao@lztogether.com"
] |
liuganghao@lztogether.com
|
7c3c0c5a5449b05da0d3b2c0e20af4bdc954d868
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/13506401.py
|
2ee175731dd094ef713916c24d807afb652a3e37
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/13506401.py generated: Wed, 25 Jan 2017 15:25:23
#
# Event Type: 13506401
#
# ASCII decay Descriptor: [B_s0 -> (tau+ -> pi+ pi- pi+ (pi0 -> gamma gamma) anti-nu_tau) (tau- -> pi+ pi- pi- (pi0 -> gamma gamma) nu_tau)]cc
#
from Configurables import Generation
Generation().EventType = 13506401
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_tautau,pipipipi0nu=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13506401
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
47f25718bc8fa8fde5832344631f295bcac6eb3e
|
230b7714d61bbbc9a75dd9adc487706dffbf301e
|
/tools/binary_size/libsupersize/diff_test.py
|
93ded263b681ff5749aee9d022c882345bbc8fc3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"MIT",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.0-only",
"APSL-2.0",
"LicenseRef-scancode-unknown",
"Zlib"
] |
permissive
|
byte4byte/cloudretro
|
efe4f8275f267e553ba82068c91ed801d02637a7
|
4d6e047d4726c1d3d1d119dfb55c8b0f29f6b39a
|
refs/heads/master
| 2023-02-22T02:59:29.357795
| 2021-01-25T02:32:24
| 2021-01-25T02:32:24
| 197,294,750
| 1
| 2
|
BSD-3-Clause
| 2019-09-11T19:35:45
| 2019-07-17T01:48:48
| null |
UTF-8
|
Python
| false
| false
| 7,313
|
py
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import diff
import models
def _MakeSym(section, size, path, name=None):
if name is None:
# Trailing letter is important since diffing trims numbers.
name = '{}_{}A'.format(section[1:], size)
return models.Symbol(
section,
size,
full_name=name,
template_name=name,
name=name,
object_path=path)
def _SetName(symbol, full_name, name=None):
if name is None:
name = full_name
symbol.full_name = full_name
symbol.template_name = full_name
symbol.name = name
def _CreateSizeInfo(aliases=None):
section_sizes = {'.text': 100, '.bss': 40}
TEXT = models.SECTION_TEXT
symbols = [
_MakeSym(TEXT, 10, 'a'),
_MakeSym(TEXT, 20, 'a'),
_MakeSym(TEXT, 30, 'b'),
_MakeSym(TEXT, 40, 'b'),
_MakeSym(TEXT, 50, 'b'),
_MakeSym(TEXT, 60, ''),
]
if aliases:
for tup in aliases:
syms = symbols[tup[0]:tup[1]]
for sym in syms:
sym.aliases = syms
return models.SizeInfo(section_sizes, symbols)
class DiffTest(unittest.TestCase):
def testIdentity(self):
size_info1 = _CreateSizeInfo()
size_info2 = _CreateSizeInfo()
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
self.assertEquals(0, d.raw_symbols.padding)
def testSimple_Add(self):
size_info1 = _CreateSizeInfo()
size_info2 = _CreateSizeInfo()
size_info1.raw_symbols -= [size_info1.raw_symbols[0]]
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 1, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(10, d.raw_symbols.size)
self.assertEquals(0, d.raw_symbols.padding)
def testSimple_Delete(self):
size_info1 = _CreateSizeInfo()
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols -= [size_info2.raw_symbols[0]]
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 0, 1), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(-10, d.raw_symbols.size)
self.assertEquals(0, d.raw_symbols.padding)
def testSimple_Change(self):
size_info1 = _CreateSizeInfo()
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols[0].size += 11
size_info2.raw_symbols[0].padding += 20
size_info2.raw_symbols[-1].size += 11
d = diff.Diff(size_info1, size_info2)
self.assertEquals((2, 1, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(22, d.raw_symbols.size)
self.assertEquals(20, d.raw_symbols.padding)
def testDontMatchAcrossSections(self):
size_info1 = _CreateSizeInfo()
size_info1.raw_symbols += [
_MakeSym(models.SECTION_TEXT, 11, 'asdf', name='Hello'),
]
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols += [
_MakeSym(models.SECTION_RODATA, 11, 'asdf', name='Hello'),
]
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 1, 1), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testAliases_Remove(self):
size_info1 = _CreateSizeInfo(aliases=[(0, 3)])
size_info2 = _CreateSizeInfo(aliases=[(0, 2)])
d = diff.Diff(size_info1, size_info2)
# Aliases cause all sizes to change.
self.assertEquals((3, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testAliases_Add(self):
size_info1 = _CreateSizeInfo(aliases=[(0, 2)])
size_info2 = _CreateSizeInfo(aliases=[(0, 3)])
d = diff.Diff(size_info1, size_info2)
# Aliases cause all sizes to change.
self.assertEquals((3, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testAliases_ChangeGroup(self):
size_info1 = _CreateSizeInfo(aliases=[(0, 2), (2, 5)])
size_info2 = _CreateSizeInfo(aliases=[(0, 3), (3, 5)])
d = diff.Diff(size_info1, size_info2)
# Aliases cause all sizes to change.
self.assertEquals((4, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testStarSymbolNormalization(self):
size_info1 = _CreateSizeInfo()
_SetName(size_info1.raw_symbols[0], '* symbol gap 1 (end of section)')
size_info2 = _CreateSizeInfo()
_SetName(size_info2.raw_symbols[0], '* symbol gap 2 (end of section)')
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testNumberNormalization(self):
TEXT = models.SECTION_TEXT
size_info1 = _CreateSizeInfo()
size_info1.raw_symbols += [
_MakeSym(TEXT, 11, 'a', name='.L__unnamed_1193'),
_MakeSym(TEXT, 22, 'a', name='.L__unnamed_1194'),
_MakeSym(TEXT, 33, 'a', name='SingleCategoryPreferences$3#this$0'),
_MakeSym(TEXT, 44, 'a', name='.L.ref.tmp.2'),
]
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols += [
_MakeSym(TEXT, 11, 'a', name='.L__unnamed_2194'),
_MakeSym(TEXT, 22, 'a', name='.L__unnamed_2195'),
_MakeSym(TEXT, 33, 'a', name='SingleCategoryPreferences$9#this$009'),
_MakeSym(TEXT, 44, 'a', name='.L.ref.tmp.137'),
]
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testChangedParams(self):
size_info1 = _CreateSizeInfo()
size_info1.raw_symbols[0].full_name = 'Foo()'
size_info1.raw_symbols[0].name = 'Foo'
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols[0].full_name = 'Foo(bool)'
size_info2.raw_symbols[0].name = 'Foo'
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testChangedPaths(self):
size_info1 = _CreateSizeInfo()
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols[0].object_path = 'asdf'
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 0, 0), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testChangedPaths_ChangedParams(self):
size_info1 = _CreateSizeInfo()
size_info1.raw_symbols[0].full_name = 'Foo()'
size_info1.raw_symbols[0].name = 'Foo'
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols[0].full_name = 'Foo(bool)'
size_info2.raw_symbols[0].name = 'Foo'
size_info2.raw_symbols[0].object_path = 'asdf'
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 1, 1), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
def testChangedPaths_StringLiterals(self):
size_info1 = _CreateSizeInfo()
size_info1.raw_symbols[0].full_name = models.STRING_LITERAL_NAME
size_info2 = _CreateSizeInfo()
size_info2.raw_symbols[0].full_name = models.STRING_LITERAL_NAME
size_info2.raw_symbols[0].object_path = 'asdf'
d = diff.Diff(size_info1, size_info2)
self.assertEquals((0, 1, 1), d.raw_symbols.CountsByDiffStatus()[1:])
self.assertEquals(0, d.raw_symbols.size)
if __name__ == '__main__':
unittest.main()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
09b6e86dc545020ba63dfe09f0a5961ed2def2ff
|
028274f08da4c616ccc1362df390dcfe58131fc6
|
/DS_Management_Tools/TaskController/EESMap/__init__.py
|
dad1405df121f6c10a6267c6a2520494f8e5c7f4
|
[] |
no_license
|
PyWilhelm/EDRIS_DS
|
8a5430515bfc7e11abf846126f4fa2388ff59dd9
|
cc0179495d8874ff5a95fd08d833388f434e1d87
|
refs/heads/master
| 2021-01-17T00:43:01.560668
| 2016-07-23T10:50:09
| 2016-07-23T10:50:09
| 64,011,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
from TaskController.SPS.MetataskOverriderSPS import MetataskOverriderSPS
from TaskController.BaseClass.Controller import Controller
from TaskController.PISystem import PostProcessing
def start_controller(tid, input_data, session={}, controller=None, block=True, method='', sample=False):
_dir = os.path.dirname(os.path.abspath(__file__))
if method == '':
controller = Controller(priority=3) if controller is None else controller
with open(os.path.join(_dir, 'metataskEESMap.json')) as f:
metatask_data = json.load(f)
else:
raise Exception("Method " + method + " unknown")
session['controller'] = controller
# metatask_data = override(metatask_data, input_data, method)
result_future = controller.add_metatask(metatask_data)
if not block:
return result_future
else:
result = result_future.get()
result.save_as_sdf()
controller.stop()
return True
def override(metatask_temp, userinput, plot):
mto = MetataskOverriderSPS(metatask_temp)
return mto.override_all(userinput, plot)
if __name__ == "__main__":
input_data = dict()
result = start_controller(tid=100, input_data=input_data)
|
[
"ziyang.li.nk@gmail.com"
] |
ziyang.li.nk@gmail.com
|
db727ffa84eef94dfc2beca0189cb6f147dd7d5b
|
065191d9e09ecda3966d96770d166371bcbba515
|
/troposphere/helpers/meta.py
|
0d96a8ed25a18bfb7ede095d1f80e67aa001fa12
|
[
"MIT"
] |
permissive
|
sabakaio/docker-registry
|
2da96e14c1a7f1d50a7998e355b6980617ce0355
|
720a800e5f7f02ff1ec5d9b1d559a2dd6114f7f1
|
refs/heads/master
| 2020-04-10T13:30:10.923260
| 2016-12-13T16:24:26
| 2016-12-13T16:30:32
| 61,976,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,225
|
py
|
from troposphere import Ref, Base64, Join
from troposphere import cloudformation as cf, ec2, autoscaling as au
def docker():
return cf.InitConfig(
'Docker',
packages={'yum': {'docker': []}},
commands={
'docker_user': {
'command': 'usermod -aG docker ec2-user'
},
'install_compose': {
'command': 'pip install docker-compose'
},
},
services={
'sysvinit': {
'docker': {
'enabled': True,
'ensureRunning': True
}
}
}
)
def htpasswd(filename):
return cf.InitConfig(
'htpasswd',
files={
filename: {
'content': 'user:password_hash',
'mode': '000660',
'owner': 'root',
'group': 'docker',
},
}
)
def docker_compose(name, compose_yml):
name = name.lower()
compose_file = '/opt/{n}/docker-compose.yml'.format(n=name)
init = cf.InitConfig(
'Compose' + name.title(),
files={
compose_file: {
'content': compose_yml,
'mode': '000664',
'owner': 'root',
'group': 'docker',
},
},
commands={
'up': {
'command': '/usr/local/bin/docker-compose -f {f} up -d'.format(f=compose_file)
},
}
)
return init, compose_file
def certbot(domain, email, conf_dir='/opt/certs/', copy_to=None,
pre_hook=None, post_hook=None):
script_name = '/opt/certbot-auto'
commands = {
'1_get_cert': {
'command': Join(' ', [
script_name, 'certonly',
'--config-dir', conf_dir,
'--standalone --debug --agree-tos --non-interactive',
'-d', domain,
'--email', email,
])
}
}
renew_script = [
'#/bin/bash -e\n',
'unset PYTHON_INSTALL_LAYOUT\n',
script_name + ' renew --config-dir ' + conf_dir,
' --debug --non-interactive',
]
if pre_hook:
renew_script.append(' --pre-hook="' + pre_hook + '"')
copy_certs = None
if copy_to:
copy_certs = Join('', [
'cp ' + conf_dir.rstrip('/') + '/live/', domain, '/*.pem ', copy_to
])
commands.update({
'2_certs_dest': {
'command': 'mkdir -p ' + copy_to,
},
'3_copy_certs': {
'command': copy_certs,
},
})
# Copy certificated and/or run a custop post-hook
if copy_certs or post_hook:
hook = [' --post-hook="']
if copy_certs:
hook.append(copy_certs)
if post_hook:
hook.extend([' && ', post_hook])
hook.append('"')
renew_script.append(hook)
return cf.InitConfig(
'Certbot',
files={
script_name: {
'source': 'https://dl.eff.org/certbot-auto',
'mode': '000755',
'owner': 'root',
'group': 'root',
},
'/etc/cron.daily/certbot_renew': {
'content': Join('', renew_script),
'mode': '000755',
'owner': 'root',
'group': 'root',
},
},
commands=commands
)
def add_init(target, *configs):
assert isinstance(target, (ec2.Instance, au.LaunchConfiguration))
params = Join('', [
'export CFN_PARAMS=\'',
' --region ', Ref('AWS::Region'),
' --stack ', Ref('AWS::StackName'),
' --resource ' + target.title + '\'',
])
target.UserData = Base64(Join('\n', [
'#!/bin/bash -xe',
'yum update -y',
params,
'/opt/aws/bin/cfn-init -v -c default $CFN_PARAMS',
'/opt/aws/bin/cfn-signal -e 0 $CFN_PARAMS'
]))
configs = [callable(c) and c() or c for c in configs]
target.Metadata = cf.Init(
cf.InitConfigSets(default=[c.title for c in configs]),
**{c.title: c for c in configs})
return target
|
[
"anton.egoroff@gmail.com"
] |
anton.egoroff@gmail.com
|
0db997ac3b71508a9edbdb5eb3f6387a318c9d51
|
2b8e7eadb920e96c75697880a9c5461aa8e0c5ed
|
/nabu/neuralnetworks/components/__init__.py
|
a5db902e5340481df5b5861383c42f9afb4ac971
|
[
"MIT"
] |
permissive
|
ishandutta2007/nabu
|
fb963ed3cd34ee340014e0c1e77927c838bba0ad
|
313018a46f68cec1d4a7eb15b8b1cf68111a959c
|
refs/heads/master
| 2020-04-03T04:57:57.911576
| 2018-12-14T11:02:52
| 2018-12-14T11:02:52
| 155,029,958
| 0
| 0
|
MIT
| 2018-12-06T18:20:12
| 2018-10-28T02:59:31
|
Python
|
UTF-8
|
Python
| false
| false
| 146
|
py
|
'''@package components
contains tensorflow components'''
from . import hooks, ops, rnn_cell, layer, beam_search_decoder, constraints,\
attention
|
[
"vincent.renkens@esat.kuleuven.be"
] |
vincent.renkens@esat.kuleuven.be
|
17e7706740822fc7a717ced217bcdb8f7770aece
|
e0db13bc8113fb7b383d0a8d09e09686668e2fb4
|
/Data-Structures-and-Algorithms/Big-O-Notations.py
|
bdb3512128e595a28ba98d081d46d63601c547a3
|
[] |
no_license
|
nirmalnishant645/Python-Programming
|
dd66acd665af8933fa14b19d01300deb1eccbb7d
|
70e97e6f35f125acfde3b38e1baa794a357b8a77
|
refs/heads/master
| 2022-06-03T12:41:56.483000
| 2022-05-12T10:54:59
| 2022-05-12T10:54:59
| 151,211,590
| 3
| 5
| null | 2020-02-12T05:48:59
| 2018-10-02T06:44:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 516
|
py
|
from math import log
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('bmh')
# Set up runtime comparisons
n = np.linspace(1,10,1000)
labels = ['Constant','Logarithmic','Linear','Log Linear','Quadratic','Cubic','Exponential']
big_o = [np.ones(n.shape),np.log(n),n,n*np.log(n),n**2,n**3,2**n]
# Plot setup
plt.figure(figsize=(12,10))
plt.ylim(0,50)
for i in range(len(big_o)):
plt.plot(n,big_o[i],label = labels[i])
plt.legend(loc=0)
plt.ylabel('Relative Runtime')
plt.xlabel('n')
plt.show()
|
[
"nirmalnishant645@gmail.com"
] |
nirmalnishant645@gmail.com
|
849332128a1f20ddebff18d4d7d8abafa1b75de5
|
fdcb2cdee4d5b398eed4eefc830213234e3e83a5
|
/01_MIT_Learning/00_midterm/P4.py
|
8b625a8f64d442a8f758fb47ab1f6f2a52939a51
|
[] |
no_license
|
daftstar/learn_python
|
be1bbfd8d7ea6b9be8407a30ca47baa7075c0d4b
|
4e8727154a24c7a1d05361a559a997c8d076480d
|
refs/heads/master
| 2021-01-20T08:53:29.817701
| 2018-01-15T22:21:02
| 2018-01-15T22:21:02
| 90,194,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
# PROBLEM 4
# Write a function is_triangular that meets the
# specification below.
"""
A triangular number is a number obtained by the continued
summation of integers starting from 1.
For example, 1, 1+2, 1+2+3, 1+2+3+4, etc.,
corresponding to 1, 3, 6, 10, etc., are triangular numbers.
"""
def is_triangular(k):
"""
k, a positive integer
returns True if k is triangular and False if not
"""
if k == 1:
return True
# create a list to check values against.
# for consistent lookups, create a dictionary
check = []
# initialize triangle for calculation
triangle = 0
# create a list of triangular numbers from 0, to k
# Clean this up later since initial values in [check]
# can hog up memory when k is very large.
for i in range(0, k):
triangle += i
check.append(triangle)
# no need to continue calculating triangular numbers if the
# latest number in the list is greater than what we're
# checking for.
if check[-1] > k:
break
# for debugging / visualization purposes:
# print (check)
# print (check[-3:])
# check if k is within the last 3 values of
# generated triangular values. No need to check if
# k is in the earlier values since k will be > than
# those values.
return (k in (check[-3:]))
print (is_triangular(994755))
# ORIGINAL FUNCTION:
# PROBLEM 4
# Write a function is_triangular that meets the
# specification below.
# """
# A triangular number is a number obtained by the continued
# summation of integers starting from 1.
# For example, 1, 1+2, 1+2+3, 1+2+3+4, etc.,
# corresponding to 1, 3, 6, 10, etc., are triangular numbers.
# """
# def is_triangular(k):
# """
# k, a positive integer
# returns True if k is triangular and False if not
# """
# if k == 1:
# return True
# # create a list to check values against.
# # for consistent lookups, create a dictionary
# check = []
# # initialize triangle for calculation
# triangle = 0
# # create a list of triangular numbers from 0, to k
# for i in range(0, k):
# triangle += i
# check.append(triangle)
# # no need to continue calculating triangular numbers if the
# # latest number in the list is greater than what we're
# # checking for.
# if check[-1] > k:
# break
# # for debugging / visualization purposes:
# print (check)
# # check if k is in the list of generated triangular values.
# # print (check in range[])
# if k in check:
# return True
# else:
# return False
# print (is_triangular(1891))
|
[
"nikdaftary@gmail.com"
] |
nikdaftary@gmail.com
|
1c93cded721204389639d3cabae1b862853c3694
|
53a3c329e6f0860e840babf19e12452e94c30e39
|
/scripts/gain_stats.py
|
ca1ee1fea01ba14ec2b2c538c37a6fc3a66bc37b
|
[] |
no_license
|
galabing/qd
|
60c2602f0deaae808b519c796b24063839766071
|
9ece034832167de958ec8a56da081ab75916684d
|
refs/heads/master
| 2020-12-26T04:38:26.187729
| 2015-06-15T21:20:20
| 2015-06-15T21:20:20
| 34,972,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,291
|
py
|
#!/usr/bin/python
ticker_file = '/Users/lnyang/lab/qd/data/tickers'
gain_dir = '/Users/lnyang/lab/qd/data/tmp/gains10/12'
min_date = '2004-01-01'
max_date = '9999-99-99'
with open(ticker_file, 'r') as fp:
tickers = sorted(fp.read().splitlines())
print 'processing %d tickers' % len(tickers)
stats = dict() # y => [gain ...]
for ticker in tickers:
gain_file = '%s/%s' % (gain_dir, ticker)
with open(gain_file, 'r') as fp:
lines = fp.read().splitlines()
for line in lines:
date, gain = line.split('\t')
if date < min_date or date > max_date:
continue
y, m, d = date.split('-')
gain = float(gain)
if gain > 100:
print '!! %s %s: gain = %f' % (ticker, date, gain)
if y not in stats: stats[y] = []
stats[y].append(gain)
for y in sorted(stats.keys()):
gains = sorted(stats[y])
print '%s: %d data points, min/max/avg gain: %f / %f / %f' % (
y, len(gains), min(gains), max(gains), sum(gains)/len(gains))
print ' 1%%: %f, 10%%: %f, 25%%: %f, 50%%: %f, 75%%: %f, 90%%: %f, 99%%: %f' % (
gains[int(len(gains)*0.01)],
gains[int(len(gains)*0.1)],
gains[int(len(gains)*0.25)],
gains[int(len(gains)*0.5)],
gains[int(len(gains)*0.75)],
gains[int(len(gains)*0.9)],
gains[int(len(gains)*0.99)])
|
[
"lnyang@linkedin.com"
] |
lnyang@linkedin.com
|
f2ff6a97ff2a03d6a5c101fff002e306e7e6f9a0
|
65b69f075fd0e57d8409561087f2366f8a60bab3
|
/rieapie/trickery.py
|
2e18a5467a15a7884a202d8850e35b488e2bf501
|
[
"MIT"
] |
permissive
|
alisaifee/rieapie
|
d7e74adf8208012e00f81a5bd0a7d4232a2cde67
|
a480c09f476867a259a2b1468f5c942897cd2d3d
|
refs/heads/master
| 2023-08-10T04:07:56.319117
| 2013-12-01T02:28:41
| 2013-12-01T02:28:41
| 11,880,429
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
import requests
import logging
import functools
import inspect
import json
GET = "GET"
PUT = "PUT"
POST = "POST"
DELETE = "DELETE"
class Component(object):
def __init__(self, name, api_object, parent=None):
self.name = str(name)
self.parent = parent
self.api_object = api_object
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
return Component(key, self.api_object, self)
def __full_path(self):
return "/".join([self.api_object.base_url, self.__path()])
def __path(self):
path = []
cur = self
while cur.parent:
path += [cur.name]
cur = cur.parent
path += [cur.name]
return "/".join(reversed(path))
def __repr__(self):
return self.__path().replace("/", ".")
def __call__(self, ext=""):
if ext:
return Component("%s.%s" % (self.name, ext), self.api_object,
self.parent)
return self
def __getitem__(self, key):
return Component(key, self.api_object, self)
def get(self, **kwargs):
url, params, _, headers = self.api_object.execute_pre_request(
GET, self.__full_path(), kwargs, None,
self.api_object.headers)
resp = self.api_object.session.get(url, params=params, headers=headers)
return self.api_object.execute_post_request(resp.status_code,
resp.text)
def delete(self, **kwargs):
url, params, _, headers = self.api_object.execute_pre_request(
DELETE, self.__full_path(), kwargs, None,
self.api_object.headers)
resp = self.api_object.session.delete(url, params=params,
headers=headers)
return self.api_object.execute_post_request(resp.status_code,
resp.text)
def create(self, **kwargs):
url, params, data, headers = self.api_object.execute_pre_request(
PUT, self.__full_path(), {}, kwargs,
self.api_object.headers)
resp = self.api_object.session.put(url, params=params, data=data,
headers=headers)
return self.api_object.execute_post_request(resp.status_code,
resp.text)
def update(self, **kwargs):
url, params, data, headers = self.api_object.execute_pre_request(
POST, self.__full_path(), {}, kwargs,
self.api_object.headers)
resp = self.api_object.session.post(url, params=params, data=data,
headers=headers)
return self.api_object.execute_post_request(resp.status_code,
resp.text)
def pre_request(fn):
fn.is_pre_request = True
@functools.wraps(fn)
def __inner(*args, **kwargs):
return fn(*args, **kwargs)
return __inner
def post_request(fn):
fn.is_post_request = True
@functools.wraps(fn)
def __inner(*args, **kwargs):
return fn(*args, **kwargs)
return __inner
class Api(object):
def __init__(self, base_url, request_headers={}, debug=False, pool_size=10,
connect_timeout=5, response_timeout=10):
self.base_url = base_url.rstrip("/")
self.headers = request_headers
if debug:
logging.basicConfig(level=logging.DEBUG)
self.pre_request_chain = []
self.post_request_chain = []
for name, method in inspect.getmembers(self, inspect.ismethod):
if hasattr(method, "is_pre_request"):
self.pre_request_chain.append(method)
if hasattr(method, "is_post_request"):
self.post_request_chain.append(method)
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_maxsize=pool_size,
max_retries=2)
self.session.mount("http://", adapter)
self.session.mount("https://", adapter)
self.root = Component("", self, None)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
return Component(key, self, None)
@pre_request
def default_pre_request(self, method, url, params, data, headers):
return url, params, data, headers
@post_request
def default_post_request(self, status, body):
return json.loads(body)
@post_request
def fallback_post_request(self, status, body):
return body
def execute_pre_request(self, method, url, params, data, headers):
for fn in self.pre_request_chain:
url, params, data, headers = fn(method, url, params, data, headers)
return url, params, data, headers
def execute_post_request(self, status, body):
last_error = None
num_errors = 0
for fn in self.post_request_chain:
try:
body = fn(status, body)
except Exception as e:
num_errors += 1
last_error = e
if num_errors == len(self.post_request_chain):
raise last_error
else:
return body
|
[
"ali@indydevs.org"
] |
ali@indydevs.org
|
e6f9ad93c38b7186f4e1bc7fbce807810f34015d
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Fisher/trend_PolyTrend/cycle_5/ar_/test_artificial_32_Fisher_PolyTrend_5__20.py
|
a843775b068feeaffba3bdedfbb078ee85d324f6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
9109e0a345c4b5dbe1c74ecc0b5f1c67bc8afc0a
|
93a7db386dfa0ac0dc369cc7f4b974224c801d8d
|
/deploy/dot-product/scripts/main-47.py
|
d0fa56eb54e5acb7e99d402afe69b29b3ad86d95
|
[] |
no_license
|
lingxiao/good-great-combo
|
e051f20c89b7317a14ca5cee357bda7b095ce174
|
4d2691866bc21e2c542354ad3aae6f369eb86c87
|
refs/heads/master
| 2021-01-19T19:30:43.391759
| 2017-04-09T12:35:15
| 2017-04-09T12:35:15
| 83,699,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
############################################################
# Module : A series of measures on the graph for experiments
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################
import os
import numpy as np
from utils import *
from scripts import *
from app.config import PATH
############################################################
'''
paths
'''
batch = 47
_root = os.path.join(PATH['directories']['deploy'], 'dot-product')
_pair_dir = os.path.join(_root, 'pairs')
_output_dir = os.path.join(_root, 'outputs')
word_2_vec_big = PATH['inputs']['word2vec']
word_2_vec_sm = PATH['inputs']['word2vec-sm']
word_pair_path = os.path.join(_pair_dir , 'batch-' + str(batch) + '.txt')
out_path = os.path.join(_output_dir, 'batch-' + str(batch) + '.txt')
dot(word_2_vec_big, word_pair_path, _output_dir, refresh = True)
|
[
"lingxiao@seas.upenn.edu"
] |
lingxiao@seas.upenn.edu
|
a18fc841ab746e31eab0bde79ff593d39f6893cd
|
508c5e01aa7dce530093d5796250eff8d74ba06c
|
/code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/utils/javascript/javascript_bundler.py
|
6016adb05301df6dc69f51f5715640f911d2aec2
|
[
"PostgreSQL",
"MIT"
] |
permissive
|
jhkuang11/UniTrade
|
f220b0d84db06ff17626b3daa18d4cb8b72a5d3f
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
refs/heads/master
| 2022-12-12T15:58:30.013516
| 2019-02-01T21:07:15
| 2019-02-01T21:07:15
| 166,479,655
| 0
| 0
|
MIT
| 2022-12-07T03:59:47
| 2019-01-18T22:19:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,663
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
from contextlib import contextmanager
from subprocess import call
from pgadmin.utils import u, fs_encoding, file_quote
# enum-like for tracking whether we have
class JsState:
NONE = 0
OLD = 1
NEW = 2
class JavascriptBundler:
"""Builds Javascript bundle files by delegating to webpack"""
def __init__(self):
self.jsState = JsState.NONE
def bundle(self):
try:
try_building_js()
self.jsState = JsState.NEW
except OSError:
webdir_path()
generatedJavascriptDir = os.path.join(webdir_path(), 'pgadmin', 'static', 'js', 'generated')
if os.path.exists(generatedJavascriptDir) and os.listdir(generatedJavascriptDir):
self.jsState = JsState.OLD
else:
self.jsState = JsState.NONE
def report(self):
return self.jsState
@contextmanager
def pushd(new_dir):
previous_dir = os.getcwd()
os.chdir(new_dir)
yield
os.chdir(previous_dir)
def webdir_path():
dirname = os.path.dirname
thisPath = os.path.realpath(u(__file__, fs_encoding))
return dirname(dirname(dirname(dirname(thisPath))))
def try_building_js():
with pushd(webdir_path()):
if call(['yarn', 'run', 'bundle:dev']) != 0:
raise OSError('Error executing bundling the application')
|
[
"jhkuang11@gmail.com"
] |
jhkuang11@gmail.com
|
a9139035aaa678ebf9365d927a8a6fff7b6545f3
|
66c3eb5a49b6b674e72ffeac00a221f16e224806
|
/HC_At_Test/PO/pageobject/betgame_page.py
|
4e4c4aa91e294bce27fe6559be1fd9845eeed4da
|
[] |
no_license
|
fan966/LX_AT_TEST
|
20ad5793ef9ab6fe9070d046935b90450321ff0b
|
7e95a399140567ff601205f8d83babbe56279ab6
|
refs/heads/master
| 2022-12-31T15:11:04.972185
| 2020-10-23T11:58:46
| 2020-10-23T11:58:46
| 258,690,216
| 0
| 1
| null | 2020-04-26T07:00:43
| 2020-04-25T04:53:19
|
Python
|
UTF-8
|
Python
| false
| false
| 952
|
py
|
# -*-coding:utf-8-*-
from selenium.webdriver.common.by import By
class BetPageLocator(object):
"""
下注页面公共部分元素定位信息
"""
# 投注倒计时
hc_game_time = (By.XPATH, r'//ul[contains(@class ,"flip")]')
tty_game_time = (By.XPATH, r'//div[@class="alert-box"]')
# 停售
div_stop_selling = (By.ID, 'stopSellingPop')
# 奖金拉杆条拖动按钮0~135px(style="left: 0px;")
bonus_percen = (By.XPATH, r'//span[@class="ui-handle"]')
# 拉杆条
pull_rod = (By.XPATH, r'//div[@class="ranger"]')
# 游戏分类
game_tyep_div = (By.XPATH, r'//div[contains(@class, "sidem_item")]//ul')
game_type_claer = (By.XPATH, r'//div[contains(@class, "sidem_item")]//a[@class="sidem_b clear"]')
# 开奖结果
run_lottery = (By.XPATH, r'//a[text()="开奖结果"]')
# 期数状态
period_tip = (By.XPATH, r'//div[@id="PeriodInfo"]//*[@data-bind="text:periodTip"]')
|
[
"664340382@qq.com"
] |
664340382@qq.com
|
e4b4376cf120624cd187c64a050c710037607475
|
538fd58e4f7d0d094fd6c93ba1d23f78a781c270
|
/689_max_sum_of_3_non_overlap_subarrays/test_solution.py
|
fc67ba8effb08041b9071ff7807d540305bb207a
|
[] |
no_license
|
FluffyFu/Leetcode
|
4633e9e91e493dfc01785fd379ab9f0788726ac1
|
5625e6396b746255f3343253c75447ead95879c7
|
refs/heads/master
| 2023-03-21T08:47:51.863360
| 2021-03-06T21:36:43
| 2021-03-06T21:36:43
| 295,880,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from solution import max_non_overlap
import pudb
def test():
nums = [1, 2, 1, 2, 6, 7, 5, 1]
k = 2
res = max_non_overlap(nums, k)
assert [0, 3, 5] == res
|
[
"fluffyfu400@gmail.com"
] |
fluffyfu400@gmail.com
|
d4d7fc69db45b2bd9d71d42ba91520dc5d575626
|
7d07c037dbd2fbfce960c7a63debe1cb3d5f1a8a
|
/api/apps/predictions/tests/views/test_tide_windows.py
|
3dbb0d868779e043bd779a4bbc7feb1045058611
|
[] |
no_license
|
sealevelresearch-jenkins/sea-level-api
|
2fcbf309fa7388514ddf8bf9bd520f5681775939
|
382cf4d1b6981f4120d8add6d79a53493b911e24
|
refs/heads/master
| 2020-12-25T05:19:21.904701
| 2014-06-25T11:44:26
| 2014-06-25T11:44:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,838
|
py
|
import datetime
import json
import pytz
from django.test import TestCase
from nose.tools import assert_equal, assert_in
from nose.plugins.skip import SkipTest
from api.apps.predictions.models import Prediction
from api.apps.locations.models import Location
class TestTideWindowsView(TestCase):
fixtures = [
'api/apps/locations/fixtures/two_locations.json',
'api/apps/predictions/fixtures/predictions_two_locations.json',
]
def test_that_tide_windows_url_lists_available_locations(self):
raise SkipTest("Not yet implemented.")
self.client.get('/predictions/tide-windows/')
def test_that_invalid_location_gives_a_json_404(self):
raise SkipTest("Not yet implemented.")
def test_that_no_start_and_end_parameter_temporary_redirects_to_now(self):
raise SkipTest("Not yet implemented.")
def test_that_missing_tide_level_param_gives_400_error(self):
response = self.client.get(
'/predictions/tide-windows/liverpool/'
'?start=2014-06-17T09:00:00Z'
'&end=2014-06-17T09:05:00Z')
data = json.loads(response.content)
assert_equal(400, response.status_code)
assert_equal(
{'detail': u'Missing required query parameter `tide_level`'},
data)
def test_that_envelope_has_tide_windows_field(self):
response = self.client.get(
'/predictions/tide-windows/liverpool/'
'?start=2014-06-17T00:00:00Z'
'&end=2014-06-18T00:00:00Z'
'&tide_level=10.7')
data = json.loads(response.content)
assert_in('tide_windows', data)
def test_that_tide_window_records_have_correct_structure(self):
response = self.client.get(
'/predictions/tide-windows/liverpool/'
'?start=2014-06-17T00:00:00Z'
'&end=2014-06-18T00:00:00Z'
'&tide_level=10.7')
data = json.loads(response.content)
tide_windows = data['tide_windows']
expected = {
'start': {
'datetime': '2014-06-17T09:01:00Z',
'tide_level': 10.8
},
'end': {
'datetime': '2014-06-17T09:02:00Z',
'tide_level': 10.9
},
'duration': {
'total_seconds': 120
}
}
assert_equal(expected, tide_windows[0])
class TestTideWindowsCalculationsView(TestCase):
fixtures = [
'api/apps/locations/fixtures/two_locations.json',
]
@classmethod
def setUp(cls):
cls.create_double_peaked_tide()
@classmethod
def create_double_peaked_tide(cls):
location = Location.objects.get(slug='liverpool')
cls.base_time = datetime.datetime(2014, 6, 1, 10, 00, tzinfo=pytz.UTC)
for minute, level in [
(0, 4.50),
(1, 4.75),
(2, 5.00),
(3, 5.25),
(4, 5.50),
(5, 5.75),
(6, 6.00), # peak
(7, 5.60),
(8, 5.49),
(9, 5.25),
(10, 5.00), # trough
(11, 5.25),
(12, 5.49), # peak
(13, 5.25),
(14, 5.00),
(15, 4.75),
(16, 4.50)
]:
Prediction.objects.create(
location=location,
datetime=cls.base_time + datetime.timedelta(minutes=minute),
tide_level=level
)
def test_that_single_window_is_correctly_identified(self):
response = self.client.get(
'/predictions/tide-windows/liverpool/'
'?start=2014-06-01T10:00:00Z'
'&end=2014-06-02T11:00:00Z'
'&tide_level=5.5'
)
data = json.loads(response.content)
assert_equal([
{
'start': {
'datetime': '2014-06-01T10:04:00Z',
'tide_level': 5.50,
},
'end': {
'datetime': '2014-06-01T10:07:00Z',
'tide_level': 5.60,
},
'duration': {
'total_seconds': 240,
}
}],
data['tide_windows']
)
def test_that_double_window_is_correctly_identified(self):
response = self.client.get(
'/predictions/tide-windows/liverpool/'
'?start=2014-06-01T10:00:00Z'
'&end=2014-06-02T11:00:00Z'
'&tide_level=5.1'
)
data = json.loads(response.content)
assert_equal([
{
'start': {
'datetime': '2014-06-01T10:03:00Z',
'tide_level': 5.25,
},
'end': {
'datetime': '2014-06-01T10:09:00Z',
'tide_level': 5.25,
},
'duration': {
'total_seconds': 420,
}
},
{
'start': {
'datetime': '2014-06-01T10:11:00Z',
'tide_level': 5.25,
},
'end': {
'datetime': '2014-06-01T10:13:00Z',
'tide_level': 5.25,
},
'duration': {
'total_seconds': 180,
}
},
],
data['tide_windows']
)
def test_that_no_tidal_window_returned_if_tide_is_never_above_height(self):
response = self.client.get(
'/predictions/tide-windows/liverpool/'
'?start=2014-06-01T10:00:00Z'
'&end=2014-06-02T11:00:00Z'
'&tide_level=6.1'
)
data = json.loads(response.content)
assert_equal([], data['tide_windows'])
|
[
"paul@paulfurley.com"
] |
paul@paulfurley.com
|
1e7d8fb610baf2fe9c8bbfdb8ef02faabb5e813c
|
72cbe74acc97c445553d4e4468de5b491a0e7af3
|
/id3wr.py
|
77b939ac785d0df645b0a90745b4952685cafe5c
|
[] |
no_license
|
acadien/xrdAnalysis
|
e1989d2b917bcbf21a2e8cf7006082aa8fe071b0
|
c05beab696d7ebd5dd80d6a4ce50810a3ee65682
|
refs/heads/master
| 2021-01-10T21:33:15.352168
| 2015-05-19T18:21:55
| 2015-05-19T18:21:55
| 35,900,206
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
#!/usr/bin/python
import os
import re
from ID3 import *
path='./'
dirlist=os.listdir(path)
pardir=os.getcwd()
album=re.search(r'(/[\w,\s]+)+',pardir)
album=album.group(1).lstrip('/')
for fname in dirlist:
try:
m = re.search('(\d\d)-(\w+)-(\w+)_www\.file',fname)
id3info = ID3(fname)
except:
continue
print id3info
id3info['TRACKNUMBER'] = m.group(1)
artist = m.group(2)
id3info['ARTIST'] = re.sub('_',' ',artist).capitalize()
song = m.group(3)
id3info['SONG']=re.sub('_',' ',song).capitalize()
id3info['ALBUM']=album
#print track+artist+song
#convert='mp3info -f -t '+song+' -n '+track+' -a '+artist+' -l '+album+' '+fname
#os.system(convert)
|
[
"adamcadien@gmail.com"
] |
adamcadien@gmail.com
|
293f5b32b2b42456464676d82eb4d9157e70b078
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/common/Lib/ctypes/test/test_integers.py
|
3167244d8935295055bdcead94ecc8b9cd4c6a32
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009
| 2017-02-03T21:40:17
| 2017-02-03T21:40:17
| 80,870,824
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 428
|
py
|
# 2017.02.03 21:57:48 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/ctypes/test/test_integers.py
import unittest
if __name__ == '__main__':
unittest.main()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\Lib\ctypes\test\test_integers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:57:48 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
a010e185bf8e52e2f22af54c3a9fdcb3c419e547
|
b61dedf12868e2bc511b6693af1985911a13f336
|
/src/logpipe/formats/pickle.py
|
2dfea0f71f0be7e298f161b03f7014c150a64290
|
[
"ISC"
] |
permissive
|
vitorcarmovieira/django-logpipe
|
f9eebb6674b9ba180a63448c9d71ce2e87929f7c
|
89d0543e341518f9ae49124c354e6a6c2e3f4150
|
refs/heads/main
| 2023-03-03T13:18:22.456270
| 2021-02-13T17:29:32
| 2021-02-13T17:29:32
| 326,679,534
| 1
| 1
|
ISC
| 2021-02-13T17:29:32
| 2021-01-04T12:39:30
|
Python
|
UTF-8
|
Python
| false
| false
| 562
|
py
|
from rest_framework import renderers, parsers
import pickle
class PickleRenderer(renderers.BaseRenderer):
media_type = 'application/python-pickle'
format = 'pickle'
charset = None
render_style = 'binary'
def render(self, data, media_type=None, renderer_context=None):
return pickle.dumps(data)
class PickleParser(parsers.BaseParser):
media_type = 'application/python-pickle'
def parse(self, stream, media_type=None, parser_context=None):
return pickle.load(stream)
__all__ = ['PickleRenderer', 'PickleParser']
|
[
"crgwbr@gmail.com"
] |
crgwbr@gmail.com
|
787ab2f450dbb132be7a57a5e36ccf3341ec5e94
|
6c9912ab5ff000cc9f489248de2f2687f61cac1a
|
/rrc/settings/prod.py
|
dcf462a3fa98c8ff0c7954e3e75f1577139baf02
|
[
"MIT"
] |
permissive
|
rocky-roll-call/rrc-backend
|
ed047457d4eae730168a109584c56389c4c01c09
|
02e8e11c3dab7661e48650e2e861a4a97788a4ce
|
refs/heads/master
| 2020-04-28T14:26:24.623336
| 2019-12-30T04:11:20
| 2019-12-30T04:11:20
| 175,338,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
Production Settings
"""
from decouple import config, Csv
from dj_database_url import parse as db_url
from rrc.settings.dev import *
DATABASES = {"default": config("DATABASE_URL", cast=db_url)}
DEBUG = config("SECRET_KEY")
SECRET_KEY = config("SECRET_KEY")
ALLOWED_HOSTS = config("ALLOWED_HOSTS", cast=Csv())
JWT_AUTH["JWT_EXPIRATION_DELTA"] = datetime.timedelta(seconds=600)
|
[
"michael@mdupont.com"
] |
michael@mdupont.com
|
c6c775bb54a0e2e106a903677ca605c033ab439a
|
f95d2646f8428cceed98681f8ed2407d4f044941
|
/day09/day09/exercise/myadd.py
|
a9178c6996c200cf9c3a17191158f06e47e2dcf1
|
[] |
no_license
|
q2806060/python-note
|
014e1458dcfa896f2749c7ebce68b2bbe31a3bf8
|
fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983
|
refs/heads/master
| 2020-08-18T01:12:31.227654
| 2019-10-17T07:40:40
| 2019-10-17T07:40:40
| 215,731,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
# 练习:
# 写一个函数myadd, 此函数可以计算两个数,三个数,及四个数的和
# def myadd(.....):
# ....
# print(myadd(10, 20)) # 30
# print(myadd(100, 200, 300)) # 600
# print(myadd(1, 2, 3, 4)) # 10
# 方法1
# def myadd(a, b, c=0, d=0):
# return a + b + c + d
# 方法2
# def myadd(a, b, c=None, d=None):
# if c is None:
# c = 0
# if d is None:
# d = 0
# return a + b + c + d
# 方法3
def myadd(a, b, c=0, d=0):
return sum( (a, b, c, d) )
print(myadd(10, 20)) # 30
print(myadd(100, 200, 300)) # 600
print(myadd(1, 2, 3, 4)) # 10
|
[
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] |
C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn
|
fb66fe333c795753e3bb1c54e9b12a0c9b1edb53
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/sonarr/__init__.py
|
c592e8435c28b8ea7f22b41bc2b6f515fbafa82d
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 4,327
|
py
|
"""The Sonarr component."""
from __future__ import annotations
from typing import Any
from aiopyarr.models.host_configuration import PyArrHostConfiguration
from aiopyarr.sonarr_client import SonarrClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_URL,
CONF_VERIFY_SSL,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
CONF_BASE_PATH,
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DEFAULT_UPCOMING_DAYS,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
LOGGER,
)
from .coordinator import (
CalendarDataUpdateCoordinator,
CommandsDataUpdateCoordinator,
DiskSpaceDataUpdateCoordinator,
QueueDataUpdateCoordinator,
SeriesDataUpdateCoordinator,
SonarrDataUpdateCoordinator,
StatusDataUpdateCoordinator,
WantedDataUpdateCoordinator,
)
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Sonarr from a config entry."""
if not entry.options:
options = {
CONF_UPCOMING_DAYS: entry.data.get(
CONF_UPCOMING_DAYS, DEFAULT_UPCOMING_DAYS
),
CONF_WANTED_MAX_ITEMS: entry.data.get(
CONF_WANTED_MAX_ITEMS, DEFAULT_WANTED_MAX_ITEMS
),
}
hass.config_entries.async_update_entry(entry, options=options)
host_configuration = PyArrHostConfiguration(
api_token=entry.data[CONF_API_KEY],
url=entry.data[CONF_URL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
)
sonarr = SonarrClient(
host_configuration=host_configuration,
session=async_get_clientsession(hass),
)
entry.async_on_unload(entry.add_update_listener(_async_update_listener))
coordinators: dict[str, SonarrDataUpdateCoordinator[Any]] = {
"upcoming": CalendarDataUpdateCoordinator(hass, host_configuration, sonarr),
"commands": CommandsDataUpdateCoordinator(hass, host_configuration, sonarr),
"diskspace": DiskSpaceDataUpdateCoordinator(hass, host_configuration, sonarr),
"queue": QueueDataUpdateCoordinator(hass, host_configuration, sonarr),
"series": SeriesDataUpdateCoordinator(hass, host_configuration, sonarr),
"status": StatusDataUpdateCoordinator(hass, host_configuration, sonarr),
"wanted": WantedDataUpdateCoordinator(hass, host_configuration, sonarr),
}
# Temporary, until we add diagnostic entities
_version = None
for coordinator in coordinators.values():
await coordinator.async_config_entry_first_refresh()
if isinstance(coordinator, StatusDataUpdateCoordinator):
_version = coordinator.data.version
coordinator.system_version = _version
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = coordinators
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_migrate_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Migrate old entry."""
LOGGER.debug("Migrating from version %s", entry.version)
if entry.version == 1:
new_proto = "https" if entry.data[CONF_SSL] else "http"
new_host_port = f"{entry.data[CONF_HOST]}:{entry.data[CONF_PORT]}"
new_path = ""
if entry.data[CONF_BASE_PATH].rstrip("/") not in ("", "/", "/api"):
new_path = entry.data[CONF_BASE_PATH].rstrip("/")
data = {
**entry.data,
CONF_URL: f"{new_proto}://{new_host_port}{new_path}",
}
hass.config_entries.async_update_entry(entry, data=data)
entry.version = 2
LOGGER.info("Migration to version %s successful", entry.version)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
6949ae60b99663f19494a7ea87f9d87ec0858309
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_walleyes.py
|
9095d3388cacad88aa57bdae0e7ba33f57fa39d6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#calss header
class _WALLEYES():
def __init__(self,):
self.name = "WALLEYES"
self.definitions = walleye
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['walleye']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
59d00a0a1af3c49e1d1aa9570187d7a6f89cdc8e
|
ff52e4d5ccc7bf0bcf3c41bb304f9c28f455c57e
|
/lms/lms/doctype/tasks/tasks.py
|
20df73f8296465ab88fb9c1b6b58e2f7a92565f7
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/engagex-lms-backup-
|
889e76096d80b57f1df94c4ffa0dbc87ef6328f4
|
d377c78873e66574a996c7d67b33ce9ff69f9d74
|
refs/heads/master
| 2020-03-11T18:00:28.166274
| 2018-04-19T05:36:46
| 2018-04-19T05:36:46
| 130,164,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Valiant Systems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Tasks(Document):
pass
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Task", filters)
data = frappe.db.sql("""select name, exp_start_date, exp_end_date,
subject, status from `tabTasks`
where ((ifnull(exp_start_date, '0000-00-00')!= '0000-00-00') \
and (exp_start_date <= %(end)s) \
or ((ifnull(exp_end_date, '0000-00-00')!= '0000-00-00') \
and exp_end_date >= %(start)s))
{conditions}""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
2e2393fe770c6f960a4b4da26f9ebde7510eb4b4
|
526b892fa981573f26d55c361b42a9d3fa841db5
|
/haas/highlighter/forms.py
|
c1c72f9063001d89e88ee11eb8b9560d184b6781
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
uranusjr/bbshighlighter
|
13d89713245f95906a733b7aa8e7c39c58f6ec22
|
da35d483e429e0cbd0619b1bc399f4fe67de9ac3
|
refs/heads/master
| 2020-05-31T22:36:58.424739
| 2014-03-07T17:24:00
| 2014-03-07T17:24:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from pygments.lexers import get_all_lexers
class HighlighterForm(forms.Form):
language = forms.ChoiceField()
code = forms.CharField(label='', widget=forms.Textarea)
def __init__(self, *args, **kwargs):
kwargs['initial'] = dict(kwargs.get('initial', {}), language='cpp')
super(HighlighterForm, self).__init__(*args, **kwargs)
choices = []
for name, aliases, filetypes, mimetypes in get_all_lexers():
choices.append((aliases[0], name))
self.fields['language'].choices = choices
|
[
"uranusjr@gmail.com"
] |
uranusjr@gmail.com
|
45b1ffda6000b52ced756e89f276e7c99ca0fde5
|
0ed0f9b98be3eb4f87aedfb67210b01a3bd4ffe4
|
/Validation/test/sync_MC_cfg.py
|
c4c6e0f84886667de068721433af89aef78b258f
|
[] |
no_license
|
jshlee/CATTools
|
6e6714225010fa5dfcc819f578d3ad0b8458a8dc
|
3805086b4577b439ecef5369d5bd56f25cfe1ca3
|
refs/heads/cat80x
| 2021-01-21T16:44:52.978003
| 2017-10-17T18:46:58
| 2017-10-17T18:46:58
| 25,828,473
| 1
| 2
| null | 2017-10-17T18:46:59
| 2014-10-27T16:32:59
|
C++
|
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("CATeX")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
process.options.allowUnscheduled = cms.untracked.bool(True)
process.MessageLogger.cerr.FwkReport.reportEvery = 50000
process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring())
process.source.fileNames = [
'/store/group/CAT/TTbarXSecSynchronization/v8-0-1/TT_TuneCUETP8M1_13TeV-powheg-pythia8__PUSpring16_80X_mcRun2_asymptotic_2016_v3_ext3-v1__1671FA99-240F-E611-BF05-00266CFAE464.root'
]
process.load("CATTools.CatAnalyzer.filters_cff")
process.load("CATTools.Validation.ttllEventSelector_cfi")
process.load("CATTools.Validation.validation_cff")
eventsTTLL.electron.idName = "cutBasedElectronID-Spring15-25ns-V1-standalone-medium"
eventsTTLL.electron.applyEcalCrackVeto = True
eventsTTLL.jet.bTagName = "pfCombinedInclusiveSecondaryVertexV2BJetTags"
eventsTTLL.jet.bTagWP = "CSVM"
eventsTTLL.jet.skipJER = True
eventsTTLL.filters.ignoreTrig = True
process.TFileService = cms.Service("TFileService",
fileName = cms.string("hist.root"),
)
process.p = cms.Path(
process.gen + process.rec
* process.eventsTTLL
)
|
[
"jhgoh@cern.ch"
] |
jhgoh@cern.ch
|
4237ec9fd7869d359af8e6f194348e6758a7a910
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/io_import_mc/__init__.py
|
9987c1068ea276a0c20b744f7ea9530f0f1b2fd4
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,003
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Import Maya Cache (.xml, .mc)",
"author": "Jasper van Nieuwenhuizen",
"version": (0, 1),
"blender": (2, 6, 0),
"location": "File > Import > Maya cache (.xml, .mc)",
"description": "Imports Maya Cache to Objects",
"warning": "wip",
"wiki_url": "",
"tracker_url": "",
"category": "Learnbgame",
}
if "bpy" in locals():
import imp
if "import_mc" in locals():
imp.reload(import_mc)
if "export_mc" in locals():
imp.reload(export_mc)
import bpy
from bpy.props import (BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (ExportHelper,
ImportHelper,
path_reference_mode,
axis_conversion,
)
class ImportMC(bpy.types.Operator, ImportHelper):
"""Load a Maya Cache file"""
bl_idname = "import_shape.mc"
bl_label = "Import Maya Cache"
bl_options = {'PRESET', 'UNDO'}
filename_ext = ".xml"
filter_glob = StringProperty(
default="*.xml;*.mc",
options={'HIDDEN'},
)
use_selection = BoolProperty(
name="Selection Only",
description="Import cache for selected objects only",
default=False,
)
interpolation = EnumProperty(
name="Interpolation",
items=(('LINEAR', "Linear", ""),
('NONE', "None", ""),
)
)
time_mode = EnumProperty(
name="Method to control playback time",
items=(('FRAME', "Frame", "Control playback using a frame number"\
" (ignoring time FPS and start frame from the file"),
('TIME', "Time", "Control playback using time in seconds"),
('FACTOR', "Factor", "Control playback using a value"\
" between [0, 1]"),
)
)
play_mode = EnumProperty(
name="Play mode",
items=(('SCENE', "Scene", "Use the time from the scene"),
('CUSTOM', "Custom", "Use the modifiers own time"\
" evaluation"),
)
)
frame_start = FloatProperty(
name="Frame Start",
description="Add this to the start frame",
)
frame_scale = FloatProperty(
name="Frame Scale",
description="Evaluation time in seconds",
)
eval_frame = FloatProperty(
name="Evaluation Frame",
description="The frame to evaluate (starting at 0)",
)
eval_time = FloatProperty(
name="Evaluation Time",
description="Evaluation time in seconds",
)
eval_factor = FloatProperty(
name="Evaluation Factor",
description="Evaluation factor",
)
forward_axis = EnumProperty(
name="Forward",
items=(('X', "+X", ""),
('Y', "+Y", ""),
('Z', "+Z", ""),
('-X', "-X", ""),
('-Y', "-Y", ""),
('-Z', "-Z", ""),
),
default='-Z',
)
up_axis = EnumProperty(
name="Up",
items=(('X', "+X", ""),
('Y', "+Y", ""),
('Z', "+Z", ""),
('-X', "-X", ""),
('-Y', "-Y", ""),
('-Z', "-Z", ""),
),
default='Y',
)
def execute(self, context):
import imp
from . import import_mc
imp.reload(import_mc)
keywords = self.as_keywords(ignore=("forward_axis",
"up_axis",
"filter_glob",
))
global_matrix = axis_conversion(from_forward=self.forward_axis,
from_up=self.up_axis,
).to_4x4()
keywords["global_matrix"] = global_matrix
return import_mc.load(self, context, **keywords)
def draw(self, context):
layout = self.layout
layout.prop(self, "use_selection")
layout.label(text="Time Mapping:")
row = layout.row()
row.prop(self, "time_mode", expand=True)
row = layout.row()
row.prop(self, "play_mode", expand=True)
if self.play_mode == 'SCENE':
layout.prop(self, "frame_start")
layout.prop(self, "frame_scale")
else:
time_mode = self.time_mode
if time_mode == 'FRAME':
layout.prop(self, "eval_frame")
elif time_mode == 'TIME':
layout.prop(self, "eval_time")
elif time_mode == 'FACTOR':
layout.prop(self, "eval_factor")
layout.label(text="Axis Mapping:")
split = layout.split(percentage=0.5, align=True)
split.alert = (self.forward_axis[-1] == self.up_axis[-1])
split.label("Forward/Up Axis:")
split.prop(self, "forward_axis", text="")
split.prop(self, "up_axis", text="")
#split = layout.split(percentage=0.5)
#split.label(text="Flip Axis:")
#row = split.row()
#row.prop(self, "flip_axis")
def menu_func_import(self, context):
self.layout.operator(ImportMC.bl_idname, text="Maya Cache (.xml, .mc)")
#def menu_func_export(self, context):
# self.layout.operator(ExportMC.bl_idname, text="Maya Cache (.xml, .mc)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_import)
#bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
#bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
01bb63ecb9346797486f1cf6af72c8fbf7e0b1e3
|
f359c953ef823cc44f7d87a3736c3e4fb1817c0b
|
/EDBRCommon/python/PromptReco/Run2015D/SingleElectron/SingleElectron_Run2015D-bo.py
|
93598b048e708643741ed759431ae1c51bb1d279
|
[] |
no_license
|
jruizvar/ExoDiBosonResonancesRun2
|
aa613200725cf6cd825d7bcbde60d2e39ba84e39
|
b407ab36504d0e04e6bddba4e57856f9f8c0ec66
|
refs/heads/Analysis76X
| 2021-01-18T20:00:57.358494
| 2016-05-30T21:30:19
| 2016-05-30T21:30:19
| 23,619,682
| 1
| 1
| null | 2016-04-22T18:38:45
| 2014-09-03T12:41:07
|
Python
|
UTF-8
|
Python
| false
| false
| 930
|
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/data/Run2015D/SingleElectron/MINIAOD/PromptReco-v3/000/257/645/00000/5CA69132-F967-E511-BF67-02163E014485.root',
'/store/data/Run2015D/SingleElectron/MINIAOD/PromptReco-v3/000/257/645/00000/6693A85A-FA67-E511-B66C-02163E01459D.root',
'/store/data/Run2015D/SingleElectron/MINIAOD/PromptReco-v3/000/257/645/00000/66A2EECE-F967-E511-98B1-02163E011C4A.root',
'/store/data/Run2015D/SingleElectron/MINIAOD/PromptReco-v3/000/257/645/00000/6AD1AB5B-FA67-E511-87A9-02163E01459D.root',
'/store/data/Run2015D/SingleElectron/MINIAOD/PromptReco-v3/000/257/645/00000/74422752-F967-E511-8224-02163E01437F.root' ] )
|
[
"jruizvar@cern.ch"
] |
jruizvar@cern.ch
|
7d645049875ce042602061c064c8c1b640251ccb
|
71764665e27f4b96bab44f38a4a591ffc2171c24
|
/hhplt/productsuite/gs10/__init__.py
|
0dba8ddde1ae1f18ae65c9240109e986b46b8791
|
[] |
no_license
|
kingdomjc/RSU_production_VAT
|
693f8c504acc0cc88af92942734ccb85f7e7d7c0
|
9a3d6d3f5a5edfaf30afdff725661630aafe434c
|
refs/heads/master
| 2020-07-31T05:03:46.699606
| 2019-09-24T02:09:53
| 2019-09-24T02:09:53
| 210,491,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,258
|
py
|
#coding:utf-8
'''GS10 OBU生产测试'''
import hhplt.testengine.product_manage as product_manage
import battery,cq_overall_unit,trading,board_mending,cq_auto_board,manual_board,trading_mending,retradeAndValidateMac
#注册产品测试项
product_manage.registerProduct('GS10 OBU',(
cq_auto_board, #单板自动测试工位
manual_board, #单板手动测试工位
board_mending, #单板维修工位
trading_mending, #单板维修交易复测工位
cq_overall_unit, #整机
trading, #交易
# retradeAndValidateMac #检查MAC
))
'''
测试中使用的参数:
名称 测试项 含义 默认值
--------------------------------------------------------------------
gs10.initParam.displayDirect 初始数据 显示方向
gs10.initParam.softwareVersion 软件版本号
gs10.initParam.hardwareVersion 硬件版本号
gs10.initParam.wakeupMode 唤醒模式
gs10.initParam.amIndex AmIndex
gs10.initParam.transPower 发射功率
gs10.initParam.txFilter TxFilter
gs10.initWanken.low.grade 初始唤醒灵敏度 初始低唤醒灵敏度粗调
gs10.initWanken.low.level 初始低唤醒灵敏度细调
gs10.initWanken.high.grade 初始高唤醒灵敏度粗调
gs10.initWanken.high.level 初始低唤醒灵敏度细调
gs10.capPower.low 电容电路电压 电容电路电压判定低阈值
gs10.capPower.high 电容电路电压判定高阈值
gs10.solarBatteryPower.board.low 太阳能电路电压(单板) 太阳能电路电压判定低阈值
gs10.solarBatteryPower.board.high 太阳能电路打压判定高阈值
gs10.solarBatteryPower.overall.low 太阳能电路电压(整机) 太阳能电路电压判定低阈值
gs10.solarBatteryPower.overall.high 太阳能电路打压判定高阈值
gs10.batteryPower.low 电池电路电压 电池电路电压判定低阈值
gs10.batteryPower.high 电池电路电压判定高阈值
gs10.wakeup.power.low 唤醒灵敏度 低唤醒功率
gs10.wakeup.power.high 高唤醒功率
gs10.receiveSensitivity.power 接收灵敏度测试 接收功率值
gs10.receiveSensitivity.frameNum 发送总帧数
gs10.receiveSensitivity.frameBorder 接收帧数判定低阈值
gs10.esamDistrictCode.[单板前缀] ESAM测试 ESAM地区分散码
gs10.boardBarPrefix 单板前缀
gs10.sendPower.low 发射功率测试 发射功率低判定阈值
gs10.sendPower.high 发射功率高判定阈值
gs10.staticCurrent.low 静态电流测试 静态电流低判定阈值
gs10.staticCurrent.high 静态电流高判定阈值
gs10.deepStaticCurrent.low 深度静态电流测试 深度静态电流低判定阈值
gs10.deepStaticCurrent.high 深度静态电流判定高阈值
gs10.batteryOpenPower.low 电池开路电压 电池开路电压低判定阈值
gs10.batteryOpenPower.high 电池开路电压高判定阈值
gs10.capOpenPower.low 电容开路电压 电容开路电压低判定阈值
gs10.capOpenPower.high 电容开路电压高判定阈值
gs10.formalVersion.filename 下载正式版本 版本文件名称(不含路径)
'''
|
[
"929593844@qq.com"
] |
929593844@qq.com
|
17b4251a831504ca745d4b00d9f775904854ff17
|
407c790da88f1c38f8800d6555cfeb9397046e70
|
/anuncios/forms.py
|
8bbcc2cb74ef19bbc8098c25ac201c3d04753801
|
[] |
no_license
|
jonatasprates/matao4x4
|
5488473812ab4dc31011199f5a055c9cca4144bb
|
ad8de86d9a9bce13363fd6417dd738430c60e9bb
|
refs/heads/master
| 2022-11-26T13:49:11.784141
| 2020-08-11T02:10:36
| 2020-08-11T02:10:36
| 286,622,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
#-*- coding: utf-8 -*-
from django.core.files.images import get_image_dimensions
from matao4x4.anuncios.models import Anunciante
from django import forms
class AnunciantesAdminForm(forms.ModelForm):
class Meta:
model = Anunciante
# validando o campo específico: imagem
def clean_imagem(self):
# pego a imagem enviada pelo usuário
imagem = self.cleaned_data.get("imagem")
# a imagem não foi enviada?
if not imagem:
# chamo um erro de validação do form
raise forms.ValidationError("Você esqueceu de enviar o banner!")
# se a imagem foi enviada
else:
# pego a largura e altura da imagem
largura = get_image_dimensions(imagem)[0]
# a largura é diferente do padrão?
if largura != 224:
# chamo o erro de validação do form, informando largura e altura necessárias
raise forms.ValidationError("A largura da imagem enviada é de %ipx. O correto é 224px." % largura)
return imagem
|
[
"jonatasluisprates@gmail.com"
] |
jonatasluisprates@gmail.com
|
7e32b253fb44929796b8569b0d1334b8e2ca7c12
|
33518b9521d8e633010b0b9d1ea0f7a937437200
|
/Python/strobogrammatic_number_ii/strobogrammatic_number_ii.py
|
3456ec0c3783ada61d9b190a8736aca60890cef3
|
[] |
no_license
|
lqs4188980/CodingPractice
|
977ddb69306c92a5e3df88f26572200622fad82a
|
c17653832269ab1bb3e411f7d74bef4c8e9985b3
|
refs/heads/master
| 2021-01-22T05:10:40.885490
| 2016-02-05T09:06:51
| 2016-02-05T09:06:51
| 25,272,652
| 0
| 1
| null | 2016-01-06T07:50:29
| 2014-10-15T20:40:34
|
Java
|
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
# corner cases:
# won't start with 0
# 0, 1, 8 good for central
# 0, 1, 6, 8, 9 for rest
class Solution(object):
def __init__(self):
self.odd = ['0','1','8']
self.whole = ['0','1','6','8','9']
self.non_zero = ['1','6','8','9']
def findStrobogrammatic(self, n):
"""
:type n: int
:rtype: List[str]
"""
nums = []
if n % 2 == 1:
for char in self.odd:
self.get_stro_num(n, char, nums)
else:
self.get_stro_num(n, '', nums)
return nums
def get_stro_num(self, n, curr_num, nums):
if n == len(curr_num):
nums.append(curr_num)
return
if n == len(curr_num) + 2:
for char in self.non_zero:
self.get_stro_num(n, \
char + curr_num + self.get_mirror(char), \
nums)
else:
for char in self.whole:
self.get_stro_num(n, \
char + curr_num + self.get_mirror(char), \
nums)
def get_mirror(self, char):
if char == '0' or \
char == '1' or \
char == '8':
return char
elif char == '6':
return '9'
else:
return '6'
# else raise exception
|
[
"xiaoqin.zhu.4@gmail.com"
] |
xiaoqin.zhu.4@gmail.com
|
20c27ce9740fff5beebeaf33e73bf00621d46f2f
|
ef6a64f2e1276b2312fe69be5fb0f79de654f613
|
/backend/breach/migrations/0014_auto_20160504_1753.py
|
dfab67023f3403bba72ecf71953d90b5076e106a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MasterScott/rupture
|
cd1d1fd410f376d1b5c32587a71fd9565ce6aae6
|
131c61a28550f082ca1598aa40ac37ca59204b40
|
refs/heads/develop
| 2021-07-06T04:29:04.791439
| 2019-07-28T10:47:39
| 2019-07-28T10:47:39
| 218,162,821
| 0
| 0
|
MIT
| 2021-01-12T08:59:19
| 2019-10-28T23:16:52
| null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-04 17:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0013_auto_20160330_1632'),
]
operations = [
migrations.AlterField(
model_name='victim',
name='method',
field=models.IntegerField(choices=[(1, 1), (2, 2)], default=2, help_text='Method of building candidate samplesets.'),
),
]
|
[
"dimit.karakostas@gmail.com"
] |
dimit.karakostas@gmail.com
|
ab57c4b29c3146737651116ef31d6aee8c1efed7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/658.py
|
25615d80a7676ddb94779696a868e8215c273d28
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
def dwar(NAOMI, KEN):
    """Score for 'deceitful war': points Naomi can guarantee with optimal lying.

    Behaviour-identical rewrite: Naomi always plays her weakest remaining
    block; Ken loses his weakest block when beaten, otherwise his strongest
    is spent answering the sacrifice.
    """
    mine = sorted(NAOMI)
    theirs = sorted(KEN)
    lo = 0                    # index of Ken's weakest unused block
    hi = len(theirs) - 1      # index of Ken's strongest unused block
    points = 0
    for block in mine:        # Naomi plays weakest-first, as in the original
        if block > theirs[lo]:
            points += 1
            lo += 1           # Ken's weakest is consumed
        else:
            hi -= 1           # sacrifice: Ken's strongest is consumed
    return points
def war(NAOMI, KEN):
    """Score for honest 'war': points Naomi wins playing strongest-first.

    Behaviour-identical rewrite: each turn Naomi plays her strongest
    remaining block; if it beats Ken's strongest, Ken answers with his
    weakest, otherwise Ken's strongest takes the trick.
    """
    mine = sorted(NAOMI)
    theirs = sorted(KEN)
    lo = 0                    # index of Ken's weakest unused block
    hi = len(theirs) - 1      # index of Ken's strongest unused block
    points = 0
    for block in reversed(mine):   # Naomi plays strongest-first
        if block > theirs[hi]:
            points += 1
            lo += 1           # Ken throws away his weakest block
        else:
            hi -= 1           # Ken's strongest wins the trick
    return points
# Driver for Google Code Jam "Deceitful War" (Python 2: note the print statement).
# Reads the downloaded input file and prints one answer line per test case.
f = open('D-large.in', 'r')
line1 = f.readline()
cases = int(line1)
for case in range(1,cases+1):
    # Each case: N, then N of Naomi's block masses, then N of Ken's.
    line = f.readline()
    N = int(line)
    line = f.readline()
    naomi_str = line.split()
    naomi = [float(naomi_str[i]) for i in range(N)]
    line = f.readline()
    ken_str = line.split()
    ken = [float(ken_str[i]) for i in range(N)]
    # Output format required by the judge: deceitful-war score, then war score.
    print "Case #"+str(case)+ ": " + str(dwar(naomi,ken)) + " " + str(war(naomi,ken))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0c2d777b143221ee4e5b6e86f30dff1da4b5cc8b
|
5b173a0e8a10a4f9ff0e3559cdaf8a211e239376
|
/scoring_360/migrations/0002_auto_20190408_1246.py
|
7d00471115c41ced34ee64497736eade78e325b8
|
[] |
no_license
|
Tur-4000/MTLCrewing
|
9eb6a59ab0d8d0362cae01459470eb4cd3e4f9b5
|
388029cc0fbf60dacd2118dcc80ce3ec3d77d07b
|
refs/heads/master
| 2022-05-02T11:05:30.205355
| 2019-04-08T14:05:19
| 2019-04-08T14:05:19
| 176,301,948
| 0
| 0
| null | 2022-03-11T23:42:58
| 2019-03-18T14:19:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 817
|
py
|
# Generated by Django 2.1.7 on 2019-04-08 09:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: links Ability360 to crewing ranks and re-points Question360.ability."""
    dependencies = [
        ('crewing', '0028_auto_20190408_1246'),
        ('scoring_360', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='ability360',
            name='ranks',
            field=models.ManyToManyField(related_name='abilities', to='crewing.Ranks', verbose_name='Должность'),
        ),
        migrations.AlterField(
            model_name='question360',
            name='ability',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='scoring_360.Ability360', verbose_name='Компетенция'),
        ),
    ]
|
[
"tur.4000@gmail.com"
] |
tur.4000@gmail.com
|
ba19fd6a44365f2760be541f3b378daf2577f033
|
d19731cd2271ea9cba5f07935964e798c8a4f73b
|
/modules/compiled/tests/test_condensate.py
|
5b23b7518e4fc9ccad8c03922c8295888b33cd79
|
[] |
no_license
|
jkiesele/HGCalML
|
3949532f6b3f6a501670ffb60cedf8474caa934c
|
5101b8cdca876a92a9dc87ceca9598bbbfaf980e
|
refs/heads/master
| 2023-07-25T03:21:36.973479
| 2021-10-19T08:12:04
| 2021-10-19T08:12:04
| 186,409,038
| 0
| 5
| null | 2020-02-27T15:00:55
| 2019-05-13T11:46:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
from condensate_op import BuildCondensates
import tensorflow as tf
from ragged_plotting_tools import make_cluster_coordinates_plot
import matplotlib.pyplot as plt
import numpy as np
import time
print('starting test')
# Synthetic input: 4000 vertices in 2 clustering coordinates, split into
# two equally sized "events" via row_splits.
n_vert=4000
n_ccoords=2
n_feat=3
soft=False
radius=0.7
betas = tf.random.uniform((n_vert,1), dtype='float32',minval=0.01 , maxval=0.1+1e-3,seed=2)
ccoords = 3.*tf.random.uniform((n_vert,n_ccoords), dtype='float32',seed=1)
row_splits = tf.constant([0,n_vert//2,n_vert], dtype='int32')
print('first call')
asso_idx, is_cpoint,n = BuildCondensates(ccoords=ccoords, betas=betas, row_splits=row_splits, radius=radius, min_beta=0.1, soft=soft)
#print(ccoords)
#print(asso_idx)
#print(is_cpoint)
#exit()
print('starting taking time')
t0 = time.time()
# NOTE(review): range(0) means the timing loop never executes, yet the
# elapsed time is divided by 100 — the reported 'op time' is effectively
# just overhead / ~0.  Presumably the loop count was once 100; confirm.
for _ in range(0):
    asso_idx, is_cpoint,n = BuildCondensates(ccoords=ccoords, betas=betas, row_splits=row_splits, radius=radius, min_beta=0.1, soft=soft)
totaltime = (time.time()-t0)/100.
print('op time', totaltime)
#exit()
#exit()
#print('betas',betas)
#print('ccoords',ccoords)
#print('summed_features',summed_features)
#print('asso_idx',asso_idx)
#print('n condensates', tf.unique(asso_idx))
def makecolours(asso):
    """Map each association id to its rank among the sorted unique ids, as floats."""
    # np.unique returns the sorted unique values, so the position of each
    # element within that array is exactly the colour index the original
    # masked-assignment loop produced.
    ranks = np.searchsorted(np.unique(asso), asso)
    return ranks.astype('float')
# Re-run the op at two radii and plot each event's cluster assignment.
for radius in [0.6, 1.3]:
    asso_idx, is_cpoint,n = BuildCondensates(ccoords=ccoords, betas=betas, row_splits=row_splits,
                                             radius=radius, min_beta=0.1, soft=soft)
    print('refs', np.unique(asso_idx))
    print('N',n)
    # One plot per event (row_splits delimits events).
    for i in range(len(row_splits)-1):
        truthHitAssignementIdx = np.array(asso_idx[row_splits[i]:row_splits[i+1]].numpy())
        ncond = n[i:i+1]
        print('N condensates', ncond.numpy())
        # +1 so colour 0 is never used (reserved/background in the plotter).
        truthHitAssignementIdx = makecolours(truthHitAssignementIdx)+1.
        predBeta = betas[row_splits[i]:row_splits[i+1]].numpy()
        #predBeta = np.ones_like(predBeta,dtype='float')-1e-2
        predCCoords = ccoords[row_splits[i]:row_splits[i+1]].numpy()
        fig = plt.figure(figsize=(5,4))
        ax = fig.add_subplot(111)
        make_cluster_coordinates_plot(plt, ax,
                                      truthHitAssignementIdx, #[ V ] or [ V x 1 ]
                                      predBeta, #[ V ] or [ V x 1 ]
                                      predCCoords, #[ V x 2 ]
                                      identified_coords=None,
                                      beta_threshold=0.1,
                                      distance_threshold=radius,
                                      cmap=None,
                                      noalpha=True
                                      )
        plt.show()
        #plt.savefig("plot_"+str(i)+"_rad_"+str(radius)+".pdf")
        # Free the figure explicitly to keep memory flat across iterations.
        fig.clear()
        plt.close(fig)
    #exit()
|
[
"jkiesele@cern.ch"
] |
jkiesele@cern.ch
|
acf208104074450f635a9fcd79eae320b3a08fc3
|
34a0d7fc7ec039cf57bf0a33bc5abb423dfdc97f
|
/s3_app.py
|
db98d4ba7e241119900adc70c77bb3ea1d0c6e02
|
[] |
no_license
|
jrieke/db-playbooks
|
422330a6e5cc6cebc105bcc704a265fc1164df65
|
9128b6bce9fa53c4ef89bc727f5b7b3e300b6904
|
refs/heads/main
| 2023-04-17T23:19:47.714623
| 2021-05-07T21:49:50
| 2021-05-07T21:49:50
| 357,368,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
import streamlit as st
import s3fs
import time
import os
start_time = time.time()
# Create connection object.
# `anon=False` means not anonymous, i.e. it uses auth to pull data.
fs = s3fs.S3FileSystem(anon=False)
# Bare f-string expressions are Streamlit "magic": the rendered value is
# written to the app page.
f"Creating fs took {time.time() - start_time} s"
start_time_read = time.time()
# Retrieve file contents.
# Cached for 10 minutes so repeated reruns don't re-download from S3.
@st.cache(ttl=600)
def read_file(filename):
    with fs.open(filename) as f:
        return f.read().decode("utf-8")
content = read_file("testbucket-jrieke/myfile.csv")
st.write(content)
# Print results.
# Each CSV line is expected to be "name,pet"; the :pet: form renders an emoji.
for line in content.strip().split("\n"):
    name, pet = line.split(",")
    st.write(f"{name} has a :{pet}:")
# for line in f:
#     name, pet = line.decode("utf-8").strip().split(",")
#     st.write(f"{name} has a :{pet}:")
f"Reading file took {time.time() - start_time_read} s"
|
[
"johannes.rieke@gmail.com"
] |
johannes.rieke@gmail.com
|
23d44eef4f0999f81021921c669b70a8abf10c82
|
daeb851843a55ca3f34008765ebf8ff18c0d1ecd
|
/LL/page_object.py
|
b343f01ab97ca8d9b5a5378d41005f0f05b768ae
|
[] |
no_license
|
Cola1995/Py
|
f5066df0ef5b60183f5e5e2ec6b77b2be536e7f8
|
bab82d8f5aae18e612dbe9bcd38f0d4260463b16
|
refs/heads/master
| 2020-04-08T06:30:36.212074
| 2018-12-27T06:14:53
| 2018-12-27T06:14:53
| 159,099,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,832
|
py
|
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
class Page(object):
    """Base page-object class: shared navigation helpers for concrete pages.

    Subclasses define a class attribute ``url`` (path relative to the site
    root) plus their own locators and actions.
    """
    # BUG FIX: the attribute was named `login` while the __init__ default
    # referenced the undefined name `login_url`, raising NameError as soon
    # as the class body was evaluated.  Renamed to match the usage.
    login_url = 'http://www.126.com'

    def __init__(self, selenium_driver, base_url=login_url):
        self.base_url = base_url        # site root used to build page URLs
        self.driver = selenium_driver   # shared WebDriver instance
        self.timeout = 30               # wait budget in seconds

    def on_page(self):
        """Return True when the browser is at this page's canonical URL."""
        return self.driver.current_url == (self.base_url + self.url)

    def _open(self, url):
        """Navigate to base_url + url and verify the landing page."""
        url = self.base_url + url
        self.driver.get(url)
        # BUG FIX: the original asserted the *bound method* (always truthy);
        # the check only works when the method is actually called.
        assert self.on_page(), 'Did not land on %s' % url

    def open(self):
        """Open this page via its class-level ``url`` attribute."""
        self._open(self.url)

    def find_element(self, *loc):
        """Delegate element lookup to the underlying driver."""
        return self.driver.find_element(*loc)
class LoginPage(Page):
    """Page object for the 126 mailbox sign-in form."""
    url = '/'
    # Locators for the sign-in widgets.
    username_loc = (By.ID, "idInput")
    password_loc = (By.ID, "pwdInput")
    submit_loc = (By.ID, "loginBtn")

    def type_username(self, username):
        """Fill the account-name input."""
        field = self.find_element(*self.username_loc)
        field.send_keys(username)

    def type_password(self, password):
        """Fill the password input."""
        field = self.find_element(*self.password_loc)
        field.send_keys(password)

    def submit(self):
        """Click the login button."""
        button = self.find_element(*self.submit_loc)
        button.click()
def test_user_login(driver, username, password):
    """Open the login page and submit the given credentials."""
    page = LoginPage(driver)
    page.open()
    page.type_username(username)
    page.type_password(password)
    page.submit()
def main():
    """Launch Chrome, log in to 126.com, and verify the signed-in account."""
    # Created outside try: if Chrome fails to launch there is no driver to
    # close, and the original would have raised a NameError in `finally`.
    driver = webdriver.Chrome()
    try:
        username = 'testingwtb'
        password = 'a123456'
        test_user_login(driver, username, password)
        # BUG FIX: the original called the undefined name `sleep`; the module
        # imports `time`, so the qualified call is required.
        time.sleep(3)
        text = driver.find_element_by_xpath("//span[@id='spnUid']").text
        # BUG FIX: the expected value was the literal 'username@126.com',
        # which can never match; the banner shows the account logged in.
        assert text == username + '@126.com', "用户名不匹配,登录失败"
    finally:
        driver.close()
if __name__=='__main__':
    main()
|
[
"991571566@qq.com"
] |
991571566@qq.com
|
7e27e21ec2be3f0cfe1472b07ba46412a460a797
|
759f52976ad2cd9236da561ca254e11e08003487
|
/part7/ex45/v2-replace-config/replacement_result.py
|
4adea4014216b0100cfdcd8bde50cb4b26d7dd94
|
[] |
no_license
|
mbaeumer/fiftyseven
|
57b571c3e09640a2ab0ed41e5d06643c12b48001
|
d79b603d5b37bf1f4127d9253f8526ea3897dc08
|
refs/heads/master
| 2020-06-10T20:52:25.311992
| 2017-11-15T18:28:38
| 2017-11-15T18:28:38
| 75,877,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
#!/usr/bin/python
class ReplacementResult:
    """Value object pairing a replacement config with the content it produced."""

    def __init__(self, config, content):
        self.config = config    # configuration that drove the replacement
        self.content = content  # resulting text after applying the config

    def __repr__(self):
        # Added for debuggability; no caller relies on the default repr.
        return "ReplacementResult(config=%r, content=%r)" % (self.config, self.content)
|
[
"martin.baeumer@gmail.com"
] |
martin.baeumer@gmail.com
|
7a3e609fcf05b7eca4cfe7b4769b1f7eb50fe39a
|
e3b5698bc6a63551e0f30dc958428f2dd548eada
|
/homework/webtoon/migrations/0002_auto_20180215_1109.py
|
a86e1b9aba0e782e7cc0e3731216de107e578db3
|
[] |
no_license
|
standbyme227/stopout
|
472021047af57a23c1a6c73db05d7c989e5baa16
|
c33981e9ca143cdf6fd9c93664d90a50a07b45a3
|
refs/heads/master
| 2021-04-29T20:51:33.550436
| 2018-02-16T06:47:04
| 2018-02-16T06:47:04
| 121,605,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
# Generated by Django 2.0.2 on 2018-02-15 11:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes Webtoon.img_url and Webtoon.week_webtoon optional (blank=True)."""
    dependencies = [
        ('webtoon', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='webtoon',
            name='img_url',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='webtoon',
            name='week_webtoon',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
[
"standbyme227@gmail.com"
] |
standbyme227@gmail.com
|
f7097802ca48e1dc7adb1d85d44f5f6130f7a905
|
85554f18eb2f4db5fa57326e55c84c91abd97fb2
|
/graphlayer/core.py
|
ba96c502660606fdb0bb2707941c6e4d2713b572
|
[
"BSD-2-Clause"
] |
permissive
|
mwilliamson/python-graphlayer
|
f6e58bd1e812aabb365a569d579ebcb5f8aaa51c
|
7fe773fb2e07028d5cfc1f30523ad17ba405c689
|
refs/heads/master
| 2023-05-12T03:37:36.607591
| 2023-01-14T21:26:33
| 2023-01-14T21:26:33
| 144,635,452
| 28
| 7
|
BSD-2-Clause
| 2023-01-14T21:27:20
| 2018-08-13T21:25:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
from . import iterables
def create_graph(resolvers):
    """Build a Graph from *resolvers* with an empty dependency mapping."""
    return define_graph(resolvers).create_graph({})
def define_graph(resolvers):
    """Return a reusable GraphDefinition wrapping *resolvers*."""
    return GraphDefinition(resolvers)
class GraphDefinition(object):
    """Mapping from query type to resolver, reusable to spawn many graphs."""
    def __init__(self, resolvers):
        # Nested lists/tuples of resolvers are accepted; flatten first,
        # then key each resolver by the query type it declares.
        flattened = _flatten(resolvers)
        self._resolvers = iterables.to_dict(
            (entry.type, entry) for entry in flattened
        )
    def create_graph(self, dependencies):
        """Instantiate a Graph bound to the given dependency mapping."""
        return Graph(self._resolvers, dependencies)
class Graph(object):
    """Dispatches queries to the resolver registered for their type."""
    def __init__(self, resolvers, dependencies):
        self._resolvers = resolvers
        self._injector = Injector(dependencies)
    def resolve(self, *args, type=None):
        """Resolve a query; the type defaults to the first argument's .type."""
        query_type = args[0].type if type is None else type
        handler = self._resolvers.get(query_type)
        if handler is None:
            raise GraphError("could not find resolver for query of type: {}".format(query_type))
        return self._injector.call_with_dependencies(handler, self, *args)
class Injector(object):
    """Holds dependency values and injects them as keyword arguments."""
    def __init__(self, dependencies):
        # Copy so later mutation of the caller's mapping has no effect;
        # the injector registers itself under its own class as key.
        self._dependencies = dict(dependencies)
        self._dependencies[Injector] = self
    def get(self, key):
        """Return the value registered for *key* (raises KeyError if absent)."""
        return self._dependencies[key]
    def call_with_dependencies(self, func, *args, **kwargs):
        """Call *func*, appending keyword args declared via @dependencies."""
        declared = getattr(func, "dependencies", dict())
        injected = iterables.to_dict(
            (arg_name, self.get(dependency_key))
            for arg_name, dependency_key in declared.items()
        )
        return func(*args, **kwargs, **injected)
def _flatten(value):
if isinstance(value, (list, tuple)):
return [
subelement
for element in value
for subelement in _flatten(element)
]
else:
return [value]
def resolver(type):
    """Decorator factory: tag the decorated function with the query *type* it resolves."""
    def _attach(func):
        func.type = type
        return func
    return _attach
def dependencies(**kwargs):
    """Decorator factory declaring keyword-name -> dependency-key bindings."""
    def _attach(func):
        func.dependencies = kwargs
        return func
    return _attach
class GraphError(Exception):
    """Raised when a query cannot be resolved (e.g. no resolver registered for its type)."""
    pass
|
[
"mike@zwobble.org"
] |
mike@zwobble.org
|
7b15c8ad37a0ad03a79f07873b58187b10f10f51
|
38d93c5fd72fee380ec431b2ca60a069eef8579d
|
/Baekjoon,SWEA, etc/SWEA/SWEA2105.py
|
4e3c17854fc9101aa88779572cadeffd409f6287
|
[] |
no_license
|
whgusdn321/Competitive-programming
|
5d1b681f5bee90de5678219d91cd0fa764476ddd
|
3ff8e6b1d2facd31a8210eddeef851ffd0dce02a
|
refs/heads/master
| 2023-01-01T01:34:22.936373
| 2020-10-24T11:05:08
| 2020-10-24T11:05:08
| 299,181,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,511
|
py
|
"""
이 문제는 상당히 복잡하다. 약 3시간에 걸쳐서 푼 문제로, 한번 다시 풀어볼 필요성이 굉장히 크다.
생각해야 할 부분이 많아서 문제 자체가 복잡하다..
이 문제를 다 풀고, 문제를 풀기위해서 생각해야 하는 부분들을 다시한번 정리해보았다.
1.가는 방향은 어딘데? -> 원래 기초적인 DFS/BFS문제들은 대부분 상,하,좌,우 만 나와서 거기에 항상 익숙해져 있다. 하지만 이 문제는
기본적으로 가는방향이 다르다. 따라서 우선은 가는방향(마름모꼴을 이루면서, 나중에 합쳐지게 dfs가 돌게) 잘 구현을 해야 한다는 것을 알 수 있다.
1-1)그리고 가면서 문을 열어둔다.(return되고 다음줄에 또 함수 호출)
2.go(specific_i, specific_j, dy, dx, visited, sofar)는 그래서 불려서 뭘 하는데?
-> 이것을 그림(maap)과 연동시켜서 약간 이미지를 그리면서 도데체 이 함수가 call되었을때 무엇을 하는지, 즉 특정한 순간에 무엇을 하는지! 약간 느낌을 가지자.
-> 이 함수는 다음에 갈 곳이 갈 수 있다면(범위안에 들고, 중복 방문이 아닌경우), visited에 추가시켜 준다.
3. 어떤경우에 종료조건이 되는가? 그리고 종료되면서 visited, sofar는 각 함수에서 pop하는데, 함수가 return이 되면서 visited, sofar의 배열들이 어떻게 변하는지 잘 알아보자.
->개인적으로 이부분이 가장 힘들었다. 일반적으로 문을 열어두어 모든 가능한 경로를 방문하는 dfs방식은, 가기전에 visited에 추가하고 return 조건에서 pop을 해서 하는
사전지식이 있었다. 그래서 약간 감과 이러한 지식을 이용하여 코드를 짜서, 어찌어찌 맞았다. 하지만 이부분에 대하여 약간 보강을 해야할 것 같다.
"""
def go(i, j, dy, dx, visited, sofar):
    """DFS that grows a closed rhombus-shaped path on the global grid `maap`.

    (dy, dx) encodes the phase: (1, 1) extending south-east, (1, -1)
    extending south-west, (-1, -1) retracing north to close the loop.
    `visited` collects the *cell values* on the path (all must be distinct),
    `sofar` records the 'SE'/'SW' moves taken on the way down.  Successful
    closed paths append their length to the global `results`.
    """
    global N, maap, results
    if (dy, dx) == (1, 1):
        # Phase 1: keep going south-east, or pivot into the south-west phase.
        next1 = (i+1, j+1)
        next2 = (i+1, j-1)
        if 0<=next1[0]<N and 0<=next1[1]<N and maap[next1[0]][next1[1]] not in visited:
            visited.append(maap[next1[0]][next1[1]])
            sofar.append('SE')
            go(next1[0], next1[1], 1,1, visited, sofar)
            sofar.pop()
            visited.pop()
        # `visited` must be non-empty: at least one SE step before turning.
        if visited and 0 <= next2[0] < N and 0 <= next2[1] <N and maap[next2[0]][next2[1]] not in visited:
            visited.append(maap[next2[0]][next2[1]])
            sofar.append('SW')
            go(next2[0], next2[1], 1, -1, visited, sofar)
            sofar.pop()
            visited.pop()
    elif (dy, dx) == (1, -1):
        # Phase 2: keep going south-west, or (if a NW cell exists) try to
        # close the rhombus by switching into the retrace phase.
        next1 = (i+1, j-1)
        next2 = (i-1, j-1)
        if 0<=next1[0]<N and 0<=next1[1]<N and maap[next1[0]][next1[1]] not in visited:
            visited.append(maap[next1[0]][next1[1]])
            sofar.append('SW')
            go(next1[0], next1[1], 1, -1, visited, sofar)
            sofar.pop()
            visited.pop()
        if 0<=next2[0]<N and 0<=next2[1] <N and maap[next2[0]][next2[1]] not in visited:
            go(i, j, -1, -1, visited, sofar)
    elif (dy, dx) == (-1, -1):
        # Phase 3: mirror the downward moves back up (SE becomes NW,
        # SW becomes NE); succeed only if every retraced cell is fresh.
        boool = True
        next = [i, j]
        _N = len(visited)   # rollback point for the cells added below
        # NOTE(review): the loop variable shadows the parameter `i`; the
        # position is tracked in `next`, so the shadowing is harmless here.
        for i in range(len(sofar)):
            if sofar[i] == 'SE':
                next = [next[0]-1, next[1]-1]
                if 0<=next[0]<N and 0<=next[1]<N and maap[next[0]][next[1]] not in visited:
                    visited.append(maap[next[0]][next[1]])
                else:
                    boool = False
                    break
            else:
                next = [next[0]-1, next[1]+1]
                if 0<=next[0]<N and 0<=next[1]<N and maap[next[0]][next[1]] not in visited:
                    visited.append(maap[next[0]][next[1]])
                else:
                    boool = False
                    break
        if boool:
            # Closed successfully: record the total number of cells used.
            results.append(len(visited))
        # Undo the cells appended during the retrace before returning.
        while len(visited) != _N:
            visited.pop()
        return
# Driver: for each test case read an N x N grid, try every start cell as the
# top corner of a rhombus, and print the longest closed path found (-1 if none).
test_cases = int(input())
for test_case in range(1, test_cases + 1):
    N = int(input())
    maap = []
    for _ in range(N):
        temp = [int(a) for a in input().split()]
        maap.append(temp)
    results = []
    for y in range(N):
        for x in range(N):
            go(y, x, 1, 1, [], [])
    if results:
        print('#{} {}'.format(test_case, max(results)))
    else:
        print('#{} {}'.format(test_case, -1))
|
[
"blackgoldace@naver.com"
] |
blackgoldace@naver.com
|
6f408f78f4985ecc1686c686582bc51783bc1df1
|
e81e09cdc62dcd196020dc01b07adc24faaee582
|
/czsc/trader.py
|
b12513bed9ae6a6c6df4c34877ae940e6a8b09a8
|
[
"MIT"
] |
permissive
|
dingfengqian/czsc
|
41cbd4155245ae8a50b51af485ae5516fea94156
|
00bfd91f0a0e27533cdd45b83b2fec197056b4b3
|
refs/heads/master
| 2023-02-27T18:58:48.810874
| 2021-01-28T11:48:56
| 2021-01-28T11:48:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,444
|
py
|
# coding: utf-8
import pandas as pd
from datetime import datetime
from .factors import KlineGeneratorBy1Min, CzscFactors
from .data.jq import get_kline
from .enum import FdNine, FdFive, FdSeven, FdThree, Factors
class CzscTrader:
    """CZSC (Chan theory) stock selection / market-timing helper."""
    def __init__(self, symbol, max_count=1000, end_date=None):
        """
        :param symbol: instrument code to analyse
        :param max_count: number of bars to fetch per frequency
        :param end_date: analysis cut-off; defaults to now when omitted
        """
        self.symbol = symbol
        if end_date:
            self.end_date = pd.to_datetime(end_date)
        else:
            self.end_date = datetime.now()
        self.max_count = max_count
        self.__generate_factors()
        self.freqs = ['1分钟', '5分钟', '30分钟', '日线']
    def __generate_factors(self):
        """Fetch k-lines at all frequencies and build the CZSC factor set."""
        symbol = self.symbol
        max_count = self.max_count
        end_date = self.end_date
        kg = KlineGeneratorBy1Min(max_count=max_count*2, freqs=['1分钟', '5分钟', '15分钟', '30分钟', '日线'])
        k1min = get_kline(symbol, end_date=end_date, freq="1min", count=max_count)
        k5min = get_kline(symbol, end_date=end_date, freq="5min", count=max_count)
        k15min = get_kline(symbol, end_date=end_date, freq="15min", count=max_count)
        k30min = get_kline(symbol, end_date=end_date, freq="30min", count=max_count)
        kd = get_kline(symbol, end_date=end_date, freq="D", count=max_count)
        kg.init_kline("1分钟", k1min)
        kg.init_kline("5分钟", k5min)
        kg.init_kline("15分钟", k15min)
        kg.init_kline("30分钟", k30min)
        kg.init_kline("日线", kd)
        kf = CzscFactors(kg, max_count=max_count)
        self.kf = kf
        self.s = kf.s
        self.end_dt = self.kf.end_dt
    def run_selector(self):
        """Run stock selection; larger-timeframe opportunities are reported first."""
        s = self.s
        # Daily right-side long factors take priority over everything else.
        if s['日线右侧多头因子'] in [Factors.DLA1.value, Factors.DLA2.value, Factors.DLA3.value, Factors.DLA4.value]:
            return s['日线右侧多头因子']
        ka = self.kf.kas['30分钟']
        # Highest stroke high among the last 10 strokes on the 30-min chart.
        max_high = max([x.high for x in ka.bi_list[-10:]])
        # third_bs = ["三买A1", "三买B1", "三买C1", "三买D1"]
        if "三买" in s['30分钟_第N笔的五笔形态']:
            if s['1分钟_第N笔的七笔形态'] == FdSeven.L3A1.value:
                return "30分钟第三买点且BaA式右侧底A"
            elif max_high == ka.bi_list[-2].high:
                return "30分钟第三买点且第4笔创近9笔新高"
            else:
                return "30分钟第三买点"
        # nine_values = [x.value for x in FdNine.__members__.values() if x.name[0] in ["L", "S"]]
        # seven_values = [x.value for x in FdSeven.__members__.values() if x.name[0] in ["L", "S"]]
        # if s['30分钟_第N笔的七笔形态'] in seven_values:
        #     return "30分钟_第N笔的七笔形态_{}".format(s['30分钟_第N笔的七笔形态'])
        # if s['30分钟_第N笔的九笔形态'] in nine_values:
        #     return "30分钟_第N笔的九笔形态_{}".format(s['30分钟_第N笔的九笔形态'])
        return "other"
    def run_history(self):
        """Study third-buy-point moves that have already completed."""
        s = self.s
        if "三买" in s['30分钟_第N-2笔的五笔形态']:
            return "30分钟第N-2笔第三买点"
        return "other"
    def take_snapshot(self, file_html, width="1400px", height="680px"):
        """Render the current factor charts to a standalone HTML file."""
        self.kf.take_snapshot(file_html, width, height)
|
[
"zeng_bin8888@163.com"
] |
zeng_bin8888@163.com
|
a4ca3c5c84201afc3162db257b96a975c1498516
|
85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b
|
/api/tacticalrmm/checks/migrations/0013_auto_20210110_0505.py
|
abfbe322f1a66fb00b4864d1b01ac60ec64e5721
|
[
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sadnub/tacticalrmm
|
a4ecaf994abe39244a6d75ed2166222abb00d4f4
|
0af95aa9b1084973642da80e9b01a18dcacec74a
|
refs/heads/develop
| 2023-08-30T16:48:33.504137
| 2023-04-10T22:57:44
| 2023-04-10T22:57:44
| 243,405,684
| 0
| 2
|
MIT
| 2020-09-08T13:03:30
| 2020-02-27T01:43:56
|
Python
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
# Generated by Django 3.1.4 on 2021-01-10 05:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: makes CheckHistory.y a nullable positive integer."""
    dependencies = [
        ("checks", "0012_auto_20210110_0503"),
    ]
    operations = [
        migrations.AlterField(
            model_name="checkhistory",
            name="y",
            field=models.PositiveIntegerField(null=True),
        ),
    ]
|
[
"josh@torchlake.com"
] |
josh@torchlake.com
|
616e64a571f1f9e005417d5543fe7d7c7e3e386b
|
47e819cb96e0e9f45419960d30753f027047c224
|
/stockSpider/stockSpider/spiders/stockSpider_Mysql.py
|
07da58c3433aa9dc81c8f259d7e65b28dc803254
|
[] |
no_license
|
1141938529/ClassExercises
|
3600d75aa559b6938f3efb5831dbec803489cac0
|
0082e81a6cded1c8332c4a8d79a4104bfd8ab1c9
|
refs/heads/master
| 2021-05-08T16:38:30.391977
| 2018-02-04T13:10:45
| 2018-02-04T13:10:45
| 120,165,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,442
|
py
|
import scrapy
import time
from stockSpider.items import stockItem
class StockSpider(scrapy.Spider):
    """Scrapy spider for margin-trading data on stock.10jqka.com.cn.

    `parse` collects the per-stock links from the landing page, then
    `get_stock_data` scrapes up to 4 pages of the data table per stock.
    """
    name = 'stockspider2'
    start_urls = ['http://stock.10jqka.com.cn/']
    # NOTE(review): a single class-level item instance is reused for every
    # yielded row, so items may alias each other downstream — verify the
    # configured pipeline copies/persists before the next assignment.
    stock_data = stockItem()
    custom_settings = {
        'ITEM_PIPELINES': {'stockSpider.pipelines.StockPipeline': 100, },
    }
    def parse(self, response):
        """Extract the stock links from the landing page and queue detail requests."""
        stock_list = response.xpath("//div[@id='rzrq']//table[@class='m-table']/tbody/tr/td[2]/a")
        for stock in stock_list:
            stock_name = stock.xpath("./text()").extract()[0]
            link = stock.xpath("./@href").extract()[0]
            stock_id = link.split('/')[-2]
            # print(stock_name + '===============' + link)
            # with open("D://PyDownload//stockdata//" + stock_name + '.txt', mode='w', encoding='utf8'):
            #     pass
            # Pagination state travels in request meta; page index starts at 2.
            yield scrapy.Request(url=link,
                                 callback=self.get_stock_data,
                                 meta={'stock_name': stock_name, 'base_url': link, 'index': 2})
        pass
    def get_stock_data(self, response):
        """Scrape one data-table page for a stock, then request the next page (max index 4)."""
        print(response.url)
        # Crude politeness delay between page fetches.
        time.sleep(3)
        data_table = response.xpath("//table[@class='m-table']/tbody/tr")
        # stock_id = response.url.split('/')[6]
        stock_name = response.meta['stock_name']
        for row in data_table:
            # Columns, in order: seq no, trade date, margin balance, buys,
            # repayments, net buys, short-sale fields, total balance.
            data_list = row.xpath("./td/text()").extract()
            self.stock_data['xuhao'] = data_list[0].strip()
            self.stock_data['jysj'] = data_list[1].strip()
            self.stock_data['rz_ye'] = data_list[2].strip()
            self.stock_data['rz_mr'] = data_list[3].strip()
            self.stock_data['rz_ch'] = data_list[4].strip()
            self.stock_data['rz_jmr'] = data_list[5].strip()
            self.stock_data['rq_yl'] = data_list[6].strip()
            self.stock_data['rq_mc'] = data_list[7].strip()
            self.stock_data['rq_ch'] = data_list[8].strip()
            self.stock_data['rq_jmc'] = data_list[9].strip()
            self.stock_data['rzqye'] = data_list[10].strip()
            yield self.stock_data
        index = response.meta['index']
        if index > 4:
            return
        url_str = response.meta['base_url'] + "order/desc/page/" + str(index) + "/ajax/1/"
        yield scrapy.Request(url=url_str, callback=self.get_stock_data, meta={
            'stock_name': stock_name, 'base_url': response.meta['base_url'], 'index': index + 1
        })
|
[
"1141938529@qq.com"
] |
1141938529@qq.com
|
ffc0214e985810cf932673154e187b4d8ef072ba
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_remedy/tests/mocks/datatable_mock.py
|
6afc05d176fb51eb4f60e7d1d4634a555752613f
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297
| 2023-06-23T16:33:58
| 2023-06-23T16:33:58
| 101,410,006
| 81
| 107
|
MIT
| 2023-03-29T20:40:31
| 2017-08-25T14:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,534
|
py
|
# (c) Copyright IBM Corp. 2021. All Rights Reserved.
from pytest_resilient_circuits import BasicResilientMock, resilient_endpoint
import requests_mock
import json
import six
class DTResilientMock(BasicResilientMock):
    """Mock Resilient server that adds DataTable endpoints for testing.

    Inherits the common endpoints from BasicResilientMock so callers do not
    need to register that mock separately, and adds GET/DELETE/PUT handlers
    for the ``mock_data_table`` data table.

    :param BasicResilientMock: base mock covering the most common endpoints
    :type BasicResilientMock: object
    """
    # Fixture rows returned by the GET endpoint.
    mock_data_table_rows = [
        {
            "dt_col_id": 3001,
            "dt_col_name": "Joe Blogs",
            "dt_col_email": "joe@example.com",
            "dt_col_status": "In Progress"
        },
        {
            "dt_col_id": 3002,
            "dt_col_name": "Mary Blogs",
            "dt_col_email": "mary@example.com",
            "dt_col_status": "In Progress"
        },
        {
            "dt_col_id": 3003,
            "dt_col_name": "Mary Blogs",
            "dt_col_email": "mary@example.com",
            "dt_col_status": "Active"
        }
    ]
    # Row returned by the PUT (update) endpoint.
    mock_data_table_updated_rows = [{
        "dt_col_id": 3002,
        "dt_col_name": "Mary Blogs",
        "dt_col_email": "mary@example.com",
        "dt_col_status": "Complete"
    }]
    # Canonical success payload for the DELETE endpoint.
    mock_success_delete = {
        'message': None,
        'hints': [],
        'success': True,
        'title': None
    }
    @staticmethod
    def format_datatable_row(row, row_id):
        """Wrap a plain dict row in the Resilient cell structure for *row_id*."""
        formatted_row = {}
        for key, value in row.items():
            formatted_row[key] = {
                "row_id": row_id,
                "id": key,
                "value": value
            }
        return {"id": row_id, "cells": formatted_row}
    @staticmethod
    def get_datatable_rows(rows):
        """Format *rows* with sequential row ids starting at 1."""
        row_id = 0
        return_rows = []
        for row in rows:
            row_id += 1
            return_rows.append(
                DTResilientMock.format_datatable_row(row, row_id))
        return return_rows
    @resilient_endpoint("GET", r"/incidents/[0-9]+/table_data/mock_data_table\?handle_format=names$")
    def mock_datatable_get(self, request):
        """ Handle GET request for mock_data_table """
        data = {"rows": DTResilientMock.get_datatable_rows(
            self.mock_data_table_rows)}
        return requests_mock.create_response(request,
                                             status_code=200,
                                             content=six.b(json.dumps(data)))
    @resilient_endpoint("DELETE", r"/incidents/[0-9]+/table_data/mock_data_table/row_data/[0-9]\?handle_format=names$")
    def mock_datatable_delete_row(self, request):
        """ Handle DELETE request for mock_data_table """
        data = self.mock_success_delete
        return requests_mock.create_response(request,
                                             status_code=200,
                                             content=six.b(json.dumps(data)))
    @resilient_endpoint("PUT", r"/incidents/[0-9]+/table_data/mock_data_table/row_data/2\?handle_format=names$")
    def mock_datatable_put(self, request):
        """ Handle PUT request for mock_data_table """
        data = DTResilientMock.get_datatable_rows(
            self.mock_data_table_updated_rows)[0]
        return requests_mock.create_response(request,
                                             status_code=200,
                                             content=six.b(json.dumps(data)))
|
[
"shane.curtin@ie.ibm.com"
] |
shane.curtin@ie.ibm.com
|
3fa858904b5ef254d2669f70c272e376e8302d88
|
cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1
|
/xlsxwriter/test/comparison/test_print_options02.py
|
0c43e2d0203b0635a207340a0ab2f4eec5d330e6
|
[
"BSD-2-Clause"
] |
permissive
|
glasah/XlsxWriter
|
bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec
|
1e8aaeb03000dc2f294ccb89b33806ac40dabc13
|
refs/heads/main
| 2023-09-05T03:03:53.857387
| 2021-11-01T07:35:46
| 2021-11-01T07:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Compare against the Excel-produced reference workbook; printer
        # binaries and page-setup elements differ legitimately, so they
        # are excluded from the comparison.
        self.set_filename('print_options02.xlsx')
        self.ignore_files = ['xl/printerSettings/printerSettings1.bin',
                             'xl/worksheets/_rels/sheet1.xml.rels']
        self.ignore_elements = {'[Content_Types].xml': ['<Default Extension="bin"'],
                                'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with print options."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # The print option under test: centre the sheet horizontally.
        worksheet.center_horizontally()
        worksheet.write('A1', 'Foo')
        workbook.close()
        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
c7b2ada29b4f10fd565b5a55c71e32c9fae15022
|
536584e323161a97db79453b1aa192c89979a755
|
/tests/fixtures/defxmlschema/chapter21/example21083.py
|
7c2028b325e23a198f25c7a11efe69dc5736746a
|
[
"MIT"
] |
permissive
|
brunato/xsdata
|
c792ae2749afb4ac5704f77b138f0f871002100c
|
dfa84cfd6be5373c929f5bc4178fe1fb7b7d2e3b
|
refs/heads/master
| 2022-04-13T09:59:57.008871
| 2020-04-04T09:01:42
| 2020-04-04T09:06:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class CustomerType:
    """Customer record with a single required ``name`` element.

    :ivar name: the customer's name, bound to an XML element in the
        http://datypic.com/all namespace.
    """
    name: Optional[str] = field(
        default=None,
        metadata={
            "name": "name",
            "type": "Element",
            "namespace": "http://datypic.com/all",
            "required": True,
        },
    )
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
3c56f6119b20f0df091f3ba81613185aebbd9d45
|
c54a6b935007f71c7e5fba8caf590d948cfa65cf
|
/contributors/models.py
|
e601c727116d75ebc1ba8892eaa77e69cb6c65a7
|
[] |
no_license
|
nescode/contributor
|
6896de8fb0edaf8c1054e8a49958d3373f9aacec
|
18383e86309e5f1db221cda5d09b02bf98bad623
|
refs/heads/master
| 2023-08-07T02:40:18.214732
| 2020-07-07T06:44:22
| 2020-07-07T06:44:22
| 262,589,144
| 0
| 3
| null | 2021-09-22T18:59:30
| 2020-05-09T14:31:18
|
Python
|
UTF-8
|
Python
| false
| false
| 601
|
py
|
from django.db import models
# (stored value, display label) pairs for the `role` field.
# NOTE(review): the ' designer' label carries a stray leading space —
# presumably accidental; fixing it would alter stored display output,
# so it is only flagged here.
ROLE_CHOICES = (
    ('developer', 'developer'),
    ('designer', ' designer'),
    ('documentation team', 'documentation team'),
)
class CreateContributor(models.Model):
    """A contributor application: contact details, resume upload and desired role."""
    name = models.CharField(max_length=255)
    email = models.EmailField(max_length=255)
    phone = models.CharField(max_length=13)
    # Uploaded files land in MEDIA_ROOT/resumes.
    resume = models.FileField(upload_to='resumes')
    # Flipped once the application results in an actual contribution.
    contributed = models.BooleanField(default=False)
    role = models.CharField(choices=ROLE_CHOICES, max_length=25)
    about = models.CharField(max_length=255)
    def __str__(self):
        return self.name
|
[
"dilipsapkota.d@gmail.com"
] |
dilipsapkota.d@gmail.com
|
361d6992dff363f16c32d032157aec6bf96b77e7
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/FCU_Discharge_Fan_VFD_Speed_Status.py
|
67d7b76a4f9139b1e9c6dea660ff014753e989cf
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.FCU_Fan_Status import FCU_Fan_Status
from brick.brickschema.org.schema._1_0_2.Brick.Discharge_Fan_VFD_Speed_Status import Discharge_Fan_VFD_Speed_Status
from brick.brickschema.org.schema._1_0_2.Brick.Supply_Fan_VFD_Speed_Status import Supply_Fan_VFD_Speed_Status
class FCU_Discharge_Fan_VFD_Speed_Status(FCU_Fan_Status,Discharge_Fan_VFD_Speed_Status,Supply_Fan_VFD_Speed_Status):
    """Generated rdfalchemy binding for the Brick 1.0.2 class of the same name."""
    # rdf_type pins instances to the Brick schema URI for this class.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').FCU_Discharge_Fan_VFD_Speed_Status
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
29cddd507b018e9e9a65049588042e6995ffd44f
|
8d90e2eae476ecbe88d46ef2f03fe7ba92cc733b
|
/Programming Basics with Python/For-cycle/For_C_lab_ex9_left_right_sum.py
|
04d51d4f9ce78c72726b3f293a70e9907e509856
|
[] |
no_license
|
KaterinaMutafova/SoftUni
|
c3f8bae3c2bf7bd4038da010ca03edc412672468
|
7aeef6f25c3479a8d677676cb1d66df20ca0d411
|
refs/heads/main
| 2023-03-08T10:53:49.748153
| 2021-02-19T15:55:13
| 2021-02-19T15:55:13
| 317,597,660
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
# Read 2n numbers: the first n form the left half, the next n the right half.
n = int(input())
left_sum = sum(int(input()) for _ in range(n))
right_sum = sum(int(input()) for _ in range(n))
# Report equality of the two halves, or their absolute difference.
if left_sum == right_sum:
    print(f"Yes, sum = {left_sum}")
else:
    diff = abs(left_sum - right_sum)
    print(f"No, diff = {diff}")
|
[
"noreply@github.com"
] |
KaterinaMutafova.noreply@github.com
|
15b34b27bee228e2af26d52019fb4474cccf8260
|
ef8b6298ae5d247c65454eeeb7b1198b18fe5580
|
/CodingDojangPython/turtle_graphics_8.py
|
b1a6b1cd79d1f51ce5914ab864c944f2c257fb85
|
[] |
no_license
|
Supreme-YS/PythonWorkspace
|
01728fc06b9f1ce9d48de8ab662c0b528c27df1d
|
9b1d17c805e45285eb3594da84df02939cab2a85
|
refs/heads/master
| 2023-07-28T16:44:36.930819
| 2021-09-06T14:03:50
| 2021-09-06T14:03:50
| 289,924,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import turtle as t
t.shape('turtle')
t.speed('fastest') # set the turtle to its fastest drawing speed
for i in range(300): # repeat 300 times
    t.forward(i) # move forward by i; the segment grows each iteration
    t.right(91) # turn right by 91 degrees (near-square spiral)
|
[
"dudtjr1225@gmail.com"
] |
dudtjr1225@gmail.com
|
fd6d861b9e060cf2c1d270f546716f557e8a67da
|
aab5143d8ee3ba15c9367b146287b7d5a6a41052
|
/tests/command_download_aoj.py
|
53a02cb26d8c3e6fc67e0fa6750e8afed449ff91
|
[
"MIT"
] |
permissive
|
kfaRabi/online-judge-tools
|
fc1e5796446a3f1b729507e018203aeff3193e3f
|
79de8d37e1aa78a7c4c82c6a666f1f1602caf545
|
refs/heads/master
| 2020-04-23T19:49:57.394790
| 2019-02-25T20:43:43
| 2019-02-26T09:15:27
| 171,419,052
| 0
| 0
|
MIT
| 2019-02-19T06:34:26
| 2019-02-19T06:34:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,208
|
py
|
import os
import unittest
import tests.command_download
class DownloadAOJTest(unittest.TestCase):
    """Integration tests for the ``download`` command against Aizu Online Judge.

    Each test downloads the sample cases of one AOJ problem and compares the
    fetched files against known-good MD5 digests (the dict values below).

    NOTE(review): these tests hit the live judge.u-aizu.ac.jp service, so they
    need network access and will break if AOJ ever changes its sample data.
    """

    def snippet_call_download(self, *args, **kwargs):
        # Thin wrapper so each test method can invoke the shared helper
        # with this TestCase instance as the assertion context.
        tests.command_download.snippet_call_download(self, *args, **kwargs)

    def test_call_download_aoj_DSL_1_A(self):
        self.snippet_call_download('http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=DSL_1_A', {
            'sample-1.in': 'cb3a243a13637cddedf245cd0f6eab86',
            'sample-1.out': '29cc7a34bb5a15da3d14ef4a82a4c530',
        })

    def test_call_download_aoj_0100(self):
        self.snippet_call_download('http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=0100', {
            'sample-1.in': '4f0f7b3b0b73c97c5283395edde3dbe8',
            'sample-1.out': '26d3b085a160c028485f3865d07b9192',
        })

    def test_call_download_aoj_1371(self):
        # Problem with seven sample cases; the helper checks every file.
        self.snippet_call_download('http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=1371', {
            'sample-6.in': '3521658c02c291ad5a4e5cbaa3cb0260',
            'sample-2.out': 'b026324c6904b2a9cb4b88d6d61c81d1',
            'sample-3.in': 'b9775d52323c110b406d53b9805cee01',
            'sample-3.out': '6d7fce9fee471194aa8b5b6e47267f03',
            'sample-1.out': '897316929176464ebc9ad085f31e7284',
            'sample-5.in': '0b06c70869a30733379a72e2a8c03758',
            'sample-4.out': 'b026324c6904b2a9cb4b88d6d61c81d1',
            'sample-7.out': '897316929176464ebc9ad085f31e7284',
            'sample-6.out': 'b026324c6904b2a9cb4b88d6d61c81d1',
            'sample-5.out': '897316929176464ebc9ad085f31e7284',
            'sample-2.in': 'f3c536f039be83a4ef0e8f026984d87d',
            'sample-1.in': '56092c4794d713f93d2bb70a66aa6ca1',
            'sample-4.in': '318d4b3abfa30cc8fad4b1d34430aea3',
            'sample-7.in': 'dcac31a5a6542979ce45064ab0bfa83d',
        })

    def test_call_download_aoj_2256(self):
        # URL includes the &lang=jp variant of the problem page.
        self.snippet_call_download('http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=2256&lang=jp', {
            'sample-1.in': 'c89817f1ee0b53209d66abc94e457f7f',
            'sample-1.out': 'b9c2c5761360aad068453f4e64dd5a4e',
        })

    def test_call_download_aoj_2310(self):
        self.snippet_call_download('http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=2310&lang=jp', {
            'sample-1.in': '27ed9e879684b438fa6cc80c4261daf7',
            'sample-1.out': '48a24b70a0b376535542b996af517398',
            'sample-2.in': 'bb84849858ca512e14e071e25120ed78',
            'sample-2.out': '6d7fce9fee471194aa8b5b6e47267f03',
            'sample-3.in': '4c4ae7fb491ec5c6ad57d9d5711e44a6',
            'sample-3.out': '9ae0ea9e3c9c6e1b9b6252c8395efdc1',
            'sample-4.in': 'ad1109594a97eabe9bee60a743006de7',
            'sample-4.out': '84bc3da1b3e33a18e8d5e1bdd7a18d7a',
            'sample-5.in': 'b80447e0bc0c4ecc6fb3001b6a4e79f6',
            'sample-5.out': 'c30f7472766d25af1dc80b3ffc9a58c7',
        })

    def test_call_download_aoj_2511(self):
        self.snippet_call_download('http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=2511', {
            'sample-1.in': '0483a0080de977d5e1db1ab87eae3fa9',
            'sample-1.out': '346ce6367eff6bb3c9915601f2ae1e75',
        })

    def test_call_download_aoj_DSL_3_B(self):
        # Newer onlinejudge.u-aizu.ac.jp course-style URL scheme.
        self.snippet_call_download('https://onlinejudge.u-aizu.ac.jp/courses/library/3/DSL/3/DSL_3_B', {
            'sample-1.in': '36adbcbb268e04ef7667fb2d965eed2c',
            'sample-1.out': '26ab0db90d72e28ad0ba1e22ee510510',
            'sample-2.in': '89a4280a03ec0001ec91f3fedbafadc1',
            'sample-2.out': '6d7fce9fee471194aa8b5b6e47267f03',
            'sample-3.in': '22d823cf994ebee157a0cdc8219a600d',
            'sample-3.out': '897316929176464ebc9ad085f31e7284',
        })

    def test_call_download_aoj_2394(self):
        # Challenge-archive URL scheme (JAG contest source).
        self.snippet_call_download('https://onlinejudge.u-aizu.ac.jp/challenges/sources/JAG/Spring/2394?year=2011', {
            'sample-1.in': '05dfaf25ae93e601a10cfb278db7679c',
            'sample-1.out': '80982df7f6dac58f828e2e8b12e87a0a',
        })

    def test_call_download_aoj_system_ITP1_1_B(self):
        # is_system=True downloads the judge's full system test data,
        # which uses plain "N.in"/"N.out" file names.
        self.snippet_call_download(
            'http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_1_B', {
                '1.in': 'b026324c6904b2a9cb4b88d6d61c81d1',
                '1.out': 'b026324c6904b2a9cb4b88d6d61c81d1',
                '2.in': '6d7fce9fee471194aa8b5b6e47267f03',
                '2.out': '66a7c1d5cb75ef2542524d888fd32f4a',
                '3.in': '9caff0735bc6e80121cedcb98ca51821',
                '3.out': 'fef5f767008b27f5c3801382264f46ef',
                '4.in': '919d117956d3135c4c683ff021352f5c',
                '4.out': 'b39ffd5aa5029d696193c8362dcb1d19',
            }, is_system=True)

    def test_call_download_aoj_system_1169(self):
        # NOTE: the data exists, but AOJ says "..... (terminated because of the limitation)"
        # so no files are expected; the test only checks the command succeeds.
        self.snippet_call_download(
            'http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=1169&lang=jp',
            {
                # '1.in': 'f0ecaede832a038d0e940c2c4d0ab5e5',
                # '1.out': '8d2f7846dc2fc10ef37dcb548635c788',
            },
            is_system=True)
|
[
"kimiyuki95@gmail.com"
] |
kimiyuki95@gmail.com
|
e68f96a3c8e0dccbe87352d539826d189e75c6ea
|
663d429e1f552ef958d37cfe4a0707354b544a9a
|
/新建文件夹/theading_demo.py
|
7d32b29371111338cbc1370c36a639394c73a531
|
[] |
no_license
|
nie000/mylinuxlearn
|
72a33024648fc4393442511c85d7c439e169a960
|
813ed75a0018446cd661001e8803f50880d09fff
|
refs/heads/main
| 2023-06-20T07:46:11.842538
| 2021-07-15T13:46:43
| 2021-07-15T13:46:43
| 307,377,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,187
|
py
|
# import time
#
# def lo1():
# time.sleep(4)
# print('<------lo1-------->')
#
#
# def lo2():
# time.sleep(2)
# print('<------lo2-------->')
#
#
# def main():
# t1 = time.time()
# lo1()
# lo2()
# t2 = time.time()
#
# print('total time: {}'.format(t2-t1))
#
# if __name__ == "__main__":
# main()
# import time
# import threading
#
# def lo1():
# time.sleep(4)
# print('<------lo1-------->')
#
#
# def lo2():
# time.sleep(2)
# print('<------lo2-------->')
#
#
# def main():
# t1 = time.time()
# f1 = threading.Thread(target=lo1)
# f2 = threading.Thread(target=lo2)
# f1.start()
# f2.start()
# print('没有等到')
# f1.join()
# f2.join()
# t2 = time.time()
#
# print('total time: {}'.format(t2-t1))
#
# if __name__ == "__main__":
# main()
# import time
# #
# def lo1():
# a=0
# for index in range(100000000):
# a+=index
# print('<------lo1-------->')
#
#
# def lo2():
# a = 0
# for index in range(100000000):
# a += index
# print('<------lo2-------->')
#
#
# def main():
# t1 = time.time()
# lo1()
# lo2()
# t2 = time.time()
#
# print('total time: {}'.format(t2-t1))
#
# if __name__ == "__main__":
# main()
# import time
# import threading
#
# def lo1():
# a=0
# for index in range(100000000):
# a+=index
# print('<------lo1-------->')
#
#
# def lo2():
# a = 0
# for index in range(100000000):
# a += index
# print('<------lo2-------->')
#
#
# def main():
# t1 = time.time()
# f1 = threading.Thread(target=lo1)
# f2 = threading.Thread(target=lo2)
# f1.start()
# f2.start()
# print('没有等到')
# f1.join()
# f2.join()
# t2 = time.time()
#
# print('total time: {}'.format(t2-t1))
#
# if __name__ == "__main__":
# main()
# import time
# import threading
# from multiprocessing import Process
# def lo1():
# a=0
# for index in range(100000000):
# a+=index
# print('<------lo1-------->')
# def lo2():
# a = 0
# for index in range(100000000):
# a += index
# print('<------lo2-------->')
# def main():
# t1 = time.time()
# f1 =Process(target=lo1) #进程
# f2 =Process(target=lo2) #进程
# f1.start()
# f2.start()
# print('没有等到')
# f1.join()
# f2.join()
# t2 = time.time()
#
# print('total time: {}'.format(t2-t1))
#
# if __name__ == "__main__":
# main()
# import time
# import threading
#
# def lo1(a):
# a=0
# for index in range(100000000):
# a+=index
# print('<------lo1-------->')
#
#
# def lo2(b):
# a = 0
# for index in range(100000000):
# a += index
# print('<------lo2-------->')
#
#
# def main():
# t1 = time.time()
# f1 = threading.Thread(target=lo1,args=(1,))
# f2 = threading.Thread(target=lo2,args=(2,))
# f1.start()
# f2.start()
# print('没有等到')
# f1.join()
# f2.join()
# t2 = time.time()
#
# print('total time: {}'.format(t2-t1))
#
# if __name__ == "__main__":
# main()
# import threading
# import time
#
#
# class TestThread(threading.Thread):
# def __init__(self, target=None, args=None):
# # 调用父类方法
# super().__init__()
# self.target = target
# self.args = args
#
# # 当调用函数的时候使用的方法
# def run(self):
#
# self.target(*self.args)
#
# def test(i):
# time.sleep(i)
# print('execute thread:{}'.format(i))
#
# def loop():
# my_tasks = []
# for i in range(5):
# my_tasks.append(TestThread(target=test, args=(i,)))
# for i in my_tasks:
# i.start()
# for i in my_tasks:
# i.join()
# print("all down")
# loop()
import threading
import time

# Shared counter mutated by both worker threads.
a = 0
# Guards every read-modify-write of ``a``. Without it the two threads'
# ``a += 1`` / ``a -= 1`` statements interleave at the bytecode level and
# the final value is nondeterministic (the data race this demo exhibited).
_a_lock = threading.Lock()


def add(count=1000000):
    """Increment the shared counter ``count`` times (default 1,000,000).

    Returns the counter's value after this call, so the effect is
    observable without reaching into module globals.
    """
    global a
    for _ in range(count):
        with _a_lock:
            a += 1
    return a


def minus(count=1000000):
    """Decrement the shared counter ``count`` times (default 1,000,000).

    Returns the counter's value after this call.
    """
    global a
    for _ in range(count):
        with _a_lock:
            a -= 1
    return a


def main():
    """Run one incrementing and one decrementing thread concurrently.

    With the lock, the +1M and -1M updates cancel exactly, so this
    always prints 0. (The original version also slept 2 seconds after
    joining both threads; the sleep added nothing since the threads
    were already finished, so it has been removed.)
    """
    t1 = threading.Thread(target=add)
    t2 = threading.Thread(target=minus)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print(a)


if __name__ == '__main__':
    main()
|
[
"1073438012@qq.com"
] |
1073438012@qq.com
|
f7108362ca0be8b69099cbdc38f4759264539fd8
|
dc3c39d4dcaa1c4e4bd0e9405642159580a87767
|
/findlove/settings.py
|
869d4bdd5287dbfc4c64cd3e62d58556558361bc
|
[] |
no_license
|
ylearner/findlove
|
f0d98a5ed99565ee8db77ab8b767b4c874d28260
|
d5dd1ebe98fbb827e52088e98357b16accbcf3c8
|
refs/heads/master
| 2022-02-23T04:54:31.036032
| 2019-07-20T10:35:16
| 2019-07-20T10:35:16
| 197,905,314
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,273
|
py
|
"""
Django settings for findlove project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z2*tpu#wbcawhed3a+k8u5sc1j(#hz*+l(v5*-k$$71!^hnx2p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user',
'index',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'findlove.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'findlove.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
|
[
"you@example.com"
] |
you@example.com
|
862748235151ed238905e5074eb14d4850a9d11c
|
20927c6b6dbb360bf0fd13d70115bdb27e7196e7
|
/0x0A-python-inheritance/0-main.py~
|
a0817dce1ea8a7a0fc19bf8c91bb07dc3abd1dcc
|
[] |
no_license
|
PauloMorillo/holbertonschool-higher_level_programming
|
27fc1c0a1ae5784bd22d07daaedb602ee618867d
|
8a42a60aa4ea52b5cc2fb73e57f38aa6c5196c98
|
refs/heads/master
| 2021-08-16T17:13:45.568038
| 2020-07-29T01:20:25
| 2020-07-29T01:20:25
| 207,305,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
#!/usr/bin/python3
# Driver script for the `lookup` task: prints the attribute list of a few
# classes. The module name starts with a digit, so __import__ is required
# instead of a normal import statement.
lookup = __import__('0-lookup').lookup

# Empty class: only inherited object attributes should be listed.
class MyClass1(object):
    pass

# Class adding one class attribute and one method of its own.
class MyClass2(object):
    my_attr1 = 3

    def my_meth(self):
        pass

print(lookup(MyClass1))
print(lookup(MyClass2))
print(lookup(int))
|
[
"pauloan@hotmail.com"
] |
pauloan@hotmail.com
|
|
413873a9e910423c4d7a172c6080316e21adde8a
|
e2e08d7c97398a42e6554f913ee27340226994d9
|
/pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Custom_Rules/test_c155535.py
|
686cb9c5c388b3fdef651d4013c96e2f837745fe
|
[] |
no_license
|
lizhuoya1111/Automated_testing_practice
|
88e7be512e831d279324ad710946232377fb4c01
|
b3a532d33ddeb8d01fff315bcd59b451befdef23
|
refs/heads/master
| 2022-12-04T08:19:29.806445
| 2020-08-14T03:51:20
| 2020-08-14T03:51:20
| 287,426,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,636
|
py
|
import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def import *
from page_obj.scg.scg_button import *
from page_obj.scg.scg_def_firewall import *
from page_obj.scg.scg_def_sslvpn import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def_machine_learning import *
from page_obj.scg.scg_def_custom_rules import *
test_id = "155535"
# 1. 协议选择modbus,事件处理选择“阻断”,其余配置均正确,点击保存。
def test_c155535(browser):
try:
login_web(browser, url=dev1)
# # 清空自定义规则
# del_all_custom_rules_lzy(browser)
# 增加自定义规则
add_custom_rules_complete_lzy(browser, protocol_modubus='yes', protocol_s7='yes/no', protocol='modbus',
function='读取线圈状态', start_address='0',
end_address_or_length='end_address', end_address='9',
length='', start_data='', end_data='', action_modbus='阻断',
PduType='', FunctionType='', action_s7='', save='yes', cancel='no')
sleep(0.5)
# 获取信息
sleep(0.5)
info1 = browser.find_element_by_xpath('//*[@id="table"]/tbody/tr[2]').text
print(info1)
# 还原
# 删除自定义规则 //*[@id="table"]/tbody/tr[2]/td[7]/a[2]/img
try_num = 2
while try_num < 999:
try:
browser.find_element_by_xpath('//*[@id="table"]/tbody/tr['+str(try_num+1)+']/td[7]/a[2]/img').click()
break
except:
print('没有多余条目')
break
sleep(12)
delete_sslvpn_safe_site_lzy(browser, number='1')
try:
assert '阻断' in info1
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert '阻断' in info1
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
print(err)
reload(hostip=dev1)
# 删除自定义规则
sleep(1)
login_web(browser, url=dev1)
delete_sslvpn_safe_site_lzy(browser, number='1')
rail_fail(test_run_id, test_id)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
|
[
"15501866985@163.com"
] |
15501866985@163.com
|
79c1195ef4085e2d612189df158046cecb1a24af
|
425db5a849281d333e68c26a26678e7c8ce11b66
|
/LeetCodeSolutions/LeetCode_0763.py
|
ef5ed806ec733581044ae63e06a8c6bc47b835e1
|
[
"MIT"
] |
permissive
|
lih627/python-algorithm-templates
|
e8092b327a02506086414df41bbfb2af5d6b06dc
|
a61fd583e33a769b44ab758990625d3381793768
|
refs/heads/master
| 2021-07-23T17:10:43.814639
| 2021-01-21T17:14:55
| 2021-01-21T17:14:55
| 238,456,498
| 29
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
class Solution:
    def partitionLabels(self, S: str) -> List[int]:
        """Split S into the maximum number of parts so that each letter
        appears in at most one part, and return the part lengths in order.

        Single-pass greedy: precompute each character's last occurrence,
        then extend the current part's right boundary to the furthest last
        occurrence seen so far; when the scan index reaches that boundary
        the part can be closed. Runs in O(n) time, O(1) extra space
        (alphabet-sized map).

        Fixes the original implementation, which raised IndexError on an
        empty string (it unconditionally read the first merged interval);
        an empty input now yields [].
        """
        # Last index at which each character occurs.
        last = {ch: i for i, ch in enumerate(S)}
        sizes = []
        start = end = 0
        for i, ch in enumerate(S):
            # The current part must reach at least to ch's last occurrence.
            end = max(end, last[ch])
            if i == end:
                # No character in [start, end] appears later: close the part.
                sizes.append(end - start + 1)
                start = i + 1
        return sizes
|
[
"lih627@outlook.com"
] |
lih627@outlook.com
|
24f2c153c0a666812ed68fc440c33f3285d0bae6
|
44cbc067afcced7fac7ad7f4584d0d16d66bf5b4
|
/ansiblemetrics/general/lines_blank.py
|
15d2145c0b6d973e6c3bf69221cf9e16cf3bbb2d
|
[
"Apache-2.0"
] |
permissive
|
ElsevierSoftwareX/SOFTX_2020_231
|
fb75820e99dbd6f2380146ecf5b8893d69942260
|
e3ad95ebdc324ae308669d437ec60bd726580102
|
refs/heads/master
| 2023-01-18T22:53:04.785576
| 2020-11-18T11:52:39
| 2020-11-18T11:52:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
from ansiblemetrics.lines_metric import LinesMetric
class LinesBlank(LinesMetric):
    """Counts the blank lines of code (bloc) in a given .yaml file."""

    def count(self):
        """Return the number of whitespace-only lines in ``self.yml``."""
        # A line is blank when stripping whitespace leaves nothing.
        return sum(1 for line in self.yml.splitlines() if not line.strip())
|
[
"stefano.dallapalma0@gmail.com"
] |
stefano.dallapalma0@gmail.com
|
46721e360b7369375cc7838c189ffee765b1c1cf
|
8d78ee989a82bbff99d72facaa471a686961cb5b
|
/djangoProject/venv/Lib/site-packages/PIL/PcdImagePlugin.py
|
47a8708f7f16402699fe1f1bac76fcb0c71268b4
|
[] |
no_license
|
jaydevdesai/Course_Enrollment_System
|
f12ad576bba73f23f49093a73e363742f87f86a7
|
0e3ebe5af76ab6b42c31d18ac45a7ef5b4d6bf59
|
refs/heads/master
| 2023-06-07T06:31:51.921429
| 2021-07-04T13:52:02
| 2021-07-04T13:52:02
| 382,835,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
#
# The Python Imaging Library.
# $Id$
#
# PCD file handling
#
# History:
# 96-05-10 fl Created
# 96-05-27 fl Added draft mode (128x192, 256x384)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile
from ._binary import i8
##
# Image plugin for PhotoCD images. This plugin only reads the 768x512
# image from the file; higher resolutions are encoded in a proprietary
# encoding.
class PcdImageFile(ImageFile.ImageFile):
    """Image plugin for Kodak PhotoCD files.

    Only the 768x512 base resolution is decoded; higher resolutions use a
    proprietary encoding this plugin does not read.
    """

    format = "PCD"
    format_description = "Kodak PhotoCD"

    def _open(self):
        # rough: the image header lives in the second 2048-byte sector,
        # identified by the b"PCD_" magic at its start.
        self.fp.seek(2048)
        s = self.fp.read(2048)

        if s[:4] != b"PCD_":
            raise SyntaxError("not a PCD file")

        # Low two bits of header byte 1538 encode the rotation needed to
        # display the image upright; the actual rotate happens in load_end.
        orientation = i8(s[1538]) & 3
        self.tile_post_rotate = None
        if orientation == 1:
            self.tile_post_rotate = 90
        elif orientation == 3:
            self.tile_post_rotate = -90

        self.mode = "RGB"
        self._size = 768, 512  # FIXME: not correct for rotated images!
        # Pixel data for the base resolution starts at sector 96.
        self.tile = [("pcd", (0, 0) + self.size, 96 * 2048, None)]

    def load_end(self):
        if self.tile_post_rotate:
            # Handle rotated PCDs: rotate after decoding and fix the size.
            self.im = self.im.rotate(self.tile_post_rotate)
            self._size = self.im.size


#
# registry

Image.register_open(PcdImageFile.format, PcdImageFile)

Image.register_extension(PcdImageFile.format, ".pcd")
|
[
"jaydevdesai15@gmail.com"
] |
jaydevdesai15@gmail.com
|
397f19d4b75b7aea4234abb4b8304525b1030cbe
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/mon/target.py
|
cabbe846fbe9d58285c5d527f867aaecdc092217
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,412
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Target(Mo):
    """Abstract managed object for a monitoring target (monTarget).

    Generated from the Cisco ACI model: the class body populates a ClassMeta
    describing the MO's identity, containment (child classes), inheritance,
    and properties. Concrete subclasses are the fabric/EPG/infra targets
    registered below.
    """

    meta = ClassMeta("cobra.model.mon.Target")

    meta.isAbstract = True
    meta.moClassName = "monTarget"
    # NOTE(review): duplicated assignment emitted by the code generator.
    meta.moClassName = "monTarget"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Monitoring Target"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x800040000000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    # Child MO classes this target may contain.
    meta.childClasses.add("cobra.model.stats.Reportable")
    meta.childClasses.add("cobra.model.syslog.Src")
    meta.childClasses.add("cobra.model.fault.LcP")
    meta.childClasses.add("cobra.model.snmp.Src")
    meta.childClasses.add("cobra.model.stats.HierColl")
    meta.childClasses.add("cobra.model.event.SevAsnP")
    meta.childClasses.add("cobra.model.callhome.Src")
    meta.childClasses.add("cobra.model.health.Pol")
    meta.childClasses.add("cobra.model.fault.SevAsnP")
    meta.childClasses.add("cobra.model.stats.ExportP")
    meta.childClasses.add("cobra.model.fault.Delegate")

    # Relative-name (rn) prefixes used to resolve each child class.
    meta.childNamesAndRnPrefix.append(("cobra.model.snmp.Src", "snmpsrc-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Pol", "health-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.syslog.Src", "slsrc-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.event.SevAsnP", "esevp-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.callhome.Src", "chsrc-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.SevAsnP", "fsevp-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.stats.Reportable", "stat-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.LcP", "flcp-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.stats.HierColl", "coll-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.stats.ExportP", "exp-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    # Model inheritance chain and the concrete subclasses of this abstract MO.
    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Comp")
    meta.superClasses.add("cobra.model.mon.ATarget")

    meta.concreteSubClasses.add("cobra.model.mon.FabricTarget")

    meta.concreteSubClasses.add("cobra.model.mon.EPGTarget")

    meta.concreteSubClasses.add("cobra.model.mon.InfraTarget")

    meta.rnPrefixes = [
    ]

    # Property definitions: implicit bookkeeping props (childAction, dn, rn,
    # status) plus the configurable descr/name and implicit scope.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "scope", "scope", 5, PropCategory.REGULAR)
    prop.label = "Target Scope"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unspecified"
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("scope", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Abstract MO: no naming properties of its own, so namingVals is empty.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
2fc476b25c16b8bdd9f4e0e6b1bd076670904495
|
733ce69fcc11ea5ceed3783c6aa256f15510fcad
|
/venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
|
232dd95d2b2e6939f429a9ac6d5ebb44041e57fc
|
[] |
no_license
|
ethanbahmanyari0122/scooteq
|
c7fc25ab6619b43d8dbe5c5e44e9412ebbc700ba
|
ca5bf60cdacd92f41e318b23766316f4cd4db5fa
|
refs/heads/master
| 2023-06-01T23:57:03.671517
| 2021-06-18T10:35:31
| 2021-06-18T10:35:31
| 377,186,463
| 1
| 0
| null | 2021-06-18T08:53:09
| 2021-06-15T14:12:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,723
|
py
|
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method which takes a tree as sole argument and
returns an iterator which generates tokens.
"""
from __future__ import absolute_import, division, unicode_literals
from .. import constants
from .._utils import default_etree
__all__ = ["getTreeWalker", "pprint"]
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
    """Return a TreeWalker class for one of the built-in tree types.

    :arg str treeType: the name of the tree type required
        (case-insensitive). Supported values are:

        * "dom": The xml.dom.minidom DOM implementation
        * "etree": A generic walker for tree implementations exposing an
          elementtree-like interface (known to work with ElementTree,
          cElementTree and lxml.etree).
        * "lxml": Optimized walker for lxml.etree
        * "genshi": a Genshi stream

    :arg implementation: A module implementing the tree type e.g.
        xml.etree.ElementTree or cElementTree (Currently applies to the
        "etree" tree type only).

    :arg kwargs: keyword arguments passed to the etree walker--for other
        walkers, this has no effect

    :returns: a TreeWalker class, or None for an unrecognised ``treeType``

    """
    treeType = treeType.lower()

    cached = treeWalkerCache.get(treeType)
    if cached is not None:
        return cached

    if treeType == "dom":
        from . import dom
        walker = dom.TreeWalker
    elif treeType == "genshi":
        from . import genshi
        walker = genshi.TreeWalker
    elif treeType == "lxml":
        from . import etree_lxml
        walker = etree_lxml.TreeWalker
    elif treeType == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # XXX: NEVER cache here -- the etree submodule caches one walker
        # per concrete implementation itself.
        return etree.getETreeModule(implementation, **kwargs).TreeWalker
    else:
        # Unknown type: leave the cache untouched, as the original did.
        return None

    treeWalkerCache[treeType] = walker
    return walker
def concatenateCharacterTokens(tokens):
    """Yield *tokens* with runs of character tokens merged.

    Consecutive "Characters" / "SpaceCharacters" tokens are concatenated
    into a single "Characters" token; all other tokens pass through
    unchanged, with any buffered text flushed first.
    """
    buffered = []
    for token in tokens:
        if token["type"] in ("Characters", "SpaceCharacters"):
            buffered.append(token["data"])
            continue
        # Non-character token: flush accumulated text before yielding it.
        if buffered:
            yield {"type": "Characters", "data": "".join(buffered)}
            buffered = []
        yield token
    # Flush any trailing character data after the stream ends.
    if buffered:
        yield {"type": "Characters", "data": "".join(buffered)}
def pprint(walker):
    """Pretty printer for tree walkers

    Takes a TreeWalker instance and pretty prints the output of walking the
    tree, one token per line, indented two spaces per open element.

    :arg walker: a TreeWalker instance

    """
    output = []
    indent = 0
    # NOTE(review): the local `type` shadows the builtin within this function.
    for token in concatenateCharacterTokens(walker):
        type = token["type"]
        if type in ("StartTag", "EmptyTag"):
            # tag name: non-HTML namespaces are shown as a "<prefix name>" pair
            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
                if token["namespace"] in constants.prefixes:
                    ns = constants.prefixes[token["namespace"]]
                else:
                    ns = token["namespace"]
                name = "%s %s" % (ns, token["name"])
            else:
                name = token["name"]
            output.append("%s<%s>" % (" " * indent, name))
            indent += 2
            # attributes (sorted for consistent ordering)
            attrs = token["data"]
            for (namespace, localname), value in sorted(attrs.items()):
                if namespace:
                    if namespace in constants.prefixes:
                        ns = constants.prefixes[namespace]
                    else:
                        ns = namespace
                    name = "%s %s" % (ns, localname)
                else:
                    name = localname
                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
            # self-closing: an EmptyTag opens no scope, so undo the indent
            if type == "EmptyTag":
                indent -= 2
        elif type == "EndTag":
            indent -= 2
        elif type == "Comment":
            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
        elif type == "Doctype":
            # Four shapes: name+publicId, name+systemId only, bare name, no name.
            if token["name"]:
                if token["publicId"]:
                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["publicId"],
                                   token["systemId"] if token["systemId"] else ""))
                elif token["systemId"]:
                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
                                  (" " * indent,
                                   token["name"],
                                   token["systemId"]))
                else:
                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
                                                       token["name"]))
            else:
                output.append("%s<!DOCTYPE >" % (" " * indent,))
        elif type == "Characters":
            output.append("%s\"%s\"" % (" " * indent, token["data"]))
        elif type == "SpaceCharacters":
            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
        else:
            raise ValueError("Unknown token type, %s" % type)

    return "\n".join(output)
|
[
"70198067+ethanbahmanyari0122@users.noreply.github.com"
] |
70198067+ethanbahmanyari0122@users.noreply.github.com
|
109b898a3d962708caae6619f13ebf7b6b6a54c2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02239/s041311605.py
|
e539baf989142410359c23b219a92f876dc2bf59
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# -*- coding: utf-8 -*-
# BFS shortest-distance solver (competitive-programming style; presumably an
# AOJ graph problem — each output line is "vertex_id distance_from_vertex_1",
# with -1 for unreachable vertices).
import sys
from collections import deque

sys.setrecursionlimit(10 ** 9)

# Fast stdin helpers (boilerplate). NOTE: `input` deliberately shadows the
# builtin with a faster readline-based version; MAP and INF are unused here.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
INF=float('inf')

N=INT()
# Adjacency list, 0-indexed. Each input line is "u k v1 ... vk";
# l[1] (the degree k) is skipped via l[2:].
nodes=[[] for i in range(N)]
for i in range(N):
    l=LIST()
    u=l[0]
    l=l[2:]
    for v in l:
        nodes[u-1].append(v-1)
    nodes[u-1].sort()  # visit neighbours in ascending id order

# BFS from vertex 0; queue holds (distance, vertex) pairs.
que=deque()
que.append((0, 0))
costs=[-1]*N
while len(que):
    cost,node=que.popleft()
    # Duplicates may be enqueued; the costs[node]==-1 check at pop time
    # ensures each vertex is expanded only once, at its first (shortest) cost.
    if costs[node]==-1:
        costs[node]=cost
        for v in nodes[node]:
            que.append((cost+1, v))

# Output is 1-indexed vertex ids with their BFS distance.
for i in range(N):
    print(i+1, costs[i])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fe289312a128747beaccaa044fd07f24db8438cf
|
c098a0f39cc448ea06bb9e61f4b8155d9feeee82
|
/vsts/vsts/test/v4_1/models/test_iteration_details_model.py
|
535993773b4bc11bd85c9542a49f8cf105bc9dac
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
nimisha-srinivas/vsts-python-api
|
360713f009f948a425ccf5c65ded4ed9d79df07e
|
666db9dc30b5bdee026a2534dc2ab3965fad285c
|
refs/heads/master
| 2020-03-13T02:38:13.461082
| 2018-04-23T16:27:04
| 2018-04-23T16:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,904
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TestIterationDetailsModel(Model):
"""TestIterationDetailsModel.
:param action_results:
:type action_results: list of :class:`TestActionResultModel <test.v4_1.models.TestActionResultModel>`
:param attachments:
:type attachments: list of :class:`TestCaseResultAttachmentModel <test.v4_1.models.TestCaseResultAttachmentModel>`
:param comment:
:type comment: str
:param completed_date:
:type completed_date: datetime
:param duration_in_ms:
:type duration_in_ms: number
:param error_message:
:type error_message: str
:param id:
:type id: int
:param outcome:
:type outcome: str
:param parameters:
:type parameters: list of :class:`TestResultParameterModel <test.v4_1.models.TestResultParameterModel>`
:param started_date:
:type started_date: datetime
:param url:
:type url: str
"""
_attribute_map = {
'action_results': {'key': 'actionResults', 'type': '[TestActionResultModel]'},
'attachments': {'key': 'attachments', 'type': '[TestCaseResultAttachmentModel]'},
'comment': {'key': 'comment', 'type': 'str'},
'completed_date': {'key': 'completedDate', 'type': 'iso-8601'},
'duration_in_ms': {'key': 'durationInMs', 'type': 'number'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'outcome': {'key': 'outcome', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[TestResultParameterModel]'},
'started_date': {'key': 'startedDate', 'type': 'iso-8601'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, action_results=None, attachments=None, comment=None, completed_date=None, duration_in_ms=None, error_message=None, id=None, outcome=None, parameters=None, started_date=None, url=None):
super(TestIterationDetailsModel, self).__init__()
self.action_results = action_results
self.attachments = attachments
self.comment = comment
self.completed_date = completed_date
self.duration_in_ms = duration_in_ms
self.error_message = error_message
self.id = id
self.outcome = outcome
self.parameters = parameters
self.started_date = started_date
self.url = url
|
[
"tedchamb@microsoft.com"
] |
tedchamb@microsoft.com
|
147f74e7695a23f54e4d60422aa134405e303107
|
b167407960a3b69b16752590def1a62b297a4b0c
|
/tools/project-creator/Python2.6.6/Lib/ctypes/test/test_varsize_struct.py
|
f4c25715a02e5cb28aa1eee72294595b8126bbb1
|
[
"MIT"
] |
permissive
|
xcode1986/nineck.ca
|
543d1be2066e88a7db3745b483f61daedf5f378a
|
637dfec24407d220bb745beacebea4a375bfd78f
|
refs/heads/master
| 2020-04-15T14:48:08.551821
| 2019-01-15T07:36:06
| 2019-01-15T07:36:06
| 164,768,581
| 1
| 1
|
MIT
| 2019-01-15T08:30:27
| 2019-01-09T02:09:21
|
C++
|
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
from ctypes import *
import unittest
class VarSizeTest(unittest.TestCase):
def test_resize(self):
class X(Structure):
_fields_ = [("item", c_int),
("array", c_int * 1)]
self.failUnlessEqual(sizeof(X), sizeof(c_int) * 2)
x = X()
x.item = 42
x.array[0] = 100
self.failUnlessEqual(sizeof(x), sizeof(c_int) * 2)
# make room for one additional item
new_size = sizeof(X) + sizeof(c_int) * 1
resize(x, new_size)
self.failUnlessEqual(sizeof(x), new_size)
self.failUnlessEqual((x.item, x.array[0]), (42, 100))
# make room for 10 additional items
new_size = sizeof(X) + sizeof(c_int) * 9
resize(x, new_size)
self.failUnlessEqual(sizeof(x), new_size)
self.failUnlessEqual((x.item, x.array[0]), (42, 100))
# make room for one additional item
new_size = sizeof(X) + sizeof(c_int) * 1
resize(x, new_size)
self.failUnlessEqual(sizeof(x), new_size)
self.failUnlessEqual((x.item, x.array[0]), (42, 100))
def test_array_invalid_length(self):
# cannot create arrays with non-positive size
self.failUnlessRaises(ValueError, lambda: c_int * -1)
self.failUnlessRaises(ValueError, lambda: c_int * -3)
def test_zerosized_array(self):
array = (c_int * 0)()
# accessing elements of zero-sized arrays raise IndexError
self.failUnlessRaises(IndexError, array.__setitem__, 0, None)
self.failUnlessRaises(IndexError, array.__getitem__, 0)
self.failUnlessRaises(IndexError, array.__setitem__, 1, None)
self.failUnlessRaises(IndexError, array.__getitem__, 1)
self.failUnlessRaises(IndexError, array.__setitem__, -1, None)
self.failUnlessRaises(IndexError, array.__getitem__, -1)
if __name__ == "__main__":
unittest.main()
|
[
"278688386@qq.com"
] |
278688386@qq.com
|
4749151995a180d653b3e898e082677a1668e88a
|
139617b9e7c7dbbc592170a761a2f9fb1ee06734
|
/main_kaggle.py
|
0300f2b589b53fdf7d932cb6bf1cd133dfcab12b
|
[] |
no_license
|
ANKITPODDER2000/FSP_ML_Project
|
09aecb7d803a217a710914e6369ac4a790ec6424
|
bba6cf8f70f9c6977654856a8be8de75ebf22f77
|
refs/heads/master
| 2022-12-14T06:21:18.370814
| 2020-09-18T17:15:50
| 2020-09-18T17:15:50
| 295,822,511
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,817
|
py
|
# -*- coding: utf-8 -*-
"""Untitled7.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1N_Vdb2XxxCwvUDqTVVT7hoS0qYL73aSw
from google.colab import files
files.upload()
! mkdir ~/.kaggle
! cp kaggle.json ~/.kaggle/
! chmod 600 ~/.kaggle/kaggle.json
!kaggle competitions download -c titanic
"""
import pandas as pd
print("Version of pandas : ",pd.__version__)
import seaborn as sns
print("Version of seaborn : ",sns.__version__)
import matplotlib.pyplot as plt
import numpy as np
print("Version of numpy : ",np.__version__)
#%matplotlib inline
import sklearn as sk
print("Version of scikit-learnt : ",sk.__version__)
import math
train_data = pd.read_csv("./dataset.csv")
train_data.head()
#Name isn't needed
train_data.drop("Name" , axis = 1 , inplace = True)
sns.heatmap(train_data.isna())
plt.show()
train_data.drop("Cabin" , axis = 1 , inplace = True)
sns.heatmap(train_data.isna())
plt.show()
plt.plot(train_data['Age'].dropna())
plt.show()
data = train_data[['Age' , 'Pclass']].dropna()
sns.scatterplot(data = data , x = 'Pclass' , y = 'Age' , hue = 'Pclass' , cmap = "virdis")
plt.show()
sns.countplot(x = 'Pclass' , data = train_data)
plt.show()
sns.countplot(x = 'Survived' , data = train_data)
plt.show()
sns.countplot(x = 'Survived' , data = train_data , hue = "Pclass")
plt.show()
sns.countplot(x = 'Survived' , data = train_data , hue = "Sex")
plt.show()
avg_age_class1 = math.ceil(data[data['Pclass'] == 1]['Age'].mean())
print("avg_age_class1 : ",avg_age_class1)
avg_age_class2 = math.ceil(data[data['Pclass'] == 2]['Age'].mean())
print("avg_age_class2 : ",avg_age_class2)
avg_age_class3 = math.ceil(data[data['Pclass'] == 3]['Age'].mean())
print("avg_age_class3 : ",avg_age_class3)
def updateAge(List):
age = List[0]
Pclass = List[1]
if pd.isnull(age):
if Pclass == 1:
age = avg_age_class1
elif Pclass == 2:
age = avg_age_class2
else:
age = avg_age_class1
return age
train_data['Age'] = train_data[['Age' , "Pclass"]].apply(updateAge , axis = 1)
sns.heatmap(train_data.isna())
plt.show()
train_data.head(n = 3)
print("Parch value : " , train_data['Parch'].unique())
train_data.drop(columns = ['Embarked' , "Ticket" ] , axis = 1 , inplace = True)
def replaceSex(s):
if s == "male":
return 1
return 0
train_data['Sex'] = train_data['Sex'].apply(replaceSex) #1->male || 0->female
train_data.head(n = 3)
train_data.drop("PassengerId" , axis = 1 , inplace = True)
train_data.head(n = 3)
X = train_data.drop("Survived" , axis = 1)
y = train_data['Survived']
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=100)
print("Details of train_data : ")
print("Shape of X_train : " , X_train.shape)
print("Shape of y_train : " , y_train.shape)
print("Features name : ",X_train.columns)
print("Target name : Survived")
print("\n================================\n")
print("Details of test_data : ")
print("Shape of X_test : " , X_test.shape)
print("Shape of y_test : " , y_test.shape)
C_ = [0.0001,0.001,0.01,0.1,1,5,10,20,30,40,50,100]
model = {}
acc = []
val_acc = []
for i in C_:
model_name = "model_c_" + str(i)
model[model_name] = {}
model_LR = LogisticRegression(C = i).fit(X_train , y_train)
model[model_name]['model'] = model_LR
model[model_name]['acc'] = model_LR.score(X_train , y_train)
acc.append(model[model_name]['acc'])
model[model_name]['val_acc'] = model_LR.score(X_test , y_test)
val_acc.append(model[model_name]['val_acc'])
plt.plot(acc , label = "training_data")
plt.plot(val_acc,'o--' , label = "testing_data")
plt.ylabel("Accurecy")
plt.xlabel("C->")
plt.legend()
plt.show()
take_model = model["model_c_10"]['model']
from sklearn.metrics import classification_report , confusion_matrix , accuracy_score
print("Confusion matrix for train _data : ")
print(confusion_matrix(y_train , take_model.predict(X_train)))
print("Confusion matrix for test _data : ")
print(confusion_matrix(y_test , take_model.predict(X_test)))
print("Accurecy score for training data : %.3f %%"%( accuracy_score(y_train , take_model.predict(X_train)) * 100))
print("Accurecy score for training data : %.3f %%"%( accuracy_score(y_test , take_model.predict(X_test)) * 100))
print("Classification report for training data : \n============================================================\n")
print(classification_report(y_train , take_model.predict(X_train)))
print("\nClassification report for testing data : \n============================================================\n")
print(classification_report(y_test , take_model.predict(X_test)))
|
[
"ankitpodder0211@gmail.com"
] |
ankitpodder0211@gmail.com
|
52f4054f55228ea0919d84539947c10ee02a97c2
|
ad963dc590fe3ee16fe70674ffa9a77a3462a2d2
|
/taskManager/migrations/0011_auto_20200210_1539.py
|
3b5756687a8bcd93a9876ba0408a8f364cf10d02
|
[] |
no_license
|
ShuheiKuriki/task_manager
|
564dc1a646efdd288ff31bc9044981aecbd6db78
|
f5d4a53a758c64615f22c69baae59b36dd5dab1f
|
refs/heads/master
| 2023-05-12T11:06:11.388036
| 2023-01-15T09:12:37
| 2023-01-15T09:12:37
| 234,110,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Generated by Django 3.0.2 on 2020-02-10 06:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taskManager', '0010_delete_done'),
]
operations = [
migrations.RenameField(
model_name='linepush',
old_name='user_id',
new_name='line_id',
),
migrations.AddField(
model_name='linepush',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"shukuri.7336.8@gmail.com"
] |
shukuri.7336.8@gmail.com
|
b3fcb829703ca9ecdd728c1793c5b8f9213bfd5c
|
1a2ca64839723ede3134a0781128b0dc0b5f6ab8
|
/ExtractFeatures/Data/kracekumar/gettowork.py
|
fe439f5f1fff77243040ee035dd7483d80428c15
|
[] |
no_license
|
vivekaxl/LexisNexis
|
bc8ee0b92ae95a200c41bd077082212243ee248c
|
5fa3a818c3d41bd9c3eb25122e1d376c8910269c
|
refs/heads/master
| 2021-01-13T01:44:41.814348
| 2015-07-08T15:42:35
| 2015-07-08T15:42:35
| 29,705,371
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
def reducelist(elem):
nitems = len(elem)
if nitems > sum(elem):
return -1 # IMPOSSIBLE
empty_list = [0 for i in range(nitems)]
elem.sort()
#elem.reverse()
count = 0
while (sum(empty_list) != nitems):
each = elem.pop()
count += 1
for i in range(0,len(empty_list)):
if each == 0:
break
elif empty_list[i] == 1:
pass
else:
empty_list[i] = 1
each -= 1
return count
C = int(raw_input())
impossible = 0
for tc in range(C):
N, T = [int(x) for x in raw_input().split()]
E = int(raw_input())
res = [0 for i in range(N)]
for e in range(E):
H, P = [int(x) for x in raw_input().split()]
if H == T:
res[H-1] = 0
else:
if res[H-1] == 0:
res[H-1] = [P]
else:
res[H-1].append(P)
resstr = ''
for element in res:
if type(element) == type([]):
element = reducelist(element)
if element == -1:
resstr = 'IMPOSSIBLE'
break
else:
resstr = resstr + ' ' + str(element)
print 'Case #%d: %s' % (tc+1, resstr)
|
[
"vivekaxl@gmail.com"
] |
vivekaxl@gmail.com
|
9ef4bc80084e942c3e0264adedc1dce137002ed9
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/VBSjjlnu/Full2017v7/configuration_test_tau21wp_2017.py
|
5a6dff8102aa14fe4ce1a29741c1c293111bfbf4
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
# example of configuration file
treeName= 'Events'
tag = 'test_tau21wp_2017'
direc = "conf_test_tau21wp"
# used by mkShape to define output directory for root files
outputDir = 'rootFile_'+tag
# file with TTree aliases
aliasesFile = direc+'/aliases.py'
# file with list of variables
variablesFile = direc+'/variables.py'
# file with list of cuts
cutsFile = direc +'/cuts.py'
# file with list of samples
samplesFile = direc+'/samples.py'
#samplesFile = direc+'/samples.py'
#t file with list of samples
plotFile = direc+'/plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 41.5
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
#outputDirPlots = 'plot_'+tag +"_rescaled/detajpt_ext"
outputDirPlots = 'plot_'+tag + "/"
# used by mkDatacards to define output directory for datacards
#outputDirDatacard = 'datacards_'+tag
#outputDirDatacard = 'datacards_'+tag + "/Wjets_njets"
outputDirDatacard = 'datacards_'+tag + "_v4"
# structure file for datacard
structureFile = direc+'/structure.py'
# nuisances file for mkDatacards and for mkShape
# nuisancesFile = direc+'/nuisances_reduced.py'
# nuisancesFile = direc+'/nuisances_datacard.py'
# nuisancesFile = direc + '/nuisances_recover.py'
customizeScript = direc + "/customize.py"
|
[
"davide.valsecchi@cern.ch"
] |
davide.valsecchi@cern.ch
|
9db9ca8bc1f8cda57c69c3a2f593776bdb60eeb6
|
bfbe642d689b5595fc7a8e8ae97462c863ba267a
|
/src/CyPhyPET/Resources/zip.py
|
5ee5bf64d4dce688e9cf9be0eb40b6c5af188e56
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
mcanthony/meta-core
|
0c0a8cde1669f749a4880aca6f816d28742a9c68
|
3844cce391c1e6be053572810bad2b8405a9839b
|
refs/heads/master
| 2020-12-26T03:11:11.338182
| 2015-11-04T22:58:13
| 2015-11-04T22:58:13
| 45,806,011
| 1
| 0
| null | 2015-11-09T00:34:22
| 2015-11-09T00:34:22
| null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
#!/usr/bin/python
import zipfile
import os
import sys
import os.path
path_join = os.path.join
if sys.platform == 'win32':
def path_join(*args):
return '\\\\?\\' + os.path.join(os.getcwd(), os.path.join(*args))
output_filename = 'source_data.zip'
if os.path.exists(output_filename):
os.remove(output_filename)
with zipfile.ZipFile(output_filename, 'w', allowZip64=True) as z:
parent_dir_name = os.path.basename(os.getcwd())
os.chdir('..\\')
for dirpath,dirs,files in os.walk(parent_dir_name):
if '.git' in dirpath or '.svn' in dirpath:
continue
for f in files:
if output_filename == f or f.endswith('.suo'):
continue
fn = path_join(dirpath, f)
#print fn
z.write(fn, arcname=os.path.join(dirpath, f), compress_type=zipfile.ZIP_DEFLATED)
|
[
"kevin.m.smyth@gmail.com"
] |
kevin.m.smyth@gmail.com
|
0472e752dc24ddaf0d91505984789b83cdf34efd
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/components/feature_engagement/DEPS
|
f25c147bc846d9c605930e33ad2974793f4f91cd
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 226
|
include_rules = [
"-content",
"+components/feature_engagement/internal/jni_headers",
"+components/feature_engagement/features.h",
"+components/flags_ui",
"+components/keyed_service",
"+components/leveldb_proto",
]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
277f7795b863efce66e36fe0d3af9d010aeeb97f
|
9ae08906602af5eacec43d60e5e428269bf24eb1
|
/detection.py
|
648538a99d8aa1cf5d30ae48f7af2a87f8b59b77
|
[] |
no_license
|
yangzhaonan18/TSDcv2
|
9f73278979542d1a40ced5aa152bbc7fa363398c
|
e9cb0fefc7177db93510b7bc5ca1bb86e32571c6
|
refs/heads/master
| 2020-04-04T19:39:08.138349
| 2019-03-18T04:54:36
| 2019-03-18T04:54:36
| 156,214,910
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,944
|
py
|
# -*- coding:utf-8 -*-
import cv2
from cal_wh_ratio import cal_wh_ratio
from Crop_cnt import Crop_cnt
from cal_color_ratio import cal_ratio
from find_crop_center import find_crop_center
def detection(frame, BinColors, color, contours, i): # 判断是否是需要识别的对象 是返回1 否为0
"""
:param frame: 一张没有处理过的原始图片
:param BinColors: 经过颜色选择 二值化处理之后对应彩色部分的图片
:param color: 当前处理的颜色
:param contours: 当前颜色提取出的所有轮廓
:param i: 正在处理的轮廓下表号
:return: -1 表示当前编号对应的轮廓是不需要的后续对象(直接放弃的对象),1 表示是需要后续分类的对象
"""
print("def detection(frame, BinColors, color, contours, i): >>>")
# 输入只有一个轮廓
BinColors_show = BinColors.copy()
print("i = ", i)
cv2.drawContours(BinColors_show, contours, i, (0, 255, 255), 2) # 最后一个数字表示线条的粗细 -1时表示填充
cv2.imshow("detection/BinColors_show", BinColors_show) # 二值彩图上显示当前处理的轮廓
wh_ratio = cal_wh_ratio(contours[i]) # 返回轮廓的比例 [1,判断外接矩形的长宽比例 不应该很大
CropThing = Crop_cnt(frame, contours[i], color, wh_ratio) # 裁剪图片, 将图片变成水平的
color_ratio, cnt_ratio, rect_ratio, circle_ratio = cal_ratio(CropThing, color) # 计算轮廓面积 与 凸包面积的比例 不应该很大
if color_ratio == -1: # 排除计算异常的情况
print(">>> case: color_ratio == -1")
return None, -1
if wh_ratio[0] == -1: # 排除计算异常的情况
print(">>> case: wh_ratio[0] == -1 :", wh_ratio)
return None, -1
if wh_ratio[1] > 9: # 排除长宽比例和合理的情况
print(">>> case: wh_ratio[1] > 9 :", wh_ratio)
return None, -1
# if rect_ratio < 0.5: # 矩形度小于0.5的情况 三角形刚好0.5 红绿灯不可能小于0.5
# print(">>> case: rect_ratio < 0.5: ")
# 下面讨论只符合条件的情况 可能是红绿灯的情况:
# 红灯 = 红色 + 长窄比为1 + 尺寸(10:50)
if color == "red" and wh_ratio[1] == 1:
if wh_ratio[2][0] > 10 and wh_ratio[2][0] < 100 and color_ratio > 0.5 and color_ratio / cnt_ratio >= 1:
print(">>> a red light" * 10)
cv2.waitKey(0)
return CropThing, 1
if wh_ratio[2][0] > 15 and wh_ratio[2][0] < 150 and color_ratio / cnt_ratio != 1:
if color_ratio / cnt_ratio < 0.99: # 图标中间有非红色
print(">>> a red sign " * 10)
cv2.waitKey(0)
return CropThing, 1
elif color == "red" and wh_ratio[1] > 1 and wh_ratio[1] < 10: # 长宽比限制
if wh_ratio[2][0] > 15 and wh_ratio[2][
1] > 15 and color_ratio / cnt_ratio < 1 and color_ratio < 0.85 and color_ratio > 0.3:
print(">>> many red sign " * 10)
cv2.waitKey(0)
CropThing_show, center, radius = find_crop_center(CropThing, color)
return CropThing_show, 1
if color == "green" and wh_ratio[1] == 1 and color_ratio > 0.4 and wh_ratio[2][0] > 10 and wh_ratio[2][
0] < 100 and color_ratio / cnt_ratio >= 1:
print(">>> a green light" * 10)
cv2.waitKey(0)
return CropThing, 1
if color == "blue" and wh_ratio[1] == 1:
print(">>> a blue sign" * 10)
cv2.waitKey(0)
return CropThing, 1
elif color == "blue" and wh_ratio[0] == 1 and wh_ratio[2][0] > 20 and wh_ratio[2][0] < 150 and (
wh_ratio[1] == 2 or wh_ratio[1] == 3):
print(">>> many longitudinal blue sign" * 10)
cv2.waitKey(0)
CropThing_show, center, radius = find_crop_center(CropThing, color)
return CropThing_show, 1
if color == "yellow" and wh_ratio[1] == 1 and color_ratio > 0.4 and wh_ratio[2][0] > 10 and wh_ratio[2][
0] < 100 and color_ratio / cnt_ratio >= 1:
print(">>> a yellow light" * 10)
cv2.waitKey(0)
return CropThing, 1
if color == "yellow" and wh_ratio[0] == 0 and wh_ratio[1] == 2 and wh_ratio[2][0] > 50 and wh_ratio[2][
0] < 400 and color_ratio / cnt_ratio < 0.9 and color_ratio > 0.5 and cnt_ratio > 0.9:
print(">>> a yellow ETC sign " * 10)
cv2.waitKey(0)
return CropThing, 1
elif color == "yellow" and wh_ratio[1] == 1 and color_ratio > 0.5:
print(">>> mabey a yellow work sign")
cv2.waitKey(0)
return CropThing, 1
# center, radius = find_crop_center(CropThing, color)
# cv2.drawContours(frame, [box[0:2]], 0, (0, 0, 255), 2) # 画外接矩形
# cv2.imshow("frame", frame)
# print("wh_ratio:", wh_ratio)
# print("color_ratio:", color, "=", color_ratio)
# print("good " * 10)
else:
return None, -1
|
[
"1040238158@qq.com"
] |
1040238158@qq.com
|
9e87c0e877171415faf85154197b17b0bd660a82
|
5173c3e3956387a3f2ae8fcf4aed7c7a600dac78
|
/Algorithm_Practice/Subset_BackTrackinig.py
|
3179a5128a63a3e2612e05b9a06d76ea6127d75a
|
[] |
no_license
|
ma0723/Min_Algorithm
|
df75f53f6e89b7817d4b52d686effb8236a4ddac
|
b02d1043008cb32e22daa9d4207b9a45f111d66f
|
refs/heads/master
| 2023-07-25T11:00:15.397093
| 2021-08-30T02:08:05
| 2021-08-30T02:08:05
| 375,613,927
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
#s: 선택된 원소들의 합
#r:남은 원소들의 합(선택가능한 것들중에)
def f(i, N, K,s, r ):
global cnt
if s == K:
# 부분집합의 원소의 합이 K인 것을 찾음
cnt+=1
return
# 이미 K를 찾았으므로 원소가 추가되면 K보다 커지므로 고려 X
elif i == N:
# 모든 원소를 고려함 K는 못찾아냄
return
elif s > K:
# 현재까지의 합이 K보다 커지는 경우
return
elif s + r < K :
# 남은 원소를 모두 포함해도 K가 안되는 경우
return
else:
f(i+1,N,K,s,r-(i+1))
# i번째 원소를 선택하지 않은 경우
f(i+1,N,K,s+(i+1),r-(i+1))
# i번째 원소 선택
cnt = 0
N = 10 #1에서부터 N까지 집합의 원소
K = 10 #부분집합의 합
f(0,N,K,0, (N+1) * N //2 ) #선택된 원소의 합, 아직 선택되지 않은 원소의 합
print(cnt)
|
[
"ma0723@naver.com"
] |
ma0723@naver.com
|
93ebf812cf2dc1ab6fe7dacb49a0940fadec933e
|
9e549ee54faa8b037f90eac8ecb36f853e460e5e
|
/venv/lib/python3.6/site-packages/pylint/test/functional/bad_reversed_sequence.py
|
5a7b1f3dad23ad88d9b3fc7280087bcf9eae9d3b
|
[
"MIT"
] |
permissive
|
aitoehigie/britecore_flask
|
e8df68e71dd0eac980a7de8c0f20b5a5a16979fe
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
refs/heads/master
| 2022-12-09T22:07:45.930238
| 2019-05-15T04:10:37
| 2019-05-15T04:10:37
| 177,354,667
| 0
| 0
|
MIT
| 2022-12-08T04:54:09
| 2019-03-24T00:38:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,181
|
py
|
""" Checks that reversed() receive proper argument """
# pylint: disable=missing-docstring, useless-object-inheritance
# pylint: disable=too-few-public-methods,no-self-use,no-absolute-import
from collections import deque
__revision__ = 0
class GoodReversed(object):
""" Implements __reversed__ """
def __reversed__(self):
return [1, 2, 3]
class SecondGoodReversed(object):
""" Implements __len__ and __getitem__ """
def __len__(self):
return 3
def __getitem__(self, index):
return index
class BadReversed(object):
""" implements only len() """
def __len__(self):
return 3
class SecondBadReversed(object):
""" implements only __getitem__ """
def __getitem__(self, index):
return index
class ThirdBadReversed(dict):
""" dict subclass """
def uninferable(seq):
""" This can't be infered at this moment,
make sure we don't have a false positive.
"""
return reversed(seq)
def test(path):
""" test function """
seq = reversed() # No argument given
seq = reversed(None) # [bad-reversed-sequence]
seq = reversed([1, 2, 3])
seq = reversed((1, 2, 3))
seq = reversed(set()) # [bad-reversed-sequence]
seq = reversed({"a": 1, "b": 2}) # [bad-reversed-sequence]
seq = reversed(iter([1, 2, 3])) # [bad-reversed-sequence]
seq = reversed(GoodReversed())
seq = reversed(SecondGoodReversed())
seq = reversed(BadReversed()) # [bad-reversed-sequence]
seq = reversed(SecondBadReversed()) # [bad-reversed-sequence]
seq = reversed(range(100))
seq = reversed(ThirdBadReversed()) # [bad-reversed-sequence]
seq = reversed(lambda: None) # [bad-reversed-sequence]
seq = reversed(deque([]))
seq = reversed("123")
seq = uninferable([1, 2, 3])
seq = reversed(path.split("/"))
return seq
def test_dict_ancestor_and_reversed():
"""Don't emit for subclasses of dict, with __reversed__ implemented."""
from collections import OrderedDict
class Child(dict):
def __reversed__(self):
return reversed(range(10))
seq = reversed(OrderedDict())
return reversed(Child()), seq
|
[
"aitoehigie@gmail.com"
] |
aitoehigie@gmail.com
|
14f4384b3f417db7ee5e8c97bf8f2c110123b40a
|
bfbe642d689b5595fc7a8e8ae97462c863ba267a
|
/bin/Python27/Lib/site-packages/openmdao.main-0.8.1-py2.7.egg/openmdao/main/api.py
|
829c20fd341079e51ad207c522ec775c63353842
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
mcanthony/meta-core
|
0c0a8cde1669f749a4880aca6f816d28742a9c68
|
3844cce391c1e6be053572810bad2b8405a9839b
|
refs/heads/master
| 2020-12-26T03:11:11.338182
| 2015-11-04T22:58:13
| 2015-11-04T22:58:13
| 45,806,011
| 1
| 0
| null | 2015-11-09T00:34:22
| 2015-11-09T00:34:22
| null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
"""
Pseudo package containing all of the main classes/objects in the
openmdao.main API.
"""
from openmdao.util.log import logger, enable_console
from openmdao.main.expreval import ExprEvaluator
from openmdao.main.factory import Factory
from openmdao.main.factorymanager import create, get_available_types
from openmdao.main.container import Container, get_default_name, \
create_io_traits
from openmdao.main.vartree import VariableTree
from openmdao.main.component import Component, SimulationRoot
from openmdao.main.component_with_derivatives import ComponentWithDerivatives
from openmdao.main.driver_uses_derivatives import DriverUsesDerivatives
from openmdao.main.assembly import Assembly, set_as_top, dump_iteration_tree
from openmdao.main.driver import Driver, Run_Once
from openmdao.main.workflow import Workflow
from openmdao.main.dataflow import Dataflow
from openmdao.main.sequentialflow import SequentialWorkflow
from openmdao.main.cyclicflow import CyclicWorkflow
from openmdao.main.variable import Variable
from openmdao.main.exceptions import ConstraintError
from openmdao.main.interfaces import implements, Attribute, Interface
from openmdao.main.file_supp import FileMetadata
from openmdao.main.case import Case
from openmdao.main.arch import Architecture
from openmdao.main.problem_formulation import ArchitectureAssembly, OptProblem
from openmdao.util.eggsaver import SAVE_PICKLE, SAVE_CPICKLE #, SAVE_YAML, SAVE_LIBYAML
from openmdao.units import convert_units
# TODO: This probably shouldn't be here. Removing it will require edits to some
# of our plugins
from openmdao.main.datatypes.slot import Slot
|
[
"kevin.m.smyth@gmail.com"
] |
kevin.m.smyth@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.